platform/x86: intel_pmc_core: Add option to set/clear LPM mode
drivers/platform/x86/intel_pmc_core.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Intel Core SoC Power Management Controller Driver
4  *
5  * Copyright (c) 2016, Intel Corporation.
6  * All Rights Reserved.
7  *
8  * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
9  *          Vishwanath Somayaji <vishwanath.somayaji@intel.com>
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/acpi.h>
15 #include <linux/bitfield.h>
16 #include <linux/debugfs.h>
17 #include <linux/delay.h>
18 #include <linux/dmi.h>
19 #include <linux/io.h>
20 #include <linux/module.h>
21 #include <linux/pci.h>
22 #include <linux/platform_device.h>
23 #include <linux/slab.h>
24 #include <linux/suspend.h>
25 #include <linux/uaccess.h>
26 #include <linux/uuid.h>
27
28 #include <acpi/acpi_bus.h>
29 #include <asm/cpu_device_id.h>
30 #include <asm/intel-family.h>
31 #include <asm/msr.h>
32 #include <asm/tsc.h>
33
34 #include "intel_pmc_core.h"
35
36 #define ACPI_S0IX_DSM_UUID              "57a6512e-3979-4e9d-9708-ff13b2508972"
37 #define ACPI_GET_LOW_MODE_REGISTERS     1
38
39 /* PKGC MSRs are common across Intel Core SoCs */
40 static const struct pmc_bit_map msr_map[] = {
41         {"Package C2",                  MSR_PKG_C2_RESIDENCY},
42         {"Package C3",                  MSR_PKG_C3_RESIDENCY},
43         {"Package C6",                  MSR_PKG_C6_RESIDENCY},
44         {"Package C7",                  MSR_PKG_C7_RESIDENCY},
45         {"Package C8",                  MSR_PKG_C8_RESIDENCY},
46         {"Package C9",                  MSR_PKG_C9_RESIDENCY},
47         {"Package C10",                 MSR_PKG_C10_RESIDENCY},
48         {}
49 };
50
51 static const struct pmc_bit_map spt_pll_map[] = {
52         {"MIPI PLL",                    SPT_PMC_BIT_MPHY_CMN_LANE0},
53         {"GEN2 USB2PCIE2 PLL",          SPT_PMC_BIT_MPHY_CMN_LANE1},
54         {"DMIPCIE3 PLL",                SPT_PMC_BIT_MPHY_CMN_LANE2},
55         {"SATA PLL",                    SPT_PMC_BIT_MPHY_CMN_LANE3},
56         {}
57 };
58
59 static const struct pmc_bit_map spt_mphy_map[] = {
60         {"MPHY CORE LANE 0",           SPT_PMC_BIT_MPHY_LANE0},
61         {"MPHY CORE LANE 1",           SPT_PMC_BIT_MPHY_LANE1},
62         {"MPHY CORE LANE 2",           SPT_PMC_BIT_MPHY_LANE2},
63         {"MPHY CORE LANE 3",           SPT_PMC_BIT_MPHY_LANE3},
64         {"MPHY CORE LANE 4",           SPT_PMC_BIT_MPHY_LANE4},
65         {"MPHY CORE LANE 5",           SPT_PMC_BIT_MPHY_LANE5},
66         {"MPHY CORE LANE 6",           SPT_PMC_BIT_MPHY_LANE6},
67         {"MPHY CORE LANE 7",           SPT_PMC_BIT_MPHY_LANE7},
68         {"MPHY CORE LANE 8",           SPT_PMC_BIT_MPHY_LANE8},
69         {"MPHY CORE LANE 9",           SPT_PMC_BIT_MPHY_LANE9},
70         {"MPHY CORE LANE 10",          SPT_PMC_BIT_MPHY_LANE10},
71         {"MPHY CORE LANE 11",          SPT_PMC_BIT_MPHY_LANE11},
72         {"MPHY CORE LANE 12",          SPT_PMC_BIT_MPHY_LANE12},
73         {"MPHY CORE LANE 13",          SPT_PMC_BIT_MPHY_LANE13},
74         {"MPHY CORE LANE 14",          SPT_PMC_BIT_MPHY_LANE14},
75         {"MPHY CORE LANE 15",          SPT_PMC_BIT_MPHY_LANE15},
76         {}
77 };
78
79 static const struct pmc_bit_map spt_pfear_map[] = {
80         {"PMC",                         SPT_PMC_BIT_PMC},
81         {"OPI-DMI",                     SPT_PMC_BIT_OPI},
82         {"SPI / eSPI",                  SPT_PMC_BIT_SPI},
83         {"XHCI",                        SPT_PMC_BIT_XHCI},
84         {"SPA",                         SPT_PMC_BIT_SPA},
85         {"SPB",                         SPT_PMC_BIT_SPB},
86         {"SPC",                         SPT_PMC_BIT_SPC},
87         {"GBE",                         SPT_PMC_BIT_GBE},
88         {"SATA",                        SPT_PMC_BIT_SATA},
89         {"HDA-PGD0",                    SPT_PMC_BIT_HDA_PGD0},
90         {"HDA-PGD1",                    SPT_PMC_BIT_HDA_PGD1},
91         {"HDA-PGD2",                    SPT_PMC_BIT_HDA_PGD2},
92         {"HDA-PGD3",                    SPT_PMC_BIT_HDA_PGD3},
93         {"RSVD",                        SPT_PMC_BIT_RSVD_0B},
94         {"LPSS",                        SPT_PMC_BIT_LPSS},
95         {"LPC",                         SPT_PMC_BIT_LPC},
96         {"SMB",                         SPT_PMC_BIT_SMB},
97         {"ISH",                         SPT_PMC_BIT_ISH},
98         {"P2SB",                        SPT_PMC_BIT_P2SB},
99         {"DFX",                         SPT_PMC_BIT_DFX},
100         {"SCC",                         SPT_PMC_BIT_SCC},
101         {"RSVD",                        SPT_PMC_BIT_RSVD_0C},
102         {"FUSE",                        SPT_PMC_BIT_FUSE},
103         {"CAMERA",                      SPT_PMC_BIT_CAMREA},
104         {"RSVD",                        SPT_PMC_BIT_RSVD_0D},
105         {"USB3-OTG",                    SPT_PMC_BIT_USB3_OTG},
106         {"EXI",                         SPT_PMC_BIT_EXI},
107         {"CSE",                         SPT_PMC_BIT_CSE},
108         {"CSME_KVM",                    SPT_PMC_BIT_CSME_KVM},
109         {"CSME_PMT",                    SPT_PMC_BIT_CSME_PMT},
110         {"CSME_CLINK",                  SPT_PMC_BIT_CSME_CLINK},
111         {"CSME_PTIO",                   SPT_PMC_BIT_CSME_PTIO},
112         {"CSME_USBR",                   SPT_PMC_BIT_CSME_USBR},
113         {"CSME_SUSRAM",                 SPT_PMC_BIT_CSME_SUSRAM},
114         {"CSME_SMT",                    SPT_PMC_BIT_CSME_SMT},
115         {"RSVD",                        SPT_PMC_BIT_RSVD_1A},
116         {"CSME_SMS2",                   SPT_PMC_BIT_CSME_SMS2},
117         {"CSME_SMS1",                   SPT_PMC_BIT_CSME_SMS1},
118         {"CSME_RTC",                    SPT_PMC_BIT_CSME_RTC},
119         {"CSME_PSF",                    SPT_PMC_BIT_CSME_PSF},
120         {}
121 };
122
123 static const struct pmc_bit_map *ext_spt_pfear_map[] = {
124         /*
125          * Check intel_pmc_core_ids[] users of spt_reg_map for
126          * a list of core SoCs using this.
127          */
128         spt_pfear_map,
129         NULL
130 };
131
132 static const struct pmc_bit_map spt_ltr_show_map[] = {
133         {"SOUTHPORT_A",         SPT_PMC_LTR_SPA},
134         {"SOUTHPORT_B",         SPT_PMC_LTR_SPB},
135         {"SATA",                SPT_PMC_LTR_SATA},
136         {"GIGABIT_ETHERNET",    SPT_PMC_LTR_GBE},
137         {"XHCI",                SPT_PMC_LTR_XHCI},
138         {"Reserved",            SPT_PMC_LTR_RESERVED},
139         {"ME",                  SPT_PMC_LTR_ME},
140         /* EVA is Enterprise Value Add; it doesn't really exist on the PCH */
141         {"EVA",                 SPT_PMC_LTR_EVA},
142         {"SOUTHPORT_C",         SPT_PMC_LTR_SPC},
143         {"HD_AUDIO",            SPT_PMC_LTR_AZ},
144         {"LPSS",                SPT_PMC_LTR_LPSS},
145         {"SOUTHPORT_D",         SPT_PMC_LTR_SPD},
146         {"SOUTHPORT_E",         SPT_PMC_LTR_SPE},
147         {"CAMERA",              SPT_PMC_LTR_CAM},
148         {"ESPI",                SPT_PMC_LTR_ESPI},
149         {"SCC",                 SPT_PMC_LTR_SCC},
150         {"ISH",                 SPT_PMC_LTR_ISH},
151         /* Below two cannot be used for LTR_IGNORE */
152         {"CURRENT_PLATFORM",    SPT_PMC_LTR_CUR_PLT},
153         {"AGGREGATED_SYSTEM",   SPT_PMC_LTR_CUR_ASLT},
154         {}
155 };
156
157 static const struct pmc_reg_map spt_reg_map = {
158         .pfear_sts = ext_spt_pfear_map,
159         .mphy_sts = spt_mphy_map,
160         .pll_sts = spt_pll_map,
161         .ltr_show_sts = spt_ltr_show_map,
162         .msr_sts = msr_map,
163         .slp_s0_offset = SPT_PMC_SLP_S0_RES_COUNTER_OFFSET,
164         .slp_s0_res_counter_step = SPT_PMC_SLP_S0_RES_COUNTER_STEP,
165         .ltr_ignore_offset = SPT_PMC_LTR_IGNORE_OFFSET,
166         .regmap_length = SPT_PMC_MMIO_REG_LEN,
167         .ppfear0_offset = SPT_PMC_XRAM_PPFEAR0A,
168         .ppfear_buckets = SPT_PPFEAR_NUM_ENTRIES,
169         .pm_cfg_offset = SPT_PMC_PM_CFG_OFFSET,
170         .pm_read_disable_bit = SPT_PMC_READ_DISABLE_BIT,
171         .ltr_ignore_max = SPT_NUM_IP_IGN_ALLOWED,
172         .pm_vric1_offset = SPT_PMC_VRIC1_OFFSET,
173 };
174
175 /* Cannon Lake: PGD PFET Enable Ack Status Register(s) bitmap */
176 static const struct pmc_bit_map cnp_pfear_map[] = {
177         {"PMC",                 BIT(0)},
178         {"OPI-DMI",             BIT(1)},
179         {"SPI/eSPI",            BIT(2)},
180         {"XHCI",                BIT(3)},
181         {"SPA",                 BIT(4)},
182         {"SPB",                 BIT(5)},
183         {"SPC",                 BIT(6)},
184         {"GBE",                 BIT(7)},
185
186         {"SATA",                BIT(0)},
187         {"HDA_PGD0",            BIT(1)},
188         {"HDA_PGD1",            BIT(2)},
189         {"HDA_PGD2",            BIT(3)},
190         {"HDA_PGD3",            BIT(4)},
191         {"SPD",                 BIT(5)},
192         {"LPSS",                BIT(6)},
193         {"LPC",                 BIT(7)},
194
195         {"SMB",                 BIT(0)},
196         {"ISH",                 BIT(1)},
197         {"P2SB",                BIT(2)},
198         {"NPK_VNN",             BIT(3)},
199         {"SDX",                 BIT(4)},
200         {"SPE",                 BIT(5)},
201         {"Fuse",                BIT(6)},
202         {"SBR8",                BIT(7)},
203
204         {"CSME_FSC",            BIT(0)},
205         {"USB3_OTG",            BIT(1)},
206         {"EXI",                 BIT(2)},
207         {"CSE",                 BIT(3)},
208         {"CSME_KVM",            BIT(4)},
209         {"CSME_PMT",            BIT(5)},
210         {"CSME_CLINK",          BIT(6)},
211         {"CSME_PTIO",           BIT(7)},
212
213         {"CSME_USBR",           BIT(0)},
214         {"CSME_SUSRAM",         BIT(1)},
215         {"CSME_SMT1",           BIT(2)},
216         {"CSME_SMT4",           BIT(3)},
217         {"CSME_SMS2",           BIT(4)},
218         {"CSME_SMS1",           BIT(5)},
219         {"CSME_RTC",            BIT(6)},
220         {"CSME_PSF",            BIT(7)},
221
222         {"SBR0",                BIT(0)},
223         {"SBR1",                BIT(1)},
224         {"SBR2",                BIT(2)},
225         {"SBR3",                BIT(3)},
226         {"SBR4",                BIT(4)},
227         {"SBR5",                BIT(5)},
228         {"CSME_PECI",           BIT(6)},
229         {"PSF1",                BIT(7)},
230
231         {"PSF2",                BIT(0)},
232         {"PSF3",                BIT(1)},
233         {"PSF4",                BIT(2)},
234         {"CNVI",                BIT(3)},
235         {"UFS0",                BIT(4)},
236         {"EMMC",                BIT(5)},
237         {"SPF",                 BIT(6)},
238         {"SBR6",                BIT(7)},
239
240         {"SBR7",                BIT(0)},
241         {"NPK_AON",             BIT(1)},
242         {"HDA_PGD4",            BIT(2)},
243         {"HDA_PGD5",            BIT(3)},
244         {"HDA_PGD6",            BIT(4)},
245         {"PSF6",                BIT(5)},
246         {"PSF7",                BIT(6)},
247         {"PSF8",                BIT(7)},
248         {}
249 };
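
/*
 * Note: each eight-entry group above corresponds to one byte of the PPFEAR
 * register space. pmc_core_ppfear_show() reads those registers a byte at a
 * time with pmc_core_reg_read_byte() and indexes this table through
 * pf_regs[index / 8], so the grouping is expected to mirror the register
 * layout.
 */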
250
251 static const struct pmc_bit_map *ext_cnp_pfear_map[] = {
252         /*
253          * Check intel_pmc_core_ids[] users of cnp_reg_map for
254          * a list of core SoCs using this.
255          */
256         cnp_pfear_map,
257         NULL
258 };
259
260 static const struct pmc_bit_map icl_pfear_map[] = {
261         {"RES_65",              BIT(0)},
262         {"RES_66",              BIT(1)},
263         {"RES_67",              BIT(2)},
264         {"TAM",                 BIT(3)},
265         {"GBETSN",              BIT(4)},
266         {"TBTLSX",              BIT(5)},
267         {"RES_71",              BIT(6)},
268         {"RES_72",              BIT(7)},
269         {}
270 };
271
272 static const struct pmc_bit_map *ext_icl_pfear_map[] = {
273         /*
274          * Check intel_pmc_core_ids[] users of icl_reg_map for
275          * a list of core SoCs using this.
276          */
277         cnp_pfear_map,
278         icl_pfear_map,
279         NULL
280 };
281
282 static const struct pmc_bit_map tgl_pfear_map[] = {
283         {"PSF9",                BIT(0)},
284         {"RES_66",              BIT(1)},
285         {"RES_67",              BIT(2)},
286         {"RES_68",              BIT(3)},
287         {"RES_69",              BIT(4)},
288         {"RES_70",              BIT(5)},
289         {"TBTLSX",              BIT(6)},
290         {}
291 };
292
293 static const struct pmc_bit_map *ext_tgl_pfear_map[] = {
294         /*
295          * Check intel_pmc_core_ids[] users of tgl_reg_map for
296          * a list of core SoCs using this.
297          */
298         cnp_pfear_map,
299         tgl_pfear_map,
300         NULL
301 };
302
303 static const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
304         {"AUDIO_D3",            BIT(0)},
305         {"OTG_D3",              BIT(1)},
306         {"XHCI_D3",             BIT(2)},
307         {"LPIO_D3",             BIT(3)},
308         {"SDX_D3",              BIT(4)},
309         {"SATA_D3",             BIT(5)},
310         {"UFS0_D3",             BIT(6)},
311         {"UFS1_D3",             BIT(7)},
312         {"EMMC_D3",             BIT(8)},
313         {}
314 };
315
316 static const struct pmc_bit_map cnp_slps0_dbg1_map[] = {
317         {"SDIO_PLL_OFF",        BIT(0)},
318         {"USB2_PLL_OFF",        BIT(1)},
319         {"AUDIO_PLL_OFF",       BIT(2)},
320         {"OC_PLL_OFF",          BIT(3)},
321         {"MAIN_PLL_OFF",        BIT(4)},
322         {"XOSC_OFF",            BIT(5)},
323         {"LPC_CLKS_GATED",      BIT(6)},
324         {"PCIE_CLKREQS_IDLE",   BIT(7)},
325         {"AUDIO_ROSC_OFF",      BIT(8)},
326         {"HPET_XOSC_CLK_REQ",   BIT(9)},
327         {"PMC_ROSC_SLOW_CLK",   BIT(10)},
328         {"AON2_ROSC_GATED",     BIT(11)},
329         {"CLKACKS_DEASSERTED",  BIT(12)},
330         {}
331 };
332
333 static const struct pmc_bit_map cnp_slps0_dbg2_map[] = {
334         {"MPHY_CORE_GATED",     BIT(0)},
335         {"CSME_GATED",          BIT(1)},
336         {"USB2_SUS_GATED",      BIT(2)},
337         {"DYN_FLEX_IO_IDLE",    BIT(3)},
338         {"GBE_NO_LINK",         BIT(4)},
339         {"THERM_SEN_DISABLED",  BIT(5)},
340         {"PCIE_LOW_POWER",      BIT(6)},
341         {"ISH_VNNAON_REQ_ACT",  BIT(7)},
342         {"ISH_VNN_REQ_ACT",     BIT(8)},
343         {"CNV_VNNAON_REQ_ACT",  BIT(9)},
344         {"CNV_VNN_REQ_ACT",     BIT(10)},
345         {"NPK_VNNON_REQ_ACT",   BIT(11)},
346         {"PMSYNC_STATE_IDLE",   BIT(12)},
347         {"ALST_GT_THRES",       BIT(13)},
348         {"PMC_ARC_PG_READY",    BIT(14)},
349         {}
350 };
351
352 static const struct pmc_bit_map *cnp_slps0_dbg_maps[] = {
353         cnp_slps0_dbg0_map,
354         cnp_slps0_dbg1_map,
355         cnp_slps0_dbg2_map,
356         NULL
357 };
358
359 static const struct pmc_bit_map cnp_ltr_show_map[] = {
360         {"SOUTHPORT_A",         CNP_PMC_LTR_SPA},
361         {"SOUTHPORT_B",         CNP_PMC_LTR_SPB},
362         {"SATA",                CNP_PMC_LTR_SATA},
363         {"GIGABIT_ETHERNET",    CNP_PMC_LTR_GBE},
364         {"XHCI",                CNP_PMC_LTR_XHCI},
365         {"Reserved",            CNP_PMC_LTR_RESERVED},
366         {"ME",                  CNP_PMC_LTR_ME},
367         /* EVA is Enterprise Value Add; it doesn't really exist on the PCH */
368         {"EVA",                 CNP_PMC_LTR_EVA},
369         {"SOUTHPORT_C",         CNP_PMC_LTR_SPC},
370         {"HD_AUDIO",            CNP_PMC_LTR_AZ},
371         {"CNV",                 CNP_PMC_LTR_CNV},
372         {"LPSS",                CNP_PMC_LTR_LPSS},
373         {"SOUTHPORT_D",         CNP_PMC_LTR_SPD},
374         {"SOUTHPORT_E",         CNP_PMC_LTR_SPE},
375         {"CAMERA",              CNP_PMC_LTR_CAM},
376         {"ESPI",                CNP_PMC_LTR_ESPI},
377         {"SCC",                 CNP_PMC_LTR_SCC},
378         {"ISH",                 CNP_PMC_LTR_ISH},
379         {"UFSX2",               CNP_PMC_LTR_UFSX2},
380         {"EMMC",                CNP_PMC_LTR_EMMC},
381         /*
382          * Check intel_pmc_core_ids[] users of cnp_reg_map for
383          * a list of core SoCs using this.
384          */
385         {"WIGIG",               ICL_PMC_LTR_WIGIG},
386         /* Below two cannot be used for LTR_IGNORE */
387         {"CURRENT_PLATFORM",    CNP_PMC_LTR_CUR_PLT},
388         {"AGGREGATED_SYSTEM",   CNP_PMC_LTR_CUR_ASLT},
389         {}
390 };
391
392 static const struct pmc_reg_map cnp_reg_map = {
393         .pfear_sts = ext_cnp_pfear_map,
394         .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
395         .slp_s0_res_counter_step = SPT_PMC_SLP_S0_RES_COUNTER_STEP,
396         .slps0_dbg_maps = cnp_slps0_dbg_maps,
397         .ltr_show_sts = cnp_ltr_show_map,
398         .msr_sts = msr_map,
399         .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
400         .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
401         .regmap_length = CNP_PMC_MMIO_REG_LEN,
402         .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
403         .ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES,
404         .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
405         .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
406         .ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED,
407         .etr3_offset = ETR3_OFFSET,
408 };
409
410 static const struct pmc_reg_map icl_reg_map = {
411         .pfear_sts = ext_icl_pfear_map,
412         .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
413         .slp_s0_res_counter_step = ICL_PMC_SLP_S0_RES_COUNTER_STEP,
414         .slps0_dbg_maps = cnp_slps0_dbg_maps,
415         .ltr_show_sts = cnp_ltr_show_map,
416         .msr_sts = msr_map,
417         .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
418         .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
419         .regmap_length = CNP_PMC_MMIO_REG_LEN,
420         .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
421         .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
422         .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
423         .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
424         .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
425         .etr3_offset = ETR3_OFFSET,
426 };
427
428 static const struct pmc_bit_map tgl_clocksource_status_map[] = {
429         {"USB2PLL_OFF_STS",                     BIT(18)},
430         {"PCIe/USB3.1_Gen2PLL_OFF_STS",         BIT(19)},
431         {"PCIe_Gen3PLL_OFF_STS",                BIT(20)},
432         {"OPIOPLL_OFF_STS",                     BIT(21)},
433         {"OCPLL_OFF_STS",                       BIT(22)},
434         {"MainPLL_OFF_STS",                     BIT(23)},
435         {"MIPIPLL_OFF_STS",                     BIT(24)},
436         {"Fast_XTAL_Osc_OFF_STS",               BIT(25)},
437         {"AC_Ring_Osc_OFF_STS",                 BIT(26)},
438         {"MC_Ring_Osc_OFF_STS",                 BIT(27)},
439         {"SATAPLL_OFF_STS",                     BIT(29)},
440         {"XTAL_USB2PLL_OFF_STS",                BIT(31)},
441         {}
442 };
443
444 static const struct pmc_bit_map tgl_power_gating_status_map[] = {
445         {"CSME_PG_STS",                         BIT(0)},
446         {"SATA_PG_STS",                         BIT(1)},
447         {"xHCI_PG_STS",                         BIT(2)},
448         {"UFSX2_PG_STS",                        BIT(3)},
449         {"OTG_PG_STS",                          BIT(5)},
450         {"SPA_PG_STS",                          BIT(6)},
451         {"SPB_PG_STS",                          BIT(7)},
452         {"SPC_PG_STS",                          BIT(8)},
453         {"SPD_PG_STS",                          BIT(9)},
454         {"SPE_PG_STS",                          BIT(10)},
455         {"SPF_PG_STS",                          BIT(11)},
456         {"LSX_PG_STS",                          BIT(13)},
457         {"P2SB_PG_STS",                         BIT(14)},
458         {"PSF_PG_STS",                          BIT(15)},
459         {"SBR_PG_STS",                          BIT(16)},
460         {"OPIDMI_PG_STS",                       BIT(17)},
461         {"THC0_PG_STS",                         BIT(18)},
462         {"THC1_PG_STS",                         BIT(19)},
463         {"GBETSN_PG_STS",                       BIT(20)},
464         {"GBE_PG_STS",                          BIT(21)},
465         {"LPSS_PG_STS",                         BIT(22)},
466         {"MMP_UFSX2_PG_STS",                    BIT(23)},
467         {"MMP_UFSX2B_PG_STS",                   BIT(24)},
468         {"FIA_PG_STS",                          BIT(25)},
469         {}
470 };
471
472 static const struct pmc_bit_map tgl_d3_status_map[] = {
473         {"ADSP_D3_STS",                         BIT(0)},
474         {"SATA_D3_STS",                         BIT(1)},
475         {"xHCI0_D3_STS",                        BIT(2)},
476         {"xDCI1_D3_STS",                        BIT(5)},
477         {"SDX_D3_STS",                          BIT(6)},
478         {"EMMC_D3_STS",                         BIT(7)},
479         {"IS_D3_STS",                           BIT(8)},
480         {"THC0_D3_STS",                         BIT(9)},
481         {"THC1_D3_STS",                         BIT(10)},
482         {"GBE_D3_STS",                          BIT(11)},
483         {"GBE_TSN_D3_STS",                      BIT(12)},
484         {}
485 };
486
487 static const struct pmc_bit_map tgl_vnn_req_status_map[] = {
488         {"GPIO_COM0_VNN_REQ_STS",               BIT(1)},
489         {"GPIO_COM1_VNN_REQ_STS",               BIT(2)},
490         {"GPIO_COM2_VNN_REQ_STS",               BIT(3)},
491         {"GPIO_COM3_VNN_REQ_STS",               BIT(4)},
492         {"GPIO_COM4_VNN_REQ_STS",               BIT(5)},
493         {"GPIO_COM5_VNN_REQ_STS",               BIT(6)},
494         {"Audio_VNN_REQ_STS",                   BIT(7)},
495         {"ISH_VNN_REQ_STS",                     BIT(8)},
496         {"CNVI_VNN_REQ_STS",                    BIT(9)},
497         {"eSPI_VNN_REQ_STS",                    BIT(10)},
498         {"Display_VNN_REQ_STS",                 BIT(11)},
499         {"DTS_VNN_REQ_STS",                     BIT(12)},
500         {"SMBUS_VNN_REQ_STS",                   BIT(14)},
501         {"CSME_VNN_REQ_STS",                    BIT(15)},
502         {"SMLINK0_VNN_REQ_STS",                 BIT(16)},
503         {"SMLINK1_VNN_REQ_STS",                 BIT(17)},
504         {"CLINK_VNN_REQ_STS",                   BIT(20)},
505         {"DCI_VNN_REQ_STS",                     BIT(21)},
506         {"ITH_VNN_REQ_STS",                     BIT(22)},
507         {"CSME_VNN_REQ_STS",                    BIT(24)},
508         {"GBE_VNN_REQ_STS",                     BIT(25)},
509         {}
510 };
511
512 static const struct pmc_bit_map tgl_vnn_misc_status_map[] = {
513         {"CPU_C10_REQ_STS_0",                   BIT(0)},
514         {"PCIe_LPM_En_REQ_STS_3",               BIT(3)},
515         {"ITH_REQ_STS_5",                       BIT(5)},
516         {"CNVI_REQ_STS_6",                      BIT(6)},
517         {"ISH_REQ_STS_7",                       BIT(7)},
518         {"USB2_SUS_PG_Sys_REQ_STS_10",          BIT(10)},
519         {"PCIe_Clk_REQ_STS_12",                 BIT(12)},
520         {"MPHY_Core_DL_REQ_STS_16",             BIT(16)},
521         {"Break-even_En_REQ_STS_17",            BIT(17)},
522         {"Auto-demo_En_REQ_STS_18",             BIT(18)},
523         {"MPHY_SUS_REQ_STS_22",                 BIT(22)},
524         {"xDCI_attached_REQ_STS_24",            BIT(24)},
525         {}
526 };
527
528 static const struct pmc_bit_map tgl_signal_status_map[] = {
529         {"LSX_Wake0_En_STS",                    BIT(0)},
530         {"LSX_Wake0_Pol_STS",                   BIT(1)},
531         {"LSX_Wake1_En_STS",                    BIT(2)},
532         {"LSX_Wake1_Pol_STS",                   BIT(3)},
533         {"LSX_Wake2_En_STS",                    BIT(4)},
534         {"LSX_Wake2_Pol_STS",                   BIT(5)},
535         {"LSX_Wake3_En_STS",                    BIT(6)},
536         {"LSX_Wake3_Pol_STS",                   BIT(7)},
537         {"LSX_Wake4_En_STS",                    BIT(8)},
538         {"LSX_Wake4_Pol_STS",                   BIT(9)},
539         {"LSX_Wake5_En_STS",                    BIT(10)},
540         {"LSX_Wake5_Pol_STS",                   BIT(11)},
541         {"LSX_Wake6_En_STS",                    BIT(12)},
542         {"LSX_Wake6_Pol_STS",                   BIT(13)},
543         {"LSX_Wake7_En_STS",                    BIT(14)},
544         {"LSX_Wake7_Pol_STS",                   BIT(15)},
545         {"Intel_Se_IO_Wake0_En_STS",            BIT(16)},
546         {"Intel_Se_IO_Wake0_Pol_STS",           BIT(17)},
547         {"Intel_Se_IO_Wake1_En_STS",            BIT(18)},
548         {"Intel_Se_IO_Wake1_Pol_STS",           BIT(19)},
549         {"Int_Timer_SS_Wake0_En_STS",           BIT(20)},
550         {"Int_Timer_SS_Wake0_Pol_STS",          BIT(21)},
551         {"Int_Timer_SS_Wake1_En_STS",           BIT(22)},
552         {"Int_Timer_SS_Wake1_Pol_STS",          BIT(23)},
553         {"Int_Timer_SS_Wake2_En_STS",           BIT(24)},
554         {"Int_Timer_SS_Wake2_Pol_STS",          BIT(25)},
555         {"Int_Timer_SS_Wake3_En_STS",           BIT(26)},
556         {"Int_Timer_SS_Wake3_Pol_STS",          BIT(27)},
557         {"Int_Timer_SS_Wake4_En_STS",           BIT(28)},
558         {"Int_Timer_SS_Wake4_Pol_STS",          BIT(29)},
559         {"Int_Timer_SS_Wake5_En_STS",           BIT(30)},
560         {"Int_Timer_SS_Wake5_Pol_STS",          BIT(31)},
561         {}
562 };
563
564 static const struct pmc_bit_map *tgl_lpm_maps[] = {
565         tgl_clocksource_status_map,
566         tgl_power_gating_status_map,
567         tgl_d3_status_map,
568         tgl_vnn_req_status_map,
569         tgl_vnn_misc_status_map,
570         tgl_signal_status_map,
571         NULL
572 };
573
574 static const struct pmc_reg_map tgl_reg_map = {
575         .pfear_sts = ext_tgl_pfear_map,
576         .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
577         .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
578         .ltr_show_sts = cnp_ltr_show_map,
579         .msr_sts = msr_map,
580         .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
581         .regmap_length = CNP_PMC_MMIO_REG_LEN,
582         .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
583         .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
584         .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
585         .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
586         .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED,
587         .lpm_num_maps = TGL_LPM_NUM_MAPS,
588         .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
589         .lpm_sts_latch_en_offset = TGL_LPM_STS_LATCH_EN_OFFSET,
590         .lpm_en_offset = TGL_LPM_EN_OFFSET,
591         .lpm_priority_offset = TGL_LPM_PRI_OFFSET,
592         .lpm_residency_offset = TGL_LPM_RESIDENCY_OFFSET,
593         .lpm_sts = tgl_lpm_maps,
594         .lpm_status_offset = TGL_LPM_STATUS_OFFSET,
595         .lpm_live_status_offset = TGL_LPM_LIVE_STATUS_OFFSET,
596         .etr3_offset = ETR3_OFFSET,
597 };
598
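/*
 * Note: the _DSM evaluated below (function ACPI_GET_LOW_MODE_REGISTERS) is
 * expected to return a buffer of LPM_MAX_NUM_MODES * lpm_num_maps 32-bit
 * requirement words; the copy kept in pmcdev->lpm_req_regs is consumed later
 * by pmc_core_substate_req_regs_show(). Since lpm_size is already a byte
 * count, the devm_kzalloc() of lpm_size * sizeof(u32) appears to reserve
 * four times the space that is actually memcpy()'d.
 */
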
599 static void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev)
600 {
601         struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
602         const int num_maps = pmcdev->map->lpm_num_maps;
603         u32 lpm_size = LPM_MAX_NUM_MODES * num_maps * 4;
604         union acpi_object *out_obj;
605         struct acpi_device *adev;
606         guid_t s0ix_dsm_guid;
607         u32 *lpm_req_regs, *addr;
608
609         adev = ACPI_COMPANION(&pdev->dev);
610         if (!adev)
611                 return;
612
613         guid_parse(ACPI_S0IX_DSM_UUID, &s0ix_dsm_guid);
614
615         out_obj = acpi_evaluate_dsm(adev->handle, &s0ix_dsm_guid, 0,
616                                     ACPI_GET_LOW_MODE_REGISTERS, NULL);
617         if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
618                 u32 size = out_obj->buffer.length;
619
620                 if (size != lpm_size) {
621                         acpi_handle_debug(adev->handle,
622                                 "_DSM returned unexpected buffer size, have %u, expect %u\n",
623                                 size, lpm_size);
624                         goto free_acpi_obj;
625                 }
626         } else {
627                 acpi_handle_debug(adev->handle,
628                                   "_DSM function 0 evaluation failed\n");
629                 goto free_acpi_obj;
630         }
631
632         addr = (u32 *)out_obj->buffer.pointer;
633
634         lpm_req_regs = devm_kzalloc(&pdev->dev, lpm_size * sizeof(u32),
635                                      GFP_KERNEL);
636         if (!lpm_req_regs)
637                 goto free_acpi_obj;
638
639         memcpy(lpm_req_regs, addr, lpm_size);
640         pmcdev->lpm_req_regs = lpm_req_regs;
641
642 free_acpi_obj:
643         ACPI_FREE(out_obj);
644 }
645
646 static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
647 {
648         return readl(pmcdev->regbase + reg_offset);
649 }
650
651 static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset,
652                                       u32 val)
653 {
654         writel(val, pmcdev->regbase + reg_offset);
655 }
656
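/*
 * Note: the SLP_S0 residency counter ticks once per slp_s0_res_counter_step
 * time units, so the helper below multiplies the raw register value by the
 * per-platform step. The units are assumed to be microseconds, following the
 * *_SLP_S0_RES_COUNTER_STEP definitions in intel_pmc_core.h.
 */
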
657 static inline u64 pmc_core_adjust_slp_s0_step(struct pmc_dev *pmcdev, u32 value)
658 {
659         return (u64)value * pmcdev->map->slp_s0_res_counter_step;
660 }
661
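/*
 * Note: ETR3 shadows the legacy reset-control (CF9h) logic. set_etr3()
 * bails out with -EACCES when firmware has locked the register
 * (ETR3_CF9LOCK) and otherwise sets ETR3_CF9GR, which presumably promotes
 * the next CF9-initiated reset to a global (full platform) reset; consult
 * the PCH EDS for the authoritative definition of these bits.
 */
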
662 static int set_etr3(struct pmc_dev *pmcdev)
663 {
664         const struct pmc_reg_map *map = pmcdev->map;
665         u32 reg;
666         int err;
667
668         if (!map->etr3_offset)
669                 return -EOPNOTSUPP;
670
671         mutex_lock(&pmcdev->lock);
672
673         /* check if CF9 is locked */
674         reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
675         if (reg & ETR3_CF9LOCK) {
676                 err = -EACCES;
677                 goto out_unlock;
678         }
679
680         /* write CF9 global reset bit */
681         reg |= ETR3_CF9GR;
682         pmc_core_reg_write(pmcdev, map->etr3_offset, reg);
683
684         reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
685         if (!(reg & ETR3_CF9GR)) {
686                 err = -EIO;
687                 goto out_unlock;
688         }
689
690         err = 0;
691
692 out_unlock:
693         mutex_unlock(&pmcdev->lock);
694         return err;
695 }
696 static umode_t etr3_is_visible(struct kobject *kobj,
697                                 struct attribute *attr,
698                                 int idx)
699 {
700         struct device *dev = container_of(kobj, struct device, kobj);
701         struct pmc_dev *pmcdev = dev_get_drvdata(dev);
702         const struct pmc_reg_map *map = pmcdev->map;
703         u32 reg;
704
705         mutex_lock(&pmcdev->lock);
706         reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
707         mutex_unlock(&pmcdev->lock);
708
709         return reg & ETR3_CF9LOCK ? attr->mode & (SYSFS_PREALLOC | 0444) : attr->mode;
710 }
711
712 static ssize_t etr3_show(struct device *dev,
713                                  struct device_attribute *attr, char *buf)
714 {
715         struct pmc_dev *pmcdev = dev_get_drvdata(dev);
716         const struct pmc_reg_map *map = pmcdev->map;
717         u32 reg;
718
719         if (!map->etr3_offset)
720                 return -EOPNOTSUPP;
721
722         mutex_lock(&pmcdev->lock);
723
724         reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
725         reg &= ETR3_CF9GR | ETR3_CF9LOCK;
726
727         mutex_unlock(&pmcdev->lock);
728
729         return sysfs_emit(buf, "0x%08x\n", reg);
730 }
731
732 static ssize_t etr3_store(struct device *dev,
733                                   struct device_attribute *attr,
734                                   const char *buf, size_t len)
735 {
736         struct pmc_dev *pmcdev = dev_get_drvdata(dev);
737         int err;
738         u32 reg;
739
740         err = kstrtouint(buf, 16, &reg);
741         if (err)
742                 return err;
743
744         /* allow only CF9 writes */
745         if (reg != ETR3_CF9GR)
746                 return -EINVAL;
747
748         err = set_etr3(pmcdev);
749         if (err)
750                 return err;
751
752         return len;
753 }
754 static DEVICE_ATTR_RW(etr3);
755
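/*
 * Note: the "etr3" device attribute only accepts the hex value of
 * ETR3_CF9GR (etr3_store() parses with kstrtouint(buf, 16, ...) and rejects
 * anything else), and etr3_is_visible() drops it to read-only once
 * ETR3_CF9LOCK is set. Hypothetical usage, assuming ETR3_CF9GR is BIT(20):
 *
 *   # cat /sys/devices/.../etr3
 *   0x00000000
 *   # echo 0x100000 > /sys/devices/.../etr3
 */
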
756 static struct attribute *pmc_attrs[] = {
757         &dev_attr_etr3.attr,
758         NULL
759 };
760
761 static const struct attribute_group pmc_attr_group = {
762         .attrs = pmc_attrs,
763         .is_visible = etr3_is_visible,
764 };
765
766 static const struct attribute_group *pmc_dev_groups[] = {
767         &pmc_attr_group,
768         NULL
769 };
770
771 static int pmc_core_dev_state_get(void *data, u64 *val)
772 {
773         struct pmc_dev *pmcdev = data;
774         const struct pmc_reg_map *map = pmcdev->map;
775         u32 value;
776
777         value = pmc_core_reg_read(pmcdev, map->slp_s0_offset);
778         *val = pmc_core_adjust_slp_s0_step(pmcdev, value);
779
780         return 0;
781 }
782
783 DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");
784
785 static int pmc_core_check_read_lock_bit(struct pmc_dev *pmcdev)
786 {
787         u32 value;
788
789         value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_cfg_offset);
790         return value & BIT(pmcdev->map->pm_read_disable_bit);
791 }
792
793 static void pmc_core_slps0_display(struct pmc_dev *pmcdev, struct device *dev,
794                                    struct seq_file *s)
795 {
796         const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
797         const struct pmc_bit_map *map;
798         int offset = pmcdev->map->slps0_dbg_offset;
799         u32 data;
800
801         while (*maps) {
802                 map = *maps;
803                 data = pmc_core_reg_read(pmcdev, offset);
804                 offset += 4;
805                 while (map->name) {
806                         if (dev)
807                                 dev_info(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
808                                         map->name,
809                                         data & map->bit_mask ? "Yes" : "No");
810                         if (s)
811                                 seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
812                                            map->name,
813                                            data & map->bit_mask ? "Yes" : "No");
814                         ++map;
815                 }
816                 ++maps;
817         }
818 }
819
820 static int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps)
821 {
822         int idx;
823
824         for (idx = 0; maps[idx]; idx++)
825                 ;/* Nothing */
826
827         return idx;
828 }
829
830 static void pmc_core_lpm_display(struct pmc_dev *pmcdev, struct device *dev,
831                                  struct seq_file *s, u32 offset,
832                                  const char *str,
833                                  const struct pmc_bit_map **maps)
834 {
835         int index, idx, len = 32, bit_mask, arr_size;
836         u32 *lpm_regs;
837
838         arr_size = pmc_core_lpm_get_arr_size(maps);
839         lpm_regs = kmalloc_array(arr_size, sizeof(*lpm_regs), GFP_KERNEL);
840         if (!lpm_regs)
841                 return;
842
843         for (index = 0; index < arr_size; index++) {
844                 lpm_regs[index] = pmc_core_reg_read(pmcdev, offset);
845                 offset += 4;
846         }
847
848         for (idx = 0; idx < arr_size; idx++) {
849                 if (dev)
850                         dev_info(dev, "\nLPM_%s_%d:\t0x%x\n", str, idx,
851                                 lpm_regs[idx]);
852                 if (s)
853                         seq_printf(s, "\nLPM_%s_%d:\t0x%x\n", str, idx,
854                                    lpm_regs[idx]);
855                 for (index = 0; maps[idx][index].name && index < len; index++) {
856                         bit_mask = maps[idx][index].bit_mask;
857                         if (dev)
858                                 dev_info(dev, "%-30s %-30d\n",
859                                         maps[idx][index].name,
860                                         lpm_regs[idx] & bit_mask ? 1 : 0);
861                         if (s)
862                                 seq_printf(s, "%-30s %-30d\n",
863                                            maps[idx][index].name,
864                                            lpm_regs[idx] & bit_mask ? 1 : 0);
865                 }
866         }
867
868         kfree(lpm_regs);
869 }
870
871 static bool slps0_dbg_latch;
872
873 static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
874 {
875         return readb(pmcdev->regbase + offset);
876 }
877
878 static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
879                                  u8 pf_reg, const struct pmc_bit_map **pf_map)
880 {
881         seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n",
882                    ip, pf_map[idx][index].name,
883                    pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
884 }
885
886 static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
887 {
888         struct pmc_dev *pmcdev = s->private;
889         const struct pmc_bit_map **maps = pmcdev->map->pfear_sts;
890         u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
891         int index, iter, idx, ip = 0;
892
893         iter = pmcdev->map->ppfear0_offset;
894
895         for (index = 0; index < pmcdev->map->ppfear_buckets &&
896              index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
897                 pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
898
899         for (idx = 0; maps[idx]; idx++) {
900                 for (index = 0; maps[idx][index].name &&
901                      index < pmcdev->map->ppfear_buckets * 8; ip++, index++)
902                         pmc_core_display_map(s, index, idx, ip,
903                                              pf_regs[index / 8], maps);
904         }
905
906         return 0;
907 }
908 DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear);
909
910 /* Return the MTPMC link/mailbox status: 0 means ready to accept a new message */
911 static int pmc_core_mtpmc_link_status(struct pmc_dev *pmcdev)
912 {
913         u32 value;
914
915         value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_STS_OFFSET);
916         return value & BIT(SPT_PMC_MSG_FULL_STS_BIT);
917 }
918
919 static int pmc_core_send_msg(struct pmc_dev *pmcdev, u32 *addr_xram)
920 {
921         u32 dest;
922         int timeout;
923
924         for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
925                 if (pmc_core_mtpmc_link_status(pmcdev) == 0)
926                         break;
927                 msleep(5);
928         }
929
930         if (timeout <= 0 && pmc_core_mtpmc_link_status(pmcdev))
931                 return -EBUSY;
932
933         dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
934         pmc_core_reg_write(pmcdev, SPT_PMC_MTPMC_OFFSET, dest);
935         return 0;
936 }
937
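/*
 * Note: the two helpers above implement the MTPMC/MFPMC mailbox handshake
 * used by the MPHY and PLL status dumps below: poll until
 * SPT_PMC_MSG_FULL_STS_BIT clears, write the target XRAM address to
 * SPT_PMC_MTPMC_OFFSET, then (per the latency comment in
 * pmc_core_pll_show()) wait roughly 10 ms before reading the result from
 * SPT_PMC_MFPMC_OFFSET.
 */
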
938 static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
939 {
940         struct pmc_dev *pmcdev = s->private;
941         const struct pmc_bit_map *map = pmcdev->map->mphy_sts;
942         u32 mphy_core_reg_low, mphy_core_reg_high;
943         u32 val_low, val_high;
944         int index, err = 0;
945
946         if (pmcdev->pmc_xram_read_bit) {
947                 seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
948                 return 0;
949         }
950
951         mphy_core_reg_low  = (SPT_PMC_MPHY_CORE_STS_0 << 16);
952         mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);
953
954         mutex_lock(&pmcdev->lock);
955
956         if (pmc_core_send_msg(pmcdev, &mphy_core_reg_low) != 0) {
957                 err = -EBUSY;
958                 goto out_unlock;
959         }
960
961         msleep(10);
962         val_low = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
963
964         if (pmc_core_send_msg(pmcdev, &mphy_core_reg_high) != 0) {
965                 err = -EBUSY;
966                 goto out_unlock;
967         }
968
969         msleep(10);
970         val_high = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
971
972         for (index = 0; index < 8 && map[index].name; index++) {
973                 seq_printf(s, "%-32s\tState: %s\n",
974                            map[index].name,
975                            map[index].bit_mask & val_low ? "Not power gated" :
976                            "Power gated");
977         }
978
979         for (index = 8; map[index].name; index++) {
980                 seq_printf(s, "%-32s\tState: %s\n",
981                            map[index].name,
982                            map[index].bit_mask & val_high ? "Not power gated" :
983                            "Power gated");
984         }
985
986 out_unlock:
987         mutex_unlock(&pmcdev->lock);
988         return err;
989 }
990 DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg);
991
992 static int pmc_core_pll_show(struct seq_file *s, void *unused)
993 {
994         struct pmc_dev *pmcdev = s->private;
995         const struct pmc_bit_map *map = pmcdev->map->pll_sts;
996         u32 mphy_common_reg, val;
997         int index, err = 0;
998
999         if (pmcdev->pmc_xram_read_bit) {
1000                 seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
1001                 return 0;
1002         }
1003
1004         mphy_common_reg  = (SPT_PMC_MPHY_COM_STS_0 << 16);
1005         mutex_lock(&pmcdev->lock);
1006
1007         if (pmc_core_send_msg(pmcdev, &mphy_common_reg) != 0) {
1008                 err = -EBUSY;
1009                 goto out_unlock;
1010         }
1011
1012         /* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
1013         msleep(10);
1014         val = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
1015
1016         for (index = 0; map[index].name ; index++) {
1017                 seq_printf(s, "%-32s\tState: %s\n",
1018                            map[index].name,
1019                            map[index].bit_mask & val ? "Active" : "Idle");
1020         }
1021
1022 out_unlock:
1023         mutex_unlock(&pmcdev->lock);
1024         return err;
1025 }
1026 DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
1027
1028 static int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value)
1029 {
1030         const struct pmc_reg_map *map = pmcdev->map;
1031         u32 reg;
1032         int err = 0;
1033
1034         mutex_lock(&pmcdev->lock);
1035
1036         if (value > map->ltr_ignore_max) {
1037                 err = -EINVAL;
1038                 goto out_unlock;
1039         }
1040
1041         reg = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset);
1042         reg |= BIT(value);
1043         pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, reg);
1044
1045 out_unlock:
1046         mutex_unlock(&pmcdev->lock);
1047
1048         return err;
1049 }
1050
1051 static ssize_t pmc_core_ltr_ignore_write(struct file *file,
1052                                          const char __user *userbuf,
1053                                          size_t count, loff_t *ppos)
1054 {
1055         struct seq_file *s = file->private_data;
1056         struct pmc_dev *pmcdev = s->private;
1057         u32 buf_size, value;
1058         int err;
1059
1060         buf_size = min_t(u32, count, 64);
1061
1062         err = kstrtou32_from_user(userbuf, buf_size, 10, &value);
1063         if (err)
1064                 return err;
1065
1066         err = pmc_core_send_ltr_ignore(pmcdev, value);
1067
1068         return err == 0 ? count : err;
1069 }
1070
1071 static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
1072 {
1073         return 0;
1074 }
1075
1076 static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file)
1077 {
1078         return single_open(file, pmc_core_ltr_ignore_show, inode->i_private);
1079 }
1080
1081 static const struct file_operations pmc_core_ltr_ignore_ops = {
1082         .open           = pmc_core_ltr_ignore_open,
1083         .read           = seq_read,
1084         .write          = pmc_core_ltr_ignore_write,
1085         .llseek         = seq_lseek,
1086         .release        = single_release,
1087 };
1088
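/*
 * Note: pmc_core_ltr_ignore_write() parses a decimal index and
 * pmc_core_send_ltr_ignore() sets the matching bit in the LTR_IGNORE
 * register; indexes above ltr_ignore_max return -EINVAL. The bit positions
 * appear to follow the order of the platform's ltr_show_sts table, which is
 * why its last two entries are marked as unusable for LTR_IGNORE.
 */
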
1089 static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
1090 {
1091         const struct pmc_reg_map *map = pmcdev->map;
1092         u32 fd;
1093
1094         mutex_lock(&pmcdev->lock);
1095
1096         if (!reset && !slps0_dbg_latch)
1097                 goto out_unlock;
1098
1099         fd = pmc_core_reg_read(pmcdev, map->slps0_dbg_offset);
1100         if (reset)
1101                 fd &= ~CNP_PMC_LATCH_SLPS0_EVENTS;
1102         else
1103                 fd |= CNP_PMC_LATCH_SLPS0_EVENTS;
1104         pmc_core_reg_write(pmcdev, map->slps0_dbg_offset, fd);
1105
1106         slps0_dbg_latch = false;
1107
1108 out_unlock:
1109         mutex_unlock(&pmcdev->lock);
1110 }
1111
1112 static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
1113 {
1114         struct pmc_dev *pmcdev = s->private;
1115
1116         pmc_core_slps0_dbg_latch(pmcdev, false);
1117         pmc_core_slps0_display(pmcdev, NULL, s);
1118         pmc_core_slps0_dbg_latch(pmcdev, true);
1119
1120         return 0;
1121 }
1122 DEFINE_SHOW_ATTRIBUTE(pmc_core_slps0_dbg);
1123
1124 static u32 convert_ltr_scale(u32 val)
1125 {
1126         /*
1127          * As per the PCIe specification supporting document
1128          * ECN_LatencyTolnReporting_14Aug08.pdf, the Latency
1129          * Tolerance Reporting data payload is encoded in
1130          * 3-bit scale and 10-bit value fields. Values are
1131          * multiplied by the indicated scale to yield an absolute time
1132          * value, expressible in a range from 1 nanosecond to
1133          * 2^25*(2^10-1) = 34,326,183,936 nanoseconds.
1134          *
1135          * scale encoding is as follows:
1136          *
1137          * ----------------------------------------------
1138          * |scale factor        |       Multiplier (ns) |
1139          * ----------------------------------------------
1140          * |    0               |       1               |
1141          * |    1               |       32              |
1142          * |    2               |       1024            |
1143          * |    3               |       32768           |
1144          * |    4               |       1048576         |
1145          * |    5               |       33554432        |
1146          * |    6               |       Invalid         |
1147          * |    7               |       Invalid         |
1148          * ----------------------------------------------
1149          */
1150         if (val > 5) {
1151                 pr_warn("Invalid LTR scale factor.\n");
1152                 return 0;
1153         }
1154
1155         return 1U << (5 * val);
1156 }
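
/*
 * Worked example: an LTR field with scale = 2 and value = 100 decodes to
 * 100 * 1024 = 102400 ns, which is what pmc_core_ltr_show() below reports in
 * its Snoop/Non-Snoop columns.
 */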
1157
1158 static int pmc_core_ltr_show(struct seq_file *s, void *unused)
1159 {
1160         struct pmc_dev *pmcdev = s->private;
1161         const struct pmc_bit_map *map = pmcdev->map->ltr_show_sts;
1162         u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
1163         u32 ltr_raw_data, scale, val;
1164         u16 snoop_ltr, nonsnoop_ltr;
1165         int index;
1166
1167         for (index = 0; map[index].name ; index++) {
1168                 decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
1169                 ltr_raw_data = pmc_core_reg_read(pmcdev,
1170                                                  map[index].bit_mask);
1171                 snoop_ltr = ltr_raw_data & ~MTPMC_MASK;
1172                 nonsnoop_ltr = (ltr_raw_data >> 0x10) & ~MTPMC_MASK;
1173
1174                 if (FIELD_GET(LTR_REQ_NONSNOOP, ltr_raw_data)) {
1175                         scale = FIELD_GET(LTR_DECODED_SCALE, nonsnoop_ltr);
1176                         val = FIELD_GET(LTR_DECODED_VAL, nonsnoop_ltr);
1177                         decoded_non_snoop_ltr = val * convert_ltr_scale(scale);
1178                 }
1179
1180                 if (FIELD_GET(LTR_REQ_SNOOP, ltr_raw_data)) {
1181                         scale = FIELD_GET(LTR_DECODED_SCALE, snoop_ltr);
1182                         val = FIELD_GET(LTR_DECODED_VAL, snoop_ltr);
1183                         decoded_snoop_ltr = val * convert_ltr_scale(scale);
1184                 }
1185
1186                 seq_printf(s, "%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\n",
1187                            map[index].name, ltr_raw_data,
1188                            decoded_non_snoop_ltr,
1189                            decoded_snoop_ltr);
1190         }
1191         return 0;
1192 }
1193 DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);
1194
1195 static inline u64 adjust_lpm_residency(struct pmc_dev *pmcdev, u32 offset,
1196                                        const int lpm_adj_x2)
1197 {
1198         u64 lpm_res = pmc_core_reg_read(pmcdev, offset);
1199
1200         return GET_X2_COUNTER((u64)lpm_adj_x2 * lpm_res);
1201 }
1202
1203 static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
1204 {
1205         struct pmc_dev *pmcdev = s->private;
1206         const int lpm_adj_x2 = pmcdev->map->lpm_res_counter_step_x2;
1207         u32 offset = pmcdev->map->lpm_residency_offset;
1208         int i, mode;
1209
1210         seq_printf(s, "%-10s %-15s\n", "Substate", "Residency");
1211
1212         pmc_for_each_mode(i, mode, pmcdev) {
1213                 seq_printf(s, "%-10s %-15llu\n", pmc_lpm_modes[mode],
1214                            adjust_lpm_residency(pmcdev, offset + (4 * mode), lpm_adj_x2));
1215         }
1216
1217         return 0;
1218 }
1219 DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res);
1220
1221 static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
1222 {
1223         struct pmc_dev *pmcdev = s->private;
1224         const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
1225         u32 offset = pmcdev->map->lpm_status_offset;
1226
1227         pmc_core_lpm_display(pmcdev, NULL, s, offset, "STATUS", maps);
1228
1229         return 0;
1230 }
1231 DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs);
1232
1233 static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
1234 {
1235         struct pmc_dev *pmcdev = s->private;
1236         const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
1237         u32 offset = pmcdev->map->lpm_live_status_offset;
1238
1239         pmc_core_lpm_display(pmcdev, NULL, s, offset, "LIVE_STATUS", maps);
1240
1241         return 0;
1242 }
1243 DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);
1244
1245 static void pmc_core_substate_req_header_show(struct seq_file *s)
1246 {
1247         struct pmc_dev *pmcdev = s->private;
1248         int i, mode;
1249
1250         seq_printf(s, "%30s |", "Element");
1251         pmc_for_each_mode(i, mode, pmcdev)
1252                 seq_printf(s, " %9s |", pmc_lpm_modes[mode]);
1253
1254         seq_printf(s, " %9s |\n", "Status");
1255 }
1256
1257 static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
1258 {
1259         struct pmc_dev *pmcdev = s->private;
1260         const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
1261         const struct pmc_bit_map *map;
1262         const int num_maps = pmcdev->map->lpm_num_maps;
1263         u32 sts_offset = pmcdev->map->lpm_status_offset;
1264         u32 *lpm_req_regs = pmcdev->lpm_req_regs;
1265         int mp;
1266
1267         /* Display the header */
1268         pmc_core_substate_req_header_show(s);
1269
1270         /* Loop over maps */
1271         for (mp = 0; mp < num_maps; mp++) {
1272                 u32 req_mask = 0;
1273                 u32 lpm_status;
1274                 int mode, idx, i, len = 32;
1275
1276                 /*
1277                  * Capture the requirements and create a mask so that we only
1278                  * show an element if it's required for at least one of the
1279                  * enabled low power modes
1280                  */
1281                 pmc_for_each_mode(idx, mode, pmcdev)
1282                         req_mask |= lpm_req_regs[mp + (mode * num_maps)];
1283
1284                 /* Get the last latched status for this map */
1285                 lpm_status = pmc_core_reg_read(pmcdev, sts_offset + (mp * 4));
1286
1287                 /*  Loop over elements in this map */
1288                 map = maps[mp];
1289                 for (i = 0; map[i].name && i < len; i++) {
1290                         u32 bit_mask = map[i].bit_mask;
1291
1292                         if (!(bit_mask & req_mask))
1293                                 /*
1294                                  * Not required for any enabled states
1295                                  * so don't display
1296                                  */
1297                                 continue;
1298
1299                         /* Display the element name in the first column */
1300                         seq_printf(s, "%30s |", map[i].name);
1301
1302                         /* Loop over the enabled states and display if required */
1303                         pmc_for_each_mode(idx, mode, pmcdev) {
1304                                 if (lpm_req_regs[mp + (mode * num_maps)] & bit_mask)
1305                                         seq_printf(s, " %9s |",
1306                                                    "Required");
1307                                 else
1308                                         seq_printf(s, " %9s |", " ");
1309                         }
1310
1311                         /* In Status column, show the last captured state of this agent */
1312                         if (lpm_status & bit_mask)
1313                                 seq_printf(s, " %9s |", "Yes");
1314                         else
1315                                 seq_printf(s, " %9s |", " ");
1316
1317                         seq_puts(s, "\n");
1318                 }
1319         }
1320
1321         return 0;
1322 }
1323 DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs);
1324
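/*
 * Note: the table printed above has one row per requirement bit that is set
 * for at least one enabled substate: the element name, a "Required" cell for
 * each enabled mode that needs it, and a final "Status" column with the last
 * latched value of the bit ("Yes" or blank). Column widths come from the
 * %30s/%9s formats.
 */
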
1325 static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
1326 {
1327         struct pmc_dev *pmcdev = s->private;
1328         bool c10;
1329         u32 reg;
1330         int idx, mode;
1331
1332         reg = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_sts_latch_en_offset);
1333         if (reg & LPM_STS_LATCH_MODE) {
1334                 seq_puts(s, "c10");
1335                 c10 = false;
1336         } else {
1337                 seq_puts(s, "[c10]");
1338                 c10 = true;
1339         }
1340
1341         pmc_for_each_mode(idx, mode, pmcdev) {
1342                 if ((BIT(mode) & reg) && !c10)
1343                         seq_printf(s, " [%s]", pmc_lpm_modes[mode]);
1344                 else
1345                         seq_printf(s, " %s", pmc_lpm_modes[mode]);
1346         }
1347
1348         seq_puts(s, " clear\n");
1349
1350         return 0;
1351 }
1352
1353 static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
1354                                              const char __user *userbuf,
1355                                              size_t count, loff_t *ppos)
1356 {
1357         struct seq_file *s = file->private_data;
1358         struct pmc_dev *pmcdev = s->private;
1359         bool clear = false, c10 = false;
1360         unsigned char buf[8];
1361         ssize_t ret;
1362         int idx, m, mode;
1363         u32 reg;
1364
1365         if (count > sizeof(buf) - 1)
1366                 return -EINVAL;
1367
1368         ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count);
1369         if (ret < 0)
1370                 return ret;
1371
1372         buf[count] = '\0';
1373
1374         /*
1375          * Allowed strings are:
1376          *      Any enabled substate, e.g. 'S0i2.0'
1377          *      'c10'
1378          *      'clear'
1379          */
1380         mode = sysfs_match_string(pmc_lpm_modes, buf);
1381
1382         /* Check string matches enabled mode */
1383         pmc_for_each_mode(idx, m, pmcdev)
1384                 if (mode == m)
1385                         break;
1386
1387         if (mode != m || mode < 0) {
1388                 if (sysfs_streq(buf, "clear"))
1389                         clear = true;
1390                 else if (sysfs_streq(buf, "c10"))
1391                         c10 = true;
1392                 else
1393                         return -EINVAL;
1394         }
1395
1396         if (clear) {
1397                 mutex_lock(&pmcdev->lock);
1398
1399                 reg = pmc_core_reg_read(pmcdev, pmcdev->map->etr3_offset);
1400                 reg |= ETR3_CLEAR_LPM_EVENTS;
1401                 pmc_core_reg_write(pmcdev, pmcdev->map->etr3_offset, reg);
1402
1403                 mutex_unlock(&pmcdev->lock);
1404
1405                 return count;
1406         }
1407
1408         if (c10) {
1409                 mutex_lock(&pmcdev->lock);
1410
1411                 reg = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_sts_latch_en_offset);
1412                 reg &= ~LPM_STS_LATCH_MODE;
1413                 pmc_core_reg_write(pmcdev, pmcdev->map->lpm_sts_latch_en_offset, reg);
1414
1415                 mutex_unlock(&pmcdev->lock);
1416
1417                 return count;
1418         }
1419
1420         /*
1421          * For LPM mode latching we set the latch enable bit and the
1422          * selected mode, and clear everything else.
1423          */
1424         reg = LPM_STS_LATCH_MODE | BIT(mode);
1425         mutex_lock(&pmcdev->lock);
1426         pmc_core_reg_write(pmcdev, pmcdev->map->lpm_sts_latch_en_offset, reg);
1427         mutex_unlock(&pmcdev->lock);
1428
1429         return count;
1430 }
1431 DEFINE_PMC_CORE_ATTR_WRITE(pmc_core_lpm_latch_mode);
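/*
 * Usage sketch for the write side (paths assume debugfs is mounted at
 * /sys/kernel/debug; the substate name is only an example and must be one
 * of the enabled modes reported by the read side):
 *
 *   # latch LPM status registers on S0i2.0 entry
 *   echo S0i2.0 > /sys/kernel/debug/pmc_core/lpm_latch_mode
 *   # go back to latching on C10 entry
 *   echo c10 > /sys/kernel/debug/pmc_core/lpm_latch_mode
 *   # clear the latched LPM events via ETR3
 *   echo clear > /sys/kernel/debug/pmc_core/lpm_latch_mode
 */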
1432
1433 static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
1434 {
1435         struct pmc_dev *pmcdev = s->private;
1436         const struct pmc_bit_map *map = pmcdev->map->msr_sts;
1437         u64 pcstate_count;
1438         int index;
1439
1440         for (index = 0; map[index].name ; index++) {
1441                 if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
1442                         continue;
1443
1444                 pcstate_count *= 1000;
1445                 do_div(pcstate_count, tsc_khz);
1446                 seq_printf(s, "%-8s : %llu\n", map[index].name,
1447                            pcstate_count);
1448         }
1449
1450         return 0;
1451 }
1452 DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);
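/*
 * The loop above converts the raw package C-state residency counters
 * (which tick at the TSC rate) to microseconds: ticks * 1000 / tsc_khz.
 * Worked example, assuming a 2 GHz TSC (tsc_khz == 2000000):
 *
 *   2000000000 ticks * 1000 / 2000000 = 1000000 usec (1 s)
 */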
1453
1454 static void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev)
1455 {
1456         u8 lpm_priority[LPM_MAX_NUM_MODES];
1457         u32 lpm_en;
1458         int mode, i, p;
1459
1460         /* Use LPM Maps to indicate support for substates */
1461         if (!pmcdev->map->lpm_num_maps)
1462                 return;
1463
1464         lpm_en = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_en_offset);
1465         pmcdev->num_lpm_modes = hweight32(lpm_en);
1466
1467         /* Each byte contains information for 2 modes (7:4 and 3:0) */
1468         for (mode = 0; mode < LPM_MAX_NUM_MODES; mode += 2) {
1469                 u8 priority = pmc_core_reg_read_byte(pmcdev,
1470                                 pmcdev->map->lpm_priority_offset + (mode / 2));
1471                 int pri0 = GENMASK(3, 0) & priority;
1472                 int pri1 = (GENMASK(7, 4) & priority) >> 4;
1473
1474                 lpm_priority[pri0] = mode;
1475                 lpm_priority[pri1] = mode + 1;
1476         }
1477
1478         /*
1479          * Loop through all modes from lowest to highest priority,
1480          * and capture all enabled modes in order
1481          */
1482         i = 0;
1483         for (p = LPM_MAX_NUM_MODES - 1; p >= 0; p--) {
1484                 int mode = lpm_priority[p];
1485
1486                 if (!(BIT(mode) & lpm_en))
1487                         continue;
1488
1489                 pmcdev->lpm_en_modes[i++] = mode;
1490         }
1491 }
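/*
 * Decoding sketch for the priority bytes read above (the register value
 * 0x21 is illustrative): for modes 0 and 1,
 *
 *   pri0 = 0x21 & GENMASK(3, 0)        = 1  ->  lpm_priority[1] = 0
 *   pri1 = (0x21 & GENMASK(7, 4)) >> 4 = 2  ->  lpm_priority[2] = 1
 *
 * so mode 0 lands in priority slot 1 and mode 1 in slot 2. The final loop
 * then walks the slots from index LPM_MAX_NUM_MODES - 1 down to 0 and
 * keeps only the modes whose bits are set in lpm_en, leaving
 * lpm_en_modes[] ordered by priority.
 */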
1492
1493 static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
1494 {
1495         debugfs_remove_recursive(pmcdev->dbgfs_dir);
1496 }
1497
1498 static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
1499 {
1500         struct dentry *dir;
1501
1502         dir = debugfs_create_dir("pmc_core", NULL);
1503         pmcdev->dbgfs_dir = dir;
1504
1505         debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev,
1506                             &pmc_core_dev_state);
1507
1508         if (pmcdev->map->pfear_sts)
1509                 debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
1510                                     pmcdev, &pmc_core_ppfear_fops);
1511
1512         debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
1513                             &pmc_core_ltr_ignore_ops);
1514
1515         debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);
1516
1517         debugfs_create_file("package_cstate_show", 0444, dir, pmcdev,
1518                             &pmc_core_pkgc_fops);
1519
1520         if (pmcdev->map->pll_sts)
1521                 debugfs_create_file("pll_status", 0444, dir, pmcdev,
1522                                     &pmc_core_pll_fops);
1523
1524         if (pmcdev->map->mphy_sts)
1525                 debugfs_create_file("mphy_core_lanes_power_gating_status",
1526                                     0444, dir, pmcdev,
1527                                     &pmc_core_mphy_pg_fops);
1528
1529         if (pmcdev->map->slps0_dbg_maps) {
1530                 debugfs_create_file("slp_s0_debug_status", 0444,
1531                                     dir, pmcdev,
1532                                     &pmc_core_slps0_dbg_fops);
1533
1534                 debugfs_create_bool("slp_s0_dbg_latch", 0644,
1535                                     dir, &slps0_dbg_latch);
1536         }
1537
1538         if (pmcdev->map->lpm_en_offset) {
1539                 debugfs_create_file("substate_residencies", 0444,
1540                                     pmcdev->dbgfs_dir, pmcdev,
1541                                     &pmc_core_substate_res_fops);
1542         }
1543
1544         if (pmcdev->map->lpm_status_offset) {
1545                 debugfs_create_file("substate_status_registers", 0444,
1546                                     pmcdev->dbgfs_dir, pmcdev,
1547                                     &pmc_core_substate_sts_regs_fops);
1548                 debugfs_create_file("substate_live_status_registers", 0444,
1549                                     pmcdev->dbgfs_dir, pmcdev,
1550                                     &pmc_core_substate_l_sts_regs_fops);
1551                 debugfs_create_file("lpm_latch_mode", 0644,
1552                                     pmcdev->dbgfs_dir, pmcdev,
1553                                     &pmc_core_lpm_latch_mode_fops);
1554         }
1555
1556         if (pmcdev->lpm_req_regs) {
1557                 debugfs_create_file("substate_requirements", 0444,
1558                                     pmcdev->dbgfs_dir, pmcdev,
1559                                     &pmc_core_substate_req_regs_fops);
1560         }
1561 }
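/*
 * Resulting debugfs layout, assuming debugfs is mounted at
 * /sys/kernel/debug and every optional map/offset above is present
 * (entries guarded by an if () only appear on matching platforms):
 *
 *   /sys/kernel/debug/pmc_core/
 *     slp_s0_residency_usec
 *     pch_ip_power_gating_status
 *     ltr_ignore
 *     ltr_show
 *     package_cstate_show
 *     pll_status
 *     mphy_core_lanes_power_gating_status
 *     slp_s0_debug_status
 *     slp_s0_dbg_latch
 *     substate_residencies
 *     substate_status_registers
 *     substate_live_status_registers
 *     lpm_latch_mode
 *     substate_requirements
 */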
1562
1563 static const struct x86_cpu_id intel_pmc_core_ids[] = {
1564         X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,           &spt_reg_map),
1565         X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,             &spt_reg_map),
1566         X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,          &spt_reg_map),
1567         X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,            &spt_reg_map),
1568         X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L,        &cnp_reg_map),
1569         X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,           &icl_reg_map),
1570         X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI,        &icl_reg_map),
1571         X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,           &cnp_reg_map),
1572         X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,         &cnp_reg_map),
1573         X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,         &tgl_reg_map),
1574         X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,           &tgl_reg_map),
1575         X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,        &tgl_reg_map),
1576         X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,      &icl_reg_map),
1577         X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,          &tgl_reg_map),
1578         {}
1579 };
1580
1581 MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);
1582
1583 static const struct pci_device_id pmc_pci_ids[] = {
1584         { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
1585         { }
1586 };
1587
1588 /*
1589  * This quirk can be used on platforms where the BIOS requires
1590  * the 24 MHz crystal to shut down before the PMC can assert
1591  * SLP_S0#.
1592  */
1593 static bool xtal_ignore;
1594 static int quirk_xtal_ignore(const struct dmi_system_id *id)
1595 {
1596         xtal_ignore = true;
1597         return 0;
1598 }
1599
1600 static void pmc_core_xtal_ignore(struct pmc_dev *pmcdev)
1601 {
1602         u32 value;
1603
1604         value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
1605         /* 24MHz Crystal Shutdown Qualification Disable */
1606         value |= SPT_PMC_VRIC1_XTALSDQDIS;
1607         /* Low Voltage Mode Enable */
1608         value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
1609         pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
1610 }
1611
1612 static const struct dmi_system_id pmc_core_dmi_table[]  = {
1613         {
1614         .callback = quirk_xtal_ignore,
1615         .ident = "HP Elite x2 1013 G3",
1616         .matches = {
1617                 DMI_MATCH(DMI_SYS_VENDOR, "HP"),
1618                 DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"),
1619                 },
1620         },
1621         {}
1622 };
1623
1624 static void pmc_core_do_dmi_quirks(struct pmc_dev *pmcdev)
1625 {
1626         dmi_check_system(pmc_core_dmi_table);
1627
1628         if (xtal_ignore)
1629                 pmc_core_xtal_ignore(pmcdev);
1630 }
1631
1632 static int pmc_core_probe(struct platform_device *pdev)
1633 {
1634         static bool device_initialized;
1635         struct pmc_dev *pmcdev;
1636         const struct x86_cpu_id *cpu_id;
1637         u64 slp_s0_addr;
1638
1639         if (device_initialized)
1640                 return -ENODEV;
1641
1642         pmcdev = devm_kzalloc(&pdev->dev, sizeof(*pmcdev), GFP_KERNEL);
1643         if (!pmcdev)
1644                 return -ENOMEM;
1645
1646         platform_set_drvdata(pdev, pmcdev);
1647
1648         cpu_id = x86_match_cpu(intel_pmc_core_ids);
1649         if (!cpu_id)
1650                 return -ENODEV;
1651
1652         pmcdev->map = (struct pmc_reg_map *)cpu_id->driver_data;
1653
1654         /*
1655          * Coffee Lake has the Kaby Lake CPU ID but a Cannon Lake PCH, so
1656          * the Sunrisepoint PCH regmap can't be used here. Use the Cannon
1657          * Lake PCH regmap in this case.
1658          */
1659         if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
1660                 pmcdev->map = &cnp_reg_map;
1661
1662         if (lpit_read_residency_count_address(&slp_s0_addr)) {
1663                 pmcdev->base_addr = PMC_BASE_ADDR_DEFAULT;
1664
1665                 if (page_is_ram(PHYS_PFN(pmcdev->base_addr)))
1666                         return -ENODEV;
1667         } else {
1668                 pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset;
1669         }
1670
1671         pmcdev->regbase = ioremap(pmcdev->base_addr,
1672                                   pmcdev->map->regmap_length);
1673         if (!pmcdev->regbase)
1674                 return -ENOMEM;
1675
1676         mutex_init(&pmcdev->lock);
1677
1678         pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(pmcdev);
1679         pmc_core_get_low_power_modes(pmcdev);
1680         pmc_core_do_dmi_quirks(pmcdev);
1681
1682         if (pmcdev->map == &tgl_reg_map)
1683                 pmc_core_get_tgl_lpm_reqs(pdev);
1684
1685         /*
1686          * On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when
1687          * a cable is attached. Tell the PMC to ignore it.
1688          */
1689         if (pmcdev->map == &tgl_reg_map) {
1690                 dev_dbg(&pdev->dev, "ignoring GBE LTR\n");
1691                 pmc_core_send_ltr_ignore(pmcdev, 3);
1692         }
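        /*
         * The ltr_ignore debugfs file registered below presumably allows the
         * same kind of request from user space, e.g. (IP index illustrative):
         *
         *   echo 3 > /sys/kernel/debug/pmc_core/ltr_ignore
         */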
1693
1694         pmc_core_dbgfs_register(pmcdev);
1695
1696         device_initialized = true;
1697         dev_info(&pdev->dev, "initialized\n");
1698
1699         return 0;
1700 }
1701
1702 static int pmc_core_remove(struct platform_device *pdev)
1703 {
1704         struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
1705
1706         pmc_core_dbgfs_unregister(pmcdev);
1707         platform_set_drvdata(pdev, NULL);
1708         mutex_destroy(&pmcdev->lock);
1709         iounmap(pmcdev->regbase);
1710         return 0;
1711 }
1712
1713 static bool warn_on_s0ix_failures;
1714 module_param(warn_on_s0ix_failures, bool, 0644);
1715 MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");
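/*
 * Example of enabling the check, assuming the driver is built as a module
 * named intel_pmc_core (matching this file):
 *
 *   modprobe intel_pmc_core warn_on_s0ix_failures=1
 *
 * or, on a running system:
 *
 *   echo Y > /sys/module/intel_pmc_core/parameters/warn_on_s0ix_failures
 */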
1716
1717 static __maybe_unused int pmc_core_suspend(struct device *dev)
1718 {
1719         struct pmc_dev *pmcdev = dev_get_drvdata(dev);
1720
1721         pmcdev->check_counters = false;
1722
1723         /* No warnings on S0ix failures */
1724         if (!warn_on_s0ix_failures)
1725                 return 0;
1726
1727         /* Check if the suspend will actually use S0ix */
1728         if (pm_suspend_via_firmware())
1729                 return 0;
1730
1731         /* Save PC10 residency for checking later */
1732         if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter))
1733                 return -EIO;
1734
1735         /* Save S0ix residency for checking later */
1736         if (pmc_core_dev_state_get(pmcdev, &pmcdev->s0ix_counter))
1737                 return -EIO;
1738
1739         pmcdev->check_counters = true;
1740         return 0;
1741 }
1742
1743 static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev)
1744 {
1745         u64 pc10_counter;
1746
1747         if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter))
1748                 return false;
1749
1750         if (pc10_counter == pmcdev->pc10_counter)
1751                 return true;
1752
1753         return false;
1754 }
1755
1756 static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
1757 {
1758         u64 s0ix_counter;
1759
1760         if (pmc_core_dev_state_get(pmcdev, &s0ix_counter))
1761                 return false;
1762
1763         if (s0ix_counter == pmcdev->s0ix_counter)
1764                 return true;
1765
1766         return false;
1767 }
1768
1769 static __maybe_unused int pmc_core_resume(struct device *dev)
1770 {
1771         struct pmc_dev *pmcdev = dev_get_drvdata(dev);
1772         const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
1773         int offset = pmcdev->map->lpm_status_offset;
1774
1775         if (!pmcdev->check_counters)
1776                 return 0;
1777
1778         if (!pmc_core_is_s0ix_failed(pmcdev))
1779                 return 0;
1780
1781         if (pmc_core_is_pc10_failed(pmcdev)) {
1782                 /* S0ix failed because of PC10 entry failure */
1783                 dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n",
1784                          pmcdev->pc10_counter);
1785                 return 0;
1786         }
1787
1788         /* The really interesting case - S0ix failed - let's ask the PMC why. */
1789         dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
1790                  pmcdev->s0ix_counter);
1791         if (pmcdev->map->slps0_dbg_maps)
1792                 pmc_core_slps0_display(pmcdev, dev, NULL);
1793         if (pmcdev->map->lpm_sts)
1794                 pmc_core_lpm_display(pmcdev, dev, NULL, offset, "STATUS", maps);
1795
1796         return 0;
1797 }
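/*
 * Illustrative kernel log for a suspend cycle where the package reached
 * PC10 but SLP_S0 never asserted (the counter value and device prefix are
 * made up; the message text matches the dev_warn() above):
 *
 *   intel_pmc_core INT33A1:00: CPU did not enter SLP_S0!!! (S0ix cnt=0)
 *
 * followed by a dump of the SLP_S0 debug and/or LPM STATUS registers for
 * the latched state.
 */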
1798
1799 static const struct dev_pm_ops pmc_core_pm_ops = {
1800         SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
1801 };
1802
1803 static const struct acpi_device_id pmc_core_acpi_ids[] = {
1804         {"INT33A1", 0}, /* _HID for Intel Power Engine, _CID PNP0D80 */
1805         { }
1806 };
1807 MODULE_DEVICE_TABLE(acpi, pmc_core_acpi_ids);
1808
1809 static struct platform_driver pmc_core_driver = {
1810         .driver = {
1811                 .name = "intel_pmc_core",
1812                 .acpi_match_table = ACPI_PTR(pmc_core_acpi_ids),
1813                 .pm = &pmc_core_pm_ops,
1814                 .dev_groups = pmc_dev_groups,
1815         },
1816         .probe = pmc_core_probe,
1817         .remove = pmc_core_remove,
1818 };
1819
1820 module_platform_driver(pmc_core_driver);
1821
1822 MODULE_LICENSE("GPL v2");
1823 MODULE_DESCRIPTION("Intel PMC Core Driver");