// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AMD Platform Management Framework Driver
 *
 * Copyright (c) 2022, Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
 */

#include <asm/amd_nb.h>
#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include "pmf.h"

/* PMF-SMU communication registers */
#define AMD_PMF_REGISTER_MESSAGE        0xA18
#define AMD_PMF_REGISTER_RESPONSE       0xA78
#define AMD_PMF_REGISTER_ARGUMENT       0xA58

/* Base address of SMU for mapping physical address to virtual address */
#define AMD_PMF_MAPPING_SIZE            0x01000
#define AMD_PMF_BASE_ADDR_OFFSET        0x10000
#define AMD_PMF_BASE_ADDR_LO            0x13B102E8
#define AMD_PMF_BASE_ADDR_HI            0x13B102EC
#define AMD_PMF_BASE_ADDR_LO_MASK       GENMASK(15, 0)
#define AMD_PMF_BASE_ADDR_HI_MASK       GENMASK(31, 20)

/* SMU Response Codes */
#define AMD_PMF_RESULT_OK                    0x01
#define AMD_PMF_RESULT_CMD_REJECT_BUSY       0xFC
#define AMD_PMF_RESULT_CMD_REJECT_PREREQ     0xFD
#define AMD_PMF_RESULT_CMD_UNKNOWN           0xFE
#define AMD_PMF_RESULT_FAILED                0xFF

/* List of supported CPU ids */
#define AMD_CPU_ID_RMB                  0x14b5
#define AMD_CPU_ID_PS                   0x14e8
#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507

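/*
 * SMU mailbox polling parameters: the response register is polled in
 * PMF_MSG_DELAY_MIN_US steps for up to RESPONSE_REGISTER_LOOP_MAX
 * iterations, i.e. 50 us * 20000 = ~1 second total timeout.
 */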
#define PMF_MSG_DELAY_MIN_US            50
#define RESPONSE_REGISTER_LOOP_MAX      20000

#define DELAY_MIN_US    2000
#define DELAY_MAX_US    3000

/* Override the Metrics Table sampling interval (in ms) */
static int metrics_table_loop_ms = 1000;
module_param(metrics_table_loop_ms, int, 0644);
MODULE_PARM_DESC(metrics_table_loop_ms, "Metrics Table sample size time (default = 1000ms)");

/* Force load on supported older platforms */
static bool force_load;
module_param(force_load, bool, 0444);
MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)");

static int amd_pmf_pwr_src_notify_call(struct notifier_block *nb, unsigned long event, void *data)
{
        struct amd_pmf_dev *pmf = container_of(nb, struct amd_pmf_dev, pwr_src_notifier);

        if (event != PSY_EVENT_PROP_CHANGED)
                return NOTIFY_OK;

        if (is_apmf_func_supported(pmf, APMF_FUNC_AUTO_MODE) ||
            is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_DC) ||
            is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_AC)) {
                if ((pmf->amt_enabled || pmf->cnqf_enabled) && is_pprof_balanced(pmf))
                        return NOTIFY_DONE;
        }

        if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR))
                amd_pmf_set_sps_power_limits(pmf);

        if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE))
                amd_pmf_power_slider_update_event(pmf);

        return NOTIFY_OK;
}

static int current_power_limits_show(struct seq_file *seq, void *unused)
{
        struct amd_pmf_dev *dev = seq->private;
        struct amd_pmf_static_slider_granular table;
        int mode, src = 0;

        mode = amd_pmf_get_pprof_modes(dev);
        if (mode < 0)
                return mode;

        src = amd_pmf_get_power_source();
        amd_pmf_update_slider(dev, SLIDER_OP_GET, mode, &table);
        seq_printf(seq, "spl:%u fppt:%u sppt:%u sppt_apu_only:%u stt_min:%u stt[APU]:%u stt[HS2]: %u\n",
                   table.prop[src][mode].spl,
                   table.prop[src][mode].fppt,
                   table.prop[src][mode].sppt,
                   table.prop[src][mode].sppt_apu_only,
                   table.prop[src][mode].stt_min,
                   table.prop[src][mode].stt_skin_temp[STT_TEMP_APU],
                   table.prop[src][mode].stt_skin_temp[STT_TEMP_HS2]);
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(current_power_limits);

static void amd_pmf_dbgfs_unregister(struct amd_pmf_dev *dev)
{
        debugfs_remove_recursive(dev->dbgfs_dir);
}

static void amd_pmf_dbgfs_register(struct amd_pmf_dev *dev)
{
        dev->dbgfs_dir = debugfs_create_dir("amd_pmf", NULL);
        debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
                            &current_power_limits_fops);
}

int amd_pmf_get_power_source(void)
{
        if (power_supply_is_system_supplied() > 0)
                return POWER_SOURCE_AC;
        else
                return POWER_SOURCE_DC;
}

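/*
 * Periodic worker: asks PMFW to copy the metrics table into the shared
 * DRAM buffer, derives the SoC power (APU + dGPU) from it, feeds the
 * Auto Mode and/or CnQF state machines when they are enabled, and
 * re-arms itself every metrics_table_loop_ms milliseconds.
 */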
static void amd_pmf_get_metrics(struct work_struct *work)
{
        struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, work_buffer.work);
        ktime_t time_elapsed_ms;
        int socket_power;

        mutex_lock(&dev->update_mutex);
        /* Transfer table contents */
        memset(dev->buf, 0, sizeof(dev->m_table));
        amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
        memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table));

        time_elapsed_ms = ktime_to_ms(ktime_get()) - dev->start_time;
        /* Calculate the avg SoC power consumption */
        socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power;

        if (dev->amt_enabled) {
                /* Apply the Auto Mode transition */
                amd_pmf_trans_automode(dev, socket_power, time_elapsed_ms);
        }

        if (dev->cnqf_enabled) {
                /* Apply the CnQF transition */
                amd_pmf_trans_cnqf(dev, socket_power, time_elapsed_ms);
        }

        dev->start_time = ktime_to_ms(ktime_get());
        schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms));
        mutex_unlock(&dev->update_mutex);
}

static inline u32 amd_pmf_reg_read(struct amd_pmf_dev *dev, int reg_offset)
{
        return ioread32(dev->regbase + reg_offset);
}

static inline void amd_pmf_reg_write(struct amd_pmf_dev *dev, int reg_offset, u32 val)
{
        iowrite32(val, dev->regbase + reg_offset);
}

static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev)
{
        u32 value;

        value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_RESPONSE);
        dev_dbg(dev->dev, "AMD_PMF_REGISTER_RESPONSE:%x\n", value);

        value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
        dev_dbg(dev->dev, "AMD_PMF_REGISTER_ARGUMENT:%d\n", value);

        value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_MESSAGE);
        dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value);
}

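/*
 * Mailbox protocol with the SMU/PMFW:
 *   1. Poll the response register until it is non-zero, indicating that
 *      any previous command has completed.
 *   2. Clear the response register, write the argument, then write the
 *      message ID to trigger the command.
 *   3. Poll the response register again for the result code; on
 *      AMD_PMF_RESULT_OK with @get set, read the returned value back
 *      from the argument register.
 */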
int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data)
{
        int rc;
        u32 val;

        mutex_lock(&dev->lock);

        /* Wait until any previous command has completed (response register non-zero) */
        rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
                                val, val != 0, PMF_MSG_DELAY_MIN_US,
                                PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
        if (rc) {
                dev_err(dev->dev, "failed to talk to SMU\n");
                goto out_unlock;
        }

        /* Write zero to response register */
        amd_pmf_reg_write(dev, AMD_PMF_REGISTER_RESPONSE, 0);

        /* Write argument into argument register */
        amd_pmf_reg_write(dev, AMD_PMF_REGISTER_ARGUMENT, arg);

        /* Write message ID to message ID register */
        amd_pmf_reg_write(dev, AMD_PMF_REGISTER_MESSAGE, message);

        /* Wait until we get a valid response */
        rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
                                val, val != 0, PMF_MSG_DELAY_MIN_US,
                                PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
        if (rc) {
                dev_err(dev->dev, "SMU response timed out\n");
                goto out_unlock;
        }

        switch (val) {
        case AMD_PMF_RESULT_OK:
                if (get) {
                        /* PMFW may take longer to return the data */
                        usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
                        *data = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
                }
                break;
        case AMD_PMF_RESULT_CMD_REJECT_BUSY:
                dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
                rc = -EBUSY;
                goto out_unlock;
        case AMD_PMF_RESULT_CMD_UNKNOWN:
                dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
                rc = -EINVAL;
                goto out_unlock;
        case AMD_PMF_RESULT_CMD_REJECT_PREREQ:
        case AMD_PMF_RESULT_FAILED:
        default:
                dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
                rc = -EIO;
                goto out_unlock;
        }

out_unlock:
        mutex_unlock(&dev->lock);
        amd_pmf_dump_registers(dev);
        return rc;
}

static const struct pci_device_id pmf_pci_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RMB) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
        { }
};

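/*
 * Hand the physical address of the metrics table buffer to PMFW as two
 * 32-bit halves via SET_DRAM_ADDR_HIGH/SET_DRAM_ADDR_LOW. The buffer is
 * allocated once at init time (alloc_buffer = true) and only
 * re-advertised to PMFW on resume (alloc_buffer = false).
 */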
int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer)
{
        u64 phys_addr;
        u32 hi, low;

        /* Get Metrics Table Address */
        if (alloc_buffer) {
                dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL);
                if (!dev->buf)
                        return -ENOMEM;
        }

        phys_addr = virt_to_phys(dev->buf);
        hi = phys_addr >> 32;
        low = phys_addr & GENMASK(31, 0);

        amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
        amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);

        return 0;
}

int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
{
        int ret;

        INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);

        ret = amd_pmf_set_dram_addr(dev, true);
        if (ret)
                return ret;

        /*
         * Start collecting metrics data after a small delay; otherwise
         * we might end up reading stale values from PMFW.
         */
        schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms * 3));

        return 0;
}

static int amd_pmf_suspend_handler(struct device *dev)
{
        struct amd_pmf_dev *pdev = dev_get_drvdata(dev);

        if (pdev->smart_pc_enabled)
                cancel_delayed_work_sync(&pdev->pb_work);

        return 0;
}

static int amd_pmf_resume_handler(struct device *dev)
{
        struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
        int ret;

        if (pdev->buf) {
                ret = amd_pmf_set_dram_addr(pdev, false);
                if (ret)
                        return ret;
        }

        if (pdev->smart_pc_enabled)
                schedule_delayed_work(&pdev->pb_work, msecs_to_jiffies(2000));

        return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, amd_pmf_suspend_handler, amd_pmf_resume_handler);

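/*
 * Feature bring-up order: the static/OS power slider (SPS) is enabled
 * whenever APMF advertises it; if Smart PC initializes successfully it
 * supersedes the remaining features, otherwise Auto Mode is preferred
 * over CnQF.
 */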
static void amd_pmf_init_features(struct amd_pmf_dev *dev)
{
        int ret;

        /* Enable Static Slider */
        if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
            is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
                amd_pmf_init_sps(dev);
                dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
                power_supply_reg_notifier(&dev->pwr_src_notifier);
                dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
        }

        amd_pmf_init_smart_pc(dev);
        if (dev->smart_pc_enabled) {
                dev_dbg(dev->dev, "Smart PC Solution Enabled\n");
                /* If Smart PC is enabled, no need to check for other features */
                return;
        }

        if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
                amd_pmf_init_auto_mode(dev);
                dev_dbg(dev->dev, "Auto Mode Init done\n");
        } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
                   is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
                ret = amd_pmf_init_cnqf(dev);
                if (ret)
                        dev_warn(dev->dev, "CnQF Init failed\n");
        }
}

static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
{
        if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
            is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
                power_supply_unreg_notifier(&dev->pwr_src_notifier);
                amd_pmf_deinit_sps(dev);
        }

        if (dev->smart_pc_enabled) {
                amd_pmf_deinit_smart_pc(dev);
        } else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
                amd_pmf_deinit_auto_mode(dev);
        } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
                   is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
                amd_pmf_deinit_cnqf(dev);
        }
}

static const struct acpi_device_id amd_pmf_acpi_ids[] = {
        {"AMDI0100", 0x100},
        {"AMDI0102", 0},
        {"AMDI0103", 0},
        { }
};
MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);

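/*
 * Probe flow: match the ACPI ID, verify the root complex against the
 * supported PCI device ID list, read the PMF mailbox base address over
 * SMN, ioremap the mailbox registers, then bring up ACPI, debugfs and
 * the PMF features.
 */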
static int amd_pmf_probe(struct platform_device *pdev)
{
        const struct acpi_device_id *id;
        struct amd_pmf_dev *dev;
        struct pci_dev *rdev;
        u32 base_addr_lo;
        u32 base_addr_hi;
        u64 base_addr;
        u32 val;
        int err;

        id = acpi_match_device(amd_pmf_acpi_ids, &pdev->dev);
        if (!id)
                return -ENODEV;

        if (id->driver_data == 0x100 && !force_load)
                return -ENODEV;

        dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        dev->dev = &pdev->dev;

        rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
        if (!rdev || !pci_match_id(pmf_pci_ids, rdev)) {
                pci_dev_put(rdev);
                return -ENODEV;
        }

        dev->cpu_id = rdev->device;

        err = amd_smn_read(0, AMD_PMF_BASE_ADDR_LO, &val);
        if (err) {
                dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_LO);
                pci_dev_put(rdev);
                return pcibios_err_to_errno(err);
        }

        base_addr_lo = val & AMD_PMF_BASE_ADDR_HI_MASK;

        err = amd_smn_read(0, AMD_PMF_BASE_ADDR_HI, &val);
        if (err) {
                dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_HI);
                pci_dev_put(rdev);
                return pcibios_err_to_errno(err);
        }

        base_addr_hi = val & AMD_PMF_BASE_ADDR_LO_MASK;
        pci_dev_put(rdev);
        base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);

        dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMF_BASE_ADDR_OFFSET,
                                    AMD_PMF_MAPPING_SIZE);
        if (!dev->regbase)
                return -ENOMEM;

        mutex_init(&dev->lock);
        mutex_init(&dev->update_mutex);

        apmf_acpi_init(dev);
        platform_set_drvdata(pdev, dev);
        amd_pmf_dbgfs_register(dev);
        amd_pmf_init_features(dev);
        apmf_install_handler(dev);

        dev_info(dev->dev, "registered PMF device successfully\n");

        return 0;
}

static void amd_pmf_remove(struct platform_device *pdev)
{
        struct amd_pmf_dev *dev = platform_get_drvdata(pdev);

        amd_pmf_deinit_features(dev);
        apmf_acpi_deinit(dev);
        amd_pmf_dbgfs_unregister(dev);
        mutex_destroy(&dev->lock);
        mutex_destroy(&dev->update_mutex);
        kfree(dev->buf);
}

static const struct attribute_group *amd_pmf_driver_groups[] = {
        &cnqf_feature_attribute_group,
        NULL,
};

static struct platform_driver amd_pmf_driver = {
        .driver = {
                .name = "amd-pmf",
                .acpi_match_table = amd_pmf_acpi_ids,
                .dev_groups = amd_pmf_driver_groups,
                .pm = pm_sleep_ptr(&amd_pmf_pm),
        },
        .probe = amd_pmf_probe,
        .remove_new = amd_pmf_remove,
};
module_platform_driver(amd_pmf_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AMD Platform Management Framework Driver");
MODULE_SOFTDEP("pre: amdtee");