drivers/misc/mei/pci-me.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"
#include "hw-me-regs.h"
#include "hw-me.h"

/* mei_me_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_82G35, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_82G965, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, MEI_ME_ICH_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, MEI_ME_ICH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, MEI_ME_ICH_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, MEI_ME_ICH10_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, MEI_ME_ICH10_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_4, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},

        {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},

        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);

#ifdef CONFIG_PM
static inline void mei_me_set_pm_domain(struct mei_device *dev);
static inline void mei_me_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

/**
 * mei_me_quirk_probe - probe for devices that don't have a valid ME interface
 *
 * @pdev: PCI device structure
 * @cfg: per generation config
 *
 * Return: true if the ME interface is valid, false otherwise
 */
static bool mei_me_quirk_probe(struct pci_dev *pdev,
                                const struct mei_cfg *cfg)
{
        if (cfg->quirk_probe && cfg->quirk_probe(pdev)) {
                dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
                return false;
        }

        return true;
}

/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        const struct mei_cfg *cfg;
        struct mei_device *dev;
        struct mei_me_hw *hw;
        unsigned int irqflags;
        int err;

        cfg = mei_me_get_cfg(ent->driver_data);
        if (!cfg)
                return -ENODEV;

        if (!mei_me_quirk_probe(pdev, cfg))
                return -ENODEV;

        /* enable pci dev */
        err = pcim_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "failed to enable pci device.\n");
                goto end;
        }
        /* set PCI host mastering */
        pci_set_master(pdev);
        /* pci request regions and mapping IO device memory for mei driver */
        err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
        if (err) {
                dev_err(&pdev->dev, "failed to get pci regions.\n");
                goto end;
        }

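        /*
         * Try 64-bit DMA first and fall back to 32-bit masks
         * if the 64-bit configuration is not usable.
         */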
        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
            dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err)
                        err = dma_set_coherent_mask(&pdev->dev,
                                                    DMA_BIT_MASK(32));
        }
        if (err) {
                dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
                goto end;
        }

        /* allocates and initializes the mei dev structure */
        dev = mei_me_dev_init(pdev, cfg);
        if (!dev) {
                err = -ENOMEM;
                goto end;
        }
        hw = to_me_hw(dev);
        hw->mem_addr = pcim_iomap_table(pdev)[0];

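        /*
         * MSI setup is best effort: if pci_enable_msi() fails the device
         * keeps its legacy interrupt line, which may be shared.
         */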
        pci_enable_msi(pdev);

        /* request and enable interrupt */
        irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

        err = request_threaded_irq(pdev->irq,
                        mei_me_irq_quick_handler,
                        mei_me_irq_thread_handler,
                        irqflags, KBUILD_MODNAME, dev);
        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
                        pdev->irq);
                goto end;
        }

        if (mei_start(dev)) {
                dev_err(&pdev->dev, "init hw failure.\n");
                err = -ENODEV;
                goto release_irq;
        }

        pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);

        err = mei_register(dev, &pdev->dev);
        if (err)
                goto stop;

        pci_set_drvdata(pdev, dev);

        /*
         * MEI needs to be resumed from runtime suspend mode
         * in order to perform the link reset flow upon system suspend.
         */
        dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);

        /*
         * ME maps runtime suspend/resume to D0i states, hence we need to
         * go around the native PCI runtime service, which would eventually
         * bring the device into D3cold/hot state. Unlike from D0i3, the
         * mei device cannot wake up from D3.
         * To get around the PCI device native runtime pm, ME uses runtime
         * pm domain handlers which take precedence over the driver's pm
         * handlers.
         */
        mei_me_set_pm_domain(dev);

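        /*
         * When power gating is supported, drop the rpm reference so
         * runtime suspend becomes possible; on D0i3 capable hardware
         * allow it even without a user-space opt-in.
         */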
        if (mei_pg_is_enabled(dev)) {
                pm_runtime_put_noidle(&pdev->dev);
                if (hw->d0i3_supported)
                        pm_runtime_allow(&pdev->dev);
        }

        dev_dbg(&pdev->dev, "initialization successful.\n");

        return 0;

stop:
        mei_stop(dev);
release_irq:
        mei_cancel_work(dev);
        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);
end:
        dev_err(&pdev->dev, "initialization failed.\n");
        return err;
}

/**
 * mei_me_shutdown - Device Shutdown Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_shutdown is called from the reboot notifier.
 * It is a simplified version of remove so we go down faster.
 */
static void mei_me_shutdown(struct pci_dev *pdev)
{
        struct mei_device *dev;

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return;

        dev_dbg(&pdev->dev, "shutdown\n");
        mei_stop(dev);

        mei_me_unset_pm_domain(dev);

        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);
}

/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
        struct mei_device *dev;

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return;

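        /*
         * Take back the rpm reference dropped in probe so the
         * usage count stays balanced.
         */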
        if (mei_pg_is_enabled(dev))
                pm_runtime_get_noresume(&pdev->dev);

        dev_dbg(&pdev->dev, "stop\n");
        mei_stop(dev);

        mei_me_unset_pm_domain(dev);

        mei_disable_interrupts(dev);

        free_irq(pdev->irq, dev);

        mei_deregister(dev);
}

#ifdef CONFIG_PM_SLEEP
static int mei_me_pci_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev = pci_get_drvdata(pdev);

        if (!dev)
                return -ENODEV;

        dev_dbg(&pdev->dev, "suspend\n");

        mei_stop(dev);

        mei_disable_interrupts(dev);

        free_irq(pdev->irq, dev);
        pci_disable_msi(pdev);

        return 0;
}

static int mei_me_pci_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;
        unsigned int irqflags;
        int err;

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;

        pci_enable_msi(pdev);

        irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

        /* request and enable interrupt */
        err = request_threaded_irq(pdev->irq,
                        mei_me_irq_quick_handler,
                        mei_me_irq_thread_handler,
                        irqflags, KBUILD_MODNAME, dev);

        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
                                pdev->irq);
                return err;
        }

        err = mei_restart(dev);
        if (err)
                return err;

        /* Start timer if stopped in suspend */
        schedule_delayed_work(&dev->timer_work, HZ);

        return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mei_me_pm_runtime_idle(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;

        dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n");

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;
        if (mei_write_is_idle(dev))
                pm_runtime_autosuspend(device);

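        /*
         * Returning -EBUSY keeps the PM core from suspending the device
         * from the idle path; the autosuspend request above is used instead.
         */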
        return -EBUSY;
}

static int mei_me_pm_runtime_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;
        int ret;

        dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n");

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;

        mutex_lock(&dev->device_lock);

        if (mei_write_is_idle(dev))
                ret = mei_me_pg_enter_sync(dev);
        else
                ret = -EAGAIN;

        mutex_unlock(&dev->device_lock);

        dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret);

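        /*
         * Any failure other than "not idle yet" (-EAGAIN) indicates the
         * power gating handshake broke down; schedule a reset to recover.
         */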
        if (ret && ret != -EAGAIN)
                schedule_work(&dev->reset_work);

        return ret;
}

static int mei_me_pm_runtime_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;
        int ret;

        dev_dbg(&pdev->dev, "rpm: me: runtime resume\n");

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;

        mutex_lock(&dev->device_lock);

        ret = mei_me_pg_exit_sync(dev);

        mutex_unlock(&dev->device_lock);

        dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret);

        if (ret)
                schedule_work(&dev->reset_work);

        return ret;
}

/**
 * mei_me_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_set_pm_domain(struct mei_device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);

        if (pdev->dev.bus && pdev->dev.bus->pm) {
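                /*
                 * Inherit the PCI bus pm ops and override only the runtime
                 * callbacks, so system sleep still goes through the bus.
                 */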
                dev->pg_domain.ops = *pdev->dev.bus->pm;

                dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
                dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
                dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;

                dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
        }
}

/**
 * mei_me_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_unset_pm_domain(struct mei_device *dev)
{
        /* stop using pm callbacks if any */
        dev_pm_domain_set(dev->dev, NULL);
}

static const struct dev_pm_ops mei_me_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
                                mei_me_pci_resume)
        SET_RUNTIME_PM_OPS(
                mei_me_pm_runtime_suspend,
                mei_me_pm_runtime_resume,
                mei_me_pm_runtime_idle)
};

#define MEI_ME_PM_OPS   (&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS   NULL
#endif /* CONFIG_PM */
/*
 * PCI driver structure
 */
static struct pci_driver mei_me_driver = {
        .name = KBUILD_MODNAME,
        .id_table = mei_me_pci_tbl,
        .probe = mei_me_probe,
        .remove = mei_me_remove,
        .shutdown = mei_me_shutdown,
        .driver.pm = MEI_ME_PM_OPS,
        .driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
};

module_pci_driver(mei_me_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");