drivers/dma/idxd/init.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");

#define DRV_NAME "idxd"

static struct idr idxd_idrs[IDXD_TYPE_MAX];
static DEFINE_MUTEX(idxd_idr_lock);

static const struct pci_device_id idxd_pci_tbl[] = {
        /* DSA ver 1.0 platforms */
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

static char *idxd_name[] = {
        "dsa",
};

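/* Return the name prefix ("dsa") for the given device's type. */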
const char *idxd_get_dev_name(struct idxd_device *idxd)
{
        return idxd_name[idxd->type];
}

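/*
 * Allocate and request the device's MSI-X vectors. Vector 0 is reserved
 * for error and other administrative interrupts; every remaining vector
 * services work queue completions and gets its own pending llist and
 * work list.
 */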
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
        struct pci_dev *pdev = idxd->pdev;
        struct device *dev = &pdev->dev;
        struct msix_entry *msix;
        struct idxd_irq_entry *irq_entry;
        int i, msixcnt;
        int rc = 0;

        msixcnt = pci_msix_vec_count(pdev);
        if (msixcnt < 0) {
                rc = msixcnt;
                dev_err(dev, "Not MSI-X interrupt capable.\n");
                goto err_no_irq;
        }

        idxd->msix_entries = devm_kcalloc(dev, msixcnt,
                                          sizeof(struct msix_entry), GFP_KERNEL);
        if (!idxd->msix_entries) {
                rc = -ENOMEM;
                goto err_no_irq;
        }

        for (i = 0; i < msixcnt; i++)
                idxd->msix_entries[i].entry = i;

        rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
        if (rc) {
                dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
                goto err_no_irq;
        }
        dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

        /*
         * We implement 1 completion list per MSI-X entry except for
         * entry 0, which is for errors and others.
         */
        idxd->irq_entries = devm_kcalloc(dev, msixcnt,
                                         sizeof(struct idxd_irq_entry),
                                         GFP_KERNEL);
        if (!idxd->irq_entries) {
                rc = -ENOMEM;
                goto err_no_irq;
        }

        for (i = 0; i < msixcnt; i++) {
                idxd->irq_entries[i].id = i;
                idxd->irq_entries[i].idxd = idxd;
        }

        msix = &idxd->msix_entries[0];
        irq_entry = &idxd->irq_entries[0];
        rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
                                       idxd_misc_thread, 0, "idxd-misc",
                                       irq_entry);
        if (rc < 0) {
                dev_err(dev, "Failed to allocate misc interrupt.\n");
                goto err_no_irq;
        }

        dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
                msix->vector);

        /* first MSI-X entry is not for wq interrupts */
        idxd->num_wq_irqs = msixcnt - 1;

        for (i = 1; i < msixcnt; i++) {
                msix = &idxd->msix_entries[i];
                irq_entry = &idxd->irq_entries[i];

                init_llist_head(&idxd->irq_entries[i].pending_llist);
                INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
                rc = devm_request_threaded_irq(dev, msix->vector,
                                               idxd_irq_handler,
                                               idxd_wq_thread, 0,
                                               "idxd-portal", irq_entry);
                if (rc < 0) {
                        dev_err(dev, "Failed to allocate irq %d.\n",
                                msix->vector);
                        goto err_no_irq;
                }
                dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
                        i, msix->vector);
        }

        idxd_unmask_error_interrupts(idxd);

        return 0;

 err_no_irq:
        /* Disable error interrupt generation */
        idxd_mask_error_interrupts(idxd);
        pci_disable_msix(pdev);
        dev_err(dev, "No usable interrupts\n");
        return rc;
}

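/*
 * Allocate the software state for groups, work queues and engines using
 * the limits read from the capability registers, and create the driver's
 * kernel workqueue.
 */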
static int idxd_setup_internals(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        int i;

        init_waitqueue_head(&idxd->cmd_waitq);
        idxd->groups = devm_kcalloc(dev, idxd->max_groups,
                                    sizeof(struct idxd_group), GFP_KERNEL);
        if (!idxd->groups)
                return -ENOMEM;

        for (i = 0; i < idxd->max_groups; i++) {
                idxd->groups[i].idxd = idxd;
                idxd->groups[i].id = i;
                idxd->groups[i].tc_a = -1;
                idxd->groups[i].tc_b = -1;
        }

        idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
                                 GFP_KERNEL);
        if (!idxd->wqs)
                return -ENOMEM;

        idxd->engines = devm_kcalloc(dev, idxd->max_engines,
                                     sizeof(struct idxd_engine), GFP_KERNEL);
        if (!idxd->engines)
                return -ENOMEM;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = &idxd->wqs[i];

                wq->id = i;
                wq->idxd = idxd;
                mutex_init(&wq->wq_lock);
                wq->idxd_cdev.minor = -1;
                wq->max_xfer_bytes = idxd->max_xfer_bytes;
                wq->max_batch_size = idxd->max_batch_size;
        }

        for (i = 0; i < idxd->max_engines; i++) {
                idxd->engines[i].idxd = idxd;
                idxd->engines[i].id = i;
        }

        idxd->wq = create_workqueue(dev_name(dev));
        if (!idxd->wq)
                return -ENOMEM;

        return 0;
}

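/*
 * Read the table offset register pair. Each field is expressed in units
 * of 0x100 bytes from the start of the device's MMIO register space.
 */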
static void idxd_read_table_offsets(struct idxd_device *idxd)
{
        union offsets_reg offsets;
        struct device *dev = &idxd->pdev->dev;

        offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
        offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET
                        + sizeof(u64));
        idxd->grpcfg_offset = offsets.grpcfg * 0x100;
        dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
        idxd->wqcfg_offset = offsets.wqcfg * 0x100;
        dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n",
                idxd->wqcfg_offset);
        idxd->msix_perm_offset = offsets.msix_perm * 0x100;
        dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
                idxd->msix_perm_offset);
        idxd->perfmon_offset = offsets.perfmon * 0x100;
        dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

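/*
 * Cache the general, group, engine, work queue and operation capability
 * registers so later configuration can be checked against the device's
 * limits.
 */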
static void idxd_read_caps(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        int i;

        /* reading generic capabilities */
        idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
        dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
        idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
        dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
        idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
        dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
        if (idxd->hw.gen_cap.config_en)
                set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

        /* reading group capabilities */
        idxd->hw.group_cap.bits =
                ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
        dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
        idxd->max_groups = idxd->hw.group_cap.num_groups;
        dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
        idxd->max_tokens = idxd->hw.group_cap.total_tokens;
        dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
        idxd->nr_tokens = idxd->max_tokens;

        /* reading engine capabilities */
        idxd->hw.engine_cap.bits =
                ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
        dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
        idxd->max_engines = idxd->hw.engine_cap.num_engines;
        dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

        /* reading workqueue capabilities */
        idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
        dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
        idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
        dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
        idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
        dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);

        /* reading operation capabilities */
        for (i = 0; i < 4; i++) {
                idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
                                IDXD_OPCAP_OFFSET + i * sizeof(u64));
                dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
        }
}

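/* Allocate the per-device context and record the register base. */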
static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
                                      void __iomem * const *iomap)
{
        struct device *dev = &pdev->dev;
        struct idxd_device *idxd;

        idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
        if (!idxd)
                return NULL;

        idxd->pdev = pdev;
        idxd->reg_base = iomap[IDXD_MMIO_BAR];
        spin_lock_init(&idxd->dev_lock);

        return idxd;
}

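/*
 * Device-level probe: reset the device, read capabilities and table
 * offsets, set up software state and interrupts, then allocate a device
 * id and obtain the char device major number.
 */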
static int idxd_probe(struct idxd_device *idxd)
{
        struct pci_dev *pdev = idxd->pdev;
        struct device *dev = &pdev->dev;
        int rc;

        dev_dbg(dev, "%s entered and resetting device\n", __func__);
        idxd_device_init_reset(idxd);
        dev_dbg(dev, "IDXD reset complete\n");

        idxd_read_caps(idxd);
        idxd_read_table_offsets(idxd);

        rc = idxd_setup_internals(idxd);
        if (rc)
                goto err_setup;

        rc = idxd_setup_interrupts(idxd);
        if (rc)
                goto err_setup;

        dev_dbg(dev, "IDXD interrupt setup complete.\n");

        mutex_lock(&idxd_idr_lock);
        idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
        mutex_unlock(&idxd_idr_lock);
        if (idxd->id < 0) {
                rc = -ENOMEM;
                goto err_idr_fail;
        }

        idxd->major = idxd_cdev_get_major(idxd);

        dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
        return 0;

 err_idr_fail:
        idxd_mask_error_interrupts(idxd);
        idxd_mask_msix_vectors(idxd);
 err_setup:
        return rc;
}

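/*
 * PCI probe: enable the device, map the MMIO BAR, set the DMA masks and
 * allocate the device context, then hand off to idxd_probe() and expose
 * the sysfs interface.
 */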
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        void __iomem * const *iomap;
        struct device *dev = &pdev->dev;
        struct idxd_device *idxd;
        int rc;
        unsigned int mask;

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        dev_dbg(dev, "Mapping BARs\n");
        mask = (1 << IDXD_MMIO_BAR);
        rc = pcim_iomap_regions(pdev, mask, DRV_NAME);
        if (rc)
                return rc;

        iomap = pcim_iomap_table(pdev);
        if (!iomap)
                return -ENOMEM;

        dev_dbg(dev, "Set DMA masks\n");
        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (rc)
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (rc)
                return rc;

        rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (rc)
                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (rc)
                return rc;

        dev_dbg(dev, "Alloc IDXD context\n");
        idxd = idxd_alloc(pdev, iomap);
        if (!idxd)
                return -ENOMEM;

        idxd_set_type(idxd);

        dev_dbg(dev, "Set PCI master\n");
        pci_set_master(pdev);
        pci_set_drvdata(pdev, idxd);

        idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
        rc = idxd_probe(idxd);
        if (rc) {
                dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
                return -ENODEV;
        }

        rc = idxd_setup_sysfs(idxd);
        if (rc) {
                dev_err(dev, "IDXD sysfs setup failed\n");
                return -ENODEV;
        }

        idxd->state = IDXD_DEV_CONF_READY;

        dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
                 idxd->hw.version);

        return 0;
}

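/*
 * Descriptors that were submitted but still sit on the lockless pending
 * list when the device goes down are completed as aborted and freed.
 */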
static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
{
        struct idxd_desc *desc, *iter;
        struct llist_node *head;

        head = llist_del_all(&ie->pending_llist);
        if (!head)
                return;

        llist_for_each_entry_safe(desc, iter, head, llnode) {
                idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
                idxd_free_desc(desc->wq, desc);
        }
}

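/* Likewise abort and free descriptors already moved to the work list. */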
static void idxd_flush_work_list(struct idxd_irq_entry *ie)
{
        struct idxd_desc *desc, *iter;

        list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
                list_del(&desc->list);
                idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
                idxd_free_desc(desc->wq, desc);
        }
}

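/*
 * Quiesce the device: disable it, mask all interrupt sources, wait for
 * in-flight interrupt handlers, abort outstanding descriptors and tear
 * down the driver workqueue.
 */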
static void idxd_shutdown(struct pci_dev *pdev)
{
        struct idxd_device *idxd = pci_get_drvdata(pdev);
        int rc, i;
        struct idxd_irq_entry *irq_entry;
        int msixcnt = pci_msix_vec_count(pdev);

        dev_dbg(&pdev->dev, "%s called\n", __func__);
        rc = idxd_device_disable(idxd);
        if (rc)
                dev_err(&pdev->dev, "Disabling device failed\n");

        idxd_mask_msix_vectors(idxd);
        idxd_mask_error_interrupts(idxd);

        for (i = 0; i < msixcnt; i++) {
                irq_entry = &idxd->irq_entries[i];
                synchronize_irq(idxd->msix_entries[i].vector);
                if (i == 0)
                        continue;
                idxd_flush_pending_llist(irq_entry);
                idxd_flush_work_list(irq_entry);
        }

        destroy_workqueue(idxd->wq);
}

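/* Undo probe: remove sysfs entries, quiesce the device and drop its id. */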
static void idxd_remove(struct pci_dev *pdev)
{
        struct idxd_device *idxd = pci_get_drvdata(pdev);

        dev_dbg(&pdev->dev, "%s called\n", __func__);
        idxd_cleanup_sysfs(idxd);
        idxd_shutdown(pdev);
        mutex_lock(&idxd_idr_lock);
        idr_remove(&idxd_idrs[idxd->type], idxd->id);
        mutex_unlock(&idxd_idr_lock);
}

static struct pci_driver idxd_pci_driver = {
        .name           = DRV_NAME,
        .id_table       = idxd_pci_tbl,
        .probe          = idxd_pci_probe,
        .remove         = idxd_remove,
        .shutdown       = idxd_shutdown,
};

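/*
 * Module init: require MOVDIR64B, initialize the per-type id allocators,
 * then register the bus type, device driver, char device and PCI driver,
 * unwinding in reverse order on failure.
 */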
static int __init idxd_init_module(void)
{
        int err, i;

        /*
         * If the CPU does not support MOVDIR64B (a 512-bit write), there
         * is no point in enumerating the device; we cannot use it.
         */
        if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
                pr_warn("idxd driver failed to load without MOVDIR64B.\n");
                return -ENODEV;
        }

        pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
                DRV_NAME, IDXD_DRIVER_VERSION);

        for (i = 0; i < IDXD_TYPE_MAX; i++)
                idr_init(&idxd_idrs[i]);

        err = idxd_register_bus_type();
        if (err < 0)
                return err;

        err = idxd_register_driver();
        if (err < 0)
                goto err_idxd_driver_register;

        err = idxd_cdev_register();
        if (err)
                goto err_cdev_register;

        err = pci_register_driver(&idxd_pci_driver);
        if (err)
                goto err_pci_register;

        return 0;

err_pci_register:
        idxd_cdev_remove();
err_cdev_register:
        idxd_unregister_driver();
err_idxd_driver_register:
        idxd_unregister_bus_type();
        return err;
}
module_init(idxd_init_module);

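/* Tear down in the reverse order of registration in idxd_init_module(). */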
static void __exit idxd_exit_module(void)
{
        pci_unregister_driver(&idxd_pci_driver);
        idxd_cdev_remove();
        idxd_unregister_driver();
        idxd_unregister_bus_type();
}
module_exit(idxd_exit_module);