// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");

#define DRV_NAME "idxd"

static struct idr idxd_idrs[IDXD_TYPE_MAX];
static struct mutex idxd_idr_lock;

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

static char *idxd_name[] = {
	"dsa",
};

const char *idxd_get_dev_name(struct idxd_device *idxd)
{
	return idxd_name[idxd->type];
}
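
/*
 * Allocate and request the device's MSI-X vectors: vector 0 handles
 * error/misc events via idxd_misc_thread, and each remaining vector
 * carries work queue completion interrupts with its own completion
 * lists.
 */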
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		goto err_no_irq;
	}

	idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) *
			msixcnt, GFP_KERNEL);
	if (!idxd->msix_entries) {
		rc = -ENOMEM;
		goto err_no_irq;
	}

	for (i = 0; i < msixcnt; i++)
		idxd->msix_entries[i].entry = i;

	rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
	if (rc) {
		dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
		goto err_no_irq;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	/*
	 * We implement 1 completion list per MSI-X entry except for
	 * entry 0, which is for errors and others.
	 */
	idxd->irq_entries = devm_kcalloc(dev, msixcnt,
					 sizeof(struct idxd_irq_entry),
					 GFP_KERNEL);
	if (!idxd->irq_entries) {
		rc = -ENOMEM;
		goto err_no_irq;
	}

	for (i = 0; i < msixcnt; i++) {
		idxd->irq_entries[i].id = i;
		idxd->irq_entries[i].idxd = idxd;
	}

	msix = &idxd->msix_entries[0];
	irq_entry = &idxd->irq_entries[0];
	rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
				       idxd_misc_thread, 0, "idxd-misc",
				       irq_entry);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_no_irq;
	}

	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
		msix->vector);

	/* first MSI-X entry is not for wq interrupts */
	idxd->num_wq_irqs = msixcnt - 1;

	for (i = 1; i < msixcnt; i++) {
		msix = &idxd->msix_entries[i];
		irq_entry = &idxd->irq_entries[i];

		init_llist_head(&idxd->irq_entries[i].pending_llist);
		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
		rc = devm_request_threaded_irq(dev, msix->vector,
					       idxd_irq_handler,
					       idxd_wq_thread, 0,
					       "idxd-portal", irq_entry);
		if (rc < 0) {
			dev_err(dev, "Failed to allocate irq %d.\n",
				msix->vector);
			goto err_no_irq;
		}
		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
			i, msix->vector);
	}

	idxd_unmask_error_interrupts(idxd);

	return 0;

 err_no_irq:
	/* Disable error interrupt generation */
	idxd_mask_error_interrupts(idxd);
	pci_disable_msix(pdev);
	dev_err(dev, "No usable interrupts\n");
	return -ENOSPC;
}
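
/*
 * Allocate the software state that mirrors the resources reported by
 * the capability registers: group, work queue and engine arrays, plus a
 * driver-private workqueue named after the device.
 */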
static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	init_waitqueue_head(&idxd->cmd_waitq);
	idxd->groups = devm_kcalloc(dev, idxd->max_groups,
				    sizeof(struct idxd_group), GFP_KERNEL);
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		idxd->groups[i].idxd = idxd;
		idxd->groups[i].id = i;
		idxd->groups[i].tc_a = -1;
		idxd->groups[i].tc_b = -1;
	}

	idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
				 GFP_KERNEL);
	if (!idxd->wqs)
		return -ENOMEM;

	idxd->engines = devm_kcalloc(dev, idxd->max_engines,
				     sizeof(struct idxd_engine), GFP_KERNEL);
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->id = i;
		wq->idxd = idxd;
		mutex_init(&wq->wq_lock);
		wq->idxd_cdev.minor = -1;
		wq->max_xfer_bytes = idxd->max_xfer_bytes;
		wq->max_batch_size = idxd->max_batch_size;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		idxd->engines[i].idxd = idxd;
		idxd->engines[i].id = i;
	}

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq)
		return -ENOMEM;

	return 0;
}
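
/*
 * Read the table offset register and convert each field (in units of
 * 0x100 bytes) into MMIO offsets for the group config, WQ config, MSI-X
 * permission and perfmon tables.
 */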
static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET
			+ sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * 0x100;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * 0x100;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n",
		idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * 0x100;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
		idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * 0x100;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}
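
/*
 * Cache the general, group, engine, work queue and operation capability
 * registers; these values bound the allocations done later in
 * idxd_setup_internals().
 */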
static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_tokens = idxd->hw.group_cap.total_tokens;
	dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
	idxd->nr_tokens = idxd->max_tokens;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
}
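
/*
 * Allocate the per-device context and attach the MMIO mapping of the
 * device control BAR.
 */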
static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
				      void __iomem * const *iomap)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;

	idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
	if (!idxd)
		return NULL;

	idxd->pdev = pdev;
	idxd->reg_base = iomap[IDXD_MMIO_BAR];
	spin_lock_init(&idxd->dev_lock);

	return idxd;
}
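
/*
 * Device-level probe: reset the device, read capabilities and table
 * offsets, set up software state and interrupts, then assign a device
 * id and the char device major number.
 */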
static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	idxd_device_init_reset(idxd);
	dev_dbg(dev, "IDXD reset complete\n");

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err_setup;

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_setup;

	dev_dbg(dev, "IDXD interrupt setup complete.\n");

	mutex_lock(&idxd_idr_lock);
	idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
	mutex_unlock(&idxd_idr_lock);
	if (idxd->id < 0) {
		rc = -ENOMEM;
		goto err_idr_fail;
	}

	idxd->major = idxd_cdev_get_major(idxd);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

 err_idr_fail:
	idxd_mask_error_interrupts(idxd);
	idxd_mask_msix_vectors(idxd);
 err_setup:
	return rc;
}
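
/*
 * PCI-level probe: enable the device, map the control BAR, set the DMA
 * masks (64-bit with 32-bit fallback), allocate the idxd context and
 * run the device-level probe before registering the sysfs interface.
 */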
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	int rc;
	unsigned int mask;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Mapping BARs\n");
	mask = (1 << IDXD_MMIO_BAR);
	rc = pcim_iomap_regions(pdev, mask, DRV_NAME);
	if (rc)
		return rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	dev_dbg(dev, "Set DMA masks\n");
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, iomap);
	if (!idxd)
		return -ENOMEM;

	idxd_set_type(idxd);

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		return -ENODEV;
	}

	rc = idxd_setup_sysfs(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		return -ENODEV;
	}

	idxd->state = IDXD_DEV_CONF_READY;

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;
}
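
/*
 * On shutdown, descriptors still sitting on an irq entry's pending
 * llist or work list are completed with IDXD_COMPLETE_ABORT and freed.
 */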
static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;

	head = llist_del_all(&ie->pending_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(desc, itr, head, llnode) {
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

static void idxd_flush_work_list(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *iter;

	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
		list_del(&desc->list);
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}
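
/*
 * Quiesce the device: disable it, mask interrupts, wait for in-flight
 * handlers with synchronize_irq(), then abort anything still queued on
 * the completion vectors.
 */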
static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	int rc, i;
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_mask_msix_vectors(idxd);
	idxd_mask_error_interrupts(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		synchronize_irq(idxd->msix_entries[i].vector);
		if (i == 0)
			continue;
		idxd_flush_pending_llist(irq_entry);
		idxd_flush_work_list(irq_entry);
	}

	destroy_workqueue(idxd->wq);
}
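
/*
 * Remove path: tear down sysfs, quiesce the device and release the id
 * allocated at probe time.
 */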
static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_cleanup_sysfs(idxd);
	idxd_shutdown(pdev);
	mutex_lock(&idxd_idr_lock);
	idr_remove(&idxd_idrs[idxd->type], idxd->id);
	mutex_unlock(&idxd_idr_lock);
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
};
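
/*
 * Module init: require MOVDIR64B (64-byte/512-bit writes to the device
 * portal), then register the idxd bus type, device driver, char dev
 * support and PCI driver, unwinding in reverse order on failure.
 */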
static int __init idxd_init_module(void)
{
	int err, i;

	/*
	 * If the CPU does not support write512, there's no point in
	 * enumerating the device. We can not utilize it.
	 */
	if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
		DRV_NAME, IDXD_DRIVER_VERSION);

	mutex_init(&idxd_idr_lock);
	for (i = 0; i < IDXD_TYPE_MAX; i++)
		idr_init(&idxd_idrs[i]);

	err = idxd_register_bus_type();
	if (err < 0)
		return err;

	err = idxd_register_driver();
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_cdev_remove();
err_cdev_register:
	idxd_unregister_driver();
err_idxd_driver_register:
	idxd_unregister_bus_type();
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	idxd_unregister_bus_type();
}
module_exit(idxd_exit_module);