/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>
#include <linux/nospec.h>

#include "vfio_pci_private.h"

#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "VFIO PCI - User Level meta-driver"

static char ids[1024] __initdata;
module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");
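
/*
 * For example (device IDs below are purely illustrative):
 *
 *   modprobe vfio-pci ids=10de:1db4,8086:10fb
 *
 * binds any matching devices to vfio-pci as soon as the module loads.
 */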

static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
		  "Disable support for PCI 2.3 style INTx masking.  If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");

#ifdef CONFIG_VFIO_PCI_VGA
static bool disable_vga;
module_param(disable_vga, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
#endif

static bool disable_idle_d3;
module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
		 "Disable using the PCI D3 low power state for idle, unused devices");

static DEFINE_MUTEX(driver_lock);
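
/*
 * Note: driver_lock serializes the open/release paths against
 * vfio_pci_try_bus_reset(), which (per its comment below) must not race
 * with device opens while it walks and resets an entire bus or slot.
 */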

static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
	return disable_vga;
#else
	return true;
#endif
}

/*
 * Our VGA arbiter participation is limited since we don't know anything
 * about the device itself.  However, if the device is the only VGA device
 * downstream of a bridge and VFIO VGA support is disabled, then we can
 * safely return legacy VGA IO and memory as not decoded since the user
 * has no way to get to it and routing can be disabled externally at the
 * bridge.
 */
static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *tmp = NULL, *pdev = vdev->pdev;
	unsigned char max_busnr;
	unsigned int decodes;

	if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
		       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;

	max_busnr = pci_bus_max_busnr(pdev->bus);
	decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
		if (tmp == pdev ||
		    pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
		    pci_is_root_bus(tmp->bus))
			continue;

		if (tmp->bus->number >= pdev->bus->number &&
		    tmp->bus->number <= max_busnr) {
			pci_dev_put(tmp);
			decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
			break;
		}
	}

	return decodes;
}
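
/*
 * Note: the mask computed above is handed back to the VGA arbiter
 * (vgaarb), which uses it to track whether legacy VGA I/O and memory
 * cycles still need to be routed to this device.
 */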

static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
	return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}

static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
{
	struct resource *res;
	int bar;
	struct vfio_pci_dummy_resource *dummy_res;

	INIT_LIST_HEAD(&vdev->dummy_resources_list);

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		res = vdev->pdev->resource + bar;

		if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
			goto no_mmap;

		if (!(res->flags & IORESOURCE_MEM))
			goto no_mmap;

		/*
		 * The PCI core shouldn't set up a resource with a
		 * type but zero size.  But there may be bugs that
		 * cause us to do that.
		 */
		if (!resource_size(res))
			goto no_mmap;

		if (resource_size(res) >= PAGE_SIZE) {
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}

		if (!(res->start & ~PAGE_MASK)) {
			/*
			 * Add a dummy resource to reserve the remainder
			 * of the exclusive page in case that hot-add
			 * device's bar is assigned into it.
			 */
			dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
			if (dummy_res == NULL)
				goto no_mmap;

			dummy_res->resource.name = "vfio sub-page reserved";
			dummy_res->resource.start = res->end + 1;
			dummy_res->resource.end = res->start + PAGE_SIZE - 1;
			dummy_res->resource.flags = res->flags;
			if (request_resource(res->parent,
						&dummy_res->resource)) {
				kfree(dummy_res);
				goto no_mmap;
			}
			dummy_res->index = bar;
			list_add(&dummy_res->res_next,
					&vdev->dummy_resources_list);
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}
		/*
		 * Here we don't handle the case when the BAR is not page
		 * aligned because we can't expect the BAR will be
		 * assigned into the same location in a page in guest
		 * when we passthrough the BAR. And it's hard to access
		 * this BAR in userspace because we have no way to get
		 * the BAR's location in a page.
		 */
no_mmap:
		vdev->bar_mmap_supported[bar] = false;
	}
}
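
/*
 * Worked example for the sub-page case above, assuming 4KB pages: a 2KB
 * BAR starting at a page-aligned address leaves the upper 2KB of the page
 * unclaimed, so the dummy resource claims [res->end + 1, res->start +
 * PAGE_SIZE - 1] to keep a hot-added device's BAR from being assigned there.
 */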

static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);

/*
 * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
 * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
 * If a device implements the former but not the latter we would typically
 * expect broken_intx_masking to be set and require an exclusive interrupt.
 * However since we do have control of the device's ability to assert INTx,
 * we can instead pretend that the device does not implement INTx, virtualizing
 * the pin register to report zero and maintaining DisINTx set on the host.
 */
static bool vfio_pci_nointx(struct pci_dev *pdev)
{
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		/* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */
		case 0x1572:
		case 0x1574:
		case 0x1580 ... 0x1581:
		case 0x1583 ... 0x158b:
		case 0x37d0 ... 0x37d2:
			return true;
		default:
			return false;
		}
	}

	return false;
}

static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;
	u16 cmd;
	u8 msix_pos;

	pci_set_power_state(pdev, PCI_D0);

	/* Don't allow our initial saved state to include busmaster */
	pci_clear_master(pdev);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* If reset fails because of the device lock, fail this path entirely */
	ret = pci_try_reset_function(pdev);
	if (ret == -EAGAIN) {
		pci_disable_device(pdev);
		return ret;
	}

	vdev->reset_works = !ret;
	pci_save_state(pdev);
	vdev->pci_saved_state = pci_store_saved_state(pdev);
	if (!vdev->pci_saved_state)
		pr_debug("%s: Couldn't store %s saved state\n",
			 __func__, dev_name(&pdev->dev));

	if (likely(!nointxmask)) {
		if (vfio_pci_nointx(pdev)) {
			dev_info(&pdev->dev, "Masking broken INTx support\n");
			vdev->nointx = true;
			pci_intx(pdev, 0);
		} else
			vdev->pci_2_3 = pci_intx_mask_supported(pdev);
	}

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	ret = vfio_config_init(vdev);
	if (ret) {
		kfree(vdev->pci_saved_state);
		vdev->pci_saved_state = NULL;
		pci_disable_device(pdev);
		return ret;
	}

	msix_pos = pdev->msix_cap;
	if (msix_pos) {
		u16 flags;
		u32 table;

		pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
		pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);

		vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
		vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
		vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
	} else
		vdev->msix_bar = 0xFF;

	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
		vdev->has_vga = true;

	if (vfio_pci_is_vga(pdev) &&
	    pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
		ret = vfio_pci_igd_init(vdev);
		if (ret) {
			dev_warn(&vdev->pdev->dev,
				 "Failed to setup Intel IGD regions\n");
			goto disable_exit;
		}
	}

	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
	    IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
		ret = vfio_pci_nvdia_v100_nvlink2_init(vdev);
		if (ret && ret != -ENODEV) {
			dev_warn(&vdev->pdev->dev,
				 "Failed to setup NVIDIA NV2 RAM region\n");
			goto disable_exit;
		}
	}

	if (pdev->vendor == PCI_VENDOR_ID_IBM &&
	    IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
		ret = vfio_pci_ibm_npu2_init(vdev);
		if (ret && ret != -ENODEV) {
			dev_warn(&vdev->pdev->dev,
				 "Failed to setup NVIDIA NV2 ATSD region\n");
			goto disable_exit;
		}
	}

	vfio_pci_probe_mmaps(vdev);

	return 0;

disable_exit:
	vfio_pci_disable(vdev);
	return ret;
}

static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_dummy_resource *dummy_res, *tmp;
	struct vfio_pci_ioeventfd *ioeventfd, *ioeventfd_tmp;
	int i, bar;

	/* Stop the device from further DMA */
	pci_clear_master(pdev);

	vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
				VFIO_IRQ_SET_ACTION_TRIGGER,
				vdev->irq_type, 0, 0, NULL);

	/* Device closed, don't need mutex here */
	list_for_each_entry_safe(ioeventfd, ioeventfd_tmp,
				 &vdev->ioeventfds_list, next) {
		vfio_virqfd_disable(&ioeventfd->virqfd);
		list_del(&ioeventfd->next);
		kfree(ioeventfd);
	}
	vdev->ioeventfds_nr = 0;

	vdev->virq_disabled = false;

	for (i = 0; i < vdev->num_regions; i++)
		vdev->region[i].ops->release(vdev, &vdev->region[i]);

	vdev->num_regions = 0;
	kfree(vdev->region);
	vdev->region = NULL; /* don't krealloc a freed pointer */

	vfio_config_free(vdev);

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		if (!vdev->barmap[bar])
			continue;
		pci_iounmap(pdev, vdev->barmap[bar]);
		pci_release_selected_regions(pdev, 1 << bar);
		vdev->barmap[bar] = NULL;
	}

	list_for_each_entry_safe(dummy_res, tmp,
				 &vdev->dummy_resources_list, res_next) {
		list_del(&dummy_res->res_next);
		release_resource(&dummy_res->resource);
		kfree(dummy_res);
	}

	vdev->needs_reset = true;

	/*
	 * If we have saved state, restore it.  If we can reset the device,
	 * even better.  Resetting with current state seems better than
	 * nothing, but saving and restoring current state without reset
	 * is just busy work.
	 */
	if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
		pr_info("%s: Couldn't reload %s saved state\n",
			__func__, dev_name(&pdev->dev));

		if (!vdev->reset_works)
			goto out;

		pci_save_state(pdev);
	}

	/*
	 * Disable INTx and MSI, presumably to avoid spurious interrupts
	 * during reset.  Stolen from pci_reset_function()
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	/*
	 * Try to reset the device.  The success of this is dependent on
	 * being able to lock the device, which is not always possible.
	 */
	if (vdev->reset_works && !pci_try_reset_function(pdev))
		vdev->needs_reset = false;

	pci_restore_state(pdev);
out:
	pci_disable_device(pdev);

	vfio_pci_try_bus_reset(vdev);

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D3hot);
}

static void vfio_pci_release(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&driver_lock);

	if (!(--vdev->refcnt)) {
		vfio_spapr_pci_eeh_release(vdev->pdev);
		vfio_pci_disable(vdev);
	}

	mutex_unlock(&driver_lock);

	module_put(THIS_MODULE);
}

static int vfio_pci_open(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&driver_lock);

	if (!vdev->refcnt) {
		ret = vfio_pci_enable(vdev);
		if (ret)
			goto error;

		vfio_spapr_pci_eeh_open(vdev->pdev);
	}
	vdev->refcnt++;
error:
	mutex_unlock(&driver_lock);
	if (ret)
		module_put(THIS_MODULE);
	return ret;
}
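
/*
 * Note: the first open of a device enables it and the final release
 * disables it; refcnt counts the opens in between so that a bus reset is
 * only attempted once no user holds any affected device.
 */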

static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
		u8 pin;

		if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
		    vdev->nointx || vdev->pdev->is_virtfn)
			return 0;

		pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);

		return pin ? 1 : 0;
	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msi_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSI_FLAGS, &flags);
			return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
		}
	} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msix_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSIX_FLAGS, &flags);

			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
		}
	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
		if (pci_is_pcie(vdev->pdev))
			return 1;
	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
		return 1;
	}

	return 0;
}
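
/*
 * Worked example for the MSI math above: PCI_MSI_FLAGS_QMASK holds a
 * power-of-two encoding, so a Multiple Message Capable field of 3 means
 * 1 << 3 = 8 vectors.  MSI-X instead stores table size minus one, so a
 * PCI_MSIX_FLAGS_QSIZE value of 7 also means 8 vectors.
 */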

static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
	(*(int *)data)++;
	return 0;
}

struct vfio_pci_fill_info {
	int max;
	int cur;
	struct vfio_pci_dependent_device *devices;
};

static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_fill_info *fill = data;
	struct iommu_group *iommu_group;

	if (fill->cur == fill->max)
		return -EAGAIN; /* Something changed, try again */

	iommu_group = iommu_group_get(&pdev->dev);
	if (!iommu_group)
		return -EPERM; /* Cannot reset non-isolated devices */

	fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
	fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
	fill->devices[fill->cur].bus = pdev->bus->number;
	fill->devices[fill->cur].devfn = pdev->devfn;
	fill->cur++;
	iommu_group_put(iommu_group);
	return 0;
}

struct vfio_pci_group_entry {
	struct vfio_group *group;
	int id;
};

struct vfio_pci_group_info {
	int count;
	struct vfio_pci_group_entry *groups;
};

static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_group_info *info = data;
	struct iommu_group *group;
	int id, i;

	group = iommu_group_get(&pdev->dev);
	if (!group)
		return -EPERM;

	id = iommu_group_id(group);

	for (i = 0; i < info->count; i++)
		if (info->groups[i].id == id)
			break;

	iommu_group_put(group);

	return (i == info->count) ? -EINVAL : 0;
}

static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
	for (; pdev; pdev = pdev->bus->self)
		if (pdev->bus == slot->bus)
			return (pdev->slot == slot);
	return false;
}

struct vfio_pci_walk_info {
	int (*fn)(struct pci_dev *, void *data);
	void *data;
	struct pci_dev *pdev;
	bool slot;
	int ret;
};

static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_walk_info *walk = data;

	if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
		walk->ret = walk->fn(pdev, walk->data);

	return walk->ret;
}

static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
					 int (*fn)(struct pci_dev *,
						   void *data), void *data,
					 bool slot)
{
	struct vfio_pci_walk_info walk = {
		.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
	};

	pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);

	return walk.ret;
}

static int msix_mmappable_cap(struct vfio_pci_device *vdev,
			      struct vfio_info_cap *caps)
{
	struct vfio_info_cap_header header = {
		.id = VFIO_REGION_INFO_CAP_MSIX_MAPPABLE,
		.version = 1
	};

	return vfio_info_add_capability(caps, &header, sizeof(header));
}

int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
				 unsigned int type, unsigned int subtype,
				 const struct vfio_pci_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_pci_region *region;

	region = krealloc(vdev->region,
			  (vdev->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vdev->region = region;
	vdev->region[vdev->num_regions].type = type;
	vdev->region[vdev->num_regions].subtype = subtype;
	vdev->region[vdev->num_regions].ops = ops;
	vdev->region[vdev->num_regions].size = size;
	vdev->region[vdev->num_regions].flags = flags;
	vdev->region[vdev->num_regions].data = data;

	vdev->num_regions++;

	return 0;
}
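
/*
 * Sketch of a caller, modeled on vfio_pci_igd_init(); ops, size, and base
 * are placeholders, not a definitive call site:
 *
 *   ret = vfio_pci_register_dev_region(vdev,
 *		VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
 *		VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
 *		&ops, size, VFIO_REGION_INFO_FLAG_READ, base);
 */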

static long vfio_pci_ioctl(void *device_data,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_pci_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;

		if (vdev->reset_works)
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		int i, ret;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pdev->cfg_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			if (vdev->bar_mmap_supported[info.index]) {
				info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
				if (info.index == vdev->msix_bar) {
					ret = msix_mmappable_cap(vdev, &caps);
					if (ret)
						return ret;
				}
			}

			break;
		case VFIO_PCI_ROM_REGION_INDEX:
		{
			void __iomem *io;
			size_t size;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = 0;

			/* Report the BAR size, not the ROM size */
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				/* Shadow ROMs appear as PCI option ROMs */
				if (pdev->resource[PCI_ROM_RESOURCE].flags &
							IORESOURCE_ROM_SHADOW)
					info.size = 0x20000;
				else
					break;
			}

			/* Is it really there? */
			io = pci_map_rom(pdev, &size);
			if (!io || !size) {
				info.size = 0;
				break;
			}
			pci_unmap_rom(pdev, io);

			info.flags = VFIO_REGION_INFO_FLAG_READ;
			break;
		}
		case VFIO_PCI_VGA_REGION_INDEX:
			if (!vdev->has_vga)
				return -EINVAL;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0xc0000;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;

			break;
		default:
		{
			struct vfio_region_info_cap_type cap_type = {
					.header.id = VFIO_REGION_INFO_CAP_TYPE,
					.header.version = 1 };

			if (info.index >=
			    VFIO_PCI_NUM_REGIONS + vdev->num_regions)
				return -EINVAL;
			info.index = array_index_nospec(info.index,
							VFIO_PCI_NUM_REGIONS +
							vdev->num_regions);

			i = info.index - VFIO_PCI_NUM_REGIONS;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vdev->region[i].size;
			info.flags = vdev->region[i].flags;

			cap_type.type = vdev->region[i].type;
			cap_type.subtype = vdev->region[i].subtype;

			ret = vfio_info_add_capability(&caps, &cap_type.header,
						       sizeof(cap_type));
			if (ret)
				return ret;

			if (vdev->region[i].ops->add_capability) {
				ret = vdev->region[i].ops->add_capability(vdev,
						&vdev->region[i], &caps);
				if (ret)
					return ret;
			}
		}
		}

		if (caps.size) {
			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						  sizeof(info), caps.buf,
						  caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
		case VFIO_PCI_REQ_IRQ_INDEX:
			break;
		case VFIO_PCI_ERR_IRQ_INDEX:
			if (pci_is_pcie(vdev->pdev))
				break;
		/* fall through */
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = vfio_pci_get_irq_count(vdev, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int max, ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		max = vfio_pci_get_irq_count(vdev, hdr.index);

		ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						 VFIO_PCI_NUM_IRQS, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
					      hdr.start, hdr.count, data);

		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		return vdev->reset_works ?
			pci_try_reset_function(vdev->pdev) : -EINVAL;

	} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
		struct vfio_pci_hot_reset_info hdr;
		struct vfio_pci_fill_info fill = { 0 };
		struct vfio_pci_dependent_device *devices = NULL;
		bool slot = false;
		int ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset_info, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		hdr.flags = 0;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/* How many devices are affected? */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &fill.max, slot);
		if (ret)
			return ret;

		WARN_ON(!fill.max); /* Should always be at least one */

		/*
		 * If there's enough space, fill it now, otherwise return
		 * -ENOSPC and the number of devices affected.
		 */
		if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
			ret = -ENOSPC;
			hdr.count = fill.max;
			goto reset_info_exit;
		}

		devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
		if (!devices)
			return -ENOMEM;

		fill.devices = devices;

		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_fill_devs,
						    &fill, slot);

		/*
		 * If a device was removed between counting and filling,
		 * we may come up short of fill.max.  If a device was
		 * added, we'll have a return of -EAGAIN above.
		 */
		if (!ret)
			hdr.count = fill.cur;

reset_info_exit:
		if (copy_to_user((void __user *)arg, &hdr, minsz))
			ret = -EFAULT;

		if (!ret) {
			if (copy_to_user((void __user *)(arg + minsz), devices,
					 hdr.count * sizeof(*devices)))
				ret = -EFAULT;
		}

		kfree(devices);
		return ret;

	} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
		struct vfio_pci_hot_reset hdr;
		int32_t *group_fds;
		struct vfio_pci_group_entry *groups;
		struct vfio_pci_group_info info;
		bool slot = false;
		int i, count = 0, ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.flags)
			return -EINVAL;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/*
		 * We can't let userspace give us an arbitrarily large
		 * buffer to copy, so verify how many we think there
		 * could be.  Note groups can have multiple devices so
		 * one group per device is the max.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &count, slot);
		if (ret)
			return ret;

		/* Somewhere between 1 and count is OK */
		if (!hdr.count || hdr.count > count)
			return -EINVAL;

		group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
		groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
		if (!group_fds || !groups) {
			kfree(group_fds);
			kfree(groups);
			return -ENOMEM;
		}

		if (copy_from_user(group_fds, (void __user *)(arg + minsz),
				   hdr.count * sizeof(*group_fds))) {
			kfree(group_fds);
			kfree(groups);
			return -EFAULT;
		}

		/*
		 * For each group_fd, get the group through the vfio external
		 * user interface and store the group and iommu ID.  This
		 * ensures the group is held across the reset.
		 */
		for (i = 0; i < hdr.count; i++) {
			struct vfio_group *group;
			struct fd f = fdget(group_fds[i]);
			if (!f.file) {
				ret = -EBADF;
				break;
			}

			group = vfio_group_get_external_user(f.file);
			fdput(f);
			if (IS_ERR(group)) {
				ret = PTR_ERR(group);
				break;
			}

			groups[i].group = group;
			groups[i].id = vfio_external_user_iommu_id(group);
		}

		kfree(group_fds);

		/* release reference to groups on error */
		if (ret)
			goto hot_reset_release;

		info.count = hdr.count;
		info.groups = groups;

		/*
		 * Test whether all the affected devices are contained
		 * by the set of groups provided by the user.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_validate_devs,
						    &info, slot);
		if (!ret)
			/* User has access, do the reset */
			ret = pci_reset_bus(vdev->pdev);

hot_reset_release:
		for (i--; i >= 0; i--)
			vfio_group_put_external_user(groups[i].group);

		kfree(groups);
		return ret;
	} else if (cmd == VFIO_DEVICE_IOEVENTFD) {
		struct vfio_device_ioeventfd ioeventfd;
		int count;

		minsz = offsetofend(struct vfio_device_ioeventfd, fd);

		if (copy_from_user(&ioeventfd, (void __user *)arg, minsz))
			return -EFAULT;

		if (ioeventfd.argsz < minsz)
			return -EINVAL;

		if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
			return -EINVAL;

		count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;

		if (hweight8(count) != 1 || ioeventfd.fd < -1)
			return -EINVAL;

		return vfio_pci_ioeventfd(vdev, ioeventfd.offset,
					  ioeventfd.data, count, ioeventfd.fd);
	}

	return -ENOTTY;
}
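
/*
 * Minimal userspace sketch of the ioctls above (illustrative, error
 * handling omitted):
 *
 *   struct vfio_device_info dinfo = { .argsz = sizeof(dinfo) };
 *   ioctl(device_fd, VFIO_DEVICE_GET_INFO, &dinfo);
 *
 *   struct vfio_region_info rinfo = { .argsz = sizeof(rinfo),
 *				       .index = VFIO_PCI_CONFIG_REGION_INDEX };
 *   ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &rinfo);
 *   pread(device_fd, buf, rinfo.size, rinfo.offset);
 */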

static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
			   size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_device *vdev = device_data;

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_ROM_REGION_INDEX:
		if (iswrite)
			return -EINVAL;
		return vfio_pci_bar_rw(vdev, buf, count, ppos, false);

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_VGA_REGION_INDEX:
		return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
	default:
		index -= VFIO_PCI_NUM_REGIONS;
		return vdev->region[index].ops->rw(vdev, buf,
						   count, ppos, iswrite);
	}

	return -EINVAL;
}

static ssize_t vfio_pci_read(void *device_data, char __user *buf,
			     size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, buf, count, ppos, false);
}

static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}

static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_pci_device *vdev = device_data;
	struct pci_dev *pdev = vdev->pdev;
	unsigned int index;
	u64 phys_len, req_len, pgoff, req_start;
	int ret;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index >= VFIO_PCI_NUM_REGIONS) {
		int regnum = index - VFIO_PCI_NUM_REGIONS;
		struct vfio_pci_region *region = vdev->region + regnum;

		if (region && region->ops && region->ops->mmap &&
		    (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
			return region->ops->mmap(vdev, region, vma);
		return -EINVAL;
	}
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;
	if (!vdev->bar_mmap_supported[index])
		return -EINVAL;

	phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (req_start + req_len > phys_len)
		return -EINVAL;

	/*
	 * Even though we don't make use of the barmap for the mmap,
	 * we need to request the region and the barmap tracks that.
	 */
	if (!vdev->barmap[index]) {
		ret = pci_request_selected_regions(pdev,
						   1 << index, "vfio-pci");
		if (ret)
			return ret;

		vdev->barmap[index] = pci_iomap(pdev, index, 0);
		if (!vdev->barmap[index]) {
			pci_release_selected_regions(pdev, 1 << index);
			return -ENOMEM;
		}
	}

	vma->vm_private_data = vdev;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}
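
/*
 * Note: the region index is encoded in the upper bits of the mmap offset,
 * so userspace maps, e.g., BAR2 with offset
 * VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_BAR2_REGION_INDEX), i.e.
 * 2ULL << VFIO_PCI_OFFSET_SHIFT.
 */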

static void vfio_pci_request(void *device_data, unsigned int count)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&vdev->igate);

	if (vdev->req_trigger) {
		if (!(count % 10))
			dev_notice_ratelimited(&vdev->pdev->dev,
				"Relaying device request to user (#%u)\n",
				count);
		eventfd_signal(vdev->req_trigger, 1);
	} else if (count == 0) {
		dev_warn(&vdev->pdev->dev,
			"No device request channel registered, blocked until released by user\n");
	}

	mutex_unlock(&vdev->igate);
}

static const struct vfio_device_ops vfio_pci_ops = {
	.name		= "vfio-pci",
	.open		= vfio_pci_open,
	.release	= vfio_pci_release,
	.ioctl		= vfio_pci_ioctl,
	.read		= vfio_pci_read,
	.write		= vfio_pci_write,
	.mmap		= vfio_pci_mmap,
	.request	= vfio_pci_request,
};

static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_device *vdev;
	struct iommu_group *group;
	int ret;

	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return -EINVAL;

	/*
	 * Prevent binding to PFs with VFs enabled, this too easily allows
	 * a userspace instance with VFs and PFs from the same device, which
	 * cannot work.  Disabling SR-IOV here would initiate removing the
	 * VFs, which would unbind the driver, which is prone to blocking
	 * if that VF is also in use by vfio-pci.  Just reject these PFs
	 * and let the user sort it out.
	 */
	if (pci_num_vf(pdev)) {
		pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n");
		return -EBUSY;
	}

	group = vfio_iommu_group_get(&pdev->dev);
	if (!group)
		return -EINVAL;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		vfio_iommu_group_put(group, &pdev->dev);
		return -ENOMEM;
	}

	vdev->pdev = pdev;
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	mutex_init(&vdev->igate);
	spin_lock_init(&vdev->irqlock);
	mutex_init(&vdev->ioeventfds_lock);
	INIT_LIST_HEAD(&vdev->ioeventfds_list);

	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
	if (ret) {
		vfio_iommu_group_put(group, &pdev->dev);
		kfree(vdev);
		return ret;
	}

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
		vga_set_legacy_decoding(pdev,
					vfio_pci_set_vga_decode(vdev, false));
	}

	if (!disable_idle_d3) {
		/*
		 * pci-core sets the device power state to an unknown value at
		 * bootup and after being removed from a driver.  The only
		 * transition it allows from this unknown state is to D0, which
		 * typically happens when a driver calls pci_enable_device().
		 * We're not ready to enable the device yet, but we do want to
		 * be able to get to D3.  Therefore first do a D0 transition
		 * before going to D3.
		 */
		pci_set_power_state(pdev, PCI_D0);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return ret;
}

static void vfio_pci_remove(struct pci_dev *pdev)
{
	struct vfio_pci_device *vdev;

	vdev = vfio_del_group_dev(&pdev->dev);
	if (!vdev)
		return;

	vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
	kfree(vdev->region);
	mutex_destroy(&vdev->ioeventfds_lock);
	kfree(vdev);

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, NULL, NULL, NULL);
		vga_set_legacy_decoding(pdev,
				VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
				VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	}

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D0);
}

static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct vfio_pci_device *vdev;
	struct vfio_device *device;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (device == NULL)
		return PCI_ERS_RESULT_DISCONNECT;

	vdev = vfio_device_data(device);
	if (vdev == NULL) {
		vfio_device_put(device);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	mutex_lock(&vdev->igate);

	if (vdev->err_trigger)
		eventfd_signal(vdev->err_trigger, 1);

	mutex_unlock(&vdev->igate);

	vfio_device_put(device);

	return PCI_ERS_RESULT_CAN_RECOVER;
}

static const struct pci_error_handlers vfio_err_handlers = {
	.error_detected = vfio_pci_aer_err_detected,
};

static struct pci_driver vfio_pci_driver = {
	.name			= "vfio-pci",
	.id_table		= NULL, /* only dynamic ids */
	.probe			= vfio_pci_probe,
	.remove			= vfio_pci_remove,
	.err_handler		= &vfio_err_handlers,
};
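
/*
 * No static id_table: devices reach this driver either through the ids=
 * module parameter or at runtime through sysfs, e.g. (IDs illustrative):
 *
 *   echo 10de 1db4 > /sys/bus/pci/drivers/vfio-pci/new_id
 */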

struct vfio_devices {
	struct vfio_device **devices;
	int cur_index;
	int max_index;
};

static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_devices *devs = data;
	struct vfio_device *device;

	if (devs->cur_index == devs->max_index)
		return -ENOSPC;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return -EINVAL;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return -EBUSY;
	}

	devs->devices[devs->cur_index++] = device;
	return 0;
}

/*
 * Attempt to do a bus/slot reset if there are devices affected by a reset for
 * this device that are needs_reset and all of the affected devices are unused
 * (!refcnt).  Callers are required to hold driver_lock when calling this to
 * prevent device opens and concurrent bus reset attempts.  We prevent device
 * unbinds by acquiring and holding a reference to the vfio_device.
 *
 * NB: vfio-core considers a group to be viable even if some devices are
 * bound to drivers like pci-stub or pcieport.  Here we require all devices
 * to be bound to vfio_pci since that's the only way we can be sure they
 * stay put.
 */
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
{
	struct vfio_devices devs = { .cur_index = 0 };
	int i = 0, ret = -EINVAL;
	bool needs_reset = false, slot = false;
	struct vfio_pci_device *tmp;

	if (!pci_probe_reset_slot(vdev->pdev->slot))
		slot = true;
	else if (pci_probe_reset_bus(vdev->pdev->bus))
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
					  &i, slot) || !i)
		return;

	devs.max_index = i;
	devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
	if (!devs.devices)
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
					  vfio_pci_get_devs, &devs, slot))
		goto put_devs;

	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (tmp->needs_reset)
			needs_reset = true;
		if (tmp->refcnt)
			goto put_devs;
	}

	if (needs_reset)
		ret = pci_reset_bus(vdev->pdev);

put_devs:
	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (!ret)
			tmp->needs_reset = false;

		if (!tmp->refcnt && !disable_idle_d3)
			pci_set_power_state(tmp->pdev, PCI_D3hot);

		vfio_device_put(devs.devices[i]);
	}

	kfree(devs.devices);
}

static void __exit vfio_pci_cleanup(void)
{
	pci_unregister_driver(&vfio_pci_driver);
	vfio_pci_uninit_perm_bits();
}

static void __init vfio_pci_fill_ids(void)
{
	char *p, *id;
	int rc;

	/* no ids passed actually */
	if (ids[0] == '\0')
		return;

	/* add ids specified in the module parameter */
	p = ids;
	while ((id = strsep(&p, ","))) {
		unsigned int vendor, device, subvendor = PCI_ANY_ID,
			subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
		int fields;

		if (!strlen(id))
			continue;

		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
				&vendor, &device, &subvendor, &subdevice,
				&class, &class_mask);

		if (fields < 2) {
			pr_warn("invalid id string \"%s\"\n", id);
			continue;
		}

		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
				   subvendor, subdevice, class, class_mask, 0);
		if (rc)
			pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
				vendor, device, subvendor, subdevice,
				class, class_mask, rc);
		else
			pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
				vendor, device, subvendor, subdevice,
				class, class_mask);
	}
}

static int __init vfio_pci_init(void)
{
	int ret;

	/* Allocate shared config space permission data used by all devices */
	ret = vfio_pci_init_perm_bits();
	if (ret)
		return ret;

	/* Register and scan for devices */
	ret = pci_register_driver(&vfio_pci_driver);
	if (ret)
		goto out_driver;

	vfio_pci_fill_ids();

	return 0;

out_driver:
	vfio_pci_uninit_perm_bits();
	return ret;
}

module_init(vfio_pci_init);
module_exit(vfio_pci_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);