/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>
#include <linux/nospec.h>
#include "vfio_pci_private.h"
#define DRIVER_VERSION	"0.2"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO PCI - User Level meta-driver"
static char ids[1024] __initdata;
module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");
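/*
 * Example usage of the format above (hypothetical device IDs):
 *   modprobe vfio-pci ids=10de:13ba,8086:10fb
 */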
static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
		  "Disable support for PCI 2.3 style INTx masking.  If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");
#ifdef CONFIG_VFIO_PCI_VGA
static bool disable_vga;
module_param(disable_vga, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
#endif
static bool disable_idle_d3;
module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
		 "Disable using the PCI D3 low power state for idle, unused devices");
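/*
 * VGA access is only usable when CONFIG_VFIO_PCI_VGA is built in and the
 * disable_vga module parameter is unset; otherwise report it as disabled.
 */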
static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
	return disable_vga;
#else
	return true;
#endif
}
/*
 * Our VGA arbiter participation is limited since we don't know anything
 * about the device itself.  However, if the device is the only VGA device
 * downstream of a bridge and VFIO VGA support is disabled, then we can
 * safely return legacy VGA IO and memory as not decoded since the user
 * has no way to get to it and routing can be disabled externally at the
 * bridge.
 */
static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *tmp = NULL, *pdev = vdev->pdev;
	unsigned char max_busnr;
	unsigned int decodes;

	if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
		       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;

	max_busnr = pci_bus_max_busnr(pdev->bus);
	decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
		if (tmp == pdev ||
		    pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
		    pci_is_root_bus(tmp->bus))
			continue;

		if (tmp->bus->number >= pdev->bus->number &&
		    tmp->bus->number <= max_busnr) {
			pci_dev_put(tmp);
			decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
			break;
		}
	}

	return decodes;
}
static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
	return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}
static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
{
	struct resource *res;
	int bar;
	struct vfio_pci_dummy_resource *dummy_res;

	INIT_LIST_HEAD(&vdev->dummy_resources_list);

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		res = vdev->pdev->resource + bar;

		if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
			goto no_mmap;

		if (!(res->flags & IORESOURCE_MEM))
			goto no_mmap;

		/*
		 * The PCI core shouldn't set up a resource with a
		 * type but zero size.  But there may be bugs that
		 * cause us to do that.
		 */
		if (!resource_size(res))
			goto no_mmap;

		if (resource_size(res) >= PAGE_SIZE) {
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}

		if (!(res->start & ~PAGE_MASK)) {
			/*
			 * Add a dummy resource to reserve the remainder
			 * of the exclusive page in case a hot-added
			 * device's BAR is assigned into it.
			 */
			dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
			if (dummy_res == NULL)
				goto no_mmap;

			dummy_res->resource.name = "vfio sub-page reserved";
			dummy_res->resource.start = res->end + 1;
			dummy_res->resource.end = res->start + PAGE_SIZE - 1;
			dummy_res->resource.flags = res->flags;
			if (request_resource(res->parent,
						&dummy_res->resource)) {
				kfree(dummy_res);
				goto no_mmap;
			}
			dummy_res->index = bar;
			list_add(&dummy_res->res_next,
					&vdev->dummy_resources_list);
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}
		/*
		 * We don't handle the case where the BAR is not page
		 * aligned because we can't expect the BAR to be
		 * assigned to the same page offset in the guest when
		 * it is passed through, and userspace has no way to
		 * learn the BAR's offset within the page.
		 */
no_mmap:
		vdev->bar_mmap_supported[bar] = false;
	}
}
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);
/*
 * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
 * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
 * If a device implements the former but not the latter we would typically
 * expect broken_intx_masking to be set and require an exclusive interrupt.
 * However since we do have control of the device's ability to assert INTx,
 * we can instead pretend that the device does not implement INTx, virtualizing
 * the pin register to report zero and maintaining DisINTx set on the host.
 */
static bool vfio_pci_nointx(struct pci_dev *pdev)
{
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		/* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */
		case 0x1572:
		case 0x1574:
		case 0x1580 ... 0x1581:
		case 0x1583 ... 0x158b:
		case 0x37d0 ... 0x37d2:
			return true;
		default:
			return false;
		}
	}

	return false;
}
static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;
	u16 cmd;
	u8 msix_pos;

	pci_set_power_state(pdev, PCI_D0);

	/* Don't allow our initial saved state to include busmaster */
	pci_clear_master(pdev);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* If reset fails because of the device lock, fail this path entirely */
	ret = pci_try_reset_function(pdev);
	if (ret == -EAGAIN) {
		pci_disable_device(pdev);
		return ret;
	}

	vdev->reset_works = !ret;
	pci_save_state(pdev);
	vdev->pci_saved_state = pci_store_saved_state(pdev);
	if (!vdev->pci_saved_state)
		pr_debug("%s: Couldn't store %s saved state\n",
			 __func__, dev_name(&pdev->dev));

	if (likely(!nointxmask)) {
		if (vfio_pci_nointx(pdev)) {
			dev_info(&pdev->dev, "Masking broken INTx support\n");
			vdev->nointx = true;
			pci_intx(pdev, 0);
		} else
			vdev->pci_2_3 = pci_intx_mask_supported(pdev);
	}

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	ret = vfio_config_init(vdev);
	if (ret) {
		kfree(vdev->pci_saved_state);
		vdev->pci_saved_state = NULL;
		pci_disable_device(pdev);
		return ret;
	}

	msix_pos = pdev->msix_cap;
	if (msix_pos) {
		u16 flags;
		u32 table;

		pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
		pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);

		vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
		vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
		vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
	} else
		vdev->msix_bar = 0xFF;

	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
		vdev->has_vga = true;

	if (vfio_pci_is_vga(pdev) &&
	    pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
		ret = vfio_pci_igd_init(vdev);
		if (ret) {
			dev_warn(&vdev->pdev->dev,
				 "Failed to setup Intel IGD regions\n");
			vfio_pci_disable(vdev);
			return ret;
		}
	}

	vfio_pci_probe_mmaps(vdev);

	return 0;
}
static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_dummy_resource *dummy_res, *tmp;
	struct vfio_pci_ioeventfd *ioeventfd, *ioeventfd_tmp;
	int i, bar;

	/* Stop the device from further DMA */
	pci_clear_master(pdev);

	vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
				VFIO_IRQ_SET_ACTION_TRIGGER,
				vdev->irq_type, 0, 0, NULL);

	/* Device closed, don't need mutex here */
	list_for_each_entry_safe(ioeventfd, ioeventfd_tmp,
				 &vdev->ioeventfds_list, next) {
		vfio_virqfd_disable(&ioeventfd->virqfd);
		list_del(&ioeventfd->next);
		kfree(ioeventfd);
	}
	vdev->ioeventfds_nr = 0;

	vdev->virq_disabled = false;

	for (i = 0; i < vdev->num_regions; i++)
		vdev->region[i].ops->release(vdev, &vdev->region[i]);

	vdev->num_regions = 0;
	kfree(vdev->region);
	vdev->region = NULL; /* don't krealloc a freed pointer */

	vfio_config_free(vdev);

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		if (!vdev->barmap[bar])
			continue;
		pci_iounmap(pdev, vdev->barmap[bar]);
		pci_release_selected_regions(pdev, 1 << bar);
		vdev->barmap[bar] = NULL;
	}

	list_for_each_entry_safe(dummy_res, tmp,
				 &vdev->dummy_resources_list, res_next) {
		list_del(&dummy_res->res_next);
		release_resource(&dummy_res->resource);
		kfree(dummy_res);
	}

	vdev->needs_reset = true;

	/*
	 * If we have saved state, restore it.  If we can reset the device,
	 * even better.  Resetting with current state seems better than
	 * nothing, but saving and restoring current state without reset
	 * is just busy work.
	 */
	if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
		pr_info("%s: Couldn't reload %s saved state\n",
			__func__, dev_name(&pdev->dev));

		if (!vdev->reset_works)
			goto out;

		pci_save_state(pdev);
	}

	/*
	 * Disable INTx and MSI, presumably to avoid spurious interrupts
	 * during reset.  Stolen from pci_reset_function()
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	/*
	 * Try to reset the device.  The success of this is dependent on
	 * being able to lock the device, which is not always possible.
	 */
	if (vdev->reset_works && !pci_try_reset_function(pdev))
		vdev->needs_reset = false;

	pci_restore_state(pdev);
out:
	pci_disable_device(pdev);

	vfio_pci_try_bus_reset(vdev);

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D3hot);
}
static void vfio_pci_release(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&vdev->reflck->lock);

	if (!(--vdev->refcnt)) {
		vfio_spapr_pci_eeh_release(vdev->pdev);
		vfio_pci_disable(vdev);
	}

	mutex_unlock(&vdev->reflck->lock);

	module_put(THIS_MODULE);
}
static int vfio_pci_open(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&vdev->reflck->lock);

	if (!vdev->refcnt) {
		ret = vfio_pci_enable(vdev);
		if (ret)
			goto error;

		vfio_spapr_pci_eeh_open(vdev->pdev);
	}
	vdev->refcnt++;
error:
	mutex_unlock(&vdev->reflck->lock);
	if (ret)
		module_put(THIS_MODULE);
	return ret;
}
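/*
 * Report how many interrupts a given IRQ index can provide by probing the
 * device's config space: one for INTx (when present and usable), the
 * advertised vector counts for MSI/MSI-X, and a single error/request IRQ.
 */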
static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
		u8 pin;

		if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
		    vdev->nointx || vdev->pdev->is_virtfn)
			return 0;

		pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);

		return pin ? 1 : 0;
	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msi_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSI_FLAGS, &flags);
			return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
		}
	} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msix_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSIX_FLAGS, &flags);

			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
		}
	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
		if (pci_is_pcie(vdev->pdev))
			return 1;
	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
		return 1;
	}

	return 0;
}
static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
	(*(int *)data)++;
	return 0;
}
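/*
 * Callback state for vfio_pci_for_each_slot_or_bus(): first count the
 * devices affected by a bus/slot reset, then fill in their addresses and
 * IOMMU group IDs for reporting to userspace.
 */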
struct vfio_pci_fill_info {
	int max;
	int cur;
	struct vfio_pci_dependent_device *devices;
};
static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_fill_info *fill = data;
	struct iommu_group *iommu_group;

	if (fill->cur == fill->max)
		return -EAGAIN; /* Something changed, try again */

	iommu_group = iommu_group_get(&pdev->dev);
	if (!iommu_group)
		return -EPERM; /* Cannot reset non-isolated devices */

	fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
	fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
	fill->devices[fill->cur].bus = pdev->bus->number;
	fill->devices[fill->cur].devfn = pdev->devfn;
	fill->cur++;
	iommu_group_put(iommu_group);
	return 0;
}
struct vfio_pci_group_entry {
	struct vfio_group *group;
	int id;
};

struct vfio_pci_group_info {
	int count;
	struct vfio_pci_group_entry *groups;
};
static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_group_info *info = data;
	struct iommu_group *group;
	int id, i;

	group = iommu_group_get(&pdev->dev);
	if (!group)
		return -EPERM;

	id = iommu_group_id(group);

	for (i = 0; i < info->count; i++)
		if (info->groups[i].id == id)
			break;

	iommu_group_put(group);

	return (i == info->count) ? -EINVAL : 0;
}
static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
	for (; pdev; pdev = pdev->bus->self)
		if (pdev->bus == slot->bus)
			return (pdev->slot == slot);

	return false;
}
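/*
 * Walk helpers: pci_walk_bus() visits every device on the bus, so when
 * only a slot reset is possible we filter the walk down to the devices
 * below that slot.
 */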
struct vfio_pci_walk_info {
	int (*fn)(struct pci_dev *, void *data);
	void *data;
	struct pci_dev *pdev;
	bool slot;
	int ret;
};
static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_walk_info *walk = data;

	if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
		walk->ret = walk->fn(pdev, walk->data);

	return walk->ret;
}
static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
					 int (*fn)(struct pci_dev *,
						   void *data), void *data,
					 bool slot)
{
	struct vfio_pci_walk_info walk = {
		.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
	};

	pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);

	return walk.ret;
}
static int msix_mmappable_cap(struct vfio_pci_device *vdev,
			      struct vfio_info_cap *caps)
{
	struct vfio_info_cap_header header = {
		.id = VFIO_REGION_INFO_CAP_MSIX_MAPPABLE,
		.version = 1
	};

	return vfio_info_add_capability(caps, &header, sizeof(header));
}
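/*
 * Device-specific regions (for example the Intel IGD regions set up in
 * vfio_pci_enable()) are appended after the fixed VFIO_PCI_NUM_REGIONS
 * indexes and exposed to userspace with a type capability.
 */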
int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
				 unsigned int type, unsigned int subtype,
				 const struct vfio_pci_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_pci_region *region;

	region = krealloc(vdev->region,
			  (vdev->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vdev->region = region;
	vdev->region[vdev->num_regions].type = type;
	vdev->region[vdev->num_regions].subtype = subtype;
	vdev->region[vdev->num_regions].ops = ops;
	vdev->region[vdev->num_regions].size = size;
	vdev->region[vdev->num_regions].flags = flags;
	vdev->region[vdev->num_regions].data = data;

	vdev->num_regions++;

	return 0;
}
static long vfio_pci_ioctl(void *device_data,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_pci_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;

		if (vdev->reset_works)
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		int i, ret;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pdev->cfg_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			if (vdev->bar_mmap_supported[info.index]) {
				info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
				if (info.index == vdev->msix_bar) {
					ret = msix_mmappable_cap(vdev, &caps);
					if (ret)
						return ret;
				}
			}

			break;
		case VFIO_PCI_ROM_REGION_INDEX:
		{
			void __iomem *io;
			size_t size;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = 0;

			/* Report the BAR size, not the ROM size */
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				/* Shadow ROMs appear as PCI option ROMs */
				if (pdev->resource[PCI_ROM_RESOURCE].flags &
							IORESOURCE_ROM_SHADOW)
					info.size = 0x20000;
				else
					break;
			}

			/* Is it really there? */
			io = pci_map_rom(pdev, &size);
			if (!io || !size) {
				info.size = 0;
				break;
			}
			pci_unmap_rom(pdev, io);

			info.flags = VFIO_REGION_INFO_FLAG_READ;
			break;
		}
		case VFIO_PCI_VGA_REGION_INDEX:
			if (!vdev->has_vga)
				return -EINVAL;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0xc0000;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;

			break;
		default:
		{
			struct vfio_region_info_cap_type cap_type = {
					.header.id = VFIO_REGION_INFO_CAP_TYPE,
					.header.version = 1 };

			if (info.index >=
			    VFIO_PCI_NUM_REGIONS + vdev->num_regions)
				return -EINVAL;

			info.index = array_index_nospec(info.index,
							VFIO_PCI_NUM_REGIONS +
							vdev->num_regions);

			i = info.index - VFIO_PCI_NUM_REGIONS;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vdev->region[i].size;
			info.flags = vdev->region[i].flags;

			cap_type.type = vdev->region[i].type;
			cap_type.subtype = vdev->region[i].subtype;

			ret = vfio_info_add_capability(&caps, &cap_type.header,
						       sizeof(cap_type));
			if (ret)
				return ret;
		}
		}

		if (caps.size) {
			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						  sizeof(info), caps.buf,
						  caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
		case VFIO_PCI_REQ_IRQ_INDEX:
			break;
		case VFIO_PCI_ERR_IRQ_INDEX:
			if (pci_is_pcie(vdev->pdev))
				break;
		/* fall through */
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = vfio_pci_get_irq_count(vdev, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int max, ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		max = vfio_pci_get_irq_count(vdev, hdr.index);

		ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
					      hdr.start, hdr.count, data);

		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		return vdev->reset_works ?
			pci_try_reset_function(vdev->pdev) : -EINVAL;
	} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
		struct vfio_pci_hot_reset_info hdr;
		struct vfio_pci_fill_info fill = { 0 };
		struct vfio_pci_dependent_device *devices = NULL;
		bool slot = false;
		int ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset_info, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		hdr.flags = 0;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/* How many devices are affected? */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &fill.max, slot);
		if (ret)
			return ret;

		WARN_ON(!fill.max); /* Should always be at least one */

		/*
		 * If there's enough space, fill it now, otherwise return
		 * -ENOSPC and the number of devices affected.
		 */
		if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
			ret = -ENOSPC;
			hdr.count = fill.max;
			goto reset_info_exit;
		}

		devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
		if (!devices)
			return -ENOMEM;

		fill.devices = devices;

		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_fill_devs,
						    &fill, slot);

		/*
		 * If a device was removed between counting and filling,
		 * we may come up short of fill.max.  If a device was
		 * added, we'll have a return of -EAGAIN above.
		 */
		if (!ret)
			hdr.count = fill.cur;

reset_info_exit:
		if (copy_to_user((void __user *)arg, &hdr, minsz))
			ret = -EFAULT;

		if (!ret) {
			if (copy_to_user((void __user *)(arg + minsz), devices,
					 hdr.count * sizeof(*devices)))
				ret = -EFAULT;
		}

		kfree(devices);
		return ret;
	} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
		struct vfio_pci_hot_reset hdr;
		int32_t *group_fds;
		struct vfio_pci_group_entry *groups;
		struct vfio_pci_group_info info;
		bool slot = false;
		int i, count = 0, ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.flags)
			return -EINVAL;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/*
		 * We can't let userspace give us an arbitrarily large
		 * buffer to copy, so verify how many we think there
		 * could be.  Note that groups can have multiple devices,
		 * so one group per device is the maximum.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &count, slot);
		if (ret)
			return ret;

		/* Somewhere between 1 and count is OK */
		if (!hdr.count || hdr.count > count)
			return -EINVAL;

		group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
		groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
		if (!group_fds || !groups) {
			kfree(group_fds);
			kfree(groups);
			return -ENOMEM;
		}

		if (copy_from_user(group_fds, (void __user *)(arg + minsz),
				   hdr.count * sizeof(*group_fds))) {
			kfree(group_fds);
			kfree(groups);
			return -EFAULT;
		}

		/*
		 * For each group_fd, get the group through the vfio external
		 * user interface and store the group and iommu ID.  This
		 * ensures the group is held across the reset.
		 */
		for (i = 0; i < hdr.count; i++) {
			struct vfio_group *group;
			struct fd f = fdget(group_fds[i]);
			if (!f.file) {
				ret = -EBADF;
				break;
			}

			group = vfio_group_get_external_user(f.file);
			fdput(f);
			if (IS_ERR(group)) {
				ret = PTR_ERR(group);
				break;
			}

			groups[i].group = group;
			groups[i].id = vfio_external_user_iommu_id(group);
		}

		kfree(group_fds);

		/* release reference to groups on error */
		if (ret)
			goto hot_reset_release;

		info.count = hdr.count;
		info.groups = groups;

		/*
		 * Test whether all the affected devices are contained
		 * by the set of groups provided by the user.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_validate_devs,
						    &info, slot);
		if (!ret)
			/* User has access, do the reset */
			ret = pci_reset_bus(vdev->pdev);

hot_reset_release:
		for (i--; i >= 0; i--)
			vfio_group_put_external_user(groups[i].group);

		kfree(groups);
		return ret;
	} else if (cmd == VFIO_DEVICE_IOEVENTFD) {
		struct vfio_device_ioeventfd ioeventfd;
		int count;

		minsz = offsetofend(struct vfio_device_ioeventfd, fd);

		if (copy_from_user(&ioeventfd, (void __user *)arg, minsz))
			return -EFAULT;

		if (ioeventfd.argsz < minsz)
			return -EINVAL;

		if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
			return -EINVAL;

		count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;

		if (hweight8(count) != 1 || ioeventfd.fd < -1)
			return -EINVAL;

		return vfio_pci_ioeventfd(vdev, ioeventfd.offset,
					  ioeventfd.data, count, ioeventfd.fd);
	}

	return -ENOTTY;
}
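/*
 * Dispatch reads and writes to the handler for the region encoded in the
 * high bits of the file offset.
 */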
static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
			   size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_device *vdev = device_data;

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_ROM_REGION_INDEX:
		if (iswrite)
			return -EINVAL;
		return vfio_pci_bar_rw(vdev, buf, count, ppos, false);

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_VGA_REGION_INDEX:
		return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
	default:
		index -= VFIO_PCI_NUM_REGIONS;
		return vdev->region[index].ops->rw(vdev, buf,
						   count, ppos, iswrite);
	}

	return -EINVAL;
}
static ssize_t vfio_pci_read(void *device_data, char __user *buf,
			     size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, buf, count, ppos, false);
}

static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}
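/*
 * The mmap offset encodes the region index in the bits above
 * VFIO_PCI_OFFSET_SHIFT; only memory BARs probed as mmap-capable may be
 * mapped.
 */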
static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_pci_device *vdev = device_data;
	struct pci_dev *pdev = vdev->pdev;
	unsigned int index;
	u64 phys_len, req_len, pgoff, req_start;
	int ret;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;
	if (!vdev->bar_mmap_supported[index])
		return -EINVAL;

	phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (req_start + req_len > phys_len)
		return -EINVAL;

	/*
	 * Even though we don't make use of the barmap for the mmap,
	 * we need to request the region and the barmap tracks that.
	 */
	if (!vdev->barmap[index]) {
		ret = pci_request_selected_regions(pdev,
						   1 << index, "vfio-pci");
		if (ret)
			return ret;

		vdev->barmap[index] = pci_iomap(pdev, index, 0);
		if (!vdev->barmap[index]) {
			pci_release_selected_regions(pdev, 1 << index);
			return -ENOMEM;
		}
	}

	vma->vm_private_data = vdev;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}
static void vfio_pci_request(void *device_data, unsigned int count)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&vdev->igate);

	if (vdev->req_trigger) {
		if (!(count % 10))
			dev_notice_ratelimited(&vdev->pdev->dev,
				"Relaying device request to user (#%u)\n",
				count);
		eventfd_signal(vdev->req_trigger, 1);
	} else if (count == 0) {
		dev_warn(&vdev->pdev->dev,
			"No device request channel registered, blocked until released by user\n");
	}

	mutex_unlock(&vdev->igate);
}
static const struct vfio_device_ops vfio_pci_ops = {
	.name		= "vfio-pci",
	.open		= vfio_pci_open,
	.release	= vfio_pci_release,
	.ioctl		= vfio_pci_ioctl,
	.read		= vfio_pci_read,
	.write		= vfio_pci_write,
	.mmap		= vfio_pci_mmap,
	.request	= vfio_pci_request,
};
static int vfio_pci_reflck_attach(struct vfio_pci_device *vdev);
static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck);
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_device *vdev;
	struct iommu_group *group;
	int ret;

	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return -EINVAL;

	/*
	 * Prevent binding to PFs with VFs enabled, this too easily allows
	 * userspace instance with VFs and PFs from the same device, which
	 * cannot work.  Disabling SR-IOV here would initiate removing the
	 * VFs, which would unbind the driver, which is prone to blocking
	 * if that VF is also in use by vfio-pci.  Just reject these PFs
	 * and let the user sort it out.
	 */
	if (pci_num_vf(pdev)) {
		pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n");
		return -EBUSY;
	}

	group = vfio_iommu_group_get(&pdev->dev);
	if (!group)
		return -EINVAL;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		vfio_iommu_group_put(group, &pdev->dev);
		return -ENOMEM;
	}

	vdev->pdev = pdev;
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	mutex_init(&vdev->igate);
	spin_lock_init(&vdev->irqlock);
	mutex_init(&vdev->ioeventfds_lock);
	INIT_LIST_HEAD(&vdev->ioeventfds_list);

	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
	if (ret) {
		vfio_iommu_group_put(group, &pdev->dev);
		kfree(vdev);
		return ret;
	}

	ret = vfio_pci_reflck_attach(vdev);
	if (ret) {
		vfio_del_group_dev(&pdev->dev);
		vfio_iommu_group_put(group, &pdev->dev);
		kfree(vdev);
		return ret;
	}

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
		vga_set_legacy_decoding(pdev,
					vfio_pci_set_vga_decode(vdev, false));
	}

	if (!disable_idle_d3) {
		/*
		 * pci-core sets the device power state to an unknown value at
		 * bootup and after being removed from a driver.  The only
		 * transition it allows from this unknown state is to D0, which
		 * typically happens when a driver calls pci_enable_device().
		 * We're not ready to enable the device yet, but we do want to
		 * be able to get to D3.  Therefore first do a D0 transition
		 * before going to D3.
		 */
		pci_set_power_state(pdev, PCI_D0);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return ret;
}
static void vfio_pci_remove(struct pci_dev *pdev)
{
	struct vfio_pci_device *vdev;

	vdev = vfio_del_group_dev(&pdev->dev);
	if (!vdev)
		return;

	vfio_pci_reflck_put(vdev->reflck);

	vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
	kfree(vdev->region);
	mutex_destroy(&vdev->ioeventfds_lock);
	kfree(vdev);

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, NULL, NULL, NULL);
		vga_set_legacy_decoding(pdev,
				VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
				VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	}

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D0);
}
static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct vfio_pci_device *vdev;
	struct vfio_device *device;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (device == NULL)
		return PCI_ERS_RESULT_DISCONNECT;

	vdev = vfio_device_data(device);
	if (vdev == NULL) {
		vfio_device_put(device);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	mutex_lock(&vdev->igate);

	if (vdev->err_trigger)
		eventfd_signal(vdev->err_trigger, 1);

	mutex_unlock(&vdev->igate);

	vfio_device_put(device);

	return PCI_ERS_RESULT_CAN_RECOVER;
}
static const struct pci_error_handlers vfio_err_handlers = {
	.error_detected = vfio_pci_aer_err_detected,
};
static struct pci_driver vfio_pci_driver = {
	.name		= "vfio-pci",
	.id_table	= NULL, /* only dynamic ids */
	.probe		= vfio_pci_probe,
	.remove		= vfio_pci_remove,
	.err_handler	= &vfio_err_handlers,
};
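/*
 * The reflck is a reference-counted lock shared by all vfio-pci devices
 * within the same bus/slot reset boundary, serializing open/release
 * against bus resets across the whole set of devices.
 */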
static DEFINE_MUTEX(reflck_lock);
static struct vfio_pci_reflck *vfio_pci_reflck_alloc(void)
{
	struct vfio_pci_reflck *reflck;

	reflck = kzalloc(sizeof(*reflck), GFP_KERNEL);
	if (!reflck)
		return ERR_PTR(-ENOMEM);

	kref_init(&reflck->kref);
	mutex_init(&reflck->lock);

	return reflck;
}
static void vfio_pci_reflck_get(struct vfio_pci_reflck *reflck)
{
	kref_get(&reflck->kref);
}
static int vfio_pci_reflck_find(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_reflck **preflck = data;
	struct vfio_device *device;
	struct vfio_pci_device *vdev;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return 0;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return 0;
	}

	vdev = vfio_device_data(device);

	if (vdev->reflck) {
		vfio_pci_reflck_get(vdev->reflck);
		*preflck = vdev->reflck;
		vfio_device_put(device);
		return 1;
	}

	vfio_device_put(device);
	return 0;
}
static int vfio_pci_reflck_attach(struct vfio_pci_device *vdev)
{
	bool slot = !pci_probe_reset_slot(vdev->pdev->slot);

	mutex_lock(&reflck_lock);

	if (pci_is_root_bus(vdev->pdev->bus) ||
	    vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_reflck_find,
					  &vdev->reflck, slot) <= 0)
		vdev->reflck = vfio_pci_reflck_alloc();

	mutex_unlock(&reflck_lock);

	return PTR_ERR_OR_ZERO(vdev->reflck);
}
static void vfio_pci_reflck_release(struct kref *kref)
{
	struct vfio_pci_reflck *reflck = container_of(kref,
						      struct vfio_pci_reflck,
						      kref);

	kfree(reflck);
	mutex_unlock(&reflck_lock);
}
static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck)
{
	kref_put_mutex(&reflck->kref, vfio_pci_reflck_release, &reflck_lock);
}
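/*
 * Array of held vfio_device references, used to pin the set of devices
 * affected by a bus/slot reset while the reset is attempted.
 */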
struct vfio_devices {
	struct vfio_device **devices;
	int cur_index;
	int max_index;
};
static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_devices *devs = data;
	struct vfio_device *device;
	struct vfio_pci_device *vdev;

	if (devs->cur_index == devs->max_index)
		return -ENOSPC;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return -EINVAL;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return -EBUSY;
	}

	vdev = vfio_device_data(device);

	/* Fault if the device is not unused */
	if (vdev->refcnt) {
		vfio_device_put(device);
		return -EBUSY;
	}

	devs->devices[devs->cur_index++] = device;
	return 0;
}
/*
 * If a bus or slot reset is available for the provided device and:
 *  - All of the devices affected by that bus or slot reset are unused
 *    (!refcnt)
 *  - At least one of the affected devices is marked dirty via
 *    needs_reset (such as by lack of FLR support)
 * Then attempt to perform that bus or slot reset.  Callers are required
 * to hold vdev->reflck->lock, protecting the bus/slot reset group from
 * concurrent opens.  A vfio_device reference is acquired for each device
 * to prevent unbinds during the reset operation.
 *
 * NB: vfio-core considers a group to be viable even if some devices are
 * bound to drivers like pci-stub or pcieport.  Here we require all devices
 * to be bound to vfio_pci since that's the only way we can be sure they
 * stay put.
 */
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
{
	struct vfio_devices devs = { .cur_index = 0 };
	int i = 0, ret = -EINVAL;
	bool slot = false;
	struct vfio_pci_device *tmp;

	if (!pci_probe_reset_slot(vdev->pdev->slot))
		slot = true;
	else if (pci_probe_reset_bus(vdev->pdev->bus))
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
					  &i, slot) || !i)
		return;

	devs.max_index = i;
	devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
	if (!devs.devices)
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
					  vfio_pci_get_unused_devs,
					  &devs, slot))
		goto put_devs;

	/* Does at least one need a reset? */
	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (tmp->needs_reset) {
			ret = pci_reset_bus(vdev->pdev);
			break;
		}
	}

put_devs:
	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);

		/*
		 * If reset was successful, affected devices no longer need
		 * a reset and we should return all the collateral devices
		 * to low power.  If not successful, we either didn't reset
		 * the bus or timed out waiting for it, so let's not touch
		 * the power state.
		 */
		if (!ret) {
			tmp->needs_reset = false;

			if (tmp != vdev && !disable_idle_d3)
				pci_set_power_state(tmp->pdev, PCI_D3hot);
		}

		vfio_device_put(devs.devices[i]);
	}

	kfree(devs.devices);
}
static void __exit vfio_pci_cleanup(void)
{
	pci_unregister_driver(&vfio_pci_driver);
	vfio_pci_uninit_perm_bits();
}
static void __init vfio_pci_fill_ids(void)
{
	char *p, *id;
	int rc;

	/* no ids passed actually */
	if (ids[0] == '\0')
		return;

	/* add ids specified in the module parameter */
	p = ids;
	while ((id = strsep(&p, ","))) {
		unsigned int vendor, device, subvendor = PCI_ANY_ID,
			subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
		int fields;

		if (!strlen(id))
			continue;

		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
				&vendor, &device, &subvendor, &subdevice,
				&class, &class_mask);

		if (fields < 2) {
			pr_warn("invalid id string \"%s\"\n", id);
			continue;
		}

		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
				   subvendor, subdevice, class, class_mask, 0);
		if (rc)
			pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
				vendor, device, subvendor, subdevice,
				class, class_mask, rc);
		else
			pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
				vendor, device, subvendor, subdevice,
				class, class_mask);
	}
}
static int __init vfio_pci_init(void)
{
	int ret;

	/* Allocate shared config space permission data used by all devices */
	ret = vfio_pci_init_perm_bits();
	if (ret)
		return ret;

	/* Register and scan for devices */
	ret = pci_register_driver(&vfio_pci_driver);
	if (ret)
		goto out_driver;

	vfio_pci_fill_ids();

	return 0;

out_driver:
	vfio_pci_uninit_perm_bits();
	return ret;
}
module_init(vfio_pci_init);
module_exit(vfio_pci_cleanup);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);