/*
 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
 *
 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Jike Song <jike.song@intel.com>
 *    Xiaoguang Chen <xiaoguang.chen@intel.com>
 */
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/debugfs.h>

#include <linux/nospec.h>

#include "i915_drv.h"
#include "gvt.h"

static const struct intel_gvt_ops *intel_gvt_ops;
/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT   40
#define VFIO_PCI_OFFSET_TO_INDEX(off)   (off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)

#define EDID_BLOB_OFFSET (PAGE_SIZE/2)

#define OPREGION_SIGNATURE "IntelGraphicsMem"
struct vfio_region;
struct intel_vgpu_regops {
	size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
			size_t count, loff_t *ppos, bool iswrite);
	void (*release)(struct intel_vgpu *vgpu,
			struct vfio_region *region);
};

struct vfio_region {
	u32				type;
	u32				subtype;
	size_t				size;
	u32				flags;
	const struct intel_vgpu_regops	*ops;
	void				*data;
};

struct vfio_edid_region {
	struct vfio_region_gfx_edid vfio_edid_regs;
	void *edid_blob;
};

struct kvmgt_pgfn {
	gfn_t gfn;
	struct hlist_node hnode;
};

struct kvmgt_guest_info {
	struct kvm *kvm;
	struct intel_vgpu *vgpu;
	struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
	struct hlist_head ptable[NR_BKT];
#undef NR_BKT
	struct dentry *debugfs_cache_entries;
};

struct gvt_dma {
	struct intel_vgpu *vgpu;
	struct rb_node gfn_node;
	struct rb_node dma_addr_node;
	gfn_t gfn;
	dma_addr_t dma_addr;
	unsigned long size;
	struct kref ref;
};
static inline bool handle_valid(unsigned long handle)
{
	return !!(handle & ~0xff);
}

static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
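
/* Unpin @size bytes of guest memory starting at @gfn, one page at a time. */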
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long size)
{
	int total_pages;
	int npage;
	int ret;

	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;

	for (npage = 0; npage < total_pages; npage++) {
		unsigned long cur_gfn = gfn + npage;

		ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
		WARN_ON(ret != 1);
	}
}
/* Pin a normal or compound guest page for dma. */
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long size, struct page **page)
{
	unsigned long base_pfn = 0;
	int total_pages;
	int npage;
	int ret;

	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
	/*
	 * We pin the pages one-by-one to avoid allocating a big array
	 * on stack to hold pfns.
	 */
	for (npage = 0; npage < total_pages; npage++) {
		unsigned long cur_gfn = gfn + npage;
		unsigned long pfn;

		ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
				     IOMMU_READ | IOMMU_WRITE, &pfn);
		if (ret != 1) {
			gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
				     cur_gfn, ret);
			goto err;
		}

		if (!pfn_valid(pfn)) {
			gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
			npage++;
			ret = -EFAULT;
			goto err;
		}

		if (npage == 0)
			base_pfn = pfn;
		else if (base_pfn + npage != pfn) {
			gvt_vgpu_err("The pages are not contiguous\n");
			ret = -EINVAL;
			npage++;
			goto err;
		}
	}

	*page = pfn_to_page(base_pfn);
	return 0;
err:
	gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
	return ret;
}
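
/*
 * Pin the guest range starting at @gfn and map it for device DMA.
 * On success *@dma_addr holds the bus address; on failure the pages
 * are unpinned again before returning.
 */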
static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
		dma_addr_t *dma_addr, unsigned long size)
{
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	struct page *page = NULL;
	int ret;

	ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
	if (ret)
		return ret;

	/* Setup DMA mapping. */
	*dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr)) {
		gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
			     page_to_pfn(page), ret);
		gvt_unpin_guest_page(vgpu, gfn, size);
		return -ENOMEM;
	}

	return 0;
}
static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
		dma_addr_t dma_addr, unsigned long size)
{
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;

	dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	gvt_unpin_guest_page(vgpu, gfn, size);
}
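
/*
 * Each mapping is tracked by a struct gvt_dma that sits in two rbtrees
 * at once: gfn_cache keyed by guest frame number and dma_addr_cache
 * keyed by bus address, so a mapping can be looked up from either
 * direction in O(log n).
 */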
static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
		dma_addr_t dma_addr)
{
	struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node;
	struct gvt_dma *itr;

	while (node) {
		itr = rb_entry(node, struct gvt_dma, dma_addr_node);

		if (dma_addr < itr->dma_addr)
			node = node->rb_left;
		else if (dma_addr > itr->dma_addr)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}

static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct rb_node *node = vgpu->vdev.gfn_cache.rb_node;
	struct gvt_dma *itr;

	while (node) {
		itr = rb_entry(node, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			node = node->rb_left;
		else if (gfn > itr->gfn)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}
static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
		dma_addr_t dma_addr, unsigned long size)
{
	struct gvt_dma *new, *itr;
	struct rb_node **link, *parent = NULL;

	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->vgpu = vgpu;
	new->gfn = gfn;
	new->dma_addr = dma_addr;
	new->size = size;
	kref_init(&new->ref);

	/* gfn_cache maps gfn to struct gvt_dma. */
	link = &vgpu->vdev.gfn_cache.rb_node;
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->gfn_node, parent, link);
	rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache);

	/* dma_addr_cache maps dma addr to struct gvt_dma. */
	parent = NULL;
	link = &vgpu->vdev.dma_addr_cache.rb_node;
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, dma_addr_node);

		if (dma_addr < itr->dma_addr)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->dma_addr_node, parent, link);
	rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);

	vgpu->vdev.nr_cache_entries++;
	return 0;
}
static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
				struct gvt_dma *entry)
{
	rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache);
	rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache);
	kfree(entry);
	vgpu->vdev.nr_cache_entries--;
}

static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
	struct gvt_dma *dma;
	struct rb_node *node = NULL;

	for (;;) {
		mutex_lock(&vgpu->vdev.cache_lock);
		node = rb_first(&vgpu->vdev.gfn_cache);
		if (!node) {
			mutex_unlock(&vgpu->vdev.cache_lock);
			break;
		}
		dma = rb_entry(node, struct gvt_dma, gfn_node);
		gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
		__gvt_cache_remove_entry(vgpu, dma);
		mutex_unlock(&vgpu->vdev.cache_lock);
	}
}

static void gvt_cache_init(struct intel_vgpu *vgpu)
{
	vgpu->vdev.gfn_cache = RB_ROOT;
	vgpu->vdev.dma_addr_cache = RB_ROOT;
	vgpu->vdev.nr_cache_entries = 0;
	mutex_init(&vgpu->vdev.cache_lock);
}
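
/*
 * The protect table is a hash of gfns whose pages are currently
 * write-protected for guest page-table shadowing; it mirrors what has
 * been registered with KVM's page-track facility so add/remove calls
 * stay balanced.
 */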
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
{
	hash_init(info->ptable);
}

static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
{
	struct kvmgt_pgfn *p;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

static struct kvmgt_pgfn *
__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p, *res = NULL;

	hash_for_each_possible(info->ptable, p, hnode, gfn) {
		if (gfn == p->gfn) {
			res = p;
			break;
		}
	}

	return res;
}

static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	return !!p;
}

static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	if (kvmgt_gfn_is_write_protected(info, gfn))
		return;

	p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
	if (WARN(!p, "gfn: 0x%llx\n", gfn))
		return;

	p->gfn = gfn;
	hash_add(info->ptable, &p->hnode, gfn);
}

static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	if (p) {
		hash_del(&p->hnode);
		kfree(p);
	}
}
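
/*
 * Read handler for the vendor-specific OpRegion VFIO region; the
 * region is exposed read-only, so any write or out-of-range access
 * is rejected.
 */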
static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
		size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
			VFIO_PCI_NUM_REGIONS;
	void *base = vgpu->vdev.region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos >= vgpu->vdev.region[i].size || iswrite) {
		gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
		return -EINVAL;
	}
	count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
	memcpy(buf, base + pos, count);

	return count;
}

static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
		struct vfio_region *region)
{
}

static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
	.rw = intel_vgpu_reg_rw_opregion,
	.release = intel_vgpu_reg_release_opregion,
};
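
/*
 * The EDID region is split in two: the vfio_region_gfx_edid control
 * registers live at offset 0 and the EDID blob itself starts at
 * EDID_BLOB_OFFSET. Writes to link_state emulate monitor hotplug.
 */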
static int handle_edid_regs(struct intel_vgpu *vgpu,
			struct vfio_edid_region *region, char *buf,
			size_t count, u16 offset, bool is_write)
{
	struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs;
	unsigned int data;

	if (offset + count > sizeof(*regs))
		return -EINVAL;

	if (count != 4)
		return -EINVAL;

	if (is_write) {
		data = *((unsigned int *)buf);
		switch (offset) {
		case offsetof(struct vfio_region_gfx_edid, link_state):
			if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
				if (!drm_edid_block_valid(
					(u8 *)region->edid_blob,
					0,
					true,
					NULL)) {
					gvt_vgpu_err("invalid EDID blob\n");
					return -EINVAL;
				}
				intel_gvt_ops->emulate_hotplug(vgpu, true);
			} else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
				intel_gvt_ops->emulate_hotplug(vgpu, false);
			else {
				gvt_vgpu_err("invalid EDID link state %d\n",
					regs->link_state);
				return -EINVAL;
			}
			regs->link_state = data;
			break;
		case offsetof(struct vfio_region_gfx_edid, edid_size):
			if (data > regs->edid_max_size) {
				gvt_vgpu_err("EDID size is bigger than %d!\n",
					regs->edid_max_size);
				return -EINVAL;
			}
			regs->edid_size = data;
			break;
		default:
			/* read-only regs */
			gvt_vgpu_err("write read-only EDID region at offset %d\n",
				offset);
			return -EPERM;
		}
	} else {
		memcpy(buf, (char *)regs + offset, count);
	}

	return count;
}
static int handle_edid_blob(struct vfio_edid_region *region, char *buf,
			size_t count, u16 offset, bool is_write)
{
	if (offset + count > region->vfio_edid_regs.edid_size)
		return -EINVAL;

	if (is_write)
		memcpy(region->edid_blob + offset, buf, count);
	else
		memcpy(buf, region->edid_blob + offset, count);

	return count;
}
static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
		size_t count, loff_t *ppos, bool iswrite)
{
	int ret;
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
			VFIO_PCI_NUM_REGIONS;
	struct vfio_edid_region *region =
		(struct vfio_edid_region *)vgpu->vdev.region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos < region->vfio_edid_regs.edid_offset) {
		ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
	} else {
		pos -= EDID_BLOB_OFFSET;
		ret = handle_edid_blob(region, buf, count, pos, iswrite);
	}

	if (ret < 0)
		gvt_vgpu_err("failed to access EDID region\n");

	return ret;
}

static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
					struct vfio_region *region)
{
	kfree(region->data);
}

static const struct intel_vgpu_regops intel_vgpu_regops_edid = {
	.rw = intel_vgpu_reg_rw_edid,
	.release = intel_vgpu_reg_release_edid,
};
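
/*
 * Append a device-specific VFIO region to the vgpu's region array;
 * the array is grown with krealloc() as regions are registered.
 */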
static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
		unsigned int type, unsigned int subtype,
		const struct intel_vgpu_regops *ops,
		size_t size, u32 flags, void *data)
{
	struct vfio_region *region;

	region = krealloc(vgpu->vdev.region,
			(vgpu->vdev.num_regions + 1) * sizeof(*region),
			GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vgpu->vdev.region = region;
	vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
	vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
	vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
	vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
	vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
	vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
	vgpu->vdev.num_regions++;
	return 0;
}

static int kvmgt_get_vfio_device(void *p_vgpu)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;

	vgpu->vdev.vfio_device = vfio_device_get_from_dev(
		mdev_dev(vgpu->vdev.mdev));
	if (!vgpu->vdev.vfio_device) {
		gvt_vgpu_err("failed to get vfio device\n");
		return -ENODEV;
	}
	return 0;
}
static int kvmgt_set_opregion(void *p_vgpu)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	void *base;
	int ret;

	/* Each vgpu has its own opregion, although VFIO would create another
	 * one later. This one is used to expose opregion to VFIO. The other
	 * one, created by VFIO later, is the one the guest actually uses.
	 */
	base = vgpu_opregion(vgpu)->va;
	if (!base)
		return -ENOMEM;

	if (memcmp(base, OPREGION_SIGNATURE, 16))
		return -EINVAL;

	ret = intel_vgpu_register_reg(vgpu,
			PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
			VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
			&intel_vgpu_regops_opregion, OPREGION_SIZE,
			VFIO_REGION_INFO_FLAG_READ, base);

	return ret;
}
static int kvmgt_set_edid(void *p_vgpu, int port_num)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
	struct vfio_edid_region *base;
	int ret;

	base = kzalloc(sizeof(*base), GFP_KERNEL);
	if (!base)
		return -ENOMEM;

	/* TODO: Add multi-port and EDID extension block support */
	base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET;
	base->vfio_edid_regs.edid_max_size = EDID_SIZE;
	base->vfio_edid_regs.edid_size = EDID_SIZE;
	base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id);
	base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id);
	base->edid_blob = port->edid->edid_block;

	ret = intel_vgpu_register_reg(vgpu,
			VFIO_REGION_TYPE_GFX,
			VFIO_REGION_SUBTYPE_GFX_EDID,
			&intel_vgpu_regops_edid, EDID_SIZE,
			VFIO_REGION_INFO_FLAG_READ |
			VFIO_REGION_INFO_FLAG_WRITE |
			VFIO_REGION_INFO_FLAG_CAPS, base);

	return ret;
}

static void kvmgt_put_vfio_device(void *vgpu)
{
	if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
		return;

	vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
}
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = NULL;
	struct intel_vgpu_type *type;
	struct device *pdev;
	void *gvt;
	int ret;

	pdev = mdev_parent_dev(mdev);
	gvt = kdev_to_i915(pdev)->gvt;

	type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type) {
		gvt_vgpu_err("failed to find type %s to create\n",
						kobject_name(kobj));
		ret = -EINVAL;
		goto out;
	}

	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
	if (IS_ERR_OR_NULL(vgpu)) {
		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
		gvt_err("failed to create intel vgpu: %d\n", ret);
		goto out;
	}

	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);

	vgpu->vdev.mdev = mdev;
	mdev_set_drvdata(mdev, vgpu);

	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
		     dev_name(mdev_dev(mdev)));
	ret = 0;

out:
	return ret;
}

static int intel_vgpu_remove(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	if (handle_valid(vgpu->handle))
		return -EBUSY;

	intel_gvt_ops->vgpu_destroy(vgpu);
	return 0;
}
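
/*
 * VFIO IOMMU notifier: when userspace unmaps a range of guest memory,
 * drop any DMA mappings cached for gfns inside that range.
 */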
static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.iommu_notifier);

	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;
		struct gvt_dma *entry;
		unsigned long iov_pfn, end_iov_pfn;

		iov_pfn = unmap->iova >> PAGE_SHIFT;
		end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;

		mutex_lock(&vgpu->vdev.cache_lock);
		for (; iov_pfn < end_iov_pfn; iov_pfn++) {
			entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
			if (!entry)
				continue;

			gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
					   entry->size);
			__gvt_cache_remove_entry(vgpu, entry);
		}
		mutex_unlock(&vgpu->vdev.cache_lock);
	}

	return NOTIFY_OK;
}

static int intel_vgpu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.group_notifier);

	/* the only action we care about */
	if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
		vgpu->vdev.kvm = data;

		if (!data)
			schedule_work(&vgpu->vdev.release_work);
	}

	return NOTIFY_OK;
}
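
/*
 * Called when userspace opens the mdev device: register the IOMMU and
 * group notifiers, take a module reference, then bind the vgpu to the
 * KVM instance announced via VFIO_GROUP_NOTIFY_SET_KVM.
 */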
static int intel_vgpu_open(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long events;
	int ret;

	vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
	vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;

	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
				&vgpu->vdev.iommu_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
			ret);
		goto out;
	}

	events = VFIO_GROUP_NOTIFY_SET_KVM;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
				&vgpu->vdev.group_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
			ret);
		goto undo_iommu;
	}

	/* Take a module reference as mdev core doesn't take
	 * a reference for vendor driver.
	 */
	if (!try_module_get(THIS_MODULE)) {
		ret = -ENODEV;
		goto undo_group;
	}

	ret = kvmgt_guest_init(mdev);
	if (ret)
		goto undo_group;

	intel_gvt_ops->vgpu_activate(vgpu);

	atomic_set(&vgpu->vdev.released, 0);
	return ret;

undo_group:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
					&vgpu->vdev.group_notifier);

undo_iommu:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
					&vgpu->vdev.iommu_notifier);
out:
	return ret;
}
static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
{
	struct eventfd_ctx *trigger;

	trigger = vgpu->vdev.msi_trigger;
	if (trigger) {
		eventfd_ctx_put(trigger);
		vgpu->vdev.msi_trigger = NULL;
	}
}

static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
	struct kvmgt_guest_info *info;
	int ret;

	if (!handle_valid(vgpu->handle))
		return;

	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
		return;

	intel_gvt_ops->vgpu_release(vgpu);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
					&vgpu->vdev.iommu_notifier);
	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
					&vgpu->vdev.group_notifier);
	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);

	/* dereference module reference taken at open */
	module_put(THIS_MODULE);

	info = (struct kvmgt_guest_info *)vgpu->handle;
	kvmgt_guest_exit(info);

	intel_vgpu_release_msi_eventfd_ctx(vgpu);

	vgpu->vdev.kvm = NULL;
	vgpu->handle = 0;
}

static void intel_vgpu_release(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	__intel_vgpu_release(vgpu);
}

static void intel_vgpu_release_work(struct work_struct *work)
{
	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
					       vdev.release_work);

	__intel_vgpu_release(vgpu);
}
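
/*
 * Decode the guest-programmed base address of @bar from the virtual
 * config space, handling 32-bit and 64-bit memory BAR layouts.
 */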
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
	u32 start_lo, start_hi;
	u32 mem_type;

	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
						+ bar + 4));
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
	default:
		/* mem unknown type treated as 32-bit BAR */
		start_hi = 0;
		break;
	}

	return ((u64)start_hi << 32) | start_lo;
}
static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
			     void *buf, unsigned int count, bool is_write)
{
	u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
	int ret;

	if (is_write)
		ret = intel_gvt_ops->emulate_mmio_write(vgpu,
					bar_start + off, buf, count);
	else
		ret = intel_gvt_ops->emulate_mmio_read(vgpu,
					bar_start + off, buf, count);
	return ret;
}

static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
{
	return off >= vgpu_aperture_offset(vgpu) &&
	       off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
}

static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
		void *buf, unsigned long count, bool is_write)
{
	void __iomem *aperture_va;

	if (!intel_vgpu_in_aperture(vgpu, off) ||
	    !intel_vgpu_in_aperture(vgpu, off + count)) {
		gvt_vgpu_err("Invalid aperture offset %llu\n", off);
		return -EINVAL;
	}

	aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
					ALIGN_DOWN(off, PAGE_SIZE),
					count + offset_in_page(off));
	if (!aperture_va)
		return -EIO;

	if (is_write)
		memcpy_toio(aperture_va + offset_in_page(off), buf, count);
	else
		memcpy_fromio(buf, aperture_va + offset_in_page(off), count);

	io_mapping_unmap(aperture_va);

	return 0;
}
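
/*
 * Common read/write dispatcher: decode the VFIO region index from
 * *ppos and forward the access to config space emulation, BAR0 MMIO
 * emulation, the BAR2 aperture, or a device-specific region handler.
 */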
static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
			size_t count, loff_t *ppos, bool is_write)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int ret = -EINVAL;

	if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
		gvt_vgpu_err("invalid index: %u\n", index);
		return -EINVAL;
	}

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		if (is_write)
			ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
						buf, count);
		else
			ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
						buf, count);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
					buf, count, is_write);
		break;
	case VFIO_PCI_BAR2_REGION_INDEX:
		ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
	case VFIO_PCI_BAR3_REGION_INDEX:
	case VFIO_PCI_BAR4_REGION_INDEX:
	case VFIO_PCI_BAR5_REGION_INDEX:
	case VFIO_PCI_VGA_REGION_INDEX:
	case VFIO_PCI_ROM_REGION_INDEX:
		break;
	default:
		if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
			return -EINVAL;

		index -= VFIO_PCI_NUM_REGIONS;
		return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
				ppos, is_write);
	}

	return ret == 0 ? count : ret;
}
static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct intel_gvt *gvt = vgpu->gvt;
	int offset;

	/* Only allow MMIO GGTT entry access */
	if (index != PCI_BASE_ADDRESS_0)
		return false;

	offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
		intel_vgpu_get_bar_addr(vgpu, PCI_BASE_ADDRESS_0);

	return (offset >= gvt->device_info.gtt_start_offset &&
		offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ?
			true : false;
}
static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		/* Only support GGTT entry 8 bytes read */
		if (count >= 8 && !(*ppos % 8) &&
			gtt_entry(mdev, ppos)) {
			u64 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 8;
		} else if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
					false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}
static ssize_t intel_vgpu_write(struct mdev_device *mdev,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		/* Only support GGTT entry 8 bytes write */
		if (count >= 8 && !(*ppos % 8) &&
			gtt_entry(mdev, ppos)) {
			u64 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 8;
		} else if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val,
					sizeof(val), ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}
static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
	unsigned int index;
	u64 virtaddr;
	unsigned long req_size, pgoff, req_start;
	pgprot_t pg_prot;
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index != VFIO_PCI_BAR2_REGION_INDEX)
		return -EINVAL;

	pg_prot = vma->vm_page_prot;
	virtaddr = vma->vm_start;
	req_size = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (!intel_vgpu_in_aperture(vgpu, req_start))
		return -EINVAL;
	if (req_start + req_size >
	    vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
		return -EINVAL;

	pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
}
static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
{
	if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
		return 1;

	return 0;
}

static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, u32 flags,
			void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, u32 flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		u32 flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		u32 flags, void *data)
{
	struct eventfd_ctx *trigger;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int fd = *(int *)data;

		trigger = eventfd_ctx_fdget(fd);
		if (IS_ERR(trigger)) {
			gvt_vgpu_err("eventfd_ctx_fdget failed\n");
			return PTR_ERR(trigger);
		}
		vgpu->vdev.msi_trigger = trigger;
	} else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
		intel_vgpu_release_msi_eventfd_ctx(vgpu);

	return 0;
}
static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
		unsigned int index, unsigned int start, unsigned int count,
		void *data)
{
	int (*func)(struct intel_vgpu *vgpu, unsigned int index,
			unsigned int start, unsigned int count, u32 flags,
			void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = intel_vgpu_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = intel_vgpu_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_msi_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vgpu, index, start, count, flags, data);
}
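
/*
 * The mdev ioctl entry point implements the standard VFIO device
 * ioctls (device/region/irq info, SET_IRQS, RESET) plus the GFX plane
 * query and dma-buf export used for local display of the vgpu.
 */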
static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
			     unsigned long arg)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long minsz;

	gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;
		info.flags |= VFIO_DEVICE_FLAGS_RESET;
		info.num_regions = VFIO_PCI_NUM_REGIONS +
				vgpu->vdev.num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		unsigned int i;
		int ret;
		struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
		size_t size;
		int nr_areas = 1;
		int cap_type_id;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->gvt->device_info.cfg_space_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->cfg_space.bar[info.index].size;
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR1_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;
			break;
		case VFIO_PCI_BAR2_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = VFIO_REGION_INFO_FLAG_CAPS |
					VFIO_REGION_INFO_FLAG_MMAP |
					VFIO_REGION_INFO_FLAG_READ |
					VFIO_REGION_INFO_FLAG_WRITE;
			info.size = gvt_aperture_sz(vgpu->gvt);

			size = sizeof(*sparse) +
					(nr_areas * sizeof(*sparse->areas));
			sparse = kzalloc(size, GFP_KERNEL);
			if (!sparse)
				return -ENOMEM;

			sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
			sparse->header.version = 1;
			sparse->nr_areas = nr_areas;
			cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
			sparse->areas[0].offset =
					PAGE_ALIGN(vgpu_aperture_offset(vgpu));
			sparse->areas[0].size = vgpu_aperture_sz(vgpu);
			break;

		case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;

			gvt_dbg_core("get region info bar:%d\n", info.index);
			break;

		case VFIO_PCI_ROM_REGION_INDEX:
		case VFIO_PCI_VGA_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;

			gvt_dbg_core("get region info index:%d\n", info.index);
			break;
		default:
			{
				struct vfio_region_info_cap_type cap_type = {
					.header.id = VFIO_REGION_INFO_CAP_TYPE,
					.header.version = 1 };

				if (info.index >= VFIO_PCI_NUM_REGIONS +
						vgpu->vdev.num_regions)
					return -EINVAL;
				info.index =
					array_index_nospec(info.index,
							VFIO_PCI_NUM_REGIONS +
							vgpu->vdev.num_regions);

				i = info.index - VFIO_PCI_NUM_REGIONS;

				info.offset =
					VFIO_PCI_INDEX_TO_OFFSET(info.index);
				info.size = vgpu->vdev.region[i].size;
				info.flags = vgpu->vdev.region[i].flags;

				cap_type.type = vgpu->vdev.region[i].type;
				cap_type.subtype = vgpu->vdev.region[i].subtype;

				ret = vfio_info_add_capability(&caps,
							&cap_type.header,
							sizeof(cap_type));
				if (ret)
					return ret;
			}
		}

		if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
			switch (cap_type_id) {
			case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
				ret = vfio_info_add_capability(&caps,
					&sparse->header, sizeof(*sparse) +
					(sparse->nr_areas *
						sizeof(*sparse->areas)));
				if (ret) {
					kfree(sparse);
					return ret;
				}
				break;
			default:
				kfree(sparse);
				return -EINVAL;
			}
		}

		if (caps.size) {
			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						  sizeof(info), caps.buf,
						  caps.size)) {
					kfree(caps.buf);
					kfree(sparse);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		kfree(sparse);
		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX:
		case VFIO_PCI_MSI_IRQ_INDEX:
			break;
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = intel_vgpu_get_irq_count(vgpu, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			int max = intel_vgpu_get_irq_count(vgpu, hdr.index);

			ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);
			if (ret) {
				gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
				return -EINVAL;
			}
			if (data_size) {
				data = memdup_user((void __user *)(arg + minsz),
						   data_size);
				if (IS_ERR(data))
					return PTR_ERR(data);
			}
		}

		ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
					hdr.start, hdr.count, data);
		kfree(data);

		return ret;
	} else if (cmd == VFIO_DEVICE_RESET) {
		intel_gvt_ops->vgpu_reset(vgpu);
		return 0;
	} else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
		struct vfio_device_gfx_plane_info dmabuf;
		int ret = 0;

		minsz = offsetofend(struct vfio_device_gfx_plane_info,
				    dmabuf_id);
		if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
			return -EFAULT;
		if (dmabuf.argsz < minsz)
			return -EINVAL;

		ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
		if (ret != 0)
			return ret;

		return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
								-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
		__u32 dmabuf_id;
		__s32 dmabuf_fd;

		if (get_user(dmabuf_id, (__u32 __user *)arg))
			return -EFAULT;

		dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
		return dmabuf_fd;
	}

	return -ENOTTY;
}
static ssize_t
vgpu_id_show(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);

	if (mdev) {
		struct intel_vgpu *vgpu = (struct intel_vgpu *)
			mdev_get_drvdata(mdev);
		return sprintf(buf, "%d\n", vgpu->id);
	}
	return sprintf(buf, "\n");
}

static ssize_t
hw_id_show(struct device *dev, struct device_attribute *attr,
	   char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);

	if (mdev) {
		struct intel_vgpu *vgpu = (struct intel_vgpu *)
			mdev_get_drvdata(mdev);
		return sprintf(buf, "%u\n",
			       vgpu->submission.shadow[0]->gem_context->hw_id);
	}
	return sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(vgpu_id);
static DEVICE_ATTR_RO(hw_id);

static struct attribute *intel_vgpu_attrs[] = {
	&dev_attr_vgpu_id.attr,
	&dev_attr_hw_id.attr,
	NULL
};

static const struct attribute_group intel_vgpu_group = {
	.name  = "intel_vgpu",
	.attrs = intel_vgpu_attrs,
};

static const struct attribute_group *intel_vgpu_groups[] = {
	&intel_vgpu_group,
	NULL,
};
static struct mdev_parent_ops intel_vgpu_ops = {
	.mdev_attr_groups       = intel_vgpu_groups,
	.create			= intel_vgpu_create,
	.remove			= intel_vgpu_remove,

	.open			= intel_vgpu_open,
	.release		= intel_vgpu_release,

	.read			= intel_vgpu_read,
	.write			= intel_vgpu_write,
	.mmap			= intel_vgpu_mmap,
	.ioctl			= intel_vgpu_ioctl,
};

static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
	struct attribute **kvm_type_attrs;
	struct attribute_group **kvm_vgpu_type_groups;

	intel_gvt_ops = ops;
	if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs,
			&kvm_vgpu_type_groups))
		return -EFAULT;
	intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;

	return mdev_register_device(dev, &intel_vgpu_ops);
}

static void kvmgt_host_exit(struct device *dev)
{
	mdev_unregister_device(dev);
}
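
/*
 * Ask KVM to write-protect @gfn so that guest writes to shadowed page
 * tables trap into kvmgt_page_track_write(); the protect table records
 * the gfn so add/remove stay balanced.
 */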
static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	struct kvm_memory_slot *slot;
	int idx;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (!slot) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -EINVAL;
	}

	spin_lock(&kvm->mmu_lock);

	if (kvmgt_gfn_is_write_protected(info, gfn))
		goto out;

	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_add(info, gfn);

out:
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
	return 0;
}
static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	struct kvm_memory_slot *slot;
	int idx;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (!slot) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -EINVAL;
	}

	spin_lock(&kvm->mmu_lock);

	if (!kvmgt_gfn_is_write_protected(info, gfn))
		goto out;

	kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_del(info, gfn);

out:
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
	return 0;
}
static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		const u8 *val, int len,
		struct kvm_page_track_notifier_node *node)
{
	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
		intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
						     (void *)val, len);
}

static void kvmgt_page_track_flush_slot(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		struct kvm_page_track_notifier_node *node)
{
	int i;
	gfn_t gfn;
	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i < slot->npages; i++) {
		gfn = slot->base_gfn + i;
		if (kvmgt_gfn_is_write_protected(info, gfn)) {
			kvm_slot_page_track_remove_page(kvm, slot, gfn,
						KVM_PAGE_TRACK_WRITE);
			kvmgt_protect_table_del(info, gfn);
		}
	}
	spin_unlock(&kvm->mmu_lock);
}
static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
{
	struct intel_vgpu *itr;
	struct kvmgt_guest_info *info;
	int id;
	bool ret = false;

	mutex_lock(&vgpu->gvt->lock);
	for_each_active_vgpu(vgpu->gvt, itr, id) {
		if (!handle_valid(itr->handle))
			continue;

		info = (struct kvmgt_guest_info *)itr->handle;
		if (kvm && kvm == info->kvm) {
			ret = true;
			goto out;
		}
	}
out:
	mutex_unlock(&vgpu->gvt->lock);
	return ret;
}
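
/*
 * Bind the vgpu to the KVM instance announced by the group notifier:
 * allocate the kvmgt_guest_info handle, initialize the DMA cache and
 * protect table, and register the page-track notifier.
 */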
static int kvmgt_guest_init(struct mdev_device *mdev)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;
	struct kvm *kvm;

	vgpu = mdev_get_drvdata(mdev);
	if (handle_valid(vgpu->handle))
		return -EEXIST;

	kvm = vgpu->vdev.kvm;
	if (!kvm || kvm->mm != current->mm) {
		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
		return -ESRCH;
	}

	if (__kvmgt_vgpu_exist(vgpu, kvm))
		return -EEXIST;

	info = vzalloc(sizeof(struct kvmgt_guest_info));
	if (!info)
		return -ENOMEM;

	vgpu->handle = (unsigned long)info;
	info->vgpu = vgpu;
	info->kvm = kvm;
	kvm_get_kvm(info->kvm);

	kvmgt_protect_table_init(info);
	gvt_cache_init(vgpu);

	init_completion(&vgpu->vblank_done);

	info->track_node.track_write = kvmgt_page_track_write;
	info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
	kvm_page_track_register_notifier(kvm, &info->track_node);

	info->debugfs_cache_entries = debugfs_create_ulong(
						"kvmgt_nr_cache_entries",
						0444, vgpu->debugfs,
						&vgpu->vdev.nr_cache_entries);
	if (!info->debugfs_cache_entries)
		gvt_vgpu_err("Cannot create kvmgt debugfs entry\n");

	return 0;
}
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
	debugfs_remove(info->debugfs_cache_entries);

	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
	kvm_put_kvm(info->kvm);
	kvmgt_protect_table_destroy(info);
	gvt_cache_destroy(info->vgpu);
	vfree(info);

	return true;
}

static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
{
	/* nothing to do here */
	return 0;
}

static void kvmgt_detach_vgpu(void *p_vgpu)
{
	int i;
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;

	if (!vgpu->vdev.region)
		return;

	for (i = 0; i < vgpu->vdev.num_regions; i++)
		if (vgpu->vdev.region[i].ops->release)
			vgpu->vdev.region[i].ops->release(vgpu,
					&vgpu->vdev.region[i]);
	vgpu->vdev.num_regions = 0;
	kfree(vgpu->vdev.region);
	vgpu->vdev.region = NULL;
}
static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	vgpu = info->vgpu;

	/*
	 * When the guest powers off, msi_trigger is set to NULL, but the
	 * vgpu's config and mmio registers are not restored to their
	 * defaults during guest poweroff. If this vgpu is reused by the
	 * next VM, its pipes may still be enabled, so once the vgpu is
	 * active it can receive vblank interrupt requests. msi_trigger
	 * stays NULL until MSI is enabled by the guest, so if it is NULL
	 * return success without injecting an interrupt.
	 */
	if (vgpu->vdev.msi_trigger == NULL)
		return 0;

	if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
		return 0;

	return -EFAULT;
}
static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
{
	struct kvmgt_guest_info *info;
	kvm_pfn_t pfn;

	if (!handle_valid(handle))
		return INTEL_GVT_INVALID_ADDR;

	info = (struct kvmgt_guest_info *)handle;

	pfn = gfn_to_pfn(info->kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		return INTEL_GVT_INVALID_ADDR;

	return pfn;
}
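
/*
 * Map a guest page for device DMA, or take another reference if the
 * gfn is already in the cache; entries are kref-counted and released
 * through kvmgt_dma_unmap_guest_page().
 */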
static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
		unsigned long size, dma_addr_t *dma_addr)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;
	struct gvt_dma *entry;
	int ret;

	if (!handle_valid(handle))
		return -EINVAL;

	info = (struct kvmgt_guest_info *)handle;
	vgpu = info->vgpu;

	mutex_lock(&info->vgpu->vdev.cache_lock);

	entry = __gvt_cache_find_gfn(info->vgpu, gfn);
	if (!entry) {
		ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
		if (ret)
			goto err_unlock;

		ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
		if (ret)
			goto err_unmap;
	} else {
		kref_get(&entry->ref);
		*dma_addr = entry->dma_addr;
	}

	mutex_unlock(&info->vgpu->vdev.cache_lock);
	return 0;

err_unmap:
	gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
err_unlock:
	mutex_unlock(&info->vgpu->vdev.cache_lock);
	return ret;
}
static void __gvt_dma_release(struct kref *ref)
{
	struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);

	gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
			   entry->size);
	__gvt_cache_remove_entry(entry->vgpu, entry);
}

static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
{
	struct kvmgt_guest_info *info;
	struct gvt_dma *entry;

	if (!handle_valid(handle))
		return;

	info = (struct kvmgt_guest_info *)handle;

	mutex_lock(&info->vgpu->vdev.cache_lock);
	entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
	if (entry)
		kref_put(&entry->ref, __gvt_dma_release);
	mutex_unlock(&info->vgpu->vdev.cache_lock);
}
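
/*
 * Access guest physical memory through KVM. When called from a kernel
 * thread there is no current->mm, so temporarily adopt the guest's mm
 * around the access (see the kthread handling below).
 */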
static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len, bool write)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	int idx, ret;
	bool kthread = current->mm == NULL;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	if (kthread) {
		if (!mmget_not_zero(kvm->mm))
			return -EFAULT;

		use_mm(kvm->mm);
	}

	idx = srcu_read_lock(&kvm->srcu);
	ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
		      kvm_read_guest(kvm, gpa, buf, len);
	srcu_read_unlock(&kvm->srcu, idx);

	if (kthread) {
		unuse_mm(kvm->mm);
		mmput(kvm->mm);
	}

	return ret;
}

static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len)
{
	return kvmgt_rw_gpa(handle, gpa, buf, len, false);
}

static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len)
{
	return kvmgt_rw_gpa(handle, gpa, buf, len, true);
}
static unsigned long kvmgt_virt_to_pfn(void *addr)
{
	return PFN_DOWN(__pa(addr));
}

static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	int idx;
	bool ret;

	if (!handle_valid(handle))
		return false;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	ret = kvm_is_visible_gfn(kvm, gfn);
	srcu_read_unlock(&kvm->srcu, idx);

	return ret;
}
static struct intel_gvt_mpt kvmgt_mpt = {
	.type = INTEL_GVT_HYPERVISOR_KVM,
	.host_init = kvmgt_host_init,
	.host_exit = kvmgt_host_exit,
	.attach_vgpu = kvmgt_attach_vgpu,
	.detach_vgpu = kvmgt_detach_vgpu,
	.inject_msi = kvmgt_inject_msi,
	.from_virt_to_mfn = kvmgt_virt_to_pfn,
	.enable_page_track = kvmgt_page_track_add,
	.disable_page_track = kvmgt_page_track_remove,
	.read_gpa = kvmgt_read_gpa,
	.write_gpa = kvmgt_write_gpa,
	.gfn_to_mfn = kvmgt_gfn_to_pfn,
	.dma_map_guest_page = kvmgt_dma_map_guest_page,
	.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
	.set_opregion = kvmgt_set_opregion,
	.set_edid = kvmgt_set_edid,
	.get_vfio_device = kvmgt_get_vfio_device,
	.put_vfio_device = kvmgt_put_vfio_device,
	.is_valid_gfn = kvmgt_is_valid_gfn,
};
static int __init kvmgt_init(void)
{
	if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0)
		return -ENODEV;
	return 0;
}

static void __exit kvmgt_exit(void)
{
	intel_gvt_unregister_hypervisor();
}

module_init(kvmgt_init);
module_exit(kvmgt_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Intel Corporation");