/*
 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
 *
 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Jike Song <jike.song@intel.com>
 *    Xiaoguang Chen <xiaoguang.chen@intel.com>
 */

#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>

#include "i915_drv.h"
#include "gvt.h"

static const struct intel_gvt_ops *intel_gvt_ops;

/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT   40
#define VFIO_PCI_OFFSET_TO_INDEX(off)   (off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)

#define OPREGION_SIGNATURE "IntelGraphicsMem"

struct vfio_region;
struct intel_vgpu_regops {
	size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
			size_t count, loff_t *ppos, bool iswrite);
	void (*release)(struct intel_vgpu *vgpu,
			struct vfio_region *region);
};

struct vfio_region {
	u32				type;
	u32				subtype;
	size_t				size;
	u32				flags;
	const struct intel_vgpu_regops	*ops;
	void				*data;
};

struct kvmgt_pgfn {
	gfn_t gfn;
	struct hlist_node hnode;
};

struct kvmgt_guest_info {
	struct kvm *kvm;
	struct intel_vgpu *vgpu;
	struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
	struct hlist_head ptable[NR_BKT];
#undef NR_BKT
};

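/*
 * A gvt_dma node caches the host IOVA backing one pinned guest page
 * frame (gfn). Nodes live in a per-vGPU rb-tree keyed by gfn, so a
 * repeated gfn->iova lookup can avoid re-pinning through VFIO.
 */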
struct gvt_dma {
	struct rb_node node;
	gfn_t gfn;
	unsigned long iova;
};

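/*
 * vgpu->handle holds a kernel pointer to the kvmgt_guest_info cast to
 * unsigned long (see kvmgt_guest_init()), so any value that fits in the
 * low byte cannot be a real handle and means "no guest attached".
 */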
static inline bool handle_valid(unsigned long handle)
{
	return !!(handle & ~0xff);
}

static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);

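/*
 * Map an already-pinned host page for device DMA and hand back the
 * resulting IOVA as a page frame number; gvt_dma_unmap_iova() below is
 * the matching teardown.
 */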
static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
		unsigned long *iova)
{
	struct page *page;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	if (unlikely(!pfn_valid(pfn)))
		return -EFAULT;

	page = pfn_to_page(pfn);
	daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
			PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr))
		return -ENOMEM;

	*iova = (unsigned long)(daddr >> PAGE_SHIFT);
	return 0;
}

static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova)
{
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	daddr = (dma_addr_t)(iova << PAGE_SHIFT);
	dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}

static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct rb_node *node = vgpu->vdev.cache.rb_node;
	struct gvt_dma *ret = NULL;

	while (node) {
		struct gvt_dma *itr = rb_entry(node, struct gvt_dma, node);

		if (gfn < itr->gfn)
			node = node->rb_left;
		else if (gfn > itr->gfn)
			node = node->rb_right;
		else {
			ret = itr;
			goto out;
		}
	}

out:
	return ret;
}

static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct gvt_dma *entry;
	unsigned long iova;

	mutex_lock(&vgpu->vdev.cache_lock);

	entry = __gvt_cache_find(vgpu, gfn);
	iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova;

	mutex_unlock(&vgpu->vdev.cache_lock);
	return iova;
}

static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
		unsigned long iova)
{
	struct gvt_dma *new, *itr;
	struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;

	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
	if (!new)
		return;

	new->gfn = gfn;
	new->iova = iova;

	mutex_lock(&vgpu->vdev.cache_lock);
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, node);

		if (gfn == itr->gfn)
			goto out;
		else if (gfn < itr->gfn)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &vgpu->vdev.cache);
	mutex_unlock(&vgpu->vdev.cache_lock);
	return;

out:
	mutex_unlock(&vgpu->vdev.cache_lock);
	kfree(new);
}

static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
				struct gvt_dma *entry)
{
	rb_erase(&entry->node, &vgpu->vdev.cache);
	kfree(entry);
}

static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct device *dev = mdev_dev(vgpu->vdev.mdev);
	struct gvt_dma *this;
	unsigned long g1;
	int rc;

	mutex_lock(&vgpu->vdev.cache_lock);
	this = __gvt_cache_find(vgpu, gfn);
	if (!this) {
		mutex_unlock(&vgpu->vdev.cache_lock);
		return;
	}

	g1 = gfn;
	gvt_dma_unmap_iova(vgpu, this->iova);
	rc = vfio_unpin_pages(dev, &g1, 1);
	WARN_ON(rc != 1);
	__gvt_cache_remove_entry(vgpu, this);
	mutex_unlock(&vgpu->vdev.cache_lock);
}

static void gvt_cache_init(struct intel_vgpu *vgpu)
{
	vgpu->vdev.cache = RB_ROOT;
	mutex_init(&vgpu->vdev.cache_lock);
}

static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
	struct gvt_dma *dma;
	struct rb_node *node = NULL;
	struct device *dev = mdev_dev(vgpu->vdev.mdev);
	unsigned long gfn;

	for (;;) {
		mutex_lock(&vgpu->vdev.cache_lock);
		node = rb_first(&vgpu->vdev.cache);
		if (!node) {
			mutex_unlock(&vgpu->vdev.cache_lock);
			break;
		}
		dma = rb_entry(node, struct gvt_dma, node);
		gvt_dma_unmap_iova(vgpu, dma->iova);
		gfn = dma->gfn;
		__gvt_cache_remove_entry(vgpu, dma);
		mutex_unlock(&vgpu->vdev.cache_lock);
		vfio_unpin_pages(dev, &gfn, 1);
	}
}

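/*
 * The protect table is a hash, keyed by gfn, of guest pages this vGPU
 * has asked KVM's page-track machinery to write-protect (e.g. guest
 * graphics page tables), so guest writes to them trap and get emulated.
 */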
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
{
	hash_init(info->ptable);
}

static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
{
	struct kvmgt_pgfn *p;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

static struct kvmgt_pgfn *
__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p, *res = NULL;

	hash_for_each_possible(info->ptable, p, hnode, gfn) {
		if (gfn == p->gfn) {
			res = p;
			break;
		}
	}

	return res;
}

static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	return !!p;
}

static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	if (kvmgt_gfn_is_write_protected(info, gfn))
		return;

	p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
	if (WARN(!p, "gfn: 0x%llx\n", gfn))
		return;

	p->gfn = gfn;
	hash_add(info->ptable, &p->hnode, gfn);
}

static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	if (p) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
		size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
			VFIO_PCI_NUM_REGIONS;
	void *base = vgpu->vdev.region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos >= vgpu->vdev.region[i].size || iswrite) {
		gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
		return -EINVAL;
	}
	count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
	memcpy(buf, base + pos, count);

	return count;
}

static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
		struct vfio_region *region)
{
}

static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
	.rw = intel_vgpu_reg_rw_opregion,
	.release = intel_vgpu_reg_release_opregion,
};

static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
		unsigned int type, unsigned int subtype,
		const struct intel_vgpu_regops *ops,
		size_t size, u32 flags, void *data)
{
	struct vfio_region *region;

	region = krealloc(vgpu->vdev.region,
			(vgpu->vdev.num_regions + 1) * sizeof(*region),
			GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vgpu->vdev.region = region;
	vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
	vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
	vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
	vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
	vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
	vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
	vgpu->vdev.num_regions++;
	return 0;
}

static int kvmgt_get_vfio_device(void *p_vgpu)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;

	vgpu->vdev.vfio_device = vfio_device_get_from_dev(
		mdev_dev(vgpu->vdev.mdev));
	if (!vgpu->vdev.vfio_device) {
		gvt_vgpu_err("failed to get vfio device\n");
		return -ENODEV;
	}
	return 0;
}

static int kvmgt_set_opregion(void *p_vgpu)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	void *base;
	int ret;

	/* Each vgpu has its own opregion, although VFIO would create another
	 * one later. This one is used to expose opregion to VFIO. And the
	 * other one created by VFIO later, is used by guest actually.
	 */
	base = vgpu_opregion(vgpu)->va;
	if (!base)
		return -ENOMEM;

	if (memcmp(base, OPREGION_SIGNATURE, 16)) {
		memunmap(base);
		return -EINVAL;
	}

	ret = intel_vgpu_register_reg(vgpu,
			PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
			VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
			&intel_vgpu_regops_opregion, OPREGION_SIZE,
			VFIO_REGION_INFO_FLAG_READ, base);

	return ret;
}

static void kvmgt_put_vfio_device(void *vgpu)
{
	if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
		return;

	vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
}

static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = NULL;
	struct intel_vgpu_type *type;
	struct device *pdev;
	void *gvt;
	int ret;

	pdev = mdev_parent_dev(mdev);
	gvt = kdev_to_i915(pdev)->gvt;

	type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type) {
		gvt_vgpu_err("failed to find type %s to create\n",
						kobject_name(kobj));
		ret = -EINVAL;
		goto out;
	}

	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
	if (IS_ERR_OR_NULL(vgpu)) {
		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
		gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
		goto out;
	}

	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);

	vgpu->vdev.mdev = mdev;
	mdev_set_drvdata(mdev, vgpu);

	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
		     dev_name(mdev_dev(mdev)));
	ret = 0;

out:
	return ret;
}

static int intel_vgpu_remove(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	if (handle_valid(vgpu->handle))
		return -EBUSY;

	intel_gvt_ops->vgpu_destroy(vgpu);
	return 0;
}

static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.iommu_notifier);

	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;
		unsigned long gfn, end_gfn;

		gfn = unmap->iova >> PAGE_SHIFT;
		end_gfn = gfn + unmap->size / PAGE_SIZE;

		while (gfn < end_gfn)
			gvt_cache_remove(vgpu, gfn++);
	}

	return NOTIFY_OK;
}

static int intel_vgpu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.group_notifier);

	/* the only action we care about */
	if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
		vgpu->vdev.kvm = data;

		if (!data)
			schedule_work(&vgpu->vdev.release_work);
	}

	return NOTIFY_OK;
}

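/*
 * open: register the VFIO IOMMU unmap and group (KVM association)
 * notifiers, bind the vGPU to the guest's KVM instance via
 * kvmgt_guest_init(), then activate the vGPU for scheduling.
 */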
static int intel_vgpu_open(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long events;
	int ret;

	vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
	vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;

	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
				&vgpu->vdev.iommu_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
			ret);
		goto out;
	}

	events = VFIO_GROUP_NOTIFY_SET_KVM;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
				&vgpu->vdev.group_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
			ret);
		goto undo_iommu;
	}

	ret = kvmgt_guest_init(mdev);
	if (ret)
		goto undo_group;

	intel_gvt_ops->vgpu_activate(vgpu);

	atomic_set(&vgpu->vdev.released, 0);
	return ret;

undo_group:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
					&vgpu->vdev.group_notifier);

undo_iommu:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
					&vgpu->vdev.iommu_notifier);
out:
	return ret;
}

static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
	struct kvmgt_guest_info *info;
	int ret;

	if (!handle_valid(vgpu->handle))
		return;

	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
		return;

	intel_gvt_ops->vgpu_deactivate(vgpu);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
					&vgpu->vdev.iommu_notifier);
	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
					&vgpu->vdev.group_notifier);
	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);

	info = (struct kvmgt_guest_info *)vgpu->handle;
	kvmgt_guest_exit(info);

	vgpu->vdev.kvm = NULL;
	vgpu->handle = 0;
}

static void intel_vgpu_release(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	__intel_vgpu_release(vgpu);
}

static void intel_vgpu_release_work(struct work_struct *work)
{
	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
					vdev.release_work);

	__intel_vgpu_release(vgpu);
}

static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
	u32 start_lo, start_hi;
	u32 mem_type;

	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
						+ bar + 4));
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
	default:
		/* mem unknown type treated as 32-bit BAR */
		start_hi = 0;
		break;
	}

	return ((u64)start_hi << 32) | start_lo;
}

static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
			     void *buf, unsigned int count, bool is_write)
{
	uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
	int ret;

	if (is_write)
		ret = intel_gvt_ops->emulate_mmio_write(vgpu,
					bar_start + off, buf, count);
	else
		ret = intel_gvt_ops->emulate_mmio_read(vgpu,
					bar_start + off, buf, count);
	return ret;
}

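/*
 * Dispatch a read or write on the device fd: the VFIO region index is
 * encoded in the upper bits of the file offset (see the vfio-pci helper
 * macros above) and selects config space, a BAR, or a device-specific
 * region such as the OpRegion.
 */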
static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
			size_t count, loff_t *ppos, bool is_write)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int ret = -EINVAL;

	if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
		gvt_vgpu_err("invalid index: %u\n", index);
		return -EINVAL;
	}

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		if (is_write)
			ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
						buf, count);
		else
			ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
						buf, count);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
					buf, count, is_write);
		break;
	case VFIO_PCI_BAR2_REGION_INDEX:
		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_2, pos,
					buf, count, is_write);
		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
	case VFIO_PCI_BAR3_REGION_INDEX:
	case VFIO_PCI_BAR4_REGION_INDEX:
	case VFIO_PCI_BAR5_REGION_INDEX:
	case VFIO_PCI_VGA_REGION_INDEX:
	case VFIO_PCI_ROM_REGION_INDEX:
		break;
	default:
		if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
			return -EINVAL;

		index -= VFIO_PCI_NUM_REGIONS;
		return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
				ppos, is_write);
	}

	return ret == 0 ? count : ret;
}

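/*
 * read/write split user accesses into naturally aligned 4-, 2- and
 * 1-byte chunks, so the emulated config space and MMIO paths only ever
 * see access widths the emulation layer handles.
 */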
static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
					false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t intel_vgpu_write(struct mdev_device *mdev,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val,
					sizeof(val), ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
	unsigned int index;
	u64 virtaddr;
	unsigned long req_size, pgoff = 0;
	pgprot_t pg_prot;
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index != VFIO_PCI_BAR2_REGION_INDEX)
		return -EINVAL;

	pg_prot = vma->vm_page_prot;
	virtaddr = vma->vm_start;
	req_size = vma->vm_end - vma->vm_start;
	pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;

	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
}

static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
{
	if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
		return 1;

	return 0;
}

static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, uint32_t flags,
			void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, uint32_t flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		uint32_t flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		uint32_t flags, void *data)
{
	struct eventfd_ctx *trigger;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int fd = *(int *)data;

		trigger = eventfd_ctx_fdget(fd);
		if (IS_ERR(trigger)) {
			gvt_vgpu_err("eventfd_ctx_fdget failed\n");
			return PTR_ERR(trigger);
		}
		vgpu->vdev.msi_trigger = trigger;
	}

	return 0;
}

static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
		unsigned int index, unsigned int start, unsigned int count,
		void *data)
{
	int (*func)(struct intel_vgpu *vgpu, unsigned int index,
			unsigned int start, unsigned int count, uint32_t flags,
			void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = intel_vgpu_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = intel_vgpu_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_msi_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vgpu, index, start, count, flags, data);
}

static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
			     unsigned long arg)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long minsz;

	gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;
		info.flags |= VFIO_DEVICE_FLAGS_RESET;
		info.num_regions = VFIO_PCI_NUM_REGIONS +
				vgpu->vdev.num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		int i, ret;
		struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
		size_t size;
		int nr_areas = 1;
		int cap_type_id;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->gvt->device_info.cfg_space_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->cfg_space.bar[info.index].size;
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR1_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;
			break;
		case VFIO_PCI_BAR2_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = VFIO_REGION_INFO_FLAG_CAPS |
					VFIO_REGION_INFO_FLAG_MMAP |
					VFIO_REGION_INFO_FLAG_READ |
					VFIO_REGION_INFO_FLAG_WRITE;
			info.size = gvt_aperture_sz(vgpu->gvt);

			size = sizeof(*sparse) +
					(nr_areas * sizeof(*sparse->areas));
			sparse = kzalloc(size, GFP_KERNEL);
			if (!sparse)
				return -ENOMEM;

			sparse->nr_areas = nr_areas;
			cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
			sparse->areas[0].offset =
					PAGE_ALIGN(vgpu_aperture_offset(vgpu));
			sparse->areas[0].size = vgpu_aperture_sz(vgpu);
			break;

		case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;

			gvt_dbg_core("get region info bar:%d\n", info.index);
			break;

		case VFIO_PCI_ROM_REGION_INDEX:
		case VFIO_PCI_VGA_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;

			gvt_dbg_core("get region info index:%d\n", info.index);
			break;
		default:
			{
				struct vfio_region_info_cap_type cap_type;

				if (info.index >= VFIO_PCI_NUM_REGIONS +
						vgpu->vdev.num_regions)
					return -EINVAL;

				i = info.index - VFIO_PCI_NUM_REGIONS;

				info.offset =
					VFIO_PCI_INDEX_TO_OFFSET(info.index);
				info.size = vgpu->vdev.region[i].size;
				info.flags = vgpu->vdev.region[i].flags;

				cap_type.type = vgpu->vdev.region[i].type;
				cap_type.subtype = vgpu->vdev.region[i].subtype;

				ret = vfio_info_add_capability(&caps,
						VFIO_REGION_INFO_CAP_TYPE,
						&cap_type);
				if (ret)
					return ret;
			}
		}

		if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
			switch (cap_type_id) {
			case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
				ret = vfio_info_add_capability(&caps,
					VFIO_REGION_INFO_CAP_SPARSE_MMAP,
					sparse);
				kfree(sparse);
				if (ret)
					return ret;
				break;
			default:
				return -EINVAL;
			}
		}

		if (caps.size) {
			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						  sizeof(info), caps.buf,
						  caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX:
		case VFIO_PCI_MSI_IRQ_INDEX:
			break;
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = intel_vgpu_get_irq_count(vgpu, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			int max = intel_vgpu_get_irq_count(vgpu, hdr.index);

			ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);
			if (ret) {
				gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
				return -EINVAL;
			}
			if (data_size) {
				data = memdup_user((void __user *)(arg + minsz),
						   data_size);
				if (IS_ERR(data))
					return PTR_ERR(data);
			}
		}

		ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
					hdr.start, hdr.count, data);
		kfree(data);

		return ret;
	} else if (cmd == VFIO_DEVICE_RESET) {
		intel_gvt_ops->vgpu_reset(vgpu);
		return 0;
	} else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
		struct vfio_device_gfx_plane_info dmabuf;
		int ret = 0;

		minsz = offsetofend(struct vfio_device_gfx_plane_info,
				    dmabuf_id);
		if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
			return -EFAULT;
		if (dmabuf.argsz < minsz)
			return -EINVAL;

		ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
		if (ret != 0)
			return ret;

		return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
								-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
		__u32 dmabuf_id;
		__s32 dmabuf_fd;

		if (get_user(dmabuf_id, (__u32 __user *)arg))
			return -EFAULT;

		dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
		return dmabuf_fd;

	}

	return 0;
}

static ssize_t
vgpu_id_show(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);

	if (mdev) {
		struct intel_vgpu *vgpu = (struct intel_vgpu *)
			mdev_get_drvdata(mdev);
		return sprintf(buf, "%d\n", vgpu->id);
	}
	return sprintf(buf, "\n");
}

static ssize_t
hw_id_show(struct device *dev, struct device_attribute *attr,
	   char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);

	if (mdev) {
		struct intel_vgpu *vgpu = (struct intel_vgpu *)
			mdev_get_drvdata(mdev);
		return sprintf(buf, "%u\n",
			       vgpu->submission.shadow_ctx->hw_id);
	}
	return sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(vgpu_id);
static DEVICE_ATTR_RO(hw_id);

static struct attribute *intel_vgpu_attrs[] = {
	&dev_attr_vgpu_id.attr,
	&dev_attr_hw_id.attr,
	NULL
};

static const struct attribute_group intel_vgpu_group = {
	.name = "intel_vgpu",
	.attrs = intel_vgpu_attrs,
};

static const struct attribute_group *intel_vgpu_groups[] = {
	&intel_vgpu_group,
	NULL,
};

static struct mdev_parent_ops intel_vgpu_ops = {
	.mdev_attr_groups	= intel_vgpu_groups,
	.create			= intel_vgpu_create,
	.remove			= intel_vgpu_remove,

	.open			= intel_vgpu_open,
	.release		= intel_vgpu_release,

	.read			= intel_vgpu_read,
	.write			= intel_vgpu_write,
	.mmap			= intel_vgpu_mmap,
	.ioctl			= intel_vgpu_ioctl,
};

static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
	struct attribute **kvm_type_attrs;
	struct attribute_group **kvm_vgpu_type_groups;

	intel_gvt_ops = ops;
	if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs,
			&kvm_vgpu_type_groups))
		return -EFAULT;
	intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;

	return mdev_register_device(dev, &intel_vgpu_ops);
}

static void kvmgt_host_exit(struct device *dev, void *gvt)
{
	mdev_unregister_device(dev);
}

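/*
 * Add or remove a write watch on a guest page: take the KVM SRCU read
 * lock to resolve the memslot, then register the gfn with KVM's
 * page-track framework and mirror it in the local protect table.
 */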
static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	struct kvm_memory_slot *slot;
	int idx;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (!slot) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -EINVAL;
	}

	spin_lock(&kvm->mmu_lock);

	if (kvmgt_gfn_is_write_protected(info, gfn))
		goto out;

	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_add(info, gfn);

out:
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
	return 0;
}

static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	struct kvm_memory_slot *slot;
	int idx;

	if (!handle_valid(handle))
		return 0;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (!slot) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -EINVAL;
	}

	spin_lock(&kvm->mmu_lock);

	if (!kvmgt_gfn_is_write_protected(info, gfn))
		goto out;

	kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_del(info, gfn);

out:
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
	return 0;
}

static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		const u8 *val, int len,
		struct kvm_page_track_notifier_node *node)
{
	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
		intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
					(void *)val, len);
}

static void kvmgt_page_track_flush_slot(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		struct kvm_page_track_notifier_node *node)
{
	int i;
	gfn_t gfn;
	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i < slot->npages; i++) {
		gfn = slot->base_gfn + i;
		if (kvmgt_gfn_is_write_protected(info, gfn)) {
			kvm_slot_page_track_remove_page(kvm, slot, gfn,
						KVM_PAGE_TRACK_WRITE);
			kvmgt_protect_table_del(info, gfn);
		}
	}
	spin_unlock(&kvm->mmu_lock);
}

static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
{
	struct intel_vgpu *itr;
	struct kvmgt_guest_info *info;
	int id;
	bool ret = false;

	mutex_lock(&vgpu->gvt->lock);
	for_each_active_vgpu(vgpu->gvt, itr, id) {
		if (!handle_valid(itr->handle))
			continue;

		info = (struct kvmgt_guest_info *)itr->handle;
		if (kvm && kvm == info->kvm) {
			ret = true;
			goto out;
		}
	}
out:
	mutex_unlock(&vgpu->gvt->lock);
	return ret;
}

static int kvmgt_guest_init(struct mdev_device *mdev)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;
	struct kvm *kvm;

	vgpu = mdev_get_drvdata(mdev);
	if (handle_valid(vgpu->handle))
		return -EEXIST;

	kvm = vgpu->vdev.kvm;
	if (!kvm || kvm->mm != current->mm) {
		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
		return -ESRCH;
	}

	if (__kvmgt_vgpu_exist(vgpu, kvm))
		return -EEXIST;

	info = vzalloc(sizeof(struct kvmgt_guest_info));
	if (!info)
		return -ENOMEM;

	vgpu->handle = (unsigned long)info;
	info->vgpu = vgpu;
	info->kvm = kvm;
	kvm_get_kvm(info->kvm);

	kvmgt_protect_table_init(info);
	gvt_cache_init(vgpu);

	mutex_init(&vgpu->dmabuf_lock);
	init_completion(&vgpu->vblank_done);

	info->track_node.track_write = kvmgt_page_track_write;
	info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
	kvm_page_track_register_notifier(kvm, &info->track_node);

	return 0;
}

static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
	kvm_put_kvm(info->kvm);
	kvmgt_protect_table_destroy(info);
	gvt_cache_destroy(info->vgpu);
	vfree(info);

	return true;
}

static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
{
	/* nothing to do here */
	return 0;
}

static void kvmgt_detach_vgpu(unsigned long handle)
{
	/* nothing to do here */
}

static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	vgpu = info->vgpu;

	if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
		return 0;

	return -EFAULT;
}

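/*
 * Translate a guest frame number to a DMA-able host IOVA: hit the
 * per-vGPU cache if possible, otherwise pin the page through VFIO, map
 * it for device DMA and remember the result.
 */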
static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
{
	unsigned long iova, pfn;
	struct kvmgt_guest_info *info;
	struct device *dev;
	struct intel_vgpu *vgpu;
	int rc;

	if (!handle_valid(handle))
		return INTEL_GVT_INVALID_ADDR;

	info = (struct kvmgt_guest_info *)handle;
	vgpu = info->vgpu;
	iova = gvt_cache_find(info->vgpu, gfn);
	if (iova != INTEL_GVT_INVALID_ADDR)
		return iova;

	pfn = INTEL_GVT_INVALID_ADDR;
	dev = mdev_dev(info->vgpu->vdev.mdev);
	rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
	if (rc != 1) {
		gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
			gfn, rc);
		return INTEL_GVT_INVALID_ADDR;
	}
	/* transfer to host iova for GFX to use DMA */
	rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
	if (rc) {
		gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
		vfio_unpin_pages(dev, &gfn, 1);
		return INTEL_GVT_INVALID_ADDR;
	}

	gvt_cache_add(info->vgpu, gfn, iova);
	return iova;
}

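/*
 * Read or write guest physical memory through KVM. When called from a
 * kernel thread (which has no mm of its own), temporarily adopt the
 * guest's mm so kvm_read_guest()/kvm_write_guest() can resolve the
 * user mappings.
 */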
static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len, bool write)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	int idx, ret;
	bool kthread = current->mm == NULL;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	if (kthread)
		use_mm(kvm->mm);

	idx = srcu_read_lock(&kvm->srcu);
	ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
		      kvm_read_guest(kvm, gpa, buf, len);
	srcu_read_unlock(&kvm->srcu, idx);

	if (kthread)
		unuse_mm(kvm->mm);

	return ret;
}

static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len)
{
	return kvmgt_rw_gpa(handle, gpa, buf, len, false);
}

static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len)
{
	return kvmgt_rw_gpa(handle, gpa, buf, len, true);
}

static unsigned long kvmgt_virt_to_pfn(void *addr)
{
	return PFN_DOWN(__pa(addr));
}

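/*
 * Mediated pass-through (MPT) operations exported to the GVT-g core;
 * this table is what plugs the KVM/VFIO backend into gvt.
 */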
struct intel_gvt_mpt kvmgt_mpt = {
	.host_init = kvmgt_host_init,
	.host_exit = kvmgt_host_exit,
	.attach_vgpu = kvmgt_attach_vgpu,
	.detach_vgpu = kvmgt_detach_vgpu,
	.inject_msi = kvmgt_inject_msi,
	.from_virt_to_mfn = kvmgt_virt_to_pfn,
	.set_wp_page = kvmgt_write_protect_add,
	.unset_wp_page = kvmgt_write_protect_remove,
	.read_gpa = kvmgt_read_gpa,
	.write_gpa = kvmgt_write_gpa,
	.gfn_to_mfn = kvmgt_gfn_to_pfn,
	.set_opregion = kvmgt_set_opregion,
	.get_vfio_device = kvmgt_get_vfio_device,
	.put_vfio_device = kvmgt_put_vfio_device,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);

static int __init kvmgt_init(void)
{
	return 0;
}

static void __exit kvmgt_exit(void)
{
}

module_init(kvmgt_init);
module_exit(kvmgt_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Intel Corporation");