/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;
/*
 * Validate a GM address and its range size against the vGPU's
 * aperture and hidden GM spaces.
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
		gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
				addr, size);
		return false;
	}
	return true;
}

/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
	if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
		 "invalid guest gmadr %llx\n", g_addr))
		return -EACCES;

	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
		*h_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (g_addr - vgpu_aperture_offset(vgpu));
	else
		*h_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (g_addr - vgpu_hidden_offset(vgpu));
	return 0;
}

/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
	if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
		 "invalid host gmadr %llx\n", h_addr))
		return -EACCES;

	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
		*g_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
	else
		*g_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
	return 0;
}

int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index)
{
	u64 h_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
				       &h_addr);
	if (ret)
		return ret;

	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index)
{
	u64 g_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
				       &g_addr);
	if (ret)
		return ret;

	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}
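/*
 * Usage sketch for the translation helpers above (illustrative only, kept
 * in a comment; assumes a vgpu whose aperture/hidden GM ranges are already
 * set up):
 *
 *	unsigned long h_index;
 *
 *	if (intel_gvt_ggtt_index_g2h(vgpu, 16, &h_index))
 *		return;	(guest index 16 lies outside the vGPU's GM space)
 *
 * On success, h_index names the same page frame in the host GGTT.
 */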
#define gtt_type_is_entry(type) \
	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
	(type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
	(e)->type = t; \
	(e)->pdev = p; \
	memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)

/*
 * Mappings between GTT_TYPE* enumerations.
 * The following information can be looked up for a given type:
 * - the type of the next-level page table
 * - the type of an entry inside this level of page table
 * - the type of an entry with the PSE bit set
 *
 * If the given type doesn't carry a piece of information -- e.g. an L4 root
 * entry has no PSE bit, and a PTE page table has no next-level page table --
 * the lookup returns GTT_TYPE_INVALID. This is useful when traversing a
 * page table.
 */
struct gtt_type_table_entry {
	int entry_type;
	int pt_type;
	int next_pt_type;
	int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
	[type] = { \
		.entry_type = e_type, \
		.pt_type = cpt_type, \
		.next_pt_type = npt_type, \
		.pse_entry_type = pse_type, \
	}

static struct gtt_type_table_entry gtt_type_table[] = {
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
			GTT_TYPE_GGTT_PTE,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
};
static inline int get_next_pt_type(int type)
{
	return gtt_type_table[type].next_pt_type;
}

static inline int get_pt_type(int type)
{
	return gtt_type_table[type].pt_type;
}

static inline int get_entry_type(int type)
{
	return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
	return gtt_type_table[type].pse_entry_type;
}
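/*
 * Worked example of the gtt_type_table lookups above; each line follows
 * directly from the table initializers:
 *
 *	get_entry_type(GTT_TYPE_PPGTT_PDE_PT)   == GTT_TYPE_PPGTT_PDE_ENTRY
 *	get_next_pt_type(GTT_TYPE_PPGTT_PDE_PT) == GTT_TYPE_PPGTT_PTE_PT
 *	get_pse_type(GTT_TYPE_PPGTT_PDE_ENTRY)  == GTT_TYPE_PPGTT_PTE_2M_ENTRY
 *	get_next_pt_type(GTT_TYPE_PPGTT_PTE_PT) == GTT_TYPE_INVALID
 */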
static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	return readq(addr);
}

static void gtt_invalidate(struct drm_i915_private *dev_priv)
{
	mmio_hw_access_pre(dev_priv);
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	mmio_hw_access_post(dev_priv);
}

static void write_pte64(struct drm_i915_private *dev_priv,
		unsigned long index, u64 pte)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	writeq(pte, addr);
}

static inline int gtt_get_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
	} else {
		e->val64 = *((u64 *)pt + index);
	}
	return 0;
}

static inline int gtt_set_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
	} else {
		*((u64 *)pt + index) = e->val64;
	}
	return 0;
}
#define GTT_HAW 46

#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)

static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
	unsigned long pfn;

	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
		pfn = (e->val64 & ADDR_1G_MASK) >> 12;
	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
		pfn = (e->val64 & ADDR_2M_MASK) >> 12;
	else
		pfn = (e->val64 & ADDR_4K_MASK) >> 12;
	return pfn;
}

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
		e->val64 &= ~ADDR_1G_MASK;
		pfn &= (ADDR_1G_MASK >> 12);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
		e->val64 &= ~ADDR_2M_MASK;
		pfn &= (ADDR_2M_MASK >> 12);
	} else {
		e->val64 &= ~ADDR_4K_MASK;
		pfn &= (ADDR_4K_MASK >> 12);
	}

	e->val64 |= (pfn << 12);
}
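/*
 * Worked example of the mask arithmetic above, with GTT_HAW == 46:
 * ADDR_4K_MASK covers address bits [45:12], so for a 4K entry with
 * val64 == 0x123456789007 (address | flag bits), gen8_gtt_get_pfn()
 * returns (val64 & ADDR_4K_MASK) >> 12 == 0x123456789.
 * gen8_gtt_set_pfn() is the inverse: it clears the address field, masks
 * the pfn to the field width and ORs (pfn << 12) back in, leaving the
 * low flag bits untouched.
 */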
static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
	/* Entry doesn't have PSE bit. */
	if (get_pse_type(e->type) == GTT_TYPE_INVALID)
		return false;

	e->type = get_entry_type(e->type);
	if (!(e->val64 & BIT(7)))
		return false;

	e->type = get_pse_type(e->type);
	return true;
}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
	/*
	 * i915 writes PDP root pointer registers without the present bit,
	 * and it still works, so root pointer entries need to be treated
	 * specially: any non-zero value counts as present.
	 */
	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		return (e->val64 != 0);
	else
		return (e->val64 & BIT(0));
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~BIT(0);
}

static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= BIT(0);
}
/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
	unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);

	trace_gma_index(__func__, gma, x);
	return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
	unsigned long x = (exp); \
	trace_gma_index(__func__, gma, x); \
	return x; \
}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
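/*
 * How a 48-bit GMA is decomposed by the generated helpers above
 * (9 bits per level, 4KB pages):
 *
 *	gma bits [47:39] -> gen8_gma_to_pml4_index()
 *	gma bits [38:30] -> gen8_gma_to_l4_pdp_index()
 *	gma bits [29:21] -> gen8_gma_to_pde_index()
 *	gma bits [20:12] -> gen8_gma_to_pte_index()
 *	gma bits [11:0]  -> byte offset inside the 4KB page
 *
 * A 3-level (legacy 32-bit) table takes its PDP index from gma bits
 * [31:30] via gen8_gma_to_l3_pdp_index() instead.
 */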
static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
	.get_entry = gtt_get_entry64,
	.set_entry = gtt_set_entry64,
	.clear_present = gtt_entry_clear_present,
	.set_present = gtt_entry_set_present,
	.test_present = gen8_gtt_test_present,
	.test_pse = gen8_gtt_test_pse,
	.get_pfn = gen8_gtt_get_pfn,
	.set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
	.gma_to_pte_index = gen8_gma_to_pte_index,
	.gma_to_pde_index = gen8_gma_to_pde_index,
	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
	.gma_to_pml4_index = gen8_gma_to_pml4_index,
};
static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
		struct intel_gvt_gtt_entry *m)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long gfn, mfn;

	*m = *p;

	if (!ops->test_present(p))
		return 0;

	gfn = ops->get_pfn(p);

	mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
	if (mfn == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
		return -ENXIO;
	}

	ops->set_pfn(m, mfn);
	return 0;
}

/*
 * MM helpers.
 */
int intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
		void *page_table, struct intel_gvt_gtt_entry *e,
		unsigned long index)
{
	struct intel_gvt *gvt = mm->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	int ret;

	e->type = mm->page_table_entry_type;

	ret = ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
	if (ret)
		return ret;

	ops->test_pse(e);
	return 0;
}

int intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
		void *page_table, struct intel_gvt_gtt_entry *e,
		unsigned long index)
{
	struct intel_gvt *gvt = mm->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
}
/*
 * PPGTT shadow page table helpers.
 */
static inline int ppgtt_spt_get_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	int ret;

	e->type = get_entry_type(type);

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	ret = ops->get_entry(page_table, e, index, guest,
			spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
	if (ret)
		return ret;

	ops->test_pse(e);
	return 0;
}

static inline int ppgtt_spt_set_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	return ops->set_entry(page_table, e, index, guest,
			spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
}

#define ppgtt_get_guest_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, NULL, \
		spt->guest_page_type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, NULL, \
		spt->guest_page_type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)
/**
 * intel_vgpu_init_page_track - init a page track data structure
 * @vgpu: a vGPU
 * @t: a page track data structure
 * @gfn: guest memory page frame number
 * @handler: the function to be called when the target guest memory page
 * has been modified.
 *
 * This function is called when a user wants to prepare a page track data
 * structure to track a guest memory page.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
		struct intel_vgpu_page_track *t,
		unsigned long gfn,
		int (*handler)(void *, u64, void *, int),
		void *data)
{
	INIT_HLIST_NODE(&t->node);

	t->tracked = false;
	t->gfn = gfn;
	t->handler = handler;
	t->data = data;

	hash_add(vgpu->gtt.tracked_guest_page_hash_table, &t->node, t->gfn);
	return 0;
}
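/*
 * Usage sketch (illustrative only; my_write_handler is a hypothetical
 * callback with the same signature as ppgtt_write_protection_handler):
 *
 *	struct intel_vgpu_page_track t;
 *
 *	intel_vgpu_init_page_track(vgpu, &t, gfn, my_write_handler, NULL);
 *	(t is now hashed by gfn; arm the write trap with
 *	 intel_gvt_hypervisor_enable_page_track(vgpu, &t) when needed)
 *	intel_vgpu_clean_page_track(vgpu, &t);
 */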
/**
 * intel_vgpu_clean_page_track - release a page track data structure
 * @vgpu: a vGPU
 * @t: a page track data structure
 *
 * This function is called before a user frees a page track data structure.
 */
void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
		struct intel_vgpu_page_track *t)
{
	if (!hlist_unhashed(&t->node))
		hash_del(&t->node);

	if (t->tracked)
		intel_gvt_hypervisor_disable_page_track(vgpu, t);
}

/**
 * intel_vgpu_find_tracked_page - find a tracked guest page
 * @vgpu: a vGPU
 * @gfn: guest memory page frame number
 *
 * This function is called when the emulation layer wants to figure out if a
 * trapped GFN is a tracked guest page.
 *
 * Returns:
 * Pointer to page track data structure, NULL if not found.
 */
struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct intel_vgpu_page_track *t;

	hash_for_each_possible(vgpu->gtt.tracked_guest_page_hash_table,
			t, node, gfn) {
		if (t->gfn == gfn)
			return t;
	}
	return NULL;
}

static int init_guest_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *p,
		unsigned long gfn,
		int (*handler)(void *, u64, void *, int),
		void *data)
{
	p->oos_page = NULL;
	p->write_cnt = 0;

	return intel_vgpu_init_page_track(vgpu, &p->track, gfn, handler, data);
}
static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page);

static void clean_guest_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *p)
{
	if (p->oos_page)
		detach_oos_page(vgpu, p->oos_page);

	intel_vgpu_clean_page_track(vgpu, &p->track);
}

static inline int init_shadow_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_shadow_page *p, int type, bool hash)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev, daddr)) {
		gvt_vgpu_err("fail to map dma addr\n");
		return -EINVAL;
	}

	p->vaddr = page_address(p->page);
	p->type = type;

	INIT_HLIST_NODE(&p->node);

	p->mfn = daddr >> I915_GTT_PAGE_SHIFT;
	if (hash)
		hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
	return 0;
}

static inline void clean_shadow_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_shadow_page *p)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;

	dma_unmap_page(kdev, p->mfn << I915_GTT_PAGE_SHIFT, 4096,
			PCI_DMA_BIDIRECTIONAL);

	if (!hlist_unhashed(&p->node))
		hash_del(&p->node);
}

static inline struct intel_vgpu_shadow_page *find_shadow_page(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	struct intel_vgpu_shadow_page *p;

	hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
			p, node, mfn) {
		if (p->mfn == mfn)
			return p;
	}
	return NULL;
}
#define page_track_to_guest_page(ptr) \
	container_of(ptr, struct intel_vgpu_guest_page, track)

#define guest_page_to_ppgtt_spt(ptr) \
	container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)

#define shadow_page_to_ppgtt_spt(ptr) \
	container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)

static void *alloc_spt(gfp_t gfp_mask)
{
	struct intel_vgpu_ppgtt_spt *spt;

	spt = kzalloc(sizeof(*spt), gfp_mask);
	if (!spt)
		return NULL;

	spt->shadow_page.page = alloc_page(gfp_mask);
	if (!spt->shadow_page.page) {
		kfree(spt);
		return NULL;
	}
	return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	__free_page(spt->shadow_page.page);
	kfree(spt);
}

static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);

	clean_shadow_page(spt->vgpu, &spt->shadow_page);
	clean_guest_page(spt->vgpu, &spt->guest_page);
	list_del_init(&spt->post_shadow_list);

	free_spt(spt);
}

static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
{
	struct hlist_node *n;
	struct intel_vgpu_shadow_page *sp;
	int i;

	hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
		ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
}
static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_guest_page *gpt,
		u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(void *data, u64 pa,
		void *p_data, int bytes)
{
	struct intel_vgpu_page_track *t = data;
	struct intel_vgpu_guest_page *p = page_track_to_guest_page(t);
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	if (!t->tracked)
		return -EINVAL;

	ret = ppgtt_handle_guest_write_page_table_bytes(p,
		pa, p_data, bytes);
	return ret;
}

static int reclaim_one_mm(struct intel_gvt *gvt);

static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
		struct intel_vgpu *vgpu, int type, unsigned long gfn)
{
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	int ret;

retry:
	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
	if (!spt) {
		if (reclaim_one_mm(vgpu->gvt))
			goto retry;

		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
		return ERR_PTR(-ENOMEM);
	}

	spt->vgpu = vgpu;
	spt->guest_page_type = type;
	atomic_set(&spt->refcount, 1);
	INIT_LIST_HEAD(&spt->post_shadow_list);

	/*
	 * TODO: the guest page type may differ from the shadow page type
	 * once PSE pages are supported.
	 */
	ret = init_shadow_page(vgpu, &spt->shadow_page, type, true);
	if (ret) {
		gvt_vgpu_err("fail to initialize shadow page for spt\n");
		goto err;
	}

	ret = init_guest_page(vgpu, &spt->guest_page,
			gfn, ppgtt_write_protection_handler, NULL);
	if (ret) {
		gvt_vgpu_err("fail to initialize guest page for spt\n");
		goto err;
	}

	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
	return spt;
err:
	ppgtt_free_shadow_page(spt);
	return ERR_PTR(ret);
}
static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);

	if (p)
		return shadow_page_to_ppgtt_spt(p);

	gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
	return NULL;
}

#define pt_entry_size_shift(spt) \
	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
	(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

#define for_each_present_guest_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (!ppgtt_get_guest_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_present_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (!ppgtt_get_shadow_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));

	atomic_inc(&spt->refcount);
}

static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *e)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	intel_gvt_gtt_type_t cur_pt_type;

	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
		return -EINVAL;

	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		cur_pt_type = get_next_pt_type(e->type) + 1;
		if (ops->get_pfn(e) ==
			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
			return 0;
	}
	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
	if (!s) {
		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
				ops->get_pfn(e));
		return -ENXIO;
	}
	return ppgtt_invalidate_shadow_page(s);
}
static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry e;
	unsigned long index;
	int ret;
	int v = atomic_read(&spt->refcount);

	trace_spt_change(spt->vgpu->id, "die", spt,
			spt->guest_page.track.gfn, spt->shadow_page.type);

	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));

	if (atomic_dec_return(&spt->refcount) > 0)
		return 0;

	if (gtt_type_is_pte_pt(spt->shadow_page.type))
		goto release;

	for_each_present_shadow_entry(spt, &e, index) {
		if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
			gvt_vgpu_err("GVT doesn't support pse bit for now\n");
			return -EINVAL;
		}
		ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
				spt->vgpu, &e);
		if (ret)
			goto fail;
	}
release:
	trace_spt_change(spt->vgpu->id, "release", spt,
			spt->guest_page.track.gfn, spt->shadow_page.type);
	ppgtt_free_shadow_page(spt);
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
			spt, e.val64, e.type);
	return ret;
}
static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s = NULL;
	struct intel_vgpu_guest_page *g;
	struct intel_vgpu_page_track *t;
	int ret;

	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
		ret = -EINVAL;
		goto fail;
	}

	t = intel_vgpu_find_tracked_page(vgpu, ops->get_pfn(we));
	if (t) {
		g = page_track_to_guest_page(t);
		s = guest_page_to_ppgtt_spt(g);
		ppgtt_get_shadow_page(s);
	} else {
		int type = get_next_pt_type(we->type);

		s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}

		ret = intel_gvt_hypervisor_enable_page_track(vgpu,
				&s->guest_page.track);
		if (ret)
			goto fail;

		ret = ppgtt_populate_shadow_page(s);
		if (ret)
			goto fail;

		trace_spt_change(vgpu->id, "new", s, s->guest_page.track.gfn,
			s->shadow_page.type);
	}
	return s;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			s, we->val64, we->type);
	return ERR_PTR(ret);
}

static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

	se->type = ge->type;
	se->val64 = ge->val64;

	ops->set_pfn(se, s->shadow_page.mfn);
}
static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_vgpu_ppgtt_spt *s;
	struct intel_gvt_gtt_entry se, ge;
	unsigned long i;
	int ret;

	trace_spt_change(spt->vgpu->id, "born", spt,
			spt->guest_page.track.gfn, spt->shadow_page.type);

	if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
		for_each_present_guest_entry(spt, &ge, i) {
			ret = gtt_entry_p2m(vgpu, &ge, &se);
			if (ret)
				goto fail;
			ppgtt_set_shadow_entry(spt, &se, i);
		}
		return 0;
	}

	for_each_present_guest_entry(spt, &ge, i) {
		if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
			gvt_vgpu_err("GVT doesn't support pse bit now\n");
			ret = -EINVAL;
			goto fail;
		}

		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &se, i);
		ppgtt_generate_shadow_entry(&se, s, &ge);
		ppgtt_set_shadow_entry(spt, &se, i);
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, ge.val64, ge.type);
	return ret;
}
static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *se, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int ret;

	trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, se->val64,
			 index);

	if (!ops->test_present(se))
		return 0;

	if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
		return 0;

	if (gtt_type_is_pt(get_next_pt_type(se->type))) {
		struct intel_vgpu_ppgtt_spt *s =
			ppgtt_find_shadow_page(vgpu, ops->get_pfn(se));
		if (!s) {
			gvt_vgpu_err("fail to find guest page\n");
			ret = -ENXIO;
			goto fail;
		}
		ret = ppgtt_invalidate_shadow_page(s);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, se->val64, se->type);
	return ret;
}

static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry m;
	struct intel_vgpu_ppgtt_spt *s;
	int ret;

	trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
		we->val64, index);

	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &m, index);
		ppgtt_generate_shadow_entry(&m, s, we);
		ppgtt_set_shadow_entry(spt, &m, index);
	} else {
		ret = gtt_entry_p2m(vgpu, we, &m);
		if (ret)
			goto fail;
		ppgtt_set_shadow_entry(spt, &m, index);
	}
	return 0;
fail:
	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
		spt, we->val64, we->type);
	return ret;
}
static int sync_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt =
		guest_page_to_ppgtt_spt(oos_page->guest_page);
	struct intel_gvt_gtt_entry old, new, m;
	int index;
	int ret;

	trace_oos_change(vgpu->id, "sync", oos_page->id,
			oos_page->guest_page, spt->guest_page_type);

	old.type = new.type = get_entry_type(spt->guest_page_type);
	old.val64 = new.val64 = 0;

	for (index = 0; index < (I915_GTT_PAGE_SIZE >>
				info->gtt_entry_size_shift); index++) {
		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
		ops->get_entry(NULL, &new, index, true,
			oos_page->guest_page->track.gfn << PAGE_SHIFT, vgpu);

		if (old.val64 == new.val64
			&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
			continue;

		trace_oos_sync(vgpu->id, oos_page->id,
				oos_page->guest_page, spt->guest_page_type,
				new.val64, index);

		ret = gtt_entry_p2m(vgpu, &new, &m);
		if (ret)
			return ret;

		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
		ppgtt_set_shadow_entry(spt, &m, index);
	}

	oos_page->guest_page->write_cnt = 0;
	list_del_init(&spt->post_shadow_list);
	return 0;
}
static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_ppgtt_spt *spt =
		guest_page_to_ppgtt_spt(oos_page->guest_page);

	trace_oos_change(vgpu->id, "detach", oos_page->id,
			oos_page->guest_page, spt->guest_page_type);

	oos_page->guest_page->write_cnt = 0;
	oos_page->guest_page->oos_page = NULL;
	oos_page->guest_page = NULL;

	list_del_init(&oos_page->vm_list);
	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

	return 0;
}

static int attach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_gvt *gvt = vgpu->gvt;
	int ret;

	ret = intel_gvt_hypervisor_read_gpa(vgpu,
			gpt->track.gfn << I915_GTT_PAGE_SHIFT,
			oos_page->mem, I915_GTT_PAGE_SIZE);
	if (ret)
		return ret;

	oos_page->guest_page = gpt;
	gpt->oos_page = oos_page;

	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

	trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
	return 0;
}
static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	int ret;

	ret = intel_gvt_hypervisor_enable_page_track(vgpu, &gpt->track);
	if (ret)
		return ret;

	trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

	list_del_init(&gpt->oos_page->vm_list);
	return sync_oos_page(vgpu, gpt->oos_page);
}

static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
	int ret;

	WARN(oos_page, "shadow PPGTT page already has an oos page\n");

	if (list_empty(&gtt->oos_page_free_list_head)) {
		oos_page = container_of(gtt->oos_page_use_list_head.next,
			struct intel_vgpu_oos_page, list);
		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
		if (ret)
			return ret;

		ret = detach_oos_page(vgpu, oos_page);
		if (ret)
			return ret;
	}
	oos_page = container_of(gtt->oos_page_free_list_head.next,
		struct intel_vgpu_oos_page, list);
	return attach_oos_page(vgpu, oos_page, gpt);
}

static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;

	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
		return -EINVAL;

	trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

	list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
	return intel_gvt_hypervisor_disable_page_track(vgpu, &gpt->track);
}
/**
 * intel_vgpu_sync_oos_pages - sync all the out-of-synced shadows for a vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to sync all the out-of-synced shadow pages for the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;
	int ret;

	if (!enable_out_of_sync)
		return 0;

	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
		oos_page = container_of(pos,
				struct intel_vgpu_oos_page, vm_list);
		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * The heart of PPGTT shadow page table.
 */
static int ppgtt_handle_guest_write_page_table(
		struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu *vgpu = spt->vgpu;
	int type = spt->shadow_page.type;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry se;

	int ret;
	int new_present;

	new_present = ops->test_present(we);

	/*
	 * Add the new entry first and then remove the old one; this
	 * guarantees the PPGTT table stays valid in the window between
	 * the add and the removal.
	 */
	ppgtt_get_shadow_entry(spt, &se, index);

	if (new_present) {
		ret = ppgtt_handle_guest_entry_add(gpt, we, index);
		if (ret)
			goto fail;
	}

	ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
	if (ret)
		goto fail;

	if (!new_present) {
		ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
		ppgtt_set_shadow_entry(spt, &se, index);
	}

	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
			spt, we->val64, we->type);
	return ret;
}

static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
{
	return enable_out_of_sync
		&& gtt_type_is_pte_pt(
			guest_page_to_ppgtt_spt(gpt)->guest_page_type)
		&& gpt->write_cnt >= 2;
}

static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
		unsigned long index)
{
	set_bit(index, spt->post_shadow_bitmap);
	if (!list_empty(&spt->post_shadow_list))
		return;

	list_add_tail(&spt->post_shadow_list,
			&spt->vgpu->gtt.post_shadow_list_head);
}
/**
 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to flush all the post shadows for the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge;
	unsigned long index;
	int ret;

	list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
		spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
				post_shadow_list);

		for_each_set_bit(index, spt->post_shadow_bitmap,
				GTT_ENTRY_NUM_IN_ONE_PAGE) {
			ppgtt_get_guest_entry(spt, &ge, index);

			ret = ppgtt_handle_guest_write_page_table(
					&spt->guest_page, &ge, index);
			if (ret)
				return ret;
			clear_bit(index, spt->post_shadow_bitmap);
		}
		list_del_init(&spt->post_shadow_list);
	}
	return 0;
}
static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_guest_page *gpt,
		u64 pa, void *p_data, int bytes)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt_gtt_entry we, se;
	unsigned long index;
	int ret;

	index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;

	ppgtt_get_guest_entry(spt, &we, index);

	ops->test_pse(&we);

	if (bytes == info->gtt_entry_size) {
		ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
		if (ret)
			return ret;
	} else {
		if (!test_bit(index, spt->post_shadow_bitmap)) {
			ppgtt_get_shadow_entry(spt, &se, index);
			ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
			if (ret)
				return ret;
		}

		ppgtt_set_post_shadow(spt, index);
	}

	if (!enable_out_of_sync)
		return 0;

	gpt->write_cnt++;

	if (gpt->oos_page)
		ops->set_entry(gpt->oos_page->mem, &we, index,
				false, 0, vgpu);

	if (can_do_out_of_sync(gpt)) {
		if (!gpt->oos_page)
			ppgtt_allocate_oos_page(vgpu, gpt);

		ret = ppgtt_set_guest_page_oos(vgpu, gpt);
		if (ret < 0)
			return ret;
	}
	return 0;
}
/*
 * mm page table allocation policy for bdw+
 *  - for ggtt, only a virtual page table will be allocated.
 *  - for ppgtt, dedicated virtual/shadow page tables will be allocated.
 */
static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	void *mem;

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		mm->page_table_entry_cnt = 4;
		mm->page_table_entry_size = mm->page_table_entry_cnt *
			info->gtt_entry_size;
		mem = kzalloc(mm->has_shadow_page_table ?
			mm->page_table_entry_size * 2
				: mm->page_table_entry_size, GFP_KERNEL);
		if (!mem)
			return -ENOMEM;
		mm->virtual_page_table = mem;
		if (!mm->has_shadow_page_table)
			return 0;
		mm->shadow_page_table = mem + mm->page_table_entry_size;
	} else if (mm->type == INTEL_GVT_MM_GGTT) {
		mm->page_table_entry_cnt =
			(gvt_ggtt_gm_sz(gvt) >> I915_GTT_PAGE_SHIFT);
		mm->page_table_entry_size = mm->page_table_entry_cnt *
			info->gtt_entry_size;
		mem = vzalloc(mm->page_table_entry_size);
		if (!mem)
			return -ENOMEM;
		mm->virtual_page_table = mem;
	}
	return 0;
}
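/*
 * Worked size example for the policy above, assuming 8-byte GTT entries:
 * a PPGTT mm holds only the four root pointers, i.e. 4 * 8 = 32 bytes for
 * the virtual table plus another 32 bytes for the shadow table, allocated
 * back to back by the kzalloc() above. A GGTT mm covering a 4GB GM space
 * needs (4GB >> 12) * 8 = 8MB for its virtual table, hence the vzalloc().
 */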
static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
{
	if (mm->type == INTEL_GVT_MM_PPGTT) {
		kfree(mm->virtual_page_table);
	} else if (mm->type == INTEL_GVT_MM_GGTT) {
		if (mm->virtual_page_table)
			vfree(mm->virtual_page_table);
	}
	mm->virtual_page_table = mm->shadow_page_table = NULL;
}

static void invalidate_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_gvt_gtt_entry se;
	int i;

	if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
		return;

	for (i = 0; i < mm->page_table_entry_cnt; i++) {
		ppgtt_get_shadow_root_entry(mm, &se, i);
		if (!ops->test_present(&se))
			continue;
		ppgtt_invalidate_shadow_page_by_shadow_entry(
				vgpu, &se);
		se.val64 = 0;
		ppgtt_set_shadow_root_entry(mm, &se, i);

		trace_gpt_change(vgpu->id, "destroy root pointer",
				NULL, se.type, se.val64, i);
	}
	mm->shadowed = false;
}
/**
 * intel_vgpu_destroy_mm - destroy a mm object
 * @mm_ref: a kref object
 *
 * This function is used to destroy a mm object for a vGPU.
 */
void intel_vgpu_destroy_mm(struct kref *mm_ref)
{
	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;

	if (!mm->initialized)
		goto out;

	list_del(&mm->list);
	list_del(&mm->lru_list);

	if (mm->has_shadow_page_table)
		invalidate_mm(mm);

	gtt->mm_free_page_table(mm);
out:
	kfree(mm);
}

static int shadow_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge, se;
	int i;
	int ret;

	if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
		return -EINVAL;

	mm->shadowed = true;

	for (i = 0; i < mm->page_table_entry_cnt; i++) {
		ppgtt_get_guest_root_entry(mm, &ge, i);
		if (!ops->test_present(&ge))
			continue;

		trace_gpt_change(vgpu->id, __func__, NULL,
				ge.type, ge.val64, i);

		spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
		if (IS_ERR(spt)) {
			gvt_vgpu_err("fail to populate guest root pointer\n");
			ret = PTR_ERR(spt);
			goto fail;
		}
		ppgtt_generate_shadow_entry(&se, spt, &ge);
		ppgtt_set_shadow_root_entry(mm, &se, i);

		trace_gpt_change(vgpu->id, "populate root pointer",
				NULL, se.type, se.val64, i);
	}
	return 0;
fail:
	invalidate_mm(mm);
	return ret;
}
/**
 * intel_vgpu_create_mm - create a mm object for a vGPU
 * @vgpu: a vGPU
 * @mm_type: mm object type, should be PPGTT or GGTT
 * @virtual_page_table: page table root pointers. Could be NULL if the user
 *	wants to populate the shadow later.
 * @page_table_level: describe the page table level of the mm object
 * @pde_base_index: pde root pointer base in GGTT MMIO.
 *
 * This function is used to create a mm object for a vGPU.
 *
 * Returns:
 * The mm object on success, an ERR_PTR carrying a negative error code if
 * failed.
 */
struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
		int mm_type, void *virtual_page_table, int page_table_level,
		u32 pde_base_index)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_mm *mm;
	int ret;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm) {
		ret = -ENOMEM;
		goto fail;
	}

	mm->type = mm_type;

	if (page_table_level == 1)
		mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
	else if (page_table_level == 3)
		mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
	else if (page_table_level == 4)
		mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
	else {
		WARN_ON(1);
		ret = -EINVAL;
		goto fail;
	}

	mm->page_table_level = page_table_level;
	mm->pde_base_index = pde_base_index;

	mm->vgpu = vgpu;
	mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT);

	kref_init(&mm->ref);
	atomic_set(&mm->pincount, 0);
	INIT_LIST_HEAD(&mm->list);
	INIT_LIST_HEAD(&mm->lru_list);
	list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);

	ret = gtt->mm_alloc_page_table(mm);
	if (ret) {
		gvt_vgpu_err("fail to allocate page table for mm\n");
		goto fail;
	}

	mm->initialized = true;

	if (virtual_page_table)
		memcpy(mm->virtual_page_table, virtual_page_table,
				mm->page_table_entry_size);

	if (mm->has_shadow_page_table) {
		ret = shadow_mm(mm);
		if (ret)
			goto fail;
		list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
	}
	return mm;
fail:
	gvt_vgpu_err("fail to create mm\n");
	if (mm)
		intel_gvt_mm_unreference(mm);
	return ERR_PTR(ret);
}
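/*
 * Usage sketch (illustrative only): creating a shadowed 4-level PPGTT mm
 * from four guest root pointers held in a local pdps[4] array:
 *
 *	struct intel_vgpu_mm *mm;
 *
 *	mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT, pdps, 4, 0);
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 *
 * Pin it with intel_vgpu_pin_mm() around workload submission and drop the
 * reference with intel_gvt_mm_unreference() when done.
 */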
/**
 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user is done using a vGPU mm object.
 */
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
	if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
		return;

	atomic_dec(&mm->pincount);
}

/**
 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user wants to use a vGPU mm object. If this
 * mm object hasn't been shadowed yet, the shadow will be populated at this
 * time.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{
	int ret;

	if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
		return 0;

	if (!mm->shadowed) {
		ret = shadow_mm(mm);
		if (ret)
			return ret;
	}

	atomic_inc(&mm->pincount);
	list_del_init(&mm->lru_list);
	list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
	return 0;
}

static int reclaim_one_mm(struct intel_gvt *gvt)
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, lru_list);

		if (mm->type != INTEL_GVT_MM_PPGTT)
			continue;
		if (atomic_read(&mm->pincount))
			continue;

		list_del_init(&mm->lru_list);
		invalidate_mm(mm);
		return 1;
	}
	return 0;
}
/*
 * GMA translation APIs.
 */
static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;

	if (WARN_ON(!mm->has_shadow_page_table))
		return -EINVAL;

	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
	if (!s)
		return -ENXIO;

	if (!guest)
		ppgtt_get_shadow_entry(s, e, index);
	else
		ppgtt_get_guest_entry(s, e, index);
	return 0;
}
/**
 * intel_vgpu_gma_to_gpa - translate a gma to GPA
 * @mm: mm object. could be a PPGTT or GGTT mm object
 * @gma: graphics memory address in this mm object
 *
 * This function is used to translate a graphics memory address in a specific
 * graphics memory space to a guest physical address.
 *
 * Returns:
 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
 */
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
	struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
	unsigned long gma_index[4];
	struct intel_gvt_gtt_entry e;
	int i, index;
	int ret;

	if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
		return INTEL_GVT_INVALID_ADDR;

	if (mm->type == INTEL_GVT_MM_GGTT) {
		if (!vgpu_gmadr_is_valid(vgpu, gma))
			goto err;

		ret = ggtt_get_guest_entry(mm, &e,
			gma_ops->gma_to_ggtt_pte_index(gma));
		if (ret)
			goto err;
		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
			+ (gma & ~I915_GTT_PAGE_MASK);

		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
		return gpa;
	}

	switch (mm->page_table_level) {
	case 4:
		ret = ppgtt_get_shadow_root_entry(mm, &e, 0);
		if (ret)
			goto err;
		gma_index[0] = gma_ops->gma_to_pml4_index(gma);
		gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
		gma_index[2] = gma_ops->gma_to_pde_index(gma);
		gma_index[3] = gma_ops->gma_to_pte_index(gma);
		index = 4;
		break;
	case 3:
		ret = ppgtt_get_shadow_root_entry(mm, &e,
				gma_ops->gma_to_l3_pdp_index(gma));
		if (ret)
			goto err;
		gma_index[0] = gma_ops->gma_to_pde_index(gma);
		gma_index[1] = gma_ops->gma_to_pte_index(gma);
		index = 2;
		break;
	case 2:
		ret = ppgtt_get_shadow_root_entry(mm, &e,
				gma_ops->gma_to_pde_index(gma));
		if (ret)
			goto err;
		gma_index[0] = gma_ops->gma_to_pte_index(gma);
		index = 1;
		break;
	default:
		WARN_ON(1);
		goto err;
	}

	/* walk into the shadow page table and get gpa from guest entry */
	for (i = 0; i < index; i++) {
		ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
			(i == index - 1));
		if (ret)
			goto err;

		if (!pte_ops->test_present(&e)) {
			gvt_dbg_core("GMA 0x%lx is not present\n", gma);
			goto err;
		}
	}

	gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
		+ (gma & ~I915_GTT_PAGE_MASK);

	trace_gma_translate(vgpu->id, "ppgtt", 0,
			mm->page_table_level, gma, gpa);
	return gpa;
err:
	gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
	return INTEL_GVT_INVALID_ADDR;
}
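/*
 * Usage sketch (illustrative only): resolving a graphics memory address,
 * e.g. a batch buffer start, before touching guest memory:
 *
 *	unsigned long gpa;
 *
 *	gpa = intel_vgpu_gma_to_gpa(mm, gma);
 *	if (gpa == INTEL_GVT_INVALID_ADDR)
 *		return -EFAULT;
 *	intel_gvt_hypervisor_read_gpa(vgpu, gpa, buf, len);
 */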
static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes)
{
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	unsigned long index = off >> info->gtt_entry_size_shift;
	struct intel_gvt_gtt_entry e;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	ggtt_get_guest_entry(ggtt_mm, &e, index);
	memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
			bytes);
	return 0;
}

/**
 * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data will be returned to guest
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register read.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
	return ret;
}
static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
	unsigned long gma;
	struct intel_gvt_gtt_entry e, m;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	gma = g_gtt_index << I915_GTT_PAGE_SHIFT;

	/* the VM may configure the whole GM space when ballooning is used */
	if (!vgpu_gmadr_is_valid(vgpu, gma))
		return 0;

	ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);

	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
			bytes);

	if (ops->test_present(&e)) {
		ret = gtt_entry_p2m(vgpu, &e, &m);
		if (ret) {
			gvt_vgpu_err("fail to translate guest gtt entry\n");
			/* The guest may read/write the entry while it is only
			 * partially updated; p2m will fail in that situation,
			 * so point the shadow entry at a scratch page instead.
			 */
			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
		}
	} else {
		m = e;
		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
	}

	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
	gtt_invalidate(gvt->dev_priv);
	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
	return 0;
}

/**
 * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data from guest write
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register write.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
	return ret;
}
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t type)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int page_entry_num = I915_GTT_PAGE_SIZE >>
				vgpu->gvt->device_info.gtt_entry_size_shift;
	void *scratch_pt;
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
		return -EINVAL;

	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!scratch_pt) {
		gvt_vgpu_err("fail to allocate scratch page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_vgpu_err("fail to dmamap scratch_pt\n");
		__free_page(virt_to_page(scratch_pt));
		return -ENOMEM;
	}
	gtt->scratch_pt[type].page_mfn =
		(unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
			vgpu->id, type, gtt->scratch_pt[type].page_mfn);

	/* Build the tree by filling the scratch pt with entries that point
	 * to the next-level scratch pt or scratch page. scratch_pt[type]
	 * indicates the scratch pt/scratch page used by that shadow page
	 * table level.
	 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by
	 * GTT_TYPE_PPGTT_PDE_PT level page tables; this scratch_pt itself
	 * is of type GTT_TYPE_PPGTT_PTE_PT and is filled with scratch page
	 * mfns.
	 */
	if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
		struct intel_gvt_gtt_entry se;

		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
		se.type = get_entry_type(type - 1);
		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);

		/* The entry parameters (present/writeable/cache type) are
		 * set to match i915's scratch page tree.
		 */
		se.val64 |= _PAGE_PRESENT | _PAGE_RW;
		if (type == GTT_TYPE_PPGTT_PDE_PT)
			se.val64 |= PPAT_CACHED;

		for (i = 0; i < page_entry_num; i++)
			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
	}

	return 0;
}
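/*
 * The resulting scratch hierarchy for a vGPU (each arrow reads "every
 * entry points at"):
 *
 *	scratch_pt[GTT_TYPE_PPGTT_PML4_PT]
 *	    -> scratch_pt[GTT_TYPE_PPGTT_PDP_PT]
 *	    -> scratch_pt[GTT_TYPE_PPGTT_PDE_PT]
 *	    -> scratch_pt[GTT_TYPE_PPGTT_PTE_PT] (zero-filled data page)
 *
 * so a stray access through an unmapped range walks down scratch tables
 * and finally lands on the zero-filled scratch page.
 */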
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		if (vgpu->gtt.scratch_pt[i].page != NULL) {
			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
					I915_GTT_PAGE_SHIFT);
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(vgpu->gtt.scratch_pt[i].page);
			vgpu->gtt.scratch_pt[i].page = NULL;
			vgpu->gtt.scratch_pt[i].page_mfn = 0;
		}
	}

	return 0;
}

static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i, ret;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		ret = alloc_scratch_pages(vgpu, i);
		if (ret)
			goto err;
	}

	return 0;

err:
	release_scratch_page_tree(vgpu);
	return ret;
}
/**
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to initialize per-vGPU graphics memory virtualization
 * components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_vgpu_mm *ggtt_mm;

	hash_init(gtt->tracked_guest_page_hash_table);
	hash_init(gtt->shadow_page_hash_table);

	INIT_LIST_HEAD(&gtt->mm_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_list_head);
	INIT_LIST_HEAD(&gtt->post_shadow_list_head);

	intel_vgpu_reset_ggtt(vgpu);

	ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
			NULL, 1, 0);
	if (IS_ERR(ggtt_mm)) {
		gvt_vgpu_err("fail to create mm for ggtt.\n");
		return PTR_ERR(ggtt_mm);
	}

	gtt->ggtt_mm = ggtt_mm;

	return create_scratch_page_tree(vgpu);
}
static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, list);
		if (mm->type == type) {
			vgpu->gvt->gtt.mm_free_page_table(mm);
			list_del(&mm->list);
			list_del(&mm->lru_list);
			kfree(mm);
		}
	}
}

/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory virtualization
 * components.
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
	ppgtt_free_all_shadow_page(vgpu);
	release_scratch_page_tree(vgpu);

	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT);
}
static void clean_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;

	WARN(!list_empty(&gtt->oos_page_use_list_head),
		"someone is still using oos page\n");

	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
		list_del(&oos_page->list);
		kfree(oos_page);
	}
}

static int setup_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page;
	int i;
	int ret;

	INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_use_list_head);

	for (i = 0; i < preallocated_oos_pages; i++) {
		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
		if (!oos_page) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&oos_page->list);
		INIT_LIST_HEAD(&oos_page->vm_list);
		oos_page->id = i;
		list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
	}

	gvt_dbg_mm("%d oos pages preallocated\n", i);

	return 0;
fail:
	clean_spt_oos(gvt);
	return ret;
}
/**
 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 * @root_entry: PPGTT page table root pointers
 *
 * This function is used to find a PPGTT mm object from the mm object pool.
 *
 * Returns:
 * pointer to mm object on success, NULL if failed.
 */
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level, void *root_entry)
{
	struct list_head *pos;
	struct intel_vgpu_mm *mm;
	u64 *src, *dst;

	list_for_each(pos, &vgpu->gtt.mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, list);
		if (mm->type != INTEL_GVT_MM_PPGTT)
			continue;

		if (mm->page_table_level != page_table_level)
			continue;

		src = root_entry;
		dst = mm->virtual_page_table;

		if (page_table_level == 3) {
			if (src[0] == dst[0]
					&& src[1] == dst[1]
					&& src[2] == dst[2]
					&& src[3] == dst[3])
				return mm;
		} else {
			if (src[0] == dst[0])
				return mm;
		}
	}
	return NULL;
}
/**
 * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
 * g2v notification
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 *
 * This function is used to create a PPGTT mm object from a guest to GVT-g
 * notification.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level)
{
	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
	struct intel_vgpu_mm *mm;

	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
		return -EINVAL;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
	if (mm) {
		intel_gvt_mm_reference(mm);
	} else {
		mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
				pdp, page_table_level, 0);
		if (IS_ERR(mm)) {
			gvt_vgpu_err("fail to create mm\n");
			return PTR_ERR(mm);
		}
	}
	return 0;
}

/**
 * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
 * g2v notification
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 *
 * This function is used to destroy a PPGTT mm object from a guest to GVT-g
 * notification.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level)
{
	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
	struct intel_vgpu_mm *mm;

	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
		return -EINVAL;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
	if (!mm) {
		gvt_vgpu_err("fail to find ppgtt instance.\n");
		return -EINVAL;
	}
	intel_gvt_mm_unreference(mm);
	return 0;
}
/**
 * intel_gvt_init_gtt - initialize mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to initialize
 * the mm components of a GVT device.
 *
 * Returns:
 * zero on success, negative error code if failed.
 */
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
	int ret;
	void *page;
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	gvt_dbg_core("init gtt\n");

	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
		|| IS_KABYLAKE(gvt->dev_priv)) {
		gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
		gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
		gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
		gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
	} else {
		return -ENODEV;
	}

	page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!page) {
		gvt_err("fail to allocate scratch ggtt page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(page), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_err("fail to dmamap scratch ggtt page\n");
		__free_page(virt_to_page(page));
		return -ENOMEM;
	}

	gvt->gtt.scratch_page = virt_to_page(page);
	gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);

	if (enable_out_of_sync) {
		ret = setup_spt_oos(gvt);
		if (ret) {
			gvt_err("fail to initialize SPT oos\n");
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(gvt->gtt.scratch_page);
			return ret;
		}
	}
	INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
	return 0;
}
/**
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up
 * the mm components of a GVT device.
 */
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
					I915_GTT_PAGE_SHIFT);

	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);

	__free_page(gvt->gtt.scratch_page);

	if (enable_out_of_sync)
		clean_spt_oos(gvt);
}
/**
 * intel_vgpu_reset_ggtt - reset the GGTT entries
 * @vgpu: a vGPU
 *
 * This function is called at the vGPU create stage
 * to reset all the GGTT entries.
 */
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	u32 index;
	u32 offset;
	u32 num_entries;
	struct intel_gvt_gtt_entry e;

	memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
	e.type = GTT_TYPE_GGTT_PTE;
	ops->set_pfn(&e, gvt->gtt.scratch_mfn);
	e.val64 |= _PAGE_PRESENT;

	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
	for (offset = 0; offset < num_entries; offset++)
		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);

	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
	for (offset = 0; offset < num_entries; offset++)
		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);

	gtt_invalidate(dev_priv);
}
/**
 * intel_vgpu_reset_gtt - reset all GTT related status
 * @vgpu: a vGPU
 *
 * This function is called from the vfio core to reset all
 * GTT related status, including GGTT, PPGTT and scratch page.
 */
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
	ppgtt_free_all_shadow_page(vgpu);

	/* Shadow pages are only created when there is no page
	 * table tracking data, so remove page tracking data after
	 * removing the shadow pages.
	 */
	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);

	intel_vgpu_reset_ggtt(vgpu);
}