 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Kevin Tian <kevin.tian@intel.com>
 * Eddie Dong <eddie.dong@intel.com>
 * Niu Bing <bing.niu@intel.com>
 * Zhi Wang <zhi.a.wang@intel.com>
#include "hypercall.h"
#include "interrupt.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "mmio_context.h"
#include "cmd_parser.h"
#include "fb_decoder.h"
#include "page_track.h"
#define GVT_MAX_VGPU 8

struct intel_gvt_host {
	struct intel_gvt_mpt *mpt;

extern struct intel_gvt_host intel_gvt_host;

/* Describe per-platform limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	unsigned long msi_cap_offset;
	u32 gtt_entry_size_shift;
	int gmadr_bytes_in_cmd;
/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;

#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fences owned by a vGPU */
struct intel_vgpu_fence {
	struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
struct intel_vgpu_mmio {

#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {

struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
#define INTEL_GVT_MAX_PIPE 4

struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
		       INTEL_GVT_EVENT_MAX);

struct intel_vgpu_opregion {
	u32 gfn[INTEL_GVT_OPREGION_PAGES];

#define vgpu_opregion(vgpu) (&(vgpu->opregion))
struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[I915_MAX_PORTS];
	struct intel_vgpu_sbi sbi;

struct vgpu_sched_ctl {
	INTEL_VGPU_EXECLIST_SUBMISSION = 1,
	INTEL_VGPU_GUC_SUBMISSION,

struct intel_vgpu_submission_ops {
	int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
	void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
	void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);

struct intel_vgpu_submission {
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
	struct list_head workload_q_head[I915_NUM_ENGINES];
	struct kmem_cache *workloads;
	atomic_t running_workload_num;
	struct i915_gem_context *shadow_ctx;
	u64 i915_context_pml4;
	u64 i915_context_pdps[GEN8_3LVL_PDPES];
	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	void *ring_scan_buffer[I915_NUM_ENGINES];
	int ring_scan_buffer_size[I915_NUM_ENGINES];
	const struct intel_vgpu_submission_ops *ops;
	int virtual_submission_interface;
	struct intel_gvt *gvt;
	struct mutex vgpu_lock;
	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
	unsigned int resetting_eng;

	/* Both sched_data and sched_ctl can be seen as part of the global GVT
	 * scheduler structure, so the vGPU scheduling fields below are
	 * protected by sched_lock, not vgpu_lock.
	 */
	struct vgpu_sched_ctl sched_ctl;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_submission submission;
	struct radix_tree_root page_track_tree;
	u32 hws_pga[I915_NUM_ENGINES];

	struct dentry *debugfs;
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
	struct mdev_device *mdev;
	struct vfio_region *region;
	struct eventfd_ctx *intx_trigger;
	struct eventfd_ctx *msi_trigger;

	/*
	 * Two caches are used to avoid mapping duplicated pages (e.g.
	 * scratch pages). This helps to reduce DMA setup overhead.
	 */
	struct rb_root gfn_cache;
	struct rb_root dma_addr_cache;
	unsigned long nr_cache_entries;
	struct mutex cache_lock;

	struct notifier_block iommu_notifier;
	struct notifier_block group_notifier;

	struct work_struct release_work;

	struct vfio_device *vfio_device;

	struct list_head dmabuf_obj_list_head;
	struct mutex dmabuf_lock;
	struct idr object_idr;

	struct completion vblank_done;
/* Validate VM health from an emulation return value. */
#define vgpu_is_vm_unhealthy(ret_val) \
	(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
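/*
 * Illustrative usage sketch (not part of the original header): callers that
 * emulate guest requests can feed an error code to vgpu_is_vm_unhealthy() to
 * decide whether the guest should be put into failsafe mode, e.g.:
 *
 *	ret = intel_gvt_scan_and_shadow_workload(workload);
 *	if (vgpu_is_vm_unhealthy(ret))
 *		enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
 */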
struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;

struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;

/* Special MMIO blocks. */
struct gvt_mmio_block {

#define INTEL_GVT_MMIO_HASH_BITS 11
struct intel_gvt_mmio {
/* Register contains RO bits */
#define F_RO		(1 << 0)
/* Register contains graphics address */
#define F_GMADR		(1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK	(1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS	(1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED	(1 << 4)
/* This reg has been accessed through GPU commands */
#define F_CMD_ACCESSED	(1 << 5)
/* This reg may be accessed with an unaligned address */
#define F_UNALIGN	(1 << 6)
/* This reg is saved/restored in context */
#define F_IN_CTX	(1 << 7)

	struct gvt_mmio_block *mmio_block;
	unsigned int num_mmio_block;

	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
	unsigned long num_tracked_mmio;
struct intel_gvt_firmware {
	bool firmware_loaded;

#define NR_MAX_INTEL_VGPU_TYPES 20

struct intel_vgpu_type {
	unsigned int avail_instance;
	unsigned int low_gm_size;
	unsigned int high_gm_size;
	enum intel_vgpu_edid resolution;
	/* GVT-scope lock: protects GVT itself and all resources not yet
	 * covered by the finer-grained locks (the vgpu and scheduler locks).
	 */

	/* Scheduler-scope lock: protects gvt and vgpu scheduling data. */
	struct mutex sched_lock;

	struct drm_i915_private *dev_priv;
	struct idr vgpu_idr;	/* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_workload_scheduler scheduler;
	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
	struct intel_vgpu_type *types;
	unsigned int num_types;
	struct intel_vgpu *idle_vgpu;

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;

	/* service_request is always manipulated with atomic bit operations,
	 * so no GVT-wide lock is needed when setting or testing its bits.
	 */
	unsigned long service_request;

	struct engine_mmio *mmio;
	int ctx_mmio_count[I915_NUM_ENGINES];

	struct dentry *debugfs_root;
static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)

	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,

	/* Scheduling triggered by a timer */
	INTEL_GVT_REQUEST_SCHED = 1,

	/* Scheduling triggered by an event */
	INTEL_GVT_REQUEST_EVENT_SCHED = 2,

static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}
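/*
 * Illustrative usage sketch (not part of the original header): a periodic
 * scheduling timer or an event handler kicks the GVT service thread by
 * setting one of the request bits declared above, e.g.:
 *
 *	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_SCHED);
 *
 * The service thread then tests and clears the bit with atomic bit ops on
 * gvt->service_request.
 */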
void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);

/* Aperture/GM space definitions for GVT device */
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)
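/*
 * Illustrative example (not part of the original header): these helpers only
 * shift by 20 bits, so MB_TO_BYTES(128) == 128ULL << 20 == 0x8000000 bytes,
 * and BYTES_TO_MB(0x8000000) == 128.
 */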
#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)

/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt)		(gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt)	(gvt->dev_priv->ggtt.gmadr.start)

#define gvt_ggtt_gm_sz(gvt)	(gvt->dev_priv->ggtt.vm.total)
#define gvt_ggtt_sz(gvt) \
	((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt)	(gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)
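/*
 * Layout sketch derived from the macros above (illustrative, not part of the
 * original header): graphics memory (GM) addresses form one linear range,
 * with the CPU-mappable aperture at the bottom and the hidden (high) GM
 * directly after it:
 *
 *	0                     aperture_sz                ggtt_gm_sz
 *	+--------------------------+----------------------------+
 *	| aperture (low GM, GMADR) |      hidden (high GM)      |
 *	+--------------------------+----------------------------+
 */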
#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu)	((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu)		((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu)		((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
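/*
 * Illustrative note (assumption based on the macros above, not original
 * text): each vGPU owns one drm_mm_node carved out of the host aperture and
 * one carved out of the hidden GM, so its guest GM ranges are expressed as
 * host GM offsets of those nodes, e.g.:
 *
 *	vgpu_aperture_gmadr_base(vgpu) == vgpu->gm.low_gm_node.start
 *	vgpu_aperture_gmadr_end(vgpu)  == base + vgpu_aperture_sz(vgpu) - 1
 */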
struct intel_vgpu_creation_params {
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
			    u32 fence, u64 value);
/* Macros for easily accessing vGPU virtual/shadow registers.
   Explicitly separate uses for typed MMIO regs and raw offsets. */
#define vgpu_vreg_t(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg(vgpu, offset) \
	(*(u32 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_vreg64_t(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
	(*(u64 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_sreg_t(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
#define vgpu_sreg(vgpu, offset) \
	(*(u32 *)(vgpu->mmio.sreg + (offset)))
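/*
 * Illustrative usage sketch (assumption, not part of the original header):
 * the *_t variants take a typed i915_reg_t, while the plain variants take a
 * raw byte offset, e.g. from a trapped MMIO access:
 *
 *	vgpu_vreg(vgpu, offset) = data;            // raw offset
 *	val = vgpu_vreg_t(vgpu, GEN8_GT_IER(0));   // typed i915 register
 */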
#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)
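/*
 * Illustrative usage sketch (not part of the original header): iterate over
 * all currently active vGPUs registered in the IDR pool:
 *
 *	struct intel_vgpu *vgpu;
 *	int id;
 *
 *	for_each_active_vgpu(gvt, vgpu, id)
 *		handle_vgpu(vgpu);	// hypothetical per-vGPU work
 */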
static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
		u32 offset, u32 val, bool low)

	/* BAR offset should be 32-bit aligned */
	offset = rounddown(offset, 4);
	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

	/*
	 * Only update bit 31 - bit 4,
	 * leave bit 3 - bit 0 unchanged.
	 */
	*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
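/*
 * Illustrative sketch (assumption, not part of the original header): a 64-bit
 * memory BAR is programmed by the guest as two 32-bit config-space writes, so
 * the emulation updates the low and high dwords separately, e.g.:
 *
 *	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, lo_val, true);
 *	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_3, hi_val, false);
 *
 * Only the low dword keeps its BAR attribute bits (bits 3:0) intact.
 */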
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
/* Validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	  (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
	 (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
	 (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	(gvt_gmadr_is_aperture(gvt, gmadr) || \
	 gvt_gmadr_is_hidden(gvt, gmadr))
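/*
 * Illustrative usage sketch (not part of the original header): a guest GM
 * address must fall in the vGPU's own aperture or hidden range before it is
 * used, e.g.:
 *
 *	if (!vgpu_gmadr_is_valid(vgpu, gma))
 *		return -EINVAL;
 */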
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);
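/*
 * Illustrative usage sketch (assumption, not part of the original header):
 * translate a guest GM address to a host GM address, failing if it lies
 * outside the vGPU's allocated ranges:
 *
 *	u64 h_addr;
 *
 *	if (intel_gvt_ggtt_gmadr_g2h(vgpu, g_addr, &h_addr))
 *		return -EINVAL;
 */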
void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);

int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
				void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
				 void *p_data, unsigned int bytes);

void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected);

static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
{
	/* The virtual BARs are 64-bit wide. */
	return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
}
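/*
 * Illustrative usage sketch (assumption, not part of the original header):
 * fetch the guest physical base of a BAR from the virtual config space, e.g.
 * the MMIO BAR at PCI_BASE_ADDRESS_0:
 *
 *	u64 gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
 */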
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);
struct intel_gvt_ops {
	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
	int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
	int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
	int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
	struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
					  struct intel_vgpu_type *);
	void (*vgpu_destroy)(struct intel_vgpu *vgpu);
	void (*vgpu_release)(struct intel_vgpu *vgpu);
	void (*vgpu_reset)(struct intel_vgpu *);
	void (*vgpu_activate)(struct intel_vgpu *);
	void (*vgpu_deactivate)(struct intel_vgpu *);
	struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
	bool (*get_gvt_attrs)(struct attribute ***type_attrs,
			      struct attribute_group ***intel_vgpu_type_groups);
	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
	void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);

	GVT_FAILSAFE_UNSUPPORTED_GUEST,
	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
	GVT_FAILSAFE_GUEST_ERR,
static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}

static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put_unchecked(dev_priv);
}
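/*
 * Illustrative usage sketch (assumption, not part of the original header):
 * bracket any access that must reach real hardware registers so the device
 * is held out of runtime suspend for its duration:
 *
 *	mmio_hw_access_pre(dev_priv);
 *	hw_val = read_hw_register(dev_priv, offset);	// hypothetical helper
 *	mmio_hw_access_post(dev_priv);
 */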
/**
 * intel_gvt_mmio_set_accessed - mark an MMIO register as accessed
 * @offset: register offset
 */
static inline void intel_gvt_mmio_set_accessed(
		struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
}
/**
 * intel_gvt_mmio_is_cmd_access - check if an MMIO register can be accessed by GPU commands
 * @offset: register offset
 */
static inline bool intel_gvt_mmio_is_cmd_access(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}
/**
 * intel_gvt_mmio_is_unalign - check if an MMIO register can be accessed unaligned
 * @offset: register offset
 */
static inline bool intel_gvt_mmio_is_unalign(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}
/**
 * intel_gvt_mmio_set_cmd_accessed - mark an MMIO register as accessed by a GPU command
 * @offset: register offset
 */
static inline void intel_gvt_mmio_set_cmd_accessed(
		struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
}
/**
 * intel_gvt_mmio_has_mode_mask - check if an MMIO register has a mode mask
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO register has a mode mask in its higher 16 bits, false otherwise.
 */
static inline bool intel_gvt_mmio_has_mode_mask(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}
/**
 * intel_gvt_mmio_is_in_ctx - check if an MMIO register is in the logical context
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO register has an in-context mask, false otherwise.
 */
static inline bool intel_gvt_mmio_is_in_ctx(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
}
/**
 * intel_gvt_mmio_set_in_ctx - mark an MMIO register as included in the logical context
 * @offset: register offset
 */
static inline void intel_gvt_mmio_set_in_ctx(
		struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
}
int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
int intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);