/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */
#ifndef _GVT_H_
#define _GVT_H_

#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "render.h"
#include "cmd_parser.h"
#define GVT_MAX_VGPU 8

enum {
	INTEL_GVT_HYPERVISOR_XEN = 0,
	INTEL_GVT_HYPERVISOR_KVM,
};

struct intel_gvt_host {
	bool initialized;
	int hypervisor_type;
	struct intel_gvt_mpt *mpt;
};

extern struct intel_gvt_host intel_gvt_host;
/* Describe per-platform limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	u32 cfg_space_size;
	u32 mmio_size;
	u32 mmio_bar;
	unsigned long msi_cap_offset;
	u32 gtt_start_offset;
	u32 gtt_entry_size;
	u32 gtt_entry_size_shift;
	int gmadr_bytes_in_cmd;
	u32 max_surface_size;
};
/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
	u64 aperture_sz;
	u64 hidden_sz;
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;
};

#define INTEL_GVT_MAX_NUM_FENCES 32
/* Fences owned by a vGPU */
struct intel_vgpu_fence {
	struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
	u32 base;
	u32 size;
};
struct intel_vgpu_mmio {
	void *vreg;
	void *sreg;
	bool disable_warn_untrack;
};
#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
	u64 size;
	bool tracked;
};

struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
#define INTEL_GVT_MAX_PIPE 4

struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
		       INTEL_GVT_EVENT_MAX);
};
struct intel_vgpu_opregion {
	void *va;
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
	struct page *pages[INTEL_GVT_OPREGION_PAGES];
};

#define vgpu_opregion(vgpu) (&(vgpu->opregion))
#define INTEL_GVT_MAX_PORT 5

struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
	struct intel_vgpu_sbi sbi;
};
struct intel_vgpu {
	struct intel_gvt *gvt;
	int id;
	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
	bool active;
	bool resetting;
	void *sched_data;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
	struct list_head workload_q_head[I915_NUM_ENGINES];
	struct kmem_cache *workloads;
	atomic_t running_workload_num;
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	struct i915_gem_context *shadow_ctx;
	struct notifier_block shadow_ctx_notifier_block;
};
struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};

struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};

#define INTEL_GVT_MMIO_HASH_BITS 9
struct intel_gvt_mmio {
	u32 *mmio_attribute;
	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
};

struct intel_gvt_firmware {
	void *cfg_space;
	void *mmio;
	bool firmware_loaded;
};

struct intel_gvt_opregion {
	void __iomem *opregion_va;
	u32 opregion_pa;
};
struct intel_gvt {
	struct mutex lock;
	struct drm_i915_private *dev_priv;
	struct idr vgpu_idr; /* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_opregion opregion;
	struct intel_gvt_workload_scheduler scheduler;
	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;
	unsigned long service_request;
};
static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
{
	return i915->gvt;
}

enum {
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
};

static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}
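/*
 * Usage sketch (illustrative, not a declaration from this header):
 * requesting vblank emulation from the service thread amounts to
 * setting the request bit and waking the thread:
 *
 *	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
 *
 * The service thread is then expected to test-and-clear the bit in
 * gvt->service_request before acting on the request.
 */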
void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);
/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt)	  (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)

#define gvt_ggtt_gm_sz(gvt)	  (gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_sz(gvt) \
	((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt)	  (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)

#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
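/*
 * A minimal sketch of the device-level layout implied by the macros
 * above: the mappable aperture starts at gmadr 0 and the hidden
 * (CPU-invisible) range follows it immediately.
 *
 *	u64 ap_end  = gvt_aperture_gmadr_end(gvt);  // aperture size - 1
 *	u64 hid_beg = gvt_hidden_gmadr_base(gvt);   // == ap_end + 1
 *	u64 hid_end = gvt_hidden_gmadr_end(gvt);    // total GM size - 1
 */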
/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu)	((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu)		((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu)		((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
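/*
 * Likewise for one vGPU (sketch only): its guest-visible gmadr ranges
 * sit at the offsets of its drm_mm nodes within the host ranges, so a
 * guest gmadr is also a valid host gmadr under this partitioning.
 *
 *	u64 ap_base = vgpu_aperture_gmadr_base(vgpu); // low_gm_node.start
 *	u64 pa_base = vgpu_aperture_pa_base(vgpu);    // CPU-visible base
 */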
struct intel_vgpu_creation_params {
	__u64 handle;
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
	__u64 fence_sz;
	__s32 primary;
	__u64 vgpu_id;
};

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
			    u32 fence, u64 value);
/* Macros for easily accessing vGPU virtual/shadow registers */
#define vgpu_vreg(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg8(vgpu, reg) \
	(*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg16(vgpu, reg) \
	(*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg64(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg8(vgpu, reg) \
	(*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg16(vgpu, reg) \
	(*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg64(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
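/*
 * Example use (illustrative): an MMIO write handler emulates a guest
 * write by updating the virtual copy, while code that programs real
 * hardware works on the shadow copy:
 *
 *	vgpu_vreg(vgpu, offset) = guest_val;   // guest-visible state
 *	hw_val = vgpu_sreg(vgpu, offset);      // shadow (host-side) state
 */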
#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)
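/*
 * Usage sketch: iterate every active vGPU in the IDR pool; callers are
 * expected to hold gvt->lock so the pool cannot change underneath them.
 *
 *	struct intel_vgpu *vgpu;
 *	int id;
 *
 *	mutex_lock(&gvt->lock);
 *	for_each_active_vgpu(gvt, vgpu, id)
 *		emulate_vblank_for(vgpu);   // hypothetical helper
 *	mutex_unlock(&gvt->lock);
 */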
static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
		u32 offset, u32 val, bool low)
{
	u32 *pval;

	/* BAR offset should be 32-bit aligned */
	offset = rounddown(offset, 4);
	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

	if (low) {
		/*
		 * Only update bits 31..4; bits 3..0 hold the read-only
		 * PCI BAR flags and must stay unchanged.
		 */
		*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
	}
}
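/*
 * Hypothetical caller sketch: when the guest rewrites a BAR during PCI
 * probing, only the low dword is masked, because bits 3:0 of the low
 * dword carry the read-only BAR flag bits (memory/IO type,
 * prefetchable) that the guest must not be able to change.
 *
 *	intel_vgpu_write_pci_bar(vgpu, bar_offset, new_val, true);
 */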
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_creation_params *param);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
/* Macros for validating guest GM addresses */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	(vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	 vgpu_gmadr_is_hidden(vgpu, gmadr))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
	 (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
	 (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	(gvt_gmadr_is_aperture(gvt, gmadr) || \
	 gvt_gmadr_is_hidden(gvt, gmadr))
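/*
 * Intended use (sketch): validate a guest graphics memory address
 * before translating or dereferencing anything derived from it:
 *
 *	if (!vgpu_gmadr_is_valid(vgpu, gmadr))
 *		return -EINVAL;   // outside both aperture and hidden ranges
 */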
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);
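/*
 * Example flow (illustrative): translate a guest gmadr to the host view
 * before using it on real hardware; a non-zero return signals an
 * address outside the vGPU's ranges.
 *
 *	u64 h_addr;
 *
 *	if (intel_gvt_ggtt_gmadr_g2h(vgpu, g_addr, &h_addr))
 *		return -EINVAL;
 *	// h_addr is now safe to use when shadowing GGTT entries
 */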
int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
				void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
				 void *p_data, unsigned int bytes);
void intel_gvt_clean_opregion(struct intel_gvt *gvt);
int intel_gvt_init_opregion(struct intel_gvt *gvt);

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
int setup_vgpu_mmio(struct intel_vgpu *vgpu);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

#endif /* _GVT_H_ */