/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

#define _EL_OFFSET_STATUS	0x234
#define _EL_OFFSET_STATUS_BUF	0x370
#define _EL_OFFSET_STATUS_PTR	0x3A0

#define execlist_ring_mmio(gvt, ring_id, offset) \
	(gvt->dev_priv->engine[ring_id]->mmio_base + (offset))

#define valid_context(ctx) ((ctx)->valid)
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
		((a)->lrca == (b)->lrca))

static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
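
/* Per-ring event used when injecting a virtual context-switch interrupt. */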
static int context_switch_events[] = {
	[RCS] = RCS_AS_CONTEXT_SWITCH,
	[BCS] = BCS_AS_CONTEXT_SWITCH,
	[VCS] = VCS_AS_CONTEXT_SWITCH,
	[VCS2] = VCS2_AS_CONTEXT_SWITCH,
	[VECS] = VECS_AS_CONTEXT_SWITCH,
};

static int ring_id_to_context_switch_event(int ring_id)
{
	if (WARN_ON(ring_id < RCS ||
		    ring_id >= ARRAY_SIZE(context_switch_events)))
		return -EINVAL;

	return context_switch_events[ring_id];
}

static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
{
	gvt_dbg_el("[before] running slot %d/context %x pending slot %d\n",
			execlist->running_slot ?
			execlist->running_slot->index : -1,
			execlist->running_context ?
			execlist->running_context->context_id : 0,
			execlist->pending_slot ?
			execlist->pending_slot->index : -1);

	execlist->running_slot = execlist->pending_slot;
	execlist->pending_slot = NULL;
	execlist->running_context = execlist->running_context ?
		&execlist->running_slot->ctx[0] : NULL;

	gvt_dbg_el("[after] running slot %d/context %x pending slot %d\n",
			execlist->running_slot ?
			execlist->running_slot->index : -1,
			execlist->running_context ?
			execlist->running_context->context_id : 0,
			execlist->pending_slot ?
			execlist->pending_slot->index : -1);
}
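
/*
 * Rebuild the virtual EXECLIST_STATUS register from the tracked running
 * and pending slots so the guest sees a consistent hardware view.
 */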
static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
{
	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
	struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
	struct execlist_ctx_descriptor_format *desc = execlist->running_context;
	struct intel_vgpu *vgpu = execlist->vgpu;
	struct execlist_status_format status;
	int ring_id = execlist->ring_id;
	u32 status_reg = execlist_ring_mmio(vgpu->gvt,
			ring_id, _EL_OFFSET_STATUS);

	status.ldw = vgpu_vreg(vgpu, status_reg);
	status.udw = vgpu_vreg(vgpu, status_reg + 4);

	if (running) {
		status.current_execlist_pointer = !!running->index;
		status.execlist_write_pointer = !!!running->index;
		status.execlist_0_active = status.execlist_0_valid =
			!!!(running->index);
		status.execlist_1_active = status.execlist_1_valid =
			!!(running->index);
	} else {
		status.context_id = 0;
		status.execlist_0_active = status.execlist_0_valid = 0;
		status.execlist_1_active = status.execlist_1_valid = 0;
	}

	status.context_id = desc ? desc->context_id : 0;
	status.execlist_queue_full = !!(pending);

	vgpu_vreg(vgpu, status_reg) = status.ldw;
	vgpu_vreg(vgpu, status_reg + 4) = status.udw;

	gvt_dbg_el("vgpu%d: status reg offset %x ldw %x udw %x\n",
		vgpu->id, status_reg, status.ldw, status.udw);
}
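
/*
 * Write one context status event into the virtual CSB, advance the write
 * pointer, and (unless deferred) inject the context-switch interrupt.
 */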
static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
		struct execlist_context_status_format *status,
		bool trigger_interrupt_later)
{
	struct intel_vgpu *vgpu = execlist->vgpu;
	int ring_id = execlist->ring_id;
	struct execlist_context_status_pointer_format ctx_status_ptr;
	u32 write_pointer;
	u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;

	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
			_EL_OFFSET_STATUS_PTR);
	ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
			_EL_OFFSET_STATUS_BUF);

	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);

	write_pointer = ctx_status_ptr.write_ptr;

	if (write_pointer == 0x7)
		write_pointer = 0;
	else {
		++write_pointer;
		write_pointer %= 0x6;
	}

	offset = ctx_status_buf_reg + write_pointer * 8;

	vgpu_vreg(vgpu, offset) = status->ldw;
	vgpu_vreg(vgpu, offset + 4) = status->udw;

	ctx_status_ptr.write_ptr = write_pointer;
	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;

	gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
		vgpu->id, write_pointer, offset, status->ldw, status->udw);

	if (trigger_interrupt_later)
		return;

	intel_vgpu_trigger_virtual_event(vgpu,
			ring_id_to_context_switch_event(execlist->ring_id));
}
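
/*
 * Emulate the CSB events a schedule-out of @ctx would produce on real
 * hardware: element switch, context complete, active-to-idle, plus the
 * follow-up idle-to-active when a pending execlist takes over.
 */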
static int emulate_execlist_ctx_schedule_out(
		struct intel_vgpu_execlist *execlist,
		struct execlist_ctx_descriptor_format *ctx)
{
	struct intel_vgpu *vgpu = execlist->vgpu;
	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
	struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
	struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
	struct execlist_ctx_descriptor_format *ctx1 = &running->ctx[1];
	struct execlist_context_status_format status;

	memset(&status, 0, sizeof(status));

	gvt_dbg_el("schedule out context id %x\n", ctx->context_id);

	if (WARN_ON(!same_context(ctx, execlist->running_context))) {
		gvt_vgpu_err("schedule out context is not running context, "
				"ctx id %x running ctx id %x\n",
				ctx->context_id,
				execlist->running_context->context_id);
		return -EINVAL;
	}

	/* ctx1 is valid, ctx0/ctx is scheduled-out -> element switch */
	if (valid_context(ctx1) && same_context(ctx0, ctx)) {
		gvt_dbg_el("ctx 1 valid, ctx/ctx 0 is scheduled-out\n");

		execlist->running_context = ctx1;

		emulate_execlist_status(execlist);

		status.context_complete = status.element_switch = 1;
		status.context_id = ctx->context_id;

		emulate_csb_update(execlist, &status, false);
	/*
	 * ctx1 is not valid, ctx == ctx0
	 * ctx1 is valid, ctx1 == ctx
	 *	--> last element is finished
	 * emulate:
	 *	active-to-idle if there is *no* pending execlist
	 *	context-complete if there *is* pending execlist
	 */
	} else if ((!valid_context(ctx1) && same_context(ctx0, ctx))
			|| (valid_context(ctx1) && same_context(ctx1, ctx))) {
		gvt_dbg_el("need to switch virtual execlist slot\n");

		switch_virtual_execlist_slot(execlist);

		emulate_execlist_status(execlist);

		status.context_complete = status.active_to_idle = 1;
		status.context_id = ctx->context_id;

		if (!pending) {
			emulate_csb_update(execlist, &status, false);
		} else {
			emulate_csb_update(execlist, &status, true);

			memset(&status, 0, sizeof(status));

			status.idle_to_active = 1;
			status.context_id = 0;

			emulate_csb_update(execlist, &status, false);
		}
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}
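
/* Pick the free slot indicated by the virtual status register, if any. */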
static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
		struct intel_vgpu_execlist *execlist)
{
	struct intel_vgpu *vgpu = execlist->vgpu;
	int ring_id = execlist->ring_id;
	u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
			_EL_OFFSET_STATUS);
	struct execlist_status_format status;

	status.ldw = vgpu_vreg(vgpu, status_reg);
	status.udw = vgpu_vreg(vgpu, status_reg + 4);

	if (status.execlist_queue_full) {
		gvt_vgpu_err("virtual execlist slots are full\n");
		return NULL;
	}

	return &execlist->slot[status.execlist_write_pointer];
}
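
/*
 * Emulate an ELSP write: place the two incoming context descriptors in a
 * free slot and raise idle-to-active, lite-restore/preempted or pending
 * status, matching what the guest would observe from real hardware.
 */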
static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
		struct execlist_ctx_descriptor_format ctx[2])
{
	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
	struct intel_vgpu_execlist_slot *slot =
		get_next_execlist_slot(execlist);

	struct execlist_ctx_descriptor_format *ctx0, *ctx1;
	struct execlist_context_status_format status;
	struct intel_vgpu *vgpu = execlist->vgpu;

	gvt_dbg_el("emulate schedule-in\n");

	if (!slot) {
		gvt_vgpu_err("no available execlist slot\n");
		return -EINVAL;
	}

	memset(&status, 0, sizeof(status));
	memset(slot->ctx, 0, sizeof(slot->ctx));

	slot->ctx[0] = ctx[0];
	slot->ctx[1] = ctx[1];

	gvt_dbg_el("alloc slot index %d ctx 0 %x ctx 1 %x\n",
			slot->index, ctx[0].context_id,
			ctx[1].context_id);

	/*
	 * no running execlist, make this write bundle as running execlist
	 * -> idle-to-active
	 */
	if (!running) {
		gvt_dbg_el("no current running execlist\n");

		execlist->running_slot = slot;
		execlist->pending_slot = NULL;
		execlist->running_context = &slot->ctx[0];

		gvt_dbg_el("running slot index %d running context %x\n",
				execlist->running_slot->index,
				execlist->running_context->context_id);

		emulate_execlist_status(execlist);

		status.idle_to_active = 1;
		status.context_id = 0;

		emulate_csb_update(execlist, &status, false);
		return 0;
	}

	ctx0 = &running->ctx[0];
	ctx1 = &running->ctx[1];

	gvt_dbg_el("current running slot index %d ctx 0 %x ctx 1 %x\n",
		running->index, ctx0->context_id, ctx1->context_id);

	/*
	 * already has a running execlist
	 *	a. running ctx1 is valid,
	 *	   ctx0 is finished, and running ctx1 == new execlist ctx[0]
	 *	b. running ctx1 is not valid,
	 *	   ctx0 == new execlist ctx[0]
	 * ----> lite-restore + preempted
	 */
	if ((valid_context(ctx1) && same_context(ctx1, &slot->ctx[0]) &&
		/* condition a */
		(!same_context(ctx0, execlist->running_context))) ||
			(!valid_context(ctx1) &&
			 same_context(ctx0, &slot->ctx[0]))) { /* condition b */
		gvt_dbg_el("need to switch virtual execlist slot\n");

		execlist->pending_slot = slot;
		switch_virtual_execlist_slot(execlist);

		emulate_execlist_status(execlist);

		status.lite_restore = status.preempted = 1;
		status.context_id = ctx[0].context_id;

		emulate_csb_update(execlist, &status, false);
	} else {
		gvt_dbg_el("emulate as pending slot\n");
		/*
		 * --> emulate that a pending execlist exists, but without
		 * preemption
		 */
		execlist->pending_slot = slot;
		emulate_execlist_status(execlist);
	}
	return 0;
}

static void free_workload(struct intel_vgpu_workload *workload)
{
	intel_vgpu_unpin_mm(workload->shadow_mm);
	intel_gvt_mm_unreference(workload->shadow_mm);
	kmem_cache_free(workload->vgpu->workloads, workload);
}

#define get_desc_from_elsp_dwords(ed, i) \
	((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
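
/*
 * Pin every shadow batch buffer into GGTT and patch the relocated
 * batch-buffer start address to point at the shadow copy.
 */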
static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	struct intel_shadow_bb_entry *entry_obj;

	/* pin the gem object to ggtt */
	list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
		struct i915_vma *vma;

		vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		/* FIXME: we are not tracking our pinned VMA leaving it
		 * up to the core to fix up the stray pin_count upon
		 * free.
		 */

		/* update the relocated gma with the shadow batch buffer */
		entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
		if (gmadr_bytes == 8)
			entry_obj->bb_start_cmd_va[2] = 0;
	}
	return 0;
}
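
/*
 * Propagate the shadowed per-context and indirect context addresses into
 * the shadow ring context image used for submission.
 */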
static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct intel_vgpu_workload *workload = container_of(wa_ctx,
					struct intel_vgpu_workload,
					wa_ctx);
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap_atomic(page);

	shadow_ring_context->bb_per_ctx_ptr.val =
		(shadow_ring_context->bb_per_ctx_ptr.val &
		(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
	shadow_ring_context->rcs_indirect_ctx.val =
		(shadow_ring_context->rcs_indirect_ctx.val &
		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;

	kunmap_atomic(shadow_ring_context);
	return 0;
}

static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct i915_vma *vma;
	unsigned char *per_ctx_va =
		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
		wa_ctx->indirect_ctx.size;

	if (wa_ctx->indirect_ctx.size == 0)
		return 0;

	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
				       0, CACHELINE_BYTES, 0);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* FIXME: we are not tracking our pinned VMA leaving it
	 * up to the core to fix up the stray pin_count upon
	 * free.
	 */

	wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);

	wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
	memset(per_ctx_va, 0, CACHELINE_BYTES);

	update_wa_ctx_2_shadow_ctx(wa_ctx);
	return 0;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	/* release all the shadow batch buffers */
	if (!list_empty(&workload->shadow_bb)) {
		struct intel_shadow_bb_entry *entry_obj =
			list_first_entry(&workload->shadow_bb,
					 struct intel_shadow_bb_entry,
					 list);
		struct intel_shadow_bb_entry *temp;

		list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
					 list) {
			i915_gem_object_unpin_map(entry_obj->obj);
			i915_gem_object_put(entry_obj->obj);
			list_del(&entry_obj->list);
			kfree(entry_obj);
		}
	}
}
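
/*
 * ->prepare() callback for an execlist workload: pin the shadow PPGTT,
 * sync OOS pages, build the request, pin shadow buffers and, if needed,
 * emulate the guest-visible schedule-in before the workload is dispatched.
 */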
static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct execlist_ctx_descriptor_format ctx[2];
	int ring_id = workload->ring_id;
	int ret;

	ret = intel_vgpu_pin_mm(workload->shadow_mm);
	if (ret) {
		gvt_vgpu_err("fail to vgpu pin mm\n");
		goto out;
	}

	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to vgpu sync oos pages\n");
		goto err_unpin_mm;
	}

	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to flush post shadow\n");
		goto err_unpin_mm;
	}

	ret = intel_gvt_generate_request(workload);
	if (ret) {
		gvt_vgpu_err("fail to generate request\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_batch_buffer(workload);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
		goto err_shadow_batch;
	}

	if (!workload->emulate_schedule_in)
		return 0;

	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);

	ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
	if (!ret)
		goto out;
	else
		gvt_vgpu_err("fail to emulate execlist schedule in\n");

	release_shadow_wa_ctx(&workload->wa_ctx);
err_shadow_batch:
	release_shadow_batch_buffer(workload);
err_unpin_mm:
	intel_vgpu_unpin_mm(workload->shadow_mm);
out:
	return ret;
}
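
/*
 * ->complete() callback: release shadow resources, emulate vGPU hang
 * clean-up on failure or engine reset, and otherwise emulate the
 * schedule-out CSB events unless the next workload lite-restores the
 * same context.
 */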
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	int ring_id = workload->ring_id;
	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
	struct intel_vgpu_workload *next_workload;
	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
	bool lite_restore = false;
	int ret;

	gvt_dbg_el("complete workload %p status %d\n", workload,
			workload->status);

	if (!workload->status) {
		release_shadow_batch_buffer(workload);
		release_shadow_wa_ctx(&workload->wa_ctx);
	}

	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
		/* If workload->status is not successful, the HW GPU hit a
		 * hang or something went wrong with i915/GVT, and GVT won't
		 * inject a context switch interrupt to the guest. This error
		 * is therefore a vGPU hang from the guest's point of view,
		 * so we should emulate a vGPU hang. If there are pending
		 * workloads already submitted by the guest, clean them up
		 * the way HW GPU would.
		 *
		 * If we are in the middle of an engine reset, the pending
		 * workloads won't be submitted to the HW GPU and will be
		 * cleaned up during the reset later, so doing the workload
		 * clean up here has no impact.
		 */
		clean_workloads(vgpu, ENGINE_MASK(ring_id));
		goto out;
	}

	if (!list_empty(workload_q_head(vgpu, ring_id))) {
		struct execlist_ctx_descriptor_format *this_desc, *next_desc;

		next_workload = container_of(next,
				struct intel_vgpu_workload, list);
		this_desc = &workload->ctx_desc;
		next_desc = &next_workload->ctx_desc;

		lite_restore = same_context(this_desc, next_desc);
	}

	if (lite_restore) {
		gvt_dbg_el("next context == current - no schedule-out\n");
		free_workload(workload);
		return 0;
	}

	ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
	if (ret)
		goto err;
out:
	free_workload(workload);
	return 0;
err:
	free_workload(workload);
	return ret;
}

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

static void read_guest_pdps(struct intel_vgpu *vgpu,
		u64 ring_context_gpa, u32 pdp[8])
{
	u64 gpa;
	int i;

	gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);

	for (i = 0; i < 8; i++)
		intel_gvt_hypervisor_read_gpa(vgpu,
				gpa + i * 8, &pdp[7 - i], 4);
}
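
/*
 * Find (or create) the shadow PPGTT mm that matches the page-directory
 * pointers in the guest ring context and attach it to the workload.
 */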
static int prepare_mm(struct intel_vgpu_workload *workload)
{
	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
	struct intel_vgpu_mm *mm;
	struct intel_vgpu *vgpu = workload->vgpu;
	int page_table_level;
	u32 pdp[8];

	if (desc->addressing_mode == 1) { /* legacy 32-bit */
		page_table_level = 3;
	} else if (desc->addressing_mode == 3) { /* legacy 64-bit */
		page_table_level = 4;
	} else {
		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
		return -EINVAL;
	}

	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);

	mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
	if (mm) {
		intel_gvt_mm_reference(mm);
	} else {
		mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
				pdp, page_table_level, 0);
		if (IS_ERR(mm)) {
			gvt_vgpu_err("fail to create mm object.\n");
			return PTR_ERR(mm);
		}
	}
	workload->shadow_mm = mm;
	return 0;
}

#define get_last_workload(q) \
	(list_empty(q) ? NULL : container_of(q->prev, \
	struct intel_vgpu_workload, list))
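
/*
 * Build a workload from a guest context descriptor: read the ring state
 * from the guest ring context, shadow the first queued workload, and
 * queue it for the GVT scheduler.
 */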
static int submit_context(struct intel_vgpu *vgpu, int ring_id,
		struct execlist_ctx_descriptor_format *desc,
		bool emulate_schedule_in)
{
	struct list_head *q = workload_q_head(vgpu, ring_id);
	struct intel_vgpu_workload *last_workload = get_last_workload(q);
	struct intel_vgpu_workload *workload = NULL;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	u64 ring_context_gpa;
	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
	int ret;

	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
			(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
		return -EINVAL;
	}

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_header.val), &head, 4);

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_tail.val), &tail, 4);

	head &= RB_HEAD_OFF_MASK;
	tail &= RB_TAIL_OFF_MASK;

	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
		gvt_dbg_el("ctx head %x real head %lx\n", head,
				last_workload->rb_tail);
		/*
		 * cannot use the guest context head pointer here,
		 * as it might not be updated at this time
		 */
		head = last_workload->rb_tail;
	}

	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);

	workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
	if (!workload)
		return -ENOMEM;

	/* record some ring buffer register values for scan and shadow */
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_start.val), &start, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

	INIT_LIST_HEAD(&workload->list);
	INIT_LIST_HEAD(&workload->shadow_bb);

	init_waitqueue_head(&workload->shadow_ctx_status_wq);
	atomic_set(&workload->shadow_ctx_active, 0);

	workload->vgpu = vgpu;
	workload->ring_id = ring_id;
	workload->ctx_desc = *desc;
	workload->ring_context_gpa = ring_context_gpa;
	workload->rb_head = head;
	workload->rb_tail = tail;
	workload->rb_start = start;
	workload->rb_ctl = ctl;
	workload->prepare = prepare_execlist_workload;
	workload->complete = complete_execlist_workload;
	workload->status = -EINPROGRESS;
	workload->emulate_schedule_in = emulate_schedule_in;
	workload->shadowed = false;

	if (ring_id == RCS) {
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);

		workload->wa_ctx.indirect_ctx.guest_gma =
			indirect_ctx & INDIRECT_CTX_ADDR_MASK;
		workload->wa_ctx.indirect_ctx.size =
			(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
			CACHELINE_BYTES;
		workload->wa_ctx.per_ctx.guest_gma =
			per_ctx & PER_CTX_ADDR_MASK;
		workload->wa_ctx.per_ctx.valid = per_ctx & 1;
	}

	if (emulate_schedule_in)
		workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords;

	gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
			workload, ring_id, head, tail, start, ctl);

	gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
			emulate_schedule_in);

	ret = prepare_mm(workload);
	if (ret) {
		kmem_cache_free(vgpu->workloads, workload);
		return ret;
	}

	/* Only scan and shadow the first workload in the queue
	 * as there is only one pre-allocated buf-obj for shadow.
	 */
	if (list_empty(workload_q_head(vgpu, ring_id))) {
		intel_runtime_pm_get(dev_priv);
		mutex_lock(&dev_priv->drm.struct_mutex);
		intel_gvt_scan_and_shadow_workload(workload);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		intel_runtime_pm_put(dev_priv);
	}

	queue_workload(workload);
	return 0;
}
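
/*
 * Entry point for an emulated ELSP write: validate the two submitted
 * context descriptors and turn each valid one into a workload.
 */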
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
{
	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
	struct execlist_ctx_descriptor_format desc[2];
	int i, ret;

	desc[0] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
	desc[1] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);

	if (!desc[0].valid) {
		gvt_vgpu_err("invalid elsp submission, desc0 is invalid\n");
		goto inv_desc;
	}

	for (i = 0; i < ARRAY_SIZE(desc); i++) {
		if (!desc[i].valid)
			continue;
		if (!desc[i].privilege_access) {
			gvt_vgpu_err("unexpected GGTT elsp submission\n");
			goto inv_desc;
		}
	}

	/* submit workload */
	for (i = 0; i < ARRAY_SIZE(desc); i++) {
		if (!desc[i].valid)
			continue;
		ret = submit_context(vgpu, ring_id, &desc[i], i == 0);
		if (ret) {
			gvt_vgpu_err("failed to submit desc %d\n", i);
			return ret;
		}
	}

	return 0;

inv_desc:
	gvt_vgpu_err("descriptors content: desc0 %08x %08x desc1 %08x %08x\n",
		     desc[0].udw, desc[0].ldw, desc[1].udw, desc[1].ldw);
	return -EINVAL;
}
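
/* Reset one ring's virtual execlist state and its CSB pointer register. */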
static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
{
	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
	struct execlist_context_status_pointer_format ctx_status_ptr;
	u32 ctx_status_ptr_reg;

	memset(execlist, 0, sizeof(*execlist));

	execlist->vgpu = vgpu;
	execlist->ring_id = ring_id;
	execlist->slot[0].index = 0;
	execlist->slot[1].index = 1;

	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
			_EL_OFFSET_STATUS_PTR);

	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
	ctx_status_ptr.read_ptr = 0;
	ctx_status_ptr.write_ptr = 0x7;
	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}

static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine;
	struct intel_vgpu_workload *pos, *n;
	unsigned int tmp;

	/* free the unsubmitted workloads in the queues. */
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
		list_for_each_entry_safe(pos, n,
			&vgpu->workload_q_head[engine->id], list) {
			list_del_init(&pos->list);
			free_workload(pos);
		}

		clear_bit(engine->id, vgpu->shadow_ctx_desc_updated);
	}
}

void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	clean_workloads(vgpu, ALL_ENGINES);
	kmem_cache_destroy(vgpu->workloads);

	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		kfree(vgpu->reserve_ring_buffer_va[i]);
		vgpu->reserve_ring_buffer_va[i] = NULL;
		vgpu->reserve_ring_buffer_size[i] = 0;
	}
}
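
/*
 * Set up per-ring virtual execlists, the workload slab cache, and a
 * reserved shadow ring buffer for each engine of this vGPU.
 */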
#define RESERVE_RING_BUFFER_SIZE		((1 * PAGE_SIZE)/8)
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	/* each ring has a virtual execlist engine */
	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		init_vgpu_execlist(vgpu, i);
		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
	}

	vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
			sizeof(struct intel_vgpu_workload), 0,
			SLAB_HWCACHE_ALIGN,
			NULL);

	if (!vgpu->workloads)
		return -ENOMEM;

	/* each ring has a shadow ring buffer until vgpu destroyed */
	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		vgpu->reserve_ring_buffer_va[i] =
			kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL);
		if (!vgpu->reserve_ring_buffer_va[i]) {
			gvt_vgpu_err("fail to alloc reserve ring buffer\n");
			goto out;
		}
		vgpu->reserve_ring_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
	}
	return 0;

out:
	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		if (vgpu->reserve_ring_buffer_size[i]) {
			kfree(vgpu->reserve_ring_buffer_va[i]);
			vgpu->reserve_ring_buffer_va[i] = NULL;
			vgpu->reserve_ring_buffer_size[i] = 0;
		}
	}
	return -ENOMEM;
}

void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
		unsigned long engine_mask)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine;
	unsigned int tmp;

	clean_workloads(vgpu, engine_mask);
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		init_vgpu_execlist(vgpu, engine->id);
}