drivers/gpu/drm/i915/gvt/execlist.c
1 /*
2  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  *
23  * Authors:
24  *    Zhiyuan Lv <zhiyuan.lv@intel.com>
25  *    Zhi Wang <zhi.a.wang@intel.com>
26  *
27  * Contributors:
28  *    Min He <min.he@intel.com>
29  *    Bing Niu <bing.niu@intel.com>
30  *    Ping Gao <ping.a.gao@intel.com>
31  *    Tina Zhang <tina.zhang@intel.com>
32  *
33  */
34
35 #include "i915_drv.h"
36 #include "gvt.h"
37
38 #define _EL_OFFSET_STATUS       0x234
39 #define _EL_OFFSET_STATUS_BUF   0x370
40 #define _EL_OFFSET_STATUS_PTR   0x3A0
41
42 #define execlist_ring_mmio(gvt, ring_id, offset) \
43         (gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
44
45 #define valid_context(ctx) ((ctx)->valid)
46 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
47                 ((a)->lrca == (b)->lrca))
48
49 static int context_switch_events[] = {
50         [RCS] = RCS_AS_CONTEXT_SWITCH,
51         [BCS] = BCS_AS_CONTEXT_SWITCH,
52         [VCS] = VCS_AS_CONTEXT_SWITCH,
53         [VCS2] = VCS2_AS_CONTEXT_SWITCH,
54         [VECS] = VECS_AS_CONTEXT_SWITCH,
55 };
56
57 static int ring_id_to_context_switch_event(int ring_id)
58 {
59         if (WARN_ON(ring_id < RCS || ring_id >=
60                                 ARRAY_SIZE(context_switch_events)))
61                 return -EINVAL;
62
63         return context_switch_events[ring_id];
64 }
65
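/*
 * Promote the pending virtual execlist slot to be the running slot. The
 * running context is repointed at element 0 of the newly promoted slot when
 * a context was in flight, and left NULL otherwise.
 */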
66 static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
67 {
68         gvt_dbg_el("[before] running slot %d/context %x pending slot %d\n",
69                         execlist->running_slot ?
70                         execlist->running_slot->index : -1,
71                         execlist->running_context ?
72                         execlist->running_context->context_id : 0,
73                         execlist->pending_slot ?
74                         execlist->pending_slot->index : -1);
75
76         execlist->running_slot = execlist->pending_slot;
77         execlist->pending_slot = NULL;
78         execlist->running_context = execlist->running_context ?
79                 &execlist->running_slot->ctx[0] : NULL;
80
81         gvt_dbg_el("[after] running slot %d/context %x pending slot %d\n",
82                         execlist->running_slot ?
83                         execlist->running_slot->index : -1,
84                         execlist->running_context ?
85                         execlist->running_context->context_id : 0,
86                         execlist->pending_slot ?
87                         execlist->pending_slot->index : -1);
88 }
89
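/*
 * Refresh the vGPU's EXECLIST_STATUS register so that it reflects the
 * emulated state: which slot is current, which element is active/valid,
 * the id of the running context, and whether the two-slot queue is full.
 */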
90 static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
91 {
92         struct intel_vgpu_execlist_slot *running = execlist->running_slot;
93         struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
94         struct execlist_ctx_descriptor_format *desc = execlist->running_context;
95         struct intel_vgpu *vgpu = execlist->vgpu;
96         struct execlist_status_format status;
97         int ring_id = execlist->ring_id;
98         u32 status_reg = execlist_ring_mmio(vgpu->gvt,
99                         ring_id, _EL_OFFSET_STATUS);
100
101         status.ldw = vgpu_vreg(vgpu, status_reg);
102         status.udw = vgpu_vreg(vgpu, status_reg + 4);
103
104         if (running) {
105                 status.current_execlist_pointer = !!running->index;
106                 status.execlist_write_pointer = !running->index;
107                 status.execlist_0_active = status.execlist_0_valid =
108                         !running->index;
109                 status.execlist_1_active = status.execlist_1_valid =
110                         !!(running->index);
111         } else {
112                 status.context_id = 0;
113                 status.execlist_0_active = status.execlist_0_valid = 0;
114                 status.execlist_1_active = status.execlist_1_valid = 0;
115         }
116
117         status.context_id = desc ? desc->context_id : 0;
118         status.execlist_queue_full = !!(pending);
119
120         vgpu_vreg(vgpu, status_reg) = status.ldw;
121         vgpu_vreg(vgpu, status_reg + 4) = status.udw;
122
123         gvt_dbg_el("vgpu%d: status reg offset %x ldw %x udw %x\n",
124                 vgpu->id, status_reg, status.ldw, status.udw);
125 }
126
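/*
 * Emit one context status event into the vGPU's context status buffer and
 * advance the virtual write pointer. The CSB is modelled as six 8-byte
 * entries, with 0x7 denoting the post-reset pointer value, so the pointer
 * moves 0x7 -> 0 -> 1 -> ... -> 5 -> 0. Unless the caller asks to defer it,
 * a context-switch interrupt is injected into the guest afterwards.
 */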
127 static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
128                 struct execlist_context_status_format *status,
129                 bool trigger_interrupt_later)
130 {
131         struct intel_vgpu *vgpu = execlist->vgpu;
132         int ring_id = execlist->ring_id;
133         struct execlist_context_status_pointer_format ctx_status_ptr;
134         u32 write_pointer;
135         u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
136
137         ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
138                         _EL_OFFSET_STATUS_PTR);
139         ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
140                         _EL_OFFSET_STATUS_BUF);
141
142         ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
143
144         write_pointer = ctx_status_ptr.write_ptr;
145
146         if (write_pointer == 0x7)
147                 write_pointer = 0;
148         else {
149                 ++write_pointer;
150                 write_pointer %= 0x6;
151         }
152
153         offset = ctx_status_buf_reg + write_pointer * 8;
154
155         vgpu_vreg(vgpu, offset) = status->ldw;
156         vgpu_vreg(vgpu, offset + 4) = status->udw;
157
158         ctx_status_ptr.write_ptr = write_pointer;
159         vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
160
161         gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
162                 vgpu->id, write_pointer, offset, status->ldw, status->udw);
163
164         if (trigger_interrupt_later)
165                 return;
166
167         intel_vgpu_trigger_virtual_event(vgpu,
168                         ring_id_to_context_switch_event(execlist->ring_id));
169 }
170
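/*
 * Emulate a context being scheduled out. Either this is an element switch
 * (ctx0 completes while a valid ctx1 remains in the running slot), or the
 * last element has finished, which raises context-complete/active-to-idle
 * and, when another slot is pending, a follow-up idle-to-active event for
 * the newly promoted slot.
 */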
171 static int emulate_execlist_ctx_schedule_out(
172                 struct intel_vgpu_execlist *execlist,
173                 struct execlist_ctx_descriptor_format *ctx)
174 {
175         struct intel_vgpu_execlist_slot *running = execlist->running_slot;
176         struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
177         struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
178         struct execlist_ctx_descriptor_format *ctx1 = &running->ctx[1];
179         struct execlist_context_status_format status;
180
181         memset(&status, 0, sizeof(status));
182
183         gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
184
185         if (WARN_ON(!same_context(ctx, execlist->running_context))) {
186                 gvt_err("schedule-out context is not the running context, "
187                                 "ctx id %x running ctx id %x\n",
188                                 ctx->context_id,
189                                 execlist->running_context->context_id);
190                 return -EINVAL;
191         }
192
193         /* ctx1 is valid, ctx0/ctx is scheduled-out -> element switch */
194         if (valid_context(ctx1) && same_context(ctx0, ctx)) {
195                 gvt_dbg_el("ctx 1 valid, ctx/ctx 0 is scheduled-out\n");
196
197                 execlist->running_context = ctx1;
198
199                 emulate_execlist_status(execlist);
200
201                 status.context_complete = status.element_switch = 1;
202                 status.context_id = ctx->context_id;
203
204                 emulate_csb_update(execlist, &status, false);
205                 /*
206                  * ctx1 is not valid, ctx == ctx0
207                  * ctx1 is valid, ctx1 == ctx
208                  *      --> last element is finished
209                  * emulate:
210                  *      active-to-idle if there is *no* pending execlist
211                  *      context-complete if there *is* pending execlist
212                  */
213         } else if ((!valid_context(ctx1) && same_context(ctx0, ctx))
214                         || (valid_context(ctx1) && same_context(ctx1, ctx))) {
215                 gvt_dbg_el("need to switch virtual execlist slot\n");
216
217                 switch_virtual_execlist_slot(execlist);
218
219                 emulate_execlist_status(execlist);
220
221                 status.context_complete = status.active_to_idle = 1;
222                 status.context_id = ctx->context_id;
223
224                 if (!pending) {
225                         emulate_csb_update(execlist, &status, false);
226                 } else {
227                         emulate_csb_update(execlist, &status, true);
228
229                         memset(&status, 0, sizeof(status));
230
231                         status.idle_to_active = 1;
232                         status.context_id = 0;
233
234                         emulate_csb_update(execlist, &status, false);
235                 }
236         } else {
237                 WARN_ON(1);
238                 return -EINVAL;
239         }
240
241         return 0;
242 }
243
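/*
 * Pick the slot addressed by the virtual EXECLIST_STATUS write pointer for
 * the next submission, or return NULL when both virtual slots are in use.
 */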
244 static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
245                 struct intel_vgpu_execlist *execlist)
246 {
247         struct intel_vgpu *vgpu = execlist->vgpu;
248         int ring_id = execlist->ring_id;
249         u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
250                         _EL_OFFSET_STATUS);
251         struct execlist_status_format status;
252
253         status.ldw = vgpu_vreg(vgpu, status_reg);
254         status.udw = vgpu_vreg(vgpu, status_reg + 4);
255
256         if (status.execlist_queue_full) {
257                 gvt_err("virtual execlist slots are full\n");
258                 return NULL;
259         }
260
261         return &execlist->slot[status.execlist_write_pointer];
262 }
263
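/*
 * Emulate an ELSP schedule-in of a context pair. With nothing running, the
 * new slot simply becomes the running slot (idle-to-active). If the pair
 * re-submits the context that is still executing (the lite-restore cases
 * spelled out below), the slot switch is emulated with a lite-restore +
 * preempted event; otherwise the pair is parked as the pending slot and
 * only the status register is refreshed.
 */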
264 static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
265                 struct execlist_ctx_descriptor_format ctx[2])
266 {
267         struct intel_vgpu_execlist_slot *running = execlist->running_slot;
268         struct intel_vgpu_execlist_slot *slot =
269                 get_next_execlist_slot(execlist);
270
271         struct execlist_ctx_descriptor_format *ctx0, *ctx1;
272         struct execlist_context_status_format status;
273
274         gvt_dbg_el("emulate schedule-in\n");
275
276         if (!slot) {
277                 gvt_err("no available execlist slot\n");
278                 return -EINVAL;
279         }
280
281         memset(&status, 0, sizeof(status));
282         memset(slot->ctx, 0, sizeof(slot->ctx));
283
284         slot->ctx[0] = ctx[0];
285         slot->ctx[1] = ctx[1];
286
287         gvt_dbg_el("alloc slot index %d ctx 0 %x ctx 1 %x\n",
288                         slot->index, ctx[0].context_id,
289                         ctx[1].context_id);
290
291         /*
292          * no running execlist; make this submitted pair the running execlist
293          * -> idle-to-active
294          */
295         if (!running) {
296                 gvt_dbg_el("no current running execlist\n");
297
298                 execlist->running_slot = slot;
299                 execlist->pending_slot = NULL;
300                 execlist->running_context = &slot->ctx[0];
301
302                 gvt_dbg_el("running slot index %d running context %x\n",
303                                 execlist->running_slot->index,
304                                 execlist->running_context->context_id);
305
306                 emulate_execlist_status(execlist);
307
308                 status.idle_to_active = 1;
309                 status.context_id = 0;
310
311                 emulate_csb_update(execlist, &status, false);
312                 return 0;
313         }
314
315         ctx0 = &running->ctx[0];
316         ctx1 = &running->ctx[1];
317
318         gvt_dbg_el("current running slot index %d ctx 0 %x ctx 1 %x\n",
319                 running->index, ctx0->context_id, ctx1->context_id);
320
321         /*
322          * already has a running execlist
323          *      a. running ctx1 is valid,
324          *         ctx0 is finished, and running ctx1 == new execlist ctx[0]
325          *      b. running ctx1 is not valid,
326          *         ctx0 == new execlist ctx[0]
327          * ----> lite-restore + preempted
328          */
329         if ((valid_context(ctx1) && same_context(ctx1, &slot->ctx[0]) &&
330                 /* condition a */
331                 (!same_context(ctx0, execlist->running_context))) ||
332                         (!valid_context(ctx1) &&
333                          same_context(ctx0, &slot->ctx[0]))) { /* condition b */
334                 gvt_dbg_el("need to switch virtual execlist slot\n");
335
336                 execlist->pending_slot = slot;
337                 switch_virtual_execlist_slot(execlist);
338
339                 emulate_execlist_status(execlist);
340
341                 status.lite_restore = status.preempted = 1;
342                 status.context_id = ctx[0].context_id;
343
344                 emulate_csb_update(execlist, &status, false);
345         } else {
346                 gvt_dbg_el("emulate as pending slot\n");
347                 /*
348                  * otherwise
349                  * --> emulate the pending-execlist case without preemption
350                  */
351                 execlist->pending_slot = slot;
352                 emulate_execlist_status(execlist);
353         }
354         return 0;
355 }
356
357 static void free_workload(struct intel_vgpu_workload *workload)
358 {
359         intel_vgpu_unpin_mm(workload->shadow_mm);
360         intel_gvt_mm_unreference(workload->shadow_mm);
361         kmem_cache_free(workload->vgpu->workloads, workload);
362 }
363
364 #define get_desc_from_elsp_dwords(ed, i) \
365         ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
366
367
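/*
 * Patch the graphics memory address in a shadowed batch-buffer-start
 * command: the dword after the opcode receives the dword-aligned low
 * address bits, and a second address dword is written when the command
 * format carries 8-byte addresses.
 */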
368 #define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
369 #define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
370 static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
371                              unsigned long add, int gmadr_bytes)
372 {
373         if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
374                 return -1;
375
376         *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
377                 BATCH_BUFFER_ADDR_MASK;
378         if (gmadr_bytes == 8) {
379                 *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
380                         add & BATCH_BUFFER_ADDR_HIGH_MASK;
381         }
382
383         return 0;
384 }
385
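/*
 * Pin every shadow batch buffer object into the global GTT and rewrite the
 * batch-buffer-start commands so that they point at the shadow copies
 * instead of the guest's buffers.
 */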
386 static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
387 {
388         int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
389         struct i915_vma *vma;
390         unsigned long gma;
391
392         /* pin the gem object to ggtt */
393         if (!list_empty(&workload->shadow_bb)) {
394                 struct intel_shadow_bb_entry *entry_obj =
395                         list_first_entry(&workload->shadow_bb,
396                                          struct intel_shadow_bb_entry,
397                                          list);
398                 struct intel_shadow_bb_entry *temp;
399
400                 list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
401                                 list) {
402                         vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
403                                         0, 0);
404                         if (IS_ERR(vma)) {
405                                 gvt_err("Cannot pin\n");
406                                 return;
407                         }
408                         i915_gem_object_unpin_pages(entry_obj->obj);
409
410                         /* update the relocated gma with the shadow batch buffer address */
411                         gma = i915_gem_object_ggtt_offset(entry_obj->obj, NULL);
412                         WARN_ON(!IS_ALIGNED(gma, 4));
413                         set_gma_to_bb_cmd(entry_obj, gma, gmadr_bytes);
414                 }
415         }
416 }
417
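/*
 * Point the per-context and RCS indirect-context pointers in the shadow
 * ring context at the shadowed workaround context images, preserving the
 * non-address bits of both fields.
 */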
418 static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
419 {
420         int ring_id = wa_ctx->workload->ring_id;
421         struct i915_gem_context *shadow_ctx =
422                 wa_ctx->workload->vgpu->shadow_ctx;
423         struct drm_i915_gem_object *ctx_obj =
424                 shadow_ctx->engine[ring_id].state->obj;
425         struct execlist_ring_context *shadow_ring_context;
426         struct page *page;
427
428         page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
429         shadow_ring_context = kmap_atomic(page);
430
431         shadow_ring_context->bb_per_ctx_ptr.val =
432                 (shadow_ring_context->bb_per_ctx_ptr.val &
433                 (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
434         shadow_ring_context->rcs_indirect_ctx.val =
435                 (shadow_ring_context->rcs_indirect_ctx.val &
436                 (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
437
438         kunmap_atomic(shadow_ring_context);
439         return 0;
440 }
441
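/*
 * Pin the shadow indirect (workaround) context into the global GTT, record
 * its GGTT address, pick up the per-context pointer stored just after the
 * indirect context image (then clear that cacheline), and propagate the new
 * addresses into the shadow ring context.
 */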
442 static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
443 {
444         struct i915_vma *vma;
445         unsigned long gma;
446         unsigned char *per_ctx_va =
447                 (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
448                 wa_ctx->indirect_ctx.size;
449
450         if (wa_ctx->indirect_ctx.size == 0)
451                 return;
452
453         vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, 0, 0, 0);
454         if (IS_ERR(vma)) {
455                 gvt_err("Cannot pin indirect ctx obj\n");
456                 return;
457         }
458         i915_gem_object_unpin_pages(wa_ctx->indirect_ctx.obj);
459
460         gma = i915_gem_object_ggtt_offset(wa_ctx->indirect_ctx.obj, NULL);
461         WARN_ON(!IS_ALIGNED(gma, CACHELINE_BYTES));
462         wa_ctx->indirect_ctx.shadow_gma = gma;
463
464         wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
465         memset(per_ctx_va, 0, CACHELINE_BYTES);
466
467         update_wa_ctx_2_shadow_ctx(wa_ctx);
468 }
469
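/*
 * Per-workload prepare callback: pin the shadow page tables, synchronize
 * out-of-sync shadow pages, flush post-shadow updates, set up the shadow
 * batch buffer and workaround context, and, for the first workload of an
 * ELSP write, replay the guest's schedule-in on the virtual execlist.
 */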
470 static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
471 {
472         struct intel_vgpu *vgpu = workload->vgpu;
473         struct execlist_ctx_descriptor_format ctx[2];
474         int ring_id = workload->ring_id;
475
476         intel_vgpu_pin_mm(workload->shadow_mm);
477         intel_vgpu_sync_oos_pages(workload->vgpu);
478         intel_vgpu_flush_post_shadow(workload->vgpu);
479         prepare_shadow_batch_buffer(workload);
480         prepare_shadow_wa_ctx(&workload->wa_ctx);
481         if (!workload->emulate_schedule_in)
482                 return 0;
483
484         ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
485         ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
486
487         return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
488 }
489
490 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
491 {
492         /* release all the shadow batch buffers */
493         if (!list_empty(&workload->shadow_bb)) {
494                 struct intel_shadow_bb_entry *entry_obj =
495                         list_first_entry(&workload->shadow_bb,
496                                          struct intel_shadow_bb_entry,
497                                          list);
498                 struct intel_shadow_bb_entry *temp;
499
500                 list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
501                                          list) {
502                         drm_gem_object_unreference(&(entry_obj->obj->base));
503                         kvfree(entry_obj->va);
504                         list_del(&entry_obj->list);
505                         kfree(entry_obj);
506                 }
507         }
508 }
509
510 static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
511 {
512         if (wa_ctx->indirect_ctx.size == 0)
513                 return;
514
515         drm_gem_object_unreference(&(wa_ctx->indirect_ctx.obj->base));
516         kvfree(wa_ctx->indirect_ctx.shadow_va);
517 }
518
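/*
 * Per-workload complete callback: release the shadow resources and, unless
 * the workload failed or the vGPU is resetting, emulate the schedule-out of
 * its context. The schedule-out is skipped when the next queued workload
 * uses the same context (lite-restore).
 */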
519 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
520 {
521         struct intel_vgpu *vgpu = workload->vgpu;
522         struct intel_vgpu_execlist *execlist =
523                 &vgpu->execlist[workload->ring_id];
524         struct intel_vgpu_workload *next_workload;
525         struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
526         bool lite_restore = false;
527         int ret;
528
529         gvt_dbg_el("complete workload %p status %d\n", workload,
530                         workload->status);
531
532         release_shadow_batch_buffer(workload);
533         release_shadow_wa_ctx(&workload->wa_ctx);
534
535         if (workload->status || vgpu->resetting)
536                 goto out;
537
538         if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
539                 struct execlist_ctx_descriptor_format *this_desc, *next_desc;
540
541                 next_workload = container_of(next,
542                                 struct intel_vgpu_workload, list);
543                 this_desc = &workload->ctx_desc;
544                 next_desc = &next_workload->ctx_desc;
545
546                 lite_restore = same_context(this_desc, next_desc);
547         }
548
549         if (lite_restore) {
550                 gvt_dbg_el("next context == current - no schedule-out\n");
551                 free_workload(workload);
552                 return 0;
553         }
554
555         ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
556         if (ret)
557                 goto err;
558 out:
559         free_workload(workload);
560         return 0;
561 err:
562         free_workload(workload);
563         return ret;
564 }
565
566 #define RING_CTX_OFF(x) \
567         offsetof(struct execlist_ring_context, x)
568
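/*
 * Copy the guest PDP values out of the guest ring context image. The image
 * stores them starting at pdp3_UDW, so they are reversed into pdp[0..7].
 */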
569 static void read_guest_pdps(struct intel_vgpu *vgpu,
570                 u64 ring_context_gpa, u32 pdp[8])
571 {
572         u64 gpa;
573         int i;
574
575         gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
576
577         for (i = 0; i < 8; i++)
578                 intel_gvt_hypervisor_read_gpa(vgpu,
579                                 gpa + i * 8, &pdp[7 - i], 4);
580 }
581
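/*
 * Find or create the shadow PPGTT for this context based on its addressing
 * mode: legacy 32-bit contexts use a 3-level page table, legacy 64-bit
 * contexts a 4-level one; advanced (SVM) contexts are rejected.
 */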
582 static int prepare_mm(struct intel_vgpu_workload *workload)
583 {
584         struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
585         struct intel_vgpu_mm *mm;
586         int page_table_level;
587         u32 pdp[8];
588
589         if (desc->addressing_mode == 1) { /* legacy 32-bit */
590                 page_table_level = 3;
591         } else if (desc->addressing_mode == 3) { /* legacy 64-bit */
592                 page_table_level = 4;
593         } else {
594                 gvt_err("Advanced Context mode(SVM) is not supported!\n");
595                 return -EINVAL;
596         }
597
598         read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
599
600         mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
601         if (mm) {
602                 intel_gvt_mm_reference(mm);
603         } else {
604
605                 mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
606                                 pdp, page_table_level, 0);
607                 if (IS_ERR(mm)) {
608                         gvt_err("fail to create mm object.\n");
609                         return PTR_ERR(mm);
610                 }
611         }
612         workload->shadow_mm = mm;
613         return 0;
614 }
615
616 #define get_last_workload(q) \
617         (list_empty(q) ? NULL : container_of(q->prev, \
618         struct intel_vgpu_workload, list))
619
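/*
 * Build a workload from one guest context descriptor: locate the guest ring
 * context (the page following the LRCA), snapshot the ring buffer registers
 * and, on RCS, the workaround context pointers, set up the shadow PPGTT and
 * queue the workload on the per-ring queue.
 */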
620 static int submit_context(struct intel_vgpu *vgpu, int ring_id,
621                 struct execlist_ctx_descriptor_format *desc,
622                 bool emulate_schedule_in)
623 {
624         struct list_head *q = workload_q_head(vgpu, ring_id);
625         struct intel_vgpu_workload *last_workload = get_last_workload(q);
626         struct intel_vgpu_workload *workload = NULL;
627         u64 ring_context_gpa;
628         u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
629         int ret;
630
631         ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
632                         (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
633         if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
634                 gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
635                 return -EINVAL;
636         }
637
638         intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
639                         RING_CTX_OFF(ring_header.val), &head, 4);
640
641         intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
642                         RING_CTX_OFF(ring_tail.val), &tail, 4);
643
644         head &= RB_HEAD_OFF_MASK;
645         tail &= RB_TAIL_OFF_MASK;
646
647         if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
648                 gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
649                 gvt_dbg_el("ctx head %x real head %lx\n", head,
650                                 last_workload->rb_tail);
651                 /*
652                  * cannot use guest context head pointer here,
653                  * as it might not be updated at this time
654                  */
655                 head = last_workload->rb_tail;
656         }
657
658         gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
659
660         workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
661         if (!workload)
662                 return -ENOMEM;
663
664         /* record some ring buffer register values for scan and shadow */
665         intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
666                         RING_CTX_OFF(rb_start.val), &start, 4);
667         intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
668                         RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
669         intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
670                         RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
671
672         INIT_LIST_HEAD(&workload->list);
673         INIT_LIST_HEAD(&workload->shadow_bb);
674
675         init_waitqueue_head(&workload->shadow_ctx_status_wq);
676         atomic_set(&workload->shadow_ctx_active, 0);
677
678         workload->vgpu = vgpu;
679         workload->ring_id = ring_id;
680         workload->ctx_desc = *desc;
681         workload->ring_context_gpa = ring_context_gpa;
682         workload->rb_head = head;
683         workload->rb_tail = tail;
684         workload->rb_start = start;
685         workload->rb_ctl = ctl;
686         workload->prepare = prepare_execlist_workload;
687         workload->complete = complete_execlist_workload;
688         workload->status = -EINPROGRESS;
689         workload->emulate_schedule_in = emulate_schedule_in;
690
691         if (ring_id == RCS) {
692                 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
693                         RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
694                 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
695                         RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
696
697                 workload->wa_ctx.indirect_ctx.guest_gma =
698                         indirect_ctx & INDIRECT_CTX_ADDR_MASK;
699                 workload->wa_ctx.indirect_ctx.size =
700                         (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
701                         CACHELINE_BYTES;
702                 workload->wa_ctx.per_ctx.guest_gma =
703                         per_ctx & PER_CTX_ADDR_MASK;
704                 workload->wa_ctx.workload = workload;
705
706                 WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
707         }
708
709         if (emulate_schedule_in)
710                 memcpy(&workload->elsp_dwords,
711                                 &vgpu->execlist[ring_id].elsp_dwords,
712                                 sizeof(workload->elsp_dwords));
713
714         gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
715                         workload, ring_id, head, tail, start, ctl);
716
717         gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
718                         emulate_schedule_in);
719
720         ret = prepare_mm(workload);
721         if (ret) {
722                 kmem_cache_free(vgpu->workloads, workload);
723                 return ret;
724         }
725
726         queue_workload(workload);
727         return 0;
728 }
729
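/*
 * Entry point for a guest ELSP write: validate the two context descriptors
 * (each valid descriptor must use privileged/PPGTT access and descriptor 0
 * must be valid), then submit one workload per valid descriptor, emulating
 * the schedule-in only for the first of them.
 */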
730 int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
731 {
732         struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
733         struct execlist_ctx_descriptor_format *desc[2], valid_desc[2];
734         unsigned long valid_desc_bitmap = 0;
735         bool emulate_schedule_in = true;
736         int ret;
737         int i;
738
739         memset(valid_desc, 0, sizeof(valid_desc));
740
741         desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
742         desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
743
744         for (i = 0; i < 2; i++) {
745                 if (!desc[i]->valid)
746                         continue;
747
748                 if (!desc[i]->privilege_access) {
749                         gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
750                                         vgpu->id);
751                         return -EINVAL;
752                 }
753
754                 /* TODO: add another guest context checks here. */
755                 set_bit(i, &valid_desc_bitmap);
756                 valid_desc[i] = *desc[i];
757         }
758
759         if (!valid_desc_bitmap) {
760                 gvt_err("vgpu%d: no valid desc in an elsp submission\n",
761                                 vgpu->id);
762                 return -EINVAL;
763         }
764
765         if (!test_bit(0, (void *)&valid_desc_bitmap) &&
766                         test_bit(1, (void *)&valid_desc_bitmap)) {
767                 gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
768                                 vgpu->id);
769                 return -EINVAL;
770         }
771
772         /* submit workload */
773         for_each_set_bit(i, (void *)&valid_desc_bitmap, 2) {
774                 ret = submit_context(vgpu, ring_id, &valid_desc[i],
775                                 emulate_schedule_in);
776                 if (ret) {
777                         gvt_err("vgpu%d: fail to schedule workload\n",
778                                         vgpu->id);
779                         return ret;
780                 }
781                 emulate_schedule_in = false;
782         }
783         return 0;
784 }
785
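/*
 * Reset the software execlist state of one ring and set the virtual CSB
 * read/write pointers to 0x7, the post-reset value.
 */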
786 static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
787 {
788         struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
789         struct execlist_context_status_pointer_format ctx_status_ptr;
790         u32 ctx_status_ptr_reg;
791
792         memset(execlist, 0, sizeof(*execlist));
793
794         execlist->vgpu = vgpu;
795         execlist->ring_id = ring_id;
796         execlist->slot[0].index = 0;
797         execlist->slot[1].index = 1;
798
799         ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
800                         _EL_OFFSET_STATUS_PTR);
801
802         ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
803         ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
804         vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
805 }
806
807 void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
808 {
809         kmem_cache_destroy(vgpu->workloads);
810 }
811
812 int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
813 {
814         int i;
815
816         /* each ring has a virtual execlist engine */
817         for (i = 0; i < I915_NUM_ENGINES; i++) {
818                 init_vgpu_execlist(vgpu, i);
819                 INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
820         }
821
822         vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
823                         sizeof(struct intel_vgpu_workload), 0,
824                         SLAB_HWCACHE_ALIGN,
825                         NULL);
826
827         if (!vgpu->workloads)
828                 return -ENOMEM;
829
830         return 0;
831 }
832
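/*
 * Per-ring reset: drop any workloads still sitting in the queue of each ring
 * selected by ring_bitmap and re-initialize its virtual execlist state.
 */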
833 void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
834                 unsigned long ring_bitmap)
835 {
836         int bit;
837         struct list_head *pos, *n;
838         struct intel_vgpu_workload *workload = NULL;
839
840         for_each_set_bit(bit, &ring_bitmap, sizeof(ring_bitmap) * 8) {
841                 if (bit >= I915_NUM_ENGINES)
842                         break;
843                 /* free the unsubmitted workloads in the queue */
844                 list_for_each_safe(pos, n, &vgpu->workload_q_head[bit]) {
845                         workload = container_of(pos,
846                                         struct intel_vgpu_workload, list);
847                         list_del_init(&workload->list);
848                         free_workload(workload);
849                 }
850
851                 init_vgpu_execlist(vgpu, bit);
852         }
853 }