/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
        offsetof(struct execlist_ring_context, x)

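/*
 * Write the eight PDP root pointer dwords into the execlist ring context.
 * The context lays them out starting at pdp3_UDW, so the pdp[] array is
 * copied in reverse order.
 */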
static void set_context_pdp_root_pointer(
                struct execlist_ring_context *ring_context,
                u32 pdp[8])
{
        struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
        int i;

        for (i = 0; i < 8; i++)
                pdp_pair[i].val = pdp[7 - i];
}

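/*
 * Populate the shadow context from the guest ring context: read the guest
 * context pages through the hypervisor interface, copy the tracked register
 * values, and replace the PDP root pointers with the shadow page table root.
 */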
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *dst;
        unsigned long context_gpa, context_page_num;
        int i;

        gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
                        workload->ctx_desc.lrca);

        context_page_num = intel_lr_context_size(
                        gvt->dev_priv->engine[ring_id]);

        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
                context_page_num = 19;

        i = 2;

        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_vgpu_err("Invalid guest context descriptor\n");
                        return -EINVAL;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
                dst = kmap(page);
                intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
                                GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
                + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_ctrl);
        COPY_REG(ctx_timestamp);

        if (ring_id == RCS) {
                COPY_REG(bb_per_ctx_ptr);
                COPY_REG(rcs_indirect_ctx);
                COPY_REG(rcs_indirect_ctx_offset);
        }
#undef COPY_REG

        set_context_pdp_root_pointer(shadow_ring_context,
                                     workload->shadow_mm->shadow_page_table);

        intel_gvt_hypervisor_read_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        kunmap(page);
        return 0;
}

static inline bool is_gvt_request(struct drm_i915_gem_request *req)
{
        return i915_gem_context_force_single_submission(req->ctx);
}

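/*
 * Notifier called when a shadow context is scheduled in or out on an engine.
 * On schedule-in the vGPU render MMIO state is loaded; on schedule-out it is
 * restored and the workload status is finalized. Waiters on
 * shadow_ctx_status_wq are woken afterwards.
 */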
static int shadow_context_status_change(struct notifier_block *nb,
                unsigned long action, void *data)
{
        struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
        struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
                                shadow_ctx_notifier_block[req->engine->id]);
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload =
                scheduler->current_workload[req->engine->id];

        if (!is_gvt_request(req) || unlikely(!workload))
                return NOTIFY_OK;

        switch (action) {
        case INTEL_CONTEXT_SCHEDULE_IN:
                intel_gvt_load_render_mmio(workload->vgpu,
                                           workload->ring_id);
                atomic_set(&workload->shadow_ctx_active, 1);
                break;
        case INTEL_CONTEXT_SCHEDULE_OUT:
                intel_gvt_restore_render_mmio(workload->vgpu,
                                              workload->ring_id);
                /* A status of -EINPROGRESS means the workload hit no
                 * issue during dispatch, so on SCHEDULE_OUT the status
                 * can be set to zero for good. Any other status means
                 * something went wrong during dispatch and must not be
                 * overwritten with zero.
                 */
                if (workload->status == -EINPROGRESS)
                        workload->status = 0;
                atomic_set(&workload->shadow_ctx_active, 0);
                break;
        default:
                WARN_ON(1);
                return NOTIFY_OK;
        }
        wake_up(&workload->shadow_ctx_status_wq);
        return NOTIFY_OK;
}

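/*
 * Dispatch a workload to i915: program the addressing mode in the shadow
 * context descriptor, pin the shadow context, allocate a request, scan and
 * shadow the ring buffer (and the wa_ctx on RCS), populate the shadow
 * context and submit the request.
 */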
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        struct drm_i915_gem_request *rq;
        struct intel_vgpu *vgpu = workload->vgpu;
        int ret;

        gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
                ring_id, workload);

        shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
        shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
                                    GEN8_CTX_ADDRESSING_MODE_SHIFT;

        mutex_lock(&dev_priv->drm.struct_mutex);

        /* Pin the shadow context in GVT even though i915 pins it when the
         * request is allocated. GVT updates the guest context from the
         * shadow context after the workload completes, and by then i915 may
         * already have unpinned the shadow context, leaving the shadow_ctx
         * pages invalid. So GVT takes its own pin and releases it once the
         * guest context has been updated.
         */
        ret = engine->context_pin(engine, shadow_ctx);
        if (ret) {
                gvt_vgpu_err("fail to pin shadow context\n");
                workload->status = ret;
                mutex_unlock(&dev_priv->drm.struct_mutex);
                return ret;
        }

        rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
        if (IS_ERR(rq)) {
                gvt_vgpu_err("fail to allocate gem request\n");
                ret = PTR_ERR(rq);
                goto out;
        }

        gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

        workload->req = i915_gem_request_get(rq);

        ret = intel_gvt_scan_and_shadow_workload(workload);
        if (ret)
                goto out;

        if ((workload->ring_id == RCS) &&
            (workload->wa_ctx.indirect_ctx.size != 0)) {
                ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
                if (ret)
                        goto out;
        }

        ret = populate_shadow_context(workload);
        if (ret)
                goto out;

        if (workload->prepare) {
                ret = workload->prepare(workload);
                if (ret)
                        goto out;
        }

        gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                        ring_id, workload->req);

        ret = 0;
        workload->dispatched = true;
out:
        if (ret)
                workload->status = ret;

        if (!IS_ERR_OR_NULL(rq))
                i915_add_request_no_flush(rq);
        else
                engine->context_unpin(engine, shadow_ctx);

        mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
}

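/*
 * Pick the next workload for a ring from the current vgpu's queue and make
 * it the current workload. If a current workload is still set it is handed
 * back for resubmission. Returns NULL if there is no current vgpu, a
 * reschedule is pending, or the queue is empty.
 */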
static struct intel_vgpu_workload *pick_next_workload(
                struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;

        mutex_lock(&gvt->lock);

        /*
         * no current vgpu / will be scheduled out / no workload
         * bail out
         */
        if (!scheduler->current_vgpu) {
                gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
                goto out;
        }

        if (scheduler->need_reschedule) {
                gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
                goto out;
        }

        if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
                gvt_dbg_sched("ring id %d stop - no available workload\n",
                                ring_id);
                goto out;
        }

        /*
         * There is still a current workload; the dispatcher may have
         * failed to submit it for some reason, so hand it back for
         * resubmission.
         */
        if (scheduler->current_workload[ring_id]) {
                workload = scheduler->current_workload[ring_id];
                gvt_dbg_sched("ring id %d still have current workload %p\n",
                                ring_id, workload);
                goto out;
        }

        /*
         * Pick a workload as the current workload. Once it is set, the
         * scheduling policy routines will wait for it to finish before
         * scheduling out a vgpu.
         */
        scheduler->current_workload[ring_id] = container_of(
                        workload_q_head(scheduler->current_vgpu, ring_id)->next,
                        struct intel_vgpu_workload, list);

        workload = scheduler->current_workload[ring_id];

        gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

        atomic_inc(&workload->vgpu->running_workload_num);
out:
        mutex_unlock(&gvt->lock);
        return workload;
}

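/*
 * Copy the shadow context back into the guest ring context after a workload
 * completes: write back the shadowed context pages, update the guest ring
 * header, and copy the tracked register values through the hypervisor
 * interface.
 */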
static void update_guest_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *src;
        unsigned long context_gpa, context_page_num;
        int i;

        gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
                        workload->ctx_desc.lrca);

        context_page_num = intel_lr_context_size(
                        gvt->dev_priv->engine[ring_id]);

        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
                context_page_num = 19;

        i = 2;

        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                        GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_vgpu_err("invalid guest context descriptor\n");
                        return;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
                src = kmap(page);
                intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
                                GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }

        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
                RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
                RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_ctrl);
        COPY_REG(ctx_timestamp);

#undef COPY_REG

        intel_gvt_hypervisor_write_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        kunmap(page);
}

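/*
 * Complete the current workload on a ring: wait for the shadow context to be
 * scheduled out, update the guest context and trigger pending events on
 * success, unpin the shadow context, and hand the workload back through its
 * complete() callback.
 */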
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload;
        struct intel_vgpu *vgpu;
        int event;

        mutex_lock(&gvt->lock);

        workload = scheduler->current_workload[ring_id];
        vgpu = workload->vgpu;

        /* A workload with a request must wait for the context switch to
         * make sure the request has completed.
         * A workload without a request is completed directly.
         */
        if (workload->req) {
                struct drm_i915_private *dev_priv =
                        workload->vgpu->gvt->dev_priv;
                struct intel_engine_cs *engine =
                        dev_priv->engine[workload->ring_id];
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));

                i915_gem_request_put(fetch_and_zero(&workload->req));

                if (!workload->status && !vgpu->resetting) {
                        update_guest_context(workload);

                        for_each_set_bit(event, workload->pending_events,
                                         INTEL_GVT_EVENT_MAX)
                                intel_vgpu_trigger_virtual_event(vgpu, event);
                }
                mutex_lock(&dev_priv->drm.struct_mutex);
                /* unpin shadow ctx as the shadow_ctx update is done */
                engine->context_unpin(engine, workload->vgpu->shadow_ctx);
                mutex_unlock(&dev_priv->drm.struct_mutex);
        }

        gvt_dbg_sched("ring id %d complete workload %p status %d\n",
                        ring_id, workload, workload->status);

        scheduler->current_workload[ring_id] = NULL;

        list_del_init(&workload->list);
        workload->complete(workload);

        atomic_dec(&vgpu->running_workload_num);
        wake_up(&scheduler->workload_complete_wq);
        mutex_unlock(&gvt->lock);
}

struct workload_thread_param {
        struct intel_gvt *gvt;
        int ring_id;
};

static DEFINE_MUTEX(scheduler_mutex);

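/*
 * Per-ring worker thread: wait for a workload to be queued, dispatch it to
 * i915 (taking runtime PM and, where needed, forcewake), wait for the
 * request to finish, then complete the workload.
 */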
static int workload_thread(void *priv)
{
        struct workload_thread_param *p = (struct workload_thread_param *)priv;
        struct intel_gvt *gvt = p->gvt;
        int ring_id = p->ring_id;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;
        struct intel_vgpu *vgpu = NULL;
        int ret;
        bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        kfree(p);

        gvt_dbg_core("workload thread for ring %d started\n", ring_id);

        while (!kthread_should_stop()) {
                add_wait_queue(&scheduler->waitq[ring_id], &wait);
                do {
                        workload = pick_next_workload(gvt, ring_id);
                        if (workload)
                                break;
                        wait_woken(&wait, TASK_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
                } while (!kthread_should_stop());
                remove_wait_queue(&scheduler->waitq[ring_id], &wait);

                if (!workload)
                        break;

                mutex_lock(&scheduler_mutex);

                gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
                                workload->ring_id, workload,
                                workload->vgpu->id);

                intel_runtime_pm_get(gvt->dev_priv);

                gvt_dbg_sched("ring id %d will dispatch workload %p\n",
                                workload->ring_id, workload);

                if (need_force_wake)
                        intel_uncore_forcewake_get(gvt->dev_priv,
                                        FORCEWAKE_ALL);

                mutex_lock(&gvt->lock);
                ret = dispatch_workload(workload);
                mutex_unlock(&gvt->lock);

                if (ret) {
                        vgpu = workload->vgpu;
                        gvt_vgpu_err("fail to dispatch workload, skip\n");
                        goto complete;
                }

                gvt_dbg_sched("ring id %d wait workload %p\n",
                                workload->ring_id, workload);
                i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
                gvt_dbg_sched("will complete workload %p, status: %d\n",
                                workload, workload->status);

                complete_current_workload(gvt, ring_id);

                if (need_force_wake)
                        intel_uncore_forcewake_put(gvt->dev_priv,
                                        FORCEWAKE_ALL);

                intel_runtime_pm_put(gvt->dev_priv);

                mutex_unlock(&scheduler_mutex);

        }
        return 0;
}

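/* Wait until all workloads submitted by a vGPU have completed. */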
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

        if (atomic_read(&vgpu->running_workload_num)) {
                gvt_dbg_sched("wait vgpu idle\n");

                wait_event(scheduler->workload_complete_wq,
                                !atomic_read(&vgpu->running_workload_num));
        }
}

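/*
 * Tear down the workload scheduler: unregister the context status notifiers
 * and stop the per-ring workload threads.
 */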
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_engine_cs *engine;
        enum intel_engine_id i;

        gvt_dbg_core("clean workload scheduler\n");

        for_each_engine(engine, gvt->dev_priv, i) {
                atomic_notifier_chain_unregister(
                                        &engine->context_status_notifier,
                                        &gvt->shadow_ctx_notifier_block[i]);
                kthread_stop(scheduler->thread[i]);
        }
}

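/*
 * Set up the workload scheduler: create one workload thread per engine and
 * register the shadow context status notifier on each engine.
 */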
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct workload_thread_param *param = NULL;
        struct intel_engine_cs *engine;
        enum intel_engine_id i;
        int ret;

        gvt_dbg_core("init workload scheduler\n");

        init_waitqueue_head(&scheduler->workload_complete_wq);

        for_each_engine(engine, gvt->dev_priv, i) {
                init_waitqueue_head(&scheduler->waitq[i]);

                param = kzalloc(sizeof(*param), GFP_KERNEL);
                if (!param) {
                        ret = -ENOMEM;
                        goto err;
                }

                param->gvt = gvt;
                param->ring_id = i;

                scheduler->thread[i] = kthread_run(workload_thread, param,
                        "gvt workload %d", i);
                if (IS_ERR(scheduler->thread[i])) {
                        gvt_err("fail to create workload thread\n");
                        ret = PTR_ERR(scheduler->thread[i]);
                        goto err;
                }

                gvt->shadow_ctx_notifier_block[i].notifier_call =
                                        shadow_context_status_change;
                atomic_notifier_chain_register(&engine->context_status_notifier,
                                        &gvt->shadow_ctx_notifier_block[i]);
        }
        return 0;
err:
        intel_gvt_clean_workload_scheduler(gvt);
        kfree(param);
        param = NULL;
        return ret;
}

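/* Release the per-vGPU shadow context. */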
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
        i915_gem_context_put_unlocked(vgpu->shadow_ctx);
}

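/*
 * Create the per-vGPU shadow context used to submit guest workloads to i915
 * on behalf of the vGPU.
 */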
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
{
        atomic_set(&vgpu->running_workload_num, 0);

        vgpu->shadow_ctx = i915_gem_context_create_gvt(
                        &vgpu->gvt->dev_priv->drm);
        if (IS_ERR(vgpu->shadow_ctx))
                return PTR_ERR(vgpu->shadow_ctx);

        vgpu->shadow_ctx->engine[RCS].initialised = true;

        return 0;
}