/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"

static struct i915_global_scheduler {
        struct i915_global base;
        struct kmem_cache *slab_dependencies;
        struct kmem_cache *slab_priorities;
} global;

static DEFINE_SPINLOCK(schedule_lock);

static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
        return container_of(node, const struct i915_request, sched);
}

static inline bool node_started(const struct i915_sched_node *node)
{
        return i915_request_started(node_to_request(node));
}

static inline bool node_signaled(const struct i915_sched_node *node)
{
        return i915_request_completed(node_to_request(node));
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
        return rb_entry(rb, struct i915_priolist, node);
}

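/*
 * assert_priolists() - sanity check the execlists priority queue.
 *
 * Debug-only (CONFIG_DRM_I915_DEBUG_GEM) check that the rbtree of priority
 * buckets is ordered from highest to lowest priority and that each bucket's
 * used bitmask matches its non-empty request lists.
 */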
static void assert_priolists(struct intel_engine_execlists * const execlists)
{
        struct rb_node *rb;
        long last_prio, i;

        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                return;

        GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
                   rb_first(&execlists->queue.rb_root));

        last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
        for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
                const struct i915_priolist *p = to_priolist(rb);

                GEM_BUG_ON(p->priority >= last_prio);
                last_prio = p->priority;

                GEM_BUG_ON(!p->used);
                for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
                        if (list_empty(&p->requests[i]))
                                continue;

                        GEM_BUG_ON(!(p->used & BIT(i)));
                }
        }
}

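/*
 * i915_sched_lookup_priolist() - find (or create) the priority bucket for
 * @prio on @engine and return the request list for its sub-priority slot.
 *
 * Must be called with engine->active.lock held. On allocation failure we
 * fall back to a single I915_PRIORITY_NORMAL bucket (execlists->no_priolist),
 * trading priority ordering for forward progress.
 */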
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_priolist *p;
        struct rb_node **parent, *rb;
        bool first = true;
        int idx, i;

        lockdep_assert_held(&engine->active.lock);
        assert_priolists(execlists);

        /* buckets sorted from highest [in slot 0] to lowest priority */
        idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
        prio >>= I915_USER_PRIORITY_SHIFT;
        if (unlikely(execlists->no_priolist))
                prio = I915_PRIORITY_NORMAL;

find_priolist:
        /* most positive priority is scheduled first, equal priorities fifo */
        rb = NULL;
        parent = &execlists->queue.rb_root.rb_node;
        while (*parent) {
                rb = *parent;
                p = to_priolist(rb);
                if (prio > p->priority) {
                        parent = &rb->rb_left;
                } else if (prio < p->priority) {
                        parent = &rb->rb_right;
                        first = false;
                } else {
                        goto out;
                }
        }

        if (prio == I915_PRIORITY_NORMAL) {
                p = &execlists->default_priolist;
        } else {
                p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
                /* Convert an allocation failure to a priority bump */
                if (unlikely(!p)) {
                        prio = I915_PRIORITY_NORMAL; /* recurses just once */

                        /* To maintain ordering with all rendering, after an
                         * allocation failure we have to disable all scheduling.
                         * Requests will then be executed in fifo, and schedule
                         * will ensure that dependencies are emitted in fifo.
                         * There will still be some reordering with existing
                         * requests, so if userspace lied about their
                         * dependencies that reordering may be visible.
                         */
                        execlists->no_priolist = true;
                        goto find_priolist;
                }
        }

        p->priority = prio;
        for (i = 0; i < ARRAY_SIZE(p->requests); i++)
                INIT_LIST_HEAD(&p->requests[i]);
        rb_link_node(&p->node, rb, parent);
        rb_insert_color_cached(&p->node, &execlists->queue, first);
        p->used = 0;

out:
        p->used |= BIT(idx);
        return &p->requests[idx];
}

void __i915_priolist_free(struct i915_priolist *p)
{
        kmem_cache_free(global.slab_priorities, p);
}

struct sched_cache {
        struct list_head *priolist;
};

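/*
 * sched_lock_engine() - move the held engine->active.lock from @locked to
 * the engine that @node's request currently belongs to, invalidating the
 * priolist @cache whenever the lock changes hands.
 *
 * Returns the engine whose lock is now held; see the comment in the body
 * for why this must loop for virtual engines.
 */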
static struct intel_engine_cs *
sched_lock_engine(const struct i915_sched_node *node,
                  struct intel_engine_cs *locked,
                  struct sched_cache *cache)
{
        const struct i915_request *rq = node_to_request(node);
        struct intel_engine_cs *engine;

        GEM_BUG_ON(!locked);

        /*
         * Virtual engines complicate acquiring the engine timeline lock,
         * as their rq->engine pointer is not stable until under that
         * engine lock. The simple ploy we use is to take the lock then
         * check that the rq still belongs to the newly locked engine.
         */
        while (locked != (engine = READ_ONCE(rq->engine))) {
                spin_unlock(&locked->active.lock);
                memset(cache, 0, sizeof(*cache));
                spin_lock(&engine->active.lock);
                locked = engine;
        }

        GEM_BUG_ON(locked != engine);
        return locked;
}

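/*
 * rq_prio() - effective priority of a request for preemption decisions.
 *
 * The __NO_PREEMPTION bit is always set on the inflight request so that a
 * priority bump applied purely to preserve dependency ordering (see
 * __i915_sched_node_add_dependency()) can never on its own justify
 * preempting the running request.
 */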
static inline int rq_prio(const struct i915_request *rq)
{
        return rq->sched.attr.priority | __NO_PREEMPTION;
}

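/*
 * kick_submission() - schedule the execlists tasklet if the newly raised
 * priority @prio is high enough to warrant preempting the request that is
 * currently executing on @engine.
 */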
static void kick_submission(struct intel_engine_cs *engine, int prio)
{
        const struct i915_request *inflight = *engine->execlists.active;

        /*
         * If we are already the currently executing context, don't
         * bother evaluating if we should preempt ourselves, or if
         * we expect nothing to change as a result of running the
         * tasklet, i.e. we have not changed the priority queue
         * sufficiently to oust the running context.
         */
        if (!inflight || !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
                return;

        tasklet_hi_schedule(&engine->execlists.tasklet);
}

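/*
 * __i915_schedule() - raise the priority of @node to @attr->priority and
 * propagate that priority to every request it depends on.
 *
 * Caller must hold schedule_lock. Priorities only ever increase; the
 * propagation is done iteratively over a flattened dependency list (see
 * the comment in the body) to avoid unbounded recursion.
 */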
static void __i915_schedule(struct i915_sched_node *node,
                            const struct i915_sched_attr *attr)
{
        struct intel_engine_cs *engine;
        struct i915_dependency *dep, *p;
        struct i915_dependency stack;
        const int prio = attr->priority;
        struct sched_cache cache;
        LIST_HEAD(dfs);

        /* Needed in order to use the temporary link inside i915_dependency */
        lockdep_assert_held(&schedule_lock);
        GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

        if (prio <= READ_ONCE(node->attr.priority))
                return;

        if (node_signaled(node))
                return;

        stack.signaler = node;
        list_add(&stack.dfs_link, &dfs);

        /*
         * Recursively bump all dependent priorities to match the new request.
         *
         * A naive approach would be to use recursion:
         * static void update_priorities(struct i915_sched_node *node, int prio) {
         *      list_for_each_entry(dep, &node->signalers_list, signal_link)
         *              update_priorities(dep->signaler, prio)
         *      queue_request(node);
         * }
         * but that may have unlimited recursion depth and so runs a very
         * real risk of overrunning the kernel stack. Instead, we build
         * a flat list of all dependencies starting with the current request.
         * As we walk each entry on that list, we append all of its
         * dependencies to the end of the list (this may include an already
         * visited request) and continue to walk onwards onto the new
         * dependencies. The end result is a topological list of requests in
         * reverse order; the last element in the list is the request we
         * must execute first.
         */
        list_for_each_entry(dep, &dfs, dfs_link) {
                struct i915_sched_node *node = dep->signaler;

                /* If we are already flying, we know we have no signalers */
                if (node_started(node))
                        continue;

                /*
                 * Within an engine, there can be no cycle, but we may
                 * refer to the same dependency chain multiple times
                 * (redundant dependencies are not eliminated) and across
                 * engines.
                 */
                list_for_each_entry(p, &node->signalers_list, signal_link) {
                        GEM_BUG_ON(p == dep); /* no cycles! */

                        if (node_signaled(p->signaler))
                                continue;

                        if (prio > READ_ONCE(p->signaler->attr.priority))
                                list_move_tail(&p->dfs_link, &dfs);
                }
        }

        /*
         * If we didn't need to bump any existing priorities, and we haven't
         * yet submitted this request (i.e. there is no potential race with
         * execlists_submit_request()), we can set our own priority and skip
         * acquiring the engine locks.
         */
        if (node->attr.priority == I915_PRIORITY_INVALID) {
                GEM_BUG_ON(!list_empty(&node->link));
                node->attr = *attr;

                if (stack.dfs_link.next == stack.dfs_link.prev)
                        return;

                __list_del_entry(&stack.dfs_link);
        }

        memset(&cache, 0, sizeof(cache));
        engine = node_to_request(node)->engine;
        spin_lock(&engine->active.lock);

        /* Fifo and depth-first replacement ensure our deps execute before us */
        engine = sched_lock_engine(node, engine, &cache);
        list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
                INIT_LIST_HEAD(&dep->dfs_link);

                node = dep->signaler;
                engine = sched_lock_engine(node, engine, &cache);
                lockdep_assert_held(&engine->active.lock);

                /* Recheck after acquiring the engine->active.lock */
                if (prio <= node->attr.priority || node_signaled(node))
                        continue;

                GEM_BUG_ON(node_to_request(node)->engine != engine);

                node->attr.priority = prio;

                if (list_empty(&node->link)) {
                        /*
                         * If the request is not in the priolist queue because
                         * it is not yet runnable, then it doesn't contribute
                         * to our preemption decisions. On the other hand,
                         * if the request is on the HW, it too is not in the
                         * queue; but in that case we may still need to reorder
                         * the inflight requests.
                         */
                        continue;
                }

                if (!intel_engine_is_virtual(engine) &&
                    !i915_request_is_active(node_to_request(node))) {
                        if (!cache.priolist)
                                cache.priolist =
                                        i915_sched_lookup_priolist(engine,
                                                                   prio);
                        list_move_tail(&node->link, cache.priolist);
                }

                if (prio <= engine->execlists.queue_priority_hint)
                        continue;

                engine->execlists.queue_priority_hint = prio;

                /* Defer (tasklet) submission until after all of our updates. */
                kick_submission(engine, prio);
        }

        spin_unlock(&engine->active.lock);
}

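/*
 * i915_schedule() - apply the scheduling attributes in @attr to @rq,
 * propagating any priority increase to its dependencies. Serialised by
 * the global schedule_lock.
 */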
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
        spin_lock_irq(&schedule_lock);
        __i915_schedule(&rq->sched, attr);
        spin_unlock_irq(&schedule_lock);
}

static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
{
        struct i915_sched_attr attr = node->attr;

        attr.priority |= bump;
        __i915_schedule(node, &attr);
}

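/*
 * i915_schedule_bump_priority() - OR the internal priority bits in @bump
 * into the request's current priority. Requests that have not yet been
 * assigned a priority (I915_PRIORITY_INVALID) are left untouched.
 */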
void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
        unsigned long flags;

        GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);

        if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
                return;

        spin_lock_irqsave(&schedule_lock, flags);
        __bump_priority(&rq->sched, bump);
        spin_unlock_irqrestore(&schedule_lock, flags);
}

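/*
 * i915_sched_node_init() - prepare a node for use by the scheduler: empty
 * signaler/waiter lists, not on any priolist, and no priority assigned yet.
 */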
void i915_sched_node_init(struct i915_sched_node *node)
{
        INIT_LIST_HEAD(&node->signalers_list);
        INIT_LIST_HEAD(&node->waiters_list);
        INIT_LIST_HEAD(&node->link);
        node->attr.priority = I915_PRIORITY_INVALID;
        node->semaphores = 0;
        node->flags = 0;
}

static struct i915_dependency *
i915_dependency_alloc(void)
{
        return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct i915_dependency *dep)
{
        kmem_cache_free(global.slab_dependencies, dep);
}

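/*
 * __i915_sched_node_add_dependency() - record that @node must wait for
 * @signal, using the caller-provided @dep for the link.
 *
 * Returns true if the dependency was added, false if @signal has already
 * completed and the caller should release @dep itself.
 */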
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
                                      struct i915_sched_node *signal,
                                      struct i915_dependency *dep,
                                      unsigned long flags)
{
        bool ret = false;

        spin_lock_irq(&schedule_lock);

        if (!node_signaled(signal)) {
                INIT_LIST_HEAD(&dep->dfs_link);
                list_add(&dep->wait_link, &signal->waiters_list);
                list_add(&dep->signal_link, &node->signalers_list);
                dep->signaler = signal;
                dep->waiter = node;
                dep->flags = flags;

                /* Keep track of whether anyone on this chain has a semaphore */
                if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
                    !node_started(signal))
                        node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;

                /*
                 * As we do not allow WAIT to preempt inflight requests,
                 * once we have executed a request, along with triggering
                 * any execution callbacks, we must preserve its ordering
                 * within the non-preemptible FIFO.
                 */
                BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK);
                if (flags & I915_DEPENDENCY_EXTERNAL)
                        __bump_priority(signal, __NO_PREEMPTION);

                ret = true;
        }

        spin_unlock_irq(&schedule_lock);

        return ret;
}

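/*
 * i915_sched_node_add_dependency() - allocate a dependency link and make
 * @node wait for @signal. Returns 0 on success (including the benign case
 * where @signal has already completed) or -ENOMEM if the link could not
 * be allocated.
 */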
int i915_sched_node_add_dependency(struct i915_sched_node *node,
                                   struct i915_sched_node *signal)
{
        struct i915_dependency *dep;

        dep = i915_dependency_alloc();
        if (!dep)
                return -ENOMEM;

        if (!__i915_sched_node_add_dependency(node, signal, dep,
                                              I915_DEPENDENCY_EXTERNAL |
                                              I915_DEPENDENCY_ALLOC))
                i915_dependency_free(dep);

        return 0;
}

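/*
 * i915_sched_node_fini() - tear down a node's dependency links in both
 * directions, freeing any links that were allocated on its behalf
 * (I915_DEPENDENCY_ALLOC).
 */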
void i915_sched_node_fini(struct i915_sched_node *node)
{
        struct i915_dependency *dep, *tmp;

        spin_lock_irq(&schedule_lock);

        /*
         * Everyone we depended upon (the fences we wait to be signaled)
         * should retire before us and remove themselves from our list.
         * However, retirement is run independently on each timeline and
         * so we may be called out-of-order.
         */
        list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
                GEM_BUG_ON(!node_signaled(dep->signaler));
                GEM_BUG_ON(!list_empty(&dep->dfs_link));

                list_del(&dep->wait_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(dep);
        }

        /* Remove ourselves from everyone who depends upon us */
        list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
                GEM_BUG_ON(dep->signaler != node);
                GEM_BUG_ON(!list_empty(&dep->dfs_link));

                list_del(&dep->signal_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(dep);
        }

        spin_unlock_irq(&schedule_lock);
}

static void i915_global_scheduler_shrink(void)
{
        kmem_cache_shrink(global.slab_dependencies);
        kmem_cache_shrink(global.slab_priorities);
}

static void i915_global_scheduler_exit(void)
{
        kmem_cache_destroy(global.slab_dependencies);
        kmem_cache_destroy(global.slab_priorities);
}

static struct i915_global_scheduler global = { {
        .shrink = i915_global_scheduler_shrink,
        .exit = i915_global_scheduler_exit,
} };

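/*
 * i915_global_scheduler_init() - create the slab caches for dependency
 * links and priority buckets and register them with the i915 globals
 * shrink/exit machinery.
 */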
int __init i915_global_scheduler_init(void)
{
        global.slab_dependencies = KMEM_CACHE(i915_dependency,
                                              SLAB_HWCACHE_ALIGN);
        if (!global.slab_dependencies)
                return -ENOMEM;

        global.slab_priorities = KMEM_CACHE(i915_priolist,
                                            SLAB_HWCACHE_ALIGN);
        if (!global.slab_priorities)
                goto err_priorities;

        i915_global_register(&global.base);
        return 0;

err_priorities:
        /* slab_priorities failed to allocate; release slab_dependencies */
        kmem_cache_destroy(global.slab_dependencies);
        return -ENOMEM;
}