drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
        spin_lock_init(&rq->lock);
        INIT_LIST_HEAD(&rq->entities);
        rq->current_entity = NULL;
}

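/* Append an entity to the tail of the run queue; a no-op if it is already queued. */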
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
                                    struct amd_sched_entity *entity)
{
        if (!list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
}

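/*
 * Remove an entity from the run queue and clear rq->current_entity if it was
 * pointing at this entity. A no-op if the entity is not queued.
 */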
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
                                       struct amd_sched_entity *entity)
{
        if (list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
        spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq          The run queue to check.
 *
 * Try to find a ready entity; returns NULL if none is found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
        struct amd_sched_entity *entity;

        spin_lock(&rq->lock);

        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
                        if (amd_sched_entity_is_ready(entity)) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
                                return entity;
                        }
                }
        }

        list_for_each_entry(entity, &rq->entities, list) {

                if (amd_sched_entity_is_ready(entity)) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
                        return entity;
                }

                if (entity == rq->current_entity)
                        break;
        }

        spin_unlock(&rq->lock);

        return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched       The pointer to the scheduler
 * @entity      The pointer to a valid amd_sched_entity
 * @rq          The run queue this entity belongs to
 * @jobs        The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
                          struct amd_sched_rq *rq,
                          uint32_t jobs)
{
        int r;

        if (!(sched && entity && rq))
                return -EINVAL;

        memset(entity, 0, sizeof(struct amd_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = rq;
        entity->sched = sched;

        spin_lock_init(&entity->rq_lock);
        spin_lock_init(&entity->queue_lock);
        r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
        if (r)
                return r;

        atomic_set(&entity->fence_seq, 0);
        entity->fence_context = dma_fence_context_alloc(2);

        return 0;
}

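/*
 * Hypothetical driver-side usage sketch (the names "ring" and "ctx" and the
 * queue depth are illustrative, not defined in this file):
 *
 *      r = amd_sched_entity_init(&ring->sched, &ctx->entity,
 *                                &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL],
 *                                32);
 *      if (r)
 *              return r;
 */
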
/**
 * Query if entity is initialized
 *
 * @sched       Pointer to scheduler instance
 * @entity      The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
                                            struct amd_sched_entity *entity)
{
        return entity->sched == sched &&
                entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity      The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
        rmb();
        if (kfifo_is_empty(&entity->job_queue))
                return true;

        return false;
}

/**
 * Check if entity is ready
 *
 * @entity      The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
        if (kfifo_is_empty(&entity->job_queue))
                return false;

        if (ACCESS_ONCE(entity->dependency))
                return false;

        return true;
}

/**
 * Destroy a context entity
 *
 * @sched       Pointer to scheduler instance
 * @entity      The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity)
{
        int r;

        if (!amd_sched_entity_is_initialized(sched, entity))
                return;
        /*
         * The client will not queue more IBs during this fini; consume the
         * existing queued IBs or discard them on SIGKILL.
         */
        if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
                r = -ERESTARTSYS;
        else
                r = wait_event_killable(sched->job_scheduled,
                                        amd_sched_entity_is_idle(entity));
        amd_sched_entity_set_rq(entity, NULL);
        if (r) {
                struct amd_sched_job *job;

                /* Park the kernel thread for a moment to make sure it isn't
                 * processing our entity.
                 */
                kthread_park(sched->thread);
                kthread_unpark(sched->thread);
                while (kfifo_out(&entity->job_queue, &job, sizeof(job))) {
                        struct amd_sched_fence *s_fence = job->s_fence;
                        amd_sched_fence_scheduled(s_fence);
                        dma_fence_set_error(&s_fence->finished, -ESRCH);
                        amd_sched_fence_finished(s_fence);
                        dma_fence_put(&s_fence->finished);
                        sched->ops->free_job(job);
                }

        }
        kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        dma_fence_put(f);
        amd_sched_wakeup(entity->sched);
}

static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        dma_fence_put(f);
}

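/*
 * Move the entity to a different run queue, or detach it when @rq is NULL.
 * entity->rq_lock keeps the rq pointer and the list membership consistent.
 */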
void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
                             struct amd_sched_rq *rq)
{
        if (entity->rq == rq)
                return;

        spin_lock(&entity->rq_lock);

        if (entity->rq)
                amd_sched_rq_remove_entity(entity->rq, entity);

        entity->rq = rq;
        if (rq)
                amd_sched_rq_add_entity(rq, entity);

        spin_unlock(&entity->rq_lock);
}

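/*
 * Return true if the dependency on @fence can be optimized, i.e. the fence
 * comes from the same entity or from a job on the same scheduler.
 */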
bool amd_sched_dependency_optimized(struct dma_fence *fence,
                                    struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct amd_sched_fence *s_fence;

        if (!fence || dma_fence_is_signaled(fence))
                return false;
        if (fence->context == entity->fence_context)
                return true;
        s_fence = to_amd_sched_fence(fence);
        if (s_fence && s_fence->sched == sched)
                return true;

        return false;
}

static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct dma_fence *fence = entity->dependency;
        struct amd_sched_fence *s_fence;

        if (fence->context == entity->fence_context) {
                /* We can ignore fences from ourselves */
                dma_fence_put(entity->dependency);
                return false;
        }

        s_fence = to_amd_sched_fence(fence);
        if (s_fence && s_fence->sched == sched) {

                /*
                 * Fence is from the same scheduler, only need to wait for
                 * it to be scheduled
                 */
                fence = dma_fence_get(&s_fence->scheduled);
                dma_fence_put(entity->dependency);
                entity->dependency = fence;
                if (!dma_fence_add_callback(fence, &entity->cb,
                                            amd_sched_entity_clear_dep))
                        return true;

                /* Ignore it when it is already scheduled */
                dma_fence_put(fence);
                return false;
        }

        if (!dma_fence_add_callback(entity->dependency, &entity->cb,
                                    amd_sched_entity_wakeup))
                return true;

        dma_fence_put(entity->dependency);
        return false;
}

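/*
 * Peek at the next job in the entity's queue without removing it. Returns NULL
 * if the queue is empty or if the job still has an unsignaled dependency (in
 * which case a wake-up callback has been installed on that dependency).
 */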
static struct amd_sched_job *
amd_sched_entity_peek_job(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct amd_sched_job *sched_job;

        if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
                return NULL;

        while ((entity->dependency = sched->ops->dependency(sched_job)))
                if (amd_sched_entity_add_dependency_cb(entity))
                        return NULL;

        return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job           The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
        struct amd_gpu_scheduler *sched = sched_job->sched;
        struct amd_sched_entity *entity = sched_job->s_entity;
        bool added, first = false;

        spin_lock(&entity->queue_lock);
        added = kfifo_in(&entity->job_queue, &sched_job,
                        sizeof(sched_job)) == sizeof(sched_job);

        if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
                first = true;

        spin_unlock(&entity->queue_lock);

        /* first job wakes up scheduler */
        if (first) {
                /* Add the entity to the run queue */
                spin_lock(&entity->rq_lock);
                amd_sched_rq_add_entity(entity->rq, entity);
                spin_unlock(&entity->rq_lock);
                amd_sched_wakeup(sched);
        }
        return added;
}

/* amd_sched_job_finish() is called after the hw fence is signaled
 */
static void amd_sched_job_finish(struct work_struct *work)
{
        struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
                                                   finish_work);
        struct amd_gpu_scheduler *sched = s_job->sched;

        /* remove job from ring_mirror_list */
        spin_lock(&sched->job_list_lock);
        list_del_init(&s_job->node);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
                struct amd_sched_job *next;

                spin_unlock(&sched->job_list_lock);
                cancel_delayed_work_sync(&s_job->work_tdr);
                spin_lock(&sched->job_list_lock);

                /* queue TDR for next job */
                next = list_first_entry_or_null(&sched->ring_mirror_list,
                                                struct amd_sched_job, node);

                if (next)
                        schedule_delayed_work(&next->work_tdr, sched->timeout);
        }
        spin_unlock(&sched->job_list_lock);
        dma_fence_put(&s_job->s_fence->finished);
        sched->ops->free_job(s_job);
}

static void amd_sched_job_finish_cb(struct dma_fence *f,
                                    struct dma_fence_cb *cb)
{
        struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
                                                 finish_cb);
        schedule_work(&job->finish_work);
}

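/*
 * Track the job on the scheduler's ring_mirror_list and, if a timeout is
 * configured, arm the TDR delayed work for the first pending job.
 */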
static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
        struct amd_gpu_scheduler *sched = s_job->sched;

        dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
                               amd_sched_job_finish_cb);

        spin_lock(&sched->job_list_lock);
        list_add_tail(&s_job->node, &sched->ring_mirror_list);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
            list_first_entry_or_null(&sched->ring_mirror_list,
                                     struct amd_sched_job, node) == s_job)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);
        spin_unlock(&sched->job_list_lock);
}

static void amd_sched_job_timedout(struct work_struct *work)
{
        struct amd_sched_job *job = container_of(work, struct amd_sched_job,
                                                 work_tdr.work);

        job->sched->ops->timedout_job(job);
}

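/*
 * Remove the scheduler fence callbacks from the hardware fences of all pending
 * jobs, so the jobs can later be resubmitted by amd_sched_job_recovery() after
 * a GPU reset.
 */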
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_job *s_job;

        spin_lock(&sched->job_list_lock);
        list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
                if (s_job->s_fence->parent &&
                    dma_fence_remove_callback(s_job->s_fence->parent,
                                              &s_job->s_fence->cb)) {
                        dma_fence_put(s_job->s_fence->parent);
                        s_job->s_fence->parent = NULL;
                        atomic_dec(&sched->hw_rq_count);
                }
        }
        spin_unlock(&sched->job_list_lock);
}

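/*
 * Drop a job (e.g. one identified as guilty) from the mirror list so that it
 * is not resubmitted during recovery.
 */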
void amd_sched_job_kickout(struct amd_sched_job *s_job)
{
        struct amd_gpu_scheduler *sched = s_job->sched;

        spin_lock(&sched->job_list_lock);
        list_del_init(&s_job->node);
        spin_unlock(&sched->job_list_lock);
}

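/*
 * Resubmit all jobs still on ring_mirror_list to the hardware (typically after
 * a GPU reset) and re-arm the timeout handler for the first pending job.
 */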
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_job *s_job, *tmp;
        int r;

        spin_lock(&sched->job_list_lock);
        s_job = list_first_entry_or_null(&sched->ring_mirror_list,
                                         struct amd_sched_job, node);
        if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);

        list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
                struct amd_sched_fence *s_fence = s_job->s_fence;
                struct dma_fence *fence;

                spin_unlock(&sched->job_list_lock);
                fence = sched->ops->run_job(s_job);
                atomic_inc(&sched->hw_rq_count);
                if (fence) {
                        s_fence->parent = dma_fence_get(fence);
                        r = dma_fence_add_callback(fence, &s_fence->cb,
                                                   amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        dma_fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }
                spin_lock(&sched->job_list_lock);
        }
        spin_unlock(&sched->job_list_lock);
}

/**
 * Submit a job to the job queue
 *
 * @sched_job           The pointer to the job to submit
 *
 * Blocks until the job could be pushed to the entity's job queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
        struct amd_sched_entity *entity = sched_job->s_entity;

        trace_amd_sched_job(sched_job);
        wait_event(entity->sched->job_scheduled,
                   amd_sched_entity_in(sched_job));
}

/* init a sched_job with basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
                       struct amd_gpu_scheduler *sched,
                       struct amd_sched_entity *entity,
                       void *owner)
{
        job->sched = sched;
        job->s_entity = entity;
        job->s_fence = amd_sched_fence_create(entity, owner);
        if (!job->s_fence)
                return -ENOMEM;
        job->id = atomic64_inc_return(&sched->job_id_count);

        INIT_WORK(&job->finish_work, amd_sched_job_finish);
        INIT_LIST_HEAD(&job->node);
        INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

        return 0;
}

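/*
 * Hypothetical driver-side submission flow (the "job" container, "ring" and
 * "ctx" are illustrative, not defined here); amd_sched_job_init() must succeed
 * before the job is handed to amd_sched_entity_push_job():
 *
 *      r = amd_sched_job_init(&job->base, &ring->sched, &ctx->entity, owner);
 *      if (r)
 *              return r;
 *      ...
 *      amd_sched_entity_push_job(&job->base);
 */
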
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
        return atomic_read(&sched->hw_rq_count) <
                sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
        if (amd_sched_ready(sched))
                wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_entity *entity;
        int i;

        if (!amd_sched_ready(sched))
                return NULL;

        /* Kernel run queue has higher priority than normal run queue */
        for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
                entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
                if (entity)
                        break;
        }

        return entity;
}

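/*
 * Callback installed on the hardware fence: releases the hw queue slot,
 * signals the scheduler's finished fence and wakes up the scheduler thread.
 */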
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
        struct amd_sched_fence *s_fence =
                container_of(cb, struct amd_sched_fence, cb);
        struct amd_gpu_scheduler *sched = s_fence->sched;

        dma_fence_get(&s_fence->finished);
        atomic_dec(&sched->hw_rq_count);
        amd_sched_fence_finished(s_fence);

        trace_amd_sched_process_job(s_fence);
        dma_fence_put(&s_fence->finished);
        wake_up_interruptible(&sched->wake_up_worker);
}

static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
        if (kthread_should_park()) {
                kthread_parkme();
                return true;
        }

        return false;
}

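/*
 * Main scheduler thread: waits until the hardware can take more work, picks a
 * ready entity, runs its next job via the backend ops and installs
 * amd_sched_process_job() as the callback on the returned hardware fence.
 */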
static int amd_sched_main(void *param)
{
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
        int r, count;

        sched_setscheduler(current, SCHED_FIFO, &sparam);

        while (!kthread_should_stop()) {
                struct amd_sched_entity *entity = NULL;
                struct amd_sched_fence *s_fence;
                struct amd_sched_job *sched_job;
                struct dma_fence *fence;

                wait_event_interruptible(sched->wake_up_worker,
                                         (!amd_sched_blocked(sched) &&
                                          (entity = amd_sched_select_entity(sched))) ||
                                         kthread_should_stop());

                if (!entity)
                        continue;

                sched_job = amd_sched_entity_peek_job(entity);
                if (!sched_job)
                        continue;

                s_fence = sched_job->s_fence;

                atomic_inc(&sched->hw_rq_count);
                amd_sched_job_begin(sched_job);

                fence = sched->ops->run_job(sched_job);
                amd_sched_fence_scheduled(s_fence);

                if (fence) {
                        s_fence->parent = dma_fence_get(fence);
                        r = dma_fence_add_callback(fence, &s_fence->cb,
                                                   amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        dma_fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }

                count = kfifo_out(&entity->job_queue, &sched_job,
                                sizeof(sched_job));
                WARN_ON(count != sizeof(sched_job));
                wake_up(&sched->job_scheduled);
        }
        return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched               The pointer to the scheduler
 * @ops                 The backend operations for this scheduler.
 * @hw_submission       Number of hw submissions that can be in flight.
 * @timeout             Timeout of the TDR in jiffies, or MAX_SCHEDULE_TIMEOUT
 *                      to disable it.
 * @name                Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
                   const struct amd_sched_backend_ops *ops,
                   unsigned hw_submission, long timeout, const char *name)
{
        int i;
        sched->ops = ops;
        sched->hw_submission_limit = hw_submission;
        sched->name = name;
        sched->timeout = timeout;
        for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
                amd_sched_rq_init(&sched->sched_rq[i]);

        init_waitqueue_head(&sched->wake_up_worker);
        init_waitqueue_head(&sched->job_scheduled);
        INIT_LIST_HEAD(&sched->ring_mirror_list);
        spin_lock_init(&sched->job_list_lock);
        atomic_set(&sched->hw_rq_count, 0);
        atomic64_set(&sched->job_id_count, 0);

        /* Each scheduler will run on a separate kernel thread */
        sched->thread = kthread_run(amd_sched_main, sched, sched->name);
        if (IS_ERR(sched->thread)) {
                DRM_ERROR("Failed to create scheduler for %s.\n", name);
                return PTR_ERR(sched->thread);
        }

        return 0;
}

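/*
 * Hypothetical driver-side setup sketch ("ring", "my_sched_ops" and the
 * parameter values are illustrative, not defined in this file):
 *
 *      r = amd_sched_init(&ring->sched, &my_sched_ops, 16,
 *                         msecs_to_jiffies(10000), ring->name);
 *      if (r)
 *              return r;
 */
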
/**
 * Destroy a gpu scheduler
 *
 * @sched       The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
        if (sched->thread)
                kthread_stop(sched->thread);
}