// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Etnaviv Project
 */
6 #include <linux/kthread.h>
8 #include "etnaviv_drv.h"
9 #include "etnaviv_dump.h"
10 #include "etnaviv_gem.h"
11 #include "etnaviv_gpu.h"
12 #include "etnaviv_sched.h"
14 static int etnaviv_job_hang_limit = 0;
15 module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
16 static int etnaviv_hw_jobs_limit = 4;
17 module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int , 0444);
19 static struct dma_fence *
20 etnaviv_sched_dependency(struct drm_sched_job *sched_job,
21 struct drm_sched_entity *entity)
23 struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
24 struct dma_fence *fence;
27 if (unlikely(submit->in_fence)) {
28 fence = submit->in_fence;
29 submit->in_fence = NULL;
31 if (!dma_fence_is_signaled(fence))
37 for (i = 0; i < submit->nr_bos; i++) {
38 struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
45 if (!dma_fence_is_signaled(fence))
51 for (j = 0; j < bo->nr_shared; j++) {
55 fence = bo->shared[j];
58 if (!dma_fence_is_signaled(fence))
71 static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
73 struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
74 struct dma_fence *fence = NULL;
76 if (likely(!sched_job->s_fence->finished.error))
77 fence = etnaviv_gpu_submit(submit);
79 dev_dbg(submit->gpu->dev, "skipping bad job\n");
84 static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
86 struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
87 struct etnaviv_gpu *gpu = submit->gpu;
90 kthread_park(gpu->sched.thread);
91 drm_sched_hw_job_reset(&gpu->sched, sched_job);
93 /* get the GPU back into the init state */
94 etnaviv_core_dump(gpu);
95 etnaviv_gpu_recover_hang(gpu);
97 /* restart scheduler after GPU is usable again */
98 drm_sched_job_recovery(&gpu->sched);
99 kthread_unpark(gpu->sched.thread);
/*
 * Scheduler is done with the job: drop the reference taken in
 * etnaviv_sched_push_job(); the submit is freed once the last ref goes.
 */
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

	etnaviv_submit_put(submit);
}
109 static const struct drm_sched_backend_ops etnaviv_sched_ops = {
110 .dependency = etnaviv_sched_dependency,
111 .run_job = etnaviv_sched_run_job,
112 .timedout_job = etnaviv_sched_timedout_job,
113 .free_job = etnaviv_sched_free_job,
116 int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
117 struct etnaviv_gem_submit *submit)
121 ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
122 sched_entity, submit->cmdbuf.ctx);
126 submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
127 mutex_lock(&submit->gpu->fence_idr_lock);
128 submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
129 submit->out_fence, 0,
130 INT_MAX, GFP_KERNEL);
131 mutex_unlock(&submit->gpu->fence_idr_lock);
132 if (submit->out_fence_id < 0)
135 /* the scheduler holds on to the job now */
136 kref_get(&submit->refcount);
138 drm_sched_entity_push_job(&submit->sched_job, sched_entity);
143 int etnaviv_sched_init(struct etnaviv_gpu *gpu)
147 ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
148 etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
149 msecs_to_jiffies(500), dev_name(gpu->dev));
156 void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
158 drm_sched_fini(&gpu->sched);