staging: r8822be: Fix RTL8822be can't find any wireless AP
[sfrench/cifs-2.6.git] / drivers / gpu / drm / etnaviv / etnaviv_sched.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2017 Etnaviv Project
4  */
5
6 #include <linux/kthread.h>
7
8 #include "etnaviv_drv.h"
9 #include "etnaviv_dump.h"
10 #include "etnaviv_gem.h"
11 #include "etnaviv_gpu.h"
12 #include "etnaviv_sched.h"
13
14 static int etnaviv_job_hang_limit = 0;
15 module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
16 static int etnaviv_hw_jobs_limit = 4;
17 module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int , 0444);
18
/*
 * Scheduler dependency callback: hand back, one at a time, the fences this
 * job must wait on before it may run.
 *
 * Called repeatedly by the DRM scheduler until it returns NULL. Each call
 * transfers ownership of the returned fence reference to the scheduler
 * (which drops it once signaled); fences found already signaled are put
 * here and skipped. Consumed slots are NULLed so the next invocation
 * resumes where this one left off.
 *
 * Returns the next unsignaled dependency fence, or NULL when the job has
 * no outstanding dependencies left.
 */
static struct dma_fence *
etnaviv_sched_dependency(struct drm_sched_job *sched_job,
                         struct drm_sched_entity *entity)
{
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
        struct dma_fence *fence;
        int i;

        /* Explicit wait fence passed in by userspace, if any. */
        if (unlikely(submit->in_fence)) {
                fence = submit->in_fence;
                submit->in_fence = NULL;

                if (!dma_fence_is_signaled(fence))
                        return fence;

                /* Already signaled: drop our reference and fall through. */
                dma_fence_put(fence);
        }

        /* Implicit fences attached to each BO of the submit. */
        for (i = 0; i < submit->nr_bos; i++) {
                struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
                int j;

                /* Exclusive (write) fence of this BO. */
                if (bo->excl) {
                        fence = bo->excl;
                        bo->excl = NULL;

                        if (!dma_fence_is_signaled(fence))
                                return fence;

                        dma_fence_put(fence);
                }

                /* Shared (read) fences; NULL slots were consumed earlier. */
                for (j = 0; j < bo->nr_shared; j++) {
                        if (!bo->shared[j])
                                continue;

                        fence = bo->shared[j];
                        bo->shared[j] = NULL;

                        if (!dma_fence_is_signaled(fence))
                                return fence;

                        dma_fence_put(fence);
                }
                /* All shared fences consumed: free the (now empty) array. */
                kfree(bo->shared);
                bo->nr_shared = 0;
                bo->shared = NULL;
        }

        return NULL;
}
70
71 static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
72 {
73         struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
74         struct dma_fence *fence = NULL;
75
76         if (likely(!sched_job->s_fence->finished.error))
77                 fence = etnaviv_gpu_submit(submit);
78         else
79                 dev_dbg(submit->gpu->dev, "skipping bad job\n");
80
81         return fence;
82 }
83
/*
 * Scheduler timeout callback: a job exceeded its runtime budget.
 *
 * The recovery sequence is strictly ordered: the scheduler kthread is
 * parked first so no new job can race the reset, hardware fences of
 * in-flight jobs are detached, the GPU state is dumped and reset, and only
 * then are the pending jobs resubmitted and the scheduler restarted.
 */
static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
        struct etnaviv_gpu *gpu = submit->gpu;

        /* block scheduler */
        kthread_park(gpu->sched.thread);
        drm_sched_hw_job_reset(&gpu->sched, sched_job);

        /* get the GPU back into the init state */
        etnaviv_core_dump(gpu);
        etnaviv_gpu_recover_hang(gpu);

        /* restart scheduler after GPU is usable again */
        drm_sched_job_recovery(&gpu->sched);
        kthread_unpark(gpu->sched.thread);
}
101
/*
 * Scheduler free callback: drop the reference taken on the submit in
 * etnaviv_sched_push_job() once the scheduler is done with the job.
 */
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
	etnaviv_submit_put(to_etnaviv_submit(sched_job));
}
108
/* DRM GPU scheduler backend callbacks for etnaviv. */
static const struct drm_sched_backend_ops etnaviv_sched_ops = {
        .dependency = etnaviv_sched_dependency,         /* next wait fence */
        .run_job = etnaviv_sched_run_job,               /* submit to HW */
        .timedout_job = etnaviv_sched_timedout_job,     /* hang recovery */
        .free_job = etnaviv_sched_free_job,             /* drop job ref */
};
115
/*
 * Queue a submit to the GPU scheduler.
 *
 * Initializes the embedded drm_sched_job, publishes the job's "finished"
 * fence to userspace via the GPU's fence IDR (the returned id is what
 * userspace waits on), takes an extra reference on the submit for the
 * scheduler (released in etnaviv_sched_free_job()), and pushes the job to
 * the entity.
 *
 * Returns 0 on success or a negative error code.
 *
 * NOTE(review): on idr_alloc_cyclic() failure we return -ENOMEM while the
 * sched job is already initialized and out_fence holds a reference; nothing
 * here unwinds drm_sched_job_init() — presumably the caller's submit
 * cleanup drops out_fence, but the s_fence looks leaked. TODO: confirm.
 *
 * NOTE(review): drm_sched_job_init() and drm_sched_entity_push_job() are
 * not done under one lock, so two threads could push jobs out of order
 * with respect to their fence seqnos — verify against the scheduler's
 * expectations.
 */
int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
                           struct etnaviv_gem_submit *submit)
{
        int ret;

        ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
                                 sched_entity, submit->cmdbuf.ctx);
        if (ret)
                return ret;

        /* Hold a reference on the finished fence; exposed via the IDR. */
        submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
        mutex_lock(&submit->gpu->fence_idr_lock);
        submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
                                                submit->out_fence, 0,
                                                INT_MAX, GFP_KERNEL);
        mutex_unlock(&submit->gpu->fence_idr_lock);
        if (submit->out_fence_id < 0)
                return -ENOMEM;

        /* the scheduler holds on to the job now */
        kref_get(&submit->refcount);

        drm_sched_entity_push_job(&submit->sched_job, sched_entity);

        return 0;
}
142
143 int etnaviv_sched_init(struct etnaviv_gpu *gpu)
144 {
145         int ret;
146
147         ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
148                              etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
149                              msecs_to_jiffies(500), dev_name(gpu->dev));
150         if (ret)
151                 return ret;
152
153         return 0;
154 }
155
156 void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
157 {
158         drm_sched_fini(&gpu->sched);
159 }