/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"

#define to_amdgpu_ctx_entity(e) \
        container_of((e), struct amdgpu_ctx_entity, entity)
const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
        [AMDGPU_HW_IP_GFX]      =       1,
        [AMDGPU_HW_IP_COMPUTE]  =       4,
        [AMDGPU_HW_IP_DMA]      =       2,
        [AMDGPU_HW_IP_UVD]      =       1,
        [AMDGPU_HW_IP_VCE]      =       1,
        [AMDGPU_HW_IP_UVD_ENC]  =       1,
        [AMDGPU_HW_IP_VCN_DEC]  =       1,
        [AMDGPU_HW_IP_VCN_ENC]  =       1,
        [AMDGPU_HW_IP_VCN_JPEG] =       1,
};

static int amdgpu_ctx_total_num_entities(void)
{
        unsigned i, num_entities = 0;

        for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
                num_entities += amdgpu_ctx_num_entities[i];

        return num_entities;
}

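/*
 * Check whether the caller may create a context at the requested scheduler
 * priority: NORMAL and below is open to everyone, anything higher requires
 * CAP_SYS_NICE or DRM master status.
 */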
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
                                      enum drm_sched_priority priority)
{
        /* NORMAL and below are accessible by everyone */
        if (priority <= DRM_SCHED_PRIORITY_NORMAL)
                return 0;

        if (capable(CAP_SYS_NICE))
                return 0;

        if (drm_is_current_master(filp))
                return 0;

        return -EACCES;
}

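/*
 * Initialize a context: allocate the per-entity fence ring buffers, wire up
 * the entity pointers per HW IP type, snapshot the reset and VRAM-lost
 * counters, and create the fixed number of scheduler entities for each HW IP
 * type, pointing each entity at the priority run queues of the rings that
 * back that IP.
 */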
static int amdgpu_ctx_init(struct amdgpu_device *adev,
                           enum drm_sched_priority priority,
                           struct drm_file *filp,
                           struct amdgpu_ctx *ctx)
{
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        unsigned i, j, k;
        int r;

        if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
                return -EINVAL;

        r = amdgpu_ctx_priority_permit(filp, priority);
        if (r)
                return r;

        memset(ctx, 0, sizeof(*ctx));
        ctx->adev = adev;

        ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
                              sizeof(struct dma_fence *), GFP_KERNEL);
        if (!ctx->fences)
                return -ENOMEM;

        ctx->entities[0] = kcalloc(num_entities,
                                   sizeof(struct amdgpu_ctx_entity),
                                   GFP_KERNEL);
        if (!ctx->entities[0]) {
                r = -ENOMEM;
                goto error_free_fences;
        }

        for (i = 0; i < num_entities; ++i) {
                struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];

                entity->sequence = 1;
                entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
        }
        for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
                ctx->entities[i] = ctx->entities[i - 1] +
                        amdgpu_ctx_num_entities[i - 1];

        kref_init(&ctx->refcount);
        spin_lock_init(&ctx->ring_lock);
        mutex_init(&ctx->lock);

        ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
        ctx->reset_counter_query = ctx->reset_counter;
        ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
        ctx->init_priority = priority;
        ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

        for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
                struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
                struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
                unsigned num_rings = 0;
                unsigned num_rqs = 0;

                switch (i) {
                case AMDGPU_HW_IP_GFX:
                        rings[0] = &adev->gfx.gfx_ring[0];
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_COMPUTE:
                        for (j = 0; j < adev->gfx.num_compute_rings; ++j)
                                rings[j] = &adev->gfx.compute_ring[j];
                        num_rings = adev->gfx.num_compute_rings;
                        break;
                case AMDGPU_HW_IP_DMA:
                        for (j = 0; j < adev->sdma.num_instances; ++j)
                                rings[j] = &adev->sdma.instance[j].ring;
                        num_rings = adev->sdma.num_instances;
                        break;
                case AMDGPU_HW_IP_UVD:
                        rings[0] = &adev->uvd.inst[0].ring;
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_VCE:
                        rings[0] = &adev->vce.ring[0];
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_UVD_ENC:
                        rings[0] = &adev->uvd.inst[0].ring_enc[0];
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_VCN_DEC:
                        for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
                                if (adev->vcn.harvest_config & (1 << j))
                                        continue;
                                rings[num_rings++] = &adev->vcn.inst[j].ring_dec;
                        }
                        break;
                case AMDGPU_HW_IP_VCN_ENC:
                        for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
                                if (adev->vcn.harvest_config & (1 << j))
                                        continue;
                                for (k = 0; k < adev->vcn.num_enc_rings; ++k)
                                        rings[num_rings++] = &adev->vcn.inst[j].ring_enc[k];
                        }
                        break;
                case AMDGPU_HW_IP_VCN_JPEG:
                        for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
                                if (adev->vcn.harvest_config & (1 << j))
                                        continue;
                                rings[num_rings++] = &adev->vcn.inst[j].ring_jpeg;
                        }
                        break;
                }

                for (j = 0; j < num_rings; ++j) {
                        if (!rings[j]->adev)
                                continue;

                        rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
                }

                for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
                        r = drm_sched_entity_init(&ctx->entities[i][j].entity,
                                                  rqs, num_rqs, &ctx->guilty);
                        /* check inside the loop so an early failure isn't
                         * overwritten by a later successful init */
                        if (r)
                                goto error_cleanup_entities;
                }
        }

        return 0;

error_cleanup_entities:
        for (i = 0; i < num_entities; ++i)
                drm_sched_entity_destroy(&ctx->entities[0][i].entity);
        kfree(ctx->entities[0]);

error_free_fences:
        kfree(ctx->fences);
        ctx->fences = NULL;
        return r;
}

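/*
 * Final kref release: drop every fence still referenced by the context's
 * fence rings, free the fence and entity arrays and the context itself.
 * The scheduler entities must already have been torn down at this point.
 */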
static void amdgpu_ctx_fini(struct kref *ref)
{
        struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        struct amdgpu_device *adev = ctx->adev;
        unsigned i, j;

        if (!adev)
                return;

        for (i = 0; i < num_entities; ++i)
                for (j = 0; j < amdgpu_sched_jobs; ++j)
                        dma_fence_put(ctx->entities[0][i].fences[j]);
        kfree(ctx->fences);
        kfree(ctx->entities[0]);

        mutex_destroy(&ctx->lock);

        kfree(ctx);
}

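/*
 * Translate a userspace (hw_ip, instance, ring) triple into the scheduler
 * entity backing it, validating each index against the per-IP entity table.
 */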
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
                          u32 ring, struct drm_sched_entity **entity)
{
        if (hw_ip >= AMDGPU_HW_IP_NUM) {
                DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
                return -EINVAL;
        }

        /* Right now all IPs have only one instance - multiple rings. */
        if (instance != 0) {
                DRM_DEBUG("invalid ip instance: %d\n", instance);
                return -EINVAL;
        }

        if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
                DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
                return -EINVAL;
        }

        *entity = &ctx->entities[hw_ip][ring].entity;
        return 0;
}

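/*
 * Allocate a new context on behalf of userspace: reserve an IDR handle under
 * the manager lock, then initialize the context; on failure the handle is
 * released again before the error is returned.
 */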
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv,
                            struct drm_file *filp,
                            enum drm_sched_priority priority,
                            uint32_t *id)
{
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
        struct amdgpu_ctx *ctx;
        int r;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        mutex_lock(&mgr->lock);
        r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
        if (r < 0) {
                mutex_unlock(&mgr->lock);
                kfree(ctx);
                return r;
        }

        *id = (uint32_t)r;
        r = amdgpu_ctx_init(adev, priority, filp, ctx);
        if (r) {
                idr_remove(&mgr->ctx_handles, *id);
                *id = 0;
                kfree(ctx);
        }
        mutex_unlock(&mgr->lock);
        return r;
}

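/*
 * kref release callback for contexts freed through the normal path: destroy
 * all scheduler entities first, then tear down the context via
 * amdgpu_ctx_fini().
 */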
static void amdgpu_ctx_do_release(struct kref *ref)
{
        struct amdgpu_ctx *ctx;
        unsigned num_entities;
        u32 i;

        ctx = container_of(ref, struct amdgpu_ctx, refcount);

        num_entities = amdgpu_ctx_total_num_entities();
        for (i = 0; i < num_entities; i++)
                drm_sched_entity_destroy(&ctx->entities[0][i].entity);

        amdgpu_ctx_fini(ref);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
        struct amdgpu_ctx *ctx;

        mutex_lock(&mgr->lock);
        ctx = idr_remove(&mgr->ctx_handles, id);
        if (ctx)
                kref_put(&ctx->refcount, amdgpu_ctx_do_release);
        mutex_unlock(&mgr->lock);
        return ctx ? 0 : -EINVAL;
}

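/*
 * Legacy state query: reports only whether a GPU reset happened since the
 * last query, by comparing against the per-context reset counter snapshot.
 */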
static int amdgpu_ctx_query(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv, uint32_t id,
                            union drm_amdgpu_ctx_out *out)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;
        unsigned reset_counter;

        if (!fpriv)
                return -EINVAL;

        mgr = &fpriv->ctx_mgr;
        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (!ctx) {
                mutex_unlock(&mgr->lock);
                return -EINVAL;
        }

        /* TODO: these two are always zero */
        out->state.flags = 0x0;
        out->state.hangs = 0x0;

        /* determine if a GPU reset has occurred since the last call */
        reset_counter = atomic_read(&adev->gpu_reset_counter);
        /* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
        if (ctx->reset_counter_query == reset_counter)
                out->state.reset_status = AMDGPU_CTX_NO_RESET;
        else
                out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
        ctx->reset_counter_query = reset_counter;

        mutex_unlock(&mgr->lock);
        return 0;
}

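/*
 * Extended state query: in addition to reset detection this reports VRAM
 * loss, whether this context was found guilty of a hang, and whether the
 * RAS correctable/uncorrectable error counters advanced since the last
 * query.
 */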
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
        struct amdgpu_fpriv *fpriv, uint32_t id,
        union drm_amdgpu_ctx_out *out)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;
        uint32_t ras_counter;

        if (!fpriv)
                return -EINVAL;

        mgr = &fpriv->ctx_mgr;
        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (!ctx) {
                mutex_unlock(&mgr->lock);
                return -EINVAL;
        }

        out->state.flags = 0x0;
        out->state.hangs = 0x0;

        if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

        if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

        if (atomic_read(&ctx->guilty))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

        /* query the uncorrectable error count; the RAS counter is
         * monotonically increasing */
        ras_counter = amdgpu_ras_query_error_count(adev, false);
        if (ras_counter != ctx->ras_counter_ue) {
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
                ctx->ras_counter_ue = ras_counter;
        }

        /* query the correctable error count */
        ras_counter = amdgpu_ras_query_error_count(adev, true);
        if (ras_counter != ctx->ras_counter_ce) {
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
                ctx->ras_counter_ce = ras_counter;
        }

        mutex_unlock(&mgr->lock);
        return 0;
}

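/*
 * Dispatch the AMDGPU_CTX ioctl: normalize the requested priority and route
 * the operation to alloc, free or one of the query helpers.
 */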
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *filp)
{
        int r;
        uint32_t id;
        enum drm_sched_priority priority;

        union drm_amdgpu_ctx *args = data;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;

        r = 0;
        id = args->in.ctx_id;
        priority = amdgpu_to_sched_priority(args->in.priority);

        /* For backwards compatibility reasons, we need to accept
         * ioctls with garbage in the priority field.
         */
        if (priority == DRM_SCHED_PRIORITY_INVALID)
                priority = DRM_SCHED_PRIORITY_NORMAL;

        switch (args->in.op) {
        case AMDGPU_CTX_OP_ALLOC_CTX:
                r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
                args->out.alloc.ctx_id = id;
                break;
        case AMDGPU_CTX_OP_FREE_CTX:
                r = amdgpu_ctx_free(fpriv, id);
                break;
        case AMDGPU_CTX_OP_QUERY_STATE:
                r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
                break;
        case AMDGPU_CTX_OP_QUERY_STATE2:
                r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
                break;
        default:
                return -EINVAL;
        }

        return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;

        if (!fpriv)
                return NULL;

        mgr = &fpriv->ctx_mgr;

        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (ctx)
                kref_get(&ctx->refcount);
        mutex_unlock(&mgr->lock);
        return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
        if (ctx == NULL)
                return -EINVAL;

        kref_put(&ctx->refcount, amdgpu_ctx_do_release);
        return 0;
}

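/*
 * Publish a new fence in the entity's fence ring. The slot being recycled
 * must already be signaled (enforced via BUG_ON); the reference to the
 * previous fence is dropped after the ring is updated under the ring lock.
 */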
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
                          struct drm_sched_entity *entity,
                          struct dma_fence *fence, uint64_t *handle)
{
        struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
        uint64_t seq = centity->sequence;
        struct dma_fence *other = NULL;
        unsigned idx = 0;

        idx = seq & (amdgpu_sched_jobs - 1);
        other = centity->fences[idx];
        if (other)
                BUG_ON(!dma_fence_is_signaled(other));

        dma_fence_get(fence);

        spin_lock(&ctx->ring_lock);
        centity->fences[idx] = fence;
        centity->sequence++;
        spin_unlock(&ctx->ring_lock);

        dma_fence_put(other);
        if (handle)
                *handle = seq;
}

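/*
 * Look up a fence by sequence number. ~0ull means "most recent". Returns
 * ERR_PTR(-EINVAL) for sequence numbers from the future and NULL for fences
 * old enough to have fallen out of the ring (and thus known to be signaled).
 */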
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                       struct drm_sched_entity *entity,
                                       uint64_t seq)
{
        struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
        struct dma_fence *fence;

        spin_lock(&ctx->ring_lock);

        if (seq == ~0ull)
                seq = centity->sequence - 1;

        if (seq >= centity->sequence) {
                spin_unlock(&ctx->ring_lock);
                return ERR_PTR(-EINVAL);
        }

        if (seq + amdgpu_sched_jobs < centity->sequence) {
                spin_unlock(&ctx->ring_lock);
                return NULL;
        }

        fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
        spin_unlock(&ctx->ring_lock);

        return fence;
}

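/*
 * Apply a priority override to every entity of the context; UNSET restores
 * the priority the context was created with.
 */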
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
                                  enum drm_sched_priority priority)
{
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        enum drm_sched_priority ctx_prio;
        unsigned i;

        ctx->override_priority = priority;

        ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
                        ctx->init_priority : ctx->override_priority;

        for (i = 0; i < num_entities; i++) {
                struct drm_sched_entity *entity = &ctx->entities[0][i].entity;

                drm_sched_entity_set_priority(entity, ctx_prio);
        }
}

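/*
 * Block (interruptibly) until the oldest fence in the entity's ring has
 * signaled, guaranteeing a free slot before a new job is pushed.
 */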
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
                               struct drm_sched_entity *entity)
{
        struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
        unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1);
        struct dma_fence *other = centity->fences[idx];

        if (other) {
                signed long r;

                r = dma_fence_wait(other, true);
                if (r < 0) {
                        if (r != -ERESTARTSYS)
                                DRM_ERROR("Error (%ld) waiting for fence!\n", r);

                        return r;
                }
        }

        return 0;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
        mutex_init(&mgr->lock);
        idr_init(&mgr->ctx_handles);
}

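/*
 * Flush all entities of all contexts of this file descriptor, giving
 * pending jobs up to @timeout to drain; returns the remaining timeout.
 */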
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id, i;

        idp = &mgr->ctx_handles;

        mutex_lock(&mgr->lock);
        idr_for_each_entry(idp, ctx, id) {
                for (i = 0; i < num_entities; i++) {
                        struct drm_sched_entity *entity;

                        entity = &ctx->entities[0][i].entity;
                        timeout = drm_sched_entity_flush(entity, timeout);
                }
        }
        mutex_unlock(&mgr->lock);
        return timeout;
}

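/*
 * Tear down the scheduler entities of every context that is no longer
 * referenced; contexts still holding extra references are reported and
 * skipped.
 */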
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
        unsigned num_entities = amdgpu_ctx_total_num_entities();
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id, i;

        idp = &mgr->ctx_handles;

        idr_for_each_entry(idp, ctx, id) {
                if (kref_read(&ctx->refcount) != 1) {
                        DRM_ERROR("ctx %p is still alive\n", ctx);
                        continue;
                }

                for (i = 0; i < num_entities; i++)
                        drm_sched_entity_fini(&ctx->entities[0][i].entity);
        }
}

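/*
 * Final teardown of the context manager: finish all entities, drop the
 * last reference on each context and destroy the IDR and lock.
 */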
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id;

        amdgpu_ctx_mgr_entity_fini(mgr);

        idp = &mgr->ctx_handles;

        idr_for_each_entry(idp, ctx, id) {
                if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
                        DRM_ERROR("ctx %p is still alive\n", ctx);
        }

        idr_destroy(&mgr->ctx_handles);
        mutex_destroy(&mgr->lock);
}