/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */

#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu_cs.h"
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"

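/**
 * amdgpu_cs_parser_init - initialize the CS parser for a submission
 * @p: parser structure to initialize
 * @adev: amdgpu device the submission is made against
 * @filp: DRM file private of the submitting client
 * @cs: ioctl argument describing the chunks of the submission
 *
 * Zero-initializes @p and takes a reference on the submission context.
 * Returns 0 on success, -EINVAL for an empty submission or an unknown
 * context and -ECANCELED if the context was marked guilty after a reset.
 */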
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
                                 struct amdgpu_device *adev,
                                 struct drm_file *filp,
                                 union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = filp->driver_priv;

        if (cs->in.num_chunks == 0)
                return -EINVAL;

        memset(p, 0, sizeof(*p));
        p->adev = adev;
        p->filp = filp;

        p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
        if (!p->ctx)
                return -EINVAL;

        if (atomic_read(&p->ctx->guilty)) {
                amdgpu_ctx_put(p->ctx);
                return -ECANCELED;
        }
        return 0;
}

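/**
 * amdgpu_cs_job_idx - map an IB chunk to a job within the gang
 * @p: CS parser state
 * @chunk_ib: IB chunk describing ip_type/ip_instance/ring
 *
 * Looks up the scheduler entity for the chunk and either returns the index
 * of an existing job in the gang using the same entity or grows the gang by
 * one. Returns the job index on success or a negative error code.
 */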
static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
                             struct drm_amdgpu_cs_chunk_ib *chunk_ib)
{
        struct drm_sched_entity *entity;
        unsigned int i;
        int r;

        r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
                                  chunk_ib->ip_instance,
                                  chunk_ib->ring, &entity);
        if (r)
                return r;

        /*
         * Abort if there is no run queue associated with this entity.
         * Possibly because of disabled HW IP.
         */
        if (entity->rq == NULL)
                return -EINVAL;

        /* Check if we can add this IB to some existing job */
        for (i = 0; i < p->gang_size; ++i)
                if (p->entities[i] == entity)
                        return i;

        /* If not, increase the gang size if possible */
        if (i == AMDGPU_CS_GANG_SIZE)
                return -EINVAL;

        p->entities[i] = entity;
        p->gang_size = i + 1;
        return i;
}

static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
                           struct drm_amdgpu_cs_chunk_ib *chunk_ib,
                           unsigned int *num_ibs)
{
        int r;

        r = amdgpu_cs_job_idx(p, chunk_ib);
        if (r < 0)
                return r;

        ++(num_ibs[r]);
        p->gang_leader_idx = r;
        return 0;
}

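/**
 * amdgpu_cs_p1_user_fence - first pass handling of a user fence chunk
 * @p: CS parser state
 * @data: user fence chunk copied from userspace
 * @offset: returned byte offset of the fence inside its BO
 *
 * Validates the GEM handle backing the user fence. The BO must be exactly
 * one page, must not be a userptr BO and the fence offset must leave room
 * for the 8 byte fence value.
 */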
static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
                                   struct drm_amdgpu_cs_chunk_fence *data,
                                   uint32_t *offset)
{
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
        unsigned long size;
        int r;

        gobj = drm_gem_object_lookup(p->filp, data->handle);
        if (gobj == NULL)
                return -EINVAL;

        bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
        p->uf_entry.priority = 0;
        p->uf_entry.tv.bo = &bo->tbo;
        /* One for TTM and two for the CS job */
        p->uf_entry.tv.num_shared = 3;

        drm_gem_object_put(gobj);

        size = amdgpu_bo_size(bo);
        if (size != PAGE_SIZE || (data->offset + 8) > size) {
                r = -EINVAL;
                goto error_unref;
        }

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
                r = -EINVAL;
                goto error_unref;
        }

        *offset = data->offset;

        return 0;

error_unref:
        amdgpu_bo_unref(&bo);
        return r;
}

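/**
 * amdgpu_cs_p1_bo_handles - create the BO list from a BO handles chunk
 * @p: CS parser state
 * @data: BO list chunk copied from userspace
 *
 * Builds p->bo_list from the handles supplied inline in the CS ioctl
 * instead of a previously created BO list object.
 */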
static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
                                   struct drm_amdgpu_bo_list_in *data)
{
        struct drm_amdgpu_bo_list_entry *info;
        int r;

        r = amdgpu_bo_create_list_entry_array(data, &info);
        if (r)
                return r;

        r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
                                  &p->bo_list);
        if (r)
                goto error_free;

        kvfree(info);
        return 0;

error_free:
        kvfree(info);

        return r;
}

/* Copy the data from userspace and go over it the first time */
static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
                           union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
        struct amdgpu_vm *vm = &fpriv->vm;
        uint64_t *chunk_array_user;
        uint64_t *chunk_array;
        uint32_t uf_offset = 0;
        unsigned int size;
        int ret;
        int i;

        chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
                                     GFP_KERNEL);
        if (!chunk_array)
                return -ENOMEM;

        /* get chunks */
        chunk_array_user = u64_to_user_ptr(cs->in.chunks);
        if (copy_from_user(chunk_array, chunk_array_user,
                           sizeof(uint64_t) * cs->in.num_chunks)) {
                ret = -EFAULT;
                goto free_chunk;
        }

        p->nchunks = cs->in.num_chunks;
        p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
                                   GFP_KERNEL);
        if (!p->chunks) {
                ret = -ENOMEM;
                goto free_chunk;
        }

        for (i = 0; i < p->nchunks; i++) {
                struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
                struct drm_amdgpu_cs_chunk user_chunk;
                uint32_t __user *cdata;

                chunk_ptr = u64_to_user_ptr(chunk_array[i]);
                if (copy_from_user(&user_chunk, chunk_ptr,
                                   sizeof(struct drm_amdgpu_cs_chunk))) {
                        ret = -EFAULT;
                        i--;
                        goto free_partial_kdata;
                }
                p->chunks[i].chunk_id = user_chunk.chunk_id;
                p->chunks[i].length_dw = user_chunk.length_dw;

                size = p->chunks[i].length_dw;
                cdata = u64_to_user_ptr(user_chunk.chunk_data);

                p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
                                                    GFP_KERNEL);
                if (p->chunks[i].kdata == NULL) {
                        ret = -ENOMEM;
                        i--;
                        goto free_partial_kdata;
                }
                size *= sizeof(uint32_t);
                if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
                        ret = -EFAULT;
                        goto free_partial_kdata;
                }

                /* Assume the worst on the following checks */
                ret = -EINVAL;
                switch (p->chunks[i].chunk_id) {
                case AMDGPU_CHUNK_ID_IB:
                        if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
                                goto free_partial_kdata;

                        ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
                        if (ret)
                                goto free_partial_kdata;
                        break;

                case AMDGPU_CHUNK_ID_FENCE:
                        if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
                                goto free_partial_kdata;

                        ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
                                                      &uf_offset);
                        if (ret)
                                goto free_partial_kdata;
                        break;

                case AMDGPU_CHUNK_ID_BO_HANDLES:
                        if (size < sizeof(struct drm_amdgpu_bo_list_in))
                                goto free_partial_kdata;

                        ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
                        if (ret)
                                goto free_partial_kdata;
                        break;

                case AMDGPU_CHUNK_ID_DEPENDENCIES:
                case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
                case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
                case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
                        break;

                default:
                        goto free_partial_kdata;
                }
        }

        /* Don't leak the chunk data when no IB chunk was found */
        if (!p->gang_size) {
                ret = -EINVAL;
                goto free_all_kdata;
        }

        for (i = 0; i < p->gang_size; ++i) {
                ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm);
                if (ret)
                        goto free_all_kdata;

                ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i],
                                         &fpriv->vm);
                if (ret)
                        goto free_all_kdata;
        }
        p->gang_leader = p->jobs[p->gang_leader_idx];

        if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) {
                ret = -ECANCELED;
                goto free_all_kdata;
        }

        if (p->uf_entry.tv.bo)
                p->gang_leader->uf_addr = uf_offset;
        kvfree(chunk_array);

        /* Use this opportunity to fill in task info for the vm */
        amdgpu_vm_set_task_info(vm);

        return 0;

free_all_kdata:
        i = p->nchunks - 1;
free_partial_kdata:
        for (; i >= 0; i--)
                kvfree(p->chunks[i].kdata);
        kvfree(p->chunks);
        p->chunks = NULL;
        p->nchunks = 0;
free_chunk:
        kvfree(chunk_array);

        return ret;
}

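/**
 * amdgpu_cs_p2_ib - second pass handling of an IB chunk
 * @p: CS parser state
 * @chunk: the IB chunk to process
 * @ce_preempt: count of preemptible CE IBs seen so far
 * @de_preempt: count of preemptible DE IBs seen so far
 *
 * Allocates the indirect buffer for the chunk in the right job of the gang
 * and copies over GPU address, size and flags. Enforces that at most one
 * preemptible IB each is submitted for CE and DE on GFX.
 */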
static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
                           struct amdgpu_cs_chunk *chunk,
                           unsigned int *ce_preempt,
                           unsigned int *de_preempt)
{
        struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_ring *ring;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        int r;

        r = amdgpu_cs_job_idx(p, chunk_ib);
        if (r < 0)
                return r;

        job = p->jobs[r];
        ring = amdgpu_job_ring(job);
        ib = &job->ibs[job->num_ibs++];

        /* MM engine doesn't support user fences */
        if (p->uf_entry.tv.bo && ring->funcs->no_user_fence)
                return -EINVAL;

        if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
            chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
                if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
                        (*ce_preempt)++;
                else
                        (*de_preempt)++;

                /* Each GFX command submission allows at most one
                 * preemptible IB for CE and one for DE.
                 */
                if (*ce_preempt > 1 || *de_preempt > 1)
                        return -EINVAL;
        }

        if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
                job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;

        r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
                          chunk_ib->ib_bytes : 0,
                          AMDGPU_IB_POOL_DELAYED, ib);
        if (r) {
                DRM_ERROR("Failed to get ib!\n");
                return r;
        }

        ib->gpu_addr = chunk_ib->va_start;
        ib->length_dw = chunk_ib->ib_bytes / 4;
        ib->flags = chunk_ib->flags;
        return 0;
}

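/**
 * amdgpu_cs_p2_dependencies - handle fence dependency chunks
 * @p: CS parser state
 * @chunk: dependency chunk with an array of drm_amdgpu_cs_chunk_dep
 *
 * Resolves each dependency to a context fence and adds it to the gang
 * leader's sync object. For SCHEDULED_DEPENDENCIES the scheduled fence is
 * used instead of the finished fence, so the job only waits for the
 * dependency to start executing.
 */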
static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
                                     struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        unsigned int num_deps;
        int i, r;

        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_dep);

        for (i = 0; i < num_deps; ++i) {
                struct amdgpu_ctx *ctx;
                struct drm_sched_entity *entity;
                struct dma_fence *fence;

                ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
                if (ctx == NULL)
                        return -EINVAL;

                r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
                                          deps[i].ip_instance,
                                          deps[i].ring, &entity);
                if (r) {
                        amdgpu_ctx_put(ctx);
                        return r;
                }

                fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
                amdgpu_ctx_put(ctx);

                if (IS_ERR(fence))
                        return PTR_ERR(fence);
                else if (!fence)
                        continue;

                if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
                        struct drm_sched_fence *s_fence;
                        struct dma_fence *old = fence;

                        s_fence = to_drm_sched_fence(fence);
                        fence = dma_fence_get(&s_fence->scheduled);
                        dma_fence_put(old);
                }

                r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
                dma_fence_put(fence);
                if (r)
                        return r;
        }
        return 0;
}

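/**
 * amdgpu_syncobj_lookup_and_add - add a syncobj fence as dependency
 * @p: CS parser state
 * @handle: syncobj handle to look up
 * @point: timeline point, 0 for binary syncobjs
 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_* lookup flags
 *
 * Finds the fence behind the syncobj and adds it to the gang leader's
 * sync object so the submission waits for it.
 */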
static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
                                         uint32_t handle, u64 point,
                                         u64 flags)
{
        struct dma_fence *fence;
        int r;

        r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
        if (r) {
                DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
                          handle, point, r);
                return r;
        }

        r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
        dma_fence_put(fence);

        return r;
}

static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
                                   struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
        unsigned int num_deps;
        int i, r;

        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_sem);
        for (i = 0; i < num_deps; ++i) {
                r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
                if (r)
                        return r;
        }

        return 0;
}

static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
                                              struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
        unsigned int num_deps;
        int i, r;

        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_syncobj);
        for (i = 0; i < num_deps; ++i) {
                r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
                                                  syncobj_deps[i].point,
                                                  syncobj_deps[i].flags);
                if (r)
                        return r;
        }

        return 0;
}

static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
                                    struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
        unsigned int num_deps;
        int i;

        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_sem);

        if (p->post_deps)
                return -EINVAL;

        p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
                                     GFP_KERNEL);
        p->num_post_deps = 0;

        if (!p->post_deps)
                return -ENOMEM;

        for (i = 0; i < num_deps; ++i) {
                p->post_deps[i].syncobj =
                        drm_syncobj_find(p->filp, deps[i].handle);
                if (!p->post_deps[i].syncobj)
                        return -EINVAL;
                p->post_deps[i].chain = NULL;
                p->post_deps[i].point = 0;
                p->num_post_deps++;
        }

        return 0;
}

static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
                                                struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
        unsigned int num_deps;
        int i;

        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_syncobj);

        if (p->post_deps)
                return -EINVAL;

        p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
                                     GFP_KERNEL);
        p->num_post_deps = 0;

        if (!p->post_deps)
                return -ENOMEM;

        for (i = 0; i < num_deps; ++i) {
                struct amdgpu_cs_post_dep *dep = &p->post_deps[i];

                dep->chain = NULL;
                if (syncobj_deps[i].point) {
                        dep->chain = dma_fence_chain_alloc();
                        if (!dep->chain)
                                return -ENOMEM;
                }

                dep->syncobj = drm_syncobj_find(p->filp,
                                                syncobj_deps[i].handle);
                if (!dep->syncobj) {
                        dma_fence_chain_free(dep->chain);
                        return -EINVAL;
                }
                dep->point = syncobj_deps[i].point;
                p->num_post_deps++;
        }

        return 0;
}

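/* Second pass over the chunks: now that the gang and its jobs exist, fill
 * in the IBs and wire up all dependency and syncobj chunks.
 */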
static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
{
        unsigned int ce_preempt = 0, de_preempt = 0;
        int i, r;

        for (i = 0; i < p->nchunks; ++i) {
                struct amdgpu_cs_chunk *chunk;

                chunk = &p->chunks[i];

                switch (chunk->chunk_id) {
                case AMDGPU_CHUNK_ID_IB:
                        r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
                        if (r)
                                return r;
                        break;
                case AMDGPU_CHUNK_ID_DEPENDENCIES:
                case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
                        r = amdgpu_cs_p2_dependencies(p, chunk);
                        if (r)
                                return r;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
                        r = amdgpu_cs_p2_syncobj_in(p, chunk);
                        if (r)
                                return r;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
                        r = amdgpu_cs_p2_syncobj_out(p, chunk);
                        if (r)
                                return r;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
                        r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
                        if (r)
                                return r;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
                        r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
                        if (r)
                                return r;
                        break;
                }
        }

        return 0;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
        if (us <= 0 || !adev->mm_stats.log2_max_MBps)
                return 0;

        /* Since accum_us is incremented by a million per second, just
         * multiply it by the number of MB/s to get the number of bytes.
         */
        return us << adev->mm_stats.log2_max_MBps;
}

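/* Convert bytes to microseconds; the inverse of us_to_bytes(). */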
static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
        if (!adev->mm_stats.log2_max_MBps)
                return 0;

        return bytes >> adev->mm_stats.log2_max_MBps;
}

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
                                              u64 *max_bytes,
                                              u64 *max_vis_bytes)
{
        s64 time_us, increment_us;
        u64 free_vram, total_vram, used_vram;
        /* Allow a maximum of 200 accumulated ms. This is basically per-IB
         * throttling.
         *
         * It means that in order to get full max MBps, at least 5 IBs per
         * second must be submitted and not more than 200ms apart from each
         * other.
         */
        const s64 us_upper_bound = 200000;

        if (!adev->mm_stats.log2_max_MBps) {
                *max_bytes = 0;
                *max_vis_bytes = 0;
                return;
        }

        total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
        used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
        free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

        spin_lock(&adev->mm_stats.lock);

        /* Increase the amount of accumulated us. */
        time_us = ktime_to_us(ktime_get());
        increment_us = time_us - adev->mm_stats.last_update_us;
        adev->mm_stats.last_update_us = time_us;
        adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
                                      us_upper_bound);

        /* This prevents the short period of low performance when the VRAM
         * usage is low and the driver is in debt or doesn't have enough
         * accumulated us to fill VRAM quickly.
         *
         * The situation can occur in these cases:
         * - a lot of VRAM is freed by userspace
         * - the presence of a big buffer causes a lot of evictions
         *   (solution: split buffers into smaller ones)
         *
         * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
         * accum_us to a positive number.
         */
        if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
                s64 min_us;

                /* Be more aggressive on dGPUs. Try to fill a portion of free
                 * VRAM now.
                 */
                if (!(adev->flags & AMD_IS_APU))
                        min_us = bytes_to_us(adev, free_vram / 4);
                else
                        min_us = 0; /* Reset accum_us on APUs. */

                adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
        }

        /* This is set to 0 if the driver is in debt to disallow (optional)
         * buffer moves.
         */
        *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

        /* Do the same for visible VRAM if half of it is free */
        if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
                u64 total_vis_vram = adev->gmc.visible_vram_size;
                u64 used_vis_vram =
                        amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);

                if (used_vis_vram < total_vis_vram) {
                        u64 free_vis_vram = total_vis_vram - used_vis_vram;

                        adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
                                                          increment_us, us_upper_bound);

                        if (free_vis_vram >= total_vis_vram / 2)
                                adev->mm_stats.accum_us_vis =
                                        max(bytes_to_us(adev, free_vis_vram / 2),
                                            adev->mm_stats.accum_us_vis);
                }

                *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
        } else {
                *max_vis_bytes = 0;
        }

        spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
                                  u64 num_vis_bytes)
{
        spin_lock(&adev->mm_stats.lock);
        adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
        adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
        spin_unlock(&adev->mm_stats.lock);
}

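/**
 * amdgpu_cs_bo_validate - validate a single BO for this submission
 * @param: CS parser state, passed as void * for use as a callback
 * @bo: buffer object to validate
 *
 * Picks the preferred or merely allowed domains depending on how much of
 * the per-IB move budget from amdgpu_cs_get_threshold_for_moves() is left
 * and validates the BO there, falling back to the allowed domains on
 * -ENOMEM. The number of bytes moved is charged against the budget.
 */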
static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct amdgpu_cs_parser *p = param;
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
                .resv = bo->tbo.base.resv
        };
        uint32_t domain;
        int r;

        if (bo->tbo.pin_count)
                return 0;

        /* Don't move this buffer if we have depleted our allowance
         * to move it. Don't move anything if the threshold is zero.
         */
        if (p->bytes_moved < p->bytes_moved_threshold &&
            (!bo->tbo.base.dma_buf ||
            list_empty(&bo->tbo.base.dma_buf->attachments))) {
                if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
                    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
                        /* And don't move a CPU_ACCESS_REQUIRED BO to limited
                         * visible VRAM if we've depleted our allowance to do
                         * that.
                         */
                        if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
                                domain = bo->preferred_domains;
                        else
                                domain = bo->allowed_domains;
                } else {
                        domain = bo->preferred_domains;
                }
        } else {
                domain = bo->allowed_domains;
        }

retry:
        amdgpu_bo_placement_from_domain(bo, domain);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

        p->bytes_moved += ctx.bytes_moved;
        if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
            amdgpu_bo_in_cpu_visible_vram(bo))
                p->bytes_moved_vis += ctx.bytes_moved;

        if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
                domain = bo->allowed_domains;
                goto retry;
        }

        return r;
}

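/**
 * amdgpu_cs_list_validate - validate all BOs on a list
 * @p: CS parser state
 * @validated: list of amdgpu_bo_list_entry to validate
 *
 * Rejects userptr BOs that belong to a foreign process, rebinds userptr
 * BOs whose backing pages were invalidated and then validates each BO
 * through amdgpu_cs_bo_validate().
 */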
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                                   struct list_head *validated)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct amdgpu_bo_list_entry *lobj;
        int r;

        list_for_each_entry(lobj, validated, tv.head) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
                struct mm_struct *usermm;

                usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
                if (usermm && usermm != current->mm)
                        return -EPERM;

                if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
                    lobj->user_invalidated && lobj->user_pages) {
                        amdgpu_bo_placement_from_domain(bo,
                                                        AMDGPU_GEM_DOMAIN_CPU);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                        if (r)
                                return r;

                        amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
                                                     lobj->user_pages);
                }

                r = amdgpu_cs_bo_validate(p, bo);
                if (r)
                        return r;

                kvfree(lobj->user_pages);
                lobj->user_pages = NULL;
        }
        return 0;
}

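/**
 * amdgpu_cs_parser_bos - reserve and validate all BOs of the submission
 * @p: CS parser state
 * @cs: ioctl argument, used for the optional bo_list_handle
 *
 * Collects the BO list, pins down userptr pages, reserves all buffers with
 * a shared ww_mutex ticket and validates them within the current move
 * budget. On success the bo_list_mutex stays locked until submit or the
 * error handling of the ioctl unlocks it.
 */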
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                                union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_list_entry *e;
        struct list_head duplicates;
        unsigned int i;
        int r;

        INIT_LIST_HEAD(&p->validated);

        /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
        if (cs->in.bo_list_handle) {
                if (p->bo_list)
                        return -EINVAL;

                r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
                                       &p->bo_list);
                if (r)
                        return r;
        } else if (!p->bo_list) {
                /* Create an empty bo_list when no handle is provided */
                r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
                                          &p->bo_list);
                if (r)
                        return r;
        }

        mutex_lock(&p->bo_list->bo_list_mutex);

        /* One for TTM and one for the CS job */
        amdgpu_bo_list_for_each_entry(e, p->bo_list)
                e->tv.num_shared = 2;

        amdgpu_bo_list_get_list(p->bo_list, &p->validated);

        INIT_LIST_HEAD(&duplicates);
        amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

        if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
                list_add(&p->uf_entry.tv.head, &p->validated);

        /* Get userptr backing pages. If the pages were updated after they
         * were registered in amdgpu_gem_userptr_ioctl(),
         * amdgpu_cs_list_validate() will do amdgpu_ttm_backend_bind() to
         * flush and invalidate the new pages.
         */
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
                bool userpage_invalidated = false;
                int i;

                e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
                                               sizeof(struct page *),
                                               GFP_KERNEL | __GFP_ZERO);
                if (!e->user_pages) {
                        DRM_ERROR("kvmalloc_array failure\n");
                        r = -ENOMEM;
                        goto out_free_user_pages;
                }

                r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
                if (r) {
                        kvfree(e->user_pages);
                        e->user_pages = NULL;
                        goto out_free_user_pages;
                }

                for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
                        if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
                                userpage_invalidated = true;
                                break;
                        }
                }
                e->user_invalidated = userpage_invalidated;
        }

        r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
                                   &duplicates);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
                goto out_free_user_pages;
        }

        amdgpu_bo_list_for_each_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                e->bo_va = amdgpu_vm_bo_find(vm, bo);
        }

        amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
                                          &p->bytes_moved_vis_threshold);
        p->bytes_moved = 0;
        p->bytes_moved_vis = 0;

        r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
                                      amdgpu_cs_bo_validate, p);
        if (r) {
                DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
                goto error_validate;
        }

        r = amdgpu_cs_list_validate(p, &duplicates);
        if (r)
                goto error_validate;

        r = amdgpu_cs_list_validate(p, &p->validated);
        if (r)
                goto error_validate;

        if (p->uf_entry.tv.bo) {
                struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

                r = amdgpu_ttm_alloc_gart(&uf->tbo);
                if (r)
                        goto error_validate;

                p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(uf);
        }

        amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
                                     p->bytes_moved_vis);

        for (i = 0; i < p->gang_size; ++i)
                amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
                                         p->bo_list->gws_obj,
                                         p->bo_list->oa_obj);
        return 0;

error_validate:
        ttm_eu_backoff_reservation(&p->ticket, &p->validated);

out_free_user_pages:
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                if (!e->user_pages)
                        continue;
                amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
                kvfree(e->user_pages);
                e->user_pages = NULL;
        }
        mutex_unlock(&p->bo_list->bo_list_mutex);
        return r;
}

static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
{
        int i, j;

        if (!trace_amdgpu_cs_enabled())
                return;

        for (i = 0; i < p->gang_size; ++i) {
                struct amdgpu_job *job = p->jobs[i];

                for (j = 0; j < job->num_ibs; ++j)
                        trace_amdgpu_cs(p, job, &job->ibs[j]);
        }
}

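/**
 * amdgpu_cs_patch_ibs - parse or patch the IBs of a job in place
 * @p: CS parser state
 * @job: job whose IBs should be checked
 *
 * Only used for UVD/VCE VM emulation: maps each IB, verifies that it lies
 * completely inside its VA mapping and then either copies and parses it
 * (parse_cs) or patches it in place (patch_cs_in_place).
 */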
static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
                               struct amdgpu_job *job)
{
        struct amdgpu_ring *ring = amdgpu_job_ring(job);
        unsigned int i;
        int r;

        /* Only for UVD/VCE VM emulation */
        if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
                return 0;

        for (i = 0; i < job->num_ibs; ++i) {
                struct amdgpu_ib *ib = &job->ibs[i];
                struct amdgpu_bo_va_mapping *m;
                struct amdgpu_bo *aobj;
                uint64_t va_start;
                uint8_t *kptr;

                va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
                r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
                if (r) {
                        DRM_ERROR("IB va_start is invalid\n");
                        return r;
                }

                if ((va_start + ib->length_dw * 4) >
                    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
                        DRM_ERROR("IB va_start+ib_bytes is invalid\n");
                        return -EINVAL;
                }

                /* the IB should be reserved at this point */
                r = amdgpu_bo_kmap(aobj, (void **)&kptr);
                if (r)
                        return r;

                kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);

                if (ring->funcs->parse_cs) {
                        memcpy(ib->ptr, kptr, ib->length_dw * 4);
                        amdgpu_bo_kunmap(aobj);

                        r = amdgpu_ring_parse_cs(ring, p, job, ib);
                        if (r)
                                return r;
                } else {
                        ib->ptr = (uint32_t *)kptr;
                        r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
                        amdgpu_bo_kunmap(aobj);
                        if (r)
                                return r;
                }
        }

        return 0;
}

static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
{
        unsigned int i;
        int r;

        for (i = 0; i < p->gang_size; ++i) {
                r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
                if (r)
                        return r;
        }
        return 0;
}

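/**
 * amdgpu_cs_vm_handling - update the page tables for the submission
 * @p: CS parser state
 *
 * Clears freed mappings, updates the PRT, CSA and per-BO VAs, handles
 * moved BOs and updates the page directories. All resulting page table
 * updates are added as dependencies of the gang leader.
 */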
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_job *job = p->gang_leader;
        struct amdgpu_device *adev = p->adev;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_list_entry *e;
        struct amdgpu_bo_va *bo_va;
        struct amdgpu_bo *bo;
        unsigned int i;
        int r;

        r = amdgpu_vm_clear_freed(adev, vm, NULL);
        if (r)
                return r;

        r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
        if (r)
                return r;

        r = amdgpu_sync_fence(&job->sync, fpriv->prt_va->last_pt_update);
        if (r)
                return r;

        if (fpriv->csa_va) {
                bo_va = fpriv->csa_va;
                r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        return r;

                r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
                if (r)
                        return r;
        }

        amdgpu_bo_list_for_each_entry(e, p->bo_list) {
                /* ignore duplicates */
                bo = ttm_to_amdgpu_bo(e->tv.bo);
                if (!bo)
                        continue;

                bo_va = e->bo_va;
                if (bo_va == NULL)
                        continue;

                r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        return r;

                r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
                if (r)
                        return r;
        }

        r = amdgpu_vm_handle_moved(adev, vm);
        if (r)
                return r;

        r = amdgpu_vm_update_pdes(adev, vm, false);
        if (r)
                return r;

        r = amdgpu_sync_fence(&job->sync, vm->last_update);
        if (r)
                return r;

        for (i = 0; i < p->gang_size; ++i) {
                job = p->jobs[i];

                if (!job->vm)
                        continue;

                job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
        }

        if (amdgpu_vm_debug) {
                /* Invalidate all BOs to test for userspace bugs */
                amdgpu_bo_list_for_each_entry(e, p->bo_list) {
                        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                        /* ignore duplicates */
                        if (!bo)
                                continue;

                        amdgpu_vm_bo_invalidate(adev, bo, false);
                }
        }

        return 0;
}

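/**
 * amdgpu_cs_sync_rings - wait for fences of all involved BOs
 * @p: CS parser state
 *
 * Adds the reservation object fences of every validated BO to the gang
 * leader, honoring explicit sync, clones those dependencies into the other
 * gang members and finally waits for the previous fence of the context
 * entity.
 */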
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_job *leader = p->gang_leader;
        struct amdgpu_bo_list_entry *e;
        unsigned int i;
        int r;

        list_for_each_entry(e, &p->validated, tv.head) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
                struct dma_resv *resv = bo->tbo.base.resv;
                enum amdgpu_sync_mode sync_mode;

                sync_mode = amdgpu_bo_explicit_sync(bo) ?
                        AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
                r = amdgpu_sync_resv(p->adev, &leader->sync, resv, sync_mode,
                                     &fpriv->vm);
                if (r)
                        return r;
        }

        for (i = 0; i < p->gang_size; ++i) {
                if (p->jobs[i] == leader)
                        continue;

                r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync);
                if (r)
                        return r;
        }

        r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
        if (r && r != -ERESTARTSYS)
                DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
        return r;
}

static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
        int i;

        for (i = 0; i < p->num_post_deps; ++i) {
                if (p->post_deps[i].chain && p->post_deps[i].point) {
                        drm_syncobj_add_point(p->post_deps[i].syncobj,
                                              p->post_deps[i].chain,
                                              p->fence, p->post_deps[i].point);
                        p->post_deps[i].chain = NULL;
                } else {
                        drm_syncobj_replace_fence(p->post_deps[i].syncobj,
                                                  p->fence);
                }
        }
}

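/**
 * amdgpu_cs_submit - push the jobs of the submission to the scheduler
 * @p: CS parser state
 * @cs: ioctl argument, receives the sequence number in cs->out.handle
 *
 * Arms all scheduler jobs, makes every gang member depend on the others
 * being scheduled and, under the notifier lock, re-checks that no userptr
 * pages were invalidated before the fences are added to the BOs and the
 * jobs are pushed to their entities.
 */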
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
                            union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_job *leader = p->gang_leader;
        struct amdgpu_bo_list_entry *e;
        unsigned int i;
        uint64_t seq;
        int r;

        for (i = 0; i < p->gang_size; ++i)
                drm_sched_job_arm(&p->jobs[i]->base);

        for (i = 0; i < p->gang_size; ++i) {
                struct dma_fence *fence;

                if (p->jobs[i] == leader)
                        continue;

                fence = &p->jobs[i]->base.s_fence->scheduled;
                r = amdgpu_sync_fence(&leader->sync, fence);
                if (r)
                        goto error_cleanup;
        }

        if (p->gang_size > 1) {
                for (i = 0; i < p->gang_size; ++i)
                        amdgpu_job_set_gang_leader(p->jobs[i], leader);
        }

        /* No memory allocation is allowed while holding the notifier lock.
         * The lock is held until amdgpu_cs_submit is finished and the fence
         * is added to the BOs.
         */
        mutex_lock(&p->adev->notifier_lock);

        /* If the userptr pages were invalidated after amdgpu_cs_parser_bos(),
         * return -EAGAIN; drmIoctl() in libdrm will then restart the
         * amdgpu_cs ioctl.
         */
        r = 0;
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
        }
        if (r) {
                r = -EAGAIN;
                goto error_unlock;
        }

        p->fence = dma_fence_get(&leader->base.s_fence->finished);
        list_for_each_entry(e, &p->validated, tv.head) {
                /* Everybody except for the gang leader uses READ */
                for (i = 0; i < p->gang_size; ++i) {
                        if (p->jobs[i] == leader)
                                continue;

                        dma_resv_add_fence(e->tv.bo->base.resv,
                                           &p->jobs[i]->base.s_fence->finished,
                                           DMA_RESV_USAGE_READ);
                }

                /* The gang leader is remembered as writer */
                e->tv.num_shared = 0;
        }

        seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
                                   p->fence);
        amdgpu_cs_post_dependencies(p);

        if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
            !p->ctx->preamble_presented) {
                leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
                p->ctx->preamble_presented = true;
        }

        cs->out.handle = seq;
        leader->uf_sequence = seq;

        amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
        for (i = 0; i < p->gang_size; ++i) {
                amdgpu_job_free_resources(p->jobs[i]);
                trace_amdgpu_cs_ioctl(p->jobs[i]);
                drm_sched_entity_push_job(&p->jobs[i]->base);
                p->jobs[i] = NULL;
        }

        amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
        ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);

        mutex_unlock(&p->adev->notifier_lock);
        mutex_unlock(&p->bo_list->bo_list_mutex);
        return 0;

error_unlock:
        mutex_unlock(&p->adev->notifier_lock);

error_cleanup:
        for (i = 0; i < p->gang_size; ++i)
                drm_sched_job_cleanup(&p->jobs[i]->base);
        return r;
}

/* Cleanup the parser structure */
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
{
        unsigned int i;

        for (i = 0; i < parser->num_post_deps; i++) {
                drm_syncobj_put(parser->post_deps[i].syncobj);
                /* Free unused chains with the function matching
                 * dma_fence_chain_alloc(); consumed chains are NULL here.
                 */
                dma_fence_chain_free(parser->post_deps[i].chain);
        }
        kfree(parser->post_deps);

        dma_fence_put(parser->fence);

        if (parser->ctx)
                amdgpu_ctx_put(parser->ctx);
        if (parser->bo_list)
                amdgpu_bo_list_put(parser->bo_list);

        for (i = 0; i < parser->nchunks; i++)
                kvfree(parser->chunks[i].kdata);
        kvfree(parser->chunks);
        for (i = 0; i < parser->gang_size; ++i) {
                if (parser->jobs[i])
                        amdgpu_job_free(parser->jobs[i]);
        }
        if (parser->uf_entry.tv.bo) {
                struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

                amdgpu_bo_unref(&uf);
        }
}

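/**
 * amdgpu_cs_ioctl - process a command submission from userspace
 * @dev: drm device
 * @data: union drm_amdgpu_cs from userspace
 * @filp: file private
 *
 * Top level entry point of the CS IOCTL: runs the two parser passes,
 * reserves and validates the buffers, handles the VM updates, syncs the
 * rings and finally submits the jobs to the scheduler.
 */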
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_cs_parser parser;
        int r;

        if (amdgpu_ras_intr_triggered())
                return -EHWPOISON;

        if (!adev->accel_working)
                return -EBUSY;

        r = amdgpu_cs_parser_init(&parser, adev, filp, data);
        if (r) {
                if (printk_ratelimit())
                        DRM_ERROR("Failed to initialize parser %d!\n", r);
                return r;
        }

        r = amdgpu_cs_pass1(&parser, data);
        if (r)
                goto error_fini;

        r = amdgpu_cs_pass2(&parser);
        if (r)
                goto error_fini;

        r = amdgpu_cs_parser_bos(&parser, data);
        if (r) {
                if (r == -ENOMEM)
                        DRM_ERROR("Not enough memory for command submission!\n");
                else if (r != -ERESTARTSYS && r != -EAGAIN)
                        DRM_ERROR("Failed to process the buffer list %d!\n", r);
                goto error_fini;
        }

        r = amdgpu_cs_patch_jobs(&parser);
        if (r)
                goto error_backoff;

        r = amdgpu_cs_vm_handling(&parser);
        if (r)
                goto error_backoff;

        r = amdgpu_cs_sync_rings(&parser);
        if (r)
                goto error_backoff;

        trace_amdgpu_cs_ibs(&parser);

        r = amdgpu_cs_submit(&parser, data);
        if (r)
                goto error_backoff;

        amdgpu_cs_parser_fini(&parser);
        return 0;

error_backoff:
        ttm_eu_backoff_reservation(&parser.ticket, &parser.validated);
        mutex_unlock(&parser.bo_list->bo_list_mutex);

error_fini:
        amdgpu_cs_parser_fini(&parser);
        return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *filp)
{
        union drm_amdgpu_wait_cs *wait = data;
        unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
        struct drm_sched_entity *entity;
        struct amdgpu_ctx *ctx;
        struct dma_fence *fence;
        long r;

        ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
        if (ctx == NULL)
                return -EINVAL;

        r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
                                  wait->in.ring, &entity);
        if (r) {
                amdgpu_ctx_put(ctx);
                return r;
        }

        fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
        if (IS_ERR(fence)) {
                r = PTR_ERR(fence);
        } else if (fence) {
                r = dma_fence_wait_timeout(fence, true, timeout);
                if (r > 0 && fence->error)
                        r = fence->error;
                dma_fence_put(fence);
        } else {
                r = 1;
        }

        amdgpu_ctx_put(ctx);
        if (r < 0)
                return r;

        memset(wait, 0, sizeof(*wait));
        wait->out.status = (r == 0);

        return 0;
}
1483
/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 *
 * Returns the fence identified by @user, NULL if that fence has already
 * signaled and been retired, or an error pointer on failure.
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
                                             struct drm_file *filp,
                                             struct drm_amdgpu_fence *user)
{
        struct drm_sched_entity *entity;
        struct amdgpu_ctx *ctx;
        struct dma_fence *fence;
        int r;

        ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
        if (ctx == NULL)
                return ERR_PTR(-EINVAL);

        r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
                                  user->ring, &entity);
        if (r) {
                amdgpu_ctx_put(ctx);
                return ERR_PTR(r);
        }

        fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
        amdgpu_ctx_put(ctx);

        return fence;
}

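/**
 * amdgpu_cs_fence_to_handle_ioctl - convert a fence into a syncobj or sync_file
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Look up the fence of a previous submission and convert it into a syncobj
 * handle, a syncobj fd or a sync_file fd, depending on what userspace asks
 * for.
 */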
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *filp)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        union drm_amdgpu_fence_to_handle *info = data;
        struct dma_fence *fence;
        struct drm_syncobj *syncobj;
        struct sync_file *sync_file;
        int fd, r;

        fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
        if (IS_ERR(fence))
                return PTR_ERR(fence);

        if (!fence)
                fence = dma_fence_get_stub();

        switch (info->in.what) {
        case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
                r = drm_syncobj_create(&syncobj, 0, fence);
                dma_fence_put(fence);
                if (r)
                        return r;
                r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
                drm_syncobj_put(syncobj);
                return r;

        case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
                r = drm_syncobj_create(&syncobj, 0, fence);
                dma_fence_put(fence);
                if (r)
                        return r;
                r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
                drm_syncobj_put(syncobj);
                return r;

        case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
                fd = get_unused_fd_flags(O_CLOEXEC);
                if (fd < 0) {
                        dma_fence_put(fence);
                        return fd;
                }

                sync_file = sync_file_create(fence);
                dma_fence_put(fence);
                if (!sync_file) {
                        put_unused_fd(fd);
                        return -ENOMEM;
                }

                fd_install(fd, sync_file->file);
                info->out.handle = fd;
                return 0;

        default:
                dma_fence_put(fence);
                return -EINVAL;
        }
}

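/*
 * Illustrative userspace use of the ioctl above to export a submission as
 * a pollable sync_file. A sketch, not part of the driver: fd, ctx_id and
 * seq_no are assumed to come from an earlier DRM_IOCTL_AMDGPU_CS.
 *
 *      union drm_amdgpu_fence_to_handle fth = {};
 *      struct pollfd pfd;
 *
 *      fth.in.fence.ctx_id = ctx_id;
 *      fth.in.fence.ip_type = AMDGPU_HW_IP_GFX;
 *      fth.in.fence.seq_no = seq_no;
 *      fth.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;
 *      drmIoctl(fd, DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, &fth);
 *
 *      pfd.fd = fth.out.handle;        // signals POLLIN once the fence signals
 *      pfd.events = POLLIN;
 *      poll(&pfd, 1, -1);
 */
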
/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
                                     struct drm_file *filp,
                                     union drm_amdgpu_wait_fences *wait,
                                     struct drm_amdgpu_fence *fences)
{
        uint32_t fence_count = wait->in.fence_count;
        unsigned int i;
        long r = 1;

        for (i = 0; i < fence_count; i++) {
                struct dma_fence *fence;
                unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

                fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
                if (IS_ERR(fence))
                        return PTR_ERR(fence);
                else if (!fence)
                        continue;

                r = dma_fence_wait_timeout(fence, true, timeout);
                /* Read the fence error before dropping our reference */
                if (r > 0 && fence->error)
                        r = fence->error;

                dma_fence_put(fence);
                if (r < 0)
                        return r;

                if (r == 0)
                        break;
        }

        memset(wait, 0, sizeof(*wait));
        wait->out.status = (r > 0);

        return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
                                    struct drm_file *filp,
                                    union drm_amdgpu_wait_fences *wait,
                                    struct drm_amdgpu_fence *fences)
{
        unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
        uint32_t fence_count = wait->in.fence_count;
        uint32_t first = ~0;
        struct dma_fence **array;
        unsigned int i;
        long r;

        /* Prepare the fence array */
        array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

        if (array == NULL)
                return -ENOMEM;

        for (i = 0; i < fence_count; i++) {
                struct dma_fence *fence;

                fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
                if (IS_ERR(fence)) {
                        r = PTR_ERR(fence);
                        goto err_free_fence_array;
                } else if (fence) {
                        array[i] = fence;
                } else { /* NULL, the fence has been already signaled */
                        r = 1;
                        first = i;
                        goto out;
                }
        }

        r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
                                       &first);
        if (r < 0)
                goto err_free_fence_array;

out:
        memset(wait, 0, sizeof(*wait));
        wait->out.status = (r > 0);
        wait->out.first_signaled = first;

        if (first < fence_count && array[first])
                r = array[first]->error;
        else
                r = 0;

err_free_fence_array:
        for (i = 0; i < fence_count; i++)
                dma_fence_put(array[i]);
        kfree(array);

        return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        union drm_amdgpu_wait_fences *wait = data;
        uint32_t fence_count = wait->in.fence_count;
        struct drm_amdgpu_fence *fences_user;
        struct drm_amdgpu_fence *fences;
        int r;

        /* Get the fences from userspace */
        fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
                               GFP_KERNEL);
        if (fences == NULL)
                return -ENOMEM;

        fences_user = u64_to_user_ptr(wait->in.fences);
        if (copy_from_user(fences, fences_user,
                           sizeof(struct drm_amdgpu_fence) * fence_count)) {
                r = -EFAULT;
                goto err_free_fences;
        }

        if (wait->in.wait_all)
                r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
        else
                r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
        kfree(fences);

        return r;
}

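/*
 * Illustrative userspace use of the ioctl above, waiting for whichever of
 * two submissions finishes first. A sketch, not part of the driver: fd,
 * ctx_id and the sequence numbers s0/s1 are assumed.
 *
 *      struct drm_amdgpu_fence fences[2] = {
 *              { .ctx_id = ctx_id, .ip_type = AMDGPU_HW_IP_GFX, .seq_no = s0 },
 *              { .ctx_id = ctx_id, .ip_type = AMDGPU_HW_IP_GFX, .seq_no = s1 },
 *      };
 *      union drm_amdgpu_wait_fences wf = {};
 *
 *      wf.in.fences = (uintptr_t)fences;
 *      wf.in.fence_count = 2;
 *      wf.in.wait_all = 0;             // any-mode, see amdgpu_cs_wait_any_fence()
 *      wf.in.timeout_ns = 1000000000;
 *      drmIoctl(fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &wf);
 *      // wf.out.status != 0: wf.out.first_signaled indexes the finished fence
 */
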
/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: Placeholder to return found BO mapping
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns 0 and fills in @bo and @map when a mapping
 * is found, a negative error code otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                           uint64_t addr, struct amdgpu_bo **bo,
                           struct amdgpu_bo_va_mapping **map)
{
        struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
        struct ttm_operation_ctx ctx = { false, false };
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va_mapping *mapping;
        int r;

        addr /= AMDGPU_GPU_PAGE_SIZE;

        mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
        if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
                return -EINVAL;

        *bo = mapping->bo_va->base.bo;
        *map = mapping;

        /* Double check that the BO is reserved by this CS */
        if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
                return -EINVAL;

        if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
                (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
                amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
                r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
                if (r)
                        return r;
        }

        return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}
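
/*
 * Illustrative in-kernel caller of amdgpu_cs_find_mapping(), modelled on
 * the UVD/VCE command stream parsers. A hedged sketch: example_patch_addr()
 * and its surrounding flow are hypothetical, not part of this file.
 *
 *      static int example_patch_addr(struct amdgpu_cs_parser *p, uint64_t addr)
 *      {
 *              struct amdgpu_bo_va_mapping *map;
 *              struct amdgpu_bo *bo;
 *              int r;
 *
 *              r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
 *              if (r)  // addr not mapped by a BO reserved in this CS
 *                      return r;
 *
 *              // The BO is now placed contiguously and GART-bound, so the
 *              // command stream can safely be patched with addresses in it.
 *              return 0;
 *      }
 */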