drm/amdgpu: fix userptr HMM range handling v2
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */

#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu_cs.h"
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"

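/*
 * Basic one-time parser setup: reject empty submissions, reset the parser
 * state and take a reference on the submitting context. Contexts already
 * marked guilty (e.g. after causing a GPU hang) are refused with -ECANCELED.
 */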
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
                                 struct amdgpu_device *adev,
                                 struct drm_file *filp,
                                 union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = filp->driver_priv;

        if (cs->in.num_chunks == 0)
                return -EINVAL;

        memset(p, 0, sizeof(*p));
        p->adev = adev;
        p->filp = filp;

        p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
        if (!p->ctx)
                return -EINVAL;

        if (atomic_read(&p->ctx->guilty)) {
                amdgpu_ctx_put(p->ctx);
                return -ECANCELED;
        }
        return 0;
}

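/*
 * Map an IB chunk to a job slot within the gang: look up the scheduler
 * entity for the requested HW IP and either return the index of an existing
 * job already using that entity, or grow the gang by one slot, bounded by
 * AMDGPU_CS_GANG_SIZE.
 */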
static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
                             struct drm_amdgpu_cs_chunk_ib *chunk_ib)
{
        struct drm_sched_entity *entity;
        unsigned int i;
        int r;

        r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
                                  chunk_ib->ip_instance,
                                  chunk_ib->ring, &entity);
        if (r)
                return r;

        /*
         * Abort if there is no run queue associated with this entity,
         * possibly because the HW IP is disabled.
         */
        if (entity->rq == NULL)
                return -EINVAL;

        /* Check if we can add this IB to some existing job */
        for (i = 0; i < p->gang_size; ++i)
                if (p->entities[i] == entity)
                        return i;

        /* If not, increase the gang size if possible */
        if (i == AMDGPU_CS_GANG_SIZE)
                return -EINVAL;

        p->entities[i] = entity;
        p->gang_size = i + 1;
        return i;
}

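/*
 * First pass over an IB chunk: account the IB to its job slot and remember
 * the slot of the last IB chunk seen as the gang leader.
 */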
static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
                           struct drm_amdgpu_cs_chunk_ib *chunk_ib,
                           unsigned int *num_ibs)
{
        int r;

        r = amdgpu_cs_job_idx(p, chunk_ib);
        if (r < 0)
                return r;

        ++(num_ibs[r]);
        p->gang_leader_idx = r;
        return 0;
}

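/*
 * First pass over a user fence chunk: look up the GEM object backing the
 * fence, hold a reference on it for the duration of the submission and
 * check that the BO is a single page, that the 8 byte fence fits at the
 * requested offset and that the BO is not a userptr.
 */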
static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
                                   struct drm_amdgpu_cs_chunk_fence *data,
                                   uint32_t *offset)
{
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
        unsigned long size;
        int r;

        gobj = drm_gem_object_lookup(p->filp, data->handle);
        if (gobj == NULL)
                return -EINVAL;

        bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
        p->uf_entry.priority = 0;
        p->uf_entry.tv.bo = &bo->tbo;
        /* One for TTM and two for the CS job */
        p->uf_entry.tv.num_shared = 3;

        drm_gem_object_put(gobj);

        size = amdgpu_bo_size(bo);
        if (size != PAGE_SIZE || (data->offset + 8) > size) {
                r = -EINVAL;
                goto error_unref;
        }

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
                r = -EINVAL;
                goto error_unref;
        }

        *offset = data->offset;

        return 0;

error_unref:
        amdgpu_bo_unref(&bo);
        return r;
}

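/*
 * First pass over a BO handles chunk: build the bo_list directly from the
 * handles passed inline with the CS instead of from a pre-created list.
 */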
static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
                                   struct drm_amdgpu_bo_list_in *data)
{
        struct drm_amdgpu_bo_list_entry *info;
        int r;

        r = amdgpu_bo_create_list_entry_array(data, &info);
        if (r)
                return r;

        r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
                                  &p->bo_list);
        if (r)
                goto error_free;

        kvfree(info);
        return 0;

error_free:
        kvfree(info);

        return r;
}

/* Copy the data from userspace and go over it the first time */
static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
                           union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
        struct amdgpu_vm *vm = &fpriv->vm;
        uint64_t *chunk_array_user;
        uint64_t *chunk_array;
        uint32_t uf_offset = 0;
        unsigned int size;
        int ret;
        int i;

        chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
                                     GFP_KERNEL);
        if (!chunk_array)
                return -ENOMEM;

        /* get chunks */
        chunk_array_user = u64_to_user_ptr(cs->in.chunks);
        if (copy_from_user(chunk_array, chunk_array_user,
                           sizeof(uint64_t)*cs->in.num_chunks)) {
                ret = -EFAULT;
                goto free_chunk;
        }

        p->nchunks = cs->in.num_chunks;
        p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
                            GFP_KERNEL);
        if (!p->chunks) {
                ret = -ENOMEM;
                goto free_chunk;
        }

        for (i = 0; i < p->nchunks; i++) {
                struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
                struct drm_amdgpu_cs_chunk user_chunk;
                uint32_t __user *cdata;

                chunk_ptr = u64_to_user_ptr(chunk_array[i]);
                if (copy_from_user(&user_chunk, chunk_ptr,
                                       sizeof(struct drm_amdgpu_cs_chunk))) {
                        ret = -EFAULT;
                        i--;
                        goto free_partial_kdata;
                }
                p->chunks[i].chunk_id = user_chunk.chunk_id;
                p->chunks[i].length_dw = user_chunk.length_dw;

                size = p->chunks[i].length_dw;
                cdata = u64_to_user_ptr(user_chunk.chunk_data);

                p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
                                                    GFP_KERNEL);
                if (p->chunks[i].kdata == NULL) {
                        ret = -ENOMEM;
                        i--;
                        goto free_partial_kdata;
                }
                size *= sizeof(uint32_t);
                if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
                        ret = -EFAULT;
                        goto free_partial_kdata;
                }

                /* Assume the worst for the following checks */
                ret = -EINVAL;
                switch (p->chunks[i].chunk_id) {
                case AMDGPU_CHUNK_ID_IB:
                        if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
                                goto free_partial_kdata;

                        ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
                        if (ret)
                                goto free_partial_kdata;
                        break;

                case AMDGPU_CHUNK_ID_FENCE:
                        if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
                                goto free_partial_kdata;

                        ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
                                                      &uf_offset);
                        if (ret)
                                goto free_partial_kdata;
                        break;

                case AMDGPU_CHUNK_ID_BO_HANDLES:
                        if (size < sizeof(struct drm_amdgpu_bo_list_in))
                                goto free_partial_kdata;

                        ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
                        if (ret)
                                goto free_partial_kdata;
                        break;

                case AMDGPU_CHUNK_ID_DEPENDENCIES:
                case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
                case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
                case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
                        break;

                default:
                        goto free_partial_kdata;
                }
        }

        if (!p->gang_size) {
                ret = -EINVAL;
                goto free_partial_kdata;
        }

        for (i = 0; i < p->gang_size; ++i) {
                ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm);
                if (ret)
                        goto free_all_kdata;

                ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i],
                                         &fpriv->vm);
                if (ret)
                        goto free_all_kdata;
        }
        p->gang_leader = p->jobs[p->gang_leader_idx];

        if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) {
                ret = -ECANCELED;
                goto free_all_kdata;
        }

        if (p->uf_entry.tv.bo)
                p->gang_leader->uf_addr = uf_offset;
        kvfree(chunk_array);

        /* Use this opportunity to fill in task info for the vm */
        amdgpu_vm_set_task_info(vm);

        return 0;

free_all_kdata:
        i = p->nchunks - 1;
free_partial_kdata:
        for (; i >= 0; i--)
                kvfree(p->chunks[i].kdata);
        kvfree(p->chunks);
        p->chunks = NULL;
        p->nchunks = 0;
free_chunk:
        kvfree(chunk_array);

        return ret;
}

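/*
 * Second pass over an IB chunk: allocate the IB in the right job, enforce
 * the user fence and preemption constraints and record the IB's GPU
 * address, size and flags.
 */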
static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
                           struct amdgpu_cs_chunk *chunk,
                           unsigned int *ce_preempt,
                           unsigned int *de_preempt)
{
        struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_ring *ring;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        int r;

        r = amdgpu_cs_job_idx(p, chunk_ib);
        if (r < 0)
                return r;

        job = p->jobs[r];
        ring = amdgpu_job_ring(job);
        ib = &job->ibs[job->num_ibs++];

        /* MM engine doesn't support user fences */
        if (p->uf_entry.tv.bo && ring->funcs->no_user_fence)
                return -EINVAL;

        if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
            chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
                if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
                        (*ce_preempt)++;
                else
                        (*de_preempt)++;

                /* Each GFX command submission allows at most one
                 * preemptible IB each for CE and DE */
                if (*ce_preempt > 1 || *de_preempt > 1)
                        return -EINVAL;
        }

        if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
                job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;

        r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
                          chunk_ib->ib_bytes : 0,
                          AMDGPU_IB_POOL_DELAYED, ib);
        if (r) {
                DRM_ERROR("Failed to get ib!\n");
                return r;
        }

        ib->gpu_addr = chunk_ib->va_start;
        ib->length_dw = chunk_ib->ib_bytes / 4;
        ib->flags = chunk_ib->flags;
        return 0;
}

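/*
 * Second pass over a dependencies chunk: resolve each context/ring fence
 * handle and add it to the gang leader's sync object. For scheduled
 * dependencies the "scheduled" fence is used instead of the "finished"
 * one, so the submission only waits for the dependency to start running.
 */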
static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
                                     struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        unsigned int num_deps;
        int i, r;

        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_dep);

        for (i = 0; i < num_deps; ++i) {
                struct amdgpu_ctx *ctx;
                struct drm_sched_entity *entity;
                struct dma_fence *fence;

                ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
                if (ctx == NULL)
                        return -EINVAL;

                r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
                                          deps[i].ip_instance,
                                          deps[i].ring, &entity);
                if (r) {
                        amdgpu_ctx_put(ctx);
                        return r;
                }

                fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
                amdgpu_ctx_put(ctx);

                if (IS_ERR(fence))
                        return PTR_ERR(fence);
                else if (!fence)
                        continue;

                if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
                        struct drm_sched_fence *s_fence;
                        struct dma_fence *old = fence;

                        s_fence = to_drm_sched_fence(fence);
                        fence = dma_fence_get(&s_fence->scheduled);
                        dma_fence_put(old);
                }

                r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
                dma_fence_put(fence);
                if (r)
                        return r;
        }
        return 0;
}

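/*
 * Look up the fence behind a syncobj, optionally at a timeline point, and
 * add it as a dependency of the gang leader.
 */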
static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
                                         uint32_t handle, u64 point,
                                         u64 flags)
{
        struct dma_fence *fence;
        int r;

        r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
        if (r) {
                DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
                          handle, point, r);
                return r;
        }

        r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
        dma_fence_put(fence);

        return r;
}

static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
                                   struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
        unsigned int num_deps;
        int i, r;

        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_sem);
        for (i = 0; i < num_deps; ++i) {
                r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
                if (r)
                        return r;
        }

        return 0;
}

static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
                                              struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
        unsigned int num_deps;
        int i, r;

        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_syncobj);
        for (i = 0; i < num_deps; ++i) {
                r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
                                                  syncobj_deps[i].point,
                                                  syncobj_deps[i].flags);
                if (r)
                        return r;
        }

        return 0;
}

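/*
 * Second pass over a syncobj-out chunk: collect the syncobjs that must be
 * signaled once the submission fence exists. Only one post-dependency
 * chunk per submission is allowed.
 */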
static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
                                    struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
        unsigned int num_deps;
        int i;

        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_sem);

        if (p->post_deps)
                return -EINVAL;

        p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
                                     GFP_KERNEL);
        p->num_post_deps = 0;

        if (!p->post_deps)
                return -ENOMEM;

        for (i = 0; i < num_deps; ++i) {
                p->post_deps[i].syncobj =
                        drm_syncobj_find(p->filp, deps[i].handle);
                if (!p->post_deps[i].syncobj)
                        return -EINVAL;
                p->post_deps[i].chain = NULL;
                p->post_deps[i].point = 0;
                p->num_post_deps++;
        }

        return 0;
}

static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
                                                struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
        unsigned int num_deps;
        int i;

        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_syncobj);

        if (p->post_deps)
                return -EINVAL;

        p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
                                     GFP_KERNEL);
        p->num_post_deps = 0;

        if (!p->post_deps)
                return -ENOMEM;

        for (i = 0; i < num_deps; ++i) {
                struct amdgpu_cs_post_dep *dep = &p->post_deps[i];

                dep->chain = NULL;
                if (syncobj_deps[i].point) {
                        dep->chain = dma_fence_chain_alloc();
                        if (!dep->chain)
                                return -ENOMEM;
                }

                dep->syncobj = drm_syncobj_find(p->filp,
                                                syncobj_deps[i].handle);
                if (!dep->syncobj) {
                        dma_fence_chain_free(dep->chain);
                        return -EINVAL;
                }
                dep->point = syncobj_deps[i].point;
                p->num_post_deps++;
        }

        return 0;
}

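/*
 * Second parser pass: now that the jobs are allocated, walk the chunks
 * again and fill in the IBs, dependencies and syncobj bookkeeping.
 */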
static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
{
        unsigned int ce_preempt = 0, de_preempt = 0;
        int i, r;

        for (i = 0; i < p->nchunks; ++i) {
                struct amdgpu_cs_chunk *chunk;

                chunk = &p->chunks[i];

                switch (chunk->chunk_id) {
                case AMDGPU_CHUNK_ID_IB:
                        r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
                        if (r)
                                return r;
                        break;
                case AMDGPU_CHUNK_ID_DEPENDENCIES:
                case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
                        r = amdgpu_cs_p2_dependencies(p, chunk);
                        if (r)
                                return r;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
                        r = amdgpu_cs_p2_syncobj_in(p, chunk);
                        if (r)
                                return r;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
                        r = amdgpu_cs_p2_syncobj_out(p, chunk);
                        if (r)
                                return r;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
                        r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
                        if (r)
                                return r;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
                        r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
                        if (r)
                                return r;
                        break;
                }
        }

        return 0;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
        if (us <= 0 || !adev->mm_stats.log2_max_MBps)
                return 0;

        /* Since accum_us is incremented by a million per second, just
         * multiply it by the number of MB/s to get the number of bytes.
         */
        return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
        if (!adev->mm_stats.log2_max_MBps)
                return 0;

        return bytes >> adev->mm_stats.log2_max_MBps;
}

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
                                              u64 *max_bytes,
                                              u64 *max_vis_bytes)
{
        s64 time_us, increment_us;
        u64 free_vram, total_vram, used_vram;
        /* Allow a maximum of 200 accumulated ms. This is basically per-IB
         * throttling.
         *
         * It means that in order to get full max MBps, at least 5 IBs per
         * second must be submitted and not more than 200ms apart from each
         * other.
         */
        const s64 us_upper_bound = 200000;

        if (!adev->mm_stats.log2_max_MBps) {
                *max_bytes = 0;
                *max_vis_bytes = 0;
                return;
        }

        total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
        used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
        free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

        spin_lock(&adev->mm_stats.lock);

        /* Increase the amount of accumulated us. */
        time_us = ktime_to_us(ktime_get());
        increment_us = time_us - adev->mm_stats.last_update_us;
        adev->mm_stats.last_update_us = time_us;
        adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
                                      us_upper_bound);

        /* This prevents the short period of low performance when the VRAM
         * usage is low and the driver is in debt or doesn't have enough
         * accumulated us to fill VRAM quickly.
         *
         * The situation can occur in these cases:
         * - a lot of VRAM is freed by userspace
         * - the presence of a big buffer causes a lot of evictions
         *   (solution: split buffers into smaller ones)
         *
         * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
         * accum_us to a positive number.
         */
        if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
                s64 min_us;

                /* Be more aggressive on dGPUs. Try to fill a portion of free
                 * VRAM now.
                 */
                if (!(adev->flags & AMD_IS_APU))
                        min_us = bytes_to_us(adev, free_vram / 4);
                else
                        min_us = 0; /* Reset accum_us on APUs. */

                adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
        }

        /* This is set to 0 if the driver is in debt to disallow (optional)
         * buffer moves.
         */
        *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

        /* Do the same for visible VRAM if half of it is free */
        if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
                u64 total_vis_vram = adev->gmc.visible_vram_size;
                u64 used_vis_vram =
                        amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);

                if (used_vis_vram < total_vis_vram) {
                        u64 free_vis_vram = total_vis_vram - used_vis_vram;

                        adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
                                                          increment_us, us_upper_bound);

                        if (free_vis_vram >= total_vis_vram / 2)
                                adev->mm_stats.accum_us_vis =
                                        max(bytes_to_us(adev, free_vis_vram / 2),
                                            adev->mm_stats.accum_us_vis);
                }

                *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
        } else {
                *max_vis_bytes = 0;
        }

        spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
                                  u64 num_vis_bytes)
{
        spin_lock(&adev->mm_stats.lock);
        adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
        adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
        spin_unlock(&adev->mm_stats.lock);
}

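/*
 * Per-BO validation callback: (re)place a BO for this submission while
 * charging any migration against the move budget computed above. Falls
 * back to the allowed domains when the preferred placement runs out of
 * memory.
 */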
static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct amdgpu_cs_parser *p = param;
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
                .resv = bo->tbo.base.resv
        };
        uint32_t domain;
        int r;

        if (bo->tbo.pin_count)
                return 0;

        /* Don't move this buffer if we have depleted our allowance
         * to move it. Don't move anything if the threshold is zero.
         */
        if (p->bytes_moved < p->bytes_moved_threshold &&
            (!bo->tbo.base.dma_buf ||
            list_empty(&bo->tbo.base.dma_buf->attachments))) {
                if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
                    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
                        /* And don't move a CPU_ACCESS_REQUIRED BO to limited
                         * visible VRAM if we've depleted our allowance to do
                         * that.
                         */
                        if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
                                domain = bo->preferred_domains;
                        else
                                domain = bo->allowed_domains;
                } else {
                        domain = bo->preferred_domains;
                }
        } else {
                domain = bo->allowed_domains;
        }

retry:
        amdgpu_bo_placement_from_domain(bo, domain);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

        p->bytes_moved += ctx.bytes_moved;
        if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
            amdgpu_bo_in_cpu_visible_vram(bo))
                p->bytes_moved_vis += ctx.bytes_moved;

        if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
                domain = bo->allowed_domains;
                goto retry;
        }

        return r;
}

static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                            struct list_head *validated)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct amdgpu_bo_list_entry *lobj;
        int r;

        list_for_each_entry(lobj, validated, tv.head) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
                struct mm_struct *usermm;

                usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
                if (usermm && usermm != current->mm)
                        return -EPERM;

                if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
                    lobj->user_invalidated && lobj->user_pages) {
                        amdgpu_bo_placement_from_domain(bo,
                                                        AMDGPU_GEM_DOMAIN_CPU);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                        if (r)
                                return r;

                        amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
                                                     lobj->user_pages);
                }

                r = amdgpu_cs_bo_validate(p, bo);
                if (r)
                        return r;

                kvfree(lobj->user_pages);
                lobj->user_pages = NULL;
        }
        return 0;
}

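/*
 * Gather, reserve and validate all BOs referenced by the submission,
 * including userptr BOs whose backing pages may have to be re-fetched,
 * while staying within the byte budget this submission is allowed to move.
 */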
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                                union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_list_entry *e;
        struct list_head duplicates;
        unsigned int i;
        int r;

        INIT_LIST_HEAD(&p->validated);

        /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
        if (cs->in.bo_list_handle) {
                if (p->bo_list)
                        return -EINVAL;

                r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
                                       &p->bo_list);
                if (r)
                        return r;
        } else if (!p->bo_list) {
                /* Create an empty bo_list when no handle is provided */
                r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
                                          &p->bo_list);
                if (r)
                        return r;
        }

        mutex_lock(&p->bo_list->bo_list_mutex);

        /* One for TTM and one for the CS job */
        amdgpu_bo_list_for_each_entry(e, p->bo_list)
                e->tv.num_shared = 2;

        amdgpu_bo_list_get_list(p->bo_list, &p->validated);

        INIT_LIST_HEAD(&duplicates);
        amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

        if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
                list_add(&p->uf_entry.tv.head, &p->validated);

        /* Get userptr backing pages. If pages are updated after they were
         * registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate()
         * will do amdgpu_ttm_backend_bind() to flush and invalidate the new
         * pages.
         */
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
                bool userpage_invalidated = false;
                int i;

                e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
                                        sizeof(struct page *),
                                        GFP_KERNEL | __GFP_ZERO);
                if (!e->user_pages) {
                        DRM_ERROR("kvmalloc_array failure\n");
                        r = -ENOMEM;
                        goto out_free_user_pages;
                }

                r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
                if (r) {
                        kvfree(e->user_pages);
                        e->user_pages = NULL;
                        goto out_free_user_pages;
                }

                for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
                        if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
                                userpage_invalidated = true;
                                break;
                        }
                }
                e->user_invalidated = userpage_invalidated;
        }

        r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
                                   &duplicates);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
                goto out_free_user_pages;
        }

        amdgpu_bo_list_for_each_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                e->bo_va = amdgpu_vm_bo_find(vm, bo);
        }

        amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
                                          &p->bytes_moved_vis_threshold);
        p->bytes_moved = 0;
        p->bytes_moved_vis = 0;

        r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
                                      amdgpu_cs_bo_validate, p);
        if (r) {
                DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
                goto error_validate;
        }

        r = amdgpu_cs_list_validate(p, &duplicates);
        if (r)
                goto error_validate;

        r = amdgpu_cs_list_validate(p, &p->validated);
        if (r)
                goto error_validate;

        if (p->uf_entry.tv.bo) {
                struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

                r = amdgpu_ttm_alloc_gart(&uf->tbo);
                if (r)
                        goto error_validate;

                p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(uf);
        }

        amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
                                     p->bytes_moved_vis);

        for (i = 0; i < p->gang_size; ++i)
                amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
                                         p->bo_list->gws_obj,
                                         p->bo_list->oa_obj);
        return 0;

error_validate:
        ttm_eu_backoff_reservation(&p->ticket, &p->validated);

out_free_user_pages:
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                if (!e->user_pages)
                        continue;
                amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
                kvfree(e->user_pages);
                e->user_pages = NULL;
                e->range = NULL;
        }
        mutex_unlock(&p->bo_list->bo_list_mutex);
        return r;
}

static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
{
        int i, j;

        if (!trace_amdgpu_cs_enabled())
                return;

        for (i = 0; i < p->gang_size; ++i) {
                struct amdgpu_job *job = p->jobs[i];

                for (j = 0; j < job->num_ibs; ++j)
                        trace_amdgpu_cs(p, job, &job->ibs[j]);
        }
}

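/*
 * Copy or patch the IBs of a job for rings that need CS emulation
 * (UVD/VCE): map the backing BO and let the ring's parser rewrite the
 * commands in place or into a kernel copy.
 */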
static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
                               struct amdgpu_job *job)
{
        struct amdgpu_ring *ring = amdgpu_job_ring(job);
        unsigned int i;
        int r;

        /* Only for UVD/VCE VM emulation */
        if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
                return 0;

        for (i = 0; i < job->num_ibs; ++i) {
                struct amdgpu_ib *ib = &job->ibs[i];
                struct amdgpu_bo_va_mapping *m;
                struct amdgpu_bo *aobj;
                uint64_t va_start;
                uint8_t *kptr;

                va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
                r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
                if (r) {
                        DRM_ERROR("IB va_start is invalid\n");
                        return r;
                }

                if ((va_start + ib->length_dw * 4) >
                    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
                        DRM_ERROR("IB va_start+ib_bytes is invalid\n");
                        return -EINVAL;
                }

                /* the IB should be reserved at this point */
                r = amdgpu_bo_kmap(aobj, (void **)&kptr);
                if (r)
                        return r;

                kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);

                if (ring->funcs->parse_cs) {
                        memcpy(ib->ptr, kptr, ib->length_dw * 4);
                        amdgpu_bo_kunmap(aobj);

                        r = amdgpu_ring_parse_cs(ring, p, job, ib);
                        if (r)
                                return r;
                } else {
                        ib->ptr = (uint32_t *)kptr;
                        r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
                        amdgpu_bo_kunmap(aobj);
                        if (r)
                                return r;
                }
        }

        return 0;
}

static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
{
        unsigned int i;
        int r;

        for (i = 0; i < p->gang_size; ++i) {
                r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
                if (r)
                        return r;
        }
        return 0;
}

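/*
 * Bring the page tables up to date for this submission: clear freed
 * mappings, update the BO VAs of all referenced buffers and make the
 * resulting page table updates dependencies of the gang leader.
 */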
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_job *job = p->gang_leader;
        struct amdgpu_device *adev = p->adev;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_list_entry *e;
        struct amdgpu_bo_va *bo_va;
        struct amdgpu_bo *bo;
        unsigned int i;
        int r;

        r = amdgpu_vm_clear_freed(adev, vm, NULL);
        if (r)
                return r;

        r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
        if (r)
                return r;

        r = amdgpu_sync_fence(&job->sync, fpriv->prt_va->last_pt_update);
        if (r)
                return r;

        if (fpriv->csa_va) {
                bo_va = fpriv->csa_va;
                BUG_ON(!bo_va);
                r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        return r;

                r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
                if (r)
                        return r;
        }

        amdgpu_bo_list_for_each_entry(e, p->bo_list) {
                /* ignore duplicates */
                bo = ttm_to_amdgpu_bo(e->tv.bo);
                if (!bo)
                        continue;

                bo_va = e->bo_va;
                if (bo_va == NULL)
                        continue;

                r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        return r;

                r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
                if (r)
                        return r;
        }

        r = amdgpu_vm_handle_moved(adev, vm);
        if (r)
                return r;

        r = amdgpu_vm_update_pdes(adev, vm, false);
        if (r)
                return r;

        r = amdgpu_sync_fence(&job->sync, vm->last_update);
        if (r)
                return r;

        for (i = 0; i < p->gang_size; ++i) {
                job = p->jobs[i];

                if (!job->vm)
                        continue;

                job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
        }

        if (amdgpu_vm_debug) {
                /* Invalidate all BOs to test for userspace bugs */
                amdgpu_bo_list_for_each_entry(e, p->bo_list) {
                        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                        /* ignore duplicates */
                        if (!bo)
                                continue;

                        amdgpu_vm_bo_invalidate(adev, bo, false);
                }
        }

        return 0;
}

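/*
 * Collect the implicit and explicit synchronization requirements of all
 * validated BOs into the gang leader's sync object and clone them into
 * the other gang members, then wait for the previous fence of the leader
 * entity where required.
 */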
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_job *leader = p->gang_leader;
        struct amdgpu_bo_list_entry *e;
        unsigned int i;
        int r;

        list_for_each_entry(e, &p->validated, tv.head) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
                struct dma_resv *resv = bo->tbo.base.resv;
                enum amdgpu_sync_mode sync_mode;

                sync_mode = amdgpu_bo_explicit_sync(bo) ?
                        AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
                r = amdgpu_sync_resv(p->adev, &leader->sync, resv, sync_mode,
                                     &fpriv->vm);
                if (r)
                        return r;
        }

        for (i = 0; i < p->gang_size; ++i) {
                if (p->jobs[i] == leader)
                        continue;

                r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync);
                if (r)
                        return r;
        }

        r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
        if (r && r != -ERESTARTSYS)
                DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
        return r;
}

static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
        int i;

        for (i = 0; i < p->num_post_deps; ++i) {
                if (p->post_deps[i].chain && p->post_deps[i].point) {
                        drm_syncobj_add_point(p->post_deps[i].syncobj,
                                              p->post_deps[i].chain,
                                              p->fence, p->post_deps[i].point);
                        p->post_deps[i].chain = NULL;
                } else {
                        drm_syncobj_replace_fence(p->post_deps[i].syncobj,
                                                  p->fence);
                }
        }
}

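/*
 * Final submission step: arm the scheduler jobs, re-check under the
 * notifier lock that no userptr BO was invalidated in the meantime
 * (returning -EAGAIN so userspace restarts the IOCTL), attach the fences
 * to the BOs and push everything to the scheduler.
 */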
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
                            union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_job *leader = p->gang_leader;
        struct amdgpu_bo_list_entry *e;
        unsigned int i;
        uint64_t seq;
        int r;

        for (i = 0; i < p->gang_size; ++i)
                drm_sched_job_arm(&p->jobs[i]->base);

        for (i = 0; i < p->gang_size; ++i) {
                struct dma_fence *fence;

                if (p->jobs[i] == leader)
                        continue;

                fence = &p->jobs[i]->base.s_fence->scheduled;
                r = amdgpu_sync_fence(&leader->sync, fence);
                if (r)
                        goto error_cleanup;
        }

        if (p->gang_size > 1) {
                for (i = 0; i < p->gang_size; ++i)
                        amdgpu_job_set_gang_leader(p->jobs[i], leader);
        }

        /* No memory allocation is allowed while holding the notifier lock.
         * The lock is held until amdgpu_cs_submit() is finished and the
         * fence has been added to the BOs.
         */
        mutex_lock(&p->adev->notifier_lock);

        /* If the userptrs were invalidated after amdgpu_cs_parser_bos(),
         * return -EAGAIN; drmIoctl() in libdrm will then restart the
         * amdgpu_cs_ioctl.
         */
        r = 0;
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
                e->range = NULL;
        }
        if (r) {
                r = -EAGAIN;
                goto error_unlock;
        }

        p->fence = dma_fence_get(&leader->base.s_fence->finished);
        list_for_each_entry(e, &p->validated, tv.head) {

                /* Everybody except for the gang leader uses READ */
                for (i = 0; i < p->gang_size; ++i) {
                        if (p->jobs[i] == leader)
                                continue;

                        dma_resv_add_fence(e->tv.bo->base.resv,
                                           &p->jobs[i]->base.s_fence->finished,
                                           DMA_RESV_USAGE_READ);
                }

                /* The gang leader is remembered as writer */
                e->tv.num_shared = 0;
        }

        seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
                                   p->fence);
        amdgpu_cs_post_dependencies(p);

        if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
            !p->ctx->preamble_presented) {
                leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
                p->ctx->preamble_presented = true;
        }

        cs->out.handle = seq;
        leader->uf_sequence = seq;

        amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
        for (i = 0; i < p->gang_size; ++i) {
                amdgpu_job_free_resources(p->jobs[i]);
                trace_amdgpu_cs_ioctl(p->jobs[i]);
                drm_sched_entity_push_job(&p->jobs[i]->base);
                p->jobs[i] = NULL;
        }

        amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
        ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);

        mutex_unlock(&p->adev->notifier_lock);
        mutex_unlock(&p->bo_list->bo_list_mutex);
        return 0;

error_unlock:
        mutex_unlock(&p->adev->notifier_lock);

error_cleanup:
        for (i = 0; i < p->gang_size; ++i)
                drm_sched_job_cleanup(&p->jobs[i]->base);
        return r;
}

/* Cleanup the parser structure */
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
{
        unsigned int i;

        for (i = 0; i < parser->num_post_deps; i++) {
                drm_syncobj_put(parser->post_deps[i].syncobj);
                kfree(parser->post_deps[i].chain);
        }
        kfree(parser->post_deps);

        dma_fence_put(parser->fence);

        if (parser->ctx)
                amdgpu_ctx_put(parser->ctx);
        if (parser->bo_list)
                amdgpu_bo_list_put(parser->bo_list);

        for (i = 0; i < parser->nchunks; i++)
                kvfree(parser->chunks[i].kdata);
        kvfree(parser->chunks);
        for (i = 0; i < parser->gang_size; ++i) {
                if (parser->jobs[i])
                        amdgpu_job_free(parser->jobs[i]);
        }
        if (parser->uf_entry.tv.bo) {
                struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

                amdgpu_bo_unref(&uf);
        }
}

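/**
 * amdgpu_cs_ioctl - process a command submission from userspace
 *
 * @dev: drm device
 * @data: union drm_amdgpu_cs from userspace
 * @filp: file private
 *
 * Top-level entry point for the CS IOCTL. Runs the two parsing passes,
 * resolves and validates the buffer list, patches IBs where VM emulation
 * requires it, handles VM updates, syncs the rings and finally pushes the
 * jobs to the scheduler. On failure the parser state is torn down and any
 * reserved buffers are backed off.
 */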
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_cs_parser parser;
        int r;

        if (amdgpu_ras_intr_triggered())
                return -EHWPOISON;

        if (!adev->accel_working)
                return -EBUSY;

        r = amdgpu_cs_parser_init(&parser, adev, filp, data);
        if (r) {
                if (printk_ratelimit())
                        DRM_ERROR("Failed to initialize parser %d!\n", r);
                return r;
        }

        r = amdgpu_cs_pass1(&parser, data);
        if (r)
                goto error_fini;

        r = amdgpu_cs_pass2(&parser);
        if (r)
                goto error_fini;

        r = amdgpu_cs_parser_bos(&parser, data);
        if (r) {
                if (r == -ENOMEM)
                        DRM_ERROR("Not enough memory for command submission!\n");
                else if (r != -ERESTARTSYS && r != -EAGAIN)
                        DRM_ERROR("Failed to process the buffer list %d!\n", r);
                goto error_fini;
        }

        r = amdgpu_cs_patch_jobs(&parser);
        if (r)
                goto error_backoff;

        r = amdgpu_cs_vm_handling(&parser);
        if (r)
                goto error_backoff;

        r = amdgpu_cs_sync_rings(&parser);
        if (r)
                goto error_backoff;

        trace_amdgpu_cs_ibs(&parser);

        r = amdgpu_cs_submit(&parser, data);
        if (r)
                goto error_backoff;

        amdgpu_cs_parser_fini(&parser);
        return 0;

error_backoff:
        ttm_eu_backoff_reservation(&parser.ticket, &parser.validated);
        mutex_unlock(&parser.bo_list->bo_list_mutex);

error_fini:
        amdgpu_cs_parser_fini(&parser);
        return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *filp)
{
        union drm_amdgpu_wait_cs *wait = data;
        unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
        struct drm_sched_entity *entity;
        struct amdgpu_ctx *ctx;
        struct dma_fence *fence;
        long r;

        ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
        if (ctx == NULL)
                return -EINVAL;

        r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
                                  wait->in.ring, &entity);
        if (r) {
                amdgpu_ctx_put(ctx);
                return r;
        }

        fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
        if (IS_ERR(fence))
                r = PTR_ERR(fence);
        else if (fence) {
                r = dma_fence_wait_timeout(fence, true, timeout);
                if (r > 0 && fence->error)
                        r = fence->error;
                dma_fence_put(fence);
        } else
                r = 1;

        amdgpu_ctx_put(ctx);
        if (r < 0)
                return r;

        memset(wait, 0, sizeof(*wait));
        wait->out.status = (r == 0);

        return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
                                             struct drm_file *filp,
                                             struct drm_amdgpu_fence *user)
{
        struct drm_sched_entity *entity;
        struct amdgpu_ctx *ctx;
        struct dma_fence *fence;
        int r;

        ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
        if (ctx == NULL)
                return ERR_PTR(-EINVAL);

        r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
                                  user->ring, &entity);
        if (r) {
                amdgpu_ctx_put(ctx);
                return ERR_PTR(r);
        }

        fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
        amdgpu_ctx_put(ctx);

        return fence;
}

1521 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1522                                     struct drm_file *filp)
1523 {
1524         struct amdgpu_device *adev = drm_to_adev(dev);
1525         union drm_amdgpu_fence_to_handle *info = data;
1526         struct dma_fence *fence;
1527         struct drm_syncobj *syncobj;
1528         struct sync_file *sync_file;
1529         int fd, r;
1530
1531         fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1532         if (IS_ERR(fence))
1533                 return PTR_ERR(fence);
1534
1535         if (!fence)
1536                 fence = dma_fence_get_stub();
1537
1538         switch (info->in.what) {
1539         case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
1540                 r = drm_syncobj_create(&syncobj, 0, fence);
1541                 dma_fence_put(fence);
1542                 if (r)
1543                         return r;
1544                 r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
1545                 drm_syncobj_put(syncobj);
1546                 return r;
1547
1548         case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
1549                 r = drm_syncobj_create(&syncobj, 0, fence);
1550                 dma_fence_put(fence);
1551                 if (r)
1552                         return r;
1553                 r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
1554                 drm_syncobj_put(syncobj);
1555                 return r;
1556
1557         case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
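                /* Reserve the fd before creating the sync_file: a failed
                 * sync_file_create() can still put the unused fd, while
                 * fd_install() cannot be undone.
                 */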
1558                 fd = get_unused_fd_flags(O_CLOEXEC);
1559                 if (fd < 0) {
1560                         dma_fence_put(fence);
1561                         return fd;
1562                 }
1563
1564                 sync_file = sync_file_create(fence);
1565                 dma_fence_put(fence);
1566                 if (!sync_file) {
1567                         put_unused_fd(fd);
1568                         return -ENOMEM;
1569                 }
1570
1571                 fd_install(fd, sync_file->file);
1572                 info->out.handle = fd;
1573                 return 0;
1574
1575         default:
1576                 dma_fence_put(fence);
1577                 return -EINVAL;
1578         }
1579 }
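As a usage sketch, exporting the fence of an earlier submission as a
pollable sync_file fd might look like the fragment below; struct and flag
names come from amdgpu_drm.h, while fd, ctx_id and seq_no are hypothetical:

        union drm_amdgpu_fence_to_handle fth = {
                .in.fence = {
                        .ctx_id  = ctx_id,           /* hypothetical context */
                        .ip_type = AMDGPU_HW_IP_GFX,
                        .seq_no  = seq_no,           /* hypothetical submission */
                },
                .in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD,
        };

        if (drmIoctl(fd, DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, &fth))
                return -errno;
        /* fth.out.handle now holds a sync_file fd that can be poll()ed or
         * passed to another process or driver */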
1580
1581 /**
1582  * amdgpu_cs_wait_all_fences - wait on all fences to signal
1583  *
1584  * @adev: amdgpu device
1585  * @filp: file private
1586  * @wait: wait parameters
1587  * @fences: array of drm_amdgpu_fence
1588  */
1589 static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1590                                      struct drm_file *filp,
1591                                      union drm_amdgpu_wait_fences *wait,
1592                                      struct drm_amdgpu_fence *fences)
1593 {
1594         uint32_t fence_count = wait->in.fence_count;
1595         unsigned int i;
1596         long r = 1;
1597
1598         for (i = 0; i < fence_count; i++) {
1599                 struct dma_fence *fence;
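                /* The timeout is an absolute deadline, so recomputing the
                 * remaining jiffies per fence keeps one overall deadline
                 * across the whole array.
                 */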
1600                 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1601
1602                 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1603                 if (IS_ERR(fence))
1604                         return PTR_ERR(fence);
1605                 else if (!fence)
1606                         continue;
1607
1608                 r = dma_fence_wait_timeout(fence, true, timeout);
1609                 /* read the error before dropping our fence reference */
1610                 if (r > 0 && fence->error)
1611                         r = fence->error;
1612                 dma_fence_put(fence);
1613                 if (r < 0)
1614                         return r;
1615
1616                 if (r == 0)
1617                         break;
1618         }
1619
1620         memset(wait, 0, sizeof(*wait));
1621         wait->out.status = (r > 0);
1622
1623         return 0;
1624 }
1625
1626 /**
1627  * amdgpu_cs_wait_any_fence - wait on any fence to signal
1628  *
1629  * @adev: amdgpu device
1630  * @filp: file private
1631  * @wait: wait parameters
1632  * @fences: array of drm_amdgpu_fence
1633  */
1634 static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1635                                     struct drm_file *filp,
1636                                     union drm_amdgpu_wait_fences *wait,
1637                                     struct drm_amdgpu_fence *fences)
1638 {
1639         unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1640         uint32_t fence_count = wait->in.fence_count;
1641         uint32_t first = ~0;
1642         struct dma_fence **array;
1643         unsigned int i;
1644         long r;
1645
1646         /* Prepare the fence array */
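        /* kcalloc zero-fills the slots, so the unwind path below may
         * dma_fence_put() every entry unconditionally.
         */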
1647         array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1648
1649         if (array == NULL)
1650                 return -ENOMEM;
1651
1652         for (i = 0; i < fence_count; i++) {
1653                 struct dma_fence *fence;
1654
1655                 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1656                 if (IS_ERR(fence)) {
1657                         r = PTR_ERR(fence);
1658                         goto err_free_fence_array;
1659                 } else if (fence) {
1660                         array[i] = fence;
1661                 } else { /* NULL, the fence has been already signaled */
1662                         r = 1;
1663                         first = i;
1664                         goto out;
1665                 }
1666         }
1667
1668         r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1669                                        &first);
1670         if (r < 0)
1671                 goto err_free_fence_array;
1672
1673 out:
1674         memset(wait, 0, sizeof(*wait));
1675         wait->out.status = (r > 0);
1676         wait->out.first_signaled = first;
1677
1678         if (first < fence_count && array[first])
1679                 r = array[first]->error;
1680         else
1681                 r = 0;
1682
1683 err_free_fence_array:
1684         for (i = 0; i < fence_count; i++)
1685                 dma_fence_put(array[i]);
1686         kfree(array);
1687
1688         return r;
1689 }
1690
1691 /**
1692  * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1693  *
1694  * @dev: drm device
1695  * @data: data from userspace
1696  * @filp: file private
1697  */
1698 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1699                                 struct drm_file *filp)
1700 {
1701         struct amdgpu_device *adev = drm_to_adev(dev);
1702         union drm_amdgpu_wait_fences *wait = data;
1703         uint32_t fence_count = wait->in.fence_count;
1704         struct drm_amdgpu_fence *fences_user;
1705         struct drm_amdgpu_fence *fences;
1706         int r;
1707
1708         /* Get the fences from userspace */
1709         fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1710                         GFP_KERNEL);
1711         if (fences == NULL)
1712                 return -ENOMEM;
1713
1714         fences_user = u64_to_user_ptr(wait->in.fences);
1715         if (copy_from_user(fences, fences_user,
1716                 sizeof(struct drm_amdgpu_fence) * fence_count)) {
1717                 r = -EFAULT;
1718                 goto err_free_fences;
1719         }
1720
1721         if (wait->in.wait_all)
1722                 r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1723         else
1724                 r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1725
1726 err_free_fences:
1727         kfree(fences);
1728
1729         return r;
1730 }
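A corresponding userspace fragment (hypothetical fences[] contents and
deadline_ns, error handling trimmed) waiting for the first of two fences:

        struct drm_amdgpu_fence fences[2];    /* filled in by the caller */
        union drm_amdgpu_wait_fences wf = {
                .in.fences = (uint64_t)(uintptr_t)fences,
                .in.fence_count = 2,
                .in.wait_all = 0,              /* 0: any one fence suffices */
                .in.timeout_ns = deadline_ns,  /* absolute CLOCK_MONOTONIC */
        };

        if (drmIoctl(fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &wf))
                return -errno;
        /* wf.out.status != 0 once a fence signaled; wf.out.first_signaled
         * is its index in fences[] */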
1731
1732 /**
1733  * amdgpu_cs_find_mapping - find bo_va for VM address
1734  *
1735  * @parser: command submission parser context
1736  * @addr: VM address
1737  * @bo: resulting BO of the mapping found
1738  * @map: Placeholder to return found BO mapping
1739  *
1740  * Search the buffer objects in the command submission context for a certain
1741  * virtual memory address. Returns 0 and fills in @bo and @map when the
1742  * mapping is found, a negative error code otherwise.
1743  */
1744 int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1745                            uint64_t addr, struct amdgpu_bo **bo,
1746                            struct amdgpu_bo_va_mapping **map)
1747 {
1748         struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
1749         struct ttm_operation_ctx ctx = { false, false };
1750         struct amdgpu_vm *vm = &fpriv->vm;
1751         struct amdgpu_bo_va_mapping *mapping;
1752         int r;
1753
1754         addr /= AMDGPU_GPU_PAGE_SIZE;
1755
1756         mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
1757         if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
1758                 return -EINVAL;
1759
1760         *bo = mapping->bo_va->base.bo;
1761         *map = mapping;
1762
1763         /* Double check that the BO is reserved by this CS */
1764         if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
1765                 return -EINVAL;
1766
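        /* Callers patch command buffers with this BO's GPU address, so it
         * is forced contiguous in VRAM and, via amdgpu_ttm_alloc_gart(),
         * guaranteed a valid GPU address before returning.
         */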
1767         if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
1768                 (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1769                 amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
1770                 r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
1771                 if (r)
1772                         return r;
1773         }
1774
1775         return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
1776 }