drivers/gpu/drm/nouveau/nouveau_svm.c
1 /*
2  * Copyright 2018 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include "nouveau_svm.h"
23 #include "nouveau_drv.h"
24 #include "nouveau_chan.h"
25 #include "nouveau_dmem.h"
26
27 #include <nvif/notify.h>
28 #include <nvif/object.h>
29 #include <nvif/vmm.h>
30
31 #include <nvif/class.h>
32 #include <nvif/clb069.h>
33 #include <nvif/ifc00d.h>
34
35 #include <linux/sched/mm.h>
36 #include <linux/sort.h>
37 #include <linux/hmm.h>
38
39 struct nouveau_svm {
40         struct nouveau_drm *drm;
41         struct mutex mutex;
42         struct list_head inst;
43
44         struct nouveau_svm_fault_buffer {
45                 int id;
46                 struct nvif_object object;
47                 u32 entries;
48                 u32 getaddr;
49                 u32 putaddr;
50                 u32 get;
51                 u32 put;
52                 struct nvif_notify notify;
53
54                 struct nouveau_svm_fault {
55                         u64 inst;
56                         u64 addr;
57                         u64 time;
58                         u32 engine;
59                         u8  gpc;
60                         u8  hub;
61                         u8  access;
62                         u8  client;
63                         u8  fault;
64                         struct nouveau_svmm *svmm;
65                 } **fault;
66                 int fault_nr;
67         } buffer[1];
68 };
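/* Notes on the structure above, as used throughout this file: each
 * buffer[] entry wraps a fault buffer object (MAXWELL_/VOLTA_FAULT_BUFFER_A
 * class), which is a ring of 'entries' hardware fault records of 0x20 bytes
 * each.  The GET/PUT indices are read and written through the 'getaddr'/
 * 'putaddr' register offsets, and 'fault' is a lazily-populated array of
 * parsed records ('fault_nr' valid per interrupt) that is sorted before
 * the faults are handled.
 */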
69
70 #define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
71 #define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
72
73 struct nouveau_ivmm {
74         struct nouveau_svmm *svmm;
75         u64 inst;
76         struct list_head head;
77 };
78
79 static struct nouveau_ivmm *
80 nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
81 {
82         struct nouveau_ivmm *ivmm;
83         list_for_each_entry(ivmm, &svm->inst, head) {
84                 if (ivmm->inst == inst)
85                         return ivmm;
86         }
87         return NULL;
88 }
89
90 struct nouveau_svmm {
91         struct nouveau_vmm *vmm;
92         struct {
93                 unsigned long start;
94                 unsigned long limit;
95         } unmanaged;
96
97         struct mutex mutex;
98
99         struct mm_struct *mm;
100         struct hmm_mirror mirror;
101 };
102
103 #define SVMM_DBG(s,f,a...)                                                     \
104         NV_DEBUG((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
105 #define SVMM_ERR(s,f,a...)                                                     \
106         NV_WARN((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
107
108 int
109 nouveau_svmm_bind(struct drm_device *dev, void *data,
110                   struct drm_file *file_priv)
111 {
112         struct nouveau_cli *cli = nouveau_cli(file_priv);
113         struct drm_nouveau_svm_bind *args = data;
114         unsigned target, cmd, priority;
115         unsigned long addr, end, size;
116         struct mm_struct *mm;
117
118         args->va_start &= PAGE_MASK;
119         args->va_end &= PAGE_MASK;
120
121         /* Sanity check arguments */
122         if (args->reserved0 || args->reserved1)
123                 return -EINVAL;
124         if (args->header & (~NOUVEAU_SVM_BIND_VALID_MASK))
125                 return -EINVAL;
126         if (args->va_start >= args->va_end)
127                 return -EINVAL;
128         if (!args->npages)
129                 return -EINVAL;
130
131         cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
132         cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
133         switch (cmd) {
134         case NOUVEAU_SVM_BIND_COMMAND__MIGRATE:
135                 break;
136         default:
137                 return -EINVAL;
138         }
139
140         priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
141         priority &= NOUVEAU_SVM_BIND_PRIORITY_MASK;
142
143         /* FIXME: support CPU targets, ie. all target values < GPU_VRAM */
144         target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT;
145         target &= NOUVEAU_SVM_BIND_TARGET_MASK;
146         switch (target) {
147         case NOUVEAU_SVM_BIND_TARGET__GPU_VRAM:
148                 break;
149         default:
150                 return -EINVAL;
151         }
152
153         /*
154          * FIXME: For now refuse a non-zero stride; we need to change the
155          * migrate kernel function to handle strides to avoid creating a
156          * mess within each device driver.
157          */
158         if (args->stride)
159                 return -EINVAL;
160
161         size = ((unsigned long)args->npages) << PAGE_SHIFT;
162         if ((args->va_start + size) <= args->va_start)
163                 return -EINVAL;
164         if ((args->va_start + size) > args->va_end)
165                 return -EINVAL;
166
167         /*
168          * OK, we are asked to do something sane; for now we only support
169          * migrate commands, but we will add things like memory policy
170          * (what to do on page fault) and maybe some other commands.
171          */
172
173         mm = get_task_mm(current);
174         down_read(&mm->mmap_sem);
175
176         for (addr = args->va_start, end = args->va_start + size; addr < end;) {
177                 struct vm_area_struct *vma;
178                 unsigned long next;
179
180                 vma = find_vma_intersection(mm, addr, end);
181                 if (!vma)
182                         break;
183
184                 next = min(vma->vm_end, end);
185                 /* This is a best effort so we ignore errors */
186                 nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
187                 addr = next;
188         }
189
190         /*
191          * FIXME: Return the number of pages we have migrated; again, we
192          * need to update the migrate API to return that information so
193          * that we can report it to user space.
194          */
195         args->result = 0;
196
197         up_read(&mm->mmap_sem);
198         mmput(mm);
199
200         return 0;
201 }
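/* A minimal sketch of how a caller would pack the 'header' word consumed
 * above, assuming the value-shifted-into-field convention implied by the
 * decode (the actual shift/mask values live in the nouveau UAPI header,
 * not in this file):
 *
 *     args.header  = NOUVEAU_SVM_BIND_COMMAND__MIGRATE
 *                    << NOUVEAU_SVM_BIND_COMMAND_SHIFT;
 *     args.header |= priority << NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
 *     args.header |= NOUVEAU_SVM_BIND_TARGET__GPU_VRAM
 *                    << NOUVEAU_SVM_BIND_TARGET_SHIFT;
 *
 * Anything outside NOUVEAU_SVM_BIND_VALID_MASK, a zero npages, or a
 * non-zero stride is rejected with -EINVAL.
 */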
202
203 /* Unlink channel instance from SVMM. */
204 void
205 nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
206 {
207         struct nouveau_ivmm *ivmm;
208         if (svmm) {
209                 mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
210                 ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
211                 if (ivmm) {
212                         list_del(&ivmm->head);
213                         kfree(ivmm);
214                 }
215                 mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
216         }
217 }
218
219 /* Link channel instance to SVMM. */
220 int
221 nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
222 {
223         struct nouveau_ivmm *ivmm;
224         if (svmm) {
225                 if (!(ivmm = kmalloc(sizeof(*ivmm), GFP_KERNEL)))
226                         return -ENOMEM;
227                 ivmm->svmm = svmm;
228                 ivmm->inst = inst;
229
230                 mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
231                 list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
232                 mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
233         }
234         return 0;
235 }
236
237 /* Invalidate SVMM address-range on GPU. */
238 static void
239 nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
240 {
241         if (limit > start) {
242                 bool super = svmm->vmm->vmm.object.client->super;
243                 svmm->vmm->vmm.object.client->super = true;
244                 nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
245                                  &(struct nvif_vmm_pfnclr_v0) {
246                                         .addr = start,
247                                         .size = limit - start,
248                                  }, sizeof(struct nvif_vmm_pfnclr_v0));
249                 svmm->vmm->vmm.object.client->super = super;
250         }
251 }
252
253 static int
254 nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
255                                         const struct hmm_update *update)
256 {
257         struct nouveau_svmm *svmm = container_of(mirror, typeof(*svmm), mirror);
258         unsigned long start = update->start;
259         unsigned long limit = update->end;
260
261         if (!update->blockable)
262                 return -EAGAIN;
263
264         SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);
265
266         mutex_lock(&svmm->mutex);
267         if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
268                 if (start < svmm->unmanaged.start) {
269                         nouveau_svmm_invalidate(svmm, start,
270                                                 svmm->unmanaged.limit);
271                 }
272                 start = svmm->unmanaged.limit;
273         }
274
275         nouveau_svmm_invalidate(svmm, start, limit);
276         mutex_unlock(&svmm->mutex);
277         return 0;
278 }
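/* Behaviour of the clamping above, with U = [unmanaged.start, unmanaged.limit):
 *
 *   - a CPU range entirely below or above U is invalidated as-is;
 *   - a range starting inside U has the portion inside U skipped, with
 *     invalidation resuming at unmanaged.limit;
 *   - a range starting below U and reaching into it is flushed as
 *     [start, unmanaged.limit) followed by [unmanaged.limit, limit).
 */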
279
280 static void
281 nouveau_svmm_release(struct hmm_mirror *mirror)
282 {
283 }
284
285 static const struct hmm_mirror_ops
286 nouveau_svmm = {
287         .sync_cpu_device_pagetables = nouveau_svmm_sync_cpu_device_pagetables,
288         .release = nouveau_svmm_release,
289 };
290
291 void
292 nouveau_svmm_fini(struct nouveau_svmm **psvmm)
293 {
294         struct nouveau_svmm *svmm = *psvmm;
295         if (svmm) {
296                 hmm_mirror_unregister(&svmm->mirror);
297                 kfree(*psvmm);
298                 *psvmm = NULL;
299         }
300 }
301
302 int
303 nouveau_svmm_init(struct drm_device *dev, void *data,
304                   struct drm_file *file_priv)
305 {
306         struct nouveau_cli *cli = nouveau_cli(file_priv);
307         struct nouveau_svmm *svmm;
308         struct drm_nouveau_svm_init *args = data;
309         int ret;
310
311         /* Allocate tracking for SVM-enabled VMM. */
312         if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
313                 return -ENOMEM;
314         svmm->vmm = &cli->svm;
315         svmm->unmanaged.start = args->unmanaged_addr;
316         svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size;
317         mutex_init(&svmm->mutex);
318
319         /* Check that SVM isn't already enabled for the client. */
320         mutex_lock(&cli->mutex);
321         if (cli->svm.cli) {
322                 ret = -EBUSY;
323                 goto done;
324         }
325
326         /* Allocate a new GPU VMM that can support SVM (managed by the
327          * client, with replayable faults enabled).
328          *
329          * All future channel/memory allocations will make use of this
330          * VMM instead of the standard one.
331          */
332         ret = nvif_vmm_init(&cli->mmu, cli->vmm.vmm.object.oclass, true,
333                             args->unmanaged_addr, args->unmanaged_size,
334                             &(struct gp100_vmm_v0) {
335                                 .fault_replay = true,
336                             }, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
337         if (ret)
338                 goto done;
339
340         /* Enable HMM mirroring of CPU address-space to VMM. */
341         svmm->mm = get_task_mm(current);
342         down_write(&svmm->mm->mmap_sem);
343         svmm->mirror.ops = &nouveau_svmm;
344         ret = hmm_mirror_register(&svmm->mirror, svmm->mm);
345         if (ret == 0) {
346                 cli->svm.svmm = svmm;
347                 cli->svm.cli = cli;
348         }
349         up_write(&svmm->mm->mmap_sem);
350         mmput(svmm->mm);
351
352 done:
353         if (ret)
354                 nouveau_svmm_fini(&svmm);
355         mutex_unlock(&cli->mutex);
356         return ret;
357 }
358
359 static const u64
360 nouveau_svm_pfn_flags[HMM_PFN_FLAG_MAX] = {
361         [HMM_PFN_VALID         ] = NVIF_VMM_PFNMAP_V0_V,
362         [HMM_PFN_WRITE         ] = NVIF_VMM_PFNMAP_V0_W,
363         [HMM_PFN_DEVICE_PRIVATE] = NVIF_VMM_PFNMAP_V0_VRAM,
364 };
365
366 static const u64
367 nouveau_svm_pfn_values[HMM_PFN_VALUE_MAX] = {
368         [HMM_PFN_ERROR  ] = ~NVIF_VMM_PFNMAP_V0_V,
369         [HMM_PFN_NONE   ] =  NVIF_VMM_PFNMAP_V0_NONE,
370         [HMM_PFN_SPECIAL] = ~NVIF_VMM_PFNMAP_V0_V,
371 };
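/* These two tables let HMM fill range.pfns directly in NVIF_VMM_PFNMAP_V0
 * format: a valid CPU PTE sets _V, a writeable one adds _W, and a
 * device-private page adds _VRAM.  Error/special entries map to
 * ~NVIF_VMM_PFNMAP_V0_V, which fails the validity check in the fault
 * handler, and the pfn itself lands at NVIF_VMM_PFNMAP_V0_ADDR_SHIFT
 * (see range.pfn_shift in nouveau_svm_fault()).
 */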
372
373 /* Issue fault replay for GPU to retry accesses that faulted previously. */
374 static void
375 nouveau_svm_fault_replay(struct nouveau_svm *svm)
376 {
377         SVM_DBG(svm, "replay");
378         WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
379                                  GP100_VMM_VN_FAULT_REPLAY,
380                                  &(struct gp100_vmm_fault_replay_vn) {},
381                                  sizeof(struct gp100_vmm_fault_replay_vn)));
382 }
383
384 /* Cancel a replayable fault that could not be handled.
385  *
386  * Cancelling the fault will trigger recovery to reset the engine
387  * and kill the offending channel (ie. GPU SIGSEGV).
388  */
389 static void
390 nouveau_svm_fault_cancel(struct nouveau_svm *svm,
391                          u64 inst, u8 hub, u8 gpc, u8 client)
392 {
393         SVM_DBG(svm, "cancel %016llx %d %02x %02x", inst, hub, gpc, client);
394         WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
395                                  GP100_VMM_VN_FAULT_CANCEL,
396                                  &(struct gp100_vmm_fault_cancel_v0) {
397                                         .hub = hub,
398                                         .gpc = gpc,
399                                         .client = client,
400                                         .inst = inst,
401                                  }, sizeof(struct gp100_vmm_fault_cancel_v0)));
402 }
403
404 static void
405 nouveau_svm_fault_cancel_fault(struct nouveau_svm *svm,
406                                struct nouveau_svm_fault *fault)
407 {
408         nouveau_svm_fault_cancel(svm, fault->inst,
409                                       fault->hub,
410                                       fault->gpc,
411                                       fault->client);
412 }
413
414 static int
415 nouveau_svm_fault_cmp(const void *a, const void *b)
416 {
417         const struct nouveau_svm_fault *fa = *(struct nouveau_svm_fault **)a;
418         const struct nouveau_svm_fault *fb = *(struct nouveau_svm_fault **)b;
419         int ret;
420         if ((ret = (s64)fa->inst - fb->inst))
421                 return ret;
422         if ((ret = (s64)fa->addr - fb->addr))
423                 return ret;
424         /*XXX: atomic? */
425         return (fa->access == 0 || fa->access == 3) -
426                (fb->access == 0 || fb->access == 3);
427 }
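/* Sort order produced by the comparator above: faults are grouped first by
 * channel instance (so each instance-to-SVMM lookup is done once per
 * group), then by faulting address, and for duplicate addresses accesses
 * that need write permission sort ahead of READ (0) and PREFETCH (3), so
 * only the first fault per address has to be handled.
 */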
428
429 static void
430 nouveau_svm_fault_cache(struct nouveau_svm *svm,
431                         struct nouveau_svm_fault_buffer *buffer, u32 offset)
432 {
433         struct nvif_object *memory = &buffer->object;
434         const u32 instlo = nvif_rd32(memory, offset + 0x00);
435         const u32 insthi = nvif_rd32(memory, offset + 0x04);
436         const u32 addrlo = nvif_rd32(memory, offset + 0x08);
437         const u32 addrhi = nvif_rd32(memory, offset + 0x0c);
438         const u32 timelo = nvif_rd32(memory, offset + 0x10);
439         const u32 timehi = nvif_rd32(memory, offset + 0x14);
440         const u32 engine = nvif_rd32(memory, offset + 0x18);
441         const u32   info = nvif_rd32(memory, offset + 0x1c);
442         const u64   inst = (u64)insthi << 32 | instlo;
443         const u8     gpc = (info & 0x1f000000) >> 24;
444         const u8     hub = (info & 0x00100000) >> 20;
445         const u8  client = (info & 0x00007f00) >> 8;
446         struct nouveau_svm_fault *fault;
447
448         /*XXX: I think we're supposed to spin waiting. */
449         if (WARN_ON(!(info & 0x80000000)))
450                 return;
451
452         nvif_mask(memory, offset + 0x1c, 0x80000000, 0x00000000);
453
454         if (!buffer->fault[buffer->fault_nr]) {
455                 fault = kmalloc(sizeof(*fault), GFP_KERNEL);
456                 if (WARN_ON(!fault)) {
457                         nouveau_svm_fault_cancel(svm, inst, hub, gpc, client);
458                         return;
459                 }
460                 buffer->fault[buffer->fault_nr] = fault;
461         }
462
463         fault = buffer->fault[buffer->fault_nr++];
464         fault->inst   = inst;
465         fault->addr   = (u64)addrhi << 32 | addrlo;
466         fault->time   = (u64)timehi << 32 | timelo;
467         fault->engine = engine;
468         fault->gpc    = gpc;
469         fault->hub    = hub;
470         fault->access = (info & 0x000f0000) >> 16;
471         fault->client = client;
472         fault->fault  = (info & 0x0000001f);
473
474         SVM_DBG(svm, "fault %016llx %016llx %02x",
475                 fault->inst, fault->addr, fault->access);
476 }
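/* Layout of the 0x20-byte fault buffer entry decoded above (byte offsets,
 * read with nvif_rd32):
 *
 *   0x00/0x04  instance pointer, low/high 32 bits
 *   0x08/0x0c  faulting GPU virtual address, low/high 32 bits
 *   0x10/0x14  timestamp, low/high 32 bits
 *   0x18       engine
 *   0x1c       info: bit 31 = valid, bits 28:24 = gpc, bit 20 = hub,
 *              bits 19:16 = access type, bits 14:8 = client,
 *              bits 4:0 = fault reason
 *
 * The valid bit is cleared once an entry has been parsed so the hardware
 * can reuse the slot.
 */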
477
478 static int
479 nouveau_svm_fault(struct nvif_notify *notify)
480 {
481         struct nouveau_svm_fault_buffer *buffer =
482                 container_of(notify, typeof(*buffer), notify);
483         struct nouveau_svm *svm =
484                 container_of(buffer, typeof(*svm), buffer[buffer->id]);
485         struct nvif_object *device = &svm->drm->client.device.object;
486         struct nouveau_svmm *svmm;
487         struct {
488                 struct {
489                         struct nvif_ioctl_v0 i;
490                         struct nvif_ioctl_mthd_v0 m;
491                         struct nvif_vmm_pfnmap_v0 p;
492                 } i;
493                 u64 phys[16];
494         } args;
495         struct hmm_range range;
496         struct vm_area_struct *vma;
497         u64 inst, start, limit;
498         int fi, fn, pi, fill;
499         int replay = 0, ret;
500
501         /* Parse available fault buffer entries into a cache, and update
502          * the GET pointer so HW can reuse the entries.
503          */
504         SVM_DBG(svm, "fault handler");
505         if (buffer->get == buffer->put) {
506                 buffer->put = nvif_rd32(device, buffer->putaddr);
507                 buffer->get = nvif_rd32(device, buffer->getaddr);
508                 if (buffer->get == buffer->put)
509                         return NVIF_NOTIFY_KEEP;
510         }
511         buffer->fault_nr = 0;
512
513         SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put);
514         while (buffer->get != buffer->put) {
515                 nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20);
516                 if (++buffer->get == buffer->entries)
517                         buffer->get = 0;
518         }
519         nvif_wr32(device, buffer->getaddr, buffer->get);
520         SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);
521
522         /* Sort parsed faults by instance pointer to prevent unnecessary
523          * instance to SVMM translations, followed by address and access
524          * type to reduce the amount of work when handling the faults.
525          */
526         sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
527              nouveau_svm_fault_cmp, NULL);
528
529         /* Lookup SVMM structure for each unique instance pointer. */
530         mutex_lock(&svm->mutex);
531         for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
532                 if (!svmm || buffer->fault[fi]->inst != inst) {
533                         struct nouveau_ivmm *ivmm =
534                                 nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
535                         svmm = ivmm ? ivmm->svmm : NULL;
536                         inst = buffer->fault[fi]->inst;
537                         SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
538                 }
539                 buffer->fault[fi]->svmm = svmm;
540         }
541         mutex_unlock(&svm->mutex);
542
543         /* Process list of faults. */
544         args.i.i.version = 0;
545         args.i.i.type = NVIF_IOCTL_V0_MTHD;
546         args.i.m.version = 0;
547         args.i.m.method = NVIF_VMM_V0_PFNMAP;
548         args.i.p.version = 0;
549
550         for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
551                 /* Cancel any faults from non-SVM channels. */
552                 if (!(svmm = buffer->fault[fi]->svmm)) {
553                         nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
554                         continue;
555                 }
556                 SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);
557
558                 /* We try to group handling of faults within a small
559                  * window into a single update.
560                  */
561                 start = buffer->fault[fi]->addr;
562                 limit = start + (ARRAY_SIZE(args.phys) << PAGE_SHIFT);
563                 if (start < svmm->unmanaged.limit)
564                         limit = min_t(u64, limit, svmm->unmanaged.start);
565                 else
566                 if (limit > svmm->unmanaged.start)
567                         start = max_t(u64, start, svmm->unmanaged.limit);
568                 SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
569
570                 /* Intersect fault window with the CPU VMA, cancelling
571                  * the fault if the address is invalid.
572                  */
573                 down_read(&svmm->mm->mmap_sem);
574                 vma = find_vma_intersection(svmm->mm, start, limit);
575                 if (!vma) {
576                         SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
577                         up_read(&svmm->mm->mmap_sem);
578                         nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
579                         continue;
580                 }
581                 start = max_t(u64, start, vma->vm_start);
582                 limit = min_t(u64, limit, vma->vm_end);
583                 SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
584
585                 if (buffer->fault[fi]->addr != start) {
586                         SVMM_ERR(svmm, "addr %016llx", buffer->fault[fi]->addr);
587                         up_read(&svmm->mm->mmap_sem);
588                         nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
589                         continue;
590                 }
591
592                 /* Prepare the GPU-side update of all pages within the
593                  * fault window, determining required pages and access
594                  * permissions based on pending faults.
595                  */
596                 args.i.p.page = PAGE_SHIFT;
597                 args.i.p.addr = start;
598                 for (fn = fi, pi = 0;;) {
599                         /* Determine required permissions based on GPU fault
600                          * access flags.
601                          *XXX: atomic?
602                          */
603                         if (buffer->fault[fn]->access != 0 /* READ. */ &&
604                             buffer->fault[fn]->access != 3 /* PREFETCH. */) {
605                                 args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V |
606                                                   NVIF_VMM_PFNMAP_V0_W;
607                         } else {
608                                 args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V;
609                         }
610                         args.i.p.size = pi << PAGE_SHIFT;
611
612                         /* It's okay to skip over duplicate addresses from the
613                          * same SVMM as faults are ordered by access type such
614                          * that only the first one needs to be handled.
615                          *
616                          * ie. WRITE faults appear first, thus any handling of
617                          * pending READ faults will already be satisfied.
618                          */
619                         while (++fn < buffer->fault_nr &&
620                                buffer->fault[fn]->svmm == svmm &&
621                                buffer->fault[fn    ]->addr ==
622                                buffer->fault[fn - 1]->addr);
623
624                         /* If the next fault is outside the window, or all GPU
625                          * faults have been dealt with, we're done here.
626                          */
627                         if (fn >= buffer->fault_nr ||
628                             buffer->fault[fn]->svmm != svmm ||
629                             buffer->fault[fn]->addr >= limit)
630                                 break;
631
632                         /* Fill in the gap between this fault and the next. */
633                         fill = (buffer->fault[fn    ]->addr -
634                                 buffer->fault[fn - 1]->addr) >> PAGE_SHIFT;
635                         while (--fill)
636                                 args.phys[pi++] = NVIF_VMM_PFNMAP_V0_NONE;
637                 }
638
639                 SVMM_DBG(svmm, "wndw %016llx-%016llx covering %d fault(s)",
640                          args.i.p.addr,
641                          args.i.p.addr + args.i.p.size, fn - fi);
642
643                 /* Have HMM fault pages within the fault window to the GPU. */
644                 range.vma = vma;
645                 range.start = args.i.p.addr;
646                 range.end = args.i.p.addr + args.i.p.size;
647                 range.pfns = args.phys;
648                 range.flags = nouveau_svm_pfn_flags;
649                 range.values = nouveau_svm_pfn_values;
650                 range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
651 again:
652                 ret = hmm_vma_fault(&range, true);
653                 if (ret == 0) {
654                         mutex_lock(&svmm->mutex);
655                         if (!hmm_vma_range_done(&range)) {
656                                 mutex_unlock(&svmm->mutex);
657                                 goto again;
658                         }
659
660                         nouveau_dmem_convert_pfn(svm->drm, &range);
661
662                         svmm->vmm->vmm.object.client->super = true;
663                         ret = nvif_object_ioctl(&svmm->vmm->vmm.object,
664                                                 &args, sizeof(args.i) +
665                                                 pi * sizeof(args.phys[0]),
666                                                 NULL);
667                         svmm->vmm->vmm.object.client->super = false;
668                         mutex_unlock(&svmm->mutex);
669                 }
670                 up_read(&svmm->mm->mmap_sem);
671
672                 /* Cancel any faults in the window whose pages didn't manage
673                  * to keep their valid bit, or stay writeable when required.
674                  *
675                  * If handling failed completely, cancel all faults.
676                  */
677                 while (fi < fn) {
678                         struct nouveau_svm_fault *fault = buffer->fault[fi++];
679                         pi = (fault->addr - range.start) >> PAGE_SHIFT;
680                         if (ret ||
681                              !(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_V) ||
682                             (!(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_W) &&
683                              fault->access != 0 && fault->access != 3)) {
684                                 nouveau_svm_fault_cancel_fault(svm, fault);
685                                 continue;
686                         }
687                         replay++;
688                 }
689         }
690
691         /* Issue fault replay to the GPU. */
692         if (replay)
693                 nouveau_svm_fault_replay(svm);
694         return NVIF_NOTIFY_KEEP;
695 }
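/* Summary of the handler above:
 *
 *   1. Drain the hardware ring between GET and PUT into buffer->fault[]
 *      (one record per 0x20 bytes, wrapping at buffer->entries) and write
 *      GET back so the hardware can reuse the slots.
 *   2. Sort the cached faults and resolve each instance pointer to its
 *      SVMM once per group.
 *   3. For each fault, build a PFNMAP window of up to ARRAY_SIZE(args.phys)
 *      pages, clipped against the unmanaged region and the CPU VMA, folding
 *      duplicate and nearby faults into the same window.
 *   4. Fault the window in with hmm_vma_fault(), run
 *      nouveau_dmem_convert_pfn() on the snapshot, and push the result to
 *      the VMM with a single NVIF_VMM_V0_PFNMAP ioctl.
 *   5. Cancel any fault whose page did not end up valid (or writeable when
 *      required), and issue a replay for the rest.
 */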
696
697 static void
698 nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
699 {
700         struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
701         nvif_notify_put(&buffer->notify);
702 }
703
704 static int
705 nouveau_svm_fault_buffer_init(struct nouveau_svm *svm, int id)
706 {
707         struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
708         struct nvif_object *device = &svm->drm->client.device.object;
709         buffer->get = nvif_rd32(device, buffer->getaddr);
710         buffer->put = nvif_rd32(device, buffer->putaddr);
711         SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put);
712         return nvif_notify_get(&buffer->notify);
713 }
714
715 static void
716 nouveau_svm_fault_buffer_dtor(struct nouveau_svm *svm, int id)
717 {
718         struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
719         int i;
720
721         if (buffer->fault) {
722                 for (i = 0; i < buffer->entries && buffer->fault[i]; i++)
723                         kfree(buffer->fault[i]);
724                 kvfree(buffer->fault);
725         }
726
727         nouveau_svm_fault_buffer_fini(svm, id);
728
729         nvif_notify_fini(&buffer->notify);
730         nvif_object_fini(&buffer->object);
731 }
732
733 static int
734 nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
735 {
736         struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
737         struct nouveau_drm *drm = svm->drm;
738         struct nvif_object *device = &drm->client.device.object;
739         struct nvif_clb069_v0 args = {};
740         int ret;
741
742         buffer->id = id;
743
744         ret = nvif_object_init(device, 0, oclass, &args, sizeof(args),
745                                &buffer->object);
746         if (ret < 0) {
747                 SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
748                 return ret;
749         }
750
751         nvif_object_map(&buffer->object, NULL, 0);
752         buffer->entries = args.entries;
753         buffer->getaddr = args.get;
754         buffer->putaddr = args.put;
755
756         ret = nvif_notify_init(&buffer->object, nouveau_svm_fault, true,
757                                NVB069_V0_NTFY_FAULT, NULL, 0, 0,
758                                &buffer->notify);
759         if (ret)
760                 return ret;
761
762         buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries, GFP_KERNEL);
763         if (!buffer->fault)
764                 return -ENOMEM;
765
766         return nouveau_svm_fault_buffer_init(svm, id);
767 }
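/* Constructor flow for a fault buffer: allocate the fault buffer object for
 * the chosen class, map it, record the entry count and GET/PUT register
 * offsets returned in nvif_clb069_v0, hook the NVB069_V0_NTFY_FAULT
 * notifier up to nouveau_svm_fault(), allocate the parsed-fault array, and
 * finally arm the notifier via nouveau_svm_fault_buffer_init().
 */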
768
769 void
770 nouveau_svm_resume(struct nouveau_drm *drm)
771 {
772         struct nouveau_svm *svm = drm->svm;
773         if (svm)
774                 nouveau_svm_fault_buffer_init(svm, 0);
775 }
776
777 void
778 nouveau_svm_suspend(struct nouveau_drm *drm)
779 {
780         struct nouveau_svm *svm = drm->svm;
781         if (svm)
782                 nouveau_svm_fault_buffer_fini(svm, 0);
783 }
784
785 void
786 nouveau_svm_fini(struct nouveau_drm *drm)
787 {
788         struct nouveau_svm *svm = drm->svm;
789         if (svm) {
790                 nouveau_svm_fault_buffer_dtor(svm, 0);
791                 kfree(drm->svm);
792                 drm->svm = NULL;
793         }
794 }
795
796 void
797 nouveau_svm_init(struct nouveau_drm *drm)
798 {
799         static const struct nvif_mclass buffers[] = {
800                 {   VOLTA_FAULT_BUFFER_A, 0 },
801                 { MAXWELL_FAULT_BUFFER_A, 0 },
802                 {}
803         };
804         struct nouveau_svm *svm;
805         int ret;
806
807         /* Disable on Volta and newer until channel recovery is fixed,
808          * otherwise clients will have a trivial way to trash the GPU
809          * for everyone.
810          */
811         if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL)
812                 return;
813
814         if (!(drm->svm = svm = kzalloc(sizeof(*drm->svm), GFP_KERNEL)))
815                 return;
816
817         drm->svm->drm = drm;
818         mutex_init(&drm->svm->mutex);
819         INIT_LIST_HEAD(&drm->svm->inst);
820
821         ret = nvif_mclass(&drm->client.device.object, buffers);
822         if (ret < 0) {
823                 SVM_DBG(svm, "No supported fault buffer class");
824                 nouveau_svm_fini(drm);
825                 return;
826         }
827
828         ret = nouveau_svm_fault_buffer_ctor(svm, buffers[ret].oclass, 0);
829         if (ret) {
830                 nouveau_svm_fini(drm);
831                 return;
832         }
833
834         SVM_DBG(svm, "Initialised");
835 }
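/* Initialisation summary: SVM state is only created for Pascal and older
 * (the family check above), after which nvif_mclass() picks the first
 * class in buffers[] that the device exposes and its oclass is used to
 * construct fault buffer 0.  Any failure tears the SVM state back down,
 * leaving the driver usable without SVM.
 */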