/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_svm.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dmem.h"

#include <nvif/notify.h>
#include <nvif/object.h>
#include <nvif/vmm.h>

#include <nvif/class.h>
#include <nvif/clb069.h>
#include <nvif/ifc00d.h>

#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/hmm.h>

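/* Per-device SVM state: the list of channel instances taking part in SVM
 * (protected by mutex) and the replayable fault buffer used to receive and
 * process GPU page faults.
 */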
struct nouveau_svm {
        struct nouveau_drm *drm;
        struct mutex mutex;
        struct list_head inst;

        struct nouveau_svm_fault_buffer {
                int id;
                struct nvif_object object;
                u32 entries;
                u32 getaddr;
                u32 putaddr;
                u32 get;
                u32 put;
                struct nvif_notify notify;

                struct nouveau_svm_fault {
                        u64 inst;
                        u64 addr;
                        u64 time;
                        u32 engine;
                        u8  gpc;
                        u8  hub;
                        u8  access;
                        u8  client;
                        u8  fault;
                        struct nouveau_svmm *svmm;
                } **fault;
                int fault_nr;
        } buffer[1];
};

#define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)

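/* Links a channel instance pointer to the SVMM that owns it; entries live
 * on the nouveau_svm.inst list.
 */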
struct nouveau_ivmm {
        struct nouveau_svmm *svmm;
        u64 inst;
        struct list_head head;
};

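/* Find the SVMM linked to a channel instance pointer, if any.  Called with
 * nouveau_svm.mutex held.
 */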
static struct nouveau_ivmm *
nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
{
        struct nouveau_ivmm *ivmm;
        list_for_each_entry(ivmm, &svm->inst, head) {
                if (ivmm->inst == inst)
                        return ivmm;
        }
        return NULL;
}

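/* Per-client SVM state: the SVM-enabled VMM being mirrored, the CPU mm it
 * is mirrored from, and the window of address-space ("unmanaged") that is
 * not available for SVM mappings.
 */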
struct nouveau_svmm {
        struct nouveau_vmm *vmm;
        struct {
                unsigned long start;
                unsigned long limit;
        } unmanaged;

        struct mutex mutex;

        struct mm_struct *mm;
        struct hmm_mirror mirror;
};

#define SVMM_DBG(s,f,a...)                                                     \
        NV_DEBUG((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
#define SVMM_ERR(s,f,a...)                                                     \
        NV_WARN((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)

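/* DRM_NOUVEAU_SVM_BIND ioctl: validate the arguments and make a best-effort
 * attempt to migrate the requested range of the client's address-space to
 * GPU memory.
 */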
int
nouveau_svmm_bind(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct drm_nouveau_svm_bind *args = data;
        unsigned target, cmd, priority;
        unsigned long addr, end, size;
        struct mm_struct *mm;

        args->va_start &= PAGE_MASK;
        args->va_end &= PAGE_MASK;

        /* Sanity check arguments */
        if (args->reserved0 || args->reserved1)
                return -EINVAL;
        if (args->header & (~NOUVEAU_SVM_BIND_VALID_MASK))
                return -EINVAL;
        if (args->va_start >= args->va_end)
                return -EINVAL;
        if (!args->npages)
                return -EINVAL;

        cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
        cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
        switch (cmd) {
        case NOUVEAU_SVM_BIND_COMMAND__MIGRATE:
                break;
        default:
                return -EINVAL;
        }

        priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
        priority &= NOUVEAU_SVM_BIND_PRIORITY_MASK;

        /* FIXME: support CPU targets, ie. all target values < GPU_VRAM */
        target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT;
        target &= NOUVEAU_SVM_BIND_TARGET_MASK;
        switch (target) {
        case NOUVEAU_SVM_BIND_TARGET__GPU_VRAM:
                break;
        default:
                return -EINVAL;
        }

        /*
         * FIXME: For now, refuse a non-zero stride; we need to change the
         * migrate kernel function to handle strides, to avoid creating a
         * mess within each device driver.
         */
        if (args->stride)
                return -EINVAL;

        size = ((unsigned long)args->npages) << PAGE_SHIFT;
        if ((args->va_start + size) <= args->va_start)
                return -EINVAL;
        if ((args->va_start + size) > args->va_end)
                return -EINVAL;

        /*
         * Okay, we have been asked to do something sane.  For now we only
         * support migrate commands, but we will add things like memory
         * policy (what to do on page fault) and maybe some other commands.
         */

        mm = get_task_mm(current);
        down_read(&mm->mmap_sem);

        for (addr = args->va_start, end = args->va_start + size; addr < end;) {
                struct vm_area_struct *vma;
                unsigned long next;

                vma = find_vma_intersection(mm, addr, end);
                if (!vma)
                        break;

                next = min(vma->vm_end, end);
                /* This is a best effort so we ignore errors */
                nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
                addr = next;
        }

        /*
         * FIXME: Return the number of pages we have migrated.  Again, we
         * need to update the migrate API to return that information so that
         * we can report it to user space.
         */
        args->result = 0;

        up_read(&mm->mmap_sem);
        mmput(mm);

        return 0;
}

/* Unlink channel instance from SVMM. */
void
nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
{
        struct nouveau_ivmm *ivmm;
        if (svmm) {
                mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
                ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
                if (ivmm) {
                        list_del(&ivmm->head);
                        kfree(ivmm);
                }
                mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
        }
}

/* Link channel instance to SVMM. */
int
nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
{
        struct nouveau_ivmm *ivmm;
        if (svmm) {
                if (!(ivmm = kmalloc(sizeof(*ivmm), GFP_KERNEL)))
                        return -ENOMEM;
                ivmm->svmm = svmm;
                ivmm->inst = inst;

                mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
                list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
                mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
        }
        return 0;
}

/* Invalidate SVMM address-range on GPU. */
static void
nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
{
        if (limit > start) {
                bool super = svmm->vmm->vmm.object.client->super;
                svmm->vmm->vmm.object.client->super = true;
                nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
                                 &(struct nvif_vmm_pfnclr_v0) {
                                        .addr = start,
                                        .size = limit - start,
                                 }, sizeof(struct nvif_vmm_pfnclr_v0));
                svmm->vmm->vmm.object.client->super = super;
        }
}

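/* HMM mirror callback, invoked when the CPU page tables covering
 * [start, end) have changed: drop any GPU mappings for the affected range
 * so the next GPU access faults and is re-resolved.
 */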
static int
nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
                                        const struct hmm_update *update)
{
        struct nouveau_svmm *svmm = container_of(mirror, typeof(*svmm), mirror);
        unsigned long start = update->start;
        unsigned long limit = update->end;

        if (!update->blockable)
                return -EAGAIN;

        SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);

        mutex_lock(&svmm->mutex);
        if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
                if (start < svmm->unmanaged.start) {
                        nouveau_svmm_invalidate(svmm, start,
                                                svmm->unmanaged.limit);
                }
                start = svmm->unmanaged.limit;
        }

        nouveau_svmm_invalidate(svmm, start, limit);
        mutex_unlock(&svmm->mutex);
        return 0;
}

static void
nouveau_svmm_release(struct hmm_mirror *mirror)
{
}

static const struct hmm_mirror_ops
nouveau_svmm = {
        .sync_cpu_device_pagetables = nouveau_svmm_sync_cpu_device_pagetables,
        .release = nouveau_svmm_release,
};

void
nouveau_svmm_fini(struct nouveau_svmm **psvmm)
{
        struct nouveau_svmm *svmm = *psvmm;
        if (svmm) {
                hmm_mirror_unregister(&svmm->mirror);
                kfree(*psvmm);
                *psvmm = NULL;
        }
}

int
nouveau_svmm_init(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_svmm *svmm;
        struct drm_nouveau_svm_init *args = data;
        int ret;

        /* Allocate tracking for SVM-enabled VMM. */
        if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
                return -ENOMEM;
        svmm->vmm = &cli->svm;
        svmm->unmanaged.start = args->unmanaged_addr;
        svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size;
        mutex_init(&svmm->mutex);

        /* Check that SVM isn't already enabled for the client. */
        mutex_lock(&cli->mutex);
        if (cli->svm.cli) {
                ret = -EBUSY;
                goto done;
        }

        /* Allocate a new GPU VMM that can support SVM (managed by the
         * client, with replayable faults enabled).
         *
         * All future channel/memory allocations will make use of this
         * VMM instead of the standard one.
         */
        ret = nvif_vmm_init(&cli->mmu, cli->vmm.vmm.object.oclass, true,
                            args->unmanaged_addr, args->unmanaged_size,
                            &(struct gp100_vmm_v0) {
                                .fault_replay = true,
                            }, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
        if (ret)
                goto done;

        /* Enable HMM mirroring of CPU address-space to VMM. */
        svmm->mm = get_task_mm(current);
        down_write(&svmm->mm->mmap_sem);
        svmm->mirror.ops = &nouveau_svmm;
        ret = hmm_mirror_register(&svmm->mirror, svmm->mm);
        if (ret == 0) {
                cli->svm.svmm = svmm;
                cli->svm.cli = cli;
        }
        up_write(&svmm->mm->mmap_sem);
        mmput(svmm->mm);

done:
        if (ret)
                nouveau_svmm_fini(&svmm);
        mutex_unlock(&cli->mutex);
        return ret;
}

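/* Translation tables from the HMM pfn flags/values used by hmm_range_fault()
 * to the NVIF_VMM_PFNMAP_V0 representation consumed by the GPU page table
 * update method.
 */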
static const u64
nouveau_svm_pfn_flags[HMM_PFN_FLAG_MAX] = {
        [HMM_PFN_VALID         ] = NVIF_VMM_PFNMAP_V0_V,
        [HMM_PFN_WRITE         ] = NVIF_VMM_PFNMAP_V0_W,
        [HMM_PFN_DEVICE_PRIVATE] = NVIF_VMM_PFNMAP_V0_VRAM,
};

static const u64
nouveau_svm_pfn_values[HMM_PFN_VALUE_MAX] = {
        [HMM_PFN_ERROR  ] = ~NVIF_VMM_PFNMAP_V0_V,
        [HMM_PFN_NONE   ] =  NVIF_VMM_PFNMAP_V0_NONE,
        [HMM_PFN_SPECIAL] = ~NVIF_VMM_PFNMAP_V0_V,
};

/* Issue fault replay for GPU to retry accesses that faulted previously. */
static void
nouveau_svm_fault_replay(struct nouveau_svm *svm)
{
        SVM_DBG(svm, "replay");
        WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
                                 GP100_VMM_VN_FAULT_REPLAY,
                                 &(struct gp100_vmm_fault_replay_vn) {},
                                 sizeof(struct gp100_vmm_fault_replay_vn)));
}

/* Cancel a replayable fault that could not be handled.
 *
 * Cancelling the fault will trigger recovery to reset the engine
 * and kill the offending channel (ie. GPU SIGSEGV).
 */
static void
nouveau_svm_fault_cancel(struct nouveau_svm *svm,
                         u64 inst, u8 hub, u8 gpc, u8 client)
{
        SVM_DBG(svm, "cancel %016llx %d %02x %02x", inst, hub, gpc, client);
        WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
                                 GP100_VMM_VN_FAULT_CANCEL,
                                 &(struct gp100_vmm_fault_cancel_v0) {
                                        .hub = hub,
                                        .gpc = gpc,
                                        .client = client,
                                        .inst = inst,
                                 }, sizeof(struct gp100_vmm_fault_cancel_v0)));
}

static void
nouveau_svm_fault_cancel_fault(struct nouveau_svm *svm,
                               struct nouveau_svm_fault *fault)
{
        nouveau_svm_fault_cancel(svm, fault->inst,
                                      fault->hub,
                                      fault->gpc,
                                      fault->client);
}

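/* Sort comparator: order faults by instance pointer, then address, then
 * access type, with read/prefetch faults sorting after other access types
 * so that duplicates of an address only need handling once.
 */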
static int
nouveau_svm_fault_cmp(const void *a, const void *b)
{
        const struct nouveau_svm_fault *fa = *(struct nouveau_svm_fault **)a;
        const struct nouveau_svm_fault *fb = *(struct nouveau_svm_fault **)b;
        int ret;
        if ((ret = (s64)fa->inst - fb->inst))
                return ret;
        if ((ret = (s64)fa->addr - fb->addr))
                return ret;
        /*XXX: atomic? */
        return (fa->access == 0 || fa->access == 3) -
               (fb->access == 0 || fb->access == 3);
}

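/* Decode a single fault buffer entry at the given offset and append it to
 * the software fault cache, cancelling the fault if no memory is available
 * to store it.
 */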
static void
nouveau_svm_fault_cache(struct nouveau_svm *svm,
                        struct nouveau_svm_fault_buffer *buffer, u32 offset)
{
        struct nvif_object *memory = &buffer->object;
        const u32 instlo = nvif_rd32(memory, offset + 0x00);
        const u32 insthi = nvif_rd32(memory, offset + 0x04);
        const u32 addrlo = nvif_rd32(memory, offset + 0x08);
        const u32 addrhi = nvif_rd32(memory, offset + 0x0c);
        const u32 timelo = nvif_rd32(memory, offset + 0x10);
        const u32 timehi = nvif_rd32(memory, offset + 0x14);
        const u32 engine = nvif_rd32(memory, offset + 0x18);
        const u32   info = nvif_rd32(memory, offset + 0x1c);
        const u64   inst = (u64)insthi << 32 | instlo;
        const u8     gpc = (info & 0x1f000000) >> 24;
        const u8     hub = (info & 0x00100000) >> 20;
        const u8  client = (info & 0x00007f00) >> 8;
        struct nouveau_svm_fault *fault;

        /* XXX: I think we're supposed to spin waiting. */
        if (WARN_ON(!(info & 0x80000000)))
                return;

        nvif_mask(memory, offset + 0x1c, 0x80000000, 0x00000000);

        if (!buffer->fault[buffer->fault_nr]) {
                fault = kmalloc(sizeof(*fault), GFP_KERNEL);
                if (WARN_ON(!fault)) {
                        nouveau_svm_fault_cancel(svm, inst, hub, gpc, client);
                        return;
                }
                buffer->fault[buffer->fault_nr] = fault;
        }

        fault = buffer->fault[buffer->fault_nr++];
        fault->inst   = inst;
        fault->addr   = (u64)addrhi << 32 | addrlo;
        fault->time   = (u64)timehi << 32 | timelo;
        fault->engine = engine;
        fault->gpc    = gpc;
        fault->hub    = hub;
        fault->access = (info & 0x000f0000) >> 16;
        fault->client = client;
        fault->fault  = (info & 0x0000001f);

        SVM_DBG(svm, "fault %016llx %016llx %02x",
                fault->inst, fault->addr, fault->access);
}

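/* Unregister the HMM range and report whether it stayed valid, ie. whether
 * no CPU invalidation raced with the fault handling.
 */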
static inline bool
nouveau_range_done(struct hmm_range *range)
{
        bool ret = hmm_range_valid(range);

        hmm_range_unregister(range);
        return ret;
}

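/* Register the range with HMM and fault in the CPU pages backing it.  On
 * failure mmap_sem is dropped and an error returned; on success, returns
 * zero with mmap_sem still held and the range still registered.
 */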
static int
nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
{
        long ret;

        range->default_flags = 0;
        range->pfn_flags_mask = -1UL;

        ret = hmm_range_register(range, mirror,
                                 range->start, range->end,
                                 PAGE_SHIFT);
        if (ret) {
                up_read(&range->vma->vm_mm->mmap_sem);
                return (int)ret;
        }

        if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
                up_read(&range->vma->vm_mm->mmap_sem);
                return -EAGAIN;
        }

        ret = hmm_range_fault(range, true);
        if (ret <= 0) {
                if (ret == 0)
                        ret = -EBUSY;
                up_read(&range->vma->vm_mm->mmap_sem);
                hmm_range_unregister(range);
                return ret;
        }
        return 0;
}

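/* Fault buffer notify handler: drain pending replayable faults from the HW
 * buffer, resolve each fault window via HMM, push the resulting mappings to
 * the GPU, then replay or cancel the faults as appropriate.
 */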
static int
nouveau_svm_fault(struct nvif_notify *notify)
{
        struct nouveau_svm_fault_buffer *buffer =
                container_of(notify, typeof(*buffer), notify);
        struct nouveau_svm *svm =
                container_of(buffer, typeof(*svm), buffer[buffer->id]);
        struct nvif_object *device = &svm->drm->client.device.object;
        struct nouveau_svmm *svmm;
        struct {
                struct {
                        struct nvif_ioctl_v0 i;
                        struct nvif_ioctl_mthd_v0 m;
                        struct nvif_vmm_pfnmap_v0 p;
                } i;
                u64 phys[16];
        } args;
        struct hmm_range range;
        struct vm_area_struct *vma;
        u64 inst, start, limit;
        int fi, fn, pi, fill;
        int replay = 0, ret;

        /* Parse available fault buffer entries into a cache, and update
         * the GET pointer so HW can reuse the entries.
         */
        SVM_DBG(svm, "fault handler");
        if (buffer->get == buffer->put) {
                buffer->put = nvif_rd32(device, buffer->putaddr);
                buffer->get = nvif_rd32(device, buffer->getaddr);
                if (buffer->get == buffer->put)
                        return NVIF_NOTIFY_KEEP;
        }
        buffer->fault_nr = 0;

        SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put);
        while (buffer->get != buffer->put) {
                nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20);
                if (++buffer->get == buffer->entries)
                        buffer->get = 0;
        }
        nvif_wr32(device, buffer->getaddr, buffer->get);
        SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);

        /* Sort parsed faults by instance pointer to prevent unnecessary
         * instance to SVMM translations, followed by address and access
         * type to reduce the amount of work when handling the faults.
         */
        sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
             nouveau_svm_fault_cmp, NULL);

        /* Lookup SVMM structure for each unique instance pointer. */
        mutex_lock(&svm->mutex);
        for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
                if (!svmm || buffer->fault[fi]->inst != inst) {
                        struct nouveau_ivmm *ivmm =
                                nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
                        svmm = ivmm ? ivmm->svmm : NULL;
                        inst = buffer->fault[fi]->inst;
                        SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
                }
                buffer->fault[fi]->svmm = svmm;
        }
        mutex_unlock(&svm->mutex);

        /* Process list of faults. */
        args.i.i.version = 0;
        args.i.i.type = NVIF_IOCTL_V0_MTHD;
        args.i.m.version = 0;
        args.i.m.method = NVIF_VMM_V0_PFNMAP;
        args.i.p.version = 0;

        for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
                /* Cancel any faults from non-SVM channels. */
                if (!(svmm = buffer->fault[fi]->svmm)) {
                        nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
                        continue;
                }
                SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);

                /* We try to group handling of faults within a small
                 * window into a single update.
                 */
                start = buffer->fault[fi]->addr;
                limit = start + (ARRAY_SIZE(args.phys) << PAGE_SHIFT);
                if (start < svmm->unmanaged.limit)
                        limit = min_t(u64, limit, svmm->unmanaged.start);
                else
                if (limit > svmm->unmanaged.start)
                        start = max_t(u64, start, svmm->unmanaged.limit);
                SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);

                /* Intersect fault window with the CPU VMA, cancelling
                 * the fault if the address is invalid.
                 */
                down_read(&svmm->mm->mmap_sem);
                vma = find_vma_intersection(svmm->mm, start, limit);
                if (!vma) {
                        SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
                        up_read(&svmm->mm->mmap_sem);
                        nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
                        continue;
                }
                start = max_t(u64, start, vma->vm_start);
                limit = min_t(u64, limit, vma->vm_end);
                SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);

                if (buffer->fault[fi]->addr != start) {
                        SVMM_ERR(svmm, "addr %016llx", buffer->fault[fi]->addr);
                        up_read(&svmm->mm->mmap_sem);
                        nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
                        continue;
                }

                /* Prepare the GPU-side update of all pages within the
                 * fault window, determining required pages and access
                 * permissions based on pending faults.
                 */
                args.i.p.page = PAGE_SHIFT;
                args.i.p.addr = start;
                for (fn = fi, pi = 0;;) {
                        /* Determine required permissions based on GPU fault
                         * access flags.
                         *XXX: atomic?
                         */
                        if (buffer->fault[fn]->access != 0 /* READ. */ &&
                            buffer->fault[fn]->access != 3 /* PREFETCH. */) {
                                args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V |
                                                  NVIF_VMM_PFNMAP_V0_W;
                        } else {
                                args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V;
                        }
                        args.i.p.size = pi << PAGE_SHIFT;

                        /* It's okay to skip over duplicate addresses from the
                         * same SVMM as faults are ordered by access type such
                         * that only the first one needs to be handled.
                         *
                         * ie. WRITE faults appear first, thus any handling of
                         * pending READ faults will already be satisfied.
                         */
                        while (++fn < buffer->fault_nr &&
                               buffer->fault[fn]->svmm == svmm &&
                               buffer->fault[fn    ]->addr ==
                               buffer->fault[fn - 1]->addr);

                        /* If the next fault is outside the window, or all GPU
                         * faults have been dealt with, we're done here.
                         */
                        if (fn >= buffer->fault_nr ||
                            buffer->fault[fn]->svmm != svmm ||
                            buffer->fault[fn]->addr >= limit)
                                break;

                        /* Fill in the gap between this fault and the next. */
                        fill = (buffer->fault[fn    ]->addr -
                                buffer->fault[fn - 1]->addr) >> PAGE_SHIFT;
                        while (--fill)
                                args.phys[pi++] = NVIF_VMM_PFNMAP_V0_NONE;
                }

                SVMM_DBG(svmm, "wndw %016llx-%016llx covering %d fault(s)",
                         args.i.p.addr,
                         args.i.p.addr + args.i.p.size, fn - fi);

                /* Have HMM fault pages within the fault window to the GPU. */
                range.vma = vma;
                range.start = args.i.p.addr;
                range.end = args.i.p.addr + args.i.p.size;
                range.pfns = args.phys;
                range.flags = nouveau_svm_pfn_flags;
                range.values = nouveau_svm_pfn_values;
                range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
again:
                ret = nouveau_range_fault(&svmm->mirror, &range);
                if (ret == 0) {
                        mutex_lock(&svmm->mutex);
                        if (!nouveau_range_done(&range)) {
                                mutex_unlock(&svmm->mutex);
                                goto again;
                        }

                        nouveau_dmem_convert_pfn(svm->drm, &range);

                        svmm->vmm->vmm.object.client->super = true;
                        ret = nvif_object_ioctl(&svmm->vmm->vmm.object,
                                                &args, sizeof(args.i) +
                                                pi * sizeof(args.phys[0]),
                                                NULL);
                        svmm->vmm->vmm.object.client->super = false;
                        mutex_unlock(&svmm->mutex);
                        up_read(&svmm->mm->mmap_sem);
                }

                /* Cancel any faults in the window whose pages didn't manage
                 * to keep their valid bit, or stay writeable when required.
                 *
                 * If handling failed completely, cancel all faults.
                 */
                while (fi < fn) {
                        struct nouveau_svm_fault *fault = buffer->fault[fi++];
                        pi = (fault->addr - range.start) >> PAGE_SHIFT;
                        if (ret ||
                             !(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_V) ||
                            (!(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_W) &&
                             fault->access != 0 && fault->access != 3)) {
                                nouveau_svm_fault_cancel_fault(svm, fault);
                                continue;
                        }
                        replay++;
                }
        }

        /* Issue fault replay to the GPU. */
        if (replay)
                nouveau_svm_fault_replay(svm);
        return NVIF_NOTIFY_KEEP;
}

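/* Stop listening for fault buffer notifications (used across suspend). */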
static void
nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
        nvif_notify_put(&buffer->notify);
}

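/* Resynchronise the cached GET/PUT pointers with HW and (re)enable fault
 * notifications.
 */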
static int
nouveau_svm_fault_buffer_init(struct nouveau_svm *svm, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
        struct nvif_object *device = &svm->drm->client.device.object;
        buffer->get = nvif_rd32(device, buffer->getaddr);
        buffer->put = nvif_rd32(device, buffer->putaddr);
        SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put);
        return nvif_notify_get(&buffer->notify);
}

static void
nouveau_svm_fault_buffer_dtor(struct nouveau_svm *svm, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
        int i;

        if (buffer->fault) {
                for (i = 0; i < buffer->entries && buffer->fault[i]; i++)
                        kfree(buffer->fault[i]);
                kvfree(buffer->fault);
        }

        nouveau_svm_fault_buffer_fini(svm, id);

        nvif_notify_fini(&buffer->notify);
        nvif_object_fini(&buffer->object);
}

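/* Allocate and map the replayable fault buffer object, allocate the software
 * fault cache, and hook up the notify handler used to process faults.
 */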
static int
nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
        struct nouveau_drm *drm = svm->drm;
        struct nvif_object *device = &drm->client.device.object;
        struct nvif_clb069_v0 args = {};
        int ret;

        buffer->id = id;

        ret = nvif_object_init(device, 0, oclass, &args, sizeof(args),
                               &buffer->object);
        if (ret < 0) {
                SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
                return ret;
        }

        nvif_object_map(&buffer->object, NULL, 0);
        buffer->entries = args.entries;
        buffer->getaddr = args.get;
        buffer->putaddr = args.put;

        ret = nvif_notify_init(&buffer->object, nouveau_svm_fault, true,
                               NVB069_V0_NTFY_FAULT, NULL, 0, 0,
                               &buffer->notify);
        if (ret)
                return ret;

        buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries, GFP_KERNEL);
        if (!buffer->fault)
                return -ENOMEM;

        return nouveau_svm_fault_buffer_init(svm, id);
}

void
nouveau_svm_resume(struct nouveau_drm *drm)
{
        struct nouveau_svm *svm = drm->svm;
        if (svm)
                nouveau_svm_fault_buffer_init(svm, 0);
}

void
nouveau_svm_suspend(struct nouveau_drm *drm)
{
        struct nouveau_svm *svm = drm->svm;
        if (svm)
                nouveau_svm_fault_buffer_fini(svm, 0);
}

void
nouveau_svm_fini(struct nouveau_drm *drm)
{
        struct nouveau_svm *svm = drm->svm;
        if (svm) {
                nouveau_svm_fault_buffer_dtor(svm, 0);
                kfree(drm->svm);
                drm->svm = NULL;
        }
}

void
nouveau_svm_init(struct nouveau_drm *drm)
{
        static const struct nvif_mclass buffers[] = {
                {   VOLTA_FAULT_BUFFER_A, 0 },
                { MAXWELL_FAULT_BUFFER_A, 0 },
                {}
        };
        struct nouveau_svm *svm;
        int ret;

        /* Disable on Volta and newer until channel recovery is fixed,
         * otherwise clients will have a trivial way to trash the GPU
         * for everyone.
         */
        if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL)
                return;

        if (!(drm->svm = svm = kzalloc(sizeof(*drm->svm), GFP_KERNEL)))
                return;

        drm->svm->drm = drm;
        mutex_init(&drm->svm->mutex);
        INIT_LIST_HEAD(&drm->svm->inst);

        ret = nvif_mclass(&drm->client.device.object, buffers);
        if (ret < 0) {
                SVM_DBG(svm, "No supported fault buffer class");
                nouveau_svm_fini(drm);
                return;
        }

        ret = nouveau_svm_fault_buffer_ctor(svm, buffers[ret].oclass, 0);
        if (ret) {
                nouveau_svm_fini(drm);
                return;
        }

        SVM_DBG(svm, "Initialised");
}