/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_svm.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dmem.h"

#include <nvif/notify.h>
#include <nvif/object.h>
#include <nvif/vmm.h>

#include <nvif/class.h>
#include <nvif/clb069.h>
#include <nvif/ifc00d.h>

#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/hmm.h>
        struct nouveau_drm *drm;
        struct list_head inst;

        struct nouveau_svm_fault_buffer {
                struct nvif_object object;
                struct nvif_notify notify;

                struct nouveau_svm_fault {
                        struct nouveau_svmm *svmm;

#define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
        struct nouveau_svmm *svmm;
        struct list_head head;
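
/* Find the SVMM linked to a channel instance pointer. */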
static struct nouveau_ivmm *
nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
{
        struct nouveau_ivmm *ivmm;
        list_for_each_entry(ivmm, &svm->inst, head) {
                if (ivmm->inst == inst)
                        return ivmm;
        }
        return NULL;
}
        struct nouveau_vmm *vmm;
        struct hmm_mirror mirror;

#define SVMM_DBG(s,f,a...) \
        NV_DEBUG((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
#define SVMM_ERR(s,f,a...) \
        NV_WARN((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
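
/* DRM_NOUVEAU_SVM_BIND ioctl: migrate ranges of the client's address space
 * to the requested target (currently GPU VRAM only), one VMA at a time.
 */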
nouveau_svmm_bind(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct drm_nouveau_svm_bind *args = data;
        unsigned target, cmd, priority;
        unsigned long addr, end, size;
        struct mm_struct *mm;

        args->va_start &= PAGE_MASK;
        args->va_end &= PAGE_MASK;

        /* Sanity check arguments */
        if (args->reserved0 || args->reserved1)
                return -EINVAL;
        if (args->header & (~NOUVEAU_SVM_BIND_VALID_MASK))
                return -EINVAL;
        if (args->va_start >= args->va_end)
                return -EINVAL;

        cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
        cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
        switch (cmd) {
        case NOUVEAU_SVM_BIND_COMMAND__MIGRATE:
                break;
        default:
                return -EINVAL;
        }

        priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
        priority &= NOUVEAU_SVM_BIND_PRIORITY_MASK;

        /* FIXME: support CPU targets, i.e. all target values < GPU_VRAM */
        target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT;
        target &= NOUVEAU_SVM_BIND_TARGET_MASK;
        switch (target) {
        case NOUVEAU_SVM_BIND_TARGET__GPU_VRAM:
                break;
        default:
                return -EINVAL;
        }

        /*
         * FIXME: For now refuse a non-zero stride; we need to change the
         * migrate kernel function to handle strides, to avoid creating a
         * mess within each device driver.
         */

        size = ((unsigned long)args->npages) << PAGE_SHIFT;
        if ((args->va_start + size) <= args->va_start)
                return -EINVAL;
        if ((args->va_start + size) > args->va_end)
                return -EINVAL;

        /*
         * OK, we are asked to do something sane. For now we only support
         * migrate commands, but we will add things like memory policy
         * (what to do on page fault) and maybe some other commands.
         */

        mm = get_task_mm(current);
        down_read(&mm->mmap_sem);

        for (addr = args->va_start, end = args->va_start + size; addr < end;) {
                struct vm_area_struct *vma;
                unsigned long next;

                vma = find_vma_intersection(mm, addr, end);
                if (!vma)
                        break;

                next = min(vma->vm_end, end);
                /* This is a best-effort operation, so we ignore errors. */
                nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
                addr = next;
        }

        /*
         * FIXME: Return the number of pages we have migrated; again, we need
         * to update the migrate API to return that information so that we
         * can report it to user space.
         */

        up_read(&mm->mmap_sem);
/* Unlink channel instance from SVMM. */
nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
{
        struct nouveau_ivmm *ivmm;

        mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
        ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
        list_del(&ivmm->head);
        mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
/* Link channel instance to SVMM. */
nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
{
        struct nouveau_ivmm *ivmm;

        if (!(ivmm = kmalloc(sizeof(*ivmm), GFP_KERNEL)))
                return -ENOMEM;
        ivmm->svmm = svmm;
        ivmm->inst = inst;

        mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
        list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
        mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
/* Invalidate SVMM address-range on GPU. */
nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
{
        bool super = svmm->vmm->vmm.object.client->super;
        svmm->vmm->vmm.object.client->super = true;
        nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
                         &(struct nvif_vmm_pfnclr_v0) {
                                .addr = start,
                                .size = limit - start,
                         }, sizeof(struct nvif_vmm_pfnclr_v0));
        svmm->vmm->vmm.object.client->super = super;
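
/* HMM mirror callback: a CPU address range is being invalidated, so drop any
 * GPU mappings covering it (the unmanaged window is left alone).
 */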
nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
                                        const struct hmm_update *update)
{
        struct nouveau_svmm *svmm = container_of(mirror, typeof(*svmm), mirror);
        unsigned long start = update->start;
        unsigned long limit = update->end;

        if (!update->blockable)
                return -EAGAIN;

        SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);

        mutex_lock(&svmm->mutex);
        if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
                if (start < svmm->unmanaged.start) {
                        nouveau_svmm_invalidate(svmm, start,
                                                svmm->unmanaged.limit);
                }
                start = svmm->unmanaged.limit;
        }

        nouveau_svmm_invalidate(svmm, start, limit);
        mutex_unlock(&svmm->mutex);
nouveau_svmm_release(struct hmm_mirror *mirror)
{
}

static const struct hmm_mirror_ops nouveau_svmm = {
        .sync_cpu_device_pagetables = nouveau_svmm_sync_cpu_device_pagetables,
        .release = nouveau_svmm_release,
};
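
/* Tear down SVM tracking for a client: stop HMM mirroring of its address
 * space.
 */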
nouveau_svmm_fini(struct nouveau_svmm **psvmm)
{
        struct nouveau_svmm *svmm = *psvmm;

        hmm_mirror_unregister(&svmm->mirror);
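
/* DRM_NOUVEAU_SVM_INIT ioctl: allocate an SVM-capable VMM for the client and
 * begin mirroring the process address space into it with HMM.
 */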
nouveau_svmm_init(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_svmm *svmm;
        struct drm_nouveau_svm_init *args = data;
        int ret;

        /* Allocate tracking for SVM-enabled VMM. */
        if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
                return -ENOMEM;
        svmm->vmm = &cli->svm;
        svmm->unmanaged.start = args->unmanaged_addr;
        svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size;
        mutex_init(&svmm->mutex);

        /* Check that SVM isn't already enabled for the client. */
        mutex_lock(&cli->mutex);

        /* Allocate a new GPU VMM that can support SVM (managed by the
         * client, with replayable faults enabled).
         *
         * All future channel/memory allocations will make use of this
         * VMM instead of the standard one.
         */
        ret = nvif_vmm_init(&cli->mmu, cli->vmm.vmm.object.oclass, true,
                            args->unmanaged_addr, args->unmanaged_size,
                            &(struct gp100_vmm_v0) {
                                .fault_replay = true,
                            }, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);

        /* Enable HMM mirroring of CPU address-space to VMM. */
        svmm->mm = get_task_mm(current);
        down_write(&svmm->mm->mmap_sem);
        svmm->mirror.ops = &nouveau_svmm;
        ret = hmm_mirror_register(&svmm->mirror, svmm->mm);
        cli->svm.svmm = svmm;
        up_write(&svmm->mm->mmap_sem);

        nouveau_svmm_fini(&svmm);
        mutex_unlock(&cli->mutex);
static const u64
nouveau_svm_pfn_flags[HMM_PFN_FLAG_MAX] = {
        [HMM_PFN_VALID         ] = NVIF_VMM_PFNMAP_V0_V,
        [HMM_PFN_WRITE         ] = NVIF_VMM_PFNMAP_V0_W,
        [HMM_PFN_DEVICE_PRIVATE] = NVIF_VMM_PFNMAP_V0_VRAM,
};

static const u64
nouveau_svm_pfn_values[HMM_PFN_VALUE_MAX] = {
        [HMM_PFN_ERROR  ] = ~NVIF_VMM_PFNMAP_V0_V,
        [HMM_PFN_NONE   ] =  NVIF_VMM_PFNMAP_V0_NONE,
        [HMM_PFN_SPECIAL] = ~NVIF_VMM_PFNMAP_V0_V,
};
/* Issue fault replay for GPU to retry accesses that faulted previously. */
nouveau_svm_fault_replay(struct nouveau_svm *svm)
{
        SVM_DBG(svm, "replay");
        WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
                                 GP100_VMM_VN_FAULT_REPLAY,
                                 &(struct gp100_vmm_fault_replay_vn) {},
                                 sizeof(struct gp100_vmm_fault_replay_vn)));
/* Cancel a replayable fault that could not be handled.
 *
 * Cancelling the fault will trigger recovery to reset the engine
 * and kill the offending channel (i.e. GPU SIGSEGV).
 */
nouveau_svm_fault_cancel(struct nouveau_svm *svm,
                         u64 inst, u8 hub, u8 gpc, u8 client)
{
        SVM_DBG(svm, "cancel %016llx %d %02x %02x", inst, hub, gpc, client);
        WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
                                 GP100_VMM_VN_FAULT_CANCEL,
                                 &(struct gp100_vmm_fault_cancel_v0) {
                                        .hub = hub,
                                        .gpc = gpc,
                                        .client = client,
                                        .inst = inst,
                                 }, sizeof(struct gp100_vmm_fault_cancel_v0)));
nouveau_svm_fault_cancel_fault(struct nouveau_svm *svm,
                               struct nouveau_svm_fault *fault)
{
        nouveau_svm_fault_cancel(svm, fault->inst,
                                 fault->hub,
                                 fault->gpc,
                                 fault->client);
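
/* Sort comparator: order faults by channel instance, then address, then
 * access type, so writes sort ahead of reads/prefetches to the same page.
 */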
nouveau_svm_fault_cmp(const void *a, const void *b)
{
        const struct nouveau_svm_fault *fa = *(struct nouveau_svm_fault **)a;
        const struct nouveau_svm_fault *fb = *(struct nouveau_svm_fault **)b;
        int ret;

        if ((ret = (s64)fa->inst - fb->inst))
                return ret;
        if ((ret = (s64)fa->addr - fb->addr))
                return ret;
        return (fa->access == 0 || fa->access == 3) -
               (fb->access == 0 || fb->access == 3);
}
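
/* Parse one 32-byte hardware fault-buffer entry into the software cache.
 * Each entry holds the channel instance, faulting address, timestamp, engine
 * and an info word encoding the GPC/hub/client/access/fault-type fields.
 */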
nouveau_svm_fault_cache(struct nouveau_svm *svm,
                        struct nouveau_svm_fault_buffer *buffer, u32 offset)
{
        struct nvif_object *memory = &buffer->object;
        const u32 instlo = nvif_rd32(memory, offset + 0x00);
        const u32 insthi = nvif_rd32(memory, offset + 0x04);
        const u32 addrlo = nvif_rd32(memory, offset + 0x08);
        const u32 addrhi = nvif_rd32(memory, offset + 0x0c);
        const u32 timelo = nvif_rd32(memory, offset + 0x10);
        const u32 timehi = nvif_rd32(memory, offset + 0x14);
        const u32 engine = nvif_rd32(memory, offset + 0x18);
        const u32 info = nvif_rd32(memory, offset + 0x1c);
        const u64 inst = (u64)insthi << 32 | instlo;
        const u8 gpc = (info & 0x1f000000) >> 24;
        const u8 hub = (info & 0x00100000) >> 20;
        const u8 client = (info & 0x00007f00) >> 8;
        struct nouveau_svm_fault *fault;

        /* XXX: I think we're supposed to spin waiting. */
        if (WARN_ON(!(info & 0x80000000)))
                return;

        nvif_mask(memory, offset + 0x1c, 0x80000000, 0x00000000);

        if (!buffer->fault[buffer->fault_nr]) {
                fault = kmalloc(sizeof(*fault), GFP_KERNEL);
                if (WARN_ON(!fault)) {
                        nouveau_svm_fault_cancel(svm, inst, hub, gpc, client);
                        return;
                }
                buffer->fault[buffer->fault_nr] = fault;
        }

        fault = buffer->fault[buffer->fault_nr++];
        fault->inst   = inst;
        fault->addr   = (u64)addrhi << 32 | addrlo;
        fault->time   = (u64)timehi << 32 | timelo;
        fault->engine = engine;
        fault->gpc    = gpc;
        fault->hub    = hub;
        fault->access = (info & 0x000f0000) >> 16;
        fault->client = client;
        fault->fault  = (info & 0x0000001f);

        SVM_DBG(svm, "fault %016llx %016llx %02x",
                fault->inst, fault->addr, fault->access);
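
/* Check whether the HMM range snapshot is still valid before dropping the
 * range registration.
 */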
nouveau_range_done(struct hmm_range *range)
{
        bool ret = hmm_range_valid(range);
        hmm_range_unregister(range);
        return ret;
}
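
/* Register an HMM range and fault in the CPU pages backing it, so their
 * addresses can be pushed to the GPU.
 */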
nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
{
        range->default_flags = 0;
        range->pfn_flags_mask = -1UL;

        ret = hmm_range_register(range, mirror,
                                 range->start, range->end,
                up_read(&range->vma->vm_mm->mmap_sem);

        if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
                up_read(&range->vma->vm_mm->mmap_sem);

        ret = hmm_range_fault(range, true);

        up_read(&range->vma->vm_mm->mmap_sem);
        hmm_range_unregister(range);
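
/* Top-level replayable-fault handler, run from the fault buffer notify.
 *
 * Drains pending fault entries into a cache, sorts them by instance, address
 * and access type so duplicates can be coalesced, translates each instance to
 * its SVMM, faults in the backing CPU pages via HMM, pushes the resulting
 * PFNs to the GPU VMM with NVIF_VMM_V0_PFNMAP, and finally replays or cancels
 * the faults as appropriate.
 */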
nouveau_svm_fault(struct nvif_notify *notify)
{
        struct nouveau_svm_fault_buffer *buffer =
                container_of(notify, typeof(*buffer), notify);
        struct nouveau_svm *svm =
                container_of(buffer, typeof(*svm), buffer[buffer->id]);
        struct nvif_object *device = &svm->drm->client.device.object;
        struct nouveau_svmm *svmm;
                struct nvif_ioctl_v0 i;
                struct nvif_ioctl_mthd_v0 m;
                struct nvif_vmm_pfnmap_v0 p;
        struct hmm_range range;
        struct vm_area_struct *vma;
        u64 inst, start, limit;
        int fi, fn, pi, fill;

        /* Parse available fault buffer entries into a cache, and update
         * the GET pointer so HW can reuse the entries.
         */
        SVM_DBG(svm, "fault handler");
        if (buffer->get == buffer->put) {
                buffer->put = nvif_rd32(device, buffer->putaddr);
                buffer->get = nvif_rd32(device, buffer->getaddr);
                if (buffer->get == buffer->put)
                        return NVIF_NOTIFY_KEEP;
        }
        buffer->fault_nr = 0;

        SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put);
        while (buffer->get != buffer->put) {
                nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20);
                if (++buffer->get == buffer->entries)
                        buffer->get = 0;
        }
        nvif_wr32(device, buffer->getaddr, buffer->get);
        SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);

        /* Sort parsed faults by instance pointer to prevent unnecessary
         * instance to SVMM translations, followed by address and access
         * type to reduce the amount of work when handling the faults.
         */
        sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
             nouveau_svm_fault_cmp, NULL);
        /* Lookup SVMM structure for each unique instance pointer. */
        mutex_lock(&svm->mutex);
        for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
                if (!svmm || buffer->fault[fi]->inst != inst) {
                        struct nouveau_ivmm *ivmm =
                                nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
                        svmm = ivmm ? ivmm->svmm : NULL;
                        inst = buffer->fault[fi]->inst;
                        SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
                }
                buffer->fault[fi]->svmm = svmm;
        }
        mutex_unlock(&svm->mutex);

        /* Process list of faults. */
        args.i.i.version = 0;
        args.i.i.type = NVIF_IOCTL_V0_MTHD;
        args.i.m.version = 0;
        args.i.m.method = NVIF_VMM_V0_PFNMAP;
        args.i.p.version = 0;
        for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
                /* Cancel any faults from non-SVM channels. */
                if (!(svmm = buffer->fault[fi]->svmm)) {
                        nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
                        continue;
                }
                SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);

                /* We try to group handling of faults within a small
                 * window into a single update.
                 */
                start = buffer->fault[fi]->addr;
                limit = start + (ARRAY_SIZE(args.phys) << PAGE_SHIFT);
                if (start < svmm->unmanaged.limit)
                        limit = min_t(u64, limit, svmm->unmanaged.start);

                if (limit > svmm->unmanaged.start)
                        start = max_t(u64, start, svmm->unmanaged.limit);
                SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);

                /* Intersect fault window with the CPU VMA, cancelling
                 * the fault if the address is invalid.
                 */
                down_read(&svmm->mm->mmap_sem);
                vma = find_vma_intersection(svmm->mm, start, limit);
                if (!vma) {
                        SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
                        up_read(&svmm->mm->mmap_sem);
                        nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
                        continue;
                }
                start = max_t(u64, start, vma->vm_start);
                limit = min_t(u64, limit, vma->vm_end);
                SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);

                if (buffer->fault[fi]->addr != start) {
                        SVMM_ERR(svmm, "addr %016llx", buffer->fault[fi]->addr);
                        up_read(&svmm->mm->mmap_sem);
                        nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
                        continue;
                }

                /* Prepare the GPU-side update of all pages within the
                 * fault window, determining required pages and access
                 * permissions based on pending faults.
                 */
                args.i.p.page = PAGE_SHIFT;
                args.i.p.addr = start;
                for (fn = fi, pi = 0;;) {
                        /* Determine required permissions based on GPU fault
                         * access flags.
                         */
                        if (buffer->fault[fn]->access != 0 /* READ. */ &&
                            buffer->fault[fn]->access != 3 /* PREFETCH. */) {
                                args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V |
                                                  NVIF_VMM_PFNMAP_V0_W;
                        } else {
                                args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V;
                        }
                        args.i.p.size = pi << PAGE_SHIFT;

                        /* It's okay to skip over duplicate addresses from the
                         * same SVMM as faults are ordered by access type such
                         * that only the first one needs to be handled.
                         *
                         * i.e. WRITE faults appear first, thus any handling of
                         * pending READ faults will already be satisfied.
                         */
                        while (++fn < buffer->fault_nr &&
                               buffer->fault[fn]->svmm == svmm &&
                               buffer->fault[fn    ]->addr ==
                               buffer->fault[fn - 1]->addr);

                        /* If the next fault is outside the window, or all GPU
                         * faults have been dealt with, we're done here.
                         */
                        if (fn >= buffer->fault_nr ||
                            buffer->fault[fn]->svmm != svmm ||
                            buffer->fault[fn]->addr >= limit)
                                break;

                        /* Fill in the gap between this fault and the next. */
                        fill = (buffer->fault[fn    ]->addr -
                                buffer->fault[fn - 1]->addr) >> PAGE_SHIFT;
                        while (--fill)
                                args.phys[pi++] = NVIF_VMM_PFNMAP_V0_NONE;
                }

                SVMM_DBG(svmm, "wndw %016llx-%016llx covering %d fault(s)",
                         args.i.p.addr,
                         args.i.p.addr + args.i.p.size, fn - fi);
                /* Have HMM fault pages within the fault window to the GPU. */
                range.start = args.i.p.addr;
                range.end = args.i.p.addr + args.i.p.size;
                range.pfns = args.phys;
                range.flags = nouveau_svm_pfn_flags;
                range.values = nouveau_svm_pfn_values;
                range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;

                ret = nouveau_range_fault(&svmm->mirror, &range);

                mutex_lock(&svmm->mutex);
                if (!nouveau_range_done(&range)) {
                        mutex_unlock(&svmm->mutex);

                nouveau_dmem_convert_pfn(svm->drm, &range);

                svmm->vmm->vmm.object.client->super = true;
                ret = nvif_object_ioctl(&svmm->vmm->vmm.object,
                                        &args, sizeof(args.i) +
                                        pi * sizeof(args.phys[0]),
                                        NULL);
                svmm->vmm->vmm.object.client->super = false;
                mutex_unlock(&svmm->mutex);
                up_read(&svmm->mm->mmap_sem);

                /* Cancel any faults in the window whose pages didn't manage
                 * to keep their valid bit, or stay writeable when required.
                 *
                 * If handling failed completely, cancel all faults.
                 */
                while (fi < fn) {
                        struct nouveau_svm_fault *fault = buffer->fault[fi++];
                        pi = (fault->addr - range.start) >> PAGE_SHIFT;
                        if (ret ||
                            !(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_V) ||
                            (!(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_W) &&
                             fault->access != 0 && fault->access != 3)) {
                                nouveau_svm_fault_cancel_fault(svm, fault);
                        }
                }
        }

        /* Issue fault replay to the GPU. */
        nouveau_svm_fault_replay(svm);
        return NVIF_NOTIFY_KEEP;
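
/* Stop receiving notifications for a fault buffer. */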
nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];

        nvif_notify_put(&buffer->notify);
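
/* Resynchronise GET/PUT with the hardware and re-enable notifications. */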
nouveau_svm_fault_buffer_init(struct nouveau_svm *svm, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
        struct nvif_object *device = &svm->drm->client.device.object;

        buffer->get = nvif_rd32(device, buffer->getaddr);
        buffer->put = nvif_rd32(device, buffer->putaddr);
        SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put);
        return nvif_notify_get(&buffer->notify);
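
/* Destroy a fault buffer: free the cached fault entries and tear down the
 * notify and fault-buffer objects.
 */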
nouveau_svm_fault_buffer_dtor(struct nouveau_svm *svm, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
        int i;

        for (i = 0; i < buffer->entries && buffer->fault[i]; i++)
                kfree(buffer->fault[i]);
        kvfree(buffer->fault);

        nouveau_svm_fault_buffer_fini(svm, id);

        nvif_notify_fini(&buffer->notify);
        nvif_object_fini(&buffer->object);
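
/* Allocate and map a replayable fault buffer, hook up its notify handler and
 * allocate the software cache used to stage parsed fault entries.
 */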
nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
        struct nouveau_drm *drm = svm->drm;
        struct nvif_object *device = &drm->client.device.object;
        struct nvif_clb069_v0 args = {};
        int ret;

        ret = nvif_object_init(device, 0, oclass, &args, sizeof(args),
                               &buffer->object);
        if (ret) {
                SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
                return ret;
        }

        nvif_object_map(&buffer->object, NULL, 0);
        buffer->entries = args.entries;
        buffer->getaddr = args.get;
        buffer->putaddr = args.put;

        ret = nvif_notify_init(&buffer->object, nouveau_svm_fault, true,
                               NVB069_V0_NTFY_FAULT, NULL, 0, 0,
                               &buffer->notify);

        buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries, GFP_KERNEL);

        return nouveau_svm_fault_buffer_init(svm, id);
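
/* Re-enable fault handling on resume. */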
nouveau_svm_resume(struct nouveau_drm *drm)
{
        struct nouveau_svm *svm = drm->svm;

        nouveau_svm_fault_buffer_init(svm, 0);
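
/* Quiesce fault handling before suspend. */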
nouveau_svm_suspend(struct nouveau_drm *drm)
{
        struct nouveau_svm *svm = drm->svm;

        nouveau_svm_fault_buffer_fini(svm, 0);
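
/* Tear down SVM state for the device. */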
nouveau_svm_fini(struct nouveau_drm *drm)
{
        struct nouveau_svm *svm = drm->svm;

        nouveau_svm_fault_buffer_dtor(svm, 0);
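
/* Set up SVM for the device: allocate tracking state, pick a supported fault
 * buffer class and construct fault buffer 0.
 */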
nouveau_svm_init(struct nouveau_drm *drm)
{
        static const struct nvif_mclass buffers[] = {
                { VOLTA_FAULT_BUFFER_A, 0 },
                { MAXWELL_FAULT_BUFFER_A, 0 },
                {}
        };
        struct nouveau_svm *svm;
        int ret;

        /* Disable on Volta and newer until channel recovery is fixed,
         * otherwise clients will have a trivial way to trash the GPU
         * for everyone.
         */
        if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL)
                return;
        if (!(drm->svm = svm = kzalloc(sizeof(*drm->svm), GFP_KERNEL)))
                return;

        mutex_init(&drm->svm->mutex);
        INIT_LIST_HEAD(&drm->svm->inst);

        ret = nvif_mclass(&drm->client.device.object, buffers);
        if (ret < 0) {
                SVM_DBG(svm, "No supported fault buffer class");
                nouveau_svm_fini(drm);
                return;
        }

        ret = nouveau_svm_fault_buffer_ctor(svm, buffers[ret].oclass, 0);
        if (ret) {
                nouveau_svm_fini(drm);
                return;
        }
        SVM_DBG(svm, "Initialised");