// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt_pagefault.h"

#include <linux/bitfield.h>
#include <linux/circ_buf.h>

#include <drm/drm_exec.h>
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include "abi/guc_actions_abi.h"
#include "xe_bo.h"
#include "xe_gt.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_migrate.h"
#include "xe_pt.h"
#include "xe_trace.h"
#include "xe_vm.h"

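/*
 * CPU-side view of the information carried by the GuC page fault and access
 * counter descriptors; filled in by get_pagefault() and get_acc() below.
 */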
struct pagefault {
	u64 page_addr;
	u32 asid;
	u16 pdata;
	u8 vfid;
	u8 access_type;
	u8 fault_type;
	u8 fault_level;
	u8 engine_class;
	u8 engine_instance;
	u8 fault_unsuccessful;
	bool trva_fault;
};

enum access_type {
	ACCESS_TYPE_READ = 0,
	ACCESS_TYPE_WRITE = 1,
	ACCESS_TYPE_ATOMIC = 2,
	ACCESS_TYPE_RESERVED = 3,
};

enum fault_type {
	NOT_PRESENT = 0,
	WRITE_ACCESS_VIOLATION = 1,
	ATOMIC_ACCESS_VIOLATION = 2,
};

struct acc {
	u64 va_range_base;
	u32 asid;
	u32 sub_granularity;
	u8 granularity;
	u8 vfid;
	u8 access_type;
	u8 engine_class;
	u8 engine_instance;
};

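/*
 * Small helpers for fault classification and VMA lookup. The most recently
 * faulted VMA is cached in vm->usm.last_fault_vma as a fast path before
 * falling back to a full lookup of the VM's mappings.
 */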
static bool access_is_atomic(enum access_type access_type)
{
	return access_type == ACCESS_TYPE_ATOMIC;
}

static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
{
	return BIT(tile->id) & vma->tile_present &&
		!(BIT(tile->id) & vma->usm.tile_invalidated);
}

static bool vma_matches(struct xe_vma *vma, u64 page_addr)
{
	if (page_addr > xe_vma_end(vma) - 1 ||
	    page_addr + SZ_4K - 1 < xe_vma_start(vma))
		return false;

	return true;
}

static struct xe_vma *lookup_vma(struct xe_vm *vm, u64 page_addr)
{
	struct xe_vma *vma = NULL;

	if (vm->usm.last_fault_vma) {	/* Fast lookup */
		if (vma_matches(vm->usm.last_fault_vma, page_addr))
			vma = vm->usm.last_fault_vma;
	}
	if (!vma)
		vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K);

	return vma;
}

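/*
 * Lock the VM's and BO's dma-resv via drm_exec and make the backing store
 * resident. Atomic faults on discrete devices force a migration to VRAM;
 * userptr VMAs cannot be migrated, so atomic access to them is rejected.
 * Everything else only needs the BO to be validated.
 */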
static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
		       bool atomic, unsigned int id)
{
	struct xe_bo *bo = xe_vma_bo(vma);
	struct xe_vm *vm = xe_vma_vm(vma);
	unsigned int num_shared = 2; /* slots for bind + move */
	int err;

	err = xe_vm_prepare_vma(exec, vma, num_shared);
	if (err)
		return err;

	if (atomic && IS_DGFX(vm->xe)) {
		if (xe_vma_is_userptr(vma)) {
			err = -EACCES;
			return err;
		}

		/* Migrate to VRAM, move should invalidate the VMA first */
		err = xe_bo_migrate(bo, XE_PL_VRAM0 + id);
		if (err)
			return err;
	} else if (bo) {
		/* Create backing store if needed */
		err = xe_bo_validate(bo, vm, true);
		if (err)
			return err;
	}

	return 0;
}

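/*
 * Service a single page fault: resolve the ASID to a VM, look up the VMA
 * covering the faulted address, repin userptr pages if needed, validate or
 * migrate the backing store, rebind the VMA on the faulting tile and then
 * invalidate any stale TLB entries for it.
 */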
static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct drm_exec exec;
	struct xe_vm *vm;
	struct xe_vma *vma = NULL;
	struct dma_fence *fence;
	bool write_locked;
	int ret = 0;
	bool atomic;

	/* SW isn't expected to handle TRTT faults */
	if (pf->trva_fault)
		return -EFAULT;

	/* ASID to VM */
	mutex_lock(&xe->usm.lock);
	vm = xa_load(&xe->usm.asid_to_vm, pf->asid);
	if (vm)
		xe_vm_get(vm);
	mutex_unlock(&xe->usm.lock);
	if (!vm || !xe_vm_in_fault_mode(vm))
		return -EINVAL;

retry_userptr:
	/*
	 * TODO: Avoid exclusive lock if VM doesn't have userptrs, or
	 * start out read-locked?
	 */
	down_write(&vm->lock);
	write_locked = true;
	vma = lookup_vma(vm, pf->page_addr);
	if (!vma) {
		ret = -EINVAL;
		goto unlock_vm;
	}

	if (!xe_vma_is_userptr(vma) || !xe_vma_userptr_check_repin(vma)) {
		downgrade_write(&vm->lock);
		write_locked = false;
	}

	trace_xe_vma_pagefault(vma);

	atomic = access_is_atomic(pf->access_type);

	/* Check if VMA is valid */
	if (vma_is_valid(tile, vma) && !atomic)
		goto unlock_vm;

	/* TODO: Validate fault */

	if (xe_vma_is_userptr(vma) && write_locked) {
		spin_lock(&vm->userptr.invalidated_lock);
		list_del_init(&vma->userptr.invalidate_link);
		spin_unlock(&vm->userptr.invalidated_lock);

		ret = xe_vma_userptr_pin_pages(vma);
		if (ret)
			goto unlock_vm;

		downgrade_write(&vm->lock);
		write_locked = false;
	}

	/* Lock VM and BOs dma-resv */
	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		ret = xe_pf_begin(&exec, vma, atomic, tile->id);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			goto unlock_dma_resv;
	}

	/* Bind VMA only to the GT that has faulted */
	trace_xe_vma_pf_bind(vma);
	fence = __xe_pt_bind_vma(tile, vma, xe_tile_migrate_engine(tile), NULL, 0,
				 vma->tile_present & BIT(tile->id));
	if (IS_ERR(fence)) {
		ret = PTR_ERR(fence);
		goto unlock_dma_resv;
	}

	/*
	 * XXX: Should we drop the lock before waiting? This only helps if doing
	 * GPU binds which is currently only done if we have to wait for more
	 * than 10ms on a move.
	 */
	dma_fence_wait(fence, false);
	dma_fence_put(fence);

	if (xe_vma_is_userptr(vma))
		ret = xe_vma_userptr_check_repin(vma);
	vma->usm.tile_invalidated &= ~BIT(tile->id);

unlock_dma_resv:
	drm_exec_fini(&exec);
unlock_vm:
	if (!ret)
		vm->usm.last_fault_vma = vma;
	if (write_locked)
		up_write(&vm->lock);
	else
		up_read(&vm->lock);
	if (ret == -EAGAIN)
		goto retry_userptr;

	if (!ret) {
		ret = xe_gt_tlb_invalidation_vma(gt, NULL, vma);
		if (ret >= 0)
			ret = 0;
	}
	xe_vm_put(vm);

	return ret;
}

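/*
 * Send a page fault response descriptor back to the GuC over the CT channel;
 * dw0/dw1 are assembled by the page fault worker below.
 */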
static int send_pagefault_reply(struct xe_guc *guc,
				struct xe_guc_pagefault_reply *reply)
{
	u32 action[] = {
		XE_GUC_ACTION_PAGE_FAULT_RES_DESC,
		reply->dw0,
		reply->dw1,
	};

	return xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}

static void print_pagefault(struct xe_device *xe, struct pagefault *pf)
{
	drm_dbg(&xe->drm, "\n\tASID: %d\n"
		"\tVFID: %d\n"
		"\tPDATA: 0x%04x\n"
		"\tFaulted Address: 0x%08x%08x\n"
		"\tFaultType: %d\n"
		"\tAccessType: %d\n"
		"\tFaultLevel: %d\n"
		"\tEngineClass: %d\n"
		"\tEngineInstance: %d\n",
		pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),
		lower_32_bits(pf->page_addr),
		pf->fault_type, pf->access_type, pf->fault_level,
		pf->engine_class, pf->engine_instance);
}

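/*
 * Page fault descriptors are PF_MSG_LEN_DW dwords each. get_pagefault() pops
 * the next pending descriptor off the pf_queue circular buffer, if any, and
 * decodes it into a struct pagefault.
 */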
#define PF_MSG_LEN_DW	4

static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
{
	const struct xe_guc_pagefault_desc *desc;
	bool ret = false;

	spin_lock_irq(&pf_queue->lock);
	if (pf_queue->head != pf_queue->tail) {
		desc = (const struct xe_guc_pagefault_desc *)
			(pf_queue->data + pf_queue->head);

		pf->fault_level = FIELD_GET(PFD_FAULT_LEVEL, desc->dw0);
		pf->trva_fault = FIELD_GET(XE2_PFD_TRVA_FAULT, desc->dw0);
		pf->engine_class = FIELD_GET(PFD_ENG_CLASS, desc->dw0);
		pf->engine_instance = FIELD_GET(PFD_ENG_INSTANCE, desc->dw0);
		pf->pdata = FIELD_GET(PFD_PDATA_HI, desc->dw1) <<
			PFD_PDATA_HI_SHIFT;
		pf->pdata |= FIELD_GET(PFD_PDATA_LO, desc->dw0);
		pf->asid = FIELD_GET(PFD_ASID, desc->dw1);
		pf->vfid = FIELD_GET(PFD_VFID, desc->dw2);
		pf->access_type = FIELD_GET(PFD_ACCESS_TYPE, desc->dw2);
		pf->fault_type = FIELD_GET(PFD_FAULT_TYPE, desc->dw2);
		pf->page_addr = (u64)(FIELD_GET(PFD_VIRTUAL_ADDR_HI, desc->dw3)) <<
			PFD_VIRTUAL_ADDR_HI_SHIFT;
		pf->page_addr |= FIELD_GET(PFD_VIRTUAL_ADDR_LO, desc->dw2) <<
			PFD_VIRTUAL_ADDR_LO_SHIFT;

		pf_queue->head = (pf_queue->head + PF_MSG_LEN_DW) %
			PF_QUEUE_NUM_DW;
		ret = true;
	}
	spin_unlock_irq(&pf_queue->lock);

	return ret;
}

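/*
 * The queue is treated as full once the free space drops to one descriptor's
 * worth of dwords or less.
 */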
static bool pf_queue_full(struct pf_queue *pf_queue)
{
	lockdep_assert_held(&pf_queue->lock);

	return CIRC_SPACE(pf_queue->tail, pf_queue->head, PF_QUEUE_NUM_DW) <=
		PF_MSG_LEN_DW;
}

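/*
 * G2H page fault handler: called from the GuC CT layer, this only copies the
 * raw descriptor into the pf_queue selected by ASID and kicks the worker that
 * does the actual fault handling.
 */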
int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	struct pf_queue *pf_queue;
	unsigned long flags;
	u32 asid;
	bool full;

	if (unlikely(len != PF_MSG_LEN_DW))
		return -EPROTO;

	asid = FIELD_GET(PFD_ASID, msg[1]);
	pf_queue = &gt->usm.pf_queue[asid % NUM_PF_QUEUE];

	spin_lock_irqsave(&pf_queue->lock, flags);
	full = pf_queue_full(pf_queue);
	if (!full) {
		memcpy(pf_queue->data + pf_queue->tail, msg, len * sizeof(u32));
		pf_queue->tail = (pf_queue->tail + len) % PF_QUEUE_NUM_DW;
		queue_work(gt->usm.pf_wq, &pf_queue->worker);
	} else {
		drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
	}
	spin_unlock_irqrestore(&pf_queue->lock, flags);

	return full ? -ENOSPC : 0;
}

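/*
 * The workers below drain their queues but yield after
 * USM_QUEUE_MAX_RUNTIME_MS, requeueing themselves so that a flood of faults
 * cannot monopolize the workqueue.
 */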
#define USM_QUEUE_MAX_RUNTIME_MS	20

static void pf_queue_work_func(struct work_struct *w)
{
	struct pf_queue *pf_queue = container_of(w, struct pf_queue, worker);
	struct xe_gt *gt = pf_queue->gt;
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_guc_pagefault_reply reply = {};
	struct pagefault pf = {};
	unsigned long threshold;
	int ret;

	threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);

	while (get_pagefault(pf_queue, &pf)) {
		ret = handle_pagefault(gt, &pf);
		if (unlikely(ret)) {
			print_pagefault(xe, &pf);
			pf.fault_unsuccessful = 1;
			drm_dbg(&xe->drm, "Fault response: Unsuccessful %d\n", ret);
		}

		reply.dw0 = FIELD_PREP(PFR_VALID, 1) |
			FIELD_PREP(PFR_SUCCESS, pf.fault_unsuccessful) |
			FIELD_PREP(PFR_REPLY, PFR_ACCESS) |
			FIELD_PREP(PFR_DESC_TYPE, FAULT_RESPONSE_DESC) |
			FIELD_PREP(PFR_ASID, pf.asid);

		reply.dw1 = FIELD_PREP(PFR_VFID, pf.vfid) |
			FIELD_PREP(PFR_ENG_INSTANCE, pf.engine_instance) |
			FIELD_PREP(PFR_ENG_CLASS, pf.engine_class) |
			FIELD_PREP(PFR_PDATA, pf.pdata);

		send_pagefault_reply(&gt->uc.guc, &reply);

		if (time_after(jiffies, threshold) &&
		    pf_queue->head != pf_queue->tail) {
			queue_work(gt->usm.pf_wq, w);
			break;
		}
	}
}

static void acc_queue_work_func(struct work_struct *w);

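/*
 * Per-GT initialization of USM fault handling: set up the page fault and
 * access counter queues along with the high-priority workqueues that drain
 * them. Nothing to do on devices without USM support.
 */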
int xe_gt_pagefault_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i;

	if (!xe->info.has_usm)
		return 0;

	for (i = 0; i < NUM_PF_QUEUE; ++i) {
		gt->usm.pf_queue[i].gt = gt;
		spin_lock_init(&gt->usm.pf_queue[i].lock);
		INIT_WORK(&gt->usm.pf_queue[i].worker, pf_queue_work_func);
	}
	for (i = 0; i < NUM_ACC_QUEUE; ++i) {
		gt->usm.acc_queue[i].gt = gt;
		spin_lock_init(&gt->usm.acc_queue[i].lock);
		INIT_WORK(&gt->usm.acc_queue[i].worker, acc_queue_work_func);
	}

	gt->usm.pf_wq = alloc_workqueue("xe_gt_page_fault_work_queue",
					WQ_UNBOUND | WQ_HIGHPRI, NUM_PF_QUEUE);
	if (!gt->usm.pf_wq)
		return -ENOMEM;

	gt->usm.acc_wq = alloc_workqueue("xe_gt_access_counter_work_queue",
					 WQ_UNBOUND | WQ_HIGHPRI,
					 NUM_ACC_QUEUE);
	if (!gt->usm.acc_wq)
		return -ENOMEM;

	return 0;
}

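/*
 * Any descriptors still queued across a GT reset are no longer worth
 * servicing, so the queues are simply emptied by resetting head and tail.
 */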
void xe_gt_pagefault_reset(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int i;

	if (!xe->info.has_usm)
		return;

	for (i = 0; i < NUM_PF_QUEUE; ++i) {
		spin_lock_irq(&gt->usm.pf_queue[i].lock);
		gt->usm.pf_queue[i].head = 0;
		gt->usm.pf_queue[i].tail = 0;
		spin_unlock_irq(&gt->usm.pf_queue[i].lock);
	}

	for (i = 0; i < NUM_ACC_QUEUE; ++i) {
		spin_lock(&gt->usm.acc_queue[i].lock);
		gt->usm.acc_queue[i].head = 0;
		gt->usm.acc_queue[i].tail = 0;
		spin_unlock(&gt->usm.acc_queue[i].lock);
	}
}

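/*
 * Access counter granularity: the descriptor encodes a region size (see
 * granularity_in_byte()) plus a 32-bit sub-granularity vector marking which
 * 1/32nd slices of that region were accessed.
 */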
static int granularity_in_byte(int val)
{
	switch (val) {
	case 0:
		return SZ_128K;
	case 1:
		return SZ_2M;
	case 2:
		return SZ_16M;
	case 3:
		return SZ_64M;
	default:
		return 0;
	}
}

static int sub_granularity_in_byte(int val)
{
	return (granularity_in_byte(val) / 32);
}

static void print_acc(struct xe_device *xe, struct acc *acc)
{
	drm_warn(&xe->drm, "Access counter request:\n"
		 "\tType: %s\n"
		 "\tASID: %d\n"
		 "\tVFID: %d\n"
		 "\tEngine: %d:%d\n"
		 "\tGranularity: 0x%x KB Region/ %d KB sub-granularity\n"
		 "\tSub_Granularity Vector: 0x%08x\n"
		 "\tVA Range base: 0x%016llx\n",
		 acc->access_type ? "AC_NTFY_VAL" : "AC_TRIG_VAL",
		 acc->asid, acc->vfid, acc->engine_class, acc->engine_instance,
		 granularity_in_byte(acc->granularity) / SZ_1K,
		 sub_granularity_in_byte(acc->granularity) / SZ_1K,
		 acc->sub_granularity, acc->va_range_base);
}

static struct xe_vma *get_acc_vma(struct xe_vm *vm, struct acc *acc)
{
	u64 page_va = acc->va_range_base + (ffs(acc->sub_granularity) - 1) *
		sub_granularity_in_byte(acc->granularity);

	return xe_vm_find_overlapping_vma(vm, page_va, SZ_4K);
}

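/*
 * Handle an access counter notification: look up the VMA covering the first
 * touched sub-region and revalidate its BO, which on discrete devices means
 * migrating it to VRAM. Userptr and NULL mappings have no BO to migrate, so
 * there is nothing to do for them.
 */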
static int handle_acc(struct xe_gt *gt, struct acc *acc)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct drm_exec exec;
	struct xe_vm *vm;
	struct xe_vma *vma;
	int ret = 0;

	/* We only support ACC_TRIGGER at the moment */
	if (acc->access_type != ACC_TRIGGER)
		return 0;

	/* ASID to VM */
	mutex_lock(&xe->usm.lock);
	vm = xa_load(&xe->usm.asid_to_vm, acc->asid);
	if (vm)
		xe_vm_get(vm);
	mutex_unlock(&xe->usm.lock);
	if (!vm || !xe_vm_in_fault_mode(vm))
		return -EINVAL;

	down_read(&vm->lock);

	/* Lookup VMA */
	vma = get_acc_vma(vm, acc);
	if (!vma) {
		ret = -EINVAL;
		goto unlock_vm;
	}

	trace_xe_vma_acc(vma);

	/* Userptr or null can't be migrated, nothing to do */
	if (xe_vma_has_no_bo(vma))
		goto unlock_vm;

	/* Lock VM and BOs dma-resv */
	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		ret = xe_pf_begin(&exec, vma, true, tile->id);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
	}

	drm_exec_fini(&exec);
unlock_vm:
	up_read(&vm->lock);
	xe_vm_put(vm);

	return ret;
}

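/*
 * Access counter notifications arrive as ACC_MSG_LEN_DW dword descriptors on
 * a circular queue, mirroring the page fault path; get_acc() pops and decodes
 * the next pending one.
 */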
#define make_u64(hi__, low__)  ((u64)(hi__) << 32 | (u64)(low__))

#define ACC_MSG_LEN_DW	4

static bool get_acc(struct acc_queue *acc_queue, struct acc *acc)
{
	const struct xe_guc_acc_desc *desc;
	bool ret = false;

	spin_lock(&acc_queue->lock);
	if (acc_queue->head != acc_queue->tail) {
		desc = (const struct xe_guc_acc_desc *)
			(acc_queue->data + acc_queue->head);

		acc->granularity = FIELD_GET(ACC_GRANULARITY, desc->dw2);
		acc->sub_granularity = FIELD_GET(ACC_SUBG_HI, desc->dw1) << 31 |
			FIELD_GET(ACC_SUBG_LO, desc->dw0);
		acc->engine_class = FIELD_GET(ACC_ENG_CLASS, desc->dw1);
		acc->engine_instance = FIELD_GET(ACC_ENG_INSTANCE, desc->dw1);
		acc->asid = FIELD_GET(ACC_ASID, desc->dw1);
		acc->vfid = FIELD_GET(ACC_VFID, desc->dw2);
		acc->access_type = FIELD_GET(ACC_TYPE, desc->dw0);
		acc->va_range_base = make_u64(desc->dw3 & ACC_VIRTUAL_ADDR_RANGE_HI,
					      desc->dw2 & ACC_VIRTUAL_ADDR_RANGE_LO);

		acc_queue->head = (acc_queue->head + ACC_MSG_LEN_DW) %
				  ACC_QUEUE_NUM_DW;
		ret = true;
	}
	spin_unlock(&acc_queue->lock);

	return ret;
}

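/* Same time-budgeted drain loop as pf_queue_work_func() above */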
static void acc_queue_work_func(struct work_struct *w)
{
	struct acc_queue *acc_queue = container_of(w, struct acc_queue, worker);
	struct xe_gt *gt = acc_queue->gt;
	struct xe_device *xe = gt_to_xe(gt);
	struct acc acc = {};
	unsigned long threshold;
	int ret;

	threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);

	while (get_acc(acc_queue, &acc)) {
		ret = handle_acc(gt, &acc);
		if (unlikely(ret)) {
			print_acc(xe, &acc);
			drm_warn(&xe->drm, "ACC: Unsuccessful %d\n", ret);
		}

		if (time_after(jiffies, threshold) &&
		    acc_queue->head != acc_queue->tail) {
			queue_work(gt->usm.acc_wq, w);
			break;
		}
	}
}

static bool acc_queue_full(struct acc_queue *acc_queue)
{
	lockdep_assert_held(&acc_queue->lock);

	return CIRC_SPACE(acc_queue->tail, acc_queue->head, ACC_QUEUE_NUM_DW) <=
		ACC_MSG_LEN_DW;
}

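/*
 * G2H access counter handler: mirrors xe_guc_pagefault_handler() above, except
 * that a notification is simply dropped (with a warning) when its queue is
 * full, whereas the page fault queue is sized so that overflow should never
 * happen.
 */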
int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct acc_queue *acc_queue;
	u32 asid;
	bool full;

	if (unlikely(len != ACC_MSG_LEN_DW))
		return -EPROTO;

	asid = FIELD_GET(ACC_ASID, msg[1]);
	acc_queue = &gt->usm.acc_queue[asid % NUM_ACC_QUEUE];

	spin_lock(&acc_queue->lock);
	full = acc_queue_full(acc_queue);
	if (!full) {
		memcpy(acc_queue->data + acc_queue->tail, msg,
		       len * sizeof(u32));
		acc_queue->tail = (acc_queue->tail + len) % ACC_QUEUE_NUM_DW;
		queue_work(gt->usm.acc_wq, &acc_queue->worker);
	} else {
		drm_warn(&gt_to_xe(gt)->drm, "ACC Queue full, dropping ACC");
	}
	spin_unlock(&acc_queue->lock);

	return full ? -ENOSPC : 0;
}