/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/memory.h>
#include "kfd_priv.h"
#include "kfd_events.h"
#include <linux/device.h>
/*
 * Wrapper around wait_queue_entry_t
 */
struct kfd_event_waiter {
	wait_queue_entry_t wait;
	struct kfd_event *event; /* Event to wait for */
	bool activated;		 /* Becomes true when event is signaled */
};
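
/*
 * Note (summary of how this struct is used in this file): one waiter is
 * allocated per event per kfd_wait_on_events() call. set_event() marks
 * "activated" under p->event_mutex, and destroy_event() clears "event"
 * to NULL so sleeping waiters can detect that their event vanished.
 */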

/*
 * Each signal event needs a 64-bit signal slot where the signaler will write
 * a 1 before sending an interrupt. (This is needed because some interrupts
 * do not contain enough spare data bits to identify an event.)
 * We get whole pages and map them to the process VA.
 * Individual signal events use their event_id as slot index.
 */
struct kfd_signal_page {
	uint64_t *kernel_address;
	uint64_t __user *user_address;
};
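
/*
 * Sizing sketch (illustrative; the real limit is defined elsewhere in
 * the driver): if KFD_SIGNAL_EVENT_LIMIT were 4096, the backing store
 * below would be 4096 * 8 = 32768 bytes, i.e. get_order(32768) == 3 on
 * 4 KiB pages, so __get_free_pages() returns eight contiguous pages.
 */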

static uint64_t *page_slots(struct kfd_signal_page *page)
{
	return page->kernel_address;
}

static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
{
	void *backing_store;
	struct kfd_signal_page *page;

	page = kzalloc(sizeof(*page), GFP_KERNEL);
	if (!page)
		return NULL;

	backing_store = (void *) __get_free_pages(GFP_KERNEL,
					get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
	if (!backing_store)
		goto fail_alloc_signal_store;

	/* Initialize all events to unsignaled */
	memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
	       KFD_SIGNAL_EVENT_LIMIT * 8);

	page->kernel_address = backing_store;
	pr_debug("Allocated new event signal page at %p, for process %p\n",
			page, p);

	return page;

fail_alloc_signal_store:
	kfree(page);
	return NULL;
}

static int allocate_event_notification_slot(struct kfd_process *p,
					    struct kfd_event *ev)
{
	int id;

	if (!p->signal_page) {
		p->signal_page = allocate_signal_page(p);
		if (!p->signal_page)
			return -ENOMEM;
	}

	id = idr_alloc(&p->event_idr, ev, 0, KFD_SIGNAL_EVENT_LIMIT,
		       GFP_KERNEL);
	if (id < 0)
		return id;

	ev->event_id = id;
	page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT;

	return 0;
}
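
/*
 * Note: idr_alloc() returns the smallest free ID in [0,
 * KFD_SIGNAL_EVENT_LIMIT), so a signal event's ID can double as its
 * slot index in the signal page without any extra translation table.
 */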

/*
 * Assumes that p->event_mutex is held and of course that p is not going
 * away (current or locked).
 */
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
	return idr_find(&p->event_idr, id);
}

/**
 * lookup_signaled_event_by_partial_id - Lookup signaled event from partial ID
 * @p:     Pointer to struct kfd_process
 * @id:    ID to look up
 * @bits:  Number of valid bits in @id
 *
 * Finds the first signaled event with a matching partial ID. If no
 * matching signaled event is found, returns NULL. In that case the
 * caller should assume that the partial ID is invalid and do an
 * exhaustive search of all signaled events.
 *
 * If multiple events with the same partial ID signal at the same
 * time, they will be found one interrupt at a time, not necessarily
 * in the same order the interrupts occurred. As long as the number of
 * interrupts is correct, all signaled events will be seen by the
 * driver.
 */
static struct kfd_event *lookup_signaled_event_by_partial_id(
	struct kfd_process *p, uint32_t id, uint32_t bits)
{
	struct kfd_event *ev;

	if (!p->signal_page || id >= KFD_SIGNAL_EVENT_LIMIT)
		return NULL;

	/* Fast path for the common case that @id is not a partial ID
	 * and we only need a single lookup.
	 */
	if (bits > 31 || (1U << bits) >= KFD_SIGNAL_EVENT_LIMIT) {
		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
			return NULL;

		return idr_find(&p->event_idr, id);
	}

	/* General case for partial IDs: Iterate over all matching IDs
	 * and find the first one that has signaled.
	 */
	for (ev = NULL; id < KFD_SIGNAL_EVENT_LIMIT && !ev; id += 1U << bits) {
		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
			continue;

		ev = idr_find(&p->event_idr, id);
	}

	return ev;
}
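
/*
 * Worked example (illustrative values): with bits == 8 and id == 0x12,
 * the loop above probes slots 0x12, 0x112, 0x212, ... in steps of
 * 1U << 8 == 0x100, stopping at the first slot that is both signaled
 * and backed by a live event in the IDR.
 */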

static int create_signal_event(struct file *devkfd,
				struct kfd_process *p,
				struct kfd_event *ev)
{
	int ret;

	if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
		if (!p->signal_event_limit_reached) {
			pr_warn("Signal event wasn't created because limit was reached\n");
			p->signal_event_limit_reached = true;
		}
		return -ENOSPC;
	}

	ret = allocate_event_notification_slot(p, ev);
	if (ret) {
		pr_warn("Signal event wasn't created because out of kernel memory\n");
		return ret;
	}

	p->signal_event_count++;

	ev->user_signal_address = &p->signal_page->user_address[ev->event_id];
	pr_debug("Signal event number %zu created with id %d, address %p\n",
			p->signal_event_count, ev->event_id,
			ev->user_signal_address);

	return 0;
}

static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
{
	/* Cast KFD_LAST_NONSIGNAL_EVENT to uint32_t. This allows an
	 * intentional integer overflow to -1 without a compiler
	 * warning. idr_alloc treats a negative value as "maximum
	 * signed integer".
	 */
	int id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
			   (uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
			   GFP_KERNEL);

	if (id < 0)
		return id;
	ev->event_id = id;

	return 0;
}
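
/*
 * Example of the overflow trick above (hypothetical macro value): if
 * KFD_LAST_NONSIGNAL_EVENT_ID were 0xFFFFFFFE, then
 * (uint32_t)0xFFFFFFFE + 1 == 0xFFFFFFFF, which becomes -1 when passed
 * as idr_alloc()'s int "end" argument, i.e. no upper bound on the ID.
 */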

void kfd_event_init_process(struct kfd_process *p)
{
	mutex_init(&p->event_mutex);
	idr_init(&p->event_idr);
	p->signal_page = NULL;
	p->signal_event_count = 0;
}

static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/* Wake up pending waiters. They will return failure */
	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		waiter->event = NULL;
	wake_up_all(&ev->wq);

	if (ev->type == KFD_EVENT_TYPE_SIGNAL ||
	    ev->type == KFD_EVENT_TYPE_DEBUG)
		p->signal_event_count--;

	idr_remove(&p->event_idr, ev->event_id);
	kfree(ev);
}

static void destroy_events(struct kfd_process *p)
{
	struct kfd_event *ev;
	uint32_t id;

	idr_for_each_entry(&p->event_idr, ev, id)
		destroy_event(p, ev);
	idr_destroy(&p->event_idr);
}

/*
 * We assume that the process is being destroyed and there is no need to
 * unmap the pages or keep bookkeeping data in order.
 */
static void shutdown_signal_page(struct kfd_process *p)
{
	struct kfd_signal_page *page = p->signal_page;

	if (page) {
		free_pages((unsigned long)page->kernel_address,
				get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
		kfree(page);
	}
}

void kfd_event_free_process(struct kfd_process *p)
{
	destroy_events(p);
	shutdown_signal_page(p);
}

static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL ||
					ev->type == KFD_EVENT_TYPE_DEBUG;
}

static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL;
}

int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index)
{
	int ret = 0;
	struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;

	ev->type = event_type;
	ev->auto_reset = auto_reset;
	ev->signaled = false;

	init_waitqueue_head(&ev->wq);

	*event_page_offset = 0;

	mutex_lock(&p->event_mutex);

	switch (event_type) {
	case KFD_EVENT_TYPE_SIGNAL:
	case KFD_EVENT_TYPE_DEBUG:
		ret = create_signal_event(devkfd, p, ev);
		if (!ret) {
			*event_page_offset = KFD_MMAP_EVENTS_MASK;
			*event_page_offset <<= PAGE_SHIFT;
			*event_slot_index = ev->event_id;
		}
		break;
	default:
		ret = create_other_event(p, ev);
		break;
	}

	if (!ret) {
		*event_id = ev->event_id;
		*event_trigger_data = ev->event_id;
	} else {
		kfree(ev);
	}

	mutex_unlock(&p->event_mutex);

	return ret;
}

/* Assumes that p is current. */
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
{
	struct kfd_event *ev;
	int ret = 0;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev)
		destroy_event(p, ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void set_event(struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/* Auto reset if the list is non-empty and we're waking
	 * someone. waitqueue_active is safe here because we're
	 * protected by the p->event_mutex, which is also held when
	 * updating the wait queues in kfd_wait_on_events.
	 */
	ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);

	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		waiter->activated = true;

	wake_up_all(&ev->wq);
}
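
/*
 * Behavior sketch of the auto-reset logic above: signaling an
 * auto-reset event with at least one waiter leaves ev->signaled false
 * (the wake-up consumes the signal); signaling it with no waiters
 * latches ev->signaled = true so the next waiter completes
 * immediately. Manual-reset events stay signaled until kfd_reset_event.
 */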

/* Assumes that p is current. */
int kfd_set_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		set_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void reset_event(struct kfd_event *ev)
{
	ev->signaled = false;
}

/* Assumes that p is current. */
int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		reset_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
{
	page_slots(p->signal_page)[ev->event_id] = UNSIGNALED_EVENT_SLOT;
}

static void set_event_from_interrupt(struct kfd_process *p,
					struct kfd_event *ev)
{
	if (ev && event_can_be_gpu_signaled(ev)) {
		acknowledge_signal(p, ev);
		set_event(ev);
	}
}

void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits)
{
	struct kfd_event *ev = NULL;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	if (valid_id_bits)
		ev = lookup_signaled_event_by_partial_id(p, partial_id,
							 valid_id_bits);
	if (ev) {
		set_event_from_interrupt(p, ev);
	} else if (p->signal_page) {
		/*
		 * Partial ID lookup failed. Assume that the event ID
		 * in the interrupt payload was invalid and do an
		 * exhaustive search of signaled events.
		 */
		uint64_t *slots = page_slots(p->signal_page);
		uint32_t id;

		if (valid_id_bits)
			pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
					     partial_id, valid_id_bits);

		if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT/2) {
			/* With relatively few events, it's faster to
			 * iterate over the event IDR
			 */
			idr_for_each_entry(&p->event_idr, ev, id) {
				if (id >= KFD_SIGNAL_EVENT_LIMIT)
					break;

				if (slots[id] != UNSIGNALED_EVENT_SLOT)
					set_event_from_interrupt(p, ev);
			}
		} else {
			/* With relatively many events, it's faster to
			 * iterate over the signal slots and lookup
			 * only signaled events from the IDR.
			 */
			for (id = 0; id < KFD_SIGNAL_EVENT_LIMIT; id++)
				if (slots[id] != UNSIGNALED_EVENT_SLOT) {
					ev = lookup_event_by_id(p, id);
					set_event_from_interrupt(p, ev);
				}
		}
	}

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}
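
/*
 * Note on the KFD_SIGNAL_EVENT_LIMIT/2 heuristic above: the IDR walk
 * costs roughly one step per live event, while the slot scan always
 * touches all KFD_SIGNAL_EVENT_LIMIT slots, so the IDR wins when fewer
 * than half the slots can be occupied. The crossover point is a rough
 * estimate, not a measured constant.
 */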

static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
{
	struct kfd_event_waiter *event_waiters;
	uint32_t i;

	event_waiters = kmalloc_array(num_events,
					sizeof(struct kfd_event_waiter),
					GFP_KERNEL);

	for (i = 0; (event_waiters) && (i < num_events) ; i++) {
		init_wait(&event_waiters[i].wait);
		event_waiters[i].activated = false;
	}

	return event_waiters;
}
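
/*
 * Note: kmalloc_array() is used instead of kmalloc(num_events * size)
 * because it returns NULL on multiplication overflow, so a huge
 * user-supplied num_events cannot silently wrap into a short
 * allocation.
 */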

static int init_event_waiter_get_status(struct kfd_process *p,
		struct kfd_event_waiter *waiter,
		uint32_t event_id)
{
	struct kfd_event *ev = lookup_event_by_id(p, event_id);

	if (!ev)
		return -EINVAL;

	waiter->event = ev;
	waiter->activated = ev->signaled;
	ev->signaled = ev->signaled && !ev->auto_reset;

	return 0;
}

static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter)
{
	struct kfd_event *ev = waiter->event;

	/* Only add to the wait list if we actually need to
	 * wait on this event.
	 */
	if (!waiter->activated)
		add_wait_queue(&ev->wq, &waiter->wait);
}

/* test_event_condition - Test condition of events being waited for
 * @all:           Return completion only if all events have signaled
 * @num_events:    Number of events to wait for
 * @event_waiters: Array of event waiters, one per event
 *
 * Returns KFD_IOC_WAIT_RESULT_COMPLETE if all (or one) event(s) have
 * signaled. Returns KFD_IOC_WAIT_RESULT_TIMEOUT if no (or not all)
 * events have signaled. Returns KFD_IOC_WAIT_RESULT_FAIL if any of
 * the events have been destroyed.
 */
static uint32_t test_event_condition(bool all, uint32_t num_events,
				     struct kfd_event_waiter *event_waiters)
{
	uint32_t i;
	uint32_t activated_count = 0;

	for (i = 0; i < num_events; i++) {
		if (!event_waiters[i].event)
			return KFD_IOC_WAIT_RESULT_FAIL;

		if (event_waiters[i].activated) {
			if (!all)
				return KFD_IOC_WAIT_RESULT_COMPLETE;

			activated_count++;
		}
	}

	return activated_count == num_events ?
		KFD_IOC_WAIT_RESULT_COMPLETE : KFD_IOC_WAIT_RESULT_TIMEOUT;
}
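
/*
 * Quick reference for the return values above:
 *   any waiter lost its event     -> KFD_IOC_WAIT_RESULT_FAIL
 *   all == false, >= 1 activated  -> KFD_IOC_WAIT_RESULT_COMPLETE
 *   all == true, all activated    -> KFD_IOC_WAIT_RESULT_COMPLETE
 *   otherwise                     -> KFD_IOC_WAIT_RESULT_TIMEOUT
 */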

/*
 * Copy event specific data, if defined.
 * Currently only memory exception events have additional data to copy to user
 */
static int copy_signaled_event_data(uint32_t num_events,
		struct kfd_event_waiter *event_waiters,
		struct kfd_event_data __user *data)
{
	struct kfd_hsa_memory_exception_data *src;
	struct kfd_hsa_memory_exception_data __user *dst;
	struct kfd_event_waiter *waiter;
	struct kfd_event *event;
	uint32_t i;

	for (i = 0; i < num_events; i++) {
		waiter = &event_waiters[i];
		event = waiter->event;
		if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
			dst = &data[i].memory_exception_data;
			src = &event->memory_exception_data;
			if (copy_to_user(dst, src,
				sizeof(struct kfd_hsa_memory_exception_data)))
				return -EFAULT;
		}
	}

	return 0;
}

static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
{
	if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
		return 0;

	if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
		return MAX_SCHEDULE_TIMEOUT;

	/*
	 * msecs_to_jiffies interprets all values above 2^31-1 as infinite,
	 * but we consider them finite.
	 * This hack is wrong, but nobody is likely to notice.
	 */
	user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF);

	return msecs_to_jiffies(user_timeout_ms) + 1;
}
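
/*
 * Worked example (assuming HZ == 250, i.e. 4 ms per jiffy): a 100 ms
 * user timeout maps to msecs_to_jiffies(100) == 25 jiffies, and the
 * trailing "+ 1" rounds up so the wait never expires before the full
 * 100 ms have elapsed.
 */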

static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
{
	uint32_t i;

	for (i = 0; i < num_events; i++)
		if (waiters[i].event)
			remove_wait_queue(&waiters[i].event->wq,
					  &waiters[i].wait);

	kfree(waiters);
}

int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result)
{
	struct kfd_event_data __user *events =
			(struct kfd_event_data __user *) data;
	uint32_t i;
	int ret = 0;

	struct kfd_event_waiter *event_waiters = NULL;
	long timeout = user_timeout_to_jiffies(user_timeout_ms);

	event_waiters = alloc_event_waiters(num_events);
	if (!event_waiters) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&p->event_mutex);

	for (i = 0; i < num_events; i++) {
		struct kfd_event_data event_data;

		if (copy_from_user(&event_data, &events[i],
				sizeof(struct kfd_event_data))) {
			ret = -EFAULT;
			goto out_unlock;
		}

		ret = init_event_waiter_get_status(p, &event_waiters[i],
				event_data.event_id);
		if (ret)
			goto out_unlock;
	}

	/* Check condition once. */
	*wait_result = test_event_condition(all, num_events, event_waiters);
	if (*wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) {
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);
		goto out_unlock;
	} else if (WARN_ON(*wait_result == KFD_IOC_WAIT_RESULT_FAIL)) {
		/* This should not happen. Events shouldn't be
		 * destroyed while we're holding the event_mutex
		 */
		goto out_unlock;
	}

	/* Add to wait lists if we need to wait. */
	for (i = 0; i < num_events; i++)
		init_event_waiter_add_to_waitlist(&event_waiters[i]);

	mutex_unlock(&p->event_mutex);

	while (true) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (signal_pending(current)) {
			/*
			 * This is wrong when a nonzero, non-infinite timeout
			 * is specified. We need to use
			 * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
			 * contains a union with data for each user and it's
			 * in generic kernel code that I don't want to
			 * touch yet.
			 */
			ret = -ERESTARTSYS;
			break;
		}

		/* Set task state to interruptible sleep before
		 * checking wake-up conditions. A concurrent wake-up
		 * will put the task back into runnable state. In that
		 * case schedule_timeout will not put the task to
		 * sleep and we'll get a chance to re-check the
		 * updated conditions almost immediately. Otherwise,
		 * this race condition would lead to a soft hang or a
		 * very long sleep.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		*wait_result = test_event_condition(all, num_events,
						    event_waiters);
		if (*wait_result != KFD_IOC_WAIT_RESULT_TIMEOUT)
			break;

		if (timeout <= 0)
			break;

		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	/* copy_signaled_event_data may sleep. So this has to happen
	 * after the task state is set back to RUNNING.
	 */
	if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);

	mutex_lock(&p->event_mutex);
out_unlock:
	free_waiters(num_events, event_waiters);
	mutex_unlock(&p->event_mutex);
out:
	if (ret)
		*wait_result = KFD_IOC_WAIT_RESULT_FAIL;
	else if (*wait_result == KFD_IOC_WAIT_RESULT_FAIL)
		ret = -EIO;

	return ret;
}

int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
{
	unsigned long pfn;
	struct kfd_signal_page *page;

	/* check required size is logical */
	if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) !=
			get_order(vma->vm_end - vma->vm_start)) {
		pr_err("Event page mmap requested illegal size\n");
		return -EINVAL;
	}

	page = p->signal_page;
	if (!page) {
		/* Probably KFD bug, but mmap is user-accessible. */
		pr_debug("Signal page could not be found\n");
		return -EINVAL;
	}

	pfn = __pa(page->kernel_address);
	pfn >>= PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
		       | VM_DONTDUMP | VM_PFNMAP;

	pr_debug("Mapping signal page\n");
	pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
	pr_debug("     end user address    == 0x%08lx\n", vma->vm_end);
	pr_debug("     pfn                 == 0x%016lX\n", pfn);
	pr_debug("     vm_flags            == 0x%08lX\n", vma->vm_flags);
	pr_debug("     size                == 0x%08lX\n",
			vma->vm_end - vma->vm_start);

	page->user_address = (uint64_t __user *)vma->vm_start;

	/* mapping the page to user process */
	return remap_pfn_range(vma, vma->vm_start, pfn,
			vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
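
/*
 * Userspace usage sketch (illustrative only: kfd_fd, event_page_offset
 * and event_slot_index come from the create-event ioctl; size must
 * match the signal page size checked above):
 *
 *	uint64_t *slots = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, kfd_fd, event_page_offset);
 *
 * The GPU (or runtime) writes slots[event_slot_index] before raising
 * an interrupt; acknowledge_signal() resets the slot to
 * UNSIGNALED_EVENT_SLOT when the event is delivered.
 */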

/*
 * Assumes that p->event_mutex is held and of course
 * that p is not going away (current or locked).
 */
static void lookup_events_by_type_and_signal(struct kfd_process *p,
		int type, void *event_data)
{
	struct kfd_hsa_memory_exception_data *ev_data;
	struct kfd_event *ev;
	uint32_t id;
	bool send_signal = true;

	ev_data = (struct kfd_hsa_memory_exception_data *) event_data;

	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	idr_for_each_entry_continue(&p->event_idr, ev, id)
		if (ev->type == type) {
			send_signal = false;
			dev_dbg(kfd_device,
					"Event found: id %X type %d",
					ev->event_id, ev->type);
			set_event(ev);
			if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
				ev->memory_exception_data = *ev_data;
		}

	/* Send SIGTERM if no event of type @type has been found */
	if (send_signal) {
		if (send_sigterm) {
			dev_warn(kfd_device,
				"Sending SIGTERM to HSA Process with PID %d ",
					p->lead_thread->pid);
			send_sig(SIGTERM, p->lead_thread, 0);
		} else {
			dev_err(kfd_device,
				"HSA Process (PID %d) got unhandled exception",
				p->lead_thread->pid);
		}
	}
}

void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
		unsigned long address, bool is_write_requested,
		bool is_execute_requested)
{
	struct kfd_hsa_memory_exception_data memory_exception_data;
	struct vm_area_struct *vma;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	struct mm_struct *mm;

	if (!p)
		return; /* Presumably process exited. */

	/* Take a safe reference to the mm_struct, which may otherwise
	 * disappear even while the kfd_process is still referenced.
	 */
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		mutex_unlock(&p->mutex);
		return; /* Process is exiting */
	}

	memset(&memory_exception_data, 0, sizeof(memory_exception_data));

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	memory_exception_data.gpu_id = dev->id;
	memory_exception_data.va = address;
	/* Set failure reason */
	memory_exception_data.failure.NotPresent = 1;
	memory_exception_data.failure.NoExecute = 0;
	memory_exception_data.failure.ReadOnly = 0;
	if (vma) {
		if (vma->vm_start > address) {
			memory_exception_data.failure.NotPresent = 1;
			memory_exception_data.failure.NoExecute = 0;
			memory_exception_data.failure.ReadOnly = 0;
		} else {
			memory_exception_data.failure.NotPresent = 0;
			if (is_write_requested && !(vma->vm_flags & VM_WRITE))
				memory_exception_data.failure.ReadOnly = 1;
			else
				memory_exception_data.failure.ReadOnly = 0;
			if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
				memory_exception_data.failure.NoExecute = 1;
			else
				memory_exception_data.failure.NoExecute = 0;
		}
	}

	up_read(&mm->mmap_sem);
	mmput(mm);

	mutex_lock(&p->event_mutex);

	/* Lookup events by type and signal them */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
			&memory_exception_data);

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}

void kfd_signal_hw_exception_event(unsigned int pasid)
{
	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	/* Lookup events by type and signal them */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}