/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/memory.h>
#include "kfd_priv.h"
#include "kfd_events.h"
#include <linux/device.h>

/*
 * A task can only be on a single wait_queue at a time, but we need to support
 * waiting on multiple events (any/all).
 * Instead of each event simply having a wait_queue with sleeping tasks, it
 * has a list of kfd_event_waiter entries.
 * A thread that wants to sleep creates an array of these, one for each event
 * it is waiting on, and adds one to each event's waiter chain.
 */
struct kfd_event_waiter {
	struct list_head waiters;
	struct task_struct *sleeping_task;

	/* Transitions to true when the event this belongs to is signaled. */
	bool activated;

	/* Event waited on and its index in the caller's event array. */
	struct kfd_event *event;
	uint32_t input_index;
};

/*
 * Over-complicated pooled allocator for event notification slots.
 *
 * Each signal event needs a 64-bit signal slot where the signaler will write
 * a 1 before sending an interrupt. (This is needed because some interrupts
 * do not contain enough spare data bits to identify an event.)
 * We get whole pages via __get_free_pages and map them into the process VA.
 * Individual signal events are then allocated a slot in a page.
 */
struct signal_page {
	struct list_head event_pages;	/* kfd_process.signal_event_pages */
	uint64_t *kernel_address;
	uint64_t __user *user_address;
	uint32_t page_index;		/* Index into the mmap aperture. */
	unsigned int free_slots;
	unsigned long used_slot_bitmap[0];
};

#define SLOTS_PER_PAGE KFD_SIGNAL_EVENT_LIMIT
#define SLOT_BITMAP_SIZE BITS_TO_LONGS(SLOTS_PER_PAGE)
#define BITS_PER_PAGE (ilog2(SLOTS_PER_PAGE) + 1)
#define SIGNAL_PAGE_SIZE (sizeof(struct signal_page) + \
				SLOT_BITMAP_SIZE * sizeof(long))

/*
 * For signal events, the event ID is used as the interrupt user data.
 * For SQ s_sendmsg interrupts, this is limited to 8 bits.
 */
#define INTERRUPT_DATA_BITS 8
#define SIGNAL_EVENT_ID_SLOT_SHIFT 0

static uint64_t *page_slots(struct signal_page *page)
{
	return page->kernel_address;
}

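/*
 * Find a free slot on one of the process's existing signal pages, mark it
 * used and return it to the caller. Returns false when every slot on every
 * page is already taken. Assumes that the process's event_mutex is locked.
 */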
static bool allocate_free_slot(struct kfd_process *process,
				struct signal_page **out_page,
				unsigned int *out_slot_index)
{
	struct signal_page *page;

	list_for_each_entry(page, &process->signal_event_pages, event_pages) {
		if (page->free_slots > 0) {
			unsigned int slot =
				find_first_zero_bit(page->used_slot_bitmap,
							SLOTS_PER_PAGE);

			__set_bit(slot, page->used_slot_bitmap);
			page->free_slots--;
			page_slots(page)[slot] = UNSIGNALED_EVENT_SLOT;

			*out_page = page;
			*out_slot_index = slot;

			pr_debug("Allocated event signal slot in page %p, slot %d\n",
					page, slot);
			return true;
		}
	}

	pr_debug("No free event signal slots were found for process %p\n",
			process);
	return false;
}

#define list_tail_entry(head, type, member) \
	list_entry((head)->prev, type, member)

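/*
 * Allocate a new signal page plus its backing store and append it to the
 * process's signal page list. The backing store is filled with the
 * unsignaled pattern so user space never observes stale kernel memory.
 */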
static bool allocate_signal_page(struct file *devkfd, struct kfd_process *p)
{
	void *backing_store;
	struct signal_page *page;

	page = kzalloc(SIGNAL_PAGE_SIZE, GFP_KERNEL);
	if (!page)
		goto fail_alloc_signal_page;

	page->free_slots = SLOTS_PER_PAGE;

	backing_store = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
	if (!backing_store)
		goto fail_alloc_signal_store;

	/* Prevent user-mode info leaks. */
	memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
		KFD_SIGNAL_EVENT_LIMIT * 8);

	page->kernel_address = backing_store;

	if (list_empty(&p->signal_event_pages))
		page->page_index = 0;
	else
		page->page_index = list_tail_entry(&p->signal_event_pages,
						   struct signal_page,
						   event_pages)->page_index + 1;

	pr_debug("Allocated new event signal page at %p, for process %p\n",
			page, p);
	pr_debug("Page index is %d\n", page->page_index);

	list_add(&page->event_pages, &p->signal_event_pages);

	return true;

fail_alloc_signal_store:
	kfree(page);
fail_alloc_signal_page:
	return false;
}

static bool allocate_event_notification_slot(struct file *devkfd,
					struct kfd_process *p,
					struct signal_page **page,
					unsigned int *signal_slot_index)
{
	bool ret;

	ret = allocate_free_slot(p, page, signal_slot_index);
	if (!ret) {
		ret = allocate_signal_page(devkfd, p);
		if (ret)
			ret = allocate_free_slot(p, page, signal_slot_index);
	}

	return ret;
}

/* Assumes that the process's event_mutex is locked. */
static void release_event_notification_slot(struct signal_page *page,
						size_t slot_index)
{
	__clear_bit(slot_index, page->used_slot_bitmap);
	page->free_slots++;

	/*
	 * We don't free signal pages, they are retained by the process
	 * and reused until it exits.
	 */
}

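/* Find the signal page that backs a given index in the mmap aperture. */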
static struct signal_page *lookup_signal_page_by_index(struct kfd_process *p,
						unsigned int page_index)
{
	struct signal_page *page;

	/*
	 * This is safe because we don't delete signal pages until the
	 * process exits.
	 */
	list_for_each_entry(page, &p->signal_event_pages, event_pages)
		if (page->page_index == page_index)
			return page;

	return NULL;
}

/*
 * Assumes that p->event_mutex is held and of course that p is not going
 * away (current or locked).
 */
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
	struct kfd_event *ev;

	hash_for_each_possible(p->events, ev, events, id)
		if (ev->event_id == id)
			return ev;

	return NULL;
}

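/*
 * Signal event IDs are derived from the event's page index and slot index,
 * so an ID delivered with an interrupt can be mapped back to its signal slot.
 */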
static u32 make_signal_event_id(struct signal_page *page,
					unsigned int signal_slot_index)
{
	return page->page_index |
			(signal_slot_index << SIGNAL_EVENT_ID_SLOT_SHIFT);
}

/*
 * Produce a kfd event id for a nonsignal event.
 * These are arbitrary numbers, so we do a sequential search through
 * the hash table for an unused number.
 */
static u32 make_nonsignal_event_id(struct kfd_process *p)
{
	u32 id;

	for (id = p->next_nonsignal_event_id;
		id < KFD_LAST_NONSIGNAL_EVENT_ID &&
		lookup_event_by_id(p, id);
		id++)
		;

	if (id < KFD_LAST_NONSIGNAL_EVENT_ID) {
		/*
		 * What if id == LAST_NONSIGNAL_EVENT_ID - 1?
		 * Then next_nonsignal_event_id = LAST_NONSIGNAL_EVENT_ID so
		 * the first loop fails immediately and we proceed with the
		 * wraparound loop below.
		 */
		p->next_nonsignal_event_id = id + 1;
		return id;
	}

	for (id = KFD_FIRST_NONSIGNAL_EVENT_ID;
		id < KFD_LAST_NONSIGNAL_EVENT_ID &&
		lookup_event_by_id(p, id);
		id++)
		;

	if (id < KFD_LAST_NONSIGNAL_EVENT_ID) {
		p->next_nonsignal_event_id = id + 1;
		return id;
	}

	/* All nonsignal event IDs are in use. */
	p->next_nonsignal_event_id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	return 0;
}

static struct kfd_event *lookup_event_by_page_slot(struct kfd_process *p,
						struct signal_page *page,
						unsigned int signal_slot)
{
	return lookup_event_by_id(p, make_signal_event_id(page, signal_slot));
}

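/*
 * Reserve a notification slot for a new signal event, derive the event ID
 * from that slot and record the slot's user-mode address in the event.
 */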
static int create_signal_event(struct file *devkfd,
				struct kfd_process *p,
				struct kfd_event *ev)
{
	if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
		pr_warn("Signal event wasn't created because limit was reached\n");
		return -ENOMEM;
	}

	if (!allocate_event_notification_slot(devkfd, p, &ev->signal_page,
						&ev->signal_slot_index)) {
		pr_warn("Signal event wasn't created because out of kernel memory\n");
		return -ENOMEM;
	}

	p->signal_event_count++;

	ev->user_signal_address =
			&ev->signal_page->user_address[ev->signal_slot_index];

	ev->event_id = make_signal_event_id(ev->signal_page,
						ev->signal_slot_index);

	pr_debug("Signal event number %zu created with id %d, address %p\n",
			p->signal_event_count, ev->event_id,
			ev->user_signal_address);

	return 0;
}

/*
 * No non-signal events are supported yet.
 * We create them as events that never signal.
 * Set-event calls from user mode fail for these events.
 */
static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
{
	ev->event_id = make_nonsignal_event_id(p);
	if (ev->event_id == 0)
		return -ENOMEM;

	return 0;
}

void kfd_event_init_process(struct kfd_process *p)
{
	mutex_init(&p->event_mutex);
	hash_init(p->events);
	INIT_LIST_HEAD(&p->signal_event_pages);
	p->next_nonsignal_event_id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	p->signal_event_count = 0;
}

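/*
 * Tear down a single event: return its signal slot (if any) to the pool,
 * drop it from the event ID hash and free it.
 */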
static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
	if (ev->signal_page) {
		release_event_notification_slot(ev->signal_page,
						ev->signal_slot_index);
		p->signal_event_count--;
	}

	/*
	 * Abandon the list of waiters. Individual waiting threads will
	 * clean up their own data.
	 */
	list_del(&ev->waiters);

	hash_del(&ev->events);
	kfree(ev);
}

static void destroy_events(struct kfd_process *p)
{
	struct kfd_event *ev;
	struct hlist_node *tmp;
	unsigned int hash_bkt;

	hash_for_each_safe(p->events, hash_bkt, tmp, ev, events)
		destroy_event(p, ev);
}

/*
 * We assume that the process is being destroyed and there is no need to
 * unmap the pages or keep bookkeeping data in order.
 */
static void shutdown_signal_pages(struct kfd_process *p)
{
	struct signal_page *page, *tmp;

	list_for_each_entry_safe(page, tmp, &p->signal_event_pages,
					event_pages) {
		free_pages((unsigned long)page->kernel_address,
				get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
		kfree(page);
	}
}

void kfd_event_free_process(struct kfd_process *p)
{
	destroy_events(p);
	shutdown_signal_pages(p);
}

static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL ||
					ev->type == KFD_EVENT_TYPE_DEBUG;
}

static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL;
}

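/*
 * Create a new event for the process. For signal and debug events, the
 * returned event_page_offset and event_slot_index tell user mode where to
 * mmap the signal page and which slot within it belongs to this event.
 */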
int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index)
{
	int ret = 0;
	struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;

	ev->type = event_type;
	ev->auto_reset = auto_reset;
	ev->signaled = false;

	INIT_LIST_HEAD(&ev->waiters);

	*event_page_offset = 0;

	mutex_lock(&p->event_mutex);

	switch (event_type) {
	case KFD_EVENT_TYPE_SIGNAL:
	case KFD_EVENT_TYPE_DEBUG:
		ret = create_signal_event(devkfd, p, ev);
		if (!ret) {
			*event_page_offset = (ev->signal_page->page_index |
					KFD_MMAP_EVENTS_MASK);
			*event_page_offset <<= PAGE_SHIFT;
			*event_slot_index = ev->signal_slot_index;
		}
		break;
	default:
		ret = create_other_event(p, ev);
		break;
	}

	if (!ret) {
		hash_add(p->events, &ev->events, ev->event_id);
		*event_id = ev->event_id;
		*event_trigger_data = ev->event_id;
	} else {
		kfree(ev);
	}

	mutex_unlock(&p->event_mutex);

	return ret;
}

/* Assumes that p is current. */
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev)
		destroy_event(p, ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void set_event(struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;
	struct kfd_event_waiter *next;

	/* Auto reset if the list is non-empty and we're waking someone. */
	ev->signaled = !ev->auto_reset || list_empty(&ev->waiters);

	list_for_each_entry_safe(waiter, next, &ev->waiters, waiters) {
		waiter->activated = true;

		/* _init because free_waiters will call list_del */
		list_del_init(&waiter->waiters);

		wake_up_process(waiter->sleeping_task);
	}
}

/* Assumes that p is current. */
int kfd_set_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		set_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void reset_event(struct kfd_event *ev)
{
	ev->signaled = false;
}

/* Assumes that p is current. */
int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		reset_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

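/* Return the event's signal slot to the unsignaled pattern once consumed. */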
static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
{
	page_slots(ev->signal_page)[ev->signal_slot_index] =
			UNSIGNALED_EVENT_SLOT;
}

static bool is_slot_signaled(struct signal_page *page, unsigned int index)
{
	return page_slots(page)[index] != UNSIGNALED_EVENT_SLOT;
}

static void set_event_from_interrupt(struct kfd_process *p,
					struct kfd_event *ev)
{
	if (ev && event_can_be_gpu_signaled(ev)) {
		acknowledge_signal(p, ev);
		set_event(ev);
	}
}

void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits)
{
	struct kfd_event *ev;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	if (valid_id_bits >= INTERRUPT_DATA_BITS) {
		/* Partial ID is a full ID. */
		ev = lookup_event_by_id(p, partial_id);
		set_event_from_interrupt(p, ev);
	} else {
		/*
		 * Partial ID is in fact partial. For now we completely
		 * ignore it, but we could use any bits we did receive to
		 * narrow the search.
		 */
		struct signal_page *page;
		unsigned int i;

		list_for_each_entry(page, &p->signal_event_pages, event_pages)
			for (i = 0; i < SLOTS_PER_PAGE; i++)
				if (is_slot_signaled(page, i)) {
					ev = lookup_event_by_page_slot(p,
								page, i);
					set_event_from_interrupt(p, ev);
				}
	}

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}

static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
{
	struct kfd_event_waiter *event_waiters;
	uint32_t i;

	event_waiters = kmalloc_array(num_events,
					sizeof(struct kfd_event_waiter),
					GFP_KERNEL);

	for (i = 0; (event_waiters) && (i < num_events) ; i++) {
		INIT_LIST_HEAD(&event_waiters[i].waiters);
		event_waiters[i].sleeping_task = current;
		event_waiters[i].activated = false;
	}

	return event_waiters;
}

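/*
 * Attach one waiter to its event. Captures whether the event is already
 * signaled and, for auto-reset events, consumes that signal.
 * Assumes that p->event_mutex is held.
 */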
static int init_event_waiter(struct kfd_process *p,
		struct kfd_event_waiter *waiter,
		uint32_t event_id,
		uint32_t input_index)
{
	struct kfd_event *ev = lookup_event_by_id(p, event_id);

	if (!ev)
		return -EINVAL;

	waiter->event = ev;
	waiter->input_index = input_index;
	waiter->activated = ev->signaled;
	ev->signaled = ev->signaled && !ev->auto_reset;

	list_add(&waiter->waiters, &ev->waiters);

	return 0;
}

static bool test_event_condition(bool all, uint32_t num_events,
				struct kfd_event_waiter *event_waiters)
{
	uint32_t i;
	uint32_t activated_count = 0;

	for (i = 0; i < num_events; i++) {
		if (event_waiters[i].activated) {
			if (!all)
				return true;

			activated_count++;
		}
	}

	return activated_count == num_events;
}

/*
 * Copy event-specific data, if defined.
 * Currently only memory exception events have additional data to copy to user.
 */
static bool copy_signaled_event_data(uint32_t num_events,
		struct kfd_event_waiter *event_waiters,
		struct kfd_event_data __user *data)
{
	struct kfd_hsa_memory_exception_data *src;
	struct kfd_hsa_memory_exception_data __user *dst;
	struct kfd_event_waiter *waiter;
	struct kfd_event *event;
	uint32_t i;

	for (i = 0; i < num_events; i++) {
		waiter = &event_waiters[i];
		event = waiter->event;
		if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
			dst = &data[waiter->input_index].memory_exception_data;
			src = &event->memory_exception_data;
			if (copy_to_user(dst, src,
				sizeof(struct kfd_hsa_memory_exception_data)))
				return false;
		}
	}

	return true;
}

static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
{
	if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
		return 0;

	if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
		return MAX_SCHEDULE_TIMEOUT;

	/*
	 * msecs_to_jiffies interprets all values above 2^31-1 as infinite,
	 * but we consider them finite.
	 * This hack is wrong, but nobody is likely to notice.
	 */
	user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF);

	return msecs_to_jiffies(user_timeout_ms) + 1;
}

static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
{
	uint32_t i;

	/* The wait error path may get here before any waiters were allocated. */
	if (!waiters)
		return;

	for (i = 0; i < num_events; i++)
		list_del(&waiters[i].waiters);

	kfree(waiters);
}

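/*
 * Sleep until any (or all) of the given events have been signaled, the
 * timeout expires, or a signal interrupts the wait. The waiters allocated
 * here are what set_event() activates and wakes.
 */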
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       enum kfd_event_wait_result *wait_result)
{
	struct kfd_event_data __user *events =
			(struct kfd_event_data __user *) data;
	uint32_t i;
	int ret = 0;
	struct kfd_event_waiter *event_waiters = NULL;
	long timeout = user_timeout_to_jiffies(user_timeout_ms);

	mutex_lock(&p->event_mutex);

	event_waiters = alloc_event_waiters(num_events);
	if (!event_waiters) {
		ret = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < num_events; i++) {
		struct kfd_event_data event_data;

		if (copy_from_user(&event_data, &events[i],
				sizeof(struct kfd_event_data))) {
			ret = -EFAULT;
			goto fail;
		}

		ret = init_event_waiter(p, &event_waiters[i],
				event_data.event_id, i);
		if (ret)
			goto fail;
	}

	mutex_unlock(&p->event_mutex);

	while (true) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (signal_pending(current)) {
			/*
			 * This is wrong when a nonzero, non-infinite timeout
			 * is specified. We need to use
			 * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
			 * contains a union with data for each user and it's
			 * in generic kernel code that I don't want to
			 * touch yet.
			 */
			ret = -ERESTARTSYS;
			break;
		}

		if (test_event_condition(all, num_events, event_waiters)) {
			if (copy_signaled_event_data(num_events,
					event_waiters, events))
				*wait_result = KFD_WAIT_COMPLETE;
			else
				*wait_result = KFD_WAIT_ERROR;
			break;
		}

		if (timeout <= 0) {
			*wait_result = KFD_WAIT_TIMEOUT;
			break;
		}

		timeout = schedule_timeout_interruptible(timeout);
	}
	__set_current_state(TASK_RUNNING);

	mutex_lock(&p->event_mutex);
	free_waiters(num_events, event_waiters);
	mutex_unlock(&p->event_mutex);

	return ret;

fail:
	free_waiters(num_events, event_waiters);
	mutex_unlock(&p->event_mutex);

	*wait_result = KFD_WAIT_ERROR;

	return ret;
}

int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
{
	unsigned int page_index;
	unsigned long pfn;
	struct signal_page *page;

	/* Check that the requested size is logical. */
	if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) !=
			get_order(vma->vm_end - vma->vm_start)) {
		pr_err("Event page mmap requested illegal size\n");
		return -EINVAL;
	}

	page_index = vma->vm_pgoff;

	page = lookup_signal_page_by_index(p, page_index);
	if (!page) {
		/* Probably KFD bug, but mmap is user-accessible. */
		pr_debug("Signal page could not be found for page_index %u\n",
				page_index);
		return -EINVAL;
	}

	pfn = __pa(page->kernel_address);
	pfn >>= PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
		       | VM_DONTDUMP | VM_PFNMAP;

	pr_debug("Mapping signal page\n");
	pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
	pr_debug("     end user address    == 0x%08lx\n", vma->vm_end);
	pr_debug("     pfn                 == 0x%016lX\n", pfn);
	pr_debug("     vm_flags            == 0x%08lX\n", vma->vm_flags);
	pr_debug("     size                == 0x%08lX\n",
			vma->vm_end - vma->vm_start);

	page->user_address = (uint64_t __user *)vma->vm_start;

	/* Map the page into the user process. */
	return remap_pfn_range(vma, vma->vm_start, pfn,
			vma->vm_end - vma->vm_start, vma->vm_page_prot);
}

/*
 * Assumes that p->event_mutex is held and of course
 * that p is not going away (current or locked).
 */
static void lookup_events_by_type_and_signal(struct kfd_process *p,
		int type, void *event_data)
{
	struct kfd_hsa_memory_exception_data *ev_data;
	struct kfd_event *ev;
	int bkt;
	bool send_signal = true;

	ev_data = (struct kfd_hsa_memory_exception_data *) event_data;

	hash_for_each(p->events, bkt, ev, events)
		if (ev->type == type) {
			send_signal = false;
			dev_dbg(kfd_device,
					"Event found: id %X type %d",
					ev->event_id, ev->type);
			set_event(ev);
			if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
				ev->memory_exception_data = *ev_data;
		}

	/* Send SIGTERM if no event of type "type" has been found. */
	if (send_signal) {
		if (send_sigterm) {
			dev_warn(kfd_device,
				"Sending SIGTERM to HSA Process with PID %d ",
					p->lead_thread->pid);
			send_sig(SIGTERM, p->lead_thread, 0);
		} else {
			dev_err(kfd_device,
				"HSA Process (PID %d) got unhandled exception",
				p->lead_thread->pid);
		}
	}
}

void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
		unsigned long address, bool is_write_requested,
		bool is_execute_requested)
{
	struct kfd_hsa_memory_exception_data memory_exception_data;
	struct vm_area_struct *vma;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	memset(&memory_exception_data, 0, sizeof(memory_exception_data));

	down_read(&p->mm->mmap_sem);
	vma = find_vma(p->mm, address);

	memory_exception_data.gpu_id = dev->id;
	memory_exception_data.va = address;
	/* Set failure reason */
	memory_exception_data.failure.NotPresent = 1;
	memory_exception_data.failure.NoExecute = 0;
	memory_exception_data.failure.ReadOnly = 0;
	if (vma) {
		if (vma->vm_start > address) {
			memory_exception_data.failure.NotPresent = 1;
			memory_exception_data.failure.NoExecute = 0;
			memory_exception_data.failure.ReadOnly = 0;
		} else {
			memory_exception_data.failure.NotPresent = 0;
			if (is_write_requested && !(vma->vm_flags & VM_WRITE))
				memory_exception_data.failure.ReadOnly = 1;
			else
				memory_exception_data.failure.ReadOnly = 0;
			if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
				memory_exception_data.failure.NoExecute = 1;
			else
				memory_exception_data.failure.NoExecute = 0;
		}
	}

	up_read(&p->mm->mmap_sem);

	mutex_lock(&p->event_mutex);

	/* Look up events by type and signal them. */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
			&memory_exception_data);

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}

void kfd_signal_hw_exception_event(unsigned int pasid)
{
	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	/* Look up events by type and signal them. */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}