Lines Matching +full:signal +full:- +full:id

// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2014-2022 Advanced Micro Devices, Inc. */

#include <linux/sched/signal.h>
/*
 * Each signal event needs a 64-bit signal slot where the signaler will write
 * a 1 before sending an interrupt.
 * Individual signal events use their event_id as slot index.
 */
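The slot layout described above can be sketched in plain user-space C: the signal page is an array of 64-bit slots, and an event's ID doubles as its slot index. Everything here (slot count, sentinel value, helper name) is illustrative, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

#define N_SLOTS 512			/* illustrative page worth of 8-byte slots */
#define UNSIGNALED UINT64_MAX		/* stand-in for UNSIGNALED_EVENT_SLOT */

/* Hypothetical helper: an event's ID is its index into the slot array. */
static uint64_t *slot_of(uint64_t *page, uint32_t event_id)
{
	return &page[event_id];
}

int main(void)
{
	uint64_t page[N_SLOTS];

	for (int i = 0; i < N_SLOTS; i++)
		page[i] = UNSIGNALED;

	*slot_of(page, 7) = 1;	/* the signaler writes before interrupting */
	printf("slot 7 signaled: %d\n", page[7] != UNSIGNALED);
	return 0;
}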
/* in page_slots() */
	return page->kernel_address;
/* in allocate_signal_page() */
	page->kernel_address = backing_store;
	page->need_to_free_pages = true;
	pr_debug("Allocated new event signal page at %p, for process %p\n",
		 backing_store, p);
/* in allocate_event_notification_slot() */
	int id;

	if (!p->signal_page) {
		p->signal_page = allocate_signal_page(p);
		if (!p->signal_page)
			return -ENOMEM;
		/* Oldest user mode expects 256 event slots */
		p->signal_mapped_size = 256*8;
	}

	if (restore_id) {
		id = idr_alloc(&p->event_idr, ev, *restore_id, *restore_id + 1,
			       GFP_KERNEL);
	} else {
		/* Compatibility with old user mode: Only use signal slots
		 * ...
		 */
		id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
			       GFP_KERNEL);
	}
	if (id < 0)
		return id;

	ev->event_id = id;
	page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT;
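A toy model of the two allocations above, assuming idr_alloc()'s usual half-open [start, end) semantics: the restore path pins one exact ID by passing *restore_id and *restore_id + 1, while a fresh allocation searches the mapped slot range. The array-backed allocator is a stand-in for the kernel IDR, not its implementation:

#include <stdbool.h>
#include <stdio.h>

#define MAX_IDS 4096

static bool used[MAX_IDS];

/* Stand-in for idr_alloc(): lowest free ID in [start, end), or -1. */
static int toy_idr_alloc(int start, int end)
{
	for (int id = start; id < end && id < MAX_IDS; id++)
		if (!used[id]) {
			used[id] = true;
			return id;
		}
	return -1;
}

int main(void)
{
	int restore_id = 42;

	/* Restore: the range [42, 43) admits only ID 42. */
	printf("restored id: %d\n", toy_idr_alloc(restore_id, restore_id + 1));
	/* Fresh: first free index below the mapped limit (256 slots here). */
	printf("fresh id:    %d\n", toy_idr_alloc(0, 256));
	return 0;
}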
/* Assumes that p->event_mutex or rcu_readlock is held and of course that p is
 * ...
 */
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
	return idr_find(&p->event_idr, id);
}
/**
 * lookup_signaled_event_by_partial_id - Lookup signaled event from partial ID
 * @p: Pointer to struct kfd_process
 * @id: ID to look up
 * @bits: Number of valid bits in @id
 *
 * Finds the first signaled event with a matching partial ID. If no
 * signaled event matches, the caller should assume that the partial
 * ID is invalid and do an exhaustive search of all signaled events.
 *
 * If multiple events with the same partial ID signal at the same
 * time, they will be found one interrupt at a time, not necessarily
 * in the same order the interrupts occurred.
 */
static struct kfd_event *lookup_signaled_event_by_partial_id(
	struct kfd_process *p, uint32_t id, uint32_t bits)
{
	struct kfd_event *ev;

	if (!p->signal_page || id >= KFD_SIGNAL_EVENT_LIMIT)
		return NULL;

	/* Fast path for the common case that @id is not a partial ID
	 * and we only need a single lookup:
	 */
	if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
		return NULL;
	return idr_find(&p->event_idr, id);

	/* (The condition selecting fast vs. slow path is not among the
	 * matched lines.) Slow path, when @id really is partial: step
	 * through every ID sharing the low @bits and return the first
	 * signaled one.
	 */
	for (ev = NULL; id < KFD_SIGNAL_EVENT_LIMIT && !ev; id += 1U << bits) {
		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
			continue;
		ev = idr_find(&p->event_idr, id);
	}
	return ev;
}
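The loop above visits every full ID whose low bits equal the partial ID by stepping in increments of 1 << bits. A user-space rendering of that enumeration, with an illustrative limit in place of KFD_SIGNAL_EVENT_LIMIT:

#include <stdint.h>
#include <stdio.h>

#define EVENT_LIMIT 4096	/* illustrative stand-in */

int main(void)
{
	uint32_t partial = 5, bits = 10;

	/* Candidates sharing the low 10 bits: 5, 1029, 2053, 3077. */
	for (uint32_t id = partial; id < EVENT_LIMIT; id += 1U << bits)
		printf("candidate %u (low bits 0x%03x)\n",
		       id, id & ((1U << bits) - 1));
	return 0;
}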
/* in create_signal_event() */
	if (p->signal_mapped_size &&
	    p->signal_event_count == p->signal_mapped_size / 8) {
		if (!p->signal_event_limit_reached) {
			pr_debug("Signal event wasn't created because limit was reached\n");
			p->signal_event_limit_reached = true;
		}
		return -ENOSPC;
	}

	/* slot allocation failure: */
	pr_warn("Signal event wasn't created because out of kernel memory\n");

	p->signal_event_count++;

	ev->user_signal_address = &p->signal_page->user_address[ev->event_id];
	pr_debug("Signal event number %zu created with id %d, address %p\n",
		 p->signal_event_count, ev->event_id,
		 ev->user_signal_address);
/* in create_other_event() */
	int id;

	if (restore_id) {
		id = idr_alloc(&p->event_idr, ev, *restore_id, *restore_id + 1,
			       GFP_KERNEL);
	} else {
		/* Cast KFD_LAST_NONSIGNAL_EVENT to uint32_t. This allows an
		 * intentional integer overflow to -1 without a compiler
		 * warning. idr_alloc treats a negative value as "maximum
		 * signed integer".
		 */
		id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
			       (uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
			       GFP_KERNEL);
	}

	if (id < 0)
		return id;
	ev->event_id = id;
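The comment above describes a deliberate wrap-around: casting the last valid ID to uint32_t and adding 1 produces 0xFFFFFFFF, which reads as -1 when idr_alloc() takes it as a signed int, and a negative end means "maximum range". A small demonstration of the wrap itself; the concrete constant is an assumption, not the driver's value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t last_id = 0xFFFFFFFEu;	/* assumed stand-in for the last ID */
	int end = (int)(last_id + 1);	/* wraps to 0xFFFFFFFF == -1 as int */

	/* A negative end tells idr_alloc() to use the maximum range, so the
	 * full non-signal ID space stays reachable without a wider type.
	 */
	printf("end = %d\n", end);	/* prints -1 */
	return 0;
}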
/* in kfd_event_init_process() */
	int id;

	mutex_init(&p->event_mutex);
	idr_init(&p->event_idr);
	p->signal_page = NULL;
	p->signal_event_count = 1;
	/* Allocate event ID 0. It is used for a fast path to ignore bogus events
	 * that are sent by the CP without a context ID
	 */
	id = idr_alloc(&p->event_idr, NULL, 0, 1, GFP_KERNEL);
	if (id < 0) {
		idr_destroy(&p->event_idr);
		mutex_destroy(&p->event_mutex);
		return id;
	}
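Reserving ID 0 with a NULL pointer means idr_alloc() can never hand 0 to a real event, while a lookup of ID 0 still returns NULL, so a bogus interrupt carrying ID 0 falls out of the lookup with no special casing. A toy illustration of that convention (the arrays are stand-ins for the IDR, not its implementation):

#include <stdio.h>

#define N 8

static void *table[N];		/* stand-in for the IDR's pointer storage */
static int   taken[N];		/* stand-in for its allocation bitmap */

int main(void)
{
	/* Like idr_alloc(&p->event_idr, NULL, 0, 1, GFP_KERNEL):
	 * ID 0 is occupied, but deliberately maps to NULL.
	 */
	taken[0] = 1;
	table[0] = NULL;

	/* A "lookup" of a bogus event 0 yields NULL, so it is ignored. */
	printf("event 0 -> %p (ignored)\n", table[0]);
	/* A real allocation would start at ID 1. */
	for (int id = 0; id < N; id++)
		if (!taken[id]) {
			printf("first real event gets id %d\n", id);
			break;
		}
	return 0;
}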
/* in destroy_event() */
	spin_lock(&ev->lock);
	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		WRITE_ONCE(waiter->event, NULL);
	wake_up_all(&ev->wq);
	spin_unlock(&ev->lock);

	if (ev->type == KFD_EVENT_TYPE_SIGNAL ||
	    ev->type == KFD_EVENT_TYPE_DEBUG)
		p->signal_event_count--;

	idr_remove(&p->event_idr, ev->event_id);
/* in destroy_events() */
	uint32_t id;

	idr_for_each_entry(&p->event_idr, ev, id)
		destroy_event(p, ev);
	idr_destroy(&p->event_idr);
	mutex_destroy(&p->event_mutex);
/* in shutdown_signal_page() */
	struct kfd_signal_page *page = p->signal_page;

	if (page->need_to_free_pages)
		free_pages((unsigned long)page->kernel_address,
			   get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
/* in event_can_be_gpu_signaled() */
	return ev->type == KFD_EVENT_TYPE_SIGNAL ||
	       ev->type == KFD_EVENT_TYPE_DEBUG;

/* in event_can_be_cpu_signaled() */
	return ev->type == KFD_EVENT_TYPE_SIGNAL;
/* in kfd_event_page_set() */
	if (p->signal_page)
		return -EBUSY;

	return -ENOMEM;		/* page allocation failure */

	page->kernel_address = kernel_address;

	p->signal_page = page;
	p->signal_mapped_size = size;
	p->signal_handle = user_handle;
/* in kfd_kmap_event_page() */
	if (p->signal_page) {
		/* ... */
		return -EINVAL;
	}

	pr_err("Getting device by id failed in %s\n", __func__);
	return -EINVAL;

	kfd = pdd->dev;

	return -EINVAL;
/* in kfd_event_create() */
	return -ENOMEM;		/* allocation failure */

	ev->type = event_type;
	ev->auto_reset = auto_reset;
	ev->signaled = false;

	spin_lock_init(&ev->lock);
	init_waitqueue_head(&ev->wq);

	mutex_lock(&p->event_mutex);

	*event_slot_index = ev->event_id;

	*event_id = ev->event_id;
	*event_trigger_data = ev->event_id;

	mutex_unlock(&p->event_mutex);
/* in kfd_criu_restore_event() — error paths elided: -ENOMEM, -EINVAL, -EFAULT */
	if (ev_priv->user_handle) {
		ret = kfd_kmap_event_page(p, ev_priv->user_handle);
		/* ... */
	}

	ev->type = ev_priv->type;
	ev->auto_reset = ev_priv->auto_reset;
	ev->signaled = ev_priv->signaled;

	spin_lock_init(&ev->lock);
	init_waitqueue_head(&ev->wq);

	mutex_lock(&p->event_mutex);
	switch (ev->type) {
	case KFD_EVENT_TYPE_SIGNAL:
	case KFD_EVENT_TYPE_DEBUG:
		ret = create_signal_event(devkfd, p, ev, &ev_priv->event_id);
		break;
	case KFD_EVENT_TYPE_MEMORY:
		memcpy(&ev->memory_exception_data,
		       &ev_priv->memory_exception_data,
		       sizeof(struct kfd_hsa_memory_exception_data));
		ret = create_other_event(p, ev, &ev_priv->event_id);
		break;
	case KFD_EVENT_TYPE_HW_EXCEPTION:
		memcpy(&ev->hw_exception_data,
		       &ev_priv->hw_exception_data,
		       sizeof(struct kfd_hsa_hw_exception_data));
		ret = create_other_event(p, ev, &ev_priv->event_id);
		break;
	}
	mutex_unlock(&p->event_mutex);
/* in kfd_criu_checkpoint_events() — allocation failure returns -ENOMEM */
	idr_for_each_entry(&p->event_idr, ev, ev_id) {
		ev_priv->object_type = KFD_CRIU_OBJECT_TYPE_EVENT;

		/* We store the user_handle with the first event */
		if (i == 0 && p->signal_page)
			ev_priv->user_handle = p->signal_handle;

		ev_priv->event_id = ev->event_id;
		ev_priv->auto_reset = ev->auto_reset;
		ev_priv->type = ev->type;
		ev_priv->signaled = ev->signaled;

		if (ev_priv->type == KFD_EVENT_TYPE_MEMORY)
			memcpy(&ev_priv->memory_exception_data,
			       &ev->memory_exception_data,
			       sizeof(struct kfd_hsa_memory_exception_data));
		else if (ev_priv->type == KFD_EVENT_TYPE_HW_EXCEPTION)
			memcpy(&ev_priv->hw_exception_data,
			       &ev->hw_exception_data,
			       sizeof(struct kfd_hsa_hw_exception_data));

		pr_debug("Checkpointed event[%d] id = 0x%08x auto_reset = %x type = %x signaled = %x\n",
			 i,
			 ev_priv->event_id,
			 ev_priv->auto_reset,
			 ev_priv->type,
			 ev_priv->signaled);
	}

	/* copy_to_user() failure sets ret = -EFAULT */
/* in kfd_get_num_events() */
	uint32_t id;

	idr_for_each_entry(&p->event_idr, ev, id)
		num_events++;	/* count every allocated event */
/* in kfd_event_destroy() */
	mutex_lock(&p->event_mutex);
	ret = -EINVAL;		/* event ID could not be looked up */
	mutex_unlock(&p->event_mutex);
/* in set_event() */
	/* Auto reset if the list is non-empty and we're waking
	 * someone. waitqueue_active is safe here because we're
	 * protected by the ev->lock, which is also held when
	 * updating the wait queues in kfd_wait_on_events.
	 */
	ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);

	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		WRITE_ONCE(waiter->activated, true);

	wake_up_all(&ev->wq);
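The assignment above encodes the auto-reset rule: a non-auto-reset event simply latches signaled, while an auto-reset event stays signaled only when nobody is waiting, because waking the waiters consumes the signal. Its truth table, evaluated directly:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	for (int auto_reset = 0; auto_reset <= 1; auto_reset++)
		for (int waiters = 0; waiters <= 1; waiters++) {
			/* Mirrors: ev->signaled = !ev->auto_reset ||
			 *			    !waitqueue_active(&ev->wq);
			 */
			bool signaled = !auto_reset || !waiters;

			printf("auto_reset=%d waiters=%d -> signaled=%d\n",
			       auto_reset, waiters, signaled);
		}
	return 0;
}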
/* in kfd_set_event() */
	ret = -EINVAL;		/* unknown event ID */

	spin_lock(&ev->lock);
	ret = -EINVAL;		/* event cannot be signaled from the CPU */
	spin_unlock(&ev->lock);
/* in reset_event() */
	ev->signaled = false;

/* in kfd_reset_event() */
	ret = -EINVAL;		/* unknown event ID */

	spin_lock(&ev->lock);
	ret = -EINVAL;		/* event cannot be reset from the CPU */
	spin_unlock(&ev->lock);
/* in acknowledge_signal() */
	WRITE_ONCE(page_slots(p->signal_page)[ev->event_id], UNSIGNALED_EVENT_SLOT);

/* in set_event_from_interrupt() */
	spin_lock(&ev->lock);
	set_event(ev);
	spin_unlock(&ev->lock);
/* in kfd_signal_event_interrupt() */
	} else if (p->signal_page) {
		/* Partial ID lookup failed. Assume that the event ID
		 * ...
		 */
		uint64_t *slots = page_slots(p->signal_page);
		uint32_t id;

		pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
				     partial_id, valid_id_bits);

		if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT / 64) {
			/* With few events, iterating the IDR is faster. */
			idr_for_each_entry(&p->event_idr, ev, id) {
				if (id >= KFD_SIGNAL_EVENT_LIMIT)
					break;
				if (READ_ONCE(slots[id]) != UNSIGNALED_EVENT_SLOT)
					set_event_from_interrupt(p, ev);
			}
		} else {
			/* With many events, it's faster to
			 * iterate over the signal slots and lookup
			 * only signaled events from the IDR.
			 */
			for (id = 1; id < KFD_SIGNAL_EVENT_LIMIT; id++)
				if (READ_ONCE(slots[id]) != UNSIGNALED_EVENT_SLOT) {
					ev = lookup_event_by_id(p, id);
					set_event_from_interrupt(p, ev);
				}
		}
	}
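The branch above chooses between two exhaustive scans: with few events, walking the IDR and checking each event's slot is cheaper; with many, walking all slots and looking up only the signaled ones wins, since the slot scan costs a fixed KFD_SIGNAL_EVENT_LIMIT reads while the IDR walk scales with the event count. A user-space sketch of the slot-driven variant (sizes and sentinel are stand-ins):

#include <stdint.h>
#include <stdio.h>

#define EVENT_LIMIT 4096
#define UNSIGNALED UINT64_MAX

static uint64_t slots[EVENT_LIMIT];

int main(void)
{
	for (uint32_t i = 0; i < EVENT_LIMIT; i++)
		slots[i] = UNSIGNALED;
	slots[17] = 1;		/* pretend two events signaled */
	slots[900] = 1;

	/* Slot-driven scan: touch every slot once, report only the hits.
	 * ID 0 is skipped; it is reserved for bogus interrupts.
	 */
	for (uint32_t id = 1; id < EVENT_LIMIT; id++)
		if (slots[id] != UNSIGNALED)
			printf("signaled event id %u\n", id);
	return 0;
}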
/* in init_event_waiter() */
	if (!ev)
		return -EINVAL;

	spin_lock(&ev->lock);
	waiter->event = ev;
	waiter->activated = ev->signaled;
	ev->signaled = ev->signaled && !ev->auto_reset;
	if (!waiter->activated)
		add_wait_queue(&ev->wq, &waiter->wait);
	spin_unlock(&ev->lock);
/* test_event_condition - Test condition of events being waited for */
/* in copy_signaled_event_data() */
	event = waiter->event;
	if (!event)
		return -EINVAL;	/* event was destroyed */
	if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
		src = &event->memory_exception_data;
		/* ... copy_to_user() of *src failed: */
		return -EFAULT;
	}
/* in user_timeout_to_jiffies():
 * msecs_to_jiffies interprets all values above 2^31-1 as infinite,
 * ...
 */
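A user-space analogue of the conversion that comment describes: millisecond timeouts above 2^31 - 1 are treated as infinite before the millisecond-to-tick conversion. HZ and the return convention here are illustrative, not the kernel's:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define HZ 250			/* illustrative tick rate */

/* Hypothetical helper mirroring the saturation rule described above. */
static long to_ticks(uint32_t user_timeout_ms)
{
	if (user_timeout_ms > (uint32_t)INT32_MAX)
		return LONG_MAX;	/* effectively: wait forever */
	return (long)user_timeout_ms * HZ / 1000;
}

int main(void)
{
	printf("1000 ms       -> %ld ticks\n", to_ticks(1000));
	printf("0xFFFFFFFF ms -> %ld (infinite)\n", to_ticks(0xFFFFFFFFu));
	return 0;
}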
/* in free_waiters() */
	spin_lock(&waiters[i].event->lock);
	remove_wait_queue(&waiters[i].event->wq,
			  &waiters[i].wait);
	if (undo_auto_reset && waiters[i].activated &&
	    waiters[i].event && waiters[i].event->auto_reset)
		set_event(waiters[i].event);
	spin_unlock(&waiters[i].event->lock);
/* in kfd_wait_on_events() */
	ret = -ENOMEM;		/* waiter allocation failed */

	/* Use p->event_mutex here to protect against concurrent creation and
	 * destruction of events while we initialize event_waiters.
	 */
	mutex_lock(&p->event_mutex);
	ret = -EFAULT;		/* copying event data from user space failed */
	mutex_unlock(&p->event_mutex);

	ret = -EINTR;		/* fatal signal pending */

	ret = -ERESTARTSYS;	/* non-fatal signal: restart the ioctl with
				 * the remaining timeout written back
				 */
	*user_timeout_ms = jiffies_to_msecs(
				max(0l, timeout-1));

	/* The task state is set to interruptible sleep *before*
	 * checking wake-up conditions. A concurrent wake-up
	 * then simply prevents the actual
	 * sleep and we'll get a chance to re-check the
	 * updated conditions almost immediately.
	 */

	mutex_lock(&p->event_mutex);
	/* The event may have been destroyed after signaling, so
	 * copy_signaled_event_data must also confirm that the event
	 * still exists. Therefore this must be under the p->event_mutex
	 * which is also held when events are destroyed.
	 */
	free_waiters(num_events, event_waiters, ret == -ERESTARTSYS);
	mutex_unlock(&p->event_mutex);

	ret = -EIO;
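The wait loop above follows the classic pattern: re-check every wake-up condition before each sleep, so a wake-up arriving between the check and the sleep only shortens the sleep instead of being lost. Condensed into runnable user-space form, with stubs in place of the driver's state:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the driver's conditions. */
static int checks;
static bool all_activated(void) { return ++checks >= 3; }  /* true on 3rd check */
static bool signal_pending_stub(void) { return false; }

int main(void)
{
	long timeout = 10;
	int ret = 0;

	for (;;) {
		/* Conditions are re-checked before each sleep; a concurrent
		 * wake-up between check and sleep cannot be lost because the
		 * next "sleep" would be skipped or cut short.
		 */
		if (all_activated())       { ret = 1;  break; }
		if (signal_pending_stub()) { ret = -1; break; }
		if (timeout <= 0)          { ret = 0;  break; }
		timeout--;	/* stands in for schedule_timeout() */
	}
	printf("result %d after %d condition checks\n", ret, checks);
	return 0;
}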
/* in kfd_event_mmap() */
	if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) !=
	    get_order(vma->vm_end - vma->vm_start)) {
		pr_err("Event page mmap requested illegal size\n");
		return -EINVAL;
	}

	page = p->signal_page;
	if (!page) {
		/* Probably KFD bug, but mmap is user-accessible. */
		pr_debug("Signal page could not be found\n");
		return -EINVAL;
	}

	pfn = __pa(page->kernel_address);
	pfn >>= PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
		       | VM_DONTDUMP | VM_PFNMAP;

	pr_debug("Mapping signal page\n");
	pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
	pr_debug("     end user address    == 0x%08lx\n", vma->vm_end);
	pr_debug("     vm_flags            == 0x%08lX\n", vma->vm_flags);
	pr_debug("     size                == 0x%08lX\n",
		 vma->vm_end - vma->vm_start);

	page->user_address = (uint64_t __user *)vma->vm_start;

	/* mapping the page to user process */
	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);
	if (!ret)
		p->signal_mapped_size = vma->vm_end - vma->vm_start;
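Once remap_pfn_range() has mapped the signal page, user space can poll a slot directly; the slot is written concurrently by the signaler, so the read should be a single 64-bit atomic load. A hedged consumer-side sketch, assuming the page is mapped at some base address and the sentinel value used in the sketches above:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define UNSIGNALED UINT64_MAX	/* assumed sentinel, as in earlier sketches */

/* Poll one slot of a mapped signal page; the event ID is the slot index. */
static int slot_signaled(const _Atomic uint64_t *page, uint32_t event_id)
{
	return atomic_load_explicit(&page[event_id],
				    memory_order_relaxed) != UNSIGNALED;
}

int main(void)
{
	static _Atomic uint64_t fake_page[512];	/* stand-in for the mmap'd page */

	atomic_store(&fake_page[3], UNSIGNALED);
	printf("before signal: %d\n", slot_signaled(fake_page, 3));
	atomic_store(&fake_page[3], 1);		/* the "signaler" writes */
	printf("after signal:  %d\n", slot_signaled(fake_page, 3));
	return 0;
}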
/* in lookup_events_by_type_and_signal() */
	uint32_t id;

	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	idr_for_each_entry_continue(&p->event_idr, ev, id)
		if (ev->type == type) {
			dev_dbg(kfd_device,
				"Event found: id %X type %d",
				ev->event_id, ev->type);
			spin_lock(&ev->lock);
			set_event(ev);
			if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
				ev->memory_exception_data = *ev_data;
			spin_unlock(&ev->lock);
		}

	/* If no matching event is found, the driver may fall back to signaling
	 * the process; three log messages report p->lead_thread->pid and
	 * p->pasid, and two of them escalate to signals:
	 */
	send_sig(SIGSEGV, p->lead_thread, 0);

	send_sig(SIGTERM, p->lead_thread, 0);
/* in kfd_signal_iommu_event() */
	mm = get_task_mm(p->lead_thread);

	user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
	if (unlikely(user_gpu_id == -EINVAL)) {
		WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
		return;
	}

	if (vma && address >= vma->vm_start) {
		/* the fault hit a mapped VMA: refine the exception flags
		 * from the VMA protection bits
		 */
		if (is_write_requested && !(vma->vm_flags & VM_WRITE))
			memory_exception_data.failure.ReadOnly = 1;

		if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
			memory_exception_data.failure.NoExecute = 1;
	}
/* in kfd_signal_vm_fault_event() */
	uint32_t id;

	user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
	if (unlikely(user_gpu_id == -EINVAL)) {
		WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
		return;
	}

	memory_exception_data.va = (info->page_addr) << PAGE_SHIFT;
	memory_exception_data.failure.NotPresent =
		info->prot_valid ? 1 : 0;
	memory_exception_data.failure.NoExecute =
		info->prot_exec ? 1 : 0;
	memory_exception_data.failure.ReadOnly =
		info->prot_write ? 1 : 0;

	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	idr_for_each_entry_continue(&p->event_idr, ev, id)
		if (ev->type == KFD_EVENT_TYPE_MEMORY) {
			spin_lock(&ev->lock);
			ev->memory_exception_data = memory_exception_data;
			set_event(ev);
			spin_unlock(&ev->lock);
		}
/* in kfd_signal_reset_event() */
	uint32_t id, idx;
	int reset_cause = atomic_read(&dev->sram_ecc_flag) ?
			  KFD_HW_EXCEPTION_ECC :
			  KFD_HW_EXCEPTION_GPU_HANG;

	int user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);

	if (unlikely(user_gpu_id == -EINVAL)) {
		WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
		continue;
	}

	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	idr_for_each_entry_continue(&p->event_idr, ev, id) {
		if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
			spin_lock(&ev->lock);
			ev->hw_exception_data = hw_exception_data;
			ev->hw_exception_data.gpu_id = user_gpu_id;
			set_event(ev);
			spin_unlock(&ev->lock);
		}
		if (ev->type == KFD_EVENT_TYPE_MEMORY &&
		    reset_cause == KFD_HW_EXCEPTION_ECC) {
			spin_lock(&ev->lock);
			ev->memory_exception_data = memory_exception_data;
			ev->memory_exception_data.gpu_id = user_gpu_id;
			set_event(ev);
			spin_unlock(&ev->lock);
		}
	}
/* in kfd_signal_poison_consumed_event() */
	uint32_t id = KFD_FIRST_NONSIGNAL_EVENT_ID;

	user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
	if (unlikely(user_gpu_id == -EINVAL)) {
		WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
		return;
	}

	idr_for_each_entry_continue(&p->event_idr, ev, id) {
		if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
			spin_lock(&ev->lock);
			ev->hw_exception_data = hw_exception_data;
			set_event(ev);
			spin_unlock(&ev->lock);
		}

		if (ev->type == KFD_EVENT_TYPE_MEMORY) {
			spin_lock(&ev->lock);
			ev->memory_exception_data = memory_exception_data;
			set_event(ev);
			spin_unlock(&ev->lock);
		}
	}

	/* user application will handle SIGBUS signal */
	send_sig(SIGBUS, p->lead_thread, 0);