| /Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/subdev/fault/ |
| D | base.c |
      30  struct nvkm_fault *fault = container_of(event, typeof(*fault), event);  in nvkm_fault_ntfy_fini() local
      31  fault->func->buffer.intr(fault->buffer[index], false);  in nvkm_fault_ntfy_fini()
      37  struct nvkm_fault *fault = container_of(event, typeof(*fault), event);  in nvkm_fault_ntfy_init() local
      38  fault->func->buffer.intr(fault->buffer[index], true);  in nvkm_fault_ntfy_init()
      65  struct nvkm_fault *fault = nvkm_fault(subdev);  in nvkm_fault_intr() local
      66  return fault->func->intr(fault);  in nvkm_fault_intr()
      72  struct nvkm_fault *fault = nvkm_fault(subdev);  in nvkm_fault_fini() local
      73  if (fault->func->fini)  in nvkm_fault_fini()
      74  fault->func->fini(fault);  in nvkm_fault_fini()
      81  struct nvkm_fault *fault = nvkm_fault(subdev);  in nvkm_fault_init() local
      [all …]
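
The base.c hits above hinge on container_of(): the notify callbacks only receive a pointer to the embedded nvkm_event and recover the enclosing nvkm_fault from it. Below is a minimal, self-contained userspace sketch of that pattern; the struct names (my_event, my_fault) and the simplified macro are stand-ins for illustration, not the nouveau types.

```c
#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel macro: subtract the member offset
 * from the member's address to get the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct my_event { int index; };            /* stands in for nvkm_event */
struct my_fault {                          /* stands in for nvkm_fault */
	const char *name;
	struct my_event event;             /* embedded member */
};

static void ntfy_fini(struct my_event *event)
{
	/* Recover the outer object from the embedded event pointer. */
	struct my_fault *fault = container_of(event, struct my_fault, event);
	printf("disabling fault buffer interrupts for %s\n", fault->name);
}

int main(void)
{
	struct my_fault fault = { .name = "demo", .event = { .index = 0 } };
	ntfy_fini(&fault.event);   /* the callback only sees the embedded event */
	return 0;
}
```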
|
| D | gv100.c |
      33  struct nvkm_device *device = buffer->fault->subdev.device;  in gv100_fault_buffer_process()
      42  const u32 base = get * buffer->fault->func->buffer.entry_size;  in gv100_fault_buffer_process()
      76  struct nvkm_device *device = buffer->fault->subdev.device;  in gv100_fault_buffer_intr()
      87  struct nvkm_device *device = buffer->fault->subdev.device;  in gv100_fault_buffer_fini()
      95  struct nvkm_device *device = buffer->fault->subdev.device;  in gv100_fault_buffer_init()
      107  struct nvkm_device *device = buffer->fault->subdev.device;  in gv100_fault_buffer_info()
      120  struct nvkm_fault *fault = container_of(notify, typeof(*fault), nrpfb);  in gv100_fault_ntfy_nrpfb() local
      121  gv100_fault_buffer_process(fault->buffer[0]);  in gv100_fault_ntfy_nrpfb()
      126  gv100_fault_intr_fault(struct nvkm_fault *fault)  in gv100_fault_intr_fault() argument
      128  struct nvkm_subdev *subdev = &fault->subdev;  in gv100_fault_intr_fault()
      [all …]
|
| D | tu102.c |
      42  struct nvkm_device *device = buffer->fault->subdev.device;  in tu102_fault_buffer_fini()
      50  struct nvkm_device *device = buffer->fault->subdev.device;  in tu102_fault_buffer_init()
      62  struct nvkm_device *device = buffer->fault->subdev.device;  in tu102_fault_buffer_info()
      73  tu102_fault_intr_fault(struct nvkm_fault *fault)  in tu102_fault_intr_fault() argument
      75  struct nvkm_subdev *subdev = &fault->subdev;  in tu102_fault_intr_fault()
      99  tu102_fault_intr(struct nvkm_fault *fault)  in tu102_fault_intr() argument
      101  struct nvkm_subdev *subdev = &fault->subdev;  in tu102_fault_intr()
      106  tu102_fault_intr_fault(fault);  in tu102_fault_intr()
      112  if (fault->buffer[0]) {  in tu102_fault_intr()
      113  nvkm_event_send(&fault->event, 1, 0, NULL, 0);  in tu102_fault_intr()
      [all …]
|
| D | user.c |
      35  struct nvkm_device *device = buffer->fault->subdev.device;  in nvkm_ufault_map()
      48  *pevent = &buffer->fault->event;  in nvkm_ufault_ntfy()
      58  buffer->fault->func->buffer.fini(buffer);  in nvkm_ufault_fini()
      66  buffer->fault->func->buffer.init(buffer);  in nvkm_ufault_init()
      92  struct nvkm_fault *fault = device->fault;  in nvkm_ufault_new() local
      93  struct nvkm_fault_buffer *buffer = fault->buffer[fault->func->user.rp];  in nvkm_ufault_new()
|
| /Linux-v5.4/drivers/infiniband/hw/hfi1/ |
| D | fault.c |
      111  if (!ibd->fault->n_rxfaults[i] && !ibd->fault->n_txfaults[i])  in _fault_stats_seq_show()
      116  (unsigned long long)ibd->fault->n_rxfaults[i],  in _fault_stats_seq_show()
      117  (unsigned long long)ibd->fault->n_txfaults[i]);  in _fault_stats_seq_show()
      138  struct fault *fault = file->private_data;  in fault_opcodes_write() local
      180  bitmap_zero(fault->opcodes, sizeof(fault->opcodes) *  in fault_opcodes_write()
      190  clear_bit(i, fault->opcodes);  in fault_opcodes_write()
      192  set_bit(i, fault->opcodes);  in fault_opcodes_write()
      212  struct fault *fault = file->private_data;  in fault_opcodes_read() local
      213  size_t bitsize = sizeof(fault->opcodes) * BITS_PER_BYTE;  in fault_opcodes_read()
      221  bit = find_first_bit(fault->opcodes, bitsize);  in fault_opcodes_read()
      [all …]
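
The hfi1 matches show a debugfs-controlled opcode filter kept in a bitmap and manipulated with bitmap_zero()/set_bit()/clear_bit()/find_first_bit(). The sketch below reproduces that idea in plain C so it compiles outside the kernel; the helper names, the uint64_t word array and the 256-opcode width are assumptions for illustration, not the hfi1 definitions.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_OPCODES   256                       /* assumed filter width */
#define BITS_PER_WORD (8 * sizeof(uint64_t))
#define BITMAP_WORDS  (NUM_OPCODES / BITS_PER_WORD)

static uint64_t opcodes[BITMAP_WORDS];          /* the opcode filter bitmap */

static void set_bit(unsigned int bit, uint64_t *map)
{
	map[bit / BITS_PER_WORD] |= 1ULL << (bit % BITS_PER_WORD);
}

static void clear_bit(unsigned int bit, uint64_t *map)
{
	map[bit / BITS_PER_WORD] &= ~(1ULL << (bit % BITS_PER_WORD));
}

static int test_bit(unsigned int bit, const uint64_t *map)
{
	return (map[bit / BITS_PER_WORD] >> (bit % BITS_PER_WORD)) & 1;
}

int main(void)
{
	memset(opcodes, 0, sizeof(opcodes));    /* like bitmap_zero() */
	set_bit(0x64, opcodes);                 /* select opcode 0x64 for faulting */
	set_bit(0x65, opcodes);
	clear_bit(0x65, opcodes);               /* and drop it again */

	/* walk the bitmap and report every opcode still selected */
	for (unsigned int i = 0; i < NUM_OPCODES; i++)
		if (test_bit(i, opcodes))
			printf("faulting opcode 0x%x\n", i);
	return 0;
}
```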
|
| /Linux-v5.4/drivers/gpu/drm/nouveau/ |
| D | nouveau_svm.c |
      63  u8 fault;  member
      65  } **fault;  member
      406  struct nouveau_svm_fault *fault)  in nouveau_svm_fault_cancel_fault() argument
      408  nouveau_svm_fault_cancel(svm, fault->inst,  in nouveau_svm_fault_cancel_fault()
      409  fault->hub,  in nouveau_svm_fault_cancel_fault()
      410  fault->gpc,  in nouveau_svm_fault_cancel_fault()
      411  fault->client);  in nouveau_svm_fault_cancel_fault()
      446  struct nouveau_svm_fault *fault;  in nouveau_svm_fault_cache() local
      454  if (!buffer->fault[buffer->fault_nr]) {  in nouveau_svm_fault_cache()
      455  fault = kmalloc(sizeof(*fault), GFP_KERNEL);  in nouveau_svm_fault_cache()
      [all …]
|
| /Linux-v5.4/drivers/iommu/ |
| D | amd_iommu_v2.c |
      68  struct fault {  struct
      428  static void handle_fault_error(struct fault *fault)  in handle_fault_error() argument
      432  if (!fault->dev_state->inv_ppr_cb) {  in handle_fault_error()
      433  set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);  in handle_fault_error()
      437  status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,  in handle_fault_error()
      438  fault->pasid,  in handle_fault_error()
      439  fault->address,  in handle_fault_error()
      440  fault->flags);  in handle_fault_error()
      443  set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);  in handle_fault_error()
      446  set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);  in handle_fault_error()
      [all …]
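
handle_fault_error() above follows a common driver pattern: if no invalidation callback (inv_ppr_cb) is registered, the request is tagged invalid immediately; otherwise the callback runs and its status is mapped to a success/invalid tag. A minimal sketch of that dispatch, with made-up type and status names rather than the AMD IOMMU ones:

```c
#include <stdio.h>

enum tag_status { TAG_SUCCESS, TAG_INVALID };   /* stands in for PPR_SUCCESS/PPR_INVALID */

struct dev_state {
	/* optional callback a driver may register to resolve faults itself */
	int (*resolve_cb)(unsigned long address, unsigned int flags);
};

static void set_tag_status(int tag, enum tag_status s)
{
	printf("tag %d -> %s\n", tag, s == TAG_SUCCESS ? "success" : "invalid");
}

static void handle_fault_error(struct dev_state *dev, int tag,
			       unsigned long address, unsigned int flags)
{
	if (!dev->resolve_cb) {
		/* no handler registered: report the request as invalid */
		set_tag_status(tag, TAG_INVALID);
		return;
	}
	/* handler registered: let it decide, then map its result to a tag */
	if (dev->resolve_cb(address, flags) == 0)
		set_tag_status(tag, TAG_SUCCESS);
	else
		set_tag_status(tag, TAG_INVALID);
}

static int always_ok(unsigned long address, unsigned int flags)
{
	(void)address; (void)flags;
	return 0;
}

int main(void)
{
	struct dev_state without = { .resolve_cb = NULL };
	struct dev_state with = { .resolve_cb = always_ok };
	handle_fault_error(&without, 1, 0x1000, 0);  /* -> invalid */
	handle_fault_error(&with, 2, 0x1000, 0);     /* -> success */
	return 0;
}
```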
|
| /Linux-v5.4/arch/nios2/kernel/ |
| D | misaligned.c |
      72  unsigned int fault;  in handle_unaligned_c() local
      85  fault = 0;  in handle_unaligned_c()
      98  fault |= __get_user(d0, (u8 *)(addr+0));  in handle_unaligned_c()
      99  fault |= __get_user(d1, (u8 *)(addr+1));  in handle_unaligned_c()
      111  fault |= __put_user(d0, (u8 *)(addr+0));  in handle_unaligned_c()
      112  fault |= __put_user(d1, (u8 *)(addr+1));  in handle_unaligned_c()
      116  fault |= __get_user(d0, (u8 *)(addr+0));  in handle_unaligned_c()
      117  fault |= __get_user(d1, (u8 *)(addr+1));  in handle_unaligned_c()
      133  fault |= __put_user(d0, (u8 *)(addr+0));  in handle_unaligned_c()
      134  fault |= __put_user(d1, (u8 *)(addr+1));  in handle_unaligned_c()
      [all …]
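
The nios2 handler emulates a misaligned access as two byte-sized user accesses and ORs the return codes into one fault flag, so a single check at the end catches any failed access. A compilable userspace sketch of the same accumulation idiom, with a stand-in get_byte() instead of the kernel's __get_user():

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for __get_user(): returns 0 on success, non-zero on "fault". */
static int get_byte(uint8_t *dst, const uint8_t *addr)
{
	if (!addr)
		return -1;      /* pretend the access faulted */
	*dst = *addr;
	return 0;
}

/* Emulate a misaligned 16-bit little-endian load as two byte loads. */
static int load_u16_unaligned(uint16_t *out, const uint8_t *addr)
{
	uint8_t d0 = 0, d1 = 0;
	int fault = 0;

	fault |= get_byte(&d0, addr + 0);   /* any failure sets fault */
	fault |= get_byte(&d1, addr + 1);
	if (fault)
		return -1;                  /* one check covers both accesses */

	*out = (uint16_t)(d0 | (d1 << 8));
	return 0;
}

int main(void)
{
	uint8_t buf[] = { 0x34, 0x12, 0x00 };
	uint16_t v;
	if (load_u16_unaligned(&v, buf) == 0)
		printf("loaded 0x%04x\n", v);   /* prints 0x1234 */
	return 0;
}
```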
|
| /Linux-v5.4/arch/arc/kernel/ |
| D | unaligned.c |
      50  goto fault; \
      65  goto fault; \
      92  goto fault; \
      125  goto fault; \
      159  fault: state->fault = 1;  in fixup_load()
      179  goto fault;  in fixup_store()
      191  fault: state->fault = 1;  in fixup_store()
      225  if (state.fault)  in misaligned_fixup()
      226  goto fault;  in misaligned_fixup()
      230  goto fault;  in misaligned_fixup()
      [all …]
|
| /Linux-v5.4/arch/s390/mm/ |
| D | fault.c |
      322  vm_fault_t fault)  in do_fault_error() argument
      326  switch (fault) {  in do_fault_error()
      335  si_code = (fault == VM_FAULT_BADMAP) ?  in do_fault_error()
      351  if (fault & VM_FAULT_OOM) {  in do_fault_error()
      356  } else if (fault & VM_FAULT_SIGSEGV) {  in do_fault_error()
      362  } else if (fault & VM_FAULT_SIGBUS) {  in do_fault_error()
      395  vm_fault_t fault;  in do_exception() local
      415  fault = VM_FAULT_BADCONTEXT;  in do_exception()
      421  fault = VM_FAULT_BADMAP;  in do_exception()
      447  fault = VM_FAULT_BADMAP;  in do_exception()
      [all …]
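
The s390, parisc, m68k, unicore32 and powerpc fault.c hits all share the same shape: call handle_mm_fault(), then dispatch on the VM_FAULT_* bits of the returned vm_fault_t (retry, OOM, SIGSEGV, SIGBUS, ...). A self-contained sketch of that bitmask dispatch; the flag values here are invented for illustration and do not match the kernel's VM_FAULT_* definitions.

```c
#include <stdio.h>

/* Illustrative flag values only; the real VM_FAULT_* bits live in
 * include/linux/mm_types.h and differ from these. */
#define FAULT_OOM     (1u << 0)
#define FAULT_SIGSEGV (1u << 1)
#define FAULT_SIGBUS  (1u << 2)
#define FAULT_RETRY   (1u << 3)
#define FAULT_MAJOR   (1u << 4)
#define FAULT_ERROR   (FAULT_OOM | FAULT_SIGSEGV | FAULT_SIGBUS)

static void do_fault_error(unsigned int fault)
{
	if (fault & FAULT_OOM)
		printf("out of memory: invoke the OOM path\n");
	else if (fault & FAULT_SIGSEGV)
		printf("bad mapping: deliver SIGSEGV\n");
	else if (fault & FAULT_SIGBUS)
		printf("bus error: deliver SIGBUS\n");
}

static void handle(unsigned int fault)
{
	if (fault & FAULT_RETRY) {
		printf("retry the fault with the mmap lock re-taken\n");
		return;
	}
	if (fault & FAULT_ERROR) {
		do_fault_error(fault);
		return;
	}
	printf("fault handled%s\n", fault & FAULT_MAJOR ? " (major)" : "");
}

int main(void)
{
	handle(FAULT_MAJOR);            /* successful major fault */
	handle(FAULT_RETRY);            /* needs to be retried */
	handle(FAULT_SIGBUS);           /* error path */
	return 0;
}
```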
|
| /Linux-v5.4/mm/ |
| D | hmm.c |
      284  bool fault, bool write_fault,  in hmm_vma_walk_hole_() argument
      300  if (fault || write_fault) {  in hmm_vma_walk_hole_()
      310  return (fault || write_fault) ? -EBUSY : 0;  in hmm_vma_walk_hole_()
      315  bool *fault, bool *write_fault)  in hmm_pte_need_fault() argument
      342  *fault = true;  in hmm_pte_need_fault()
      348  *fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);  in hmm_pte_need_fault()
      353  *fault = true;  in hmm_pte_need_fault()
      359  uint64_t cpu_flags, bool *fault,  in hmm_range_need_fault() argument
      365  *fault = *write_fault = false;  in hmm_range_need_fault()
      369  *fault = *write_fault = false;  in hmm_range_need_fault()
      [all …]
|
| /Linux-v5.4/arch/mips/kernel/ |
| D | unaligned.c |
      944  goto fault;  in emulate_load_store_insn()
      953  goto fault;  in emulate_load_store_insn()
      979  goto fault;  in emulate_load_store_insn()
      992  goto fault;  in emulate_load_store_insn()
      1005  goto fault;  in emulate_load_store_insn()
      1020  goto fault;  in emulate_load_store_insn()
      1033  goto fault;  in emulate_load_store_insn()
      1058  goto fault;  in emulate_load_store_insn()
      1077  goto fault;  in emulate_load_store_insn()
      1096  goto fault;  in emulate_load_store_insn()
      [all …]
|
| /Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ |
| D | gk20a.c |
      29  .intr.fault = gf100_fifo_intr_fault,
      31  .fault.access = gk104_fifo_fault_access,
      32  .fault.engine = gk104_fifo_fault_engine,
      33  .fault.reason = gk104_fifo_fault_reason,
      34  .fault.hubclient = gk104_fifo_fault_hubclient,
      35  .fault.gpcclient = gk104_fifo_fault_gpcclient,
|
| D | gm20b.c |
      29  .intr.fault = gm107_fifo_intr_fault,
      31  .fault.access = gk104_fifo_fault_access,
      32  .fault.engine = gm107_fifo_fault_engine,
      33  .fault.reason = gk104_fifo_fault_reason,
      34  .fault.hubclient = gk104_fifo_fault_hubclient,
      35  .fault.gpcclient = gk104_fifo_fault_gpcclient,
|
| D | gp10b.c |
      29  .intr.fault = gp100_fifo_intr_fault,
      31  .fault.access = gk104_fifo_fault_access,
      32  .fault.engine = gp100_fifo_fault_engine,
      33  .fault.reason = gk104_fifo_fault_reason,
      34  .fault.hubclient = gk104_fifo_fault_hubclient,
      35  .fault.gpcclient = gk104_fifo_fault_gpcclient,
|
| D | gm200.c |
      45  .intr.fault = gm107_fifo_intr_fault,
      47  .fault.access = gk104_fifo_fault_access,
      48  .fault.engine = gm107_fifo_fault_engine,
      49  .fault.reason = gk104_fifo_fault_reason,
      50  .fault.hubclient = gk104_fifo_fault_hubclient,
      51  .fault.gpcclient = gk104_fifo_fault_gpcclient,
|
| D | gk110.c |
      51  .intr.fault = gf100_fifo_intr_fault,
      53  .fault.access = gk104_fifo_fault_access,
      54  .fault.engine = gk104_fifo_fault_engine,
      55  .fault.reason = gk104_fifo_fault_reason,
      56  .fault.hubclient = gk104_fifo_fault_hubclient,
      57  .fault.gpcclient = gk104_fifo_fault_gpcclient,
|
| D | gk208.c |
      48  .intr.fault = gf100_fifo_intr_fault,
      50  .fault.access = gk104_fifo_fault_access,
      51  .fault.engine = gk104_fifo_fault_engine,
      52  .fault.reason = gk104_fifo_fault_reason,
      53  .fault.hubclient = gk104_fifo_fault_hubclient,
      54  .fault.gpcclient = gk104_fifo_fault_gpcclient,
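
Each of the fifo files above fills in the same per-generation ops structure, pointing its .intr.fault and .fault.* hooks at shared gf100/gk104/gm107/gp100 implementations. The sketch below shows that const function-pointer-table pattern in self-contained form; the struct layout and names (fifo_func, chip_a_*, chip_b_*) are invented for illustration, not nouveau's nvkm_fifo_func.

```c
#include <stdio.h>

/* A per-generation ops table: shared implementations are reused by
 * pointing several chips' tables at the same functions. */
struct fifo_func {
	void (*intr_fault)(void);
	const char *(*fault_reason)(int code);
};

static void chip_a_intr_fault(void) { printf("chip A fault interrupt\n"); }
static void chip_b_intr_fault(void) { printf("chip B fault interrupt\n"); }

static const char *common_fault_reason(int code)
{
	return code == 0 ? "pde" : "unknown";
}

/* Two "generations" sharing the reason decoder but not the IRQ handler. */
static const struct fifo_func chip_a_fifo = {
	.intr_fault   = chip_a_intr_fault,
	.fault_reason = common_fault_reason,
};

static const struct fifo_func chip_b_fifo = {
	.intr_fault   = chip_b_intr_fault,
	.fault_reason = common_fault_reason,
};

static void handle_fault(const struct fifo_func *func, int code)
{
	func->intr_fault();                                  /* generation-specific */
	printf("reason: %s\n", func->fault_reason(code));    /* shared helper */
}

int main(void)
{
	handle_fault(&chip_a_fifo, 0);
	handle_fault(&chip_b_fifo, 1);
	return 0;
}
```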
|
| /Linux-v5.4/arch/parisc/mm/ |
| D | fault.c |
      266  vm_fault_t fault = 0;  in do_page_fault() local
      305  fault = handle_mm_fault(vma, address, flags);  in do_page_fault()
      307  if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))  in do_page_fault()
      310  if (unlikely(fault & VM_FAULT_ERROR)) {  in do_page_fault()
      316  if (fault & VM_FAULT_OOM)  in do_page_fault()
      318  else if (fault & VM_FAULT_SIGSEGV)  in do_page_fault()
      320  else if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|  in do_page_fault()
      326  if (fault & VM_FAULT_MAJOR)  in do_page_fault()
      330  if (fault & VM_FAULT_RETRY) {  in do_page_fault()
      391  if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {  in do_page_fault()
      [all …]
|
| /Linux-v5.4/arch/m68k/mm/ |
| D | fault.c |
      73  vm_fault_t fault;  in do_page_fault() local
      138  fault = handle_mm_fault(vma, address, flags);  in do_page_fault()
      139  pr_debug("handle_mm_fault returns %x\n", fault);  in do_page_fault()
      141  if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))  in do_page_fault()
      144  if (unlikely(fault & VM_FAULT_ERROR)) {  in do_page_fault()
      145  if (fault & VM_FAULT_OOM)  in do_page_fault()
      147  else if (fault & VM_FAULT_SIGSEGV)  in do_page_fault()
      149  else if (fault & VM_FAULT_SIGBUS)  in do_page_fault()
      160  if (fault & VM_FAULT_MAJOR)  in do_page_fault()
      164  if (fault & VM_FAULT_RETRY) {  in do_page_fault()
|
| /Linux-v5.4/drivers/gpu/drm/i915/gt/ |
| D | intel_gt.c |
      101  u32 fault;  in gen6_check_faults() local
      104  fault = GEN6_RING_FAULT_REG_READ(engine);  in gen6_check_faults()
      105  if (fault & RING_FAULT_VALID) {  in gen6_check_faults()
      111  fault & PAGE_MASK,  in gen6_check_faults()
      112  fault & RING_FAULT_GTTSEL_MASK ?  in gen6_check_faults()
      114  RING_FAULT_SRCID(fault),  in gen6_check_faults()
      115  RING_FAULT_FAULT_TYPE(fault));  in gen6_check_faults()
      124  u32 fault;  in gen8_check_faults() local
      136  fault = intel_uncore_read(uncore, fault_reg);  in gen8_check_faults()
      137  if (fault & RING_FAULT_VALID) {  in gen8_check_faults()
      [all …]
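
gen6_check_faults()/gen8_check_faults() read a hardware fault register, test a valid bit, and then pull the page address, source ID and fault type out of the same 32-bit word with masks and shifts. A standalone sketch of that decoding, using invented bit positions rather than the i915 register layout:

```c
#include <stdint.h>
#include <stdio.h>

/* Invented layout for illustration: [31] valid, [30:24] source id,
 * [23:22] fault type, [21:12] page address bits. */
#define FAULT_VALID        (1u << 31)
#define FAULT_SRCID(x)     (((x) >> 24) & 0x7f)
#define FAULT_TYPE(x)      (((x) >> 22) & 0x3)
#define FAULT_PAGE_MASK    0x003ff000u

static void check_fault(uint32_t fault)
{
	if (!(fault & FAULT_VALID))
		return;                          /* nothing latched */

	printf("fault at page 0x%08x, source %u, type %u\n",
	       fault & FAULT_PAGE_MASK,
	       FAULT_SRCID(fault),
	       FAULT_TYPE(fault));
}

int main(void)
{
	/* Simulate a latched register value instead of an MMIO read. */
	uint32_t reg = FAULT_VALID | (0x12u << 24) | (0x1u << 22) | 0x00045000u;
	check_fault(reg);
	check_fault(0);      /* valid bit clear: ignored */
	return 0;
}
```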
|
| /Linux-v5.4/arch/unicore32/mm/ |
| D | fault.c |
      166  vm_fault_t fault;  in __do_pf() local
      169  fault = VM_FAULT_BADMAP;  in __do_pf()
      181  fault = VM_FAULT_BADACCESS;  in __do_pf()
      189  fault = handle_mm_fault(vma, addr & PAGE_MASK, flags);  in __do_pf()
      190  return fault;  in __do_pf()
      196  return fault;  in __do_pf()
      204  vm_fault_t fault;  in do_pf() local
      247  fault = __do_pf(mm, addr, fsr, flags, tsk);  in do_pf()
      253  if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))  in do_pf()
      256  if (!(fault & VM_FAULT_ERROR) && (flags & FAULT_FLAG_ALLOW_RETRY)) {  in do_pf()
      [all …]
|
| /Linux-v5.4/arch/powerpc/mm/ |
| D | fault.c |
      143  vm_fault_t fault)  in do_sigbus() argument
      150  if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {  in do_sigbus()
      156  if (fault & VM_FAULT_HWPOISON_LARGE)  in do_sigbus()
      157  lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));  in do_sigbus()
      158  if (fault & VM_FAULT_HWPOISON)  in do_sigbus()
      171  vm_fault_t fault)  in mm_fault_error() argument
      181  if (fault & VM_FAULT_OOM) {  in mm_fault_error()
      190  if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|  in mm_fault_error()
      192  return do_sigbus(regs, addr, fault);  in mm_fault_error()
      193  else if (fault & VM_FAULT_SIGSEGV)  in mm_fault_error()
      [all …]
|
| /Linux-v5.4/include/asm-generic/ |
| D | termios-base.h |
      21  goto fault;  in user_termio_to_kernel_termios()
      25  goto fault;  in user_termio_to_kernel_termios()
      29  goto fault;  in user_termio_to_kernel_termios()
      33  goto fault;  in user_termio_to_kernel_termios()
      37  goto fault;  in user_termio_to_kernel_termios()
      40  goto fault;  in user_termio_to_kernel_termios()
      44  fault:  in user_termio_to_kernel_termios()
|
| /Linux-v5.4/Documentation/fault-injection/ |
| D | fault-injection.rst |
      8  Available fault injection capabilities
      21  injects futex deadlock and uaddr fault errors.
      40  - NVMe fault injection
      48  Configure fault-injection capabilities behavior
      54  fault-inject-debugfs kernel module provides some debugfs entries for runtime
      55  configuration of fault-injection capabilities.
      93  to debug the problems revealed by fault injection.
      192  that the fault setup with a previous write to this file was injected.
      193  A positive integer N indicates that the fault wasn't yet injected.
      202  How to add new fault injection capability
      [all …]
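
Lines 192-193 of fault-injection.rst refer to the per-task fail-nth interface: a process writes N to /proc/self/task/<tid>/fail-nth so that the Nth subsequent fault-injectable call made by that task fails, then reads the file back, where 0 means the fault was injected and a positive value means it was not reached. A hedged userspace sketch of that workflow; it assumes a kernel built with CONFIG_FAULT_INJECTION and at least one failure site (e.g. failslab) enabled, and which call actually fails depends on that configuration.

```c
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Write a string to a file and close it; returns 0 on success. */
static int write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ssize_t n = write(fd, s, strlen(s));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	char path[64], buf[32];
	long tid = syscall(SYS_gettid);

	snprintf(path, sizeof(path), "/proc/self/task/%ld/fail-nth", tid);

	/* Arm fail-nth: the 1st fault-injectable call made by this task
	 * should be forced to fail. */
	if (write_str(path, "1") < 0) {
		perror("fail-nth (kernel without CONFIG_FAULT_INJECTION?)");
		return 1;
	}

	/* Exercise the code under test: any syscall that crosses an
	 * enabled failure site counts toward the Nth call. */
	int fd = open("/dev/null", O_RDONLY);
	if (fd >= 0)
		close(fd);

	/* Read the counter back: "0" means the fault was injected,
	 * a positive value means the Nth call was never reached. */
	fd = open(path, O_RDONLY);
	if (fd >= 0) {
		ssize_t n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("fail-nth now reads: %s", buf);
		}
		close(fd);
	}
	return 0;
}
```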
|