/Linux-v5.10/drivers/gpu/drm/nouveau/nvkm/subdev/fault/ |
D | base.c |
    30  struct nvkm_fault *fault = container_of(event, typeof(*fault), event);  in nvkm_fault_ntfy_fini() local
    31  fault->func->buffer.intr(fault->buffer[index], false);  in nvkm_fault_ntfy_fini()
    37  struct nvkm_fault *fault = container_of(event, typeof(*fault), event);  in nvkm_fault_ntfy_init() local
    38  fault->func->buffer.intr(fault->buffer[index], true);  in nvkm_fault_ntfy_init()
    65  struct nvkm_fault *fault = nvkm_fault(subdev);  in nvkm_fault_intr() local
    66  return fault->func->intr(fault);  in nvkm_fault_intr()
    72  struct nvkm_fault *fault = nvkm_fault(subdev);  in nvkm_fault_fini() local
    73  if (fault->func->fini)  in nvkm_fault_fini()
    74  fault->func->fini(fault);  in nvkm_fault_fini()
    81  struct nvkm_fault *fault = nvkm_fault(subdev);  in nvkm_fault_init() local
    [all …]
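The base.c hits above show the pattern this subdev uses throughout: the event embedded in struct nvkm_fault is recovered with container_of(), and the per-buffer fault interrupt is toggled through the chip-specific func->buffer.intr hook. A minimal sketch of that container_of pattern follows, using simplified stand-in types (the demo_* names are illustrative; the real nvkm structures carry many more fields):

    #include <linux/kernel.h>	/* container_of() */
    #include <linux/types.h>

    /* Simplified stand-ins for the nvkm structures referenced above. */
    struct demo_event {
        int index_nr;
    };

    struct demo_fault_buffer {
        int id;
    };

    struct demo_fault_func {
        struct {
            void (*intr)(struct demo_fault_buffer *, bool enable);
        } buffer;
    };

    struct demo_fault {
        const struct demo_fault_func *func;
        struct demo_fault_buffer *buffer[2];
        struct demo_event event;	/* embedded, as in struct nvkm_fault */
    };

    /*
     * Mirrors nvkm_fault_ntfy_init()/nvkm_fault_ntfy_fini(): recover the
     * containing object from the embedded event, then enable or disable
     * the fault-buffer interrupt through the chip-specific hook.
     */
    static void demo_fault_ntfy_set(struct demo_event *event, int index, bool enable)
    {
        struct demo_fault *fault = container_of(event, struct demo_fault, event);

        fault->func->buffer.intr(fault->buffer[index], enable);
    }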
|
D | gv100.c |
    33  struct nvkm_device *device = buffer->fault->subdev.device;  in gv100_fault_buffer_process()
    42  const u32 base = get * buffer->fault->func->buffer.entry_size;  in gv100_fault_buffer_process()
    76  struct nvkm_device *device = buffer->fault->subdev.device;  in gv100_fault_buffer_intr()
    87  struct nvkm_device *device = buffer->fault->subdev.device;  in gv100_fault_buffer_fini()
    95  struct nvkm_device *device = buffer->fault->subdev.device;  in gv100_fault_buffer_init()
    107  struct nvkm_device *device = buffer->fault->subdev.device;  in gv100_fault_buffer_info()
    120  struct nvkm_fault *fault = container_of(notify, typeof(*fault), nrpfb);  in gv100_fault_ntfy_nrpfb() local
    121  gv100_fault_buffer_process(fault->buffer[0]);  in gv100_fault_ntfy_nrpfb()
    126  gv100_fault_intr_fault(struct nvkm_fault *fault)  in gv100_fault_intr_fault() argument
    128  struct nvkm_subdev *subdev = &fault->subdev;  in gv100_fault_intr_fault()
    [all …]
|
D | tu102.c |
    42  struct nvkm_device *device = buffer->fault->subdev.device;  in tu102_fault_buffer_fini()
    50  struct nvkm_device *device = buffer->fault->subdev.device;  in tu102_fault_buffer_init()
    62  struct nvkm_device *device = buffer->fault->subdev.device;  in tu102_fault_buffer_info()
    73  tu102_fault_intr_fault(struct nvkm_fault *fault)  in tu102_fault_intr_fault() argument
    75  struct nvkm_subdev *subdev = &fault->subdev;  in tu102_fault_intr_fault()
    99  tu102_fault_intr(struct nvkm_fault *fault)  in tu102_fault_intr() argument
    101  struct nvkm_subdev *subdev = &fault->subdev;  in tu102_fault_intr()
    106  tu102_fault_intr_fault(fault);  in tu102_fault_intr()
    112  if (fault->buffer[0]) {  in tu102_fault_intr()
    113  nvkm_event_send(&fault->event, 1, 0, NULL, 0);  in tu102_fault_intr()
    [all …]
|
/Linux-v5.10/drivers/infiniband/hw/hfi1/ |
D | fault.c |
    55  #include "fault.h"
    111  if (!ibd->fault->n_rxfaults[i] && !ibd->fault->n_txfaults[i])  in _fault_stats_seq_show()
    116  (unsigned long long)ibd->fault->n_rxfaults[i],  in _fault_stats_seq_show()
    117  (unsigned long long)ibd->fault->n_txfaults[i]);  in _fault_stats_seq_show()
    138  struct fault *fault = file->private_data;  in fault_opcodes_write() local
    180  bitmap_zero(fault->opcodes, sizeof(fault->opcodes) *  in fault_opcodes_write()
    190  clear_bit(i, fault->opcodes);  in fault_opcodes_write()
    192  set_bit(i, fault->opcodes);  in fault_opcodes_write()
    212  struct fault *fault = file->private_data;  in fault_opcodes_read() local
    213  size_t bitsize = sizeof(fault->opcodes) * BITS_PER_BYTE;  in fault_opcodes_read()
    [all …]
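fault_opcodes_write() and fault_opcodes_read() above treat fault->opcodes as a bitmap indexed by IB opcode, cleared and updated with the generic bitmap helpers. A hedged sketch of the same filtering idea (the demo_* names are illustrative, not hfi1's):

    #include <linux/bitmap.h>
    #include <linux/bitops.h>
    #include <linux/types.h>

    #define DEMO_NUM_OPCODES 256

    /* Illustrative filter: one bit per opcode that should be faulted. */
    struct demo_fault_filter {
        DECLARE_BITMAP(opcodes, DEMO_NUM_OPCODES);
    };

    static void demo_filter_reset(struct demo_fault_filter *f)
    {
        bitmap_zero(f->opcodes, DEMO_NUM_OPCODES);
    }

    static void demo_filter_update(struct demo_fault_filter *f,
                                   unsigned int op, bool enable)
    {
        if (op >= DEMO_NUM_OPCODES)
            return;
        if (enable)
            set_bit(op, f->opcodes);
        else
            clear_bit(op, f->opcodes);
    }

    /* Packet path asks: should this opcode have a fault injected? */
    static bool demo_filter_match(const struct demo_fault_filter *f, unsigned int op)
    {
        return op < DEMO_NUM_OPCODES && test_bit(op, f->opcodes);
    }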
|
/Linux-v5.10/arch/powerpc/mm/ |
D | fault.c |
    6  * Derived from "arch/i386/mm/fault.c"
    109  * 5. T1 : enters fault handler, takes mmap_lock, etc...  in bad_access_pkey()
    137  vm_fault_t fault)  in do_sigbus() argument
    144  if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {  in do_sigbus()
    147  pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",  in do_sigbus()
    150  if (fault & VM_FAULT_HWPOISON_LARGE)  in do_sigbus()
    151  lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));  in do_sigbus()
    152  if (fault & VM_FAULT_HWPOISON)  in do_sigbus()
    165  vm_fault_t fault)  in mm_fault_error() argument
    168  * Kernel page fault interrupted by SIGKILL. We have no reason to  in mm_fault_error()
    [all …]
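do_sigbus() above shows the usual recipe for turning a VM_FAULT_HWPOISON result into a SIGBUS that also tells userspace how large the poisoned region is: PAGE_SHIFT for an ordinary page, or the huge-page shift recovered via VM_FAULT_GET_HINDEX() for VM_FAULT_HWPOISON_LARGE. A hedged sketch of just that decoding step (the helper name is illustrative):

    #include <linux/mm.h>
    #include <linux/hugetlb.h>
    #include <linux/sched/signal.h>

    /*
     * Illustrative helper mirroring the do_sigbus() logic above: report a
     * memory-failure fault to the current task.  lsb describes the size of
     * the poisoned region as a power of two.
     */
    static void demo_report_hwpoison(unsigned long address, vm_fault_t fault)
    {
        unsigned int lsb = 0;

        if (fault & VM_FAULT_HWPOISON_LARGE)
            lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
        if (fault & VM_FAULT_HWPOISON)
            lsb = PAGE_SHIFT;

        force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
    }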
|
/Linux-v5.10/drivers/gpu/drm/nouveau/ |
D | nouveau_svm.c |
    63  u8 fault;  member
    65  } **fault;  member
    155  * page fault) and maybe some other commands.  in nouveau_svmm_bind()
    369  /* Issue fault replay for GPU to retry accesses that faulted previously. */
    380  /* Cancel a replayable fault that could not be handled.
    382  * Cancelling the fault will trigger recovery to reset the engine
    402  struct nouveau_svm_fault *fault)  in nouveau_svm_fault_cancel_fault() argument
    404  nouveau_svm_fault_cancel(svm, fault->inst,  in nouveau_svm_fault_cancel_fault()
    405  fault->hub,  in nouveau_svm_fault_cancel_fault()
    406  fault->gpc,  in nouveau_svm_fault_cancel_fault()
    [all …]
|
/Linux-v5.10/Documentation/fault-injection/ |
D | fault-injection.rst |
    2  Fault injection capabilities infrastructure
    8  Available fault injection capabilities
    25  injects futex deadlock and uaddr fault errors.
    44  - NVMe fault injection
    52  Configure fault-injection capabilities behavior
    58  fault-inject-debugfs kernel module provides some debugfs entries for runtime
    59  configuration of fault-injection capabilities.
    97  to debug the problems revealed by fault injection.
    197  that the fault setup with a previous write to this file was injected.
    198  A positive integer N indicates that the fault wasn't yet injected.
    [all …]
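The document describes the generic fault-injection infrastructure whose knobs live under debugfs. On the kernel side a capability is little more than a struct fault_attr plus a should_fail() check at the point where an error should be injected. A minimal sketch, assuming CONFIG_FAULT_INJECTION and CONFIG_FAULT_INJECTION_DEBUG_FS are enabled and using a made-up fail_demo_io capability:

    #include <linux/fault-inject.h>
    #include <linux/debugfs.h>
    #include <linux/init.h>
    #include <linux/errno.h>

    /*
     * One knob for a hypothetical subsystem; with the debugfs support
     * enabled it shows up as /sys/kernel/debug/fail_demo_io/ with the
     * usual probability/interval/times/space attributes.
     */
    static DECLARE_FAULT_ATTR(fail_demo_io);

    static int __init demo_fault_debugfs_init(void)
    {
        fault_create_debugfs_attr("fail_demo_io", NULL, &fail_demo_io);
        return 0;
    }
    late_initcall(demo_fault_debugfs_init);

    /* Called on the hypothetical subsystem's I/O submission path. */
    static int demo_submit_io(size_t bytes)
    {
        if (should_fail(&fail_demo_io, bytes))
            return -EIO;	/* injected failure */

        /* ... real submission would happen here ... */
        return 0;
    }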
|
/Linux-v5.10/drivers/iommu/amd/ |
D | iommu_v2.c |
    66  struct fault {  struct
    426  static void handle_fault_error(struct fault *fault)  in handle_fault_error() argument
    430  if (!fault->dev_state->inv_ppr_cb) {  in handle_fault_error()
    431  set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);  in handle_fault_error()
    435  status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,  in handle_fault_error()
    436  fault->pasid,  in handle_fault_error()
    437  fault->address,  in handle_fault_error()
    438  fault->flags);  in handle_fault_error()
    441  set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);  in handle_fault_error()
    444  set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);  in handle_fault_error()
    [all …]
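handle_fault_error() above forwards an unrecoverable peripheral page request to the device driver's inv_ppr_cb callback and translates the verdict into a PRI tag status. A simplified sketch of that dispatch, with stand-in types and status values rather than the driver's real internal ones:

    #include <linux/pci.h>
    #include <linux/types.h>

    /* Illustrative stand-ins; the real definitions are internal to iommu_v2.c. */
    enum demo_pri_status { DEMO_PRI_SUCCESS, DEMO_PRI_INVALID };

    struct demo_device_state {
        struct pci_dev *pdev;
        /* Driver-supplied handler for page requests the IOMMU core could
         * not satisfy; returns a demo_pri_status-style verdict. */
        int (*inv_ppr_cb)(struct pci_dev *pdev, u32 pasid,
                          unsigned long address, u16 flags);
    };

    struct demo_fault {
        struct demo_device_state *dev_state;
        u32 pasid;
        unsigned long address;
        u16 flags;
        u16 tag;
    };

    static void demo_set_pri_tag_status(struct demo_fault *f, int status)
    {
        /* Would write the PRI response for f->tag back to the IOMMU. */
    }

    /* Mirrors the handle_fault_error() flow shown above (simplified). */
    static void demo_handle_fault_error(struct demo_fault *fault)
    {
        int status;

        if (!fault->dev_state->inv_ppr_cb) {
            demo_set_pri_tag_status(fault, DEMO_PRI_INVALID);
            return;
        }

        status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
                                              fault->pasid,
                                              fault->address,
                                              fault->flags);
        demo_set_pri_tag_status(fault, status == DEMO_PRI_SUCCESS ?
                                DEMO_PRI_SUCCESS : DEMO_PRI_INVALID);
    }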
|
/Linux-v5.10/arch/arm/mm/ |
D | fsr-3level.c |
    7  { do_bad, SIGBUS, 0, "reserved translation fault" },
    8  { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
    9  { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
    10  { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
    11  { do_bad, SIGBUS, 0, "reserved access flag fault" },
    12  { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
    13  { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
    14  { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
    15  { do_bad, SIGBUS, 0, "reserved permission fault" },
    16  { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
    [all …]
|
D | fsr-2level.c |
    12  { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
    14  { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
    16  { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
    18  { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
    20  { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
    22  { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
    50  { do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" },
    52  { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
    53  { do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" },
    54  { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
    [all …]
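Both fsr-2level.c and fsr-3level.c fill a table indexed by the decoded Fault Status Register value; each entry carries a handler plus the signal, si_code and name to use when the handler cannot resolve the fault. A hedged sketch of that entry shape and its dispatch (the demo_* definitions are illustrative, not the exact arch/arm ones):

    #include <linux/signal.h>
    #include <linux/ptrace.h>

    /*
     * Illustrative entry shape for the tables above: a handler plus the
     * signal/si_code delivered when the handler cannot resolve the fault,
     * and a name used in diagnostics.
     */
    struct demo_fsr_info {
        int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
        int sig;
        int code;
        const char *name;
    };

    static int demo_do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
    {
        return 1;	/* "not handled": caller falls back to sig/code */
    }

    static const struct demo_fsr_info demo_fsr_table[] = {
        { demo_do_bad, SIGBUS, 0, "reserved translation fault" },
        /* ... one entry per decoded FSR value, as in the tables above ... */
    };

    /* Dispatch: index by the decoded status bits (idx assumed in range),
     * fall back to delivering the signal when the handler fails. */
    static void demo_fsr_dispatch(unsigned int idx, unsigned long addr,
                                  unsigned int fsr, struct pt_regs *regs)
    {
        const struct demo_fsr_info *inf = &demo_fsr_table[idx];

        if (!inf->fn(addr, fsr, regs))
            return;		/* handled */
        /* Otherwise deliver inf->sig with si_code inf->code, citing inf->name. */
    }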
|
/Linux-v5.10/arch/powerpc/platforms/powernv/ |
D | vas-fault.c |
    3  * VAS Fault handling.
    21  * The maximum FIFO size for fault window can be 8MB
    23  * instance will be having fault window.
    56  * Whereas if NX encounters page fault, the kernel will handle the
    57  * fault and update CSB with translation error.
    104  * error and fault address. If csb_addr passed by user space is  in update_csb()
    179  pr_err("Fault fifo size %d, Max crbs %d\n", vinst->fault_fifo_size,  in dump_fifo()
    183  pr_err("Fault FIFO Dump:\n");  in dump_fifo()
    191  * Process valid CRBs in fault FIFO.
    194  * request buffers, raises interrupt on the CPU to handle the fault.
    [all …]
|
/Linux-v5.10/arch/microblaze/mm/ |
D | fault.c |
    2  * arch/microblaze/mm/fault.c
    6  * Derived from "arch/ppc/mm/fault.c"
    9  * Derived from "arch/i386/mm/fault.c"
    71  /* Are we prepared to handle this fault? */  in bad_page_fault()
    83  * The error_code parameter is ESR for a data fault,
    84  * 0 for an instruction fault.
    93  vm_fault_t fault;  in do_page_fault() local
    115  pr_emerg("Page fault in user mode with faulthandler_disabled(), mm = %p\n",  in do_page_fault()
    119  die("Weird page fault", regs, SIGSEGV);  in do_page_fault()
    130  * erroneous fault occurring in a code path which already holds mmap_lock  in do_page_fault()
    [all …]
|
/Linux-v5.10/arch/arm64/mm/ |
D | fault.c |
    3  * Based on arch/arm/mm/fault.c
    269  * If we now have a valid translation, treat the translation fault as  in is_spurious_el1_translation_fault()
    276  * If we got a different type of fault from the AT instruction,  in is_spurious_el1_translation_fault()
    277  * treat the translation fault as spurious.  in is_spurious_el1_translation_fault()
    305  * Are we prepared to handle this kernel fault?  in __do_kernel_fault()
    312  "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr))  in __do_kernel_fault()
    340  * an alignment fault not caused by the memory type would take  in set_thread_esr()
    341  * precedence over translation fault for a real access to empty  in set_thread_esr()
    342  * space. Unfortunately we can't easily distinguish "alignment fault  in set_thread_esr()
    343  * not caused by memory type" from "alignment fault caused by memory  in set_thread_esr()
    [all …]
|
/Linux-v5.10/arch/parisc/mm/ |
D | fault.c |
    44  * the instruction has generated some sort of a memory access fault).
    104  * Data TLB miss fault/data page fault  in parisc_acctyp()
    197  [6] "Instruction TLB miss fault",
    206  [15] "Data TLB miss fault",
    207  [16] "Non-access ITLB miss fault",
    208  [17] "Non-access DTLB miss fault",
    267  vm_fault_t fault = 0;  in do_page_fault() local
    302  * If for any reason at all we couldn't handle the fault, make  in do_page_fault()
    304  * fault.  in do_page_fault()
    307  fault = handle_mm_fault(vma, address, flags, regs);  in do_page_fault()
    [all …]
|
/Linux-v5.10/arch/powerpc/lib/ |
D | checksum_32.S |
    109  EX_TABLE(8 ## n ## 0b, fault); \
    110  EX_TABLE(8 ## n ## 1b, fault); \
    111  EX_TABLE(8 ## n ## 2b, fault); \
    112  EX_TABLE(8 ## n ## 3b, fault); \
    113  EX_TABLE(8 ## n ## 4b, fault); \
    114  EX_TABLE(8 ## n ## 5b, fault); \
    115  EX_TABLE(8 ## n ## 6b, fault); \
    116  EX_TABLE(8 ## n ## 7b, fault);
    243  fault:  label
    247  EX_TABLE(70b, fault);
    [all …]
|
/Linux-v5.10/arch/m68k/mm/ |
D | fault.c |
    3  * linux/arch/m68k/mm/fault.c
    62  * bit 0 == 0 means no page found, 1 means protection fault
    73  vm_fault_t fault;  in do_page_fault() local
    76  pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",  in do_page_fault()
    81  * context, we must not take the fault..  in do_page_fault()
    135  * If for any reason at all we couldn't handle the fault,  in do_page_fault()
    137  * the fault.  in do_page_fault()
    140  fault = handle_mm_fault(vma, address, flags, regs);  in do_page_fault()
    141  pr_debug("handle_mm_fault returns %x\n", fault);  in do_page_fault()
    143  if (fault_signal_pending(fault, regs))  in do_page_fault()
    [all …]
|
/Linux-v5.10/arch/hexagon/mm/ |
D | vm_fault.c |
    3  * Memory fault handling for Hexagon
    9  * Page fault handling for the Hexagon Virtual Machine.
    34  * Canonical page fault handler
    42  vm_fault_t fault;  in do_page_fault() local
    48  * then must not take the fault.  in do_page_fault()
    94  fault = handle_mm_fault(vma, address, flags, regs);  in do_page_fault()
    96  if (fault_signal_pending(fault, regs))  in do_page_fault()
    100  if (likely(!(fault & VM_FAULT_ERROR))) {  in do_page_fault()
    102  if (fault & VM_FAULT_RETRY) {  in do_page_fault()
    118  if (fault & VM_FAULT_OOM) {  in do_page_fault()
    [all …]
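The hexagon file calls itself the canonical page fault handler, and the same skeleton repeats in nearly every arch/*/mm/fault.c entry in this listing: take mmap_lock, find the VMA, call handle_mm_fault(), then act on fault_signal_pending(), VM_FAULT_RETRY and the VM_FAULT_ERROR bits. A condensed, hedged sketch of that common core (error paths reduced to return codes; this is not any single architecture's exact code):

    #include <linux/mm.h>
    #include <linux/sched/signal.h>
    #include <linux/ptrace.h>
    #include <linux/errno.h>

    /*
     * Condensed core of the do_page_fault() pattern seen above.  Returns 0
     * when the fault is resolved or a fatal signal interrupted it; real
     * handlers deliver SIGSEGV/SIGBUS or call pagefault_out_of_memory()
     * instead of returning error codes.
     */
    static int demo_handle_user_fault(struct mm_struct *mm, unsigned long address,
                                      bool is_write, struct pt_regs *regs)
    {
        unsigned int flags = FAULT_FLAG_DEFAULT | FAULT_FLAG_USER;
        struct vm_area_struct *vma;
        vm_fault_t fault;

        if (is_write)
            flags |= FAULT_FLAG_WRITE;

    retry:
        mmap_read_lock(mm);
        vma = find_vma(mm, address);
        if (!vma || vma->vm_start > address) {
            mmap_read_unlock(mm);
            return -EFAULT;		/* bad area -> SIGSEGV */
        }

        fault = handle_mm_fault(vma, address, flags, regs);

        if (fault_signal_pending(fault, regs))
            return 0;			/* mmap_lock already dropped for us */

        if (fault & VM_FAULT_RETRY) {
            /* handle_mm_fault() dropped mmap_lock; try once more. */
            flags |= FAULT_FLAG_TRIED;
            goto retry;
        }

        mmap_read_unlock(mm);

        if (fault & VM_FAULT_ERROR)
            return (fault & VM_FAULT_OOM) ? -ENOMEM : -EFAULT;
        return 0;
    }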
|
/Linux-v5.10/arch/ia64/mm/ |
D | fault.c |
    3  * MMU fault handling support.
    72  vm_fault_t fault;  in ia64_do_page_fault() local
    82  * If we're in an interrupt or have no user context, we must not take the fault..  in ia64_do_page_fault()
    89  * If fault is in region 5 and we are in the kernel, we may already  in ia64_do_page_fault()
    145  * If for any reason at all we couldn't handle the fault, make  in ia64_do_page_fault()
    147  * fault.  in ia64_do_page_fault()
    149  fault = handle_mm_fault(vma, address, flags, regs);  in ia64_do_page_fault()
    151  if (fault_signal_pending(fault, regs))  in ia64_do_page_fault()
    154  if (unlikely(fault & VM_FAULT_ERROR)) {  in ia64_do_page_fault()
    157  * to us that made us unable to handle the page fault  in ia64_do_page_fault()
    [all …]
|
/Linux-v5.10/arch/nios2/kernel/ |
D | misaligned.c |
    72  unsigned int fault;  in handle_unaligned_c() local
    85  fault = 0;  in handle_unaligned_c()
    98  fault |= __get_user(d0, (u8 *)(addr+0));  in handle_unaligned_c()
    99  fault |= __get_user(d1, (u8 *)(addr+1));  in handle_unaligned_c()
    111  fault |= __put_user(d0, (u8 *)(addr+0));  in handle_unaligned_c()
    112  fault |= __put_user(d1, (u8 *)(addr+1));  in handle_unaligned_c()
    116  fault |= __get_user(d0, (u8 *)(addr+0));  in handle_unaligned_c()
    117  fault |= __get_user(d1, (u8 *)(addr+1));  in handle_unaligned_c()
    133  fault |= __put_user(d0, (u8 *)(addr+0));  in handle_unaligned_c()
    134  fault |= __put_user(d1, (u8 *)(addr+1));  in handle_unaligned_c()
    [all …]
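handle_unaligned_c() above assembles a misaligned access from individual byte accesses, OR-ing the __get_user()/__put_user() return values into one fault flag so that any failed user access marks the whole emulation as faulted. A hedged sketch of that accumulation pattern for a 16-bit load (illustrative helper, not the nios2 code; the caller is assumed to have done the usual access_ok() check):

    #include <linux/uaccess.h>
    #include <linux/types.h>
    #include <linux/errno.h>

    /*
     * Read a misaligned 16-bit little-endian value from user space one byte
     * at a time.  Returns 0 on success, -EFAULT if any byte access faulted.
     */
    static int demo_get_unaligned_u16(u16 *val, const u8 __user *addr)
    {
        unsigned int fault = 0;
        u8 d0, d1;

        fault |= __get_user(d0, addr + 0);
        fault |= __get_user(d1, addr + 1);
        if (fault)
            return -EFAULT;

        *val = d0 | ((u16)d1 << 8);
        return 0;
    }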
|
/Linux-v5.10/arch/alpha/mm/ |
D | fault.c |
    3  * linux/arch/alpha/mm/fault.c
    65  * 2 = fault-on-read
    66  * 3 = fault-on-execute
    67  * 4 = fault-on-write
    92  vm_fault_t fault;  in do_page_fault() local
    110  we must not take the fault. */  in do_page_fault()
    150  /* If for any reason at all we couldn't handle the fault,  in do_page_fault()
    152  the fault. */  in do_page_fault()
    153  fault = handle_mm_fault(vma, address, flags, regs);  in do_page_fault()
    155  if (fault_signal_pending(fault, regs))  in do_page_fault()
    [all …]
|
/Linux-v5.10/arch/s390/mm/ |
D | fault.c |
    8  * Derived from "arch/i386/mm/fault.c"
    170  pr_alert("Fault in ");  in dump_fault_info()
    220  printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",  in report_user_fault()
    256  /* Are we prepared to handle this kernel fault? */  in do_no_context()
    320  vm_fault_t fault)  in do_fault_error() argument
    324  switch (fault) {  in do_fault_error()
    333  si_code = (fault == VM_FAULT_BADMAP) ?  in do_fault_error()
    347  default: /* fault & VM_FAULT_ERROR */  in do_fault_error()
    348  if (fault & VM_FAULT_OOM) {  in do_fault_error()
    353  } else if (fault & VM_FAULT_SIGSEGV) {  in do_fault_error()
    [all …]
|
/Linux-v5.10/Documentation/i2c/ |
D | fault-codes.rst |
    2  I2C/SMBUS Fault Codes
    5  This is a summary of the most important conventions for use of fault
    9  A "Fault" is not always an "Error"
    11  Not all fault reports imply errors; "page faults" should be a familiar
    15  recovery, triggered by a fault report, there is no error.
    17  In a similar way, sometimes a "fault" code just reports one defined
    23  the right fault code, so that it can (in turn) behave correctly.
    26  I2C and SMBus fault codes
    29  some positive number indicating a non-fault return. The specific
    36  cases (unless the hardware doesn't provide unique fault reports).
    [all …]
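fault-codes.rst spells out which errno values an I2C/SMBus driver should return for which bus condition. A hedged sketch of a master_xfer() implementation mapping typical controller outcomes onto those conventions (the demo_hw_*() helpers are placeholders for real register access):

    #include <linux/i2c.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Placeholder controller hooks -- stand-ins for real hardware access. */
    static int demo_hw_start_msg(struct i2c_adapter *adap, struct i2c_msg *msg)
    {
        return 0;	/* pretend the message was issued */
    }
    static bool demo_hw_lost_arbitration(struct i2c_adapter *adap) { return false; }
    static bool demo_hw_addr_nacked(struct i2c_adapter *adap) { return false; }
    static bool demo_hw_timed_out(struct i2c_adapter *adap) { return false; }

    /*
     * Illustrative master_xfer(): return the fault codes the document
     * recommends -- EAGAIN for lost arbitration, ENXIO when the address
     * phase gets no ACK, ETIMEDOUT for a stuck bus, EOPNOTSUPP for an
     * unsupported transfer type; on success, the number of messages.
     */
    static int demo_master_xfer(struct i2c_adapter *adap,
                                struct i2c_msg *msgs, int num)
    {
        int i, ret;

        for (i = 0; i < num; i++) {
            if (msgs[i].flags & I2C_M_TEN)
                return -EOPNOTSUPP;	/* no 10-bit addressing here */

            ret = demo_hw_start_msg(adap, &msgs[i]);
            if (ret)
                return ret;

            if (demo_hw_lost_arbitration(adap))
                return -EAGAIN;
            if (demo_hw_addr_nacked(adap))
                return -ENXIO;
            if (demo_hw_timed_out(adap))
                return -ETIMEDOUT;
        }
        return num;
    }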
|
/Linux-v5.10/arch/x86/mm/ |
D | fault.c |
    38  * Returns 0 if mmiotrace is disabled, or if the fault is not
    120  * If it was a exec (instruction fetch) fault on NX page, then  in is_prefetch()
    121  * do not ignore the fault:  in is_prefetch()
    194  * Handle a fault on the vmalloc or module mapping area
    205  * unhandled page-fault when they are accessed.
    416  * The OS sees this as a page fault with the upper 32bits of RIP cleared.
    450  * We catch this in the page fault handler because these addresses
    536  pr_alert("BUG: unable to handle page fault for address: %px\n",  in show_fault_oops()
    559  * contributory exception from user code and gets a page fault  in show_fault_oops()
    560  * during delivery, the page fault can be delivered as though  in show_fault_oops()
    [all …]
|
/Linux-v5.10/arch/mips/kernel/ |
D | unaligned.c |
    170  goto fault;  in emulate_load_store_insn()
    179  goto fault;  in emulate_load_store_insn()
    204  goto fault;  in emulate_load_store_insn()
    217  goto fault;  in emulate_load_store_insn()
    230  goto fault;  in emulate_load_store_insn()
    245  goto fault;  in emulate_load_store_insn()
    258  goto fault;  in emulate_load_store_insn()
    283  goto fault;  in emulate_load_store_insn()
    302  goto fault;  in emulate_load_store_insn()
    321  goto fault;  in emulate_load_store_insn()
    [all …]
|
/Linux-v5.10/arch/sh/mm/ |
D | fault.c |
    2  * Page fault handler for SH with an MMU.
    7  * Based on linux/arch/i386/mm/fault.c:
    157  * be another reason for the fault. Return NULL here to  in vmalloc_sync_one()
    158  * signal that we have not taken care of the fault.  in vmalloc_sync_one()
    174  * Handle a fault on the vmalloc or module mapping area
    225  /* Are we prepared to handle this kernel fault? */  in no_context()
    316  unsigned long address, vm_fault_t fault)  in mm_fault_error() argument
    322  if (fault_signal_pending(fault, regs)) {  in mm_fault_error()
    329  if (!(fault & VM_FAULT_RETRY))  in mm_fault_error()
    332  if (!(fault & VM_FAULT_ERROR))  in mm_fault_error()
    [all …]
|