Lines matching full:fault
17 * struct iopf_queue - IO Page Fault queue
18 * @wq: the fault workqueue
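Lines 17-18 are kernel-doc for the queue structure. A minimal sketch of how the definition plausibly reads, modeled on mainline io-pgfault code; only @wq is confirmed by the matches, the other fields are assumptions:

struct iopf_queue {
	struct workqueue_struct	*wq;		/* the fault workqueue (line 18) */
	struct list_head	devices;	/* assumed: devices attached to this queue */
	struct mutex		lock;		/* assumed: protects the device list */
};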
29 * struct iopf_device_param - IO Page Fault data attached to a device
44 struct iommu_fault fault; member
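Line 44 shows a struct iommu_fault embedded by value as a member of a list-node wrapper. A sketch of the per-device parameters and that wrapper under the same assumption; field names other than the matched member are guesses:

struct iopf_device_param {
	struct device		*dev;
	struct iopf_queue	*queue;
	struct list_head	queue_list;	/* assumed: entry in queue->devices */
	struct list_head	partial;	/* assumed: faults awaiting their group's last request */
};

struct iopf_fault {
	struct iommu_fault	fault;		/* line 44: the embedded fault record */
	struct list_head	list;
};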
60 .pasid = iopf->fault.prm.pasid, in iopf_complete_group()
61 .grpid = iopf->fault.prm.grpid, in iopf_complete_group()
65 if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) && in iopf_complete_group()
66 (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID)) in iopf_complete_group()
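Lines 60-66 build a page response from the stored fault. A sketch of iopf_complete_group() reconstructed around those matches; the signature and the iommu_page_response() call are assumptions based on the mainline IOMMU fault-reporting API:

static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
			       enum iommu_page_response_code status)
{
	struct iommu_page_response resp = {
		.version	= IOMMU_PAGE_RESP_VERSION_1,
		.pasid		= iopf->fault.prm.pasid,	/* line 60 */
		.grpid		= iopf->fault.prm.grpid,	/* line 61 */
		.code		= status,
	};

	/* Lines 65-66: echo the PASID back only if the device needs it */
	if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) &&
	    (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID))
		resp.flags = IOMMU_PAGE_RESP_PASID_VALID;

	return iommu_page_response(dev, &resp);
}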
80 struct iommu_fault_page_request *prm = &iopf->fault.prm; in iopf_handle_single()
114 /* Access fault */ in iopf_handle_single()
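Line 80 pulls the page request out of the fault, and line 114 rejects requests whose permissions the VMA does not allow. A sketch of the handler between those two points, assuming it resolves the PASID to an mm and calls handle_mm_fault() as mainline does; helper names and the exact handle_mm_fault() signature vary by kernel version:

static enum iommu_page_response_code
iopf_handle_single(struct iopf_fault *iopf)
{
	vm_fault_t ret;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned int access_flags = 0;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	struct iommu_fault_page_request *prm = &iopf->fault.prm;	/* line 80 */
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	mm = iommu_sva_find(prm->pasid);	/* assumed: PASID -> mm lookup */
	if (IS_ERR_OR_NULL(mm))
		return status;

	mmap_read_lock(mm);
	vma = find_extend_vma(mm, prm->addr);
	if (!vma)
		goto out_put_mm;		/* unmapped area */

	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	if (access_flags & ~vma->vm_flags)
		/* Access fault (line 114) */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
					IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);
	return status;
}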
144 if (!(iopf->fault.prm.flags & in iopf_handle_group()
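Line 144 tests the LAST_PAGE flag while walking a group. A sketch of the work item, assuming faults are handled in order and every non-last entry is freed during the walk, as in mainline:

static void iopf_handle_group(struct work_struct *work)
{
	struct iopf_group *group;
	struct iopf_fault *iopf, *next;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

	group = container_of(work, struct iopf_group, work);

	list_for_each_entry_safe(iopf, next, &group->faults, list) {
		/* Errors are sticky: skip the rest of the group on failure */
		if (status == IOMMU_PAGE_RESP_SUCCESS)
			status = iopf_handle_single(iopf);

		/* Line 144: the last fault is embedded in the group, don't free it */
		if (!(iopf->fault.prm.flags &
		      IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
			kfree(iopf);
	}

	iopf_complete_group(group->dev, &group->last_fault, status);
	kfree(group);
}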
154 * iommu_queue_iopf - IO Page Fault handler
155 * @fault: fault event
158 * Add a fault to the device workqueue, to be handled by mm.
177 * b. The IOMMU driver flushes all fault queues on unbind() before freeing the
186 int iommu_queue_iopf(struct iommu_fault *fault, void *cookie) in iommu_queue_iopf() argument
198 if (fault->type != IOMMU_FAULT_PAGE_REQ) in iommu_queue_iopf()
199 /* Not a recoverable page fault */ in iommu_queue_iopf()
210 if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) { in iommu_queue_iopf()
215 iopf->fault = *fault; in iommu_queue_iopf()
235 group->last_fault.fault = *fault; in iommu_queue_iopf()
242 if (iopf->fault.prm.grpid == fault->prm.grpid) in iommu_queue_iopf()
243 /* Insert *before* the last fault */ in iommu_queue_iopf()
252 if (iopf->fault.prm.grpid == fault->prm.grpid) { in iommu_queue_iopf()
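Lines 186-252 outline the producer path: non-last requests are parked on the partial list, and the last request of a group gathers them and queues the work. A sketch of iommu_queue_iopf() reconstructed around the matched lines; the dev_iommu plumbing and error paths are assumptions modeled on mainline:

int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
{
	int ret;
	struct iopf_group *group;
	struct iopf_fault *iopf, *next;
	struct iopf_device_param *iopf_param;
	struct device *dev = cookie;		/* assumed: cookie is the device */
	struct dev_iommu *param = dev->iommu;

	if (fault->type != IOMMU_FAULT_PAGE_REQ)
		/* Not a recoverable page fault (lines 198-199) */
		return -EOPNOTSUPP;

	iopf_param = param->iopf_param;
	if (!iopf_param)
		return -ENODEV;

	if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		/* Line 210: non-last request, postpone until the last one */
		iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
		if (!iopf)
			return -ENOMEM;

		iopf->fault = *fault;		/* line 215 */
		list_add(&iopf->list, &iopf_param->partial);
		return 0;
	}

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group) {
		ret = -ENOMEM;
		goto cleanup_partial;
	}

	group->dev = dev;
	group->last_fault.fault = *fault;	/* line 235 */
	INIT_LIST_HEAD(&group->faults);
	list_add(&group->last_fault.list, &group->faults);
	INIT_WORK(&group->work, iopf_handle_group);

	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
		if (iopf->fault.prm.grpid == fault->prm.grpid)
			/* Lines 242-243: insert *before* the last fault */
			list_move(&iopf->list, &group->faults);
	}

	queue_work(iopf_param->queue->wq, &group->work);
	return 0;

cleanup_partial:
	/* Line 252: drop this group's partials so they don't get stuck */
	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
		if (iopf->fault.prm.grpid == fault->prm.grpid) {
			list_del(&iopf->list);
			kfree(iopf);
		}
	}
	return ret;
}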
268 * that no new fault is added to the queue. In particular it must flush its
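Line 268 belongs to the flush documentation: the driver must stop producing faults before flushing. A sketch of a flush helper consistent with that contract; the name iopf_queue_flush_dev and the locking details are assumptions:

int iopf_queue_flush_dev(struct device *dev)
{
	int ret = 0;
	struct iopf_device_param *iopf_param;
	struct dev_iommu *param = dev->iommu;

	if (!param)
		return -ENODEV;

	mutex_lock(&param->lock);
	iopf_param = param->iopf_param;
	if (iopf_param)
		/* Wait for all queued fault groups to be handled */
		flush_workqueue(iopf_param->queue->wq);
	else
		ret = -ENODEV;
	mutex_unlock(&param->lock);

	return ret;
}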
295 * iopf_queue_discard_partial - Remove all pending partial faults
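A sketch of the discard helper named on line 295, assuming partial faults live on each attached device's list and are freed under the queue lock:

int iopf_queue_discard_partial(struct iopf_queue *queue)
{
	struct iopf_fault *iopf, *next;
	struct iopf_device_param *iopf_param;

	if (!queue)
		return -EINVAL;

	mutex_lock(&queue->lock);
	list_for_each_entry(iopf_param, &queue->devices, queue_list) {
		list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
			list_del(&iopf->list);
			kfree(iopf);
		}
	}
	mutex_unlock(&queue->lock);
	return 0;
}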
326 * iopf_queue_add_device - Add producer to the fault queue
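A sketch of the producer registration named on line 326, assuming a device can be attached to at most one queue at a time:

int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
	int ret = -EBUSY;
	struct iopf_device_param *iopf_param;
	struct dev_iommu *param = dev->iommu;

	if (!param)
		return -ENODEV;

	iopf_param = kzalloc(sizeof(*iopf_param), GFP_KERNEL);
	if (!iopf_param)
		return -ENOMEM;

	INIT_LIST_HEAD(&iopf_param->partial);
	iopf_param->queue = queue;
	iopf_param->dev = dev;

	mutex_lock(&queue->lock);
	mutex_lock(&param->lock);
	if (!param->iopf_param) {
		list_add(&iopf_param->queue_list, &queue->devices);
		param->iopf_param = iopf_param;
		ret = 0;
	}
	mutex_unlock(&param->lock);
	mutex_unlock(&queue->lock);

	if (ret)
		kfree(iopf_param);

	return ret;
}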
367 * iopf_queue_remove_device - Remove producer from fault queue
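The inverse operation from line 367, sketched under the same assumptions; any partial faults still pending are freed once the device is unlinked:

int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
	int ret = -EINVAL;
	struct iopf_fault *iopf, *next;
	struct iopf_device_param *iopf_param;
	struct dev_iommu *param = dev->iommu;

	if (!param || !queue)
		return -EINVAL;

	mutex_lock(&queue->lock);
	mutex_lock(&param->lock);
	iopf_param = param->iopf_param;
	if (iopf_param && iopf_param->queue == queue) {
		list_del(&iopf_param->queue_list);
		param->iopf_param = NULL;
		ret = 0;
	}
	mutex_unlock(&param->lock);
	mutex_unlock(&queue->lock);
	if (ret)
		return ret;

	/* Just in case some faults are still stuck */
	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list)
		kfree(iopf);

	kfree(iopf_param);
	return 0;
}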
409 * iopf_queue_alloc - Allocate and initialize a fault queue
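A sketch of the allocator from line 409. The unbound workqueue is an assumption, justified because ordering only matters within a group and whole groups are queued at once, so groups may be handled out of order:

struct iopf_queue *iopf_queue_alloc(const char *name)
{
	struct iopf_queue *queue;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;

	/* Unordered WQ: fault groups can be handled out of order */
	queue->wq = alloc_workqueue("iopf_queue/%s", WQ_UNBOUND, 0, name);
	if (!queue->wq) {
		kfree(queue);
		return NULL;
	}

	INIT_LIST_HEAD(&queue->devices);
	mutex_init(&queue->lock);

	return queue;
}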