Lines Matching full:iommu
24 #include "iommu.h"
27 #include "../iommu-sva-lib.h"
68 int intel_svm_enable_prq(struct intel_iommu *iommu) in intel_svm_enable_prq() argument
76 pr_warn("IOMMU: %s: Failed to allocate page request queue\n", in intel_svm_enable_prq()
77 iommu->name); in intel_svm_enable_prq()
80 iommu->prq = page_address(pages); in intel_svm_enable_prq()
82 irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu); in intel_svm_enable_prq()
84 pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n", in intel_svm_enable_prq()
85 iommu->name); in intel_svm_enable_prq()
89 iommu->pr_irq = irq; in intel_svm_enable_prq()
91 snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), in intel_svm_enable_prq()
92 "dmar%d-iopfq", iommu->seq_id); in intel_svm_enable_prq()
93 iopfq = iopf_queue_alloc(iommu->iopfq_name); in intel_svm_enable_prq()
95 pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name); in intel_svm_enable_prq()
99 iommu->iopf_queue = iopfq; in intel_svm_enable_prq()
101 snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id); in intel_svm_enable_prq()
104 iommu->prq_name, iommu); in intel_svm_enable_prq()
106 pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n", in intel_svm_enable_prq()
107 iommu->name); in intel_svm_enable_prq()
110 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_enable_prq()
111 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_enable_prq()
112 dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER); in intel_svm_enable_prq()
114 init_completion(&iommu->prq_complete); in intel_svm_enable_prq()
119 iopf_queue_free(iommu->iopf_queue); in intel_svm_enable_prq()
120 iommu->iopf_queue = NULL; in intel_svm_enable_prq()
123 iommu->pr_irq = 0; in intel_svm_enable_prq()
125 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_enable_prq()
126 iommu->prq = NULL; in intel_svm_enable_prq()
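
The intel_svm_enable_prq() lines above follow the usual allocate-then-unwind shape: the page request queue pages, the hardware IRQ, the iopf queue and the IRQ handler are acquired in order, the DMAR_PQH_REG/DMAR_PQT_REG/DMAR_PQA_REG registers are only programmed once everything is in place, and each failure label (lines 119-126) releases exactly what was already acquired. A minimal, self-contained C sketch of that goto-unwind pattern follows; the struct and helper names are hypothetical stand-ins, not the kernel API.

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical stand-ins for the three resources intel_svm_enable_prq()
	 * acquires: the queue pages, the hardware IRQ and the iopf queue. */
	struct prq_state {
		void *queue_pages;      /* models iommu->prq        */
		int   irq;              /* models iommu->pr_irq     */
		void *iopf_queue;       /* models iommu->iopf_queue */
	};

	static int fake_alloc_hwirq(void)
	{
		return 42;              /* pretend the IRQ allocation succeeded */
	}

	/* Sketch of the goto-unwind structure: each failure path undoes only the
	 * steps that already completed, in reverse order. */
	static int enable_prq_sketch(struct prq_state *st)
	{
		st->queue_pages = calloc(1, 4096);
		if (!st->queue_pages)
			return -1;

		st->irq = fake_alloc_hwirq();
		if (st->irq <= 0)
			goto free_prq;

		st->iopf_queue = calloc(1, 64);
		if (!st->iopf_queue)
			goto free_hwirq;

		/* The real driver now programs PQH/PQT/PQA and returns 0. */
		return 0;

	free_hwirq:
		st->irq = 0;
	free_prq:
		free(st->queue_pages);
		st->queue_pages = NULL;
		return -1;
	}

	int main(void)
	{
		struct prq_state st = { 0 };

		printf("enable_prq_sketch: %d\n", enable_prq_sketch(&st));
		return 0;
	}

intel_svm_finish_prq() (lines 131-149 below) then performs the same releases unconditionally at teardown, guarding each step with a NULL/zero check so it is also safe after a partial setup.
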
131 int intel_svm_finish_prq(struct intel_iommu *iommu) in intel_svm_finish_prq() argument
133 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_finish_prq()
134 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_finish_prq()
135 dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL); in intel_svm_finish_prq()
137 if (iommu->pr_irq) { in intel_svm_finish_prq()
138 free_irq(iommu->pr_irq, iommu); in intel_svm_finish_prq()
139 dmar_free_hwirq(iommu->pr_irq); in intel_svm_finish_prq()
140 iommu->pr_irq = 0; in intel_svm_finish_prq()
143 if (iommu->iopf_queue) { in intel_svm_finish_prq()
144 iopf_queue_free(iommu->iopf_queue); in intel_svm_finish_prq()
145 iommu->iopf_queue = NULL; in intel_svm_finish_prq()
148 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_finish_prq()
149 iommu->prq = NULL; in intel_svm_finish_prq()
154 void intel_svm_check(struct intel_iommu *iommu) in intel_svm_check() argument
156 if (!pasid_supported(iommu)) in intel_svm_check()
160 !cap_fl1gp_support(iommu->cap)) { in intel_svm_check()
162 iommu->name); in intel_svm_check()
167 !cap_fl5lp_support(iommu->cap)) { in intel_svm_check()
169 iommu->name); in intel_svm_check()
173 iommu->flags |= VTD_FLAG_SVM_CAPABLE; in intel_svm_check()
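
intel_svm_check() only sets VTD_FLAG_SVM_CAPABLE when PASID is supported and every first-level paging feature the CPU may hand to the IOMMU (1GB pages, 5-level paging) is also implemented in the capability register. A small self-contained model of that gating, with illustrative bit definitions rather than the real cap_fl1gp_support()/cap_fl5lp_support() field layout:

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative capability bits; the real helpers test fields of the
	 * VT-d capability register. */
	#define CAP_FL1GP        (1u << 0)   /* first-level 1GB page support */
	#define CAP_FL5LP        (1u << 1)   /* first-level 5-level paging   */
	#define FLAG_SVM_CAPABLE (1u << 0)

	struct fake_iommu {
		unsigned int cap;
		unsigned int flags;
		bool pasid_supported;
	};

	/* Sketch of the gating in intel_svm_check(): SVM is advertised only when
	 * every first-level paging feature the CPU may use is also implemented
	 * by this IOMMU. cpu_has_1g/cpu_has_la57 stand in for the CPU feature
	 * checks. */
	static void svm_check_sketch(struct fake_iommu *iommu,
				     bool cpu_has_1g, bool cpu_has_la57)
	{
		if (!iommu->pasid_supported)
			return;
		if (cpu_has_1g && !(iommu->cap & CAP_FL1GP))
			return;
		if (cpu_has_la57 && !(iommu->cap & CAP_FL5LP))
			return;
		iommu->flags |= FLAG_SVM_CAPABLE;
	}

	int main(void)
	{
		struct fake_iommu iommu = { .cap = CAP_FL1GP, .pasid_supported = true };

		/* CPU uses 5-level paging but this IOMMU lacks FL5LP: SVM stays off. */
		svm_check_sketch(&iommu, true, true);
		printf("svm capable: %d\n", !!(iommu.flags & FLAG_SVM_CAPABLE));
		return 0;
	}

intel_svm_bind() applies a similar extended-capability gate (ecap_srs, line 801) before accepting a bind request.
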
186 qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih); in __flush_svm_range_dev()
188 qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid, in __flush_svm_range_dev()
253 intel_pasid_tear_down_entry(sdev->iommu, sdev->dev, in intel_mm_release()
311 static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu, in intel_svm_bind_mm() argument
365 sdev->iommu = iommu; in intel_svm_bind_mm()
383 ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid, in intel_svm_bind_mm()
409 struct intel_iommu *iommu; in intel_svm_unbind_mm() local
414 iommu = device_to_iommu(dev, NULL, NULL); in intel_svm_unbind_mm()
415 if (!iommu) in intel_svm_unbind_mm()
434 intel_pasid_tear_down_entry(iommu, dev, in intel_svm_unbind_mm()
512 struct intel_iommu *iommu; in intel_svm_drain_prq() local
526 iommu = info->iommu; in intel_svm_drain_prq()
530 did = domain_id_iommu(domain, iommu); in intel_svm_drain_prq()
538 reinit_completion(&iommu->prq_complete); in intel_svm_drain_prq()
539 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in intel_svm_drain_prq()
540 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in intel_svm_drain_prq()
544 req = &iommu->prq[head / sizeof(*req)]; in intel_svm_drain_prq()
550 wait_for_completion(&iommu->prq_complete); in intel_svm_drain_prq()
586 reinit_completion(&iommu->prq_complete); in intel_svm_drain_prq()
587 qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN); in intel_svm_drain_prq()
588 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in intel_svm_drain_prq()
589 wait_for_completion(&iommu->prq_complete); in intel_svm_drain_prq()
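
The drain path above has two phases: it first scans the hardware ring between DMAR_PQH_REG and DMAR_PQT_REG for requests that still name the PASID being torn down, sleeping on prq_complete and rescanning until the queue has moved past them, then it submits the draining descriptors with qi_submit_sync(..., QI_OPT_WAIT_DRAIN) and waits once more if DMA_PRS_PRO reports a pending overflow. A minimal sketch of the first phase's head-to-tail scan, using a simplified request record rather than the real packed descriptor:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Simplified request record; the real page request descriptor is a packed
	 * 128-bit entry indexed by byte offset (head / sizeof(*req)) in the driver. */
	struct fake_prq_desc {
		bool pasid_present;
		unsigned int pasid;
	};

	#define PRQ_RING_ENTRIES 8

	/* Sketch of the scan in intel_svm_drain_prq(): walk the ring from head to
	 * tail (entry indices here, byte offsets in the driver) and report whether
	 * any not-yet-fetched request still targets the PASID being drained; if so
	 * the caller waits on prq_complete and rescans. */
	static bool pasid_still_pending(const struct fake_prq_desc *ring,
					size_t head, size_t tail, unsigned int pasid)
	{
		while (head != tail) {
			const struct fake_prq_desc *req = &ring[head];

			if (req->pasid_present && req->pasid == pasid)
				return true;
			head = (head + 1) % PRQ_RING_ENTRIES;
		}
		return false;
	}

	int main(void)
	{
		struct fake_prq_desc ring[PRQ_RING_ENTRIES] = {
			[2] = { .pasid_present = true, .pasid = 5 },
		};

		printf("pending: %d\n", pasid_still_pending(ring, 1, 4, 5));
		return 0;
	}
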
610 static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev, in intel_svm_prq_report() argument
643 } else if (dmar_latency_enabled(iommu, DMAR_LATENCY_PRQ)) { in intel_svm_prq_report()
654 static void handle_bad_prq_event(struct intel_iommu *iommu, in handle_bad_prq_event() argument
660 iommu->name, ((unsigned long long *)req)[0], in handle_bad_prq_event()
690 qi_submit_sync(iommu, &desc, 1, 0); in handle_bad_prq_event()
695 struct intel_iommu *iommu = d; in prq_event_thread() local
705 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
707 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
708 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
711 req = &iommu->prq[head / sizeof(*req)]; in prq_event_thread()
715 pr_err("IOMMU: %s: Page request without PASID\n", in prq_event_thread()
716 iommu->name); in prq_event_thread()
718 handle_bad_prq_event(iommu, req, QI_RESP_INVALID); in prq_event_thread()
723 pr_err("IOMMU: %s: Address is not canonical\n", in prq_event_thread()
724 iommu->name); in prq_event_thread()
729 pr_err("IOMMU: %s: Page request in Privilege Mode\n", in prq_event_thread()
730 iommu->name); in prq_event_thread()
735 pr_err("IOMMU: %s: Execution request not supported\n", in prq_event_thread()
736 iommu->name); in prq_event_thread()
744 pdev = pci_get_domain_bus_and_slot(iommu->segment, in prq_event_thread()
748 * If prq is to be handled outside iommu driver via receiver of in prq_event_thread()
754 if (intel_svm_prq_report(iommu, &pdev->dev, req)) in prq_event_thread()
755 handle_bad_prq_event(iommu, req, QI_RESP_INVALID); in prq_event_thread()
757 trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1, in prq_event_thread()
759 iommu->prq_seq_number++); in prq_event_thread()
765 dmar_writeq(iommu->reg + DMAR_PQH_REG, tail); in prq_event_thread()
771 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in prq_event_thread()
772 pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n", in prq_event_thread()
773 iommu->name); in prq_event_thread()
774 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
775 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
777 iopf_queue_discard_partial(iommu->iopf_queue); in prq_event_thread()
778 writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
779 pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared", in prq_event_thread()
780 iommu->name); in prq_event_thread()
784 if (!completion_done(&iommu->prq_complete)) in prq_event_thread()
785 complete(&iommu->prq_complete); in prq_event_thread()
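
In prq_event_thread() the head and tail read from DMAR_PQH_REG/DMAR_PQT_REG are byte offsets into the queue, so the code indexes the descriptor array with head / sizeof(*req) and, once everything between them has been handled, writes the tail back to the head register (line 765) to acknowledge the consumed entries; a remaining DMA_PRS_PRO overflow is cleared separately (lines 771-780). A small self-contained model of that offset arithmetic; the queue size and descriptor layout are illustrative, not the hardware format:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Illustrative 16-byte descriptor, mirroring the two quadwords
	 * (qw_0/qw_1) the driver passes to intel_svm_prq_report(). */
	struct fake_req {
		uint64_t qw_0;
		uint64_t qw_1;
	};

	#define QUEUE_BYTES	256			/* stand-in for the PRQ size  */
	#define RING_MASK	(QUEUE_BYTES - 1)	/* stand-in for PRQ_RING_MASK */

	/* Sketch of the consumer loop in prq_event_thread(): head and tail are
	 * byte offsets, each descriptor is sizeof(struct fake_req) bytes, and the
	 * loop stops when head catches up with tail. Returns the number handled. */
	static int drain_ring_sketch(const struct fake_req *queue,
				     uint64_t head, uint64_t tail)
	{
		int handled = 0;

		while (head != tail) {
			const struct fake_req *req = &queue[head / sizeof(*req)];

			printf("req qw_0=%llu qw_1=%llu\n",
			       (unsigned long long)req->qw_0,
			       (unsigned long long)req->qw_1);
			head = (head + sizeof(*req)) & RING_MASK;
			handled++;
		}
		return handled;
	}

	int main(void)
	{
		struct fake_req queue[QUEUE_BYTES / sizeof(struct fake_req)];

		memset(queue, 0, sizeof(queue));
		queue[0].qw_0 = 1;
		queue[1].qw_0 = 2;

		/* Two 16-byte descriptors pending: head at offset 0, tail at 32. */
		printf("handled %d\n", drain_ring_sketch(queue, 0, 32));
		return 0;
	}
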
792 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); in intel_svm_bind() local
801 if (!ecap_srs(iommu->ecap)) { in intel_svm_bind()
803 iommu->name); in intel_svm_bind()
809 iommu->name); in intel_svm_bind()
823 sva = intel_svm_bind_mm(iommu, dev, mm, flags); in intel_svm_bind()
856 struct intel_iommu *iommu; in intel_svm_page_response() local
867 iommu = device_to_iommu(dev, &bus, &devfn); in intel_svm_page_response()
868 if (!iommu) in intel_svm_page_response()
912 dmar_latency_update(iommu, DMAR_LATENCY_PRQ, in intel_svm_page_response()
916 qi_submit_sync(iommu, &desc, 1, 0); in intel_svm_page_response()
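
The page-response path mirrors the reporting path: when PRQ latency tracking is enabled, intel_svm_prq_report() records a timestamp at report time (line 643) and intel_svm_page_response() feeds the elapsed time into dmar_latency_update() (line 912) before the response descriptor goes out through qi_submit_sync(). A rough self-contained sketch of that round-trip measurement; the struct and helpers are hypothetical, only the idea of timestamping at report time and accumulating at response time mirrors the driver.

	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	/* Hypothetical stand-in for the per-IOMMU statistic updated by
	 * dmar_latency_update(..., DMAR_LATENCY_PRQ, ...). */
	struct prq_latency {
		uint64_t samples;
		uint64_t total_ns;
	};

	static uint64_t now_ns(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
	}

	/* Taken when the fault is reported to the consumer. */
	static uint64_t prq_report_timestamp(void)
	{
		return now_ns();
	}

	/* Called when the page response is submitted: accumulate the
	 * report-to-response round trip. */
	static void prq_latency_update(struct prq_latency *stats, uint64_t report_ns)
	{
		stats->samples++;
		stats->total_ns += now_ns() - report_ns;
	}

	int main(void)
	{
		struct prq_latency stats = { 0 };
		uint64_t ts = prq_report_timestamp();

		prq_latency_update(&stats, ts);
		printf("samples=%llu total_ns=%llu\n",
		       (unsigned long long)stats.samples,
		       (unsigned long long)stats.total_ns);
		return 0;
	}
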