Lines Matching +full:tlb +full:- +full:split
1 /* SPDX-License-Identifier: GPL-2.0 */
24 * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management.
26 * @tlb_flush_all: Synchronously invalidate the entire TLB context.
27 * @tlb_flush_walk: Synchronously invalidate all intermediate TLB state (sometimes referred to as the "walk cache") for a virtual address range.
30 * @tlb_add_page: Optional callback to queue up leaf TLB invalidation for a
31 * single page. IOMMUs that cannot batch TLB invalidation operations efficiently will typically issue them here, but others may decide to update the iommu_iotlb_gather structure and defer the invalidation until iommu_iotlb_sync().
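Taken together, these three callbacks are what an IOMMU driver plugs in so the
generic page-table code can perform TLB maintenance on its behalf. A minimal
driver-side sketch follows; the my_iommu_* names and hardware hooks are
invented for illustration, and the callback prototypes are inferred from the
io_pgtable_tlb_*() dispatch lines quoted further down, so verify them against
the header of the kernel version in use.

static void my_iommu_tlb_flush_all(void *cookie)
{
	struct my_iommu_domain *dom = cookie;	/* cookie chosen by the driver */

	my_iommu_hw_invalidate_all(dom);	/* hypothetical hardware op */
}

static void my_iommu_tlb_flush_walk(unsigned long iova, size_t size,
				    size_t granule, void *cookie)
{
	struct my_iommu_domain *dom = cookie;

	/* Synchronously drop intermediate (walk cache) entries for the range. */
	my_iommu_hw_invalidate_range(dom, iova, size, granule);
}

static void my_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
				  unsigned long iova, size_t granule,
				  void *cookie)
{
	struct my_iommu_domain *dom = cookie;

	/* Either invalidate the single leaf entry now, or record it in
	 * *gather and let the eventual ->iotlb_sync() batch the work. */
	my_iommu_hw_invalidate_range(dom, iova, granule, granule);
}

static const struct iommu_flush_ops my_iommu_flush_ops = {
	.tlb_flush_all	= my_iommu_tlb_flush_all,
	.tlb_flush_walk	= my_iommu_tlb_flush_walk,
	.tlb_add_page	= my_iommu_tlb_add_page,
};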
48 * struct io_pgtable_cfg - Configuration data for a set of page tables.
51 * @quirks: A bitmap of hardware quirks that require some special action by the low-level page table allocator.
58 * @tlb: TLB management callbacks for this set of tables.
66 * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in stage 1 PTEs, for hardware which insists on validating them even in non-secure state where they should normally be ignored.
71 * format, and/or requires some format-specific default value.
78 * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table for use in the upper half of a split address space.
80 * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability
81 * attributes set in the TCR for a non-coherent page-table walker.
93 const struct iommu_flush_ops *tlb; member
96 /* Low-level data specific to the table format */
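One plausible way to put this configuration together is sketched below. Only
.quirks, .tlb and the quirk names appear in the matched lines above; the other
field names (.pgsize_bitmap, .ias, .oas, .coherent_walk, .iommu_dev) are
assumed from the full header and may differ between kernel versions, so treat
this as a sketch rather than a quote of io-pgtable.h.

struct io_pgtable_cfg cfg = {
	.quirks		= IO_PGTABLE_QUIRK_ARM_TTBR1 |
			  IO_PGTABLE_QUIRK_ARM_OUTER_WBWA,
	.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,	/* assumed field */
	.ias		= 48,		/* input (IOVA) bits, assumed field */
	.oas		= 48,		/* output (PA) bits, assumed field */
	.coherent_walk	= false,	/* non-coherent walker, hence OUTER_WBWA */
	.tlb		= &my_iommu_flush_ops,		/* table from the sketch above */
	.iommu_dev	= dev,		/* device performing the table walks */
};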
144 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
171 * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
178 * @cookie: An opaque token provided by the IOMMU driver and passed back to the callback routines in cfg->tlb.
185 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
187 * *must* ensure that the page table is no longer live, but the TLB can be dirty.
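Putting the two entry points together: the driver hands the configuration and
a cookie to alloc_io_pgtable_ops(), maps and unmaps through the returned ops,
and tears everything down with free_io_pgtable_ops() once the hardware can no
longer reach the tables. The sketch below assumes the ARM_64_LPAE_S1 format,
a hypothetical my_iommu_domain that embeds the cfg built above, and the
->map_pages()/->unmap_pages() entry points of recent kernels (older kernels
expose ->map()/->unmap() instead).

static int my_iommu_setup_pgtable(struct my_iommu_domain *dom)
{
	struct iommu_iotlb_gather gather;
	struct io_pgtable_ops *ops;
	unsigned long iova = 0x100000;		/* placeholder IOVA */
	phys_addr_t paddr = 0x80000000;		/* placeholder PA */
	size_t mapped = 0;

	/* dom->cfg: the io_pgtable_cfg built as in the earlier sketch */
	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &dom->cfg, dom);
	if (!ops)
		return -ENOMEM;
	/* dom->cfg may have been adjusted, e.g. pgsize_bitmap restricted */

	/* error handling of map/unmap elided for brevity */
	ops->map_pages(ops, iova, paddr, SZ_4K, 1,
		       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL, &mapped);

	iommu_iotlb_gather_init(&gather);
	ops->unmap_pages(ops, iova, SZ_4K, 1, &gather);
	/* leaf invalidations queued in 'gather' are flushed on the driver's
	 * ->iotlb_sync() path, or were issued directly by tlb_add_page */

	/* the tables must no longer be live at this point; a dirty TLB is fine */
	free_io_pgtable_ops(ops);
	return 0;
}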
199 * struct io_pgtable - Internal structure describing a set of page tables.
218 if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_all) in io_pgtable_tlb_flush_all()
219 iop->cfg.tlb->tlb_flush_all(iop->cookie); in io_pgtable_tlb_flush_all()
226 if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_walk) in io_pgtable_tlb_flush_walk()
227 iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie); in io_pgtable_tlb_flush_walk()
235 if (iop->cfg.tlb && iop->cfg.tlb->tlb_add_page) in io_pgtable_tlb_add_page()
236 iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie); in io_pgtable_tlb_add_page()
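Read in context, the matched lines above are the bodies of three static inline
dispatch helpers; a reconstruction is shown below (the enclosing signatures are
inferred from the call sites, not quoted on this page). The cfg.tlb and
per-callback NULL checks are what make the flush ops, and tlb_add_page in
particular, optional: callers can invoke these helpers unconditionally.

static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
	if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_all)
		iop->cfg.tlb->tlb_flush_all(iop->cookie);
}

static inline void
io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,
			  size_t size, size_t granule)
{
	if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_walk)
		iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
}

static inline void
io_pgtable_tlb_add_page(struct io_pgtable *iop,
			struct iommu_iotlb_gather *gather,
			unsigned long iova, size_t granule)
{
	if (iop->cfg.tlb && iop->cfg.tlb->tlb_add_page)
		iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
}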
240 * struct io_pgtable_init_fns - Alloc/free a set of page tables for a particular format.
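Each page-table format exposes its constructor and destructor through this
structure. A hypothetical format might look like the sketch below; the
.alloc/.free member names and their signatures are assumed from the full
header, and everything prefixed my_fmt_ is invented for illustration.

struct my_fmt_pgtable {
	struct io_pgtable	iop;	/* embeds the generic io_pgtable */
	/* format-private state: root table pointer, level info, ... */
};

static struct io_pgtable *my_fmt_alloc(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct my_fmt_pgtable *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return NULL;
	/* ... validate cfg, allocate the root table, fill in data->iop.ops ... */
	return &data->iop;
}

static void my_fmt_free(struct io_pgtable *iop)
{
	struct my_fmt_pgtable *data = container_of(iop, struct my_fmt_pgtable, iop);

	/* ... free all table memory; the TLB may still be dirty here ... */
	kfree(data);
}

struct io_pgtable_init_fns my_fmt_init_fns = {
	.alloc	= my_fmt_alloc,
	.free	= my_fmt_free,
};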