/Linux-v5.10/drivers/staging/media/ipu3/ |
D | ipu3-mmu.c |
    21  #include "ipu3-mmu.h"
    73  * @mmu: MMU to perform the invalidate operation on
    78  static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu)  in imgu_mmu_tlb_invalidate() argument
    80  writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE);  in imgu_mmu_tlb_invalidate()
    83  static void call_if_imgu_is_powered(struct imgu_mmu *mmu,  in call_if_imgu_is_powered() argument
    84  void (*func)(struct imgu_mmu *mmu))  in call_if_imgu_is_powered() argument
    86  if (!pm_runtime_get_if_in_use(mmu->dev))  in call_if_imgu_is_powered()
    89  func(mmu);  in call_if_imgu_is_powered()
    90  pm_runtime_put(mmu->dev);  in call_if_imgu_is_powered()
    95  * @mmu: MMU to set the CIO gate bit in.
    [all …]

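The ipu3-mmu.c hits above show the two halves of that driver's TLB maintenance: a plain register write for the invalidate itself, and a wrapper that only runs it while runtime PM reports the device as powered. Below is a minimal user-space sketch of that "call only if powered" guard; struct fake_dev, get_if_in_use() and put() are illustrative stand-ins for the kernel's struct device and pm_runtime_get_if_in_use()/pm_runtime_put(), not the real API.

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for the runtime-PM helpers: a usage counter models the
 * "device currently powered and in use" state. */
struct fake_dev {
	int usage_count;	/* > 0 means powered/in use */
};

static bool get_if_in_use(struct fake_dev *dev)
{
	if (dev->usage_count <= 0)
		return false;	/* not powered: caller must skip the HW access */
	dev->usage_count++;	/* keep it powered while registers are touched */
	return true;
}

static void put(struct fake_dev *dev)
{
	dev->usage_count--;
}

/* Models call_if_imgu_is_powered(): run @func only while power is held. */
static void call_if_powered(struct fake_dev *dev, void (*func)(struct fake_dev *))
{
	if (!get_if_in_use(dev))
		return;
	func(dev);
	put(dev);
}

static void tlb_invalidate(struct fake_dev *dev)
{
	/* the driver does: writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE); */
	printf("TLB invalidate issued (usage=%d)\n", dev->usage_count);
}

int main(void)
{
	struct fake_dev dev = { .usage_count = 1 };

	call_if_powered(&dev, tlb_invalidate);	/* runs */
	dev.usage_count = 0;
	call_if_powered(&dev, tlb_invalidate);	/* silently skipped */
	return 0;
}
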
/Linux-v5.10/drivers/gpu/drm/nouveau/nvif/ |
D | mmu.c |
    22  #include <nvif/mmu.h>
    28  nvif_mmu_dtor(struct nvif_mmu *mmu)  in nvif_mmu_dtor() argument
    30  kfree(mmu->kind);  in nvif_mmu_dtor()
    31  kfree(mmu->type);  in nvif_mmu_dtor()
    32  kfree(mmu->heap);  in nvif_mmu_dtor()
    33  nvif_object_dtor(&mmu->object);  in nvif_mmu_dtor()
    38  struct nvif_mmu *mmu)  in nvif_mmu_ctor() argument
    50  mmu->heap = NULL;  in nvif_mmu_ctor()
    51  mmu->type = NULL;  in nvif_mmu_ctor()
    52  mmu->kind = NULL;  in nvif_mmu_ctor()
    [all …]

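The nvif/mmu.c excerpt pairs a constructor that clears the heap/type/kind arrays to NULL before doing anything else with a destructor that frees them unconditionally. A small sketch of that ctor/dtor discipline follows, using plain malloc()/free() and an invented struct mmu_info rather than the nvif types.

#include <stdlib.h>

/* Minimal model: the ctor NULLs every optional array up front so the dtor
 * can free them unconditionally, even if the ctor failed halfway through. */
struct mmu_info {
	unsigned char *heap;
	unsigned char *type;
	unsigned char *kind;
};

void mmu_info_dtor(struct mmu_info *mmu)
{
	/* free(NULL) is a no-op, mirroring kfree(NULL) in the kernel */
	free(mmu->kind);
	free(mmu->type);
	free(mmu->heap);
}

int mmu_info_ctor(struct mmu_info *mmu, size_t heaps, size_t types)
{
	mmu->heap = NULL;
	mmu->type = NULL;
	mmu->kind = NULL;	/* allocated later, maybe never */

	mmu->heap = malloc(heaps);
	if (!mmu->heap)
		goto err;
	mmu->type = malloc(types);
	if (!mmu->type)
		goto err;
	return 0;
err:
	mmu_info_dtor(mmu);	/* safe: every pointer is valid or NULL */
	return -1;
}
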
/Linux-v5.10/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
D | Kbuild |
    2   nvkm-y += nvkm/subdev/mmu/base.o
    3   nvkm-y += nvkm/subdev/mmu/nv04.o
    4   nvkm-y += nvkm/subdev/mmu/nv41.o
    5   nvkm-y += nvkm/subdev/mmu/nv44.o
    6   nvkm-y += nvkm/subdev/mmu/nv50.o
    7   nvkm-y += nvkm/subdev/mmu/g84.o
    8   nvkm-y += nvkm/subdev/mmu/mcp77.o
    9   nvkm-y += nvkm/subdev/mmu/gf100.o
    10  nvkm-y += nvkm/subdev/mmu/gk104.o
    11  nvkm-y += nvkm/subdev/mmu/gk20a.o
    [all …]

D | base.c |
    42   nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)  in nvkm_mmu_ptp_put() argument
    51   list_add(&ptp->head, &mmu->ptp.list);  in nvkm_mmu_ptp_put()
    56   nvkm_mmu_ptc_put(mmu, force, &ptp->pt);  in nvkm_mmu_ptp_put()
    65   nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)  in nvkm_mmu_ptp_get() argument
    74   ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);  in nvkm_mmu_ptp_get()
    82   ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);  in nvkm_mmu_ptp_get()
    93   list_add(&ptp->head, &mmu->ptp.list);  in nvkm_mmu_ptp_get()
    120  nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)  in nvkm_mmu_ptc_find() argument
    124  list_for_each_entry(ptc, &mmu->ptc.list, head) {  in nvkm_mmu_ptc_find()
    134  list_add(&ptc->head, &mmu->ptc.list);  in nvkm_mmu_ptc_find()
    [all …]

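The nvkm base.c hits outline a page-table allocation cache: the *_get() paths try to reuse an entry from a free list before allocating, and the *_put() paths return entries to the list instead of freeing them. Below is a toy, single-threaded model of that caching idea; struct pt, pt_get() and pt_put() are invented names, and a plain singly linked list stands in for the kernel's list_head machinery.

#include <stdlib.h>
#include <string.h>

struct pt {
	struct pt *next;	/* free-list linkage */
	size_t size;
	void *mem;
};

static struct pt *pt_free_list;

/* Reuse a cached entry of the right size when possible, else allocate. */
struct pt *pt_get(size_t size, int zero)
{
	struct pt **pp, *pt;

	for (pp = &pt_free_list; (pt = *pp); pp = &pt->next) {
		if (pt->size == size) {
			*pp = pt->next;		/* unlink from the free list */
			if (zero)
				memset(pt->mem, 0, size);
			return pt;
		}
	}

	pt = calloc(1, sizeof(*pt));
	if (!pt)
		return NULL;
	pt->size = size;
	pt->mem = zero ? calloc(1, size) : malloc(size);
	if (!pt->mem) {
		free(pt);
		return NULL;
	}
	return pt;
}

/* Recycle the entry instead of freeing it. */
void pt_put(struct pt *pt)
{
	pt->next = pt_free_list;
	pt_free_list = pt;
}
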
D | ummu.c |
    35  struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;  in nvkm_ummu_sclass() local
    37  if (mmu->func->mem.user.oclass && oclass->client->super) {  in nvkm_ummu_sclass()
    39  oclass->base = mmu->func->mem.user;  in nvkm_ummu_sclass()
    45  if (mmu->func->vmm.user.oclass) {  in nvkm_ummu_sclass()
    47  oclass->base = mmu->func->vmm.user;  in nvkm_ummu_sclass()
    59  struct nvkm_mmu *mmu = ummu->mmu;  in nvkm_ummu_heap() local
    67  if ((index = args->v0.index) >= mmu->heap_nr)  in nvkm_ummu_heap()
    69  args->v0.size = mmu->heap[index].size;  in nvkm_ummu_heap()
    79  struct nvkm_mmu *mmu = ummu->mmu;  in nvkm_ummu_type() local
    87  if ((index = args->v0.index) >= mmu->type_nr)  in nvkm_ummu_type()
    [all …]

/Linux-v5.10/drivers/staging/media/atomisp/pci/mmu/ |
D | isp_mmu.c |
    21  * ISP MMU management wrap code
    41  #include "mmu/isp_mmu.h"
    51  * that are only 32-bit capable(e.g. the ISP MMU).
    57  static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
    79  static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu,  in isp_pte_to_pgaddr() argument
    82  return mmu->driver->pte_to_phys(mmu, pte);  in isp_pte_to_pgaddr()
    85  static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu,  in isp_pgaddr_to_pte_valid() argument
    88  unsigned int pte = mmu->driver->phys_to_pte(mmu, phys);  in isp_pgaddr_to_pte_valid()
    90  return (unsigned int)(pte | ISP_PTE_VALID_MASK(mmu));  in isp_pgaddr_to_pte_valid()
    97  static phys_addr_t alloc_page_table(struct isp_mmu *mmu)  in alloc_page_table() argument
    [all …]

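The atomisp isp_mmu.c excerpt shows address/PTE conversion being delegated to per-chip driver callbacks, with the generic wrapper ORing in the driver's "valid" bit. A compact model of that indirection is sketched below; all type and function names (struct isp_mmu_model, pfn_phys_to_pte(), ...) are illustrative, and the example "chip" simply stores the 4 KiB page frame number in bits 31:1 with bit 0 as the valid bit.

#include <stdint.h>

typedef uint64_t phys_addr_t;

struct isp_mmu_model;

struct isp_mmu_driver_model {
	unsigned int pte_valid_mask;
	unsigned int (*phys_to_pte)(struct isp_mmu_model *mmu, phys_addr_t phys);
	phys_addr_t (*pte_to_phys)(struct isp_mmu_model *mmu, unsigned int pte);
};

struct isp_mmu_model {
	const struct isp_mmu_driver_model *driver;
};

/* Generic wrappers: all chip knowledge lives behind the callbacks. */
phys_addr_t pte_to_pgaddr(struct isp_mmu_model *mmu, unsigned int pte)
{
	return mmu->driver->pte_to_phys(mmu, pte);
}

unsigned int pgaddr_to_pte_valid(struct isp_mmu_model *mmu, phys_addr_t phys)
{
	unsigned int pte = mmu->driver->phys_to_pte(mmu, phys);

	/* mark the entry present so the hardware honours it */
	return pte | mmu->driver->pte_valid_mask;
}

/* Example chip driver: pte_valid_mask would be 0x1 for this layout. */
unsigned int pfn_phys_to_pte(struct isp_mmu_model *mmu, phys_addr_t phys)
{
	(void)mmu;
	return (unsigned int)((phys >> 12) << 1);
}

phys_addr_t pfn_pte_to_phys(struct isp_mmu_model *mmu, unsigned int pte)
{
	(void)mmu;
	return (phys_addr_t)(pte >> 1) << 12;
}
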
/Linux-v5.10/drivers/iommu/ |
D | ipmmu-vmsa.c |
    74   struct ipmmu_vmsa_device *mmu;  member
    102  /* MMU "context" registers */
    152  static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)  in ipmmu_is_root() argument
    154  return mmu->root == mmu;  in ipmmu_is_root()
    159  struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);  in __ipmmu_check_device() local
    162  if (ipmmu_is_root(mmu))  in __ipmmu_check_device()
    163  *rootp = mmu;  in __ipmmu_check_device()
    180  static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)  in ipmmu_read() argument
    182  return ioread32(mmu->base + offset);  in ipmmu_read()
    185  static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,  in ipmmu_write() argument
    [all …]

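The ipmmu-vmsa.c hits show two small but common idioms: thin ipmmu_read()/ipmmu_write() accessors that funnel every MMIO access through one place, and a "root instance" check done by comparing a device against its own root pointer. A hedged user-space model of both follows; struct vmsa_dev and the accessor names are stand-ins, and a volatile array replaces the real ioread32()/iowrite32() on an ioremapped register window.

#include <stdint.h>
#include <stdbool.h>

struct vmsa_dev {
	volatile uint32_t *base;	/* mapped register window */
	struct vmsa_dev *root;		/* instance owning the shared context */
};

/* All register traffic goes through these two helpers, so call sites only
 * ever deal in symbolic byte offsets. */
uint32_t reg_read(struct vmsa_dev *mmu, unsigned int offset)
{
	return mmu->base[offset / 4];
}

void reg_write(struct vmsa_dev *mmu, unsigned int offset, uint32_t data)
{
	mmu->base[offset / 4] = data;
}

/* Mirrors ipmmu_is_root(): a device is the root when it points at itself. */
bool is_root(struct vmsa_dev *mmu)
{
	return mmu->root == mmu;
}
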
/Linux-v5.10/drivers/staging/media/atomisp/include/mmu/ |
D | isp_mmu.h |
    21   * ISP MMU driver for classic two-level page tables
    88   unsigned int (*get_pd_base)(struct isp_mmu *mmu, phys_addr_t pd_base);
    100  void (*tlb_flush_range)(struct isp_mmu *mmu,
    102  void (*tlb_flush_all)(struct isp_mmu *mmu);
    103  unsigned int (*phys_to_pte)(struct isp_mmu *mmu,
    105  phys_addr_t (*pte_to_phys)(struct isp_mmu *mmu,
    120  #define ISP_PTE_VALID_MASK(mmu) \  argument
    121  ((mmu)->driver->pte_valid_mask)
    123  #define ISP_PTE_VALID(mmu, pte) \  argument
    124  ((pte) & ISP_PTE_VALID_MASK(mmu))
    [all …]

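isp_mmu.h is the per-chip callback table behind the code above: TLB flush hooks plus PTE conversion and a valid-bit mask. One design point worth illustrating is the optional range flush: a wrapper can prefer tlb_flush_range() and fall back to tlb_flush_all() when the hardware only supports a full flush. The sketch below uses invented struct and function names, not the actual header.

struct mmu_dev;

struct mmu_tlb_ops {
	void (*tlb_flush_range)(struct mmu_dev *mmu, unsigned int start,
				unsigned int size);
	void (*tlb_flush_all)(struct mmu_dev *mmu);
};

struct mmu_dev {
	const struct mmu_tlb_ops *ops;
};

/* Flush a virtual range if the chip supports it, else flush everything. */
void mmu_flush_range(struct mmu_dev *mmu, unsigned int start, unsigned int size)
{
	if (mmu->ops->tlb_flush_range)
		mmu->ops->tlb_flush_range(mmu, start, size);
	else if (mmu->ops->tlb_flush_all)
		mmu->ops->tlb_flush_all(mmu);	/* coarse but always correct */
}
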
/Linux-v5.10/arch/x86/kernel/ |
D | paravirt.c |
    334  /* Mmu ops. */
    335  .mmu.flush_tlb_user = native_flush_tlb_local,
    336  .mmu.flush_tlb_kernel = native_flush_tlb_global,
    337  .mmu.flush_tlb_one_user = native_flush_tlb_one_user,
    338  .mmu.flush_tlb_others = native_flush_tlb_others,
    339  .mmu.tlb_remove_table =
    342  .mmu.exit_mmap = paravirt_nop,
    345  .mmu.read_cr2 = __PV_IS_CALLEE_SAVE(native_read_cr2),
    346  .mmu.write_cr2 = native_write_cr2,
    347  .mmu.read_cr3 = __native_read_cr3,
    [all …]

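The paravirt.c hits are a table of MMU operations pre-loaded with the native implementations; the paravirt.h wrappers further down in this listing dispatch through the same table. The underlying pattern is just a struct of function pointers filled with designated initializers, as in this stand-alone sketch (names are illustrative; real paravirt adds patching and calling-convention annotations on top).

#include <stdio.h>

struct mmu_ops {
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*write_cr3)(unsigned long val);
};

static void native_flush_user(void)   { puts("native: flush user TLB"); }
static void native_flush_kernel(void) { puts("native: flush kernel TLB"); }
static void native_write_cr3(unsigned long val) { printf("native: cr3=%lx\n", val); }

/* Designated initializers keep the table readable, like the .mmu.* lines. */
static struct mmu_ops pv_mmu_ops = {
	.flush_tlb_user   = native_flush_user,
	.flush_tlb_kernel = native_flush_kernel,
	.write_cr3        = native_write_cr3,
};

int main(void)
{
	/* call sites never name the backend; a hypervisor could repoint
	 * individual entries before this runs */
	pv_mmu_ops.flush_tlb_user();
	pv_mmu_ops.write_cr3(0x1000);
	return 0;
}
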
/Linux-v5.10/arch/m68k/ |
D | Kconfig.cpu |
    6   default M68KCLASSIC if MMU
    7   default COLDFIRE if !MMU
    39  depends on !MMU
    51  a paging MMU.
    61  System-On-Chip parts, and does not contain a paging MMU.
    65  depends on MMU
    71  68851 MMU (Memory Management Unit) to run Linux/m68k, except on the
    76  depends on MMU && !MMU_SUN3
    82  work, as it does not include an MMU (Memory Management Unit).
    86  depends on MMU && !MMU_SUN3
    [all …]

D | Kconfig |
    7   select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE
    14  select DMA_DIRECT_REMAP if HAS_DMA && MMU && !COLDFIRE
    19  select GENERIC_STRNCPY_FROM_USER if MMU
    20  select GENERIC_STRNLEN_USER if MMU
    21  select HAVE_AOUT if MMU
    24  select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
    28  select MMU_GATHER_NO_RANGE if MMU
    31  select NO_DMA if !MMU && !COLDFIRE
    35  select UACCESS_MEMCPY if !MMU
    78  config MMU  config
    [all …]

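This and the other arch Kconfig hits in the listing (arm, riscv, sh, xtensa) mostly gate options on the MMU symbol. At the source level that usually surfaces as conditional compilation on CONFIG_MMU; the following schematic example is not taken from the tree and only shows the compile-time switch.

#include <stdio.h>

/* Pick a memory-management strategy at build time based on CONFIG_MMU,
 * which the Kconfig files above define or select per machine. */
#ifdef CONFIG_MMU
#define VM_STRATEGY "demand paging with per-process address spaces"
#else
#define VM_STRATEGY "single flat address space, no page protection"
#endif

int main(void)
{
	printf("memory model: %s\n", VM_STRATEGY);
	return 0;
}
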
D | Kconfig.machine |
    8   depends on MMU
    9   select MMU_MOTOROLA if MMU
    17  depends on MMU
    18  select MMU_MOTOROLA if MMU
    28  depends on MMU
    29  select MMU_MOTOROLA if MMU
    41  depends on MMU
    42  select MMU_MOTOROLA if MMU
    49  depends on MMU
    50  select MMU_MOTOROLA if MMU
    [all …]

/Linux-v5.10/drivers/gpu/drm/panfrost/ |
D | panfrost_mmu.c |
    31   /* Wait for the MMU status to indicate there is no active command, in  in wait_ready()
    46   /* write AS_COMMAND when MMU is ready to accept another command */  in write_cmd()
    92   /* Run the MMU operation */  in mmu_hw_do_operation_locked()
    100  struct panfrost_mmu *mmu,  in mmu_hw_do_operation() argument
    106  ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);  in mmu_hw_do_operation()
    111  static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)  in panfrost_mmu_enable() argument
    113  int as_nr = mmu->as;  in panfrost_mmu_enable()
    114  struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;  in panfrost_mmu_enable()
    145  u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)  in panfrost_mmu_as_get() argument
    151  as = mmu->as;  in panfrost_mmu_as_get()
    [all …]

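The panfrost_mmu.c excerpt shows the usual _locked()/unlocked split: mmu_hw_do_operation() takes a lock and then calls mmu_hw_do_operation_locked(), which assumes the lock is already held. Below is a minimal sketch of that split using a pthread mutex; the names, types and the empty body are placeholders rather than the driver's actual register programming.

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t as_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold as_lock; the real code programs address-space registers
 * and waits for the command to complete here. */
int mmu_op_locked(int as, unsigned long iova, size_t size, int op)
{
	(void)as; (void)iova; (void)size; (void)op;
	return 0;
}

/* Public wrapper: the only job is to bracket the _locked helper. */
int mmu_op(int as, unsigned long iova, size_t size, int op)
{
	int ret;

	pthread_mutex_lock(&as_lock);
	ret = mmu_op_locked(as, iova, size, op);
	pthread_mutex_unlock(&as_lock);
	return ret;
}
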
/Linux-v5.10/arch/arm/mm/ |
D | Kconfig |
    11   depends on !MMU
    30   select CPU_COPY_V4WT if MMU
    34   select CPU_TLB_V4WT if MMU
    37   MMU built around an ARM7TDMI core.
    45   depends on !MMU
    63   depends on !MMU
    82   select CPU_COPY_V4WB if MMU
    86   select CPU_TLB_V4WBI if MMU
    101  select CPU_COPY_V4WB if MMU
    105  select CPU_TLB_V4WBI if MMU
    [all …]

/Linux-v5.10/arch/riscv/ |
D | Kconfig |
    19  select ARCH_HAS_DEBUG_VIRTUAL if MMU
    28  select ARCH_HAS_STRICT_KERNEL_RWX if MMU
    31  select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
    35  select CLINT_TIMER if !MMU
    47  select GENERIC_PTDUMP if MMU
    50  select GENERIC_STRNCPY_FROM_USER if MMU
    51  select GENERIC_STRNLEN_USER if MMU
    52  select GENERIC_TIME_VSYSCALL if MMU && 64BIT
    57  select HAVE_ARCH_KASAN if MMU && 64BIT
    60  select HAVE_ARCH_MMAP_RND_BITS if MMU
    [all …]

/Linux-v5.10/drivers/gpu/drm/msm/ |
D | msm_mmu.h |
    13  void (*detach)(struct msm_mmu *mmu);
    14  int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
    16  int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
    17  void (*destroy)(struct msm_mmu *mmu);
    34  static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,  in msm_mmu_init() argument
    37  mmu->dev = dev;  in msm_mmu_init()
    38  mmu->funcs = funcs;  in msm_mmu_init()
    39  mmu->type = type;  in msm_mmu_init()
    45  static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,  in msm_mmu_set_fault_handler() argument
    48  mmu->arg = arg;  in msm_mmu_set_fault_handler()
    [all …]

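msm_mmu.h defines the GPU-side MMU abstraction: a table of map/unmap/destroy callbacks plus a small init helper that wires device, funcs and type into the base object. A condensed, self-contained model of that interface is below; the types are simplified stand-ins (no struct device or sg_table), so treat it as the shape of the header rather than the header itself.

#include <stddef.h>
#include <stdint.h>

struct model_mmu;

/* Backend callback table, analogous to struct msm_mmu_funcs. */
struct model_mmu_funcs {
	int  (*map)(struct model_mmu *mmu, uint64_t iova, void *pages, size_t len);
	int  (*unmap)(struct model_mmu *mmu, uint64_t iova, size_t len);
	void (*destroy)(struct model_mmu *mmu);
};

struct model_mmu {
	const struct model_mmu_funcs *funcs;
	void *dev;
	int type;
};

/* Mirrors msm_mmu_init(): every backend calls this to wire up the base. */
static inline void model_mmu_init(struct model_mmu *mmu, void *dev,
				  const struct model_mmu_funcs *funcs, int type)
{
	mmu->dev = dev;
	mmu->funcs = funcs;
	mmu->type = type;
}
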
D | msm_iommu.c |
    27   static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)  in to_pagetable() argument
    29   return container_of(mmu, struct msm_iommu_pagetable, base);  in to_pagetable()
    32   static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,  in msm_iommu_pagetable_unmap() argument
    35   struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);  in msm_iommu_pagetable_unmap()
    51   static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,  in msm_iommu_pagetable_map() argument
    54   struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);  in msm_iommu_pagetable_map()
    68   msm_iommu_pagetable_unmap(mmu, iova, mapped);  in msm_iommu_pagetable_map()
    82   static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)  in msm_iommu_pagetable_destroy() argument
    84   struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);  in msm_iommu_pagetable_destroy()
    100  int msm_iommu_pagetable_params(struct msm_mmu *mmu,  in msm_iommu_pagetable_params() argument
    [all …]

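The msm_iommu.c hits rely on container_of() to get from the generic struct msm_mmu back to the backend's msm_iommu_pagetable that embeds it. The sketch below shows the same downcast with simplified struct names and a local offsetof-based container_of definition (in the kernel the macro comes from the core headers).

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_mmu {
	int type;
};

struct iommu_pagetable {
	struct base_mmu base;	/* embedded generic object */
	void *pgtbl_ops;	/* backend-private state */
};

/* Recover the backend object from the base pointer handed around by
 * generic code, exactly the role to_pagetable() plays above. */
struct iommu_pagetable *to_pagetable(struct base_mmu *mmu)
{
	return container_of(mmu, struct iommu_pagetable, base);
}
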
D | msm_gem_vma.c |
    18   if (aspace->mmu)  in msm_gem_address_space_destroy()
    19   aspace->mmu->funcs->destroy(aspace->mmu);  in msm_gem_address_space_destroy()
    54   if (aspace->mmu)  in msm_gem_purge_vma()
    55   aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);  in msm_gem_purge_vma()
    87   if (aspace && aspace->mmu)  in msm_gem_map_vma()
    88   ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,  in msm_gem_map_vma()
    143  msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,  in msm_gem_address_space_create() argument
    148  if (IS_ERR(mmu))  in msm_gem_address_space_create()
    149  return ERR_CAST(mmu);  in msm_gem_address_space_create()
    157  aspace->mmu = mmu;  in msm_gem_address_space_create()

/Linux-v5.10/arch/arc/mm/ |
D | tlb.c |
    18   * -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
    21   * vineetg: April 2011 : Preparing for MMU V3
    22   * -MMU v2/v3 BCRs decoded differently
    27   * = walks MMU only if range spans < 32 entries, as opposed to 256
    30   * -Changes related to MMU v2 (Rel 4.8)
    33   * -In TLB Flush operations (Metal Fix MMU) there is a explicit command to
    58   #include <asm/mmu.h>
    60   /* Need for ARC MMU v2
    62   * ARC700 MMU-v1 had a Joint-TLB for Code and Data and is 2 way set-assoc.
    126  /* MMU v2 introduced the uTLB Flush command.  in utlb_invalidate()
    [all …]

/Linux-v5.10/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/ |
D | mmu_public.h |
    23  /*! Set the page table base index of MMU[ID]
    25  \param ID[in] MMU identifier
    28  \return none, MMU[ID].page_table_base_index = base_index
    34  /*! Get the page table base index of MMU[ID]
    36  \param ID[in] MMU identifier
    39  \return MMU[ID].page_table_base_index
    44  /*! Invalidate the page table cache of MMU[ID]
    46  \param ID[in] MMU identifier
    59  /*! Write to a control register of MMU[ID]
    61  \param ID[in] MMU identifier
    [all …]

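mmu_public.h documents a small device API: set or read back the page table base index of MMU[ID], and invalidate its page-table cache after changing it. The model below mirrors that shape in plain C so the call order is visible; every name here (model_mmu_set_page_table_base_index(), hrt_data, the backing array) is a stand-in for the hive_isp_css implementation, which really writes MMU registers.

#include <stdint.h>
#include <stdio.h>

typedef unsigned int mmu_id_t;
typedef uint32_t hrt_data;

static hrt_data pt_base_index[4];	/* one slot per MMU instance, illustrative */

void model_mmu_set_page_table_base_index(mmu_id_t id, hrt_data base)
{
	pt_base_index[id] = base;	/* real code writes an MMU register */
}

hrt_data model_mmu_get_page_table_base_index(mmu_id_t id)
{
	return pt_base_index[id];
}

void model_mmu_invalidate_cache(mmu_id_t id)
{
	printf("MMU[%u]: page table cache invalidated\n", id);
}

int main(void)
{
	model_mmu_set_page_table_base_index(0, 0x1234);
	/* after changing the base, stale translations must be dropped */
	model_mmu_invalidate_cache(0);
	printf("MMU[0] base index = 0x%x\n",
	       model_mmu_get_page_table_base_index(0));
	return 0;
}
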
/Linux-v5.10/arch/sh/mm/ |
D | Kconfig |
    4   config MMU  config
    9   Some SH processors (such as SH-2/SH-2A) lack an MMU. In order to
    12  On other systems (such as the SH-3 and 4) where an MMU exists,
    14  MMU implicitly switched off.
    18  default "0x80000000" if MMU
    28  default "14" if !MMU
    78  default !MMU
    82  depends on MMU && CPU_SH4A && !CPU_SH4AL_DSP
    92  depends on (CPU_SHX2 || CPU_SHX3) && MMU
    96  depends on MMU && (CPU_SH3 || CPU_SH4)
    [all …]

/Linux-v5.10/arch/x86/include/asm/ |
D | paravirt.h |
    58   PVOP_VCALL0(mmu.flush_tlb_user);  in __flush_tlb_local()
    63   PVOP_VCALL0(mmu.flush_tlb_kernel);  in __flush_tlb_global()
    68   PVOP_VCALL1(mmu.flush_tlb_one_user, addr);  in __flush_tlb_one_user()
    74   PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);  in __flush_tlb_others()
    79   PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);  in paravirt_tlb_remove_table()
    84   PVOP_VCALL1(mmu.exit_mmap, mm);  in paravirt_arch_exit_mmap()
    125  return PVOP_CALLEE0(unsigned long, mmu.read_cr2);  in read_cr2()
    130  PVOP_VCALL1(mmu.write_cr2, x);  in write_cr2()
    135  return PVOP_CALL0(unsigned long, mmu.read_cr3);  in __read_cr3()
    140  PVOP_VCALL1(mmu.write_cr3, x);  in write_cr3()
    [all …]

/Linux-v5.10/arch/xtensa/ |
D | Kconfig |
    5   select ARCH_HAS_BINFMT_FLAT if !MMU
    6   select ARCH_HAS_DMA_PREP_COHERENT if MMU
    7   select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU
    8   select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU
    9   select ARCH_HAS_DMA_SET_UNCACHED if MMU
    17  select DMA_REMAP if MMU
    26  select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
    33  select HAVE_FUTEX_CMPXCHG if !MMU
    79  config MMU  config
    97  select MMU
    [all …]

/Linux-v5.10/Documentation/devicetree/bindings/iommu/ |
D | samsung,sysmmu.yaml |
    7   title: Samsung Exynos IOMMU H/W, System MMU (System Memory Management Unit)
    17  System MMU is an IOMMU and supports identical translation table format to
    19  permissions, shareability and security protection. In addition, System MMU has
    25  master), but one System MMU can handle transactions from only one peripheral
    26  device. The relation between a System MMU and the peripheral device needs to be
    31  * MFC has one System MMU on its left and right bus.
    32  * FIMD in Exynos5420 has one System MMU for window 0 and 4, the other system MMU
    34  * M2M Scalers and G2D in Exynos5420 has one System MMU on the read channel and
    35  the other System MMU on the write channel.
    37  For information on assigning System MMU controller to its peripheral devices,
    [all …]

/Linux-v5.10/Documentation/admin-guide/mm/ |
D | nommu-mmap.rst |
    2   No-MMU memory mapping support
    5   The kernel has limited support for memory mapping under no-MMU conditions, such
    16  The behaviour is similar between the MMU and no-MMU cases, but not identical;
    21  In the MMU case: VM regions backed by arbitrary pages; copy-on-write
    24  In the no-MMU case: VM regions backed by arbitrary contiguous runs of
    30  shared across fork() or clone() without CLONE_VM in the MMU case. Since
    31  the no-MMU case doesn't support these, behaviour is identical to
    36  In the MMU case: VM regions backed by pages read from file; changes to
    39  In the no-MMU case:
    56  are visible in other processes (no MMU protection), but should not
    [all …]

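nommu-mmap.rst spells out how mmap() semantics differ without an MMU: private file mappings are typically backed by a contiguous copy of the file, and there is no page protection between processes. The small program below exercises the common-denominator case that behaves the same either way, a read-only MAP_PRIVATE file mapping; the file path is arbitrary and error handling is kept minimal.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);
	if (fd < 0)
		return 1;

	/* On a no-MMU kernel this typically allocates a contiguous buffer and
	 * copies the file into it; on an MMU kernel it maps the page cache.
	 * Either way, MAP_PRIVATE means changes are never written back. */
	char *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) {
		close(fd);
		return 1;
	}

	printf("first byte: %c\n", p[0]);
	munmap(p, 4096);
	close(fd);
	return 0;
}
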