/Linux-v4.19/drivers/gpu/drm/nouveau/nvif/
D | mmu.c |
     28  nvif_mmu_fini(struct nvif_mmu *mmu)    in nvif_mmu_fini() argument
     30  kfree(mmu->kind);    in nvif_mmu_fini()
     31  kfree(mmu->type);    in nvif_mmu_fini()
     32  kfree(mmu->heap);    in nvif_mmu_fini()
     33  nvif_object_fini(&mmu->object);    in nvif_mmu_fini()
     37  nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)    in nvif_mmu_init() argument
     49  mmu->heap = NULL;    in nvif_mmu_init()
     50  mmu->type = NULL;    in nvif_mmu_init()
     51  mmu->kind = NULL;    in nvif_mmu_init()
     54  &mmu->object);    in nvif_mmu_init()
    [all …]
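The hits above show a pattern worth noting: nvif_mmu_init() points mmu->heap, mmu->type and mmu->kind at NULL before anything can fail, and nvif_mmu_fini() simply kfree()s all three; kfree(NULL) is a no-op, so teardown is safe from any partially initialised state. A minimal userspace sketch of the same idea (names hypothetical, free() standing in for kfree()):

    #include <stdlib.h>

    struct tables {
        unsigned char *heap, *type, *kind;
    };

    /* NULL everything first so a failure at any step can just
     * call tables_fini() without tracking how far init got. */
    static int tables_init(struct tables *t)
    {
        t->heap = t->type = t->kind = NULL;
        if (!(t->heap = malloc(16)) ||
            !(t->type = malloc(16)) ||
            !(t->kind = malloc(16)))
            return -1;    /* caller runs tables_fini() */
        return 0;
    }

    /* free(NULL) is a no-op, like kfree(NULL): no NULL checks needed. */
    static void tables_fini(struct tables *t)
    {
        free(t->kind);
        free(t->type);
        free(t->heap);
    }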
D | mem.c |
     28  nvif_mem_init_map(struct nvif_mmu *mmu, u8 type, u64 size, struct nvif_mem *mem)    in nvif_mem_init_map() argument
     30  int ret = nvif_mem_init(mmu, mmu->mem, NVIF_MEM_MAPPABLE | type, 0,    in nvif_mem_init_map()
     47  nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,    in nvif_mem_init_type() argument
     70  ret = nvif_object_init(&mmu->object, 0, oclass, args,    in nvif_mem_init_type()
     73  mem->type = mmu->type[type].type;    in nvif_mem_init_type()
     86  nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,    in nvif_mem_init() argument
     93  for (i = 0; ret && i < mmu->type_nr; i++) {    in nvif_mem_init()
     94  if ((mmu->type[i].type & type) == type) {    in nvif_mem_init()
     95  ret = nvif_mem_init_type(mmu, oclass, i, page, size,    in nvif_mem_init()
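nvif_mem_init() (lines 86-95) walks mmu->type[] and takes the first type whose capability bits are a superset of the caller's request, the test being (mmu->type[i].type & type) == type, retrying until an allocation succeeds. A self-contained sketch of that superset match (flag names hypothetical):

    #include <stdio.h>

    #define MEM_MAPPABLE 0x01
    #define MEM_COHERENT 0x02
    #define MEM_HOST     0x04

    /* First index whose capabilities contain every requested bit,
     * or -1 -- the same test as in nvif_mem_init(). */
    static int find_type(const unsigned char *caps, int nr, unsigned char want)
    {
        for (int i = 0; i < nr; i++)
            if ((caps[i] & want) == want)
                return i;
        return -1;
    }

    int main(void)
    {
        const unsigned char caps[] = {
            MEM_MAPPABLE,                           /* type 0 */
            MEM_MAPPABLE | MEM_HOST | MEM_COHERENT, /* type 1 */
        };
        /* Wants both bits, so type 0 is skipped: prints 1. */
        printf("%d\n", find_type(caps, 2, MEM_MAPPABLE | MEM_COHERENT));
        return 0;
    }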
/Linux-v4.19/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
D | base.c |
     42  nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)    in nvkm_mmu_ptp_put() argument
     51  list_add(&ptp->head, &mmu->ptp.list);    in nvkm_mmu_ptp_put()
     56  nvkm_mmu_ptc_put(mmu, force, &ptp->pt);    in nvkm_mmu_ptp_put()
     65  nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)    in nvkm_mmu_ptp_get() argument
     74  ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);    in nvkm_mmu_ptp_get()
     82  ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);    in nvkm_mmu_ptp_get()
     93  list_add(&ptp->head, &mmu->ptp.list);    in nvkm_mmu_ptp_get()
    120  nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)    in nvkm_mmu_ptc_find() argument
    124  list_for_each_entry(ptc, &mmu->ptc.list, head) {    in nvkm_mmu_ptc_find()
    134  list_add(&ptc->head, &mmu->ptc.list);    in nvkm_mmu_ptc_find()
    [all …]
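base.c recycles page-table allocations: nvkm_mmu_ptp_get() pops a cached entry with list_first_entry_or_null() and only allocates when the list is empty, while the _put() side pushes freed tables back with list_add(). A standalone sketch of that recycle-before-allocate cache using a singly linked free list (names hypothetical):

    #include <stdlib.h>

    struct pt {
        struct pt *next;    /* free-list linkage */
        char data[4096];
    };

    static struct pt *free_list;

    /* Reuse a cached table when one exists, else allocate --
     * the shape of nvkm_mmu_ptp_get(). */
    static struct pt *pt_get(void)
    {
        struct pt *pt = free_list;

        if (pt) {    /* like list_first_entry_or_null() */
            free_list = pt->next;
            return pt;
        }
        return calloc(1, sizeof(*pt));
    }

    /* Push back onto the cache instead of freeing, like the
     * list_add() in nvkm_mmu_ptp_put(). */
    static void pt_put(struct pt *pt)
    {
        pt->next = free_list;
        free_list = pt;
    }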
D | Kbuild |
      1  nvkm-y += nvkm/subdev/mmu/base.o
      2  nvkm-y += nvkm/subdev/mmu/nv04.o
      3  nvkm-y += nvkm/subdev/mmu/nv41.o
      4  nvkm-y += nvkm/subdev/mmu/nv44.o
      5  nvkm-y += nvkm/subdev/mmu/nv50.o
      6  nvkm-y += nvkm/subdev/mmu/g84.o
      7  nvkm-y += nvkm/subdev/mmu/mcp77.o
      8  nvkm-y += nvkm/subdev/mmu/gf100.o
      9  nvkm-y += nvkm/subdev/mmu/gk104.o
     10  nvkm-y += nvkm/subdev/mmu/gk20a.o
    [all …]
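The Kbuild file is just a list of objects appended to nvkm-y, one per supported chipset, so wiring in a new implementation is one source file plus one line of the same shape; for a hypothetical chip:

    nvkm-y += nvkm/subdev/mmu/mychip.o    # built from mychip.c alongside the others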
D | ummu.c |
     35  struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;    in nvkm_ummu_sclass() local
     37  if (mmu->func->mem.user.oclass && oclass->client->super) {    in nvkm_ummu_sclass()
     39  oclass->base = mmu->func->mem.user;    in nvkm_ummu_sclass()
     45  if (mmu->func->vmm.user.oclass) {    in nvkm_ummu_sclass()
     47  oclass->base = mmu->func->vmm.user;    in nvkm_ummu_sclass()
     59  struct nvkm_mmu *mmu = ummu->mmu;    in nvkm_ummu_heap() local
     67  if ((index = args->v0.index) >= mmu->heap_nr)    in nvkm_ummu_heap()
     69  args->v0.size = mmu->heap[index].size;    in nvkm_ummu_heap()
     79  struct nvkm_mmu *mmu = ummu->mmu;    in nvkm_ummu_type() local
     87  if ((index = args->v0.index) >= mmu->type_nr)    in nvkm_ummu_type()
    [all …]
D | mem.c |
     33  struct nvkm_mmu *mmu;    member
     88  dma_unmap_page(mem->mmu->subdev.device->dev,    in nvkm_mem_dtor()
    144  nvkm_mem_new_host(struct nvkm_mmu *mmu, int type, u8 page, u64 size,    in nvkm_mem_new_host() argument
    147  struct device *dev = mmu->subdev.device->dev;    in nvkm_mem_new_host()
    157  if ( (mmu->type[type].type & NVKM_MEM_COHERENT) &&    in nvkm_mem_new_host()
    158  !(mmu->type[type].type & NVKM_MEM_UNCACHED))    in nvkm_mem_new_host()
    169  mem->mmu = mmu;    in nvkm_mem_new_host()
    199  if (mmu->dma_bits > 32)    in nvkm_mem_new_host()
    209  mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev,    in nvkm_mem_new_host()
    224  nvkm_mem_new_type(struct nvkm_mmu *mmu, int type, u8 page, u64 size,    in nvkm_mem_new_type() argument
    [all …]
D | umem.c |
     73  struct nvkm_device *device = umem->mmu->subdev.device;    in nvkm_umem_unmap()
     91  struct nvkm_mmu *mmu = umem->mmu;    in nvkm_umem_map() local
    110  int ret = mmu->func->mem.umap(mmu, umem->memory, argv, argc,    in nvkm_umem_map()
    146  struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;    in nvkm_umem_new() local
    162  if (type >= mmu->type_nr)    in nvkm_umem_new()
    168  umem->mmu = mmu;    in nvkm_umem_new()
    169  umem->type = mmu->type[type].type;    in nvkm_umem_new()
    174  if (mmu->type[type].type & NVKM_MEM_MAPPABLE) {    in nvkm_umem_new()
    179  ret = nvkm_mem_new_type(mmu, type, page, size, argv, argc,    in nvkm_umem_new()
D | nv44.c |
     32  nv44_mmu_init(struct nvkm_mmu *mmu)    in nv44_mmu_init() argument
     34  struct nvkm_device *device = mmu->subdev.device;    in nv44_mmu_init()
     35  struct nvkm_memory *pt = mmu->vmm->pd->pt[0]->memory;    in nv44_mmu_init()
     46  nvkm_wr32(device, 0x100818, mmu->vmm->null);    in nv44_mmu_init()
     59  .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
D | memnv04.c |
     31  nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,    in nv04_mem_map() argument
     37  struct nvkm_device *device = mmu->subdev.device;    in nv04_mem_map()
     51  nv04_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,    in nv04_mem_new() argument
     62  if (mmu->type[type].type & NVKM_MEM_MAPPABLE)    in nv04_mem_new()
     67  return nvkm_ram_get(mmu->subdev.device, type, 0x01, page,    in nv04_mem_new()
D | nv41.c |
     32  nv41_mmu_init(struct nvkm_mmu *mmu)    in nv41_mmu_init() argument
     34  struct nvkm_device *device = mmu->subdev.device;    in nv41_mmu_init()
     35  nvkm_wr32(device, 0x100800, 0x00000002 | mmu->vmm->pd->pt[0]->addr);    in nv41_mmu_init()
     44  .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
D | memgf100.c |
     34  gf100_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,    in gf100_mem_map() argument
     42  struct nvkm_device *device = mmu->subdev.device;    in gf100_mem_map()
     69  gf100_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,    in gf100_mem_new() argument
     87  if (mmu->type[type].type & (NVKM_MEM_DISP | NVKM_MEM_COMP))    in gf100_mem_new()
     92  return nvkm_ram_get(mmu->subdev.device, type, 0x01, page,    in gf100_mem_new()
/Linux-v4.19/drivers/iommu/
D | ipmmu-vmsa.c |
     70  struct ipmmu_vmsa_device *mmu;    member
    224  static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)    in ipmmu_is_root() argument
    226  return mmu->root == mmu;    in ipmmu_is_root()
    231  struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);    in __ipmmu_check_device() local
    234  if (ipmmu_is_root(mmu))    in __ipmmu_check_device()
    235  *rootp = mmu;    in __ipmmu_check_device()
    252  static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)    in ipmmu_read() argument
    254  return ioread32(mmu->base + offset);    in ipmmu_read()
    257  static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,    in ipmmu_write() argument
    260  iowrite32(data, mmu->base + offset);    in ipmmu_write()
    [all …]
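ipmmu_read()/ipmmu_write() (lines 252-260) funnel every register access through one pair of helpers that apply mmu->base, which keeps MMIO traffic greppable and easy to instrument. A userspace stand-in for the same wrapper pattern (a plain volatile window replaces the __iomem pointer and ioread32()/iowrite32(); names hypothetical):

    #include <stdint.h>

    struct mydev {
        volatile uint32_t *base;    /* mapped register window */
    };

    /* offset is in bytes, as with ioread32(mmu->base + offset) on a
     * byte-granular __iomem pointer, hence the offset / 4 indexing. */
    static uint32_t dev_read(struct mydev *d, unsigned int offset)
    {
        return d->base[offset / 4];
    }

    static void dev_write(struct mydev *d, unsigned int offset, uint32_t data)
    {
        d->base[offset / 4] = data;
    }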
/Linux-v4.19/drivers/gpu/drm/etnaviv/
D | etnaviv_mmu.c |
    128  static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,    in etnaviv_iommu_remove_mapping() argument
    133  etnaviv_iommu_unmap(mmu, mapping->vram_node.start,    in etnaviv_iommu_remove_mapping()
    138  static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,    in etnaviv_iommu_find_iova() argument
    145  lockdep_assert_held(&mmu->lock);    in etnaviv_iommu_find_iova()
    153  ret = drm_mm_insert_node_in_range(&mmu->mm, node,    in etnaviv_iommu_find_iova()
    159  drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);    in etnaviv_iommu_find_iova()
    163  list_for_each_entry(free, &mmu->mappings, mmu_node) {    in etnaviv_iommu_find_iova()
    205  etnaviv_iommu_remove_mapping(mmu, m);    in etnaviv_iommu_find_iova()
    206  m->mmu = NULL;    in etnaviv_iommu_find_iova()
    222  int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,    in etnaviv_iommu_map_gem() argument
    [all …]
/Linux-v4.19/drivers/gpu/drm/msm/
D | msm_iommu.c |
     37  static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,    in msm_iommu_attach() argument
     40  struct msm_iommu *iommu = to_msm_iommu(mmu);    in msm_iommu_attach()
     43  pm_runtime_get_sync(mmu->dev);    in msm_iommu_attach()
     44  ret = iommu_attach_device(iommu->domain, mmu->dev);    in msm_iommu_attach()
     45  pm_runtime_put_sync(mmu->dev);    in msm_iommu_attach()
     50  static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,    in msm_iommu_detach() argument
     53  struct msm_iommu *iommu = to_msm_iommu(mmu);    in msm_iommu_detach()
     55  pm_runtime_get_sync(mmu->dev);    in msm_iommu_detach()
     56  iommu_detach_device(iommu->domain, mmu->dev);    in msm_iommu_detach()
     57  pm_runtime_put_sync(mmu->dev);    in msm_iommu_detach()
    [all …]
D | msm_mmu.h |
     24  int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt);
     25  void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
     26  int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
     28  int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
     30  void (*destroy)(struct msm_mmu *mmu);
     40  static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,    in msm_mmu_init() argument
     43  mmu->dev = dev;    in msm_mmu_init()
     44  mmu->funcs = funcs;    in msm_mmu_init()
     50  static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,    in msm_mmu_set_fault_handler() argument
     53  mmu->arg = arg;    in msm_mmu_set_fault_handler()
    [all …]
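msm_mmu.h shows the kernel's usual ops-table polymorphism: each backend (e.g. msm_iommu.c above) fills a struct msm_mmu_funcs with its attach/detach/map/unmap/destroy implementations, msm_mmu_init() binds the table to the object, and callers such as msm_gem_vma.c below dispatch through mmu->funcs->map() without knowing which backend they hold. A compilable sketch of the pattern (all names hypothetical):

    #include <stdio.h>

    struct mmu;

    /* Per-backend operations, like struct msm_mmu_funcs. */
    struct mmu_funcs {
        int  (*map)(struct mmu *mmu, unsigned long iova);
        void (*destroy)(struct mmu *mmu);
    };

    struct mmu {
        const struct mmu_funcs *funcs;
    };

    static int dummy_map(struct mmu *mmu, unsigned long iova)
    {
        printf("map %#lx\n", iova);
        return 0;
    }

    static void dummy_destroy(struct mmu *mmu)
    {
    }

    static const struct mmu_funcs dummy_funcs = {
        .map     = dummy_map,
        .destroy = dummy_destroy,
    };

    int main(void)
    {
        struct mmu mmu = { .funcs = &dummy_funcs };

        /* Dispatch through the table, as msm_gem_vma.c does with
         * aspace->mmu->funcs->map(...). */
        return mmu.funcs->map(&mmu, 0x1000);
    }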
D | msm_gem_vma.c |
     29  if (aspace->mmu)    in msm_gem_address_space_destroy()
     30  aspace->mmu->funcs->destroy(aspace->mmu);    in msm_gem_address_space_destroy()
     48  if (aspace->mmu) {    in msm_gem_unmap_vma()
     50  aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);    in msm_gem_unmap_vma()
     82  if (aspace->mmu) {    in msm_gem_map_vma()
     84  ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,    in msm_gem_map_vma()
    108  aspace->mmu = msm_iommu_new(dev, domain);    in msm_gem_address_space_create()
/Linux-v4.19/arch/arc/mm/
D | tlb.c |
    255  struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;    in local_flush_tlb_all() local
    258  int num_tlb = mmu->sets * mmu->ways;    in local_flush_tlb_all()
    731  struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;    in read_decode_mmu_bcr() local
    763  mmu->ver = (tmp >> 24);    in read_decode_mmu_bcr()
    766  if (mmu->ver <= 2) {    in read_decode_mmu_bcr()
    768  mmu->pg_sz_k = TO_KB(0x2000);    in read_decode_mmu_bcr()
    769  mmu->sets = 1 << mmu2->sets;    in read_decode_mmu_bcr()
    770  mmu->ways = 1 << mmu2->ways;    in read_decode_mmu_bcr()
    771  mmu->u_dtlb = mmu2->u_dtlb;    in read_decode_mmu_bcr()
    772  mmu->u_itlb = mmu2->u_itlb;    in read_decode_mmu_bcr()
    [all …]
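read_decode_mmu_bcr() unpacks the ARC MMU's build-configuration register: the version is the top byte (tmp >> 24), and the set/way counts are stored log2-encoded, hence sets = 1 << mmu2->sets; local_flush_tlb_all() then sizes its loop as sets * ways. A self-contained decode in the same spirit (the field layout below is invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t bcr = 0x02000042;    /* pretend raw register value */

        unsigned ver  = bcr >> 24;                 /* top byte: version      */
        unsigned sets = 1u << ((bcr >> 4) & 0xf);  /* log2-encoded fields,   */
        unsigned ways = 1u << (bcr & 0xf);         /* as in 1 << mmu2->sets  */

        /* prints: ver 2: 16 sets x 4 ways = 64 TLB entries */
        printf("ver %u: %u sets x %u ways = %u TLB entries\n",
               ver, sets, ways, sets * ways);
        return 0;
    }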
/Linux-v4.19/drivers/gpu/drm/nouveau/include/nvif/
D | mmu.h |
     37  nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)    in nvif_mmu_kind_valid() argument
     39  const u8 invalid = mmu->kind_nr - 1;    in nvif_mmu_kind_valid()
     41  if (kind >= mmu->kind_nr || mmu->kind[kind] == invalid)    in nvif_mmu_kind_valid()
     48  nvif_mmu_type(struct nvif_mmu *mmu, u8 mask)    in nvif_mmu_type() argument
     51  for (i = 0; i < mmu->type_nr; i++) {    in nvif_mmu_type()
     52  if ((mmu->type[i].type & mask) == mask)    in nvif_mmu_type()
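nvif_mmu_kind_valid() relies on a sentinel: the value kind_nr - 1 marks an unusable entry in the kind[] remap table, so a kind is accepted only when it is in range and does not remap to that sentinel. A small self-contained sketch:

    #include <stdbool.h>
    #include <stdio.h>

    /* kind[] remaps kinds; an entry equal to nr - 1 means "invalid",
     * mirroring nvif_mmu_kind_valid(). */
    static bool kind_valid(const unsigned char *kind, unsigned nr, unsigned char k)
    {
        const unsigned char invalid = nr - 1;

        return k < nr && kind[k] != invalid;
    }

    int main(void)
    {
        const unsigned char kind[4] = { 0, 1, 3, 3 };    /* 3 == sentinel */

        printf("%d %d %d\n",
               kind_valid(kind, 4, 1),     /* 1: maps to 1        */
               kind_valid(kind, 4, 2),     /* 0: maps to sentinel */
               kind_valid(kind, 4, 7));    /* 0: out of range     */
        return 0;
    }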
/Linux-v4.19/drivers/gpu/drm/nouveau/
D | nouveau_mem.c |
    100  struct nvif_mmu *mmu = &cli->mmu;    in nouveau_mem_host() local
    111  if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))    in nouveau_mem_host()
    113  if (mem->comp && !(mmu->type[type].type & NVIF_MEM_COMP)) {    in nouveau_mem_host()
    114  if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)    in nouveau_mem_host()
    115  mem->kind = mmu->kind[mem->kind];    in nouveau_mem_host()
    124  ret = nvif_mem_init_type(mmu, cli->mem->oclass, type, PAGE_SHIFT,    in nouveau_mem_host()
    138  struct nvif_mmu *mmu = &cli->mmu;    in nouveau_mem_vram() local
    147  ret = nvif_mem_init_type(mmu, cli->mem->oclass,    in nouveau_mem_vram()
    155  ret = nvif_mem_init_type(mmu, cli->mem->oclass,    in nouveau_mem_vram()
    158  .bankswz = mmu->kind[mem->kind] == 2,    in nouveau_mem_vram()
/Linux-v4.19/arch/powerpc/kvm/
D | book3s_32_mmu.c |
    414  struct kvmppc_mmu *mmu = &vcpu->arch.mmu;    in kvmppc_mmu_book3s_32_init() local
    416  mmu->mtsrin = kvmppc_mmu_book3s_32_mtsrin;    in kvmppc_mmu_book3s_32_init()
    417  mmu->mfsrin = kvmppc_mmu_book3s_32_mfsrin;    in kvmppc_mmu_book3s_32_init()
    418  mmu->xlate = kvmppc_mmu_book3s_32_xlate;    in kvmppc_mmu_book3s_32_init()
    419  mmu->reset_msr = kvmppc_mmu_book3s_32_reset_msr;    in kvmppc_mmu_book3s_32_init()
    420  mmu->tlbie = kvmppc_mmu_book3s_32_tlbie;    in kvmppc_mmu_book3s_32_init()
    421  mmu->esid_to_vsid = kvmppc_mmu_book3s_32_esid_to_vsid;    in kvmppc_mmu_book3s_32_init()
    422  mmu->ea_to_vp = kvmppc_mmu_book3s_32_ea_to_vp;    in kvmppc_mmu_book3s_32_init()
    423  mmu->is_dcbz32 = kvmppc_mmu_book3s_32_is_dcbz32;    in kvmppc_mmu_book3s_32_init()
    425  mmu->slbmte = NULL;    in kvmppc_mmu_book3s_32_init()
    [all …]
/Linux-v4.19/fs/ramfs/
D | Makefile |
      7  file-mmu-y := file-nommu.o
      8  file-mmu-$(CONFIG_MMU) := file-mmu.o
      9  ramfs-objs += inode.o $(file-mmu-y)
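This Makefile selects an object at configuration time: file-mmu-y defaults to file-nommu.o, and when CONFIG_MMU=y the second assignment expands to file-mmu-y := file-mmu.o and overrides the default; whichever value survives is folded into ramfs-objs. The same trick for a hypothetical option:

    foo-impl-y               := foo-generic.o
    foo-impl-$(CONFIG_FANCY) := foo-fancy.o    # becomes foo-impl-y when CONFIG_FANCY=y
    mydriver-objs            += core.o $(foo-impl-y)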
/Linux-v4.19/arch/x86/kvm/
D | mmu.h |
     83  if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))    in kvm_mmu_reload()
    105  if (VALID_PAGE(vcpu->arch.mmu.root_hpa))    in kvm_mmu_load_cr3()
    106  vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa |    in kvm_mmu_load_cr3()
    161  static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,    in permission_fault() argument
    184  bool fault = (mmu->permissions[index] >> pte_access) & 1;    in permission_fault()
    188  if (unlikely(mmu->pkru_mask)) {    in permission_fault()
    203  pkru_bits &= mmu->pkru_mask >> offset;    in permission_fault()
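permission_fault() (lines 161-203) is a precomputed-table check: instead of re-deriving access rules on every lookup, KVM fills mmu->permissions[] when the paging mode changes, so the hot path collapses to one shift and mask, (mmu->permissions[index] >> pte_access) & 1, with pkru_mask handled the same way. A toy version of the idea (the two-bit encoding below is invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define ACC_WRITE 1    /* pte allows writes */
    #define ACC_USER  2    /* pte allows user-mode access */

    /* permissions[fault] has bit 'pte_access' set when that pte/access
     * combination must fault -- built once, checked with shift+mask. */
    static uint8_t permissions[4];

    static void build_table(void)
    {
        for (int fault = 0; fault < 4; fault++) {    /* bit0: write, bit1: user */
            for (int acc = 0; acc < 4; acc++) {      /* pte permission bits */
                int deny = ((fault & 1) && !(acc & ACC_WRITE)) ||
                           ((fault & 2) && !(acc & ACC_USER));
                if (deny)
                    permissions[fault] |= 1 << acc;
            }
        }
    }

    int main(void)
    {
        build_table();
        /* Write access to a pte that doesn't allow writes: one shift+mask. */
        int fault = (permissions[1] >> ACC_USER) & 1;
        printf("fault=%d\n", fault);    /* prints fault=1 */
        return 0;
    }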
D | paging_tmpl.h |
     37  #define PT_HAVE_ACCESSED_DIRTY(mmu) true    argument
     57  #define PT_HAVE_ACCESSED_DIRTY(mmu) true    argument
     70  #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)    argument
    107  static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,    in FNAME()
    113  if (!PT_HAVE_ACCESSED_DIRTY(mmu))    in FNAME()
    134  static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,    in FNAME()
    161  if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))    in FNAME()
    168  if (PT_HAVE_ACCESSED_DIRTY(&vcpu->arch.mmu) && !(gpte & PT_GUEST_ACCESSED_MASK))    in FNAME()
    203  struct kvm_mmu *mmu,    in FNAME()
    214  if (!PT_HAVE_ACCESSED_DIRTY(mmu))    in FNAME()
    [all …]
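paging_tmpl.h is a C template by preprocessor: mmu.c includes it several times with different settings, macros like PT_HAVE_ACCESSED_DIRTY() (lines 37/57/70) change per instantiation, and FNAME() pastes a per-format prefix onto every function so 64-bit, 32-bit and EPT walkers coexist in one object file. A minimal self-contained version of the token-pasting trick (in KVM the function body lives in the shared header rather than being written out twice):

    #include <stdio.h>

    /* Two-level paste so PREFIX is expanded first, as FNAME() needs. */
    #define PASTE2(a, b) a##_##b
    #define PASTE(a, b)  PASTE2(a, b)

    #define PREFIX pt64
    #define SHIFT  12
    static unsigned long PASTE(PREFIX, index)(unsigned long addr)
    {
        return addr >> SHIFT;    /* instantiated as pt64_index() */
    }
    #undef PREFIX
    #undef SHIFT

    #define PREFIX pt32
    #define SHIFT  22
    static unsigned long PASTE(PREFIX, index)(unsigned long addr)
    {
        return addr >> SHIFT;    /* instantiated as pt32_index() */
    }
    #undef PREFIX
    #undef SHIFT

    int main(void)
    {
        printf("%lu %lu\n", pt64_index(0x400000), pt32_index(0x400000));
        return 0;    /* prints "1024 1" */
    }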
D | mmu.c |
    2184  || vcpu->arch.mmu.sync_page(vcpu, sp) == 0) {    in __kvm_sync_page()
    2378  role = vcpu->arch.mmu.base_role;    in kvm_mmu_get_page()
    2384  if (!vcpu->arch.mmu.direct_map    in kvm_mmu_get_page()
    2385  && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {    in kvm_mmu_get_page()
    2460  iterator->level = vcpu->arch.mmu.shadow_root_level;    in shadow_walk_init_using_root()
    2463  vcpu->arch.mmu.root_level < PT64_ROOT_4LEVEL &&    in shadow_walk_init_using_root()
    2464  !vcpu->arch.mmu.direct_map)    in shadow_walk_init_using_root()
    2472  BUG_ON(root != vcpu->arch.mmu.root_hpa);    in shadow_walk_init_using_root()
    2475  = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];    in shadow_walk_init_using_root()
    2486  shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu.root_hpa,    in shadow_walk_init()
    [all …]
/Linux-v4.19/drivers/gpu/drm/nouveau/nvkm/engine/device/
D | base.c |
     88  .mmu = nv04_mmu_new,
    109  .mmu = nv04_mmu_new,
    131  .mmu = nv04_mmu_new,
    151  .mmu = nv04_mmu_new,
    173  .mmu = nv04_mmu_new,
    195  .mmu = nv04_mmu_new,
    217  .mmu = nv04_mmu_new,
    239  .mmu = nv04_mmu_new,
    261  .mmu = nv04_mmu_new,
    283  .mmu = nv04_mmu_new,
    [all …]