
Searched refs:mmu (Results 1 – 25 of 248) sorted by relevance


/Linux-v5.4/drivers/staging/media/ipu3/
ipu3-mmu.c
    78: static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu)
    80:         writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE);
    83: static void call_if_imgu_is_powered(struct imgu_mmu *mmu,
    84:                                     void (*func)(struct imgu_mmu *mmu))
    86:         if (!pm_runtime_get_if_in_use(mmu->dev))
    89:         func(mmu);
    90:         pm_runtime_put(mmu->dev);
   101: static void imgu_mmu_set_halt(struct imgu_mmu *mmu, bool halt)
   106:         writel(halt, mmu->base + REG_GP_HALT);
   107:         ret = readl_poll_timeout(mmu->base + REG_GP_HALTED,
   [all …]
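
The pattern worth noting here is call_if_imgu_is_powered(): pm_runtime_get_if_in_use() takes a runtime-PM reference only if the device is already active, so the TLB invalidate is simply skipped while the IPU is powered down. A minimal userspace sketch of that guard, with a plain flag standing in for the runtime-PM calls (fake_dev and its helpers are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct fake_dev { bool powered; };

/* Stand-ins for pm_runtime_get_if_in_use()/pm_runtime_put(): take a
 * reference only if the device is already active. */
static bool get_if_in_use(struct fake_dev *dev) { return dev->powered; }
static void put_ref(struct fake_dev *dev) { (void)dev; }

static void call_if_powered(struct fake_dev *dev,
			    void (*func)(struct fake_dev *))
{
	if (!get_if_in_use(dev))
		return;			/* device is off: nothing to do */
	func(dev);
	put_ref(dev);
}

static void tlb_invalidate(struct fake_dev *dev)
{
	(void)dev;
	puts("invalidate");
}

int main(void)
{
	struct fake_dev dev = { .powered = true };

	call_if_powered(&dev, tlb_invalidate);	/* runs */
	dev.powered = false;
	call_if_powered(&dev, tlb_invalidate);	/* skipped */
	return 0;
}
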
/Linux-v5.4/drivers/gpu/drm/nouveau/nvif/
mmu.c
    28: nvif_mmu_fini(struct nvif_mmu *mmu)
    30:         kfree(mmu->kind);
    31:         kfree(mmu->type);
    32:         kfree(mmu->heap);
    33:         nvif_object_fini(&mmu->object);
    37: nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
    49:         mmu->heap = NULL;
    50:         mmu->type = NULL;
    51:         mmu->kind = NULL;
    54:                          &mmu->object);
   [all …]
mem.c
    28: nvif_mem_init_map(struct nvif_mmu *mmu, u8 type, u64 size, struct nvif_mem *mem)
    30:         int ret = nvif_mem_init(mmu, mmu->mem, NVIF_MEM_MAPPABLE | type, 0,
    47: nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
    70:         ret = nvif_object_init(&mmu->object, 0, oclass, args,
    73:         mem->type = mmu->type[type].type;
    86: nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
    93:         for (i = 0; ret && i < mmu->type_nr; i++) {
    94:                 if ((mmu->type[i].type & type) == type) {
    95:                         ret = nvif_mem_init_type(mmu, oclass, i, page, size,
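
nvif_mem_init() shows a first-fit capability scan: walk mmu->type[] and pick the first memory type whose flag bits cover every requested bit, then delegate to nvif_mem_init_type(). A standalone sketch of that matching loop (the type table and flag values below are made up for the demo):

#include <stdint.h>
#include <stdio.h>

#define MEM_MAPPABLE 0x01
#define MEM_COHERENT 0x02
#define MEM_VRAM     0x04

static const uint8_t type_table[] = {
	MEM_VRAM,			/* type 0 */
	MEM_VRAM | MEM_MAPPABLE,	/* type 1 */
	MEM_MAPPABLE | MEM_COHERENT,	/* type 2 */
};

/* Return the first type whose capabilities include all wanted bits. */
static int pick_type(uint8_t want)
{
	for (unsigned int i = 0; i < sizeof(type_table); i++) {
		if ((type_table[i] & want) == want)
			return (int)i;
	}
	return -1;
}

int main(void)
{
	printf("%d\n", pick_type(MEM_MAPPABLE));		/* 1 */
	printf("%d\n", pick_type(MEM_MAPPABLE | MEM_COHERENT));	/* 2 */
	return 0;
}
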
/Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
base.c
    42: nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
    51:         list_add(&ptp->head, &mmu->ptp.list);
    56:         nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
    65: nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
    74:         ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
    82:         ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
    93:         list_add(&ptp->head, &mmu->ptp.list);
   120: nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
   124:         list_for_each_entry(ptc, &mmu->ptc.list, head) {
   134:         list_add(&ptc->head, &mmu->ptc.list);
   [all …]
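
base.c keeps freed page-table allocations on lists (the ptc/ptp structures) so a later nvkm_mmu_ptc_get()/nvkm_mmu_ptp_get() can recycle one instead of allocating fresh memory. A single-bucket sketch of that recycle-before-allocate idea (the kernel keys its cache by size and tracks more state; this only shows the shape):

#include <stdio.h>
#include <stdlib.h>

struct pt {
	struct pt *next;	/* free-list linkage */
	size_t size;
};

static struct pt *free_list;	/* one bucket; the real cache has one per size */

static struct pt *pt_get(size_t size)
{
	if (free_list && free_list->size == size) {
		struct pt *pt = free_list;	/* reuse a cached entry */
		free_list = pt->next;
		return pt;
	}
	struct pt *fresh = malloc(sizeof(*fresh));	/* fall back to a new one */
	if (fresh)
		fresh->size = size;
	return fresh;
}

static void pt_put(struct pt *pt)
{
	pt->next = free_list;	/* cache it instead of freeing */
	free_list = pt;
}

int main(void)
{
	struct pt *a = pt_get(0x1000);
	pt_put(a);
	struct pt *b = pt_get(0x1000);
	printf("reused: %d\n", a == b);	/* 1: same object came back */
	free(b);
	return 0;
}
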
Kbuild
     2: nvkm-y += nvkm/subdev/mmu/base.o
     3: nvkm-y += nvkm/subdev/mmu/nv04.o
     4: nvkm-y += nvkm/subdev/mmu/nv41.o
     5: nvkm-y += nvkm/subdev/mmu/nv44.o
     6: nvkm-y += nvkm/subdev/mmu/nv50.o
     7: nvkm-y += nvkm/subdev/mmu/g84.o
     8: nvkm-y += nvkm/subdev/mmu/mcp77.o
     9: nvkm-y += nvkm/subdev/mmu/gf100.o
    10: nvkm-y += nvkm/subdev/mmu/gk104.o
    11: nvkm-y += nvkm/subdev/mmu/gk20a.o
   [all …]
ummu.c
    35:         struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;
    37:         if (mmu->func->mem.user.oclass && oclass->client->super) {
    39:                 oclass->base = mmu->func->mem.user;
    45:         if (mmu->func->vmm.user.oclass) {
    47:                 oclass->base = mmu->func->vmm.user;
    59:         struct nvkm_mmu *mmu = ummu->mmu;
    67:         if ((index = args->v0.index) >= mmu->heap_nr)
    69:         args->v0.size = mmu->heap[index].size;
    79:         struct nvkm_mmu *mmu = ummu->mmu;
    87:         if ((index = args->v0.index) >= mmu->type_nr)
   [all …]
mem.c
    33:         struct nvkm_mmu *mmu;
    88:         dma_unmap_page(mem->mmu->subdev.device->dev,
   144: nvkm_mem_new_host(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
   147:         struct device *dev = mmu->subdev.device->dev;
   157:         if ( (mmu->type[type].type & NVKM_MEM_COHERENT) &&
   158:             !(mmu->type[type].type & NVKM_MEM_UNCACHED))
   169:         mem->mmu = mmu;
   199:         if (mmu->dma_bits > 32)
   209:                 mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev,
   224: nvkm_mem_new_type(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
   [all …]
umem.c
    73:         struct nvkm_device *device = umem->mmu->subdev.device;
    91:         struct nvkm_mmu *mmu = umem->mmu;
   110:         int ret = mmu->func->mem.umap(mmu, umem->memory, argv, argc,
   146:         struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
   162:         if (type >= mmu->type_nr)
   168:         umem->mmu = mmu;
   169:         umem->type = mmu->type[type].type;
   174:         if (mmu->type[type].type & NVKM_MEM_MAPPABLE) {
   179:                 ret = nvkm_mem_new_type(mmu, type, page, size, argv, argc,
nv44.c
    32: nv44_mmu_init(struct nvkm_mmu *mmu)
    34:         struct nvkm_device *device = mmu->subdev.device;
    35:         struct nvkm_memory *pt = mmu->vmm->pd->pt[0]->memory;
    46:         nvkm_wr32(device, 0x100818, mmu->vmm->null);
    59:         .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
memnv04.c
    31: nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
    37:         struct nvkm_device *device = mmu->subdev.device;
    51: nv04_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
    62:         if (mmu->type[type].type & NVKM_MEM_MAPPABLE)
    67:         return nvkm_ram_get(mmu->subdev.device, type, 0x01, page,
/Linux-v5.4/drivers/iommu/
ipmmu-vmsa.c
    72:         struct ipmmu_vmsa_device *mmu;
   228: static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
   230:         return mmu->root == mmu;
   235:         struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
   238:         if (ipmmu_is_root(mmu))
   239:                 *rootp = mmu;
   256: static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
   258:         return ioread32(mmu->base + offset);
   261: static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
   264:         iowrite32(data, mmu->base + offset);
   [all …]
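
ipmmu-vmsa.c funnels all register access through two thin accessors, ipmmu_read()/ipmmu_write(), so every MMIO touch goes through one place; ipmmu_is_root() is the usual "am I my own root?" self-reference test. A sketch of the accessor pattern with an ordinary array standing in for the ioremap()ed register window (fake_ipmmu is invented here):

#include <stdint.h>
#include <stdio.h>

struct fake_ipmmu { volatile uint32_t *base; };

static uint32_t mmu_read(struct fake_ipmmu *mmu, unsigned int offset)
{
	return mmu->base[offset / 4];	/* ioread32(mmu->base + offset) */
}

static void mmu_write(struct fake_ipmmu *mmu, unsigned int offset, uint32_t data)
{
	mmu->base[offset / 4] = data;	/* iowrite32(data, mmu->base + offset) */
}

int main(void)
{
	volatile uint32_t regs[16] = { 0 };
	struct fake_ipmmu mmu = { .base = regs };

	mmu_write(&mmu, 0x8, 0xdeadbeef);
	printf("0x%x\n", (unsigned int)mmu_read(&mmu, 0x8));
	return 0;
}
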
/Linux-v5.4/arch/x86/kernel/
paravirt.c
   359:         .mmu.flush_tlb_user = native_flush_tlb,
   360:         .mmu.flush_tlb_kernel = native_flush_tlb_global,
   361:         .mmu.flush_tlb_one_user = native_flush_tlb_one_user,
   362:         .mmu.flush_tlb_others = native_flush_tlb_others,
   363:         .mmu.tlb_remove_table =
   366:         .mmu.exit_mmap = paravirt_nop,
   369:         .mmu.read_cr2 = __PV_IS_CALLEE_SAVE(native_read_cr2),
   370:         .mmu.write_cr2 = native_write_cr2,
   371:         .mmu.read_cr3 = __native_read_cr3,
   372:         .mmu.write_cr3 = native_write_cr3,
   [all …]
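
This is the heart of paravirtualization on x86: a struct of function pointers preloaded with the native implementations, which a hypervisor guest can patch with its own callbacks at boot. A freestanding sketch of the override pattern (names are illustrative, not the kernel's pv_ops layout):

#include <stdio.h>

struct mmu_ops {
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
};

static void native_flush_tlb(void)        { puts("native user flush"); }
static void native_flush_tlb_global(void) { puts("native global flush"); }
static void hv_flush_tlb(void)            { puts("hypercall user flush"); }

/* Defaults mirror the .mmu.* initializers above. */
static struct mmu_ops ops = {
	.flush_tlb_user   = native_flush_tlb,
	.flush_tlb_kernel = native_flush_tlb_global,
};

int main(void)
{
	ops.flush_tlb_user();			/* native path */
	ops.flush_tlb_user = hv_flush_tlb;	/* paravirt override at boot */
	ops.flush_tlb_user();			/* hypervisor path */
	return 0;
}
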
/Linux-v5.4/drivers/gpu/drm/panfrost/
panfrost_mmu.c
   100:                                struct panfrost_mmu *mmu,
   106:         ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
   111: static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
   113:         int as_nr = mmu->as;
   114:         struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
   145: u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
   151:         as = mmu->as;
   153:         int en = atomic_inc_return(&mmu->as_count);
   156:         list_move(&mmu->list, &pfdev->as_lru_list);
   179:         mmu->as = as;
   [all …]
/Linux-v5.4/drivers/gpu/drm/msm/
msm_mmu.h
    13:         int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt);
    14:         void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
    15:         int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
    17:         int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len);
    18:         void (*destroy)(struct msm_mmu *mmu);
    28: static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
    31:         mmu->dev = dev;
    32:         mmu->funcs = funcs;
    38: static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
    41:         mmu->arg = arg;
   [all …]
msm_iommu.c
    26: static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
    29:         struct msm_iommu *iommu = to_msm_iommu(mmu);
    31:         return iommu_attach_device(iommu->domain, mmu->dev);
    34: static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
    37:         struct msm_iommu *iommu = to_msm_iommu(mmu);
    39:         iommu_detach_device(iommu->domain, mmu->dev);
    42: static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
    45:         struct msm_iommu *iommu = to_msm_iommu(mmu);
    54: static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
    56:         struct msm_iommu *iommu = to_msm_iommu(mmu);
   [all …]
msm_gpummu.c
    24: static int msm_gpummu_attach(struct msm_mmu *mmu, const char * const *names,
    30: static void msm_gpummu_detach(struct msm_mmu *mmu, const char * const *names,
    35: static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
    38:         struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
    64: static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
    66:         struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
    79: static void msm_gpummu_destroy(struct msm_mmu *mmu)
    81:         struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
    83:         dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
   118: void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
   [all …]
msm_gem_vma.c
    18:         if (aspace->mmu)
    19:                 aspace->mmu->funcs->destroy(aspace->mmu);
    44:         if (aspace->mmu)
    45:                 aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
    77:         if (aspace && aspace->mmu)
    78:                 ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
   143:         aspace->mmu = msm_iommu_new(dev, domain);
   166:         aspace->mmu = msm_gpummu_new(dev, gpu);
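
Taken together, the msm files show one vtable in action: msm_mmu.h declares the ops, msm_iommu.c and msm_gpummu.c implement them (recovering their private state with container_of()-style casts), and msm_gem_vma.c dispatches through aspace->mmu->funcs without knowing which backend it has. A compact sketch of that shape (names simplified, not the msm API):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mmu;

struct mmu_funcs {
	int (*map)(struct mmu *mmu, uint64_t iova, size_t len);
};

struct mmu { const struct mmu_funcs *funcs; };

/* A backend embeds the base object and downcasts to reach its state. */
struct iommu_backend {
	struct mmu base;
	int mapped;
};

#define to_backend(m) \
	((struct iommu_backend *)((char *)(m) - offsetof(struct iommu_backend, base)))

static int iommu_map(struct mmu *mmu, uint64_t iova, size_t len)
{
	struct iommu_backend *b = to_backend(mmu);

	b->mapped++;
	printf("map %zu bytes at 0x%llx\n", len, (unsigned long long)iova);
	return 0;
}

static const struct mmu_funcs iommu_funcs = { .map = iommu_map };

int main(void)
{
	struct iommu_backend b = { .base = { .funcs = &iommu_funcs } };
	struct mmu *mmu = &b.base;	/* callers only ever see struct mmu */

	mmu->funcs->map(mmu, 0x1000, 4096);
	return 0;
}
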
/Linux-v5.4/arch/arc/mm/
tlb.c
   252:         struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
   255:         int num_tlb = mmu->sets * mmu->ways;
   728:         struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
   760:         mmu->ver = (tmp >> 24);
   763:         if (mmu->ver <= 2) {
   765:                 mmu->pg_sz_k = TO_KB(0x2000);
   766:                 mmu->sets = 1 << mmu2->sets;
   767:                 mmu->ways = 1 << mmu2->ways;
   768:                 mmu->u_dtlb = mmu2->u_dtlb;
   769:                 mmu->u_itlb = mmu2->u_itlb;
   [all …]
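
read_decode_mmu_bcr() recovers the TLB geometry from a packed build-configuration register, expanding the log2-encoded set and way counts so that local_flush_tlb_all() can compute the entry count as mmu->sets * mmu->ways. A sketch of that decode with an invented bit layout:

#include <stdint.h>
#include <stdio.h>

struct mmu_info { unsigned int ver, sets, ways; };

static void decode_bcr(uint32_t bcr, struct mmu_info *mmu)
{
	mmu->ver  = bcr >> 24;			/* version in the top byte */
	mmu->sets = 1u << ((bcr >> 8) & 0xf);	/* log2-encoded set count */
	mmu->ways = 1u << (bcr & 0xf);		/* log2-encoded way count */
}

int main(void)
{
	struct mmu_info mmu;

	decode_bcr(0x03000602, &mmu);	/* ver 3, 64 sets, 4 ways */
	printf("v%u: %u entries\n", mmu.ver, mmu.sets * mmu.ways);
	return 0;
}
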
/Linux-v5.4/arch/x86/include/asm/
paravirt.h
    52:         PVOP_VCALL0(mmu.flush_tlb_user);
    57:         PVOP_VCALL0(mmu.flush_tlb_kernel);
    62:         PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
    68:         PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
    73:         PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
    78:         PVOP_VCALL1(mmu.exit_mmap, mm);
   119:         return PVOP_CALLEE0(unsigned long, mmu.read_cr2);
   124:         PVOP_VCALL1(mmu.write_cr2, x);
   129:         return PVOP_CALL0(unsigned long, mmu.read_cr3);
   134:         PVOP_VCALL1(mmu.write_cr3, x);
   [all …]
/Linux-v5.4/drivers/gpu/drm/nouveau/include/nvif/
mmu.h
    37: nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)
    39:         const u8 invalid = mmu->kind_nr - 1;
    41:         if (kind >= mmu->kind_nr || mmu->kind[kind] == invalid)
    48: nvif_mmu_type(struct nvif_mmu *mmu, u8 mask)
    51:         for (i = 0; i < mmu->type_nr; i++) {
    52:                 if ((mmu->type[i].type & mask) == mask)
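
nvif_mmu_kind_valid() encodes "invalid" as the last kind number: a kind is usable only if it indexes the table and the table entry is not that sentinel. A standalone sketch (the table contents are made up):

#include <stdint.h>
#include <stdio.h>

static int kind_valid(const uint8_t *kind, uint8_t kind_nr, uint8_t k)
{
	const uint8_t invalid = kind_nr - 1;	/* sentinel value */

	if (k >= kind_nr || kind[k] == invalid)
		return 0;
	return 1;
}

int main(void)
{
	const uint8_t kinds[4] = { 0, 1, 3, 3 };	/* 3 == invalid here */

	printf("%d %d %d\n",
	       kind_valid(kinds, 4, 1),		/* 1: valid mapping */
	       kind_valid(kinds, 4, 2),		/* 0: entry is the sentinel */
	       kind_valid(kinds, 4, 9));	/* 0: out of range */
	return 0;
}
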
/Linux-v5.4/drivers/gpu/drm/nouveau/
nouveau_mem.c
   100:         struct nvif_mmu *mmu = &cli->mmu;
   111:         if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))
   113:         if (mem->comp && !(mmu->type[type].type & NVIF_MEM_COMP)) {
   114:                 if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
   115:                         mem->kind = mmu->kind[mem->kind];
   124:         ret = nvif_mem_init_type(mmu, cli->mem->oclass, type, PAGE_SHIFT,
   138:         struct nvif_mmu *mmu = &cli->mmu;
   147:         ret = nvif_mem_init_type(mmu, cli->mem->oclass,
   155:         ret = nvif_mem_init_type(mmu, cli->mem->oclass,
   158:                 .bankswz = mmu->kind[mem->kind] == 2,
/Linux-v5.4/arch/powerpc/kvm/
book3s_32_mmu.c
   404:         struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
   406:         mmu->mtsrin = kvmppc_mmu_book3s_32_mtsrin;
   407:         mmu->mfsrin = kvmppc_mmu_book3s_32_mfsrin;
   408:         mmu->xlate = kvmppc_mmu_book3s_32_xlate;
   409:         mmu->reset_msr = kvmppc_mmu_book3s_32_reset_msr;
   410:         mmu->tlbie = kvmppc_mmu_book3s_32_tlbie;
   411:         mmu->esid_to_vsid = kvmppc_mmu_book3s_32_esid_to_vsid;
   412:         mmu->ea_to_vp = kvmppc_mmu_book3s_32_ea_to_vp;
   413:         mmu->is_dcbz32 = kvmppc_mmu_book3s_32_is_dcbz32;
   415:         mmu->slbmte = NULL;
   [all …]
/Linux-v5.4/fs/ramfs/
Makefile
     8: file-mmu-y := file-nommu.o
     9: file-mmu-$(CONFIG_MMU) := file-mmu.o
    10: ramfs-objs += inode.o $(file-mmu-y)
/Linux-v5.4/arch/x86/kvm/
mmu.h
    78:         if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
   100:         if (VALID_PAGE(vcpu->arch.mmu->root_hpa))
   101:                 vcpu->arch.mmu->set_cr3(vcpu, vcpu->arch.mmu->root_hpa |
   156: static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
   179:         bool fault = (mmu->permissions[index] >> pte_access) & 1;
   183:         if (unlikely(mmu->pkru_mask)) {
   198:                 pkru_bits &= mmu->pkru_mask >> offset;
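
permission_fault() is branch-light by construction: when the MMU is configured, KVM precomputes mmu->permissions[], a small table indexed by access kind whose bits say, for each combination of PTE permission bits, whether that access faults; the runtime check then collapses to a shift and mask, with pkru_mask layering protection-key checks on the same way. A simplified reconstruction of the precompute-then-index idea (the two-bit PTE encoding here is invented):

#include <stdint.h>
#include <stdio.h>

#define PTE_W 0x1	/* writable */
#define PTE_U 0x2	/* user-accessible */

/* permissions[access]: bit N set means PTE permission bits N fault. */
static uint8_t permissions[2];	/* index 0: user read, 1: user write */

static void build_tables(void)
{
	for (unsigned int pte = 0; pte < 4; pte++) {
		if (!(pte & PTE_U))
			permissions[0] |= 1u << pte;	/* read needs U */
		if (!(pte & PTE_U) || !(pte & PTE_W))
			permissions[1] |= 1u << pte;	/* write needs U and W */
	}
}

static int permission_fault(unsigned int access, unsigned int pte_bits)
{
	return (permissions[access] >> pte_bits) & 1;
}

int main(void)
{
	build_tables();
	printf("%d\n", permission_fault(1, PTE_U));		/* 1: read-only page */
	printf("%d\n", permission_fault(1, PTE_U | PTE_W));	/* 0: allowed */
	return 0;
}
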
paging_tmpl.h
    34: #define PT_HAVE_ACCESSED_DIRTY(mmu) true
    54: #define PT_HAVE_ACCESSED_DIRTY(mmu) true
    67: #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)
   104: static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
   110:         if (!PT_HAVE_ACCESSED_DIRTY(mmu))
   131: static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
   178:         if (is_rsvd_bits_set(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
   185:         if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&
   221:                                struct kvm_mmu *mmu,
   232:         if (!PT_HAVE_ACCESSED_DIRTY(mmu))
   [all …]
