/Linux-v6.1/drivers/gpu/drm/nouveau/nvif/ |
D | mmu.c |
    22 #include <nvif/mmu.h>
    28 nvif_mmu_dtor(struct nvif_mmu *mmu) in nvif_mmu_dtor() argument
    30 kfree(mmu->kind); in nvif_mmu_dtor()
    31 kfree(mmu->type); in nvif_mmu_dtor()
    32 kfree(mmu->heap); in nvif_mmu_dtor()
    33 nvif_object_dtor(&mmu->object); in nvif_mmu_dtor()
    38 struct nvif_mmu *mmu) in nvif_mmu_ctor() argument
    41 { NVIF_CLASS_MEM_GF100, -1 }, in nvif_mmu_ctor()
    42 { NVIF_CLASS_MEM_NV50 , -1 }, in nvif_mmu_ctor()
    43 { NVIF_CLASS_MEM_NV04 , -1 }, in nvif_mmu_ctor()
    [all …]
|
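The mmu.c hits above show nvif_mmu_dtor() releasing the kind/type/heap arrays before destroying the base object, which implies the constructor builds them up and must unwind on failure. A minimal userspace sketch of that construct/teardown pairing, using hypothetical names and plain malloc/free rather than the nvif API:

    #include <stdlib.h>
    #include <string.h>

    struct mmu_info {
            unsigned char *heap;   /* allocated first  */
            unsigned char *type;   /* allocated second */
            unsigned char *kind;   /* allocated last   */
    };

    /* Destructor: release sub-allocations before the containing object. */
    static void mmu_info_dtor(struct mmu_info *mmu)
    {
            free(mmu->kind);
            free(mmu->type);
            free(mmu->heap);
            memset(mmu, 0, sizeof(*mmu));
    }

    /* Constructor: on any failure, unwind everything allocated so far. */
    static int mmu_info_ctor(struct mmu_info *mmu,
                             size_t heaps, size_t types, size_t kinds)
    {
            memset(mmu, 0, sizeof(*mmu));
            if (!(mmu->heap = calloc(heaps, 1)) ||
                !(mmu->type = calloc(types, 1)) ||
                !(mmu->kind = calloc(kinds, 1))) {
                    mmu_info_dtor(mmu);   /* free(NULL) is harmless */
                    return -1;
            }
            return 0;
    }

The point of the pairing is that the destructor is safe to call on a partially constructed object, so the constructor can reuse it for error unwinding.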
D | mem.c |
    28 nvif_mem_ctor_map(struct nvif_mmu *mmu, const char *name, u8 type, u64 size, in nvif_mem_ctor_map() argument
    31 int ret = nvif_mem_ctor(mmu, name, mmu->mem, NVIF_MEM_MAPPABLE | type, in nvif_mem_ctor_map()
    34 ret = nvif_object_map(&mem->object, NULL, 0); in nvif_mem_ctor_map()
    44 nvif_object_dtor(&mem->object); in nvif_mem_dtor()
    48 nvif_mem_ctor_type(struct nvif_mmu *mmu, const char *name, s32 oclass, in nvif_mem_ctor_type() argument
    49 int type, u8 page, u64 size, void *argv, u32 argc, in nvif_mem_ctor_type() argument
    56 mem->object.client = NULL; in nvif_mem_ctor_type()
    57 if (type < 0) in nvif_mem_ctor_type()
    58 return -EINVAL; in nvif_mem_ctor_type()
    62 return -ENOMEM; in nvif_mem_ctor_type()
    [all …]
|
/Linux-v6.1/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
D | base.c |
    42 nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt) in nvkm_mmu_ptp_put() argument
    44 const int slot = pt->base >> pt->ptp->shift; in nvkm_mmu_ptp_put()
    45 struct nvkm_mmu_ptp *ptp = pt->ptp; in nvkm_mmu_ptp_put()
    50 if (!ptp->free) in nvkm_mmu_ptp_put()
    51 list_add(&ptp->head, &mmu->ptp.list); in nvkm_mmu_ptp_put()
    52 ptp->free |= BIT(slot); in nvkm_mmu_ptp_put()
    54 /* If there's no more sub-allocations, destroy PTP. */ in nvkm_mmu_ptp_put()
    55 if (ptp->free == ptp->mask) { in nvkm_mmu_ptp_put()
    56 nvkm_mmu_ptc_put(mmu, force, &ptp->pt); in nvkm_mmu_ptp_put()
    57 list_del(&ptp->head); in nvkm_mmu_ptp_put()
    [all …]
|
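nvkm_mmu_ptp_put() above tracks sub-allocations of a page-table page with a free bitmap and hands the whole page back once every slot is free again (ptp->free == ptp->mask). A small self-contained sketch of that bitmap sub-allocator pattern; the types and helpers here are invented for illustration:

    #include <stdint.h>
    #include <stdbool.h>

    /* One parent page carved into fixed-size slots, tracked by a free bitmap. */
    struct ptp {
            uint32_t free;   /* bit set => slot available */
            uint32_t mask;   /* bits covering every slot  */
    };

    /* Allocate: return a slot index, or -1 if the page is exhausted. */
    static int ptp_get(struct ptp *ptp)
    {
            for (int slot = 0; slot < 32; slot++) {
                    uint32_t bit = 1u << slot;

                    if (!(ptp->mask & bit))
                            break;              /* past the last real slot */
                    if (ptp->free & bit) {
                            ptp->free &= ~bit;
                            return slot;
                    }
            }
            return -1;
    }

    /* Free: mark the slot available; the caller destroys the parent page when
     * all slots are free again, mirroring the free == mask test above. */
    static bool ptp_put(struct ptp *ptp, int slot)
    {
            ptp->free |= 1u << slot;
            return ptp->free == ptp->mask;      /* true => whole page reusable */
    }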
D | ummu.c |
    35 struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu; in nvkm_ummu_sclass() local
    37 if (mmu->func->mem.user.oclass) { in nvkm_ummu_sclass()
    38 if (index-- == 0) { in nvkm_ummu_sclass()
    39 oclass->base = mmu->func->mem.user; in nvkm_ummu_sclass()
    40 oclass->ctor = nvkm_umem_new; in nvkm_ummu_sclass()
    45 if (mmu->func->vmm.user.oclass) { in nvkm_ummu_sclass()
    46 if (index-- == 0) { in nvkm_ummu_sclass()
    47 oclass->base = mmu->func->vmm.user; in nvkm_ummu_sclass()
    48 oclass->ctor = nvkm_uvmm_new; in nvkm_ummu_sclass()
    53 return -EINVAL; in nvkm_ummu_sclass()
    [all …]
|
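nvkm_ummu_sclass() exposes only the classes a given MMU implementation provides, using the index-- == 0 idiom to skip absent entries while still honouring the caller's index. A hedged userspace sketch of the same enumeration pattern, with invented types:

    #include <stddef.h>

    struct class_desc { int oclass; const char *name; };

    /* Return the index'th *available* class, skipping entries whose oclass is 0.
     * Each present entry consumes one step of the caller-supplied index until
     * it reaches zero, as in nvkm_ummu_sclass(). */
    static const struct class_desc *
    pick_class(const struct class_desc *tbl, size_t n, int index)
    {
            for (size_t i = 0; i < n; i++) {
                    if (!tbl[i].oclass)
                            continue;          /* feature not implemented */
                    if (index-- == 0)
                            return &tbl[i];    /* the index'th visible entry */
            }
            return NULL;                       /* ran past the end */
    }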
D | umem.c |
    37 struct nvkm_client *master = client->object.client; in nvkm_umem_search()
    45 spin_lock(&master->lock); in nvkm_umem_search()
    46 list_for_each_entry(umem, &master->umem, head) { in nvkm_umem_search()
    47 if (umem->object.object == handle) { in nvkm_umem_search()
    48 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
    52 spin_unlock(&master->lock); in nvkm_umem_search()
    56 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
    59 return memory ? memory : ERR_PTR(-ENOENT); in nvkm_umem_search()
    67 if (!umem->map) in nvkm_umem_unmap()
    68 return -EEXIST; in nvkm_umem_unmap()
    [all …]
|
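nvkm_umem_search() walks the owner's list under a spinlock and takes a reference (nvkm_memory_ref) before dropping the lock, so the object cannot disappear once the lock is released. A simplified pthread-based sketch of that lookup-with-reference pattern, with invented types:

    #include <pthread.h>
    #include <stdint.h>
    #include <stddef.h>

    struct obj {
            uint64_t    handle;
            int         refs;      /* protected by owner->lock in this sketch */
            struct obj *next;
    };

    struct owner {
            pthread_mutex_t lock;
            struct obj     *head;
    };

    /* Look up an object by handle and return it with an extra reference taken
     * while the lock is still held. */
    static struct obj *obj_search(struct owner *owner, uint64_t handle)
    {
            struct obj *found = NULL;

            pthread_mutex_lock(&owner->lock);
            for (struct obj *o = owner->head; o; o = o->next) {
                    if (o->handle == handle) {
                            o->refs++;         /* grab the ref under the lock */
                            found = o;
                            break;
                    }
            }
            pthread_mutex_unlock(&owner->lock);
            return found;                      /* NULL plays the role of -ENOENT */
    }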
D | mem.c |
    33 struct nvkm_mmu *mmu; member
    45 return nvkm_mem(memory)->target; in nvkm_mem_target()
    58 if (mem->pages == 1 && mem->mem) in nvkm_mem_addr()
    59 return mem->dma[0]; in nvkm_mem_addr()
    66 return nvkm_mem(memory)->pages << PAGE_SHIFT; in nvkm_mem_size()
    75 .memory = &mem->memory, in nvkm_mem_map_dma()
    77 .dma = mem->dma, in nvkm_mem_map_dma()
    86 if (mem->mem) { in nvkm_mem_dtor()
    87 while (mem->pages--) { in nvkm_mem_dtor()
    88 dma_unmap_page(mem->mmu->subdev.device->dev, in nvkm_mem_dtor()
    [all …]
|
D | vmmgf100.c |
    35 u64 base = (addr >> 8) | map->type; in gf100_vmm_pgt_pte()
    38 if (map->ctag && !(map->next & (1ULL << 44))) { in gf100_vmm_pgt_pte()
    39 while (ptes--) { in gf100_vmm_pgt_pte()
    40 data = base | ((map->ctag >> 1) << 44); in gf100_vmm_pgt_pte()
    41 if (!(map->ctag++ & 1)) in gf100_vmm_pgt_pte()
    45 base += map->next; in gf100_vmm_pgt_pte()
    48 map->type += ptes * map->ctag; in gf100_vmm_pgt_pte()
    50 while (ptes--) { in gf100_vmm_pgt_pte()
    52 data += map->next; in gf100_vmm_pgt_pte()
    68 if (map->page->shift == PAGE_SHIFT) { in gf100_vmm_pgt_dma()
    [all …]
|
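gf100_vmm_pgt_pte() packs the shifted physical address together with type/permission bits into each PTE and advances by map->next per entry. A rough sketch of that fill loop; the field layout and the units of the stride are illustrative, not GF100's real PTE format:

    #include <stdint.h>

    /* Write 'ptes' consecutive page-table entries starting at index 'ptei'.
     * The address is shifted so its low bits are free for flag bits, and each
     * further entry advances by 'stride' in the same packed units. */
    static void fill_ptes(uint64_t *pt, uint32_t ptei, uint32_t ptes,
                          uint64_t addr, uint64_t type_bits, uint64_t stride)
    {
            uint64_t data = (addr >> 8) | type_bits;

            while (ptes--) {
                    pt[ptei++] = data;
                    data += stride;   /* next page, same type bits */
            }
    }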
D | memnv04.c |
    31 nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv, in nv04_mem_map() argument
    37 struct nvkm_device *device = mmu->subdev.device; in nv04_mem_map()
    39 int ret = -ENOSYS; in nv04_mem_map()
    41 if ((ret = nvif_unvers(ret, &argv, &argc, args->vn))) in nv04_mem_map()
    44 *paddr = device->func->resource_addr(device, 1) + addr; in nv04_mem_map()
    46 *pvma = ERR_PTR(-ENODEV); in nv04_mem_map()
    51 nv04_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size, in nv04_mem_new() argument
    57 int ret = -ENOSYS; in nv04_mem_new()
    59 if ((ret = nvif_unvers(ret, &argv, &argc, args->vn))) in nv04_mem_new()
    62 if (mmu->type[type].type & NVKM_MEM_MAPPABLE) in nv04_mem_new()
    [all …]
|
D | vmmnv50.c |
    35 u64 next = addr + map->type, data; in nv50_vmm_pgt_pte()
    39 map->type += ptes * map->ctag; in nv50_vmm_pgt_pte()
    42 for (log2blk = 7; log2blk >= 0; log2blk--) { in nv50_vmm_pgt_pte()
    49 next += pten * map->next; in nv50_vmm_pgt_pte()
    50 ptes -= pten; in nv50_vmm_pgt_pte()
    52 while (pten--) in nv50_vmm_pgt_pte()
    68 if (map->page->shift == PAGE_SHIFT) { in nv50_vmm_pgt_dma()
    70 nvkm_kmap(pt->memory); in nv50_vmm_pgt_dma()
    71 while (ptes--) { in nv50_vmm_pgt_dma()
    72 const u64 data = *map->dma++ + map->type; in nv50_vmm_pgt_dma()
    [all …]
|
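The descending log2blk loop in nv50_vmm_pgt_pte() appears to pick the largest power-of-two run of PTEs (up to 1 << 7) that still fits in the remaining count and keeps the index aligned. A guess at that selection step, written as standalone C and labelled as such:

    #include <stdint.h>

    /* Pick the largest block exponent such that 1 << log2blk entries both fit
     * in the remaining 'ptes' and keep 'ptei' aligned to the block size.
     * Returns -1 only when ptes == 0. */
    static int largest_block(uint32_t ptei, uint32_t ptes)
    {
            int log2blk;

            for (log2blk = 7; log2blk >= 0; log2blk--) {
                    uint32_t pten = 1u << log2blk;

                    if (ptes >= pten && !(ptei & (pten - 1)))
                            break;
            }
            return log2blk;
    }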
D | vmmgp100.c |
    37 struct device *dev = vmm->mmu->subdev.device->dev; in gp100_vmm_pfn_unmap()
    40 nvkm_kmap(pt->memory); in gp100_vmm_pfn_unmap()
    41 while (ptes--) { in gp100_vmm_pfn_unmap()
    42 u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0); in gp100_vmm_pfn_unmap()
    43 u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4); in gp100_vmm_pfn_unmap()
    51 nvkm_done(pt->memory); in gp100_vmm_pfn_unmap()
    59 nvkm_kmap(pt->memory); in gp100_vmm_pfn_clear()
    60 while (ptes--) { in gp100_vmm_pfn_clear()
    61 u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0); in gp100_vmm_pfn_clear()
    62 u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4); in gp100_vmm_pfn_clear()
    [all …]
|
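Both PFN helpers above read each 64-bit PTE as two 32-bit words, the low half at byte offset 0 and the high half at offset 4. A tiny sketch of the same recombination on a little-endian host, using plain memory in place of nvkm_ro32():

    #include <stdint.h>
    #include <string.h>

    /* Read one 64-bit entry stored as two 32-bit words: low word at
     * offset ptei*8 + 0, high word at ptei*8 + 4. */
    static uint64_t pte_read(const uint8_t *table, uint32_t ptei)
    {
            uint32_t lo, hi;

            memcpy(&lo, table + ptei * 8 + 0, sizeof(lo));
            memcpy(&hi, table + ptei * 8 + 4, sizeof(hi));
            return (uint64_t)hi << 32 | lo;
    }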
D | memgf100.c |
    34 gf100_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv, in gf100_mem_map() argument
    42 struct nvkm_device *device = mmu->subdev.device; in gf100_mem_map()
    44 int ret = -ENOSYS; in gf100_mem_map()
    46 if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) { in gf100_mem_map()
    47 uvmm.ro = args->v0.ro; in gf100_mem_map()
    48 uvmm.kind = args->v0.kind; in gf100_mem_map()
    50 if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) { in gf100_mem_map()
    63 *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr; in gf100_mem_map()
    64 *psize = (*pvma)->size; in gf100_mem_map()
    69 gf100_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size, in gf100_mem_new() argument
    [all …]
|
D | nv44.c |
    32 nv44_mmu_init(struct nvkm_mmu *mmu) in nv44_mmu_init() argument
    34 struct nvkm_device *device = mmu->subdev.device; in nv44_mmu_init()
    35 struct nvkm_memory *pt = mmu->vmm->pd->pt[0]->memory; in nv44_mmu_init()
    43 addr -= ((nvkm_memory_addr(pt) >> 19) + 1) << 19; in nv44_mmu_init()
    46 nvkm_wr32(device, 0x100818, mmu->vmm->null); in nv44_mmu_init()
    59 .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
    60 .mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
    61 .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv44_vmm_new, true },
    65 nv44_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, in nv44_mmu_new() argument
    68 if (device->type == NVKM_DEVICE_AGP || in nv44_mmu_new()
    [all …]
|
D | vmmtu102.c |
    29 struct nvkm_device *device = vmm->mmu->subdev.device; in tu102_vmm_flush()
    30 u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24; in tu102_vmm_flush() local
    32 type |= 0x00000001; /* PAGE_ALL */ in tu102_vmm_flush()
    33 if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR])) in tu102_vmm_flush()
    34 type |= 0x00000004; /* HUB_ONLY */ in tu102_vmm_flush()
    36 mutex_lock(&vmm->mmu->mutex); in tu102_vmm_flush()
    38 nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8); in tu102_vmm_flush()
    41 nvkm_wr32(device, 0xb830b0, 0x80000000 | type); in tu102_vmm_flush()
    48 mutex_unlock(&vmm->mmu->mutex); in tu102_vmm_flush()
    71 tu102_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, in tu102_vmm_new() argument
    [all …]
|
D | nv41.c |
    32 nv41_mmu_init(struct nvkm_mmu *mmu) in nv41_mmu_init() argument
    34 struct nvkm_device *device = mmu->subdev.device; in nv41_mmu_init()
    35 nvkm_wr32(device, 0x100800, 0x00000002 | mmu->vmm->pd->pt[0]->addr); in nv41_mmu_init()
    44 .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
    45 .mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
    46 .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv41_vmm_new, true },
    50 nv41_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, in nv41_mmu_new() argument
    53 if (device->type == NVKM_DEVICE_AGP || in nv41_mmu_new()
    54 !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) in nv41_mmu_new()
    55 return nv04_mmu_new(device, type, inst, pmmu); in nv41_mmu_new()
    [all …]
|
/Linux-v6.1/drivers/gpu/drm/msm/ |
D | msm_mmu.h |
    1 /* SPDX-License-Identifier: GPL-2.0-only */
    13 void (*detach)(struct msm_mmu *mmu);
    14 int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
    16 int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
    17 void (*destroy)(struct msm_mmu *mmu);
    18 void (*resume_translation)(struct msm_mmu *mmu);
    32 enum msm_mmu_type type; member
    35 static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, in msm_mmu_init() argument
    36 const struct msm_mmu_funcs *funcs, enum msm_mmu_type type) in msm_mmu_init() argument
    38 mmu->dev = dev; in msm_mmu_init()
    [all …]
|
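msm_mmu.h is a classic ops-table interface: each backend supplies a struct of function pointers and msm_mmu_init() stores it on the object, so generic code only ever calls through the table. A minimal standalone sketch of that pattern with hypothetical names:

    #include <stdint.h>
    #include <stddef.h>

    struct fake_mmu;

    /* Per-backend operations table. */
    struct fake_mmu_funcs {
            int  (*map)(struct fake_mmu *mmu, uint64_t iova, size_t len);
            int  (*unmap)(struct fake_mmu *mmu, uint64_t iova, size_t len);
            void (*destroy)(struct fake_mmu *mmu);
    };

    struct fake_mmu {
            const struct fake_mmu_funcs *funcs;
            void *priv;
    };

    /* Mirrors the msm_mmu_init() idea: stash the ops table at construction. */
    static inline void fake_mmu_init(struct fake_mmu *mmu,
                                     const struct fake_mmu_funcs *funcs)
    {
            mmu->funcs = funcs;
            mmu->priv  = NULL;
    }

    /* Generic caller: dispatches through whichever backend was plugged in. */
    static inline int fake_mmu_map(struct fake_mmu *mmu, uint64_t iova, size_t len)
    {
            return mmu->funcs->map(mmu, iova, len);
    }

The design lets GPU-side and IOMMU-side address spaces share one caller while keeping the hardware-specific code behind the table.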
/Linux-v6.1/arch/x86/kernel/ |
D | paravirt.c |
    1 // SPDX-License-Identifier: GPL-2.0-or-later
    6 2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
    46 ".size _paravirt_nop, . - _paravirt_nop\n\t"
    47 ".type _paravirt_nop, @function\n\t"
    57 ".size paravirt_ret0, . - paravirt_ret0\n\t"
    58 ".type paravirt_ret0, @function\n\t"
    98 unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr, in paravirt_patch() argument
    102 * Neat trick to map patch type back to the call within the in paravirt_patch()
    105 void *opfunc = *((void **)&pv_ops + type); in paravirt_patch()
    140 .name = "paravirt-ioport",
    [all …]
|
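The "neat trick" in paravirt_patch() treats pv_ops, a struct made up entirely of function pointers, as a flat array, so a patch type doubles as a slot index. A runnable userspace illustration of the same slot-indexing trick; like the kernel code, it relies on every member being pointer-sized:

    #include <stdio.h>

    struct ops {
            void (*op_a)(void);
            void (*op_b)(void);
            void (*op_c)(void);
    };

    static void hello_a(void) { puts("a"); }
    static void hello_b(void) { puts("b"); }
    static void hello_c(void) { puts("c"); }

    static struct ops ops = { hello_a, hello_b, hello_c };

    int main(void)
    {
            for (unsigned int slot = 0; slot < 3; slot++) {
                    /* slot -> function pointer, like *((void **)&pv_ops + type) */
                    void (*fn)(void) = *((void (**)(void))&ops + slot);
                    fn();
            }
            return 0;
    }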
/Linux-v6.1/drivers/gpu/drm/nouveau/include/nvif/ |
D | mmu.h |
    27 u8 type; member
    29 } *type; member
    39 nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind) in nvif_mmu_kind_valid() argument
    42 if (kind >= mmu->kind_nr || mmu->kind[kind] == mmu->kind_inv) in nvif_mmu_kind_valid()
    49 nvif_mmu_type(struct nvif_mmu *mmu, u8 mask) in nvif_mmu_type() argument
    52 for (i = 0; i < mmu->type_nr; i++) { in nvif_mmu_type()
    53 if ((mmu->type[i].type & mask) == mask) in nvif_mmu_type()
    56 return -EINVAL; in nvif_mmu_type()
|
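nvif_mmu_type() scans the type table for the first entry whose capability bits are a superset of the requested mask. The same test in isolation, with an invented struct:

    #include <stdint.h>

    struct mem_type { uint8_t flags; };

    /* Return the index of the first type whose flags contain every bit in
     * 'mask', or -1 if none does -- the (type & mask) == mask test above. */
    static int find_type(const struct mem_type *types, int nr, uint8_t mask)
    {
            for (int i = 0; i < nr; i++) {
                    if ((types[i].flags & mask) == mask)
                            return i;
            }
            return -1;
    }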
/Linux-v6.1/drivers/gpu/drm/nouveau/ |
D | nouveau_mem.c |
    45 switch (vmm->object.oclass) { in nouveau_mem_map()
    52 args.nv50.kind = mem->kind; in nouveau_mem_map()
    53 args.nv50.comp = mem->comp; in nouveau_mem_map()
    60 if (mem->mem.type & NVIF_MEM_VRAM) in nouveau_mem_map()
    66 args.gf100.kind = mem->kind; in nouveau_mem_map()
    71 return -ENOSYS; in nouveau_mem_map()
    74 return nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, &mem->mem, 0); in nouveau_mem_map()
    80 nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[1]); in nouveau_mem_fini()
    81 nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[0]); in nouveau_mem_fini()
    82 mutex_lock(&mem->cli->drm->master.lock); in nouveau_mem_fini()
    [all …]
|
/Linux-v6.1/arch/m68k/ |
D | Kconfig.cpu |
    1 # SPDX-License-Identifier: GPL-2.0
    2 comment "Processor Type"
    6 default M68KCLASSIC if MMU
    7 default COLDFIRE if !MMU
    13 applications, and are all System-On-Chip (SOC) devices, as opposed
    41 depends on !MMU
    54 System-On-Chip devices (eg 68328, 68302, etc). It does not contain
    55 a paging MMU.
    59 depends on MMU
    65 68851 MMU (Memory Management Unit) to run Linux/m68k, except on the
    [all …]
|
/Linux-v6.1/arch/m68k/include/asm/ |
D | sun3mmu.h |
    1 /* SPDX-License-Identifier: GPL-2.0 */
    3 * Definitions for Sun3 custom MMU.
    10 #include <asm/sun3-head.h>
    12 /* MMU characteristics. */
    19 #define SUN3_PMEG_MASK (SUN3_PMEG_SIZE - 1)
    23 #define SUN3_PTE_MASK (SUN3_PTE_SIZE - 1)
    32 #define AC_CONTEXT 0x30000000 /* 34c current mmu-context */
    36 #define AC_SYNC_ERR 0x60000000 /* c fault type */
    38 #define AC_ASYNC_ERR 0x60000008 /* c asynchronous fault type */
    44 #define AC_VME_VECTOR 0xE0000000 /* 4 For non-Autovector VME, byte */
    [all …]
|
/Linux-v6.1/arch/riscv/ |
D | Kconfig |
    1 # SPDX-License-Identifier: GPL-2.0-only
    4 # see Documentation/kbuild/kconfig-language.rst.
    21 select ARCH_HAS_DEBUG_VIRTUAL if MMU
    29 select ARCH_HAS_SET_DIRECT_MAP if MMU
    30 select ARCH_HAS_SET_MEMORY if MMU
    31 select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
    32 select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL
    39 select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
    40 select ARCH_SUPPORTS_HUGETLBFS if MMU
    41 select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
    [all …]
|
/Linux-v6.1/arch/arm/mm/ |
D | Kconfig |
    1 # SPDX-License-Identifier: GPL-2.0
    2 comment "Processor Type"
    11 depends on !MMU
    17 A 32-bit RISC microprocessor based on the ARM7 processor core
    30 select CPU_COPY_V4WT if MMU
    34 select CPU_TLB_V4WT if MMU
    36 A 32-bit RISC processor with 8kByte Cache, Write Buffer and
    37 MMU built around an ARM7TDMI core.
    45 depends on !MMU
    53 A 32-bit RISC processor with 8KB cache or 4KB variants,
    [all …]
|
/Linux-v6.1/drivers/gpu/drm/panfrost/ |
D | panfrost_mmu.c |
    1 // SPDX-License-Identifier: GPL-2.0
    9 #include <linux/dma-mapping.h>
    13 #include <linux/io-pgtable.h>
    26 #define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
    27 #define mmu_read(dev, reg) readl(dev->iomem + reg)
    34 /* Wait for the MMU status to indicate there is no active command, in in wait_ready()
    36 ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr), in wait_ready()
    42 dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n"); in wait_ready()
    52 /* write AS_COMMAND when MMU is ready to accept another command */ in write_cmd()
    78 region_width = max(fls64(region_start ^ (region_end - 1)), in lock_region()
    [all …]
|
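lock_region() sizes the MMU lock window as the smallest naturally aligned power-of-two region covering the requested range; XOR-ing the first and last addresses finds the highest differing bit, which is what the fls64(region_start ^ (region_end - 1)) expression is doing. A hedged standalone version of that calculation:

    #include <stdint.h>

    /* Log2 of the smallest naturally aligned power-of-two region that covers
     * [start, start + size). Because start and the last byte agree on every
     * bit above the highest differing one, both lie inside that block. */
    static unsigned int region_log2(uint64_t start, uint64_t size)
    {
            uint64_t last = start + size - 1;
            uint64_t diff = start ^ last;

            if (!diff)
                    return 0;                      /* single byte */
            return 64 - __builtin_clzll(diff);     /* equivalent of fls64(diff) */
    }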
/Linux-v6.1/arch/m68k/include/uapi/asm/ |
D | bootinfo.h |
    1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
    3 * asm/bootinfo.h -- Definition of the Linux/m68k boot information structure
    57 #define BI_MACHTYPE 0x0001 /* machine type (__be32) */
    58 #define BI_CPUTYPE 0x0002 /* cpu type (__be32) */
    59 #define BI_FPUTYPE 0x0003 /* fpu type (__be32) */
    60 #define BI_MMUTYPE 0x0004 /* mmu type (__be32) */
    70 * - length [ 2 bytes, 16-bit big endian ]
    71 * - seed data [ `length` bytes, padded to preserve 4-byte struct alignment ]
    97 * CPU, FPU and MMU types (BI_CPUTYPE, BI_FPUTYPE, BI_MMUTYPE)
    123 #define FPUB_SUNFPA 4 /* Sun-3 FPA */
    [all …]
|
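Lines 70-71 of bootinfo.h describe a length-prefixed record: a 16-bit big-endian length followed by the data, padded to preserve 4-byte alignment. A small parsing sketch under the assumption that the padding rounds the whole record up to a 4-byte multiple (the exact padding rule is not shown in the snippet):

    #include <stdint.h>
    #include <stddef.h>

    /* Parse one length-prefixed blob: 2-byte big-endian length, then 'length'
     * data bytes, padded to a 4-byte boundary. Returns bytes consumed, or 0
     * if the buffer is too short. */
    static size_t parse_record(const uint8_t *buf, size_t avail,
                               const uint8_t **data, uint16_t *len)
    {
            size_t total;

            if (avail < 2)
                    return 0;
            *len  = (uint16_t)(buf[0] << 8 | buf[1]);   /* big endian */
            *data = buf + 2;
            total = 2 + *len;
            total = (total + 3) & ~(size_t)3;           /* assumed padding rule */
            return total <= avail ? total : 0;
    }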
/Linux-v6.1/arch/x86/include/asm/ |
D | paravirt.h |
    1 /* SPDX-License-Identifier: GPL-2.0 */
    5 * para-virtualization: those hooks are defined here. */
    10 #include <asm/nospec-branch.h>
    71 PVOP_VCALL0(mmu.flush_tlb_user); in __flush_tlb_local()
    76 PVOP_VCALL0(mmu.flush_tlb_kernel); in __flush_tlb_global()
    81 PVOP_VCALL1(mmu.flush_tlb_one_user, addr); in __flush_tlb_one_user()
    87 PVOP_VCALL2(mmu.flush_tlb_multi, cpumask, info); in __flush_tlb_multi()
    92 PVOP_VCALL2(mmu.tlb_remove_table, tlb, table); in paravirt_tlb_remove_table()
    97 PVOP_VCALL1(mmu.exit_mmap, mm); in paravirt_arch_exit_mmap()
    103 PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc); in notify_page_enc_status_changed()
    [all …]
|