/Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
D | vmmnv44.c |
    28   dma_addr_t *list, u32 ptei, u32 ptes)  in nv44_vmm_pgt_fill()  argument
    38   while (ptes--) {  in nv44_vmm_pgt_fill()
    74   u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)  in nv44_vmm_pgt_pte()  argument
    79   const u32 pten = min(ptes, 4 - (ptei & 3));  in nv44_vmm_pgt_pte()
    84   ptes -= pten;  in nv44_vmm_pgt_pte()
    87   while (ptes >= 4) {  in nv44_vmm_pgt_pte()
    94   ptes -= 4;  in nv44_vmm_pgt_pte()
    97   if (ptes) {  in nv44_vmm_pgt_pte()
    98   for (i = 0; i < ptes; i++, addr += 0x1000)  in nv44_vmm_pgt_pte()
    100  nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, ptes);  in nv44_vmm_pgt_pte()
    [all …]
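The nv44 hits above show a common shape for PTE writers: entries are written in hardware-friendly groups of four, with a partial group at either end. A minimal, self-contained sketch of that loop structure follows; the PTE encoding and the 0x1000 page stride are simplified stand-ins for what the nv44 code actually writes, not the real accessors.

#include <stdint.h>

/* Illustrative only: mirrors the shape of nv44_vmm_pgt_pte() -- reach a
 * 4-entry boundary, write full groups of four, then the remainder. */
static void fill_ptes(uint32_t *pt, uint32_t ptei, uint32_t ptes, uint64_t addr)
{
	/* leading partial group, up to the next 4-entry boundary */
	if (ptei & 3) {
		uint32_t pten = 4 - (ptei & 3);

		if (pten > ptes)
			pten = ptes;
		for (; pten; pten--, ptes--, addr += 0x1000)
			pt[ptei++] = (uint32_t)(addr >> 12);	/* simplified PTE */
	}

	/* aligned groups of four */
	while (ptes >= 4) {
		int i;

		for (i = 0; i < 4; i++, addr += 0x1000)
			pt[ptei + i] = (uint32_t)(addr >> 12);
		ptei += 4;
		ptes -= 4;
	}

	/* trailing remainder */
	for (; ptes; ptes--, addr += 0x1000)
		pt[ptei++] = (uint32_t)(addr >> 12);
}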
|
D | vmmnv41.c |
    28   u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)  in nv41_vmm_pgt_pte()  argument
    31   while (ptes--) {  in nv41_vmm_pgt_pte()
    39   u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in nv41_vmm_pgt_sgl()  argument
    41   VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);  in nv41_vmm_pgt_sgl()
    46   u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in nv41_vmm_pgt_dma()  argument
    50   while (ptes--) {  in nv41_vmm_pgt_dma()
    56   VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);  in nv41_vmm_pgt_dma()
    62   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)  in nv41_vmm_pgt_unmap()  argument
    64   VMM_FO032(pt, vmm, ptei * 4, 0, ptes);  in nv41_vmm_pgt_unmap()
|
D | vmmgp100.c |
    35   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)  in gp100_vmm_pfn_unmap()  argument
    41   while (ptes--) {  in gp100_vmm_pfn_unmap()
    56   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)  in gp100_vmm_pfn_clear()  argument
    60   while (ptes--) {  in gp100_vmm_pfn_clear()
    76   u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in gp100_vmm_pgt_pfn()  argument
    82   while (ptes--) {  in gp100_vmm_pgt_pfn()
    110  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)  in gp100_vmm_pgt_pte()  argument
    114  map->type += ptes * map->ctag;  in gp100_vmm_pgt_pte()
    116  while (ptes--) {  in gp100_vmm_pgt_pte()
    124  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in gp100_vmm_pgt_sgl()  argument
    [all …]
|
D | vmmnv04.c |
    29   u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)  in nv04_vmm_pgt_pte()  argument
    32   while (ptes--) {  in nv04_vmm_pgt_pte()
    40   u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in nv04_vmm_pgt_sgl()  argument
    42   VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);  in nv04_vmm_pgt_sgl()
    47   u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in nv04_vmm_pgt_dma()  argument
    51   while (ptes--)  in nv04_vmm_pgt_dma()
    55   VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);  in nv04_vmm_pgt_dma()
    61   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)  in nv04_vmm_pgt_unmap()  argument
    63   VMM_FO032(pt, vmm, 8 + (ptei * 4), 0, ptes);  in nv04_vmm_pgt_unmap()
|
D | vmmnv50.c |
    33   u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)  in nv50_vmm_pgt_pte()  argument
    39   map->type += ptes * map->ctag;  in nv50_vmm_pgt_pte()
    41   while (ptes) {  in nv50_vmm_pgt_pte()
    44   if (ptes >= pten && IS_ALIGNED(ptei, pten))  in nv50_vmm_pgt_pte()
    50   ptes -= pten;  in nv50_vmm_pgt_pte()
    59   u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in nv50_vmm_pgt_sgl()  argument
    61   VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);  in nv50_vmm_pgt_sgl()
    66   u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in nv50_vmm_pgt_dma()  argument
    69   VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);  in nv50_vmm_pgt_dma()
    71   while (ptes--) {  in nv50_vmm_pgt_dma()
    [all …]
|
D | vmmgf100.c |
    33   u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)  in gf100_vmm_pgt_pte()  argument
    39   while (ptes--) {  in gf100_vmm_pgt_pte()
    48   map->type += ptes * map->ctag;  in gf100_vmm_pgt_pte()
    50   while (ptes--) {  in gf100_vmm_pgt_pte()
    59   u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in gf100_vmm_pgt_sgl()  argument
    61   VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);  in gf100_vmm_pgt_sgl()
    66   u32 ptei, u32 ptes, struct nvkm_vmm_map *map)  in gf100_vmm_pgt_dma()  argument
    69   VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);  in gf100_vmm_pgt_dma()
    71   while (ptes--) {  in gf100_vmm_pgt_dma()
    80   VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);  in gf100_vmm_pgt_dma()
    [all …]
|
D | vmm.c |
    198  const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)  in nvkm_vmm_unref_sptes()  argument
    209  for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {  in nvkm_vmm_unref_sptes()
    210  const u32 pten = min(sptn - spti, ptes);  in nvkm_vmm_unref_sptes()
    212  ptes -= pten;  in nvkm_vmm_unref_sptes()
    222  for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {  in nvkm_vmm_unref_sptes()
    236  for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {  in nvkm_vmm_unref_sptes()
    243  TRA(it, "LPTE %05x: U -> S %d PTEs", pteb, ptes);  in nvkm_vmm_unref_sptes()
    244  pair->func->sparse(vmm, pgt->pt[0], pteb, ptes);  in nvkm_vmm_unref_sptes()
    251  TRA(it, "LPTE %05x: U -> I %d PTEs", pteb, ptes);  in nvkm_vmm_unref_sptes()
    252  pair->func->invalid(vmm, pgt->pt[0], pteb, ptes);  in nvkm_vmm_unref_sptes()
    [all …]
|
D | vmmgk104.c |
    26   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)  in gk104_vmm_lpt_invalid()  argument
    29   VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(1) /* PRIV. */, ptes);  in gk104_vmm_lpt_invalid()
|
D | vmmgm200.c |
    29   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)  in gm200_vmm_pgt_sparse()  argument
    32   VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(32) /* VOL. */, ptes);  in gm200_vmm_pgt_sparse()
|
D | vmm.h |
    54   struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
    58   u32 ptei, u32 ptes, struct nvkm_vmm_map *);
    72   bool (*pfn_clear)(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
|
/Linux-v5.4/arch/x86/xen/ |
D | grant-table.c |
    28   pte_t **ptes;  member
    45   set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i],  in arch_gnttab_map_shared()
    67   set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i],  in arch_gnttab_map_status()
    77   pte_t **ptes;  in arch_gnttab_unmap()  local
    82   ptes = gnttab_status_vm_area.ptes;  in arch_gnttab_unmap()
    84   ptes = gnttab_shared_vm_area.ptes;  in arch_gnttab_unmap()
    89   set_pte_at(&init_mm, addr, ptes[i], __pte(0));  in arch_gnttab_unmap()
    96   area->ptes = kmalloc_array(nr_frames, sizeof(*area->ptes), GFP_KERNEL);  in arch_gnttab_valloc()
    97   if (area->ptes == NULL)  in arch_gnttab_valloc()
    100  area->area = alloc_vm_area(PAGE_SIZE * nr_frames, area->ptes);  in arch_gnttab_valloc()
    [all …]
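The arch_gnttab_valloc() hits show the v5.4 idiom for reserving a kernel virtual area together with pointers to its PTEs: allocate the pointer array, let alloc_vm_area() populate it, and later fill the entries with set_pte_at(). A hedged sketch of that allocation step; struct my_vm_area and my_valloc() are hypothetical stand-ins for the gnttab_vm_area structures, and error handling is abbreviated.

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct my_vm_area {			/* stand-in for struct gnttab_vm_area */
	struct vm_struct *area;
	pte_t **ptes;
};

static int my_valloc(struct my_vm_area *a, unsigned int nr_frames)
{
	/* one pte_t pointer per frame we intend to map later */
	a->ptes = kmalloc_array(nr_frames, sizeof(*a->ptes), GFP_KERNEL);
	if (!a->ptes)
		return -ENOMEM;

	/* v5.4 API: reserves VA space and hands back the PTE locations */
	a->area = alloc_vm_area(PAGE_SIZE * nr_frames, a->ptes);
	if (!a->area) {
		kfree(a->ptes);
		a->ptes = NULL;
		return -ENOMEM;
	}
	return 0;
}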
|
/Linux-v5.4/block/partitions/ |
D | efi.c |
    338  gpt_header **gpt, gpt_entry **ptes)  in is_gpt_valid()  argument
    343  if (!ptes)  in is_gpt_valid()
    432  if (!(*ptes = alloc_read_gpt_entries(state, *gpt)))  in is_gpt_valid()
    436  crc = efi_crc32((const unsigned char *) (*ptes), pt_size);  in is_gpt_valid()
    447  kfree(*ptes);  in is_gpt_valid()
    448  *ptes = NULL;  in is_gpt_valid()
    584  gpt_entry **ptes)  in find_valid_gpt()  argument
    593  if (!ptes)  in find_valid_gpt()
    633  *ptes = pptes;  in find_valid_gpt()
    642  *ptes = aptes;  in find_valid_gpt()
    [all …]
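is_gpt_valid() reads the GPT partition entry array, checksums it, and discards it on mismatch. A small sketch of that check, assuming efi_crc32() is the usual crc32(~0, buf, len) ^ ~0 convention used by the partition code; my_efi_crc32() and check_pte_array() are illustrative names.

#include <linux/crc32.h>
#include <linux/errno.h>

/* Assumed seed/finalise convention of the partition code's efi_crc32(). */
static u32 my_efi_crc32(const void *buf, unsigned long len)
{
	return crc32(~0L, buf, len) ^ ~0L;
}

/* Returns 0 if the on-disk CRC over the entry array matches. */
static int check_pte_array(const void *ptes, unsigned long pt_size, u32 expected)
{
	return my_efi_crc32(ptes, pt_size) == expected ? 0 : -EINVAL;
}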
|
/Linux-v5.4/arch/alpha/kernel/ |
D | pci_iommu.c |
    88   arena->ptes = memblock_alloc_node(sizeof(*arena), align, nid);  in iommu_arena_new_node()
    89   if (!NODE_DATA(nid) || !arena->ptes) {  in iommu_arena_new_node()
    93   arena->ptes = memblock_alloc(mem_size, align);  in iommu_arena_new_node()
    94   if (!arena->ptes)  in iommu_arena_new_node()
    105  arena->ptes = memblock_alloc(mem_size, align);  in iommu_arena_new_node()
    106  if (!arena->ptes)  in iommu_arena_new_node()
    137  unsigned long *ptes;  in iommu_arena_find_pages()  local
    152  ptes = arena->ptes;  in iommu_arena_find_pages()
    164  if (ptes[p+i])  in iommu_arena_find_pages()
    196  unsigned long *ptes;  in iommu_arena_alloc()  local
    [all …]
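In the Alpha IOMMU arena, ptes is a flat array of translation entries and a zero entry means free; iommu_arena_find_pages() scans it for a run of n free slots. A simplified sketch of that scan (the real code also applies an alignment mask and starts from a rolling next-entry hint, both omitted here):

/* Return the index of n consecutive free (zero) entries, or -1 if none. */
static long find_free_run(const unsigned long *ptes, unsigned long nent,
			  unsigned long n)
{
	unsigned long p = 0, i;

	while (p + n <= nent) {
		for (i = 0; i < n && !ptes[p + i]; i++)
			;
		if (i == n)
			return p;
		p += i + 1;	/* jump past the in-use entry we hit */
	}
	return -1;
}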
|
D | core_titan.c |
    329  port->tba[0].csr = virt_to_phys(hose->sg_isa->ptes);  in titan_init_one_pachip_port()
    337  port->tba[2].csr = virt_to_phys(hose->sg_pci->ptes);  in titan_init_one_pachip_port()
    464  unsigned long *ptes;  in titan_ioremap()  local
    519  ptes = hose->sg_pci->ptes;  in titan_ioremap()
    523  pfn = ptes[baddr >> PAGE_SHIFT];  in titan_ioremap()
    712  pte = aper->arena->ptes[baddr >> PAGE_SHIFT];  in titan_agp_translate()
|
D | core_marvel.c |
    297  csrs->POx_TBASE[0].csr = virt_to_phys(hose->sg_isa->ptes);  in io7_init_hose()
    315  csrs->POx_TBASE[2].csr = virt_to_phys(hose->sg_pci->ptes);  in io7_init_hose()
    693  unsigned long *ptes;  in marvel_ioremap()  local
    748  ptes = hose->sg_pci->ptes;  in marvel_ioremap()
    752  pfn = ptes[baddr >> PAGE_SHIFT];  in marvel_ioremap()
    1051 pte = aper->arena->ptes[baddr >> PAGE_SHIFT];  in marvel_agp_translate()
|
D | pci_impl.h |
    139  unsigned long *ptes;  member
|
D | core_cia.c |
    464  arena->ptes[4] = pte0;  in verify_tb_operation()
    488  arena->ptes[5] = pte0;  in verify_tb_operation()
    524  arena->ptes[4] = 0;  in verify_tb_operation()
    525  arena->ptes[5] = 0;  in verify_tb_operation()
    737  *(vip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2;  in do_init_arch()
|
/Linux-v5.4/drivers/staging/gasket/ |
D | gasket_page_table.c |
    302  static bool gasket_is_pte_range_free(struct gasket_page_table_entry *ptes,  in gasket_is_pte_range_free()  argument
    308  if (ptes[i].status != PTE_FREE)  in gasket_is_pte_range_free()
    466  struct gasket_page_table_entry *ptes,  in gasket_perform_mapping()  argument
    484  ptes[i].page = NULL;  in gasket_perform_mapping()
    485  ptes[i].offset = offset;  in gasket_perform_mapping()
    486  ptes[i].dma_addr = pg_tbl->coherent_pages[0].paddr +  in gasket_perform_mapping()
    500  ptes[i].page = page;  in gasket_perform_mapping()
    501  ptes[i].offset = offset;  in gasket_perform_mapping()
    504  ptes[i].dma_addr =  in gasket_perform_mapping()
    509  ptes[i].dma_addr)) {  in gasket_perform_mapping()
    [all …]
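gasket_perform_mapping() records, for each user page, the struct page, its offset, and the DMA handle returned by the mapping call, bailing out if the mapping fails. A reduced sketch of that per-page step; struct my_pte and map_one_page() are simplified stand-ins for the gasket structures, not the driver's own helpers.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct my_pte {				/* stand-in for gasket_page_table_entry */
	struct page *page;
	dma_addr_t dma_addr;
};

static int map_one_page(struct device *dev, struct my_pte *pte,
			struct page *page)
{
	pte->page = page;
	pte->dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
				     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pte->dma_addr)) {
		pte->page = NULL;
		return -EINVAL;		/* caller unwinds earlier entries */
	}
	return 0;
}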
|
/Linux-v5.4/arch/powerpc/include/asm/ |
D | plpar_wrappers.h |
    171  unsigned long *ptes)  in plpar_pte_read_4()  argument
    179  memcpy(ptes, retbuf, 8*sizeof(unsigned long));  in plpar_pte_read_4()
    189  unsigned long *ptes)  in plpar_pte_read_4_raw()  argument
    197  memcpy(ptes, retbuf, 8*sizeof(unsigned long));  in plpar_pte_read_4_raw()
    345  unsigned long *ptes)  in plpar_pte_read_4()  argument
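plpar_pte_read_4() returns four hash PTEs, i.e. four V/R pairs or eight unsigned longs, from one hypervisor call, which is why both variants above end in the same eight-word memcpy. A hedged sketch of what such a wrapper looks like, assuming the usual H_READ / H_READ_4 hcall plumbing; only the memcpy line and the signature are confirmed by the listing, the rest is an assumption.

#include <linux/string.h>
#include <asm/hvcall.h>

/* Read the 4-PTE group starting at ptex; ptes receives 4 x (V, R). */
static inline long my_pte_read_4(unsigned long flags, unsigned long ptex,
				 unsigned long *ptes)
{
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
	long rc;

	rc = plpar_hcall9(H_READ, retbuf, flags | H_READ_4, ptex);

	memcpy(ptes, retbuf, 8 * sizeof(unsigned long));
	return rc;
}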
|
/Linux-v5.4/arch/powerpc/mm/ptdump/ |
D | hashpagetable.c |
    242  struct hash_pte ptes[4];  in pseries_find()  local
    260  lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);  in pseries_find()
    265  if (HPTE_V_COMPARE(ptes[j].v, want_v) &&  in pseries_find()
    266  (ptes[j].v & HPTE_V_VALID)) {  in pseries_find()
    268  *v = ptes[j].v;  in pseries_find()
    269  *r = ptes[j].r;  in pseries_find()
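pseries_find() reads the hash table in groups of four entries and compares each against the wanted virtual word before accepting a valid match. A simplified sketch of that inner comparison, reusing only the names visible in the hits above; the surrounding group iteration and slot accounting are omitted, and match_in_quad() is an illustrative helper.

#include <asm/book3s/64/mmu-hash.h>
#include <asm/plpar_wrappers.h>

/* Check one 4-entry read for a match on want_v; returns the slot or -1. */
static int match_in_quad(unsigned long hpte_group, unsigned long want_v,
			 unsigned long *v, unsigned long *r)
{
	struct hash_pte ptes[4];
	int j;

	if (plpar_pte_read_4(0, hpte_group, (void *)ptes))
		return -1;

	for (j = 0; j < 4; j++) {
		if (HPTE_V_COMPARE(ptes[j].v, want_v) &&
		    (ptes[j].v & HPTE_V_VALID)) {
			*v = ptes[j].v;
			*r = ptes[j].r;
			return j;
		}
	}
	return -1;
}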
|
/Linux-v5.4/arch/powerpc/platforms/pseries/ |
D | lpar.c |
    802  } ptes[4];  in manual_hpte_clear_all()  local
    811  lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);  in manual_hpte_clear_all()
    818  if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==  in manual_hpte_clear_all()
    821  if (ptes[j].pteh & HPTE_V_VALID)  in manual_hpte_clear_all()
    823  &(ptes[j].pteh), &(ptes[j].ptel));  in manual_hpte_clear_all()
    910  } ptes[4];  in __pSeries_lpar_hpte_find()  local
    914  lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);  in __pSeries_lpar_hpte_find()
    922  if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&  in __pSeries_lpar_hpte_find()
    923  (ptes[j].pteh & HPTE_V_VALID))  in __pSeries_lpar_hpte_find()
|
/Linux-v5.4/arch/powerpc/mm/ |
D | hugetlbpage.c |
    249  void *ptes[0];  member
    261  kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]);  in hugepd_free_rcu_callback()
    284  (*batchp)->ptes[(*batchp)->index++] = hugepte;  in hugepd_free()
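The hugetlbpage.c hits show page-table fragments being queued into a batch whose ptes[] array is only freed from an RCU callback, so lockless walkers never see a table that has just been torn down. A generic sketch of that deferred-free pattern; the batch size, kfree() and the structure names are illustrative, while the real code frees into a kmem cache sized by PTE_T_ORDER.

#include <linux/rcupdate.h>
#include <linux/slab.h>

#define PTE_BATCH_MAX 16		/* illustrative batch size */

struct pte_free_batch {
	struct rcu_head rcu;
	unsigned int index;
	void *ptes[PTE_BATCH_MAX];
};

static void pte_free_batch_rcu(struct rcu_head *head)
{
	struct pte_free_batch *batch =
		container_of(head, struct pte_free_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kfree(batch->ptes[i]);	/* safe: a grace period has elapsed */
	kfree(batch);
}

static void pte_free_defer(struct pte_free_batch *batch, void *pte)
{
	batch->ptes[batch->index++] = pte;
	if (batch->index == PTE_BATCH_MAX)
		call_rcu(&batch->rcu, pte_free_batch_rcu);
}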
|
/Linux-v5.4/arch/x86/kvm/ |
D | paging_tmpl.h |
    88   pt_element_t ptes[PT_MAX_FULL_LEVELS];  member
    236  pte = orig_pte = walker->ptes[level - 1];  in FNAME()
    277  walker->ptes[level - 1] = pte;  in FNAME()
    408  walker->ptes[walker->level - 1] = pte;  in FNAME()
    575  return r || curr_pte != gw->ptes[level - 1];  in FNAME()
|
/Linux-v5.4/drivers/xen/xenbus/ |
D | xenbus_client.c |
    676  pte_t *ptes[XENBUS_MAX_RING_GRANTS];  in xenbus_map_ring_valloc_pv()  local
    691  area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);  in xenbus_map_ring_valloc_pv()
    698  phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;  in xenbus_map_ring_valloc_pv()
|
/Linux-v5.4/mm/ |
D | swap_state.c |
    699  ra_info->ptes = pte;  in swap_ra_info()
    701  tpte = ra_info->ptes;  in swap_ra_info()
    739  for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;  in swap_vma_readahead()
|