/Linux-v6.6/drivers/mtd/tests/

pagetest.c
    34: static int pgsize;  [variable]
    66: for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {  [in verify_eraseblock()]
    78: if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) {  [in verify_eraseblock()]
    85: if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) {  [in verify_eraseblock()]
    99: memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);  [in verify_eraseblock()]
    100: prandom_bytes_state(&rnd_state, boundary + pgsize, pgsize);  [in verify_eraseblock()]
    118: pp1 = kcalloc(pgsize, 4, GFP_KERNEL);  [in crosstest()]
    121: pp2 = pp1 + pgsize;  [in crosstest()]
    122: pp3 = pp2 + pgsize;  [in crosstest()]
    123: pp4 = pp3 + pgsize;  [in crosstest()]
    [all …]
torturetest.c
    70: static int pgsize;  [variable]
    97: addr = (loff_t)(ebnum + 1) * mtd->erasesize - pgcnt * pgsize;  [in check_eraseblock()]
    98: len = pgcnt * pgsize;  [in check_eraseblock()]
    151: addr = (loff_t)(ebnum + 1) * mtd->erasesize - pgcnt * pgsize;  [in write_pattern()]
    152: len = pgcnt * pgsize;  [in write_pattern()]
    203: pgsize = 512;  [in tort_init()]
    205: pgsize = mtd->writesize;  [in tort_init()]
    207: if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) {  [in tort_init()]
    235: for (i = 0; i < mtd->erasesize / pgsize; i++) {  [in tort_init()]
    237: memset(patt_5A5 + i * pgsize, 0x55, pgsize);  [in tort_init()]
    [all …]
speedtest.c
    37: static int pgsize;  [variable]
    77: err = mtdtest_write(mtd, addr, pgsize, buf);  [in write_eraseblock_by_page()]
    80: addr += pgsize;  [in write_eraseblock_by_page()]
    81: buf += pgsize;  [in write_eraseblock_by_page()]
    89: size_t sz = pgsize * 2;  [in write_eraseblock_by_2pages()]
    102: err = mtdtest_write(mtd, addr, pgsize, buf);  [in write_eraseblock_by_2pages()]
    121: err = mtdtest_read(mtd, addr, pgsize, buf);  [in read_eraseblock_by_page()]
    124: addr += pgsize;  [in read_eraseblock_by_page()]
    125: buf += pgsize;  [in read_eraseblock_by_page()]
    133: size_t sz = pgsize * 2;  [in read_eraseblock_by_2pages()]
    [all …]
readtest.c
    31: static int pgsize;  [variable]
    43: memset(buf, 0 , pgsize);  [in read_eraseblock_by_page()]
    44: ret = mtdtest_read(mtd, addr, pgsize, buf);  [in read_eraseblock_by_page()]
    72: addr += pgsize;  [in read_eraseblock_by_page()]
    73: buf += pgsize;  [in read_eraseblock_by_page()]
    138: pgsize = 512;  [in mtd_readtest_init()]
    140: pgsize = mtd->writesize;  [in mtd_readtest_init()]
    145: pgcnt = mtd->erasesize / pgsize;  [in mtd_readtest_init()]
    151: pgsize, ebcnt, pgcnt, mtd->oobsize);  [in mtd_readtest_init()]
stresstest.c
    38: static int pgsize;  [variable]
    95: len = ((len + pgsize - 1) / pgsize) * pgsize;  [in do_write()]
    154: pgsize = 512;  [in mtd_stresstest_init()]
    156: pgsize = mtd->writesize;  [in mtd_stresstest_init()]
    161: pgcnt = mtd->erasesize / pgsize;  [in mtd_stresstest_init()]
    167: pgsize, ebcnt, pgcnt, mtd->oobsize);  [in mtd_stresstest_init()]
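Two idioms recur across these MTD tests: pgsize falls back to 512 bytes when the device reports a write size of 1 (i.e. not NAND-style flash), and arbitrary lengths are rounded up to whole pages as in do_write() above. A minimal standalone sketch of both, using a stand-in struct rather than the kernel's struct mtd_info:

#include <stddef.h>

/* Stand-in for the two mtd_info fields the tests consult. */
struct mtd_geometry {
	unsigned int writesize;   /* smallest writable unit; 1 on NOR-style flash */
	unsigned int erasesize;   /* eraseblock size */
};

/* Page-size selection as in the *_init() functions above: NAND reports its
 * real write size, anything else is treated as 512-byte pages. */
static int pick_pgsize(const struct mtd_geometry *mtd)
{
	return mtd->writesize == 1 ? 512 : (int)mtd->writesize;
}

/* Round a length up to a whole number of pages, as in do_write(). */
static size_t round_up_to_pages(size_t len, size_t pgsize)
{
	return ((len + pgsize - 1) / pgsize) * pgsize;
}

With pgsize chosen this way, pgcnt = mtd->erasesize / pgsize is the number of pages per eraseblock, which is the other expression these init functions compute.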
/Linux-v6.6/arch/arm64/mm/

hugetlbpage.c
    104: pte_t *ptep, size_t *pgsize)  [in find_num_contig(), argument]
    111: *pgsize = PAGE_SIZE;  [in find_num_contig()]
    116: *pgsize = PMD_SIZE;  [in find_num_contig()]
    122: static inline int num_contig_ptes(unsigned long size, size_t *pgsize)  [in num_contig_ptes(), argument]
    126: *pgsize = size;  [in num_contig_ptes()]
    139: *pgsize = PMD_SIZE;  [in num_contig_ptes()]
    143: *pgsize = PAGE_SIZE;  [in num_contig_ptes()]
    154: size_t pgsize;  [in huge_ptep_get(), local]
    160: ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);  [in huge_ptep_get()]
    184: unsigned long pgsize,  [in get_clear_contig(), argument]
    [all …]
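The helpers above convert a hugepage size into a pair (number of contiguous page-table entries, granule of each entry): a contiguous-PTE hugepage is backed by several PAGE_SIZE entries, a contiguous-PMD hugepage by several PMD_SIZE entries, and everything else by a single entry of its own size. A simplified, self-contained sketch of the num_contig_ptes() idea; the CONT_* sizes are illustrative values for a 4K granule, not taken from the hits above:

#include <stddef.h>

#define PAGE_SIZE_SK      (4UL << 10)          /* 4K granule */
#define PMD_SIZE_SK       (2UL << 20)          /* 2M block */
#define CONT_PTE_SIZE_SK  (16 * PAGE_SIZE_SK)  /* 64K contiguous-PTE span */
#define CONT_PMD_SIZE_SK  (16 * PMD_SIZE_SK)   /* 32M contiguous-PMD span */

/* Return how many contiguous entries back a hugepage of this size, and
 * report the granule of each entry through *pgsize. */
static int num_contig_ptes_sketch(unsigned long size, size_t *pgsize)
{
	*pgsize = size;          /* default: one entry of the hugepage size */

	if (size == CONT_PMD_SIZE_SK) {
		*pgsize = PMD_SIZE_SK;
		return CONT_PMD_SIZE_SK / PMD_SIZE_SK;
	}
	if (size == CONT_PTE_SIZE_SK) {
		*pgsize = PAGE_SIZE_SK;
		return CONT_PTE_SIZE_SK / PAGE_SIZE_SK;
	}
	return 1;                /* PUD/PMD hugepages use a single entry */
}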
/Linux-v6.6/tools/testing/selftests/powerpc/mm/

pkey_exec_prot.c
    27: static unsigned long pgsize, numinsns;  [variable]
    62: if (mprotect(insns, pgsize, PROT_READ | PROT_WRITE)) {  [in segv_handler()]
    84: if (mprotect(insns, pgsize, PROT_EXEC)) {  [in segv_handler()]
    129: pgsize = getpagesize();  [in test()]
    130: numinsns = pgsize / sizeof(unsigned int);  [in test()]
    131: insns = (unsigned int *) mmap(NULL, pgsize, PROT_READ | PROT_WRITE,  [in test()]
    179: FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0);  [in test()]
    196: FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0);  [in test()]
    216: FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0);  [in test()]
    233: FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0);  [in test()]
    [all …]
exec_prot.c
    29: static unsigned long pgsize, numinsns;  [variable]
    73: if (mprotect(insns, pgsize, PROT_READ | PROT_WRITE | PROT_EXEC)) {  [in segv_handler()]
    98: FAIL_IF(mprotect(insns, pgsize, rights) != 0);  [in check_exec_fault()]
    136: pgsize = getpagesize();  [in test()]
    137: numinsns = pgsize / sizeof(unsigned int);  [in test()]
    138: insns = (unsigned int *)mmap(NULL, pgsize, PROT_READ | PROT_WRITE,  [in test()]
    173: FAIL_IF(mprotect(insns, pgsize, PROT_EXEC) != 0);  [in test()]
    185: FAIL_IF(mprotect(insns, pgsize, PROT_EXEC) != 0);  [in test()]
    223: FAIL_IF(munmap((void *)insns, pgsize));  [in test()]
pkey_siginfo.c
    38: size_t pgsize;  [in segv_handler(), local]
    67: pgsize = getpagesize();  [in segv_handler()]
    68: pgstart = (void *) ((unsigned long) fault_addr & ~(pgsize - 1));  [in segv_handler()]
    83: mprotect(pgstart, pgsize, PROT_EXEC))  [in segv_handler()]
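All three selftests above size their mmap()/mprotect() calls with pgsize = getpagesize(), and pkey_siginfo.c additionally rounds a faulting address down to its page boundary before changing protection, since mprotect() operates on whole pages. A small standalone illustration of that fault-handler step:

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

/* Round an arbitrary address down to the start of its page, then make that
 * single page executable, mirroring the segv_handler() pattern above. */
static int make_fault_page_exec(void *fault_addr)
{
	size_t pgsize = (size_t)getpagesize();
	void *pgstart = (void *)((uintptr_t)fault_addr & ~((uintptr_t)pgsize - 1));

	return mprotect(pgstart, pgsize, PROT_EXEC);
}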
/Linux-v6.6/tools/testing/selftests/powerpc/tm/

tm-vmxcopy.c
    39: unsigned long pgsize = getpagesize();  [in test_vmxcopy(), local]
    42: int size = pgsize*16;  [in test_vmxcopy()]
    44: char buf[pgsize];  [in test_vmxcopy()]
    55: memset(buf, 0, pgsize);  [in test_vmxcopy()]
    56: for (i = 0; i < size; i += pgsize)  [in test_vmxcopy()]
    57: assert(write(fd, buf, pgsize) == pgsize);  [in test_vmxcopy()]
/Linux-v6.6/drivers/iommu/amd/

io_pgtable_v2.c
    237: phys_addr_t paddr, size_t pgsize, size_t pgcount,  [in iommu_v2_map_pages(), argument]
    246: size_t size = pgcount << __ffs(pgsize);  [in iommu_v2_map_pages()]
    251: if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize) || !pgcount)  [in iommu_v2_map_pages()]
    258: map_size = get_alloc_page_size(pgsize);  [in iommu_v2_map_pages()]
    290: size_t pgsize, size_t pgcount,  [in iommu_v2_unmap_pages(), argument]
    297: size_t size = pgcount << __ffs(pgsize);  [in iommu_v2_unmap_pages()]
    300: if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))  [in iommu_v2_unmap_pages()]
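Two conventions visible in iommu_v2_map_pages()/iommu_v2_unmap_pages() recur throughout the IOMMU page-table code below: pgsize must be a single page size advertised in cfg->pgsize_bitmap (a power of two), and the total byte count is therefore pgcount << __ffs(pgsize), i.e. pgcount * pgsize. A standalone sketch of those checks, with a compiler builtin standing in for the kernel's __ffs():

#include <stdbool.h>
#include <stddef.h>

/* Validate a (pgsize, pgcount) request against a supported-page-size bitmap
 * and compute the total size in bytes, in the spirit of the map_pages()/
 * unmap_pages() implementations listed here. */
static bool check_and_size(size_t pgsize, size_t pgcount,
			   unsigned long pgsize_bitmap, size_t *total)
{
	/* pgsize must be exactly one bit, and that bit must be supported. */
	if (!pgsize || (pgsize & (pgsize - 1)) ||
	    (pgsize & pgsize_bitmap) != pgsize || !pgcount)
		return false;

	/* __builtin_ctzl(pgsize) plays the role of __ffs(pgsize):
	 * pgcount << log2(pgsize) == pgcount * pgsize. */
	*total = pgcount << __builtin_ctzl(pgsize);
	return true;
}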
io_pgtable.c
    364: phys_addr_t paddr, size_t pgsize, size_t pgcount,  [in iommu_v1_map_pages(), argument]
    373: BUG_ON(!IS_ALIGNED(iova, pgsize));  [in iommu_v1_map_pages()]
    374: BUG_ON(!IS_ALIGNED(paddr, pgsize));  [in iommu_v1_map_pages()]
    381: count = PAGE_SIZE_PTE_COUNT(pgsize);  [in iommu_v1_map_pages()]
    382: pte = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);  [in iommu_v1_map_pages()]
    395: __pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);  [in iommu_v1_map_pages()]
    408: iova += pgsize;  [in iommu_v1_map_pages()]
    409: paddr += pgsize;  [in iommu_v1_map_pages()]
    412: *mapped += pgsize;  [in iommu_v1_map_pages()]
    440: size_t pgsize, size_t pgcount,  [in iommu_v1_unmap_pages(), argument]
    [all …]
/Linux-v6.6/drivers/gpu/drm/msm/

msm_iommu.c
    40: size_t offset, pgsize, pgsize_next;  [in calc_pgsize(), local]
    55: pgsize = BIT(pgsize_idx);  [in calc_pgsize()]
    57: return pgsize;  [in calc_pgsize()]
    86: return pgsize;  [in calc_pgsize()]
    96: size_t unmapped, pgsize, count;  [in msm_iommu_pagetable_unmap(), local]
    98: pgsize = calc_pgsize(pagetable, iova, iova, size, &count);  [in msm_iommu_pagetable_unmap()]
    100: unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);  [in msm_iommu_pagetable_unmap()]
    127: size_t pgsize, count, mapped = 0;  [in msm_iommu_pagetable_map(), local]
    130: pgsize = calc_pgsize(pagetable, addr, phys, size, &count);  [in msm_iommu_pagetable_map()]
    132: ret = ops->map_pages(ops, addr, phys, pgsize, count,  [in msm_iommu_pagetable_map()]
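calc_pgsize() above mirrors the core IOMMU logic for splitting a region: take the largest supported page size that is no bigger than the remaining length and no stricter than the alignment of both addresses. A simplified standalone sketch, without the look-ahead to the next page size that the real helper adds; __builtin_ctzl()/__builtin_clzl() stand in for the kernel's __ffs()/__fls(), unsigned long is assumed 64-bit, and the caller is assumed to pass addresses and a length that are multiples of the smallest supported page size:

#include <stddef.h>

/* Pick the largest supported page size that fits the alignment of both iova
 * and paddr and the remaining length; report how many pages of that size to
 * use through *count. pgsize_bitmap has one bit set per supported size. */
static size_t calc_pgsize_sketch(unsigned long pgsize_bitmap,
				 unsigned long iova, unsigned long paddr,
				 size_t size, size_t *count)
{
	unsigned long addr_merge = iova | paddr;
	unsigned long candidates;
	size_t pgsize;

	/* Sizes no larger than the remaining length... */
	candidates = pgsize_bitmap & ((2UL << (63 - __builtin_clzl(size))) - 1);

	/* ...and no larger than the alignment of the addresses. */
	if (addr_merge)
		candidates &= (2UL << __builtin_ctzl(addr_merge)) - 1;

	/* Take the biggest remaining candidate (assumed non-empty). */
	pgsize = 1UL << (63 - __builtin_clzl(candidates));

	*count = size / pgsize;
	return pgsize;
}

msm_iommu_pagetable_map()/unmap() then simply loop, advancing iova (and paddr) by pgsize * count each iteration until the whole range is covered.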
/Linux-v6.6/drivers/gpu/drm/etnaviv/

etnaviv_mmu.c
    20: size_t pgsize = SZ_4K;  [in etnaviv_context_unmap(), local]
    22: if (!IS_ALIGNED(iova | size, pgsize)) {  [in etnaviv_context_unmap()]
    24: iova, size, pgsize);  [in etnaviv_context_unmap()]
    30: pgsize);  [in etnaviv_context_unmap()]
    44: size_t pgsize = SZ_4K;  [in etnaviv_context_map(), local]
    48: if (!IS_ALIGNED(iova | paddr | size, pgsize)) {  [in etnaviv_context_map()]
    50: iova, &paddr, size, pgsize);  [in etnaviv_context_map()]
    55: ret = context->global->ops->map(context, iova, paddr, pgsize,  [in etnaviv_context_map()]
    60: iova += pgsize;  [in etnaviv_context_map()]
    61: paddr += pgsize;  [in etnaviv_context_map()]
    [all …]
/Linux-v6.6/drivers/iommu/

io-pgtable-dart.c
    237: phys_addr_t paddr, size_t pgsize, size_t pgcount,  [in dart_map_pages(), argument]
    247: if (WARN_ON(pgsize != cfg->pgsize_bitmap))  [in dart_map_pages()]
    287: *mapped += num_entries * pgsize;  [in dart_map_pages()]
    299: size_t pgsize, size_t pgcount,  [in dart_unmap_pages(), argument]
    307: if (WARN_ON(pgsize != cfg->pgsize_bitmap || !pgcount))  [in dart_unmap_pages()]
    332: iova + i * pgsize, pgsize);  [in dart_unmap_pages()]
    338: return i * pgsize;  [in dart_unmap_pages()]
s390-iommu.c
    316: size_t pgsize, size_t pgcount,  [in s390_iommu_map_pages(), argument]
    320: size_t size = pgcount << __ffs(pgsize);  [in s390_iommu_map_pages()]
    323: if (pgsize != SZ_4K)  [in s390_iommu_map_pages()]
    330: if (!IS_ALIGNED(iova | paddr, pgsize))  [in s390_iommu_map_pages()]
    382: size_t pgsize, size_t pgcount,  [in s390_iommu_unmap_pages(), argument]
    386: size_t size = pgcount << __ffs(pgsize);  [in s390_iommu_unmap_pages()]
io-pgtable-arm.c
    465: phys_addr_t paddr, size_t pgsize, size_t pgcount,  [in arm_lpae_map_pages(), argument]
    475: if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))  [in arm_lpae_map_pages()]
    488: ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,  [in arm_lpae_map_pages()]
    658: size_t pgsize, size_t pgcount,  [in arm_lpae_unmap_pages(), argument]
    666: if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))  [in arm_lpae_unmap_pages()]
    674: return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,  [in arm_lpae_unmap_pages()]
    1259: static const unsigned long pgsize[] __initconst = {  [in arm_lpae_do_selftests(), local]
    1281: for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {  [in arm_lpae_do_selftests()]
    1283: cfg.pgsize_bitmap = pgsize[i];  [in arm_lpae_do_selftests()]
    1286: pgsize[i], ias[j]);  [in arm_lpae_do_selftests()]
apple-dart.c
    217: u32 pgsize;  [member]
    528: phys_addr_t paddr, size_t pgsize,  [in apple_dart_map_pages(), argument]
    538: return ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp,  [in apple_dart_map_pages()]
    543: unsigned long iova, size_t pgsize,  [in apple_dart_unmap_pages(), argument]
    550: return ops->unmap_pages(ops, iova, pgsize, pgcount, gather);  [in apple_dart_unmap_pages()]
    593: .pgsize_bitmap = dart->pgsize,  [in apple_dart_finalize_domain()]
    775: if (cfg_dart->pgsize != dart->pgsize)  [in apple_dart_of_xlate()]
    1087: dart->pgsize = 1 << FIELD_GET(DART_PARAMS1_PAGE_SHIFT, dart_params[0]);  [in apple_dart_probe()]
    1114: dart->force_bypass = dart->pgsize > PAGE_SIZE;  [in apple_dart_probe()]
    1139: dart->pgsize, dart->num_streams, dart->supports_bypass, dart->force_bypass);  [in apple_dart_probe()]
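apple_dart_probe() above reads the DART's page shift out of a hardware parameter register, derives pgsize from it, and later forces bypass when that granule exceeds the CPU's PAGE_SIZE (such a DART cannot map individual CPU pages). A generic sketch of that decode step; the field layout here is made up for illustration and is not the real DART_PARAMS1 encoding:

#include <stdbool.h>
#include <stdint.h>

#define PARAMS_PAGE_SHIFT_MASK   0x0f000000u   /* illustrative field layout */
#define PARAMS_PAGE_SHIFT_SHIFT  24

/* Decode the IOMMU granule from a parameters register and decide whether the
 * CPU page size can be mapped at that granularity. */
static uint32_t decode_iommu_pgsize(uint32_t params, unsigned long cpu_pgsize,
				    bool *force_bypass)
{
	uint32_t shift = (params & PARAMS_PAGE_SHIFT_MASK) >> PARAMS_PAGE_SHIFT_SHIFT;
	uint32_t pgsize = 1u << shift;

	/* A granule coarser than the CPU page size cannot map individual CPU
	 * pages, so translation has to be bypassed. */
	*force_bypass = pgsize > cpu_pgsize;
	return pgsize;
}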
io-pgtable-arm-v7s.c
    545: phys_addr_t paddr, size_t pgsize, size_t pgcount,  [in arm_v7s_map_pages(), argument]
    560: ret = __arm_v7s_map(data, iova, paddr, pgsize, prot, 1, data->pgd,  [in arm_v7s_map_pages()]
    565: iova += pgsize;  [in arm_v7s_map_pages()]
    566: paddr += pgsize;  [in arm_v7s_map_pages()]
    567: *mapped += pgsize;  [in arm_v7s_map_pages()]
    739: size_t pgsize, size_t pgcount,  [in arm_v7s_unmap_pages(), argument]
    749: ret = __arm_v7s_unmap(data, gather, iova, pgsize, 1, data->pgd);  [in arm_v7s_unmap_pages()]
    753: unmapped += pgsize;  [in arm_v7s_unmap_pages()]
    754: iova += pgsize;  [in arm_v7s_unmap_pages()]
/Linux-v6.6/drivers/vfio/

vfio_iommu_type1.c
    234: static int vfio_dma_bitmap_alloc(struct vfio_dma *dma, size_t pgsize)  [in vfio_dma_bitmap_alloc(), argument]
    236: uint64_t npages = dma->size / pgsize;  [in vfio_dma_bitmap_alloc()]
    260: static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)  [in vfio_dma_populate_bitmap(), argument]
    263: unsigned long pgshift = __ffs(pgsize);  [in vfio_dma_populate_bitmap()]
    284: static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)  [in vfio_dma_bitmap_alloc_all(), argument]
    292: ret = vfio_dma_bitmap_alloc(dma, pgsize);  [in vfio_dma_bitmap_alloc_all()]
    304: vfio_dma_populate_bitmap(dma, pgsize);  [in vfio_dma_bitmap_alloc_all()]
    1160: size_t pgsize)  [in update_user_bitmap(), argument]
    1162: unsigned long pgshift = __ffs(pgsize);  [in update_user_bitmap()]
    1196: dma_addr_t iova, size_t size, size_t pgsize)  [in vfio_iova_dirty_bitmap(), argument]
    [all …]
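vfio_dma_bitmap_alloc() above sizes a dirty-page bitmap from the mapping size and the tracking page size: one bit per pgsize-sized page of the DMA range. A rough standalone sketch of that sizing (allocation details differ from the kernel's):

#include <limits.h>
#include <stdlib.h>

/* One bit per page of size pgsize over a mapping of dma_size bytes, rounded
 * up to whole unsigned longs, as dirty-page bitmaps usually are. */
static unsigned long *alloc_dirty_bitmap(size_t dma_size, size_t pgsize)
{
	size_t npages = dma_size / pgsize;
	size_t bits_per_long = sizeof(unsigned long) * CHAR_BIT;
	size_t nlongs = (npages + bits_per_long - 1) / bits_per_long;

	/* Caller checks for NULL and eventually free()s the bitmap. */
	return calloc(nlongs ? nlongs : 1, sizeof(unsigned long));
}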
/Linux-v6.6/drivers/gpu/drm/panfrost/

panfrost_mmu.c
    304: size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);  [in mmu_map_sg(), local]
    306: ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,  [in mmu_map_sg()]
    309: mapped = max(mapped, pgsize);  [in mmu_map_sg()]
    365: size_t pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);  [in panfrost_mmu_unmap(), local]
    370: unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);  [in panfrost_mmu_unmap()]
    371: WARN_ON(unmapped_page != pgsize * pgcount);  [in panfrost_mmu_unmap()]
    373: iova += pgsize * pgcount;  [in panfrost_mmu_unmap()]
    374: unmapped_len += pgsize * pgcount;  [in panfrost_mmu_unmap()]
/Linux-v6.6/drivers/infiniband/hw/bnxt_re/

qplib_res.c
    97: rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {  [in bnxt_qplib_fill_user_dma_pages()]
    117: pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);  [in __alloc_pbl()]
    132: pbl->pg_size = sginfo->pgsize;  [in __alloc_pbl()]
    196: pg_size = hwq_attr->sginfo->pgsize;  [in bnxt_qplib_alloc_init_hwq()]
    219: hwq_attr->sginfo->pgsize);  [in bnxt_qplib_alloc_init_hwq()]
    244: sginfo.pgsize = npde * pg_size;  [in bnxt_qplib_alloc_init_hwq()]
    250: sginfo.pgsize = PAGE_SIZE;  [in bnxt_qplib_alloc_init_hwq()]
    309: sginfo.pgsize = PAGE_SIZE;  [in bnxt_qplib_alloc_init_hwq()]
    393: sginfo.pgsize = PAGE_SIZE;  [in bnxt_qplib_alloc_tqm_rings()]
    510: sginfo.pgsize = PAGE_SIZE;  [in bnxt_qplib_alloc_ctx()]
/Linux-v6.6/arch/powerpc/kvm/

book3s_64_mmu.c
    206: int pgsize;  [in kvmppc_mmu_book3s_64_xlate(), local]
    242: pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;  [in kvmppc_mmu_book3s_64_xlate()]
    271: pgsize = decode_pagesize(slbe, pte1);  [in kvmppc_mmu_book3s_64_xlate()]
    272: if (pgsize < 0)  [in kvmppc_mmu_book3s_64_xlate()]
    296: eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;  [in kvmppc_mmu_book3s_64_xlate()]
    298: gpte->page_size = pgsize;  [in kvmppc_mmu_book3s_64_xlate()]
/Linux-v6.6/include/linux/

io-pgtable.h
    162: phys_addr_t paddr, size_t pgsize, size_t pgcount,
    165: size_t pgsize, size_t pgcount,
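The two hits above are fragments of the map_pages() and unmap_pages() callbacks in struct io_pgtable_ops, which the io-pgtable format drivers in this list implement. A compilable sketch of their shape, with stand-in typedefs for the kernel types; treat the exact parameter lists as a best-effort reconstruction rather than a verbatim copy of the header:

#include <stddef.h>

/* Stand-ins so this sketch compiles outside the kernel tree. */
typedef unsigned long long phys_addr_t;
typedef unsigned int gfp_t;
struct iommu_iotlb_gather;

/* Reconstructed shape of the two callbacks the hits above belong to. */
struct io_pgtable_ops_sketch {
	int (*map_pages)(struct io_pgtable_ops_sketch *ops, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap_pages)(struct io_pgtable_ops_sketch *ops, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *gather);
};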
/Linux-v6.6/drivers/iommu/iommufd/

selftest.c
    180: size_t pgsize, size_t pgcount, int prot,  [in mock_domain_map_pages(), argument]
    196: WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);  [in mock_domain_map_pages()]
    200: for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {  [in mock_domain_map_pages()]
    203: if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)  [in mock_domain_map_pages()]
    228: unsigned long iova, size_t pgsize,  [in mock_domain_unmap_pages(), argument]
    239: WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);  [in mock_domain_unmap_pages()]
    244: for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {  [in mock_domain_unmap_pages()]
    260: if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)  [in mock_domain_unmap_pages()]
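The mock domain above stores its mappings at a fixed MOCK_IO_PAGE_SIZE granule, so every (pgsize, pgcount) request is walked granule by granule, and the final granule is recognised by the pgcount == 1 && cur + granule == pgsize test. A generic sketch of that walk; the granule value and the visit() callback are placeholders:

#include <stdbool.h>
#include <stddef.h>

#define GRANULE 4096UL  /* placeholder for MOCK_IO_PAGE_SIZE */

/* Visit every GRANULE-sized piece of a (pgsize, pgcount) request, flagging
 * the final piece so the caller can mark the end of the mapping. */
static void walk_granules(unsigned long iova, size_t pgsize, size_t pgcount,
			  void (*visit)(unsigned long iova, bool last))
{
	size_t cur;

	for (; pgcount; pgcount--, iova += pgsize) {
		for (cur = 0; cur != pgsize; cur += GRANULE) {
			bool last = (pgcount == 1 && cur + GRANULE == pgsize);

			visit(iova + cur, last);
		}
	}
}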