/Linux-v6.6/tools/testing/selftests/mm/

D | mremap_dontunmap.c
    19 unsigned long page_size; variable
    46 void *source_mapping = mmap(NULL, num_pages * page_size, PROT_NONE, in kernel_support_for_mremap_dontunmap()
    53 mremap(source_mapping, num_pages * page_size, num_pages * page_size, in kernel_support_for_mremap_dontunmap()
    58 BUG_ON(munmap(dest_mapping, num_pages * page_size) == -1, in kernel_support_for_mremap_dontunmap()
    62 BUG_ON(munmap(source_mapping, num_pages * page_size) == -1, in kernel_support_for_mremap_dontunmap()
    71 BUG_ON(size & (page_size - 1), in check_region_contains_byte()
    73 BUG_ON((unsigned long)addr & (page_size - 1), in check_region_contains_byte()
    76 memset(page_buffer, byte, page_size); in check_region_contains_byte()
    78 unsigned long num_pages = size / page_size; in check_region_contains_byte()
    84 memcmp(addr + (i * page_size), page_buffer, page_size); in check_region_contains_byte()
    [all …]

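The excerpts above are from the selftest that first probes whether the running kernel supports MREMAP_DONTUNMAP (moving a mapping with mremap() while leaving the source VMA in place). A minimal standalone sketch of that probe, not the selftest itself, might look like the following; the fallback #define is an assumption for userspace headers that predate the flag:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #ifndef MREMAP_DONTUNMAP
    #define MREMAP_DONTUNMAP 4  /* assumed value, normally from <linux/mman.h> */
    #endif

    int main(void)
    {
        unsigned long page_size = sysconf(_SC_PAGESIZE);
        size_t len = 5 * page_size;

        void *src = mmap(NULL, len, PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (src == MAP_FAILED)
            return 1;

        /* DONTUNMAP needs MREMAP_MAYMOVE and new_len == old_len (Linux >= 5.7). */
        void *dst = mremap(src, len, len, MREMAP_MAYMOVE | MREMAP_DONTUNMAP);
        if (dst == MAP_FAILED) {
            printf("MREMAP_DONTUNMAP not supported by this kernel\n");
            munmap(src, len);
            return 0;
        }

        printf("supported: moved to %p, source %p stays mapped\n", dst, src);
        munmap(dst, len);
        munmap(src, len);  /* the source VMA was deliberately left behind */
        return 0;
    }
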
D | mlock2-tests.c
    194 unsigned long page_size = getpagesize(); in test_mlock_lock() local
    196 map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE, in test_mlock_lock()
    203 if (mlock2_(map, 2 * page_size, 0)) { in test_mlock_lock()
    216 if (munlock(map, 2 * page_size)) { in test_mlock_lock()
    224 munmap(map, 2 * page_size); in test_mlock_lock()
    242 unsigned long page_size = getpagesize(); in unlock_onfault_check() local
    245 is_vma_lock_on_fault((unsigned long)map + page_size)) { in unlock_onfault_check()
    257 unsigned long page_size = getpagesize(); in test_mlock_onfault() local
    259 map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE, in test_mlock_onfault()
    266 if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) { in test_mlock_onfault()
    [all …]

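These hits exercise mlock2(), which extends mlock() with a flags argument; with MLOCK_ONFAULT the pages are only locked as they are faulted in. A rough sketch of the same call pattern, assuming <sys/syscall.h> provides SYS_mlock2 and using a fallback value for MLOCK_ONFAULT:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef MLOCK_ONFAULT
    #define MLOCK_ONFAULT 0x01  /* assumed value, normally from <linux/mman.h> */
    #endif

    /* Thin wrapper, comparable to the selftest's mlock2_() helper. */
    static int mlock2_(void *addr, size_t len, int flags)
    {
        return syscall(SYS_mlock2, addr, len, flags);
    }

    int main(void)
    {
        unsigned long page_size = getpagesize();
        char *map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (map == MAP_FAILED)
            return 1;

        /* With MLOCK_ONFAULT a page is locked only once it is faulted in. */
        if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
            perror("mlock2");
            return 1;
        }

        map[0] = 'a';  /* faults in (and locks) page 0 only */

        munlock(map, 2 * page_size);
        munmap(map, 2 * page_size);
        return 0;
    }
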
D | mremap_test.c
    53 #define PTE page_size
    155 static void mremap_expand_merge(FILE *maps_fp, unsigned long page_size) in mremap_expand_merge() argument
    161 start = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE, in mremap_expand_merge()
    169 munmap(start + page_size, page_size); in mremap_expand_merge()
    170 remap = mremap(start, page_size, 2 * page_size, 0); in mremap_expand_merge()
    173 munmap(start, page_size); in mremap_expand_merge()
    174 munmap(start + 2 * page_size, page_size); in mremap_expand_merge()
    178 success = is_range_mapped(maps_fp, start, start + 3 * page_size); in mremap_expand_merge()
    179 munmap(start, 3 * page_size); in mremap_expand_merge()
    193 static void mremap_expand_merge_offset(FILE *maps_fp, unsigned long page_size) in mremap_expand_merge_offset() argument
    [all …]

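mremap_expand_merge() maps three pages, unmaps the middle one, and then grows the first page back over the hole with mremap() so the expanded VMA can merge with its neighbour. A simplified standalone sketch of that sequence; the /proc/self/maps check done by is_range_mapped() is only described in a comment here:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned long page_size = sysconf(_SC_PAGESIZE);

        char *start = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (start == MAP_FAILED)
            return 1;

        /* Punch a hole: unmap the middle page of the three-page region. */
        munmap(start + page_size, page_size);

        /* Grow the first page in place (flags == 0, so it must not move);
         * the expanded VMA can then merge with the third page again. */
        char *remap = mremap(start, page_size, 2 * page_size, 0);
        if (remap == MAP_FAILED) {
            perror("mremap");
            munmap(start, page_size);
            munmap(start + 2 * page_size, page_size);
            return 1;
        }

        /* A real test would now parse /proc/self/maps to confirm that
         * [start, start + 3 * page_size) is a single mapping again. */
        munmap(start, 3 * page_size);
        return 0;
    }
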
D | map_fixed_noreplace.c
    46 unsigned long flags, addr, size, page_size; in main() local
    49 page_size = sysconf(_SC_PAGE_SIZE); in main()
    52 size = 5 * page_size; in main()
    64 size = 5 * page_size; in main()
    76 if (munmap((void *)addr, 5 * page_size) != 0) { in main()
    84 addr = base_addr + page_size; in main()
    85 size = 3 * page_size; in main()
    105 size = 5 * page_size; in main()
    125 addr = base_addr + (2 * page_size); in main()
    126 size = page_size; in main()
    [all …]

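map_fixed_noreplace.c exercises MAP_FIXED_NOREPLACE, which places a mapping at an exact address but fails with EEXIST instead of silently clobbering an existing mapping the way MAP_FIXED does. A hedged sketch of the core idea, with a fallback #define for older userspace headers:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #ifndef MAP_FIXED_NOREPLACE
    #define MAP_FIXED_NOREPLACE 0x100000  /* assumed value, normally from <linux/mman.h> */
    #endif

    int main(void)
    {
        unsigned long page_size = sysconf(_SC_PAGE_SIZE);
        size_t size = 5 * page_size;

        /* First mapping at a kernel-chosen address. */
        char *base = mmap(NULL, size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (base == MAP_FAILED)
            return 1;

        /* Try to map over part of it: with MAP_FIXED_NOREPLACE the kernel
         * refuses to clobber the existing mapping and fails with EEXIST. */
        void *p = mmap(base + page_size, 3 * page_size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
        if (p == MAP_FAILED && errno == EEXIST)
            printf("overlap correctly rejected with EEXIST\n");
        else if (p != MAP_FAILED)
            munmap(p, 3 * page_size);  /* pre-4.17 kernel: flag ignored, address was a hint */

        munmap(base, size);
        return 0;
    }
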
D | ksm_tests.c
    348 long page_count, int timeout, size_t page_size) in check_ksm_merge() argument
    359 map_ptr = allocate_memory(NULL, prot, mapping, '*', page_size * page_count); in check_ksm_merge()
    363 if (ksm_merge_pages(merge_type, map_ptr, page_size * page_count, start_time, timeout)) in check_ksm_merge()
    369 munmap(map_ptr, page_size * page_count); in check_ksm_merge()
    377 munmap(map_ptr, page_size * page_count); in check_ksm_merge()
    381 static int check_ksm_unmerge(int merge_type, int mapping, int prot, int timeout, size_t page_size) in check_ksm_unmerge() argument
    393 map_ptr = allocate_memory(NULL, prot, mapping, '*', page_size * page_count); in check_ksm_unmerge()
    397 if (ksm_merge_pages(merge_type, map_ptr, page_size * page_count, start_time, timeout)) in check_ksm_unmerge()
    402 memset(map_ptr + page_size, '+', 1); in check_ksm_unmerge()
    411 munmap(map_ptr, page_size * page_count); in check_ksm_unmerge()
    [all …]

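ksm_tests.c fills a buffer with identical bytes and asks KSM to merge the duplicate pages; in the madvise-based merge mode that boils down to MADV_MERGEABLE. A standalone sketch under the assumption that CONFIG_KSM is enabled and ksmd is running (/sys/kernel/mm/ksm/run set to 1); the sleep() is a crude stand-in for the selftest's timeout polling:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t page_size = sysconf(_SC_PAGE_SIZE);
        long page_count = 100;
        size_t len = page_size * page_count;

        char *map_ptr = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (map_ptr == MAP_FAILED)
            return 1;

        /* Identical page contents are what gives KSM something to merge. */
        memset(map_ptr, '*', len);

        /* Hand the range to ksmd; merging happens asynchronously and can be
         * observed via /sys/kernel/mm/ksm/pages_shared. */
        if (madvise(map_ptr, len, MADV_MERGEABLE)) {
            perror("madvise(MADV_MERGEABLE)");
            munmap(map_ptr, len);
            return 1;
        }

        sleep(2);

        /* Writing one byte into a page breaks COW and un-merges it. */
        memset(map_ptr + page_size, '+', 1);

        munmap(map_ptr, len);
        return 0;
    }
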
D | uffd-unit-tests.c
    190 page_size = default_huge_page_size(); in uffd_setup_environment()
    192 page_size = psize(); in uffd_setup_environment()
    194 nr_pages = UFFD_TEST_MEM_SIZE / page_size; in uffd_setup_environment()
    318 if (test_pin && pin_pages(&args, area_dst, page_size)) in pagemap_test_fork()
    356 if (uffd_register(uffd, area_dst, nr_pages * page_size, in uffd_wp_unpopulated_test()
    363 wp_range(uffd, (uint64_t)area_dst, page_size, true); in uffd_wp_unpopulated_test()
    368 wp_range(uffd, (uint64_t)area_dst, page_size, false); in uffd_wp_unpopulated_test()
    373 wp_range(uffd, (uint64_t)area_dst, page_size, true); in uffd_wp_unpopulated_test()
    374 if (madvise(area_dst, page_size, MADV_DONTNEED)) in uffd_wp_unpopulated_test()
    384 if (madvise(area_dst, page_size, MADV_DONTNEED)) in uffd_wp_unpopulated_test()
    [all …]

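The uffd tests register memory ranges with userfaultfd and then resolve or write-protect faults page by page. The sketch below only shows the setup half, opening the userfaultfd, negotiating UFFD_API, and registering a range for missing-page faults, since actually resolving faults needs a reader thread issuing UFFDIO_COPY; whether an unprivileged process may call userfaultfd() depends on the kernel configuration:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/userfaultfd.h>

    int main(void)
    {
        size_t page_size = sysconf(_SC_PAGE_SIZE);
        size_t len = 16 * page_size;

        /* No glibc wrapper; may need privileges or vm.unprivileged_userfaultfd=1. */
        int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
        if (uffd < 0) {
            perror("userfaultfd");
            return 1;
        }

        struct uffdio_api api = { .api = UFFD_API, .features = 0 };
        if (ioctl(uffd, UFFDIO_API, &api))
            return 1;

        char *area_dst = mmap(NULL, len, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (area_dst == MAP_FAILED)
            return 1;

        /* Ask to be notified about missing-page faults in the range.  A real
         * test would now resolve faults from another thread with UFFDIO_COPY
         * or UFFDIO_ZEROPAGE; touching the range here without one would hang. */
        struct uffdio_register reg = {
            .range = { .start = (unsigned long)area_dst, .len = len },
            .mode  = UFFDIO_REGISTER_MODE_MISSING,
        };
        if (ioctl(uffd, UFFDIO_REGISTER, &reg))
            perror("UFFDIO_REGISTER");

        ioctl(uffd, UFFDIO_UNREGISTER, &reg.range);
        munmap(area_dst, len);
        close(uffd);
        return 0;
    }
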
D | memfd_secret.c
    37 static unsigned long page_size; variable
    181 mem = mmap(NULL, page_size, prot, mode, fd, 0); in test_remote_access()
    187 ftruncate(fd, page_size); in test_remote_access()
    188 memset(mem, PATTERN, page_size); in test_remote_access()
    233 page_size = sysconf(_SC_PAGE_SIZE); in prepare()
    234 if (!page_size) in prepare()
    246 page_size, mlock_limit_cur, mlock_limit_max); in prepare()
    248 if (page_size > mlock_limit_cur) in prepare()
    249 mlock_limit_cur = page_size; in prepare()
    250 if (page_size > mlock_limit_max) in prepare()
    [all …]

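memfd_secret.c allocates "secret" memory that is removed from the kernel direct map and cannot be read remotely (for example via /proc/<pid>/mem). A sketch of the basic allocation path; the syscall-number fallback and the note about the secretmem.enable boot parameter are assumptions about typical configurations:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_memfd_secret
    #define __NR_memfd_secret 447  /* assumed syscall number; check your headers */
    #endif

    #define PATTERN 0x55

    int main(void)
    {
        unsigned long page_size = sysconf(_SC_PAGE_SIZE);

        /* memfd_secret() may fail with ENOSYS unless the kernel was built
         * with CONFIG_SECRETMEM and secretmem is enabled at boot. */
        int fd = syscall(__NR_memfd_secret, 0);
        if (fd < 0) {
            perror("memfd_secret");
            return 0;
        }

        if (ftruncate(fd, page_size))
            return 1;

        /* The secret mapping counts against RLIMIT_MEMLOCK and is removed
         * from the direct map, so remote access via ptrace() or
         * /proc/<pid>/mem fails once it is faulted in. */
        char *mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                         MAP_SHARED, fd, 0);
        if (mem == MAP_FAILED)
            return 1;

        memset(mem, PATTERN, page_size);

        munmap(mem, page_size);
        close(fd);
        return 0;
    }
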
D | uffd-stress.c
    158 copy_page_retry(uffd, page_nr * page_size); in background_thread()
    166 wp_range(uffd, (unsigned long)area_dst + start_nr * page_size, in background_thread()
    167 nr_pages_per_cpu * page_size, true); in background_thread()
    174 copy_page_retry(uffd, page_nr * page_size); in background_thread()
    249 uint64_t mem_size = nr_pages * page_size; in userfaultfd_stress()
    256 if (posix_memalign(&area, page_size, page_size)) in userfaultfd_stress()
    259 bzero(zeropage, page_size); in userfaultfd_stress()
    332 nr_pages * page_size, false); in userfaultfd_stress()
    393 page_size = default_huge_page_size(); in parse_test_type_arg()
    395 page_size = sysconf(_SC_PAGE_SIZE); in parse_test_type_arg()
    [all …]

D | khugepaged.c
    28 static unsigned long page_size; variable
    572 for (i = start / page_size; i < end / page_size; i++) in fill_memory()
    573 p[i * page_size / sizeof(*p)] = i + 0xdead0000; in fill_memory()
    634 for (i = start / page_size; i < end / page_size; i++) { in validate_memory()
    635 if (p[i * page_size / sizeof(*p)] != i + 0xdead0000) { in validate_memory()
    637 i, p[i * page_size / sizeof(*p)]); in validate_memory()
    925 madvise(p, page_size, MADV_DONTNEED); in alloc_at_fault()
    962 ops->fault(p, 0, page_size); in collapse_single_pte_entry()
    986 ops->fault(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size); in collapse_max_ptes_none()
    989 validate_memory(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size); in collapse_max_ptes_none()
    [all …]

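fill_memory() and validate_memory() stamp each page with a value derived from its page index so the test can verify that collapsing the range into a huge page did not corrupt anything. A standalone sketch of that stamping pattern over an MADV_HUGEPAGE region; the 512-page length assumes 2M PMDs on 4K base pages, and the actual collapse step is left out:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static unsigned long page_size;

    /* Stamp each page in [start, end) with a value derived from its index. */
    static void fill_memory(int *p, unsigned long start, unsigned long end)
    {
        for (unsigned long i = start / page_size; i < end / page_size; i++)
            p[i * page_size / sizeof(*p)] = i + 0xdead0000;
    }

    static int validate_memory(int *p, unsigned long start, unsigned long end)
    {
        for (unsigned long i = start / page_size; i < end / page_size; i++)
            if (p[i * page_size / sizeof(*p)] != (int)(i + 0xdead0000))
                return -1;
        return 0;
    }

    int main(void)
    {
        page_size = sysconf(_SC_PAGE_SIZE);
        size_t len = 512 * page_size;  /* one PMD's worth on 4K pages (assumption) */

        int *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        /* Hint that this range is a good candidate for khugepaged. */
        madvise(p, len, MADV_HUGEPAGE);

        fill_memory(p, 0, len);
        /* The real test waits for (or triggers) the collapse before
         * re-checking; here we only validate immediately. */
        if (validate_memory(p, 0, len))
            fprintf(stderr, "corrupted page contents\n");

        munmap(p, len);
        return 0;
    }
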
/Linux-v6.6/tools/testing/selftests/mincore/

D | mincore_selftest.c
    33 int page_size; in TEST() local
    37 page_size = sysconf(_SC_PAGESIZE); in TEST()
    45 retval = mincore(NULL, page_size, vec); in TEST()
    50 addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, in TEST()
    58 retval = mincore(addr + 1, page_size, vec); in TEST()
    70 retval = mincore(addr, page_size, NULL); in TEST()
    73 munmap(addr, page_size); in TEST()
    88 int page_size; in TEST() local
    90 page_size = sysconf(_SC_PAGESIZE); in TEST()
    94 addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, in TEST()
    [all …]

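mincore() reports, one byte per page, whether the pages of a mapping are resident in memory; the selftest drives it with bad pointers and NULL vectors to check error handling. A simpler sketch of the normal usage:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int page_size = sysconf(_SC_PAGESIZE);
        size_t len = 4 * (size_t)page_size;
        unsigned char vec[4];

        char *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (addr == MAP_FAILED)
            return 1;

        addr[0] = 1;              /* fault in the first page only */
        addr[2 * page_size] = 1;  /* ...and the third */

        /* One vec byte per page; bit 0 set means "resident in memory". */
        if (mincore(addr, len, vec))
            return 1;

        for (int i = 0; i < 4; i++)
            printf("page %d: %s\n", i, (vec[i] & 1) ? "resident" : "not resident");

        munmap(addr, len);
        return 0;
    }
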
/Linux-v6.6/tools/testing/selftests/bpf/prog_tests/

D | mmap.c
    12 long page_size = sysconf(_SC_PAGE_SIZE); in roundup_page() local
    13 return (sz + page_size - 1) / page_size * page_size; in roundup_page()
    21 const long page_size = sysconf(_SC_PAGE_SIZE); in test_mmap() local
    36 err = bpf_map__set_max_entries(skel->maps.rdonly_map, page_size); in test_mmap()
    42 4 * (page_size / sizeof(u64))); in test_mmap()
    55 tmp1 = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0); in test_mmap()
    57 munmap(tmp1, page_size); in test_mmap()
    61 tmp1 = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rdmap_fd, 0); in test_mmap()
    200 tmp0 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, in test_mmap()
    206 tmp1 = mmap(tmp0, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED, in test_mmap()
    [all …]

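roundup_page() in the excerpt rounds a byte count up to the next multiple of the page size before it is used to size mmap()-able BPF maps. A tiny sketch of that arithmetic; the bitmask variant is an addition here and relies on the page size being a power of two:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Round sz up to the next multiple of the page size (division form,
     * works for any page size). */
    static size_t roundup_page(size_t sz)
    {
        size_t page_size = sysconf(_SC_PAGE_SIZE);

        return (sz + page_size - 1) / page_size * page_size;
    }

    /* Equivalent bitmask form; valid only because page sizes are powers of two. */
    static size_t roundup_page_mask(size_t sz)
    {
        size_t page_size = sysconf(_SC_PAGE_SIZE);

        return (sz + page_size - 1) & ~(page_size - 1);
    }

    int main(void)
    {
        assert(roundup_page(1) == roundup_page_mask(1));
        printf("1 byte rounds up to %zu bytes\n", roundup_page(1));
        return 0;
    }
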
D | ringbuf.c
    92 int page_size = getpagesize(); in ringbuf_subtest() local
    99 skel->maps.ringbuf.max_entries = page_size; in ringbuf_subtest()
    107 mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0); in ringbuf_subtest()
    109 tmp_ptr = mremap(mmap_ptr, page_size, 2 * page_size, MREMAP_MAYMOVE); in ringbuf_subtest()
    112 ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect"); in ringbuf_subtest()
    113 ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw"); in ringbuf_subtest()
    116 mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, page_size); in ringbuf_subtest()
    122 mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size); in ringbuf_subtest()
    126 mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 3 * page_size); in ringbuf_subtest()
    128 mmap_ptr = mmap(NULL, 2 * page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size); in ringbuf_subtest()
    [all …]

D | ringbuf_multi.c
    46 int page_size = getpagesize(); in test_ringbuf_multi() local
    54 ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), page_size, "rb1_size_before"); in test_ringbuf_multi()
    55 ASSERT_OK(bpf_map__set_max_entries(skel->maps.ringbuf1, page_size + 1), "rb1_resize"); in test_ringbuf_multi()
    56 ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), 2 * page_size, "rb1_size_after"); in test_ringbuf_multi()
    57 ASSERT_OK(bpf_map__set_max_entries(skel->maps.ringbuf1, page_size), "rb1_reset"); in test_ringbuf_multi()
    58 ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), page_size, "rb1_size_final"); in test_ringbuf_multi()
    60 proto_fd = bpf_map_create(BPF_MAP_TYPE_RINGBUF, NULL, 0, 0, page_size, NULL); in test_ringbuf_multi()
    76 …if (!ASSERT_ERR(bpf_map__set_max_entries(skel->maps.ringbuf1, 3 * page_size), "rb1_resize_after_lo… in test_ringbuf_multi()

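Both ringbuf tests poke at the mmap() layout of a BPF ring buffer: page 0 of the map fd is the consumer position and may be mapped read-write, while the producer position and data pages start at offset page_size and may only be mapped read-only. A sketch of that layout using libbpf's bpf_map_create(); it assumes libbpf >= 0.7, linking with -lbpf, and enough privilege (root or CAP_BPF) to create maps:

    /* Build with: gcc ringbuf_mmap.c -lbpf */
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <linux/bpf.h>
    #include <bpf/bpf.h>

    int main(void)
    {
        int page_size = getpagesize();

        /* For a ringbuf map, max_entries is the data size in bytes and must
         * be a page-aligned power of two. */
        int rb_fd = bpf_map_create(BPF_MAP_TYPE_RINGBUF, NULL, 0, 0, page_size, NULL);
        if (rb_fd < 0) {
            fprintf(stderr, "bpf_map_create failed (need root/CAP_BPF?)\n");
            return 1;
        }

        /* Page 0 of the fd is the consumer position; user space may map it
         * read-write so it can advance the consumer counter. */
        void *cons = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED, rb_fd, 0);
        if (cons == MAP_FAILED)
            return 1;

        /* The producer position and data pages start at offset page_size and
         * may only be mapped read-only; requesting PROT_WRITE should fail. */
        void *prod_rw = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
                             rb_fd, page_size);
        printf("PROT_WRITE mapping of producer page %s\n",
               prod_rw == MAP_FAILED ? "rejected (expected)" : "unexpectedly succeeded");
        if (prod_rw != MAP_FAILED)
            munmap(prod_rw, page_size);

        void *prod_ro = mmap(NULL, page_size, PROT_READ, MAP_SHARED,
                             rb_fd, page_size);
        if (prod_ro != MAP_FAILED)
            munmap(prod_ro, page_size);

        munmap(cons, page_size);
        close(rb_fd);
        return 0;
    }
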
/Linux-v6.6/drivers/mtd/spi-nor/

D | xilinx.c
    33 .page_size = (_page_size), \
    59 u32 page_size = nor->params->page_size; in s3an_nor_convert_addr() local
    62 offset = addr % page_size; in s3an_nor_convert_addr()
    63 page = addr / page_size; in s3an_nor_convert_addr()
    64 page <<= (page_size > 512) ? 10 : 9; in s3an_nor_convert_addr()
    119 u32 page_size; in xilinx_nor_setup() local
    144 page_size = (nor->params->page_size == 264) ? 256 : 512; in xilinx_nor_setup()
    145 nor->params->page_size = page_size; in xilinx_nor_setup()
    146 nor->mtd.writebufsize = page_size; in xilinx_nor_setup()
    147 nor->params->size = 8 * page_size * nor->info->n_sectors; in xilinx_nor_setup()
    [all …]

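s3an_nor_convert_addr() converts a linear flash offset into the S3AN "default addressing" form: the page index goes into the upper bits (shifted by 9 for 264-byte pages, 10 for 528-byte pages) and the in-page offset stays in the low bits. The same arithmetic, lifted out of the driver as a standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Convert a linear flash offset into S3AN default addressing:
     * page index in the high bits, byte offset within the page in the low
     * bits.  264-byte pages use a 9-bit offset field, 528-byte pages 10 bits. */
    static uint32_t s3an_convert_addr(uint32_t addr, uint32_t page_size)
    {
        uint32_t offset = addr % page_size;
        uint32_t page = addr / page_size;

        page <<= (page_size > 512) ? 10 : 9;

        return page | offset;
    }

    int main(void)
    {
        /* e.g. byte 1000 of a 264-byte-page device: page 3, offset 208 */
        printf("0x%08x\n", s3an_convert_addr(1000, 264));
        return 0;
    }
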
/Linux-v6.6/drivers/pci/endpoint/

D | pci-epc-mem.c
    26 unsigned int page_shift = ilog2(mem->window.page_size); in pci_epc_mem_get_order()
    54 size_t page_size; in pci_epc_multi_mem_init() local
    70 page_size = windows[i].page_size; in pci_epc_multi_mem_init()
    71 if (page_size < PAGE_SIZE) in pci_epc_multi_mem_init()
    72 page_size = PAGE_SIZE; in pci_epc_multi_mem_init()
    73 page_shift = ilog2(page_size); in pci_epc_multi_mem_init()
    94 mem->window.page_size = page_size; in pci_epc_multi_mem_init()
    129 size_t size, size_t page_size) in pci_epc_mem_init() argument
    135 mem_window.page_size = page_size; in pci_epc_mem_init()
    192 align_size = ALIGN(size, mem->window.page_size); in pci_epc_mem_alloc_addr()
    [all …]

/Linux-v6.6/tools/testing/selftests/powerpc/primitives/

D | load_unaligned_zeropad.c
    38 static int page_size; variable
    43 if (mprotect(mem_region + page_size, page_size, PROT_NONE)) { in protect_region()
    53 if (mprotect(mem_region + page_size, page_size, PROT_READ|PROT_WRITE)) { in unprotect_region()
    125 page_size = getpagesize(); in test_body()
    126 mem_region = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE, in test_body()
    131 for (i = 0; i < page_size; i++) in test_body()
    134 memset(mem_region+page_size, 0, page_size); in test_body()
    138 for (i = 0; i < page_size; i++) in test_body()

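load_unaligned_zeropad.c maps two adjacent pages and toggles the second between PROT_NONE and PROT_READ|PROT_WRITE, so that loads near the end of the first page run into a fault boundary. A sketch of just that guard-page setup, without the powerpc-specific SIGSEGV fixup logic:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int page_size = getpagesize();

        /* Two adjacent pages: the first stays readable, the second becomes
         * a guard page so any access crossing the boundary faults. */
        char *mem_region = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (mem_region == MAP_FAILED)
            return 1;

        memset(mem_region, 0x55, page_size);
        memset(mem_region + page_size, 0, page_size);

        if (mprotect(mem_region + page_size, page_size, PROT_NONE)) {
            perror("mprotect(PROT_NONE)");
            return 1;
        }

        /* Reads within the first page are fine; touching
         * mem_region[page_size] would now deliver SIGSEGV.  The selftest
         * installs a handler and exercises unaligned loads that straddle
         * this boundary. */
        printf("last readable byte: 0x%02x\n",
               (unsigned char)mem_region[page_size - 1]);

        /* Make the second page accessible again before tearing down. */
        mprotect(mem_region + page_size, page_size, PROT_READ | PROT_WRITE);
        munmap(mem_region, 2 * page_size);
        return 0;
    }
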
/Linux-v6.6/drivers/accel/habanalabs/common/mmu/

D | mmu.c
    32 return hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, in hl_is_dram_va()
    175 u32 page_size, u32 *real_page_size, bool is_dram_addr) in hl_mmu_get_real_page_size() argument
    181 if ((page_size % mmu_prop->page_size) == 0) { in hl_mmu_get_real_page_size()
    182 *real_page_size = mmu_prop->page_size; in hl_mmu_get_real_page_size()
    187 page_size, mmu_prop->page_size >> 10); in hl_mmu_get_real_page_size()
    192 static struct hl_mmu_properties *hl_mmu_get_prop(struct hl_device *hdev, u32 page_size, in hl_mmu_get_prop() argument
    199 else if ((page_size % prop->pmmu_huge.page_size) == 0) in hl_mmu_get_prop()
    226 int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flush_pte) in hl_mmu_unmap_page() argument
    240 mmu_prop = hl_mmu_get_prop(hdev, page_size, is_dram_addr); in hl_mmu_unmap_page()
    245 rc = hdev->asic_funcs->mmu_get_real_page_size(hdev, mmu_prop, page_size, &real_page_size, in hl_mmu_unmap_page()
    [all …]

/Linux-v6.6/arch/powerpc/mm/

D | init_64.c
    188 unsigned long page_size) in altmap_cross_boundary() argument
    190 unsigned long nr_pfn = page_size / sizeof(struct page); in altmap_cross_boundary()
    206 unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; in __vmemmap_populate() local
    209 start = ALIGN_DOWN(start, page_size); in __vmemmap_populate()
    213 for (; start < end; start += page_size) { in __vmemmap_populate()
    223 if (vmemmap_populated(start, page_size)) in __vmemmap_populate()
    231 if (altmap && !altmap_cross_boundary(altmap, start, page_size)) { in __vmemmap_populate()
    232 p = vmemmap_alloc_block_buf(page_size, node, altmap); in __vmemmap_populate()
    239 p = vmemmap_alloc_block_buf(page_size, node, NULL); in __vmemmap_populate()
    252 int nr_pfns = page_size >> PAGE_SHIFT; in __vmemmap_populate()
    [all …]

/Linux-v6.6/drivers/misc/

D | vmw_balloon.c
    245 enum vmballoon_page_size_type page_size; member
    574 unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size) in vmballoon_page_order() argument
    576 return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0; in vmballoon_page_order()
    586 vmballoon_page_in_frames(enum vmballoon_page_size_type page_size) in vmballoon_page_in_frames() argument
    588 return 1 << vmballoon_page_order(page_size); in vmballoon_page_in_frames()
    598 enum vmballoon_page_size_type page_size) in vmballoon_mark_page_offline() argument
    602 for (i = 0; i < vmballoon_page_in_frames(page_size); i++) in vmballoon_mark_page_offline()
    613 enum vmballoon_page_size_type page_size) in vmballoon_mark_page_online() argument
    617 for (i = 0; i < vmballoon_page_in_frames(page_size); i++) in vmballoon_mark_page_online()
    677 if (ctl->page_size == VMW_BALLOON_2M_PAGE) in vmballoon_alloc_page_list()
    [all …]

/Linux-v6.6/tools/testing/selftests/kvm/lib/s390x/

D | processor.c
    17 TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x", in virt_arch_pgd_alloc()
    18 vm->page_size); in virt_arch_pgd_alloc()
    26 memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size); in virt_arch_pgd_alloc()
    43 memset(addr_gpa2hva(vm, taddr), 0xff, PAGES_PER_REGION * vm->page_size); in virt_alloc_region()
    55 TEST_ASSERT((gva % vm->page_size) == 0, in virt_arch_pg_map()
    58 gva, vm->page_size); in virt_arch_pg_map()
    63 TEST_ASSERT((gpa % vm->page_size) == 0, in virt_arch_pg_map()
    66 gva, vm->page_size); in virt_arch_pg_map()
    70 gva, vm->max_gfn, vm->page_size); in virt_arch_pg_map()
    94 TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x", in addr_arch_gva2gpa()
    [all …]

/Linux-v6.6/drivers/accel/habanalabs/common/

D | memory.c
    29 static int set_alloc_page_size(struct hl_device *hdev, struct hl_mem_in *args, u32 *page_size) in set_alloc_page_size() argument
    38 if (prop->supports_user_set_page_size && args->alloc.page_size) { in set_alloc_page_size()
    39 psize = args->alloc.page_size; in set_alloc_page_size()
    49 *page_size = psize; in set_alloc_page_size()
    94 u32 num_curr_pgs, page_size; in alloc_device_memory() local
    100 rc = set_alloc_page_size(hdev, args, &page_size); in alloc_device_memory()
    104 num_pgs = DIV_ROUND_UP_ULL(args->alloc.mem_size, page_size); in alloc_device_memory()
    105 total_size = num_pgs * page_size; in alloc_device_memory()
    115 if (is_power_of_2(page_size)) in alloc_device_memory()
    117 total_size, NULL, page_size); in alloc_device_memory()
    [all …]

/Linux-v6.6/drivers/net/ethernet/qlogic/qed/

D | qed_chain.c
    22 params->page_size); in qed_chain_init()
    24 params->page_size, in qed_chain_init()
    33 chain->page_size = params->page_size; in qed_chain_init()
    88 dma_free_coherent(dev, chain->page_size, virt, phys); in qed_chain_free_next_ptr()
    101 dma_free_coherent(&cdev->pdev->dev, chain->page_size, in qed_chain_free_single()
    119 dma_free_coherent(dev, chain->page_size, entry->virt_addr, in qed_chain_free_pbl()
    164 chain_size = ELEMS_PER_PAGE(params->elem_size, params->page_size); in qed_chain_alloc_sanity_check()
    207 virt = dma_alloc_coherent(dev, chain->page_size, &phys, in qed_chain_alloc_next_ptr()
    238 virt = dma_alloc_coherent(&cdev->pdev->dev, chain->page_size, in qed_chain_alloc_single()
    290 virt = dma_alloc_coherent(dev, chain->page_size, &phys, in qed_chain_alloc_pbl()
    [all …]

/Linux-v6.6/tools/power/acpi/os_specific/service_layers/

D | osunixmap.c
    67 acpi_size page_size; in acpi_os_map_memory() local
    78 page_size = acpi_os_get_page_size(); in acpi_os_map_memory()
    79 offset = where % page_size; in acpi_os_map_memory()
    112 acpi_size page_size; in acpi_os_unmap_memory() local
    114 page_size = acpi_os_get_page_size(); in acpi_os_unmap_memory()
    115 offset = ACPI_TO_INTEGER(where) % page_size; in acpi_os_unmap_memory()

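acpi_os_map_memory() has to map windows whose start address is usually not page aligned, so it computes offset = where % page_size, maps from the rounded-down base, and returns the pointer advanced by offset; unmap reverses the calculation. A userspace sketch of the same round-down/advance pattern against an ordinary file (/etc/hostname is just an arbitrary readable file for illustration):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Map 'length' bytes starting at an arbitrary (unaligned) file offset.
     * mmap() only accepts page-aligned offsets, so round down and remember
     * the slack. */
    static void *map_at_offset(int fd, off_t where, size_t length, size_t *mapped_len)
    {
        size_t page_size = sysconf(_SC_PAGE_SIZE);
        size_t offset = where % page_size;
        void *base;

        *mapped_len = length + offset;
        base = mmap(NULL, *mapped_len, PROT_READ, MAP_PRIVATE,
                    fd, where - offset);
        if (base == MAP_FAILED)
            return NULL;

        return (char *)base + offset;  /* caller sees the requested byte */
    }

    int main(void)
    {
        int fd = open("/etc/hostname", O_RDONLY);
        size_t mapped_len;

        if (fd < 0)
            return 1;

        void *p = map_at_offset(fd, 3, 4, &mapped_len);
        if (p) {
            /* To unmap, round the pointer back down, just as the unmap path
             * in the excerpt recomputes its own offset. */
            size_t page_size = sysconf(_SC_PAGE_SIZE);
            munmap((void *)((unsigned long)p & ~(page_size - 1)), mapped_len);
        }
        close(fd);
        return 0;
    }
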
/Linux-v6.6/drivers/infiniband/hw/mlx5/

D | mem.c
    41 void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas, in mlx5_ib_populate_pas() argument
    46 rdma_umem_for_each_dma_block (umem, &biter, page_size) { in mlx5_ib_populate_pas()
    64 unsigned long page_size; in __mlx5_umem_find_best_quantized_pgoff() local
    67 page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, pgoff_bitmask); in __mlx5_umem_find_best_quantized_pgoff()
    68 if (!page_size) in __mlx5_umem_find_best_quantized_pgoff()
    78 page_offset = ib_umem_dma_offset(umem, page_size); in __mlx5_umem_find_best_quantized_pgoff()
    79 while (page_offset & ~(u64)(page_offset_mask * (page_size / scale))) { in __mlx5_umem_find_best_quantized_pgoff()
    80 page_size /= 2; in __mlx5_umem_find_best_quantized_pgoff()
    81 page_offset = ib_umem_dma_offset(umem, page_size); in __mlx5_umem_find_best_quantized_pgoff()
    88 if (!(pgsz_bitmap & page_size)) in __mlx5_umem_find_best_quantized_pgoff()
    [all …]

/Linux-v6.6/tools/testing/selftests/powerpc/copyloops/

D | exc_validate.c
    81 int page_size; in test_copy_exception() local
    85 page_size = getpagesize(); in test_copy_exception()
    86 p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE, in test_copy_exception()
    94 memset(p, 0, page_size); in test_copy_exception()
    98 if (mprotect(p + page_size, page_size, PROT_NONE)) { in test_copy_exception()
    103 q = p + page_size - MAX_LEN; in test_copy_exception()
