/Linux-v5.10/tools/testing/selftests/vm/ |
D | mremap_dontunmap.c |
  22 unsigned long page_size; variable
  49 void *source_mapping = mmap(NULL, num_pages * page_size, PROT_NONE, in kernel_support_for_mremap_dontunmap()
  56 mremap(source_mapping, num_pages * page_size, num_pages * page_size, in kernel_support_for_mremap_dontunmap()
  61 BUG_ON(munmap(dest_mapping, num_pages * page_size) == -1, in kernel_support_for_mremap_dontunmap()
  65 BUG_ON(munmap(source_mapping, num_pages * page_size) == -1, in kernel_support_for_mremap_dontunmap()
  74 BUG_ON(size & (page_size - 1), in check_region_contains_byte()
  76 BUG_ON((unsigned long)addr & (page_size - 1), in check_region_contains_byte()
  79 memset(page_buffer, byte, page_size); in check_region_contains_byte()
  81 unsigned long num_pages = size / page_size; in check_region_contains_byte()
  87 memcmp(addr + (i * page_size), page_buffer, page_size); in check_region_contains_byte()
  [all …]
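These hits probe kernel support for MREMAP_DONTUNMAP by remapping a PROT_NONE region and checking that the source stays mapped. A minimal userspace sketch of the same probe; the fallback #define for MREMAP_DONTUNMAP is an assumption for older libc headers, and the five-page length is arbitrary:

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <stdio.h>
    #include <unistd.h>

    #ifndef MREMAP_DONTUNMAP
    #define MREMAP_DONTUNMAP 4    /* assumed value; exported by linux/mman.h since v5.7 */
    #endif

    int main(void)
    {
        size_t len = 5 * sysconf(_SC_PAGE_SIZE);
        void *src = mmap(NULL, len, PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (src == MAP_FAILED)
            return 1;

        /* MREMAP_DONTUNMAP moves the pages but leaves src mapped (now empty). */
        void *dst = mremap(src, len, len,
                           MREMAP_DONTUNMAP | MREMAP_MAYMOVE, NULL);
        if (dst == MAP_FAILED) {
            printf("MREMAP_DONTUNMAP not supported\n");
            munmap(src, len);
            return 1;
        }

        printf("MREMAP_DONTUNMAP supported\n");
        munmap(dst, len);
        munmap(src, len);    /* the source vma still exists after the remap */
        return 0;
    }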
|
D | mlock2-tests.c |
  195 unsigned long page_size = getpagesize(); in test_mlock_lock() local
  197 map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE, in test_mlock_lock()
  204 if (mlock2_(map, 2 * page_size, 0)) { in test_mlock_lock()
  217 if (munlock(map, 2 * page_size)) { in test_mlock_lock()
  225 munmap(map, 2 * page_size); in test_mlock_lock()
  243 unsigned long page_size = getpagesize(); in unlock_onfault_check() local
  246 is_vma_lock_on_fault((unsigned long)map + page_size)) { in unlock_onfault_check()
  258 unsigned long page_size = getpagesize(); in test_mlock_onfault() local
  260 map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE, in test_mlock_onfault()
  267 if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) { in test_mlock_onfault()
  [all …]
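mlock2() with MLOCK_ONFAULT locks pages only as they are faulted in, which is what test_mlock_onfault() exercises. A minimal sketch that goes through syscall(2); the MLOCK_ONFAULT fallback value and the mlock2_() wrapper name follow the selftest, and SYS_mlock2 is assumed to be exposed by the libc headers:

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <stdio.h>
    #include <unistd.h>

    #ifndef MLOCK_ONFAULT
    #define MLOCK_ONFAULT 0x01    /* assumed value from linux/mman.h */
    #endif

    static int mlock2_(void *addr, size_t len, unsigned int flags)
    {
        return syscall(SYS_mlock2, addr, len, flags);
    }

    int main(void)
    {
        long page_size = getpagesize();
        char *map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (map == MAP_FAILED)
            return 1;

        /* Lock on fault: nothing is resident yet; pages lock as they are touched. */
        if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
            perror("mlock2(MLOCK_ONFAULT)");
            return 1;
        }
        map[0] = 1;    /* first page is now faulted in and locked */

        if (munlock(map, 2 * page_size))
            perror("munlock");
        munmap(map, 2 * page_size);
        return 0;
    }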
|
D | map_fixed_noreplace.c |
  33 unsigned long flags, addr, size, page_size; in main() local
  36 page_size = sysconf(_SC_PAGE_SIZE); in main()
  43 size = 5 * page_size; in main()
  55 if (munmap((void *)addr, 5 * page_size) != 0) { in main()
  63 addr = BASE_ADDRESS + page_size; in main()
  64 size = 3 * page_size; in main()
  84 size = 5 * page_size; in main()
  104 addr = BASE_ADDRESS + (2 * page_size); in main()
  105 size = page_size; in main()
  124 addr = BASE_ADDRESS + (3 * page_size); in main()
  [all …]
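map_fixed_noreplace.c checks that MAP_FIXED_NOREPLACE places a mapping at the requested address but refuses to clobber an existing one. A small sketch of that probe; the fallback #define and the 0x10000000 base address are assumptions, as is the five-page size:

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    #ifndef MAP_FIXED_NOREPLACE
    #define MAP_FIXED_NOREPLACE 0x100000    /* assumed value; supported since v4.17 */
    #endif

    int main(void)
    {
        long page_size = sysconf(_SC_PAGE_SIZE);
        void *hint = (void *)0x10000000UL;  /* hypothetical base address */
        int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE;

        void *p = mmap(hint, 5 * page_size, PROT_READ | PROT_WRITE, flags, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        /* A second mapping overlapping the first must fail with EEXIST. */
        void *q = mmap((char *)hint + page_size, page_size,
                       PROT_READ | PROT_WRITE, flags, -1, 0);
        if (q == MAP_FAILED && errno == EEXIST)
            printf("overlap correctly refused\n");
        else if (q != MAP_FAILED)
            munmap(q, page_size);

        munmap(p, 5 * page_size);
        return 0;
    }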
|
D | khugepaged.c |
  20 static unsigned long page_size; variable
  459 for (i = start / page_size; i < end / page_size; i++) in fill_memory()
  460 p[i * page_size / sizeof(*p)] = i + 0xdead0000; in fill_memory()
  467 for (i = start / page_size; i < end / page_size; i++) { in validate_memory()
  468 if (p[i * page_size / sizeof(*p)] != i + 0xdead0000) { in validate_memory()
  470 i, p[i * page_size / sizeof(*p)]); in validate_memory()
  526 madvise(p, page_size, MADV_DONTNEED); in alloc_at_fault()
  570 fill_memory(p, 0, page_size); in collapse_single_pte_entry()
  577 validate_memory(p, 0, page_size); in collapse_single_pte_entry()
  592 fill_memory(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size); in collapse_max_ptes_none()
  [all …]
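The khugepaged test tags the first word of every page with its index (offset by 0xdead0000) and re-validates the pattern after a collapse attempt. A simplified sketch of that fill/validate loop; the 512-page region and the MADV_HUGEPAGE hint are assumptions standing in for the test's THP setup and are not guaranteed to trigger a collapse:

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <stdio.h>
    #include <unistd.h>

    static unsigned long page_size;

    /* Tag the first word of every page in [start, end) with its page index. */
    static void fill_memory(unsigned long *p, unsigned long start, unsigned long end)
    {
        for (unsigned long i = start / page_size; i < end / page_size; i++)
            p[i * page_size / sizeof(*p)] = i + 0xdead0000;
    }

    static int validate_memory(unsigned long *p, unsigned long start, unsigned long end)
    {
        for (unsigned long i = start / page_size; i < end / page_size; i++)
            if (p[i * page_size / sizeof(*p)] != i + 0xdead0000)
                return -1;
        return 0;
    }

    int main(void)
    {
        page_size = sysconf(_SC_PAGE_SIZE);
        size_t len = 512 * page_size;    /* assumed: one PMD's worth on x86-64 */
        unsigned long *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        madvise(p, len, MADV_HUGEPAGE);  /* hint khugepaged to collapse the range */
        fill_memory(p, 0, len);
        printf("pattern %s\n", validate_memory(p, 0, len) ? "corrupted" : "intact");
        munmap(p, len);
        return 0;
    }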
|
D | userfaultfd.c |
  63 static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size; variable
  101 ((pthread_mutex_t *) ((___area) + (___nr)*page_size))
  108 ((___area) + (___nr)*page_size + \
  173 if (madvise(rel_area, nr_pages * page_size, MADV_DONTNEED)) { in anon_release_pages()
  183 if (posix_memalign(alloc_area, page_size, nr_pages * page_size)) { in anon_allocate_area()
  200 nr_pages * page_size, in hugetlb_release_pages()
  201 nr_pages * page_size)) { in hugetlb_release_pages()
  214 *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, in hugetlb_allocate_area()
  218 nr_pages * page_size); in hugetlb_allocate_area()
  225 area_alias = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, in hugetlb_allocate_area()
  [all …]
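userfaultfd.c registers page-aligned areas with a userfaultfd and drops their pages with MADV_DONTNEED between runs. A minimal sketch of creating a userfaultfd, completing the UFFDIO_API handshake and registering an anonymous region; the 16-page length is arbitrary, and unprivileged use may be restricted by sysctl on some systems:

    #define _GNU_SOURCE
    #include <linux/userfaultfd.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        long page_size = sysconf(_SC_PAGE_SIZE);
        size_t len = 16 * page_size;

        int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
        if (uffd < 0) {
            perror("userfaultfd");
            return 1;
        }

        struct uffdio_api api = { .api = UFFD_API };
        if (ioctl(uffd, UFFDIO_API, &api))
            return 1;

        void *area = mmap(NULL, len, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (area == MAP_FAILED)
            return 1;

        /* The registered range must be page aligned, hence all the page_size math. */
        struct uffdio_register reg = {
            .range = { .start = (unsigned long)area, .len = len },
            .mode = UFFDIO_REGISTER_MODE_MISSING,
        };
        if (ioctl(uffd, UFFDIO_REGISTER, &reg))
            perror("UFFDIO_REGISTER");

        /* Drop the pages so the next touch faults again, as the test does between runs. */
        madvise(area, len, MADV_DONTNEED);

        munmap(area, len);
        close(uffd);
        return 0;
    }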
|
D | hmm-tests.c |
  55 unsigned int page_size; in FIXTURE() local
  63 unsigned int page_size; in FIXTURE() local
  82 self->page_size = sysconf(_SC_PAGE_SIZE); in FIXTURE_SETUP()
  83 self->page_shift = ffs(self->page_size) - 1; in FIXTURE_SETUP()
  91 self->page_size = sysconf(_SC_PAGE_SIZE); in FIXTURE_SETUP()
  92 self->page_shift = ffs(self->page_size) - 1; in FIXTURE_SETUP()
  230 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
  252 i = 2 * self->page_size / sizeof(*ptr); in TEST_F()
  261 val = *(int *)(buffer->ptr + self->page_size); in TEST_F()
  272 for (i = 0; i < 2 * self->page_size / sizeof(*ptr); ++i) in TEST_F()
  [all …]
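The HMM fixtures derive page_shift from page_size with ffs() and convert buffer sizes to page counts by shifting. A short sketch of that arithmetic; the ALIGN() macro and the 1 MiB buffer size are stand-ins for the test's own helpers and HMM_BUFFER_SIZE:

    #include <strings.h>    /* ffs() */
    #include <stdio.h>
    #include <unistd.h>

    #define ALIGN(x, a)    (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long page_size = sysconf(_SC_PAGE_SIZE);
        unsigned int page_shift = ffs(page_size) - 1;    /* 4096 -> 12 */

        unsigned long size = 1 << 20;                    /* assumed 1 MiB buffer */
        unsigned long npages = ALIGN(size, page_size) >> page_shift;

        printf("page_size=%lu shift=%u npages=%lu\n", page_size, page_shift, npages);
        return 0;
    }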
|
/Linux-v5.10/tools/testing/selftests/mincore/ |
D | mincore_selftest.c |
  34 int page_size; in TEST() local
  38 page_size = sysconf(_SC_PAGESIZE); in TEST()
  46 retval = mincore(NULL, page_size, vec); in TEST()
  51 addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, in TEST()
  59 retval = mincore(addr + 1, page_size, vec); in TEST()
  71 retval = mincore(addr, page_size, NULL); in TEST()
  74 munmap(addr, page_size); in TEST()
  89 int page_size; in TEST() local
  91 page_size = sysconf(_SC_PAGESIZE); in TEST()
  95 addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, in TEST()
  [all …]
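mincore() fills one byte per page indicating whether that page is resident; the selftest also drives its EFAULT/EINVAL error paths with NULL and misaligned arguments. A minimal sketch checking residency of a single anonymous page before and after it is touched:

    #include <sys/mman.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        long page_size = sysconf(_SC_PAGESIZE);
        unsigned char vec[1];

        char *addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (addr == MAP_FAILED)
            return 1;

        mincore(addr, page_size, vec);
        printf("before touch: %sresident\n", (vec[0] & 1) ? "" : "not ");

        addr[0] = 1;    /* fault the page in */

        mincore(addr, page_size, vec);
        printf("after touch:  %sresident\n", (vec[0] & 1) ? "" : "not ");

        munmap(addr, page_size);
        return 0;
    }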
|
/Linux-v5.10/drivers/misc/habanalabs/common/ |
D | mmu.c |
  16 return hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, in is_dram_va()
  112 int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, in hl_mmu_unmap() argument
  130 else if ((page_size % prop->pmmu_huge.page_size) == 0) in hl_mmu_unmap()
  139 if ((page_size % mmu_prop->page_size) == 0) { in hl_mmu_unmap()
  140 real_page_size = mmu_prop->page_size; in hl_mmu_unmap()
  144 page_size, mmu_prop->page_size >> 10); in hl_mmu_unmap()
  149 npages = page_size / real_page_size; in hl_mmu_unmap()
  188 int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size, in hl_mmu_map() argument
  206 else if ((page_size % prop->pmmu_huge.page_size) == 0) in hl_mmu_map()
  215 if ((page_size % mmu_prop->page_size) == 0) { in hl_mmu_map()
  [all …]
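hl_mmu_map()/hl_mmu_unmap() accept a caller-supplied page_size and, when it is a whole multiple of the MMU's native page size, split the request into npages native-sized mappings. A hedged, driver-agnostic sketch of that split; map_range() and map_one() are illustrative names, not the habanalabs API:

    #include <stdio.h>

    typedef unsigned long long u64;
    typedef unsigned int u32;

    /* Hypothetical per-page hook standing in for the driver's PTE code. */
    static int map_one(u64 virt, u64 phys, u32 real_page_size)
    {
        printf("map 0x%llx -> 0x%llx (%u bytes)\n", virt, phys, real_page_size);
        return 0;
    }

    static int map_range(u64 virt, u64 phys, u32 page_size, u32 mmu_page_size)
    {
        /* Only sizes that are whole multiples of the MMU page size are mappable. */
        if (page_size % mmu_page_size)
            return -1;

        u32 npages = page_size / mmu_page_size;
        for (u32 i = 0; i < npages; i++) {
            if (map_one(virt, phys, mmu_page_size))
                return -1;    /* real code would also unmap what was already mapped */
            virt += mmu_page_size;
            phys += mmu_page_size;
        }
        return 0;
    }

    int main(void)
    {
        return map_range(0x1000000, 0x8000000, 16384, 4096);
    }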
|
D | memory.c |
  59 u32 num_curr_pgs, page_size, page_shift; in alloc_device_memory() local
  64 page_size = hdev->asic_prop.dram_page_size; in alloc_device_memory()
  65 page_shift = __ffs(page_size); in alloc_device_memory()
  66 num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift; in alloc_device_memory()
  95 phys_pg_pack->page_size = page_size; in alloc_device_memory()
  108 phys_pg_pack->pages[i] = paddr + i * page_size; in alloc_device_memory()
  113 page_size); in alloc_device_memory()
  153 page_size); in alloc_device_memory()
  285 phys_pg_pack->page_size); in free_phys_pg_pack()
  641 u32 npages, page_size = PAGE_SIZE, in init_phys_pg_pack_from_userptr() local
  [all …]
|
/Linux-v5.10/drivers/pci/endpoint/ |
D | pci-epc-mem.c |
  26 unsigned int page_shift = ilog2(mem->window.page_size); in pci_epc_mem_get_order()
  54 size_t page_size; in pci_epc_multi_mem_init() local
  70 page_size = windows[i].page_size; in pci_epc_multi_mem_init()
  71 if (page_size < PAGE_SIZE) in pci_epc_multi_mem_init()
  72 page_size = PAGE_SIZE; in pci_epc_multi_mem_init()
  73 page_shift = ilog2(page_size); in pci_epc_multi_mem_init()
  94 mem->window.page_size = page_size; in pci_epc_multi_mem_init()
  119 size_t size, size_t page_size) in pci_epc_mem_init() argument
  125 mem_window.page_size = page_size; in pci_epc_mem_init()
  182 align_size = ALIGN(size, mem->window.page_size); in pci_epc_mem_alloc_addr()
  [all …]
|
/Linux-v5.10/arch/powerpc/mm/ |
D | init_64.c |
  186 unsigned long page_size) in altmap_cross_boundary() argument
  188 unsigned long nr_pfn = page_size / sizeof(struct page); in altmap_cross_boundary()
  204 unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; in vmemmap_populate() local
  207 start = ALIGN_DOWN(start, page_size); in vmemmap_populate()
  211 for (; start < end; start += page_size) { in vmemmap_populate()
  221 if (vmemmap_populated(start, page_size)) in vmemmap_populate()
  229 if (altmap && !altmap_cross_boundary(altmap, start, page_size)) { in vmemmap_populate()
  230 p = vmemmap_alloc_block_buf(page_size, node, altmap); in vmemmap_populate()
  237 p = vmemmap_alloc_block_buf(page_size, node, NULL); in vmemmap_populate()
  250 int nr_pfns = page_size >> PAGE_SHIFT; in vmemmap_populate()
  [all …]
|
/Linux-v5.10/tools/testing/selftests/powerpc/primitives/ |
D | load_unaligned_zeropad.c |
  38 static int page_size; variable
  43 if (mprotect(mem_region + page_size, page_size, PROT_NONE)) { in protect_region()
  53 if (mprotect(mem_region + page_size, page_size, PROT_READ|PROT_WRITE)) { in unprotect_region()
  125 page_size = getpagesize(); in test_body()
  126 mem_region = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE, in test_body()
  131 for (i = 0; i < page_size; i++) in test_body()
  134 memset(mem_region+page_size, 0, page_size); in test_body()
  138 for (i = 0; i < page_size; i++) in test_body()
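The test maps two pages, fills the first with data and flips the second between PROT_NONE and PROT_READ|PROT_WRITE so that loads running off the end of the first page hit a fault boundary. A minimal sketch of that guard-page setup:

    #include <sys/mman.h>
    #include <string.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        long page_size = getpagesize();
        char *mem_region = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (mem_region == MAP_FAILED)
            return 1;

        for (long i = 0; i < page_size; i++)
            mem_region[i] = i & 0xff;              /* data in the first page */
        memset(mem_region + page_size, 0, page_size);

        /* Make the second page a guard page: any access past the first page faults. */
        if (mprotect(mem_region + page_size, page_size, PROT_NONE)) {
            perror("mprotect");
            return 1;
        }

        printf("last readable byte: 0x%02x\n",
               (unsigned char)mem_region[page_size - 1]);

        /* Re-open the page once the faulting phase of the test is done. */
        mprotect(mem_region + page_size, page_size, PROT_READ | PROT_WRITE);
        munmap(mem_region, 2 * page_size);
        return 0;
    }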
|
/Linux-v5.10/tools/testing/selftests/bpf/prog_tests/ |
D | mmap.c |
  12 long page_size = sysconf(_SC_PAGE_SIZE); in roundup_page() local
  13 return (sz + page_size - 1) / page_size * page_size; in roundup_page()
  21 const long page_size = sysconf(_SC_PAGE_SIZE); in test_mmap() local
  186 tmp0 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, in test_mmap()
  192 tmp1 = mmap(tmp0, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED, in test_mmap()
  195 munmap(tmp0, 4 * page_size); in test_mmap()
  200 err = munmap(tmp1 + page_size, page_size); in test_mmap()
  202 munmap(tmp1, 4 * page_size); in test_mmap()
  207 tmp2 = mmap(tmp1 + page_size, page_size, PROT_READ, in test_mmap()
  210 munmap(tmp1, page_size); in test_mmap()
  [all …]
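roundup_page() rounds a size up to a whole number of pages before the test mmap()s BPF map memory. The helper reduces to the division below; the mask variant shown alongside it is equivalent only when page_size is a power of two:

    #include <stdio.h>
    #include <unistd.h>

    /* Division form, as used in the test: works for any page size. */
    static size_t roundup_page(size_t sz)
    {
        long page_size = sysconf(_SC_PAGE_SIZE);
        return (sz + page_size - 1) / page_size * page_size;
    }

    /* Mask form: equivalent when page_size is a power of two. */
    static size_t roundup_page_mask(size_t sz)
    {
        size_t page_size = sysconf(_SC_PAGE_SIZE);
        return (sz + page_size - 1) & ~(page_size - 1);
    }

    int main(void)
    {
        printf("%zu -> %zu / %zu\n", (size_t)5000,
               roundup_page(5000), roundup_page_mask(5000));
        return 0;
    }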
|
/Linux-v5.10/drivers/mtd/spi-nor/ |
D | xilinx.c |
  33 offset = addr % nor->page_size; in s3an_convert_addr()
  34 page = addr / nor->page_size; in s3an_convert_addr()
  35 page <<= (nor->page_size > 512) ? 10 : 9; in s3an_convert_addr()
  67 nor->page_size = (nor->page_size == 264) ? 256 : 512; in xilinx_nor_setup()
  68 nor->mtd.writebufsize = nor->page_size; in xilinx_nor_setup()
  69 nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors; in xilinx_nor_setup()
  70 nor->mtd.erasesize = 8 * nor->page_size; in xilinx_nor_setup()
|
/Linux-v5.10/drivers/misc/ |
D | vmw_balloon.c |
  247 enum vmballoon_page_size_type page_size; member
  581 unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size) in vmballoon_page_order() argument
  583 return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0; in vmballoon_page_order()
  593 vmballoon_page_in_frames(enum vmballoon_page_size_type page_size) in vmballoon_page_in_frames() argument
  595 return 1 << vmballoon_page_order(page_size); in vmballoon_page_in_frames()
  605 enum vmballoon_page_size_type page_size) in vmballoon_mark_page_offline() argument
  609 for (i = 0; i < vmballoon_page_in_frames(page_size); i++) in vmballoon_mark_page_offline()
  620 enum vmballoon_page_size_type page_size) in vmballoon_mark_page_online() argument
  624 for (i = 0; i < vmballoon_page_in_frames(page_size); i++) in vmballoon_mark_page_online()
  684 if (ctl->page_size == VMW_BALLOON_2M_PAGE) in vmballoon_alloc_page_list()
  [all …]
|
/Linux-v5.10/drivers/net/ethernet/qlogic/qed/ |
D | qed_chain.c |
  22 params->page_size); in qed_chain_init()
  24 params->page_size, in qed_chain_init()
  33 chain->page_size = params->page_size; in qed_chain_init()
  88 dma_free_coherent(dev, chain->page_size, virt, phys); in qed_chain_free_next_ptr()
  101 dma_free_coherent(&cdev->pdev->dev, chain->page_size, in qed_chain_free_single()
  119 dma_free_coherent(dev, chain->page_size, entry->virt_addr, in qed_chain_free_pbl()
  164 chain_size = ELEMS_PER_PAGE(params->elem_size, params->page_size); in qed_chain_alloc_sanity_check()
  207 virt = dma_alloc_coherent(dev, chain->page_size, &phys, in qed_chain_alloc_next_ptr()
  238 virt = dma_alloc_coherent(&cdev->pdev->dev, chain->page_size, in qed_chain_alloc_single()
  290 virt = dma_alloc_coherent(dev, chain->page_size, &phys, in qed_chain_alloc_pbl()
  [all …]
|
/Linux-v5.10/tools/testing/selftests/kvm/lib/s390x/ |
D | processor.c |
  22 TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x", in virt_pgd_alloc()
  23 vm->page_size); in virt_pgd_alloc()
  30 memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size); in virt_pgd_alloc()
  47 memset(addr_gpa2hva(vm, taddr), 0xff, PAGES_PER_REGION * vm->page_size); in virt_alloc_region()
  60 TEST_ASSERT((gva % vm->page_size) == 0, in virt_pg_map()
  63 gva, vm->page_size); in virt_pg_map()
  68 TEST_ASSERT((gpa % vm->page_size) == 0, in virt_pg_map()
  71 gva, vm->page_size); in virt_pg_map()
  75 gva, vm->max_gfn, vm->page_size); in virt_pg_map()
  99 TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x", in addr_gva2gpa()
  [all …]
|
/Linux-v5.10/tools/power/acpi/os_specific/service_layers/ |
D | osunixmap.c |
  67 acpi_size page_size; in acpi_os_map_memory() local
  78 page_size = acpi_os_get_page_size(); in acpi_os_map_memory()
  79 offset = where % page_size; in acpi_os_map_memory()
  112 acpi_size page_size; in acpi_os_unmap_memory() local
  114 page_size = acpi_os_get_page_size(); in acpi_os_unmap_memory()
  115 offset = ACPI_TO_INTEGER(where) % page_size; in acpi_os_unmap_memory()
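acpi_os_map_memory() cannot pass an unaligned physical address straight to mmap(), so it maps from the containing page boundary and adds the in-page offset back to the returned pointer. The same pattern works for any unaligned file offset; a hedged sketch that maps a few bytes of /proc/self/exe instead of /dev/mem:

    #include <sys/mman.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Map `length` bytes starting at an arbitrary, unaligned file offset. */
    static void *map_at_offset(int fd, off_t where, size_t length,
                               void **base, size_t *maplen)
    {
        long page_size = sysconf(_SC_PAGE_SIZE);
        off_t offset = where % page_size;      /* distance into the containing page */

        *maplen = length + offset;
        *base = mmap(NULL, *maplen, PROT_READ, MAP_PRIVATE,
                     fd, where - offset);      /* mmap offset must be page aligned */
        if (*base == MAP_FAILED)
            return NULL;

        return (char *)*base + offset;         /* hand back the unaligned address */
    }

    int main(void)
    {
        int fd = open("/proc/self/exe", O_RDONLY);
        if (fd < 0)
            return 1;

        void *base;
        size_t maplen;
        unsigned char *p = map_at_offset(fd, 3, 4, &base, &maplen);
        if (p) {
            printf("byte at offset 3: 0x%02x\n", p[0]);
            munmap(base, maplen);              /* unmap with the aligned base, not p */
        }
        close(fd);
        return 0;
    }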
|
/Linux-v5.10/tools/testing/selftests/powerpc/copyloops/ |
D | exc_validate.c |
  81 int page_size; in test_copy_exception() local
  85 page_size = getpagesize(); in test_copy_exception()
  86 p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE, in test_copy_exception()
  94 memset(p, 0, page_size); in test_copy_exception()
  98 if (mprotect(p + page_size, page_size, PROT_NONE)) { in test_copy_exception()
  103 q = p + page_size - MAX_LEN; in test_copy_exception()
|
/Linux-v5.10/arch/um/os-Linux/ |
D | elf_aux.c |
  28 long page_size = 0; in scan_elf_aux() local
  59 page_size = auxv->a_un.a_val; in scan_elf_aux()
  65 ! page_size || (vsyscall_ehdr % page_size) ) { in scan_elf_aux()
  72 vsyscall_end = vsyscall_ehdr + page_size; in scan_elf_aux()
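UML walks the ELF auxiliary vector by hand to find the vDSO address and the page size. From ordinary userspace the same values are available through getauxval(3); a short sketch mirroring the sanity check in scan_elf_aux() (AT_SYSINFO_EHDR may be 0 on architectures without a vDSO):

    #include <sys/auxv.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long page_size = getauxval(AT_PAGESZ);
        unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

        printf("AT_PAGESZ       = %lu\n", page_size);
        printf("AT_SYSINFO_EHDR = 0x%lx\n", vdso);

        /* Mirror the sanity check in scan_elf_aux(): the vDSO must be page aligned. */
        if (vdso && page_size && (vdso % page_size) == 0)
            printf("vdso spans [0x%lx, 0x%lx)\n", vdso, vdso + page_size);
        return 0;
    }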
|
/Linux-v5.10/tools/testing/selftests/powerpc/mm/ |
D | stack_expansion_ldst.c |
  157 static void test_one_type(enum access_type type, unsigned long page_size, unsigned long rlim_cur) in test_one_type() argument
  162 for (delta = page_size; delta <= rlim_cur; delta += page_size) in test_one_type()
  173 unsigned long page_size; in test() local
  176 page_size = getpagesize(); in test()
  181 test_one_type(LOAD, page_size, rlimit.rlim_cur); in test()
  183 test_one_type(STORE, page_size, rlimit.rlim_cur); in test()
|
/Linux-v5.10/arch/powerpc/mm/book3s64/ |
D | radix_tlb.c |
  417 unsigned long pid, unsigned long page_size, in __tlbiel_va_range() argument
  423 for (addr = start; addr < end; addr += page_size) in __tlbiel_va_range()
  438 unsigned long pid, unsigned long page_size, in _tlbiel_va_range() argument
  444 __tlbiel_va_range(start, end, pid, page_size, psize); in _tlbiel_va_range()
  449 unsigned long pid, unsigned long page_size, in __tlbie_va_range() argument
  455 for (addr = start; addr < end; addr += page_size) in __tlbie_va_range()
  458 fixup_tlbie_va_range(addr - page_size, pid, ap); in __tlbie_va_range()
  506 unsigned long page_size; member
  515 _tlbiel_va_range(t->start, t->end, t->pid, t->page_size, in do_tlbiel_va_range()
  531 unsigned long pid, unsigned long page_size, in _tlbie_va_range() argument
  [all …]
|
/Linux-v5.10/tools/testing/selftests/kvm/lib/ |
D | kvm_util.c |
  148 unsigned int page_size; member
  201 vm->page_size = vm_guest_mode_params[mode].page_size; in vm_create()
  556 amt = vm->page_size - (ptr1 % vm->page_size); in kvm_memcmp_hva_gva()
  558 amt = vm->page_size - (ptr2 % vm->page_size); in kvm_memcmp_hva_gva()
  609 size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size; in vm_userspace_mem_region_add()
  616 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical " in vm_userspace_mem_region_add()
  619 guest_paddr, vm->page_size); in vm_userspace_mem_region_add()
  625 guest_paddr, npages, vm->max_gfn, vm->page_size); in vm_userspace_mem_region_add()
  632 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); in vm_userspace_mem_region_add()
  639 guest_paddr, npages, vm->page_size, in vm_userspace_mem_region_add()
  [all …]
|
/Linux-v5.10/drivers/gpu/drm/i915/gem/selftests/ |
D | huge_pages.c |
  37 unsigned int page_size = page_sizes[i]; in get_largest_page_size() local
  39 if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size) in get_largest_page_size()
  40 return page_size; in get_largest_page_size()
  89 unsigned int page_size = BIT(bit); in get_huge_pages() local
  90 int order = get_order(page_size); in get_huge_pages()
  100 sg_set_page(sg, page, page_size, 0); in get_huge_pages()
  101 sg_page_sizes |= page_size; in get_huge_pages()
  104 rem -= page_size; in get_huge_pages()
  111 } while ((rem - ((page_size-1) & page_mask)) >= page_size); in get_huge_pages()
  113 page_mask &= (page_size-1); in get_huge_pages()
  [all …]
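get_largest_page_size() walks the supported GPU page sizes from largest to smallest and returns the first one that the device supports and that still fits in the remaining length. A hedged sketch of the same selection; the hard-coded support mask replaces HAS_PAGE_SIZES() and the candidate sizes are assumptions:

    #include <stdio.h>

    /* Hypothetical supported-size mask: 4K, 64K and 2M pages (bit value == size). */
    #define SUPPORTED_MASK    ((1u << 12) | (1u << 16) | (1u << 21))

    static unsigned int get_largest_page_size(unsigned int supported, unsigned long rem)
    {
        /* Probe from the largest candidate down to the smallest. */
        static const unsigned int page_sizes[] = { 1u << 21, 1u << 16, 1u << 12 };

        for (unsigned int i = 0; i < sizeof(page_sizes) / sizeof(page_sizes[0]); i++) {
            unsigned int page_size = page_sizes[i];

            if ((supported & page_size) && rem >= page_size)
                return page_size;
        }
        return 0;
    }

    int main(void)
    {
        printf("%u\n", get_largest_page_size(SUPPORTED_MASK, 3u << 20));   /* 2M  */
        printf("%u\n", get_largest_page_size(SUPPORTED_MASK, 80u << 10));  /* 64K */
        return 0;
    }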
|
/Linux-v5.10/arch/powerpc/platforms/ps3/ |
D | mm.c |
  351 DBG("%s:%d: page_size %u\n", func, line, r->page_size); in _dma_dump_region()
  387 DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size); in _dma_dump_chunk()
  399 unsigned long aligned_bus = ALIGN_DOWN(bus_addr, 1 << r->page_size); in dma_find_chunk()
  401 1 << r->page_size); in dma_find_chunk()
  428 unsigned long aligned_lpar = ALIGN_DOWN(lpar_addr, 1 << r->page_size); in dma_find_chunk_lpar()
  430 1 << r->page_size); in dma_find_chunk_lpar()
  477 for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) { in dma_ioc0_free_chunk()
  478 offset = (1 << r->page_size) * iopage; in dma_ioc0_free_chunk()
  587 pages = len >> r->page_size; in dma_ioc0_map_pages()
  589 r->page_size, r->len, pages, iopte_flag); in dma_ioc0_map_pages()
  [all …]
|