/Linux-v5.10/arch/s390/mm/

kasan_init.c:
    148  address = (address + PUD_SIZE) & PUD_MASK;  in kasan_early_vmemmap_populate()
    155  IS_ALIGNED(address, PUD_SIZE) &&  in kasan_early_vmemmap_populate()
    156  end - address >= PUD_SIZE) {  in kasan_early_vmemmap_populate()
    159  address = (address + PUD_SIZE) & PUD_MASK;  in kasan_early_vmemmap_populate()
    319  BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE));  in kasan_early_init()
    320  BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));  in kasan_early_init()
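The recurring `(address + PUD_SIZE) & PUD_MASK` expression advances a cursor to the next PUD boundary. A minimal user-space sketch of the arithmetic, assuming PUD_SHIFT = 30 (1 GiB PUDs, the x86-64/arm64 4K-page value; s390's actual shift differs):

```c
#include <stdio.h>

#define PUD_SHIFT 30                    /* assumed; arch-specific in reality */
#define PUD_SIZE  (1UL << PUD_SHIFT)
#define PUD_MASK  (~(PUD_SIZE - 1))

/* Next PUD boundary strictly above the PUD containing addr. */
static unsigned long next_pud(unsigned long addr)
{
        return (addr + PUD_SIZE) & PUD_MASK;
}

int main(void)
{
        /* An aligned input advances by exactly one PUD ... */
        printf("%#lx\n", next_pud(0x40000000UL)); /* 0x80000000 */
        /* ... and an unaligned input rounds up to the end of its PUD. */
        printf("%#lx\n", next_pud(0x40000001UL)); /* 0x80000000 */
        return 0;
}
```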
vmem.c:
    291  const unsigned long end = start + PUD_SIZE;  in try_free_pmd_table()
    328  if (IS_ALIGNED(addr, PUD_SIZE) &&  in modify_pud_table()
    329  IS_ALIGNED(next, PUD_SIZE)) {  in modify_pud_table()
    336  if (IS_ALIGNED(addr, PUD_SIZE) &&  in modify_pud_table()
    337  IS_ALIGNED(next, PUD_SIZE) &&  in modify_pud_table()
hugetlbpage.c:
    140  size = PUD_SIZE;  in clear_huge_pte_skeys()
    205  if (sz == PUD_SIZE)  in huge_pte_alloc()
    261  else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE)  in arch_hugetlb_valid_size()
/Linux-v5.10/include/asm-generic/

pgtable-nopud.h:
    20  #define PUD_SIZE (1UL << PUD_SHIFT)
    21  #define PUD_MASK (~(PUD_SIZE-1))
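Every per-architecture header in this listing derives PUD_SIZE and PUD_MASK the same way from PUD_SHIFT. A standalone sketch of the mask arithmetic, with PUD_SHIFT = 30 assumed rather than taken from any real header:

```c
#include <stdio.h>

/* Assumed value: 30 gives 1 GiB PUDs, as on x86-64 with 4K pages. */
#define PUD_SHIFT 30
#define PUD_SIZE  (1UL << PUD_SHIFT)
#define PUD_MASK  (~(PUD_SIZE - 1))

int main(void)
{
        unsigned long addr = 0x40003000UL;

        /* Round down: keep the high bits, clear the low PUD_SHIFT bits. */
        unsigned long start = addr & PUD_MASK;
        /* An address is PUD-aligned iff its low PUD_SHIFT bits are zero. */
        int aligned = (addr & ~PUD_MASK) == 0;

        printf("PUD_SIZE=%#lx start=%#lx aligned=%d\n", PUD_SIZE, start, aligned);
        return 0;
}
```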
/Linux-v5.10/arch/arm64/mm/

hugetlbpage.c:
    66   case PUD_SIZE:  in arch_hugetlb_migration_supported()
    130  case PUD_SIZE:  in num_contig_ptes()
    270  if (sz == PUD_SIZE) {  in huge_pte_alloc()
    319  if (sz != PUD_SIZE && pud_none(pud))  in huge_pte_offset()
    352  } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {  in arch_make_huge_pte()
    509  case PUD_SIZE:  in arch_hugetlb_valid_size()
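The arm64 hits cluster around switch statements that enumerate the block sizes the MMU can map with a single entry. A simplified, hypothetical `hugetlb_valid_size()` showing the shape of such a check (sizes assume 4K base pages; the real arm64 code also handles CONT_PTE/CONT_PMD contiguous sizes):

```c
#include <stdbool.h>
#include <stdio.h>

#define PMD_SIZE (1UL << 21)    /* assumed: 2 MiB with 4K pages */
#define PUD_SIZE (1UL << 30)    /* assumed: 1 GiB with 4K pages */

/* Accept only sizes expressible as one block entry at some level. */
static bool hugetlb_valid_size(unsigned long size)
{
        switch (size) {
        case PMD_SIZE:
        case PUD_SIZE:
                return true;
        default:
                return false;
        }
}

int main(void)
{
        printf("%d %d %d\n",
               hugetlb_valid_size(PMD_SIZE),   /* 1 */
               hugetlb_valid_size(PUD_SIZE),   /* 1 */
               hugetlb_valid_size(1UL << 22)); /* 0: 4 MiB is no block size */
        return 0;
}
```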
/Linux-v5.10/arch/powerpc/include/asm/nohash/64/

pgtable-4k.h:
    36  #define PUD_SIZE (1UL << PUD_SHIFT)
    37  #define PUD_MASK (~(PUD_SIZE-1))
/Linux-v5.10/arch/x86/include/asm/

pgtable_64_types.h:
    100  #define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
    101  #define PUD_MASK (~(PUD_SIZE - 1))
/Linux-v5.10/drivers/dax/

device.c:
    154  unsigned int fault_size = PUD_SIZE;  in __dev_dax_pud_fault()
    160  if (dev_dax->align > PUD_SIZE) {  in __dev_dax_pud_fault()
    173  (pud_addr + PUD_SIZE) > vmf->vma->vm_end)  in __dev_dax_pud_fault()
    177  phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);  in __dev_dax_pud_fault()
    220  fault_size = PUD_SIZE;  in dev_dax_huge_fault()
dax-private.h:
    94  if (align == PUD_SIZE && IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))  in dax_align_valid()
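Line 173 of device.c is the key bounds check: a PUD fault can only be served if the naturally aligned PUD-sized window around the fault lies entirely inside the VMA. A hedged sketch with a stand-in `struct vma` (the helper name `pud_fault_fits` is invented here, and PUD_SIZE is assumed to be 1 GiB):

```c
#include <stdbool.h>
#include <stdio.h>

#define PUD_SIZE (1UL << 30)            /* assumed: 1 GiB with 4K pages */
#define PUD_MASK (~(PUD_SIZE - 1))

struct vma { unsigned long vm_start, vm_end; };

/*
 * The whole naturally aligned PUD-sized window containing the faulting
 * address must fit inside the VMA, or the fault falls back to a
 * smaller mapping size.
 */
static bool pud_fault_fits(const struct vma *vma, unsigned long address)
{
        unsigned long pud_addr = address & PUD_MASK;

        return pud_addr >= vma->vm_start &&
               pud_addr + PUD_SIZE <= vma->vm_end;
}

int main(void)
{
        struct vma v = { 0x40000000UL, 0xc0000000UL };    /* [1 GiB, 3 GiB) */

        printf("%d\n", pud_fault_fits(&v, 0x50000000UL)); /* 1: window fits */
        printf("%d\n", pud_fault_fits(&v, 0xb0000000UL)); /* 1: last window */
        printf("%d\n", pud_fault_fits(&v, 0x3f000000UL)); /* 0: starts below */
        return 0;
}
```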
/Linux-v5.10/mm/

ioremap.c:
    137  if ((end - addr) != PUD_SIZE)  in ioremap_try_huge_pud()
    140  if (!IS_ALIGNED(addr, PUD_SIZE))  in ioremap_try_huge_pud()
    143  if (!IS_ALIGNED(phys_addr, PUD_SIZE))  in ioremap_try_huge_pud()
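ioremap_try_huge_pud() attempts a huge PUD mapping only when three conditions hold at once; otherwise the walk descends to PMD granularity. A sketch of that predicate (`can_map_huge_pud` is a made-up name, and the real function also gates on whether the architecture enables huge-PUD ioremap at all):

```c
#include <stdbool.h>
#include <stdio.h>

#define PUD_SIZE (1UL << 30)            /* assumed: 1 GiB with 4K pages */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/*
 * The remaining range must be exactly one PUD, and both the virtual
 * and physical addresses must sit on a PUD boundary.
 */
static bool can_map_huge_pud(unsigned long addr, unsigned long end,
                             unsigned long phys_addr)
{
        if ((end - addr) != PUD_SIZE)
                return false;
        if (!IS_ALIGNED(addr, PUD_SIZE))
                return false;
        if (!IS_ALIGNED(phys_addr, PUD_SIZE))
                return false;
        return true;
}

int main(void)
{
        /* Exactly one aligned PUD on both sides: eligible. */
        printf("%d\n", can_map_huge_pud(0x40000000UL, 0x80000000UL, 0x100000000UL));
        /* Misaligned physical address: fall back to PMDs. */
        printf("%d\n", can_map_huge_pud(0x40000000UL, 0x80000000UL, 0x100200000UL));
        return 0;
}
```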
/Linux-v5.10/arch/x86/mm/

kasan_init_64.c:
    84  ((end - addr) == PUD_SIZE) &&  in kasan_populate_pud()
    85  IS_ALIGNED(addr, PUD_SIZE)) {  in kasan_populate_pud()
    86  p = early_alloc(PUD_SIZE, nid, false);  in kasan_populate_pud()
    90  memblock_free(__pa(p), PUD_SIZE);  in kasan_populate_pud()
init.c:
    336  unsigned long start = round_down(mr[i].start, PUD_SIZE);  in adjust_range_page_size_mask()
    337  unsigned long end = round_up(mr[i].end, PUD_SIZE);  in adjust_range_page_size_mask()
    408  end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));  in split_mem_range()
    421  start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));  in split_mem_range()
    422  end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));  in split_mem_range()
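adjust_range_page_size_mask() widens each candidate range outward to PUD boundaries before deciding whether gbpages may be used, while split_mem_range() rounds inward to find the fully covered middle. A sketch of the outward rounding (round_down/round_up are re-implemented locally for the sketch, with the usual power-of-two assumption):

```c
#include <stdio.h>

#define PUD_SIZE (1UL << 30)            /* assumed: 1 GiB with 4K pages */
#define round_down(x, a) ((x) & ~((a) - 1))
#define round_up(x, a)   (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long start = 0x40200000UL, end = 0x7fe00000UL;

        /*
         * Widen the range to the PUD boundaries that would contain it;
         * the widened range is what a gbpage mapping would actually cover.
         */
        printf("widened: [%#lx, %#lx)\n",
               round_down(start, PUD_SIZE),   /* 0x40000000 */
               round_up(end, PUD_SIZE));      /* 0x80000000 */
        return 0;
}
```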
kaslr.c:
    136  vaddr = round_up(vaddr + 1, PUD_SIZE);  in kernel_randomize_memory()
init_64.c:
    602   paddr_next = (paddr & PUD_MASK) + PUD_SIZE;  in phys_pud_init()
    1103  if (IS_ALIGNED(addr, PUD_SIZE) &&  in remove_pud_table()
    1104  IS_ALIGNED(next, PUD_SIZE)) {  in remove_pud_table()
    1107  get_order(PUD_SIZE));  in remove_pud_table()
    1119  PUD_SIZE)) {  in remove_pud_table()
    1121  get_order(PUD_SIZE));  in remove_pud_table()
hugetlbpage.c:
    187  else if (size == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES))  in arch_hugetlb_valid_size()
/Linux-v5.10/arch/powerpc/mm/book3s64/

radix_pgtable.c:
    88    if (map_page_size == PUD_SIZE) {  in early_map_kernel_page()
    154   if (map_page_size == PUD_SIZE) {  in __map_kernel_page()
    290   if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&  in create_physical_mapping()
    292   mapping_size = PUD_SIZE;  in create_physical_mapping()
    858   if (!IS_ALIGNED(addr, PUD_SIZE) ||  in remove_pud_table()
    859   !IS_ALIGNED(next, PUD_SIZE)) {  in remove_pud_table()
    1171  flush_tlb_kernel_range(addr, addr + PUD_SIZE);  in pud_free_pmd_page()
/Linux-v5.10/mm/kasan/

init.c:
    154  if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {  in zero_pud_populate()
    404  if (IS_ALIGNED(addr, PUD_SIZE) &&  in kasan_remove_pud_table()
    405  IS_ALIGNED(next, PUD_SIZE))  in kasan_remove_pud_table()
/Linux-v5.10/arch/riscv/mm/

hugetlbpage.c:
    19  else if (IS_ENABLED(CONFIG_64BIT) && size == PUD_SIZE)  in arch_hugetlb_valid_size()
/Linux-v5.10/arch/x86/mm/pat/

set_memory.c:
    1184  if (start & (PUD_SIZE - 1)) {  in unmap_pud_range()
    1185  unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;  in unmap_pud_range()
    1197  while (end - start >= PUD_SIZE) {  in unmap_pud_range()
    1202  unmap_pmd_range(pud, start, start + PUD_SIZE);  in unmap_pud_range()
    1204  start += PUD_SIZE;  in unmap_pud_range()
    1347  if (start & (PUD_SIZE - 1)) {  in populate_pud()
    1349  unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;  in populate_pud()
    1382  while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {  in populate_pud()
    1386  start += PUD_SIZE;  in populate_pud()
    1387  cpa->pfn += PUD_SIZE >> PAGE_SHIFT;  in populate_pud()
    [all …]
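unmap_pud_range() and populate_pud() share a head/body/tail structure: finish any partial leading PUD, process whole PUDs in a loop, then hand the remainder down a level. A condensed sketch of that control flow (`visit_pud` is a placeholder for the per-PUD work; boundaries assume 1 GiB PUDs):

```c
#include <stdio.h>

#define PUD_SIZE (1UL << 30)            /* assumed: 1 GiB with 4K pages */
#define PUD_MASK (~(PUD_SIZE - 1))

/* Placeholder for per-PUD work; the real code unmaps or maps PMDs here. */
static void visit_pud(unsigned long start, unsigned long end)
{
        printf("  [%#lx, %#lx)\n", start, end);
}

/*
 * Head/body/tail walk: finish the partial leading PUD, consume whole
 * PUDs, then hand the unaligned remainder to the next-lower level.
 */
static void walk_pud_range(unsigned long start, unsigned long end)
{
        if (start & (PUD_SIZE - 1)) {
                unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;

                visit_pud(start, next_page < end ? next_page : end);
                start = next_page;
        }
        while (start < end && end - start >= PUD_SIZE) {
                visit_pud(start, start + PUD_SIZE);
                start += PUD_SIZE;
        }
        if (start < end)
                visit_pud(start, end);  /* unaligned tail */
}

int main(void)
{
        walk_pud_range(0x3fe00000UL, 0xc0200000UL);
        return 0;
}
```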
/Linux-v5.10/arch/mips/include/asm/

pgtable-64.h:
    60  #define PUD_SIZE (1UL << PUD_SHIFT)
    61  #define PUD_MASK (~(PUD_SIZE-1))
/Linux-v5.10/arch/arm64/include/asm/

pgtable-hwdef.h:
    60  #define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
    61  #define PUD_MASK (~(PUD_SIZE-1))
/Linux-v5.10/arch/sparc/mm/

hugetlbpage.c:
    288  if (sz >= PUD_SIZE)  in huge_pte_alloc()
    335  if (size >= PUD_SIZE)  in set_huge_pte_at()
    372  if (size >= PUD_SIZE)  in huge_ptep_get_and_clear()
/Linux-v5.10/arch/x86/boot/compressed/

kaslr.c:
    240  if (memparse(p, &p) != PUD_SIZE) {  in parse_gb_huge_pages()
    527  pud_start = ALIGN(region->start, PUD_SIZE);  in process_gb_huge_pages()
    528  pud_end = ALIGN_DOWN(region->start + region->size, PUD_SIZE);  in process_gb_huge_pages()
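process_gb_huge_pages() trims a region inward to the largest whole-GiB-page span it contains: ALIGN rounds the start up, ALIGN_DOWN rounds the end down, and the region is only usable if the two still leave room. A small sketch, assuming 1 GiB PUDs and local re-implementations of the two macros:

```c
#include <stdio.h>

#define PUD_SIZE (1UL << 30)            /* assumed: 1 GiB with 4K pages */
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
        unsigned long start = 0x3fe00000UL, size = 0x100000000UL;

        /* Round inward: only fully covered 1 GiB pages count. */
        unsigned long pud_start = ALIGN(start, PUD_SIZE);
        unsigned long pud_end = ALIGN_DOWN(start + size, PUD_SIZE);

        if (pud_start < pud_end)
                printf("%lu whole GB pages at %#lx\n",
                       (pud_end - pud_start) >> 30, pud_start); /* 3 at 0x40000000 */
        return 0;
}
```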
/Linux-v5.10/arch/x86/xen/

xen-head.S:
    91  ELFNOTE(Xen, XEN_ELFNOTE_INIT_P2M, .quad (PUD_SIZE * PTRS_PER_PUD))
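The Xen note sizes the initial P2M area as the span one fully populated PUD-level table can map. With the common 4K-page values, PUD_SIZE * PTRS_PER_PUD is 2^30 * 2^9 = 2^39 bytes; a one-liner to confirm (both constants are assumptions here, and a 64-bit host is assumed so the product does not overflow):

```c
#include <stdio.h>

#define PUD_SHIFT    30                 /* assumed: 4K pages */
#define PUD_SIZE     (1UL << PUD_SHIFT)
#define PTRS_PER_PUD 512                /* assumed: 4K pages, 8-byte entries */

int main(void)
{
        /* One PUD table maps 2^30 * 2^9 = 2^39 bytes = 512 GiB. */
        printf("%lu GiB\n", (PUD_SIZE * PTRS_PER_PUD) >> 30);
        return 0;
}
```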
/Linux-v5.10/arch/powerpc/mm/ptdump/

ptdump.c:
    321  addr = start + i * PUD_SIZE;  in walk_pud()
    326  note_page(st, addr, 2, pud_val(*pud), PUD_SIZE);  in walk_pud()