Home
last modified time | relevance | path

Searched refs:PMD_SIZE (Results 1 – 25 of 102) sorted by relevance

12345

/Linux-v4.19/arch/x86/mm/
init.c:287 unsigned long start = round_down(mr[i].start, PMD_SIZE); in adjust_range_page_size_mask()
288 unsigned long end = round_up(mr[i].end, PMD_SIZE); in adjust_range_page_size_mask()
354 end_pfn = PFN_DOWN(PMD_SIZE); in split_mem_range()
356 end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
358 end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
368 start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
370 end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
373 if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE))) in split_mem_range()
374 end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
395 start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
[all …]
init_64.c:341 for (; size; phys += PMD_SIZE, size -= PMD_SIZE) { in __init_extra_mapping()
393 unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1; in cleanup_highmap()
404 for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { in cleanup_highmap()
484 paddr_next = (paddr & PMD_MASK) + PMD_SIZE; in phys_pmd_init()
838 vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE); in free_hugepage_table()
840 free_pagetable(page, get_order(PMD_SIZE)); in free_hugepage_table()
985 if (IS_ALIGNED(addr, PMD_SIZE) && in remove_pmd_table()
986 IS_ALIGNED(next, PMD_SIZE)) { in remove_pmd_table()
1001 PMD_SIZE)) { in remove_pmd_table()
1275 all_end = roundup((unsigned long)_brk_end, PMD_SIZE); in mark_rodata_ro()
[all …]
mem_encrypt.c:131 vaddr += PMD_SIZE; in __sme_early_map_unmap_mem()
132 paddr += PMD_SIZE; in __sme_early_map_unmap_mem()
133 size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE; in __sme_early_map_unmap_mem()
kasan_init_64.c:47 ((end - addr) == PMD_SIZE) && in kasan_populate_pmd()
48 IS_ALIGNED(addr, PMD_SIZE)) { in kasan_populate_pmd()
49 p = early_alloc(PMD_SIZE, nid, false); in kasan_populate_pmd()
53 memblock_free(__pa(p), PMD_SIZE); in kasan_populate_pmd()
/Linux-v4.19/arch/x86/include/asm/
pgtable_32_types.h:12 # define PMD_SIZE (1UL << PMD_SHIFT) macro
13 # define PMD_MASK (~(PMD_SIZE - 1))
56 #define LDT_END_ADDR (LDT_BASE_ADDR + PMD_SIZE)
pgtable_64_types.h:98 #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) macro
99 #define PMD_MASK (~(PMD_SIZE - 1))
/Linux-v4.19/arch/sh/include/asm/
pgtable-3level.h:24 #define PMD_SIZE (1UL << PMD_SHIFT) macro
25 #define PMD_MASK (~(PMD_SIZE-1))
27 #define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE)
/Linux-v4.19/arch/arm64/mm/
hugetlbpage.c:65 *pgsize = PMD_SIZE; in find_num_contig()
81 case PMD_SIZE: in num_contig_ptes()
85 *pgsize = PMD_SIZE; in num_contig_ptes()
231 } else if (sz == PMD_SIZE) { in huge_pte_alloc()
237 } else if (sz == (PMD_SIZE * CONT_PMDS)) { in huge_pte_alloc()
271 if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) && in huge_pte_offset()
292 } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) { in arch_make_huge_pte()
440 case PMD_SIZE * CONT_PMDS: in setup_hugepagesz()
441 case PMD_SIZE: in setup_hugepagesz()
/Linux-v4.19/arch/parisc/kernel/
pci-dma.c:86 if (end > PMD_SIZE) in map_pte_uncached()
87 end = PMD_SIZE; in map_pte_uncached()
121 vaddr = (vaddr + PMD_SIZE) & PMD_MASK; in map_pmd_uncached()
122 orig_vaddr += PMD_SIZE; in map_pmd_uncached()
166 if (end > PMD_SIZE) in unmap_uncached_pte()
167 end = PMD_SIZE; in unmap_uncached_pte()
206 vaddr = (vaddr + PMD_SIZE) & PMD_MASK; in unmap_uncached_pmd()
207 orig_vaddr += PMD_SIZE; in unmap_uncached_pmd()
/Linux-v4.19/arch/s390/mm/
vmem.c:124 !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) && in vmem_add_mem()
127 address += PMD_SIZE; in vmem_add_mem()
191 address += PMD_SIZE; in vmem_remove_range()
196 address += PMD_SIZE; in vmem_remove_range()
268 new_page = vmemmap_alloc_block(PMD_SIZE, node); in vmemmap_populate()
272 address = (address + PMD_SIZE) & PMD_MASK; in vmemmap_populate()
280 address = (address + PMD_SIZE) & PMD_MASK; in vmemmap_populate()
hugetlbpage.c:141 size = PMD_SIZE; in clear_huge_pte_skeys()
201 else if (sz == PMD_SIZE) in huge_pte_alloc()
257 if (MACHINE_HAS_EDAT1 && size == PMD_SIZE) { in setup_hugepagesz()
/Linux-v4.19/arch/x86/kernel/
vmlinux.lds.S:65 #define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE);
66 #define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE);
77 . = ALIGN(PMD_SIZE); \
82 . = ALIGN(PMD_SIZE); \
/Linux-v4.19/include/asm-generic/
pgtable-nopmd.h:22 #define PMD_SIZE (1UL << PMD_SHIFT) macro
23 #define PMD_MASK (~(PMD_SIZE-1))
/Linux-v4.19/arch/nios2/mm/
ioremap.c:33 if (end > PMD_SIZE) in remap_area_pte()
34 end = PMD_SIZE; in remap_area_pte()
70 address = (address + PMD_SIZE) & PMD_MASK; in remap_area_pmd()
/Linux-v4.19/arch/riscv/include/asm/
pgtable-64.h:26 #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) macro
27 #define PMD_MASK (~(PMD_SIZE - 1))
/Linux-v4.19/arch/powerpc/include/asm/nohash/64/
pgtable-4k.h:31 #define PMD_SIZE (1UL << PMD_SHIFT) macro
32 #define PMD_MASK (~(PMD_SIZE-1))
/Linux-v4.19/arch/mips/mm/
ioremap.c:31 if (end > PMD_SIZE) in remap_area_pte()
32 end = PMD_SIZE; in remap_area_pte()
63 address = (address + PMD_SIZE) & PMD_MASK; in remap_area_pmd()
/Linux-v4.19/arch/x86/boot/compressed/
kaslr_64.c:138 start = round_down(start, PMD_SIZE); in add_identity_map()
139 end = round_up(end, PMD_SIZE); in add_identity_map()
/Linux-v4.19/arch/m68k/include/asm/
pgtable_mm.h:38 #define PMD_SIZE (1UL << PMD_SHIFT) macro
39 #define PMD_MASK (~(PMD_SIZE-1))
/Linux-v4.19/arch/um/include/asm/
pgtable-3level.h:28 #define PMD_SIZE (1UL << PMD_SHIFT) macro
29 #define PMD_MASK (~(PMD_SIZE-1))
/Linux-v4.19/arch/arm/include/asm/
pgtable-2level.h:88 #define PMD_SIZE (1UL << PMD_SHIFT) macro
89 #define PMD_MASK (~(PMD_SIZE-1))
/Linux-v4.19/arch/powerpc/mm/
pgtable-radix.c:105 if (map_page_size == PMD_SIZE) { in early_map_kernel_page()
162 if (map_page_size == PMD_SIZE) { in __map_kernel_page()
287 } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE && in create_physical_mapping()
289 mapping_size = PMD_SIZE; in create_physical_mapping()
299 max_mapping_size = PMD_SIZE; in create_physical_mapping()
303 if (split_text_mapping && (mapping_size == PMD_SIZE) && in create_physical_mapping()
817 split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd); in remove_pmd_table()
/Linux-v4.19/arch/sparc/mm/
hugetlbpage.c:292 if (sz >= PMD_SIZE) in huge_pte_alloc()
332 else if (size >= PMD_SIZE) in set_huge_pte_at()
369 else if (size >= PMD_SIZE) in huge_ptep_get_and_clear()
499 addr += PMD_SIZE; in hugetlb_free_pgd_range()
509 end -= PMD_SIZE; in hugetlb_free_pgd_range()
/Linux-v4.19/arch/arm64/include/asm/
pgtable-hwdef.h:61 #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) macro
62 #define PMD_MASK (~(PMD_SIZE-1))
110 #define CONT_PMD_SIZE (CONT_PMDS * PMD_SIZE)
/Linux-v4.19/fs/
dax.c:46 #define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
47 #define PG_PMD_NR (PMD_SIZE >> PAGE_SHIFT)
323 return PMD_SIZE; in dax_entry_size()
1592 if ((pmd_addr + PMD_SIZE) > vma->vm_end) in dax_iomap_pmd_fault()
1632 error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap); in dax_iomap_pmd_fault()
1636 if (iomap.offset + iomap.length < pos + PMD_SIZE) in dax_iomap_pmd_fault()
1643 error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn); in dax_iomap_pmd_fault()
1664 trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry); in dax_iomap_pmd_fault()
1681 int copied = PMD_SIZE; in dax_iomap_pmd_fault()
1691 ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags, in dax_iomap_pmd_fault()
[all …]

12345