Lines Matching full:pmd
75 DEFINE_POPULATE(pud_populate, pud, pmd, init) in DEFINE_POPULATE()
76 DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init) in DEFINE_POPULATE()
90 DEFINE_ENTRY(pmd, pmd, init)
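
The three macro hits above come from the DEFINE_POPULATE()/DEFINE_ENTRY() wrappers near the top of arch/x86/mm/init_64.c. As a sketch (reconstructed, not copied verbatim from any one kernel version), DEFINE_POPULATE() stamps out an *_init() variant that dispatches to the *_safe() flavour of a populate helper when init is true:

#define DEFINE_POPULATE(fname, type1, type2, init)		\
static inline void fname##_init(struct mm_struct *mm,		\
		type1##_t *arg1, type2##_t *arg2, bool init)	\
{								\
	if (init)						\
		fname##_safe(mm, arg1, arg2);			\
	else							\
		fname(mm, arg1, arg2);				\
}

This is where pud_populate_init() and pmd_populate_kernel_init(), seen later in this listing, come from; DEFINE_ENTRY(pmd, pmd, init) generates the analogous set_pmd_init().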
276 pmd_t *pmd = (pmd_t *) spp_getpage(); in fill_pmd() local
277 pud_populate(&init_mm, pud, pmd); in fill_pmd()
278 if (pmd != pmd_offset(pud, 0)) in fill_pmd()
280 pmd, pmd_offset(pud, 0)); in fill_pmd()
285 static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr) in fill_pte() argument
287 if (pmd_none(*pmd)) { in fill_pte()
289 pmd_populate_kernel(&init_mm, pmd, pte); in fill_pte()
290 if (pte != pte_offset_kernel(pmd, 0)) in fill_pte()
293 return pte_offset_kernel(pmd, vaddr); in fill_pte()
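
Pieced together from the fragments above, fill_pmd() and fill_pte() follow the same allocate-if-empty pattern around spp_getpage(). A sketch (the error message text is illustrative, not verbatim):

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		/* Empty PUD slot: allocate a PMD page and hook it in. */
		pmd_t *pmd = (pmd_t *) spp_getpage();

		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		/* Empty PMD slot: allocate a PTE page and hook it in. */
		pte_t *pte = (pte_t *) spp_getpage();

		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}

__set_pte_vaddr() (lines 298-299) simply chains the two to reach the PTE for a given virtual address, and populate_extra_pte() (lines 357-360) does the same starting from populate_extra_pmd().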
298 pmd_t *pmd = fill_pmd(pud, vaddr); in __set_pte_vaddr() local
299 pte_t *pte = fill_pte(pmd, vaddr); in __set_pte_vaddr()
357 pmd_t *pmd; in populate_extra_pte() local
359 pmd = populate_extra_pmd(vaddr); in populate_extra_pte()
360 return fill_pte(pmd, vaddr); in populate_extra_pte()
372 pmd_t *pmd; in __init_extra_mapping() local
393 pmd = (pmd_t *) spp_getpage(); in __init_extra_mapping()
394 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | in __init_extra_mapping()
397 pmd = pmd_offset(pud, phys); in __init_extra_mapping()
398 BUG_ON(!pmd_none(*pmd)); in __init_extra_mapping()
399 set_pmd(pmd, __pmd(phys | pgprot_val(prot))); in __init_extra_mapping()
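
In context, lines 393-399 are the PMD leg of __init_extra_mapping(): if the PUD slot is empty, a fresh PMD page is linked in, then the 2MiB entry is written directly. Roughly (the enclosing loop over the range is omitted):

	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
						_PAGE_USER));
	}
	pmd = pmd_offset(pud, phys);
	BUG_ON(!pmd_none(*pmd));	/* extra mappings must not overlap */
	set_pmd(pmd, __pmd(phys | pgprot_val(prot)));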
431 pmd_t *pmd = level2_kernel_pgt; in cleanup_highmap() local
441 for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { in cleanup_highmap()
442 if (pmd_none(*pmd)) in cleanup_highmap()
445 set_pmd(pmd, __pmd(0)); in cleanup_highmap()
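
cleanup_highmap() walks the level2_kernel_pgt PMDs that cover the kernel image and zaps every entry outside [_text, roundup(_brk_end, PMD_SIZE)). A condensed sketch (a Xen-related adjustment of vaddr_end is omitted):

	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		/* Clear anything that is not part of the kernel image. */
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}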
503 * Create PMD level page table mapping for physical addresses. The virtual
517 pmd_t *pmd = pmd_page + pmd_index(paddr); in phys_pmd_init() local
528 set_pmd_init(pmd, __pmd(0), init); in phys_pmd_init()
532 if (!pmd_none(*pmd)) { in phys_pmd_init()
533 if (!pmd_large(*pmd)) { in phys_pmd_init()
535 pte = (pte_t *)pmd_page_vaddr(*pmd); in phys_pmd_init()
560 new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd)); in phys_pmd_init()
566 set_pmd_init(pmd, in phys_pmd_init()
578 pmd_populate_kernel_init(&init_mm, pmd, pte, init); in phys_pmd_init()
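
The phys_pmd_init() hits trace a three-way decision per 2MiB slot: reuse an existing PTE table, install a large PMD when PG_LEVEL_2M is permitted by page_size_mask, or allocate a fresh PTE table. Condensed (the init_mm.page_table_lock sections, page accounting, and the reuse-existing-large-page shortcut are elided):

	pmd_t *pmd = pmd_page + pmd_index(paddr);

	if (!pmd_none(*pmd)) {
		if (!pmd_large(*pmd)) {
			/* Slot already holds a PTE table: extend it. */
			pte = (pte_t *)pmd_page_vaddr(*pmd);
			paddr_last = phys_pte_init(pte, paddr, paddr_end,
						   prot, init);
			continue;
		}
		/* Slot holds a large page: preserve its protections. */
		new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
	}

	if (page_size_mask & (1 << PG_LEVEL_2M)) {
		/* Map the whole 2MiB slot with one PSE entry. */
		set_pmd_init(pmd,
			     pfn_pmd(paddr >> PAGE_SHIFT,
				     __pgprot(pgprot_val(prot) | _PAGE_PSE)),
			     init);
		paddr_last = (paddr & PMD_MASK) + PMD_SIZE;
		continue;
	}

	/* Fall back to 4KiB pages under a new PTE table. */
	pte = alloc_low_page();
	paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init);
	pmd_populate_kernel_init(&init_mm, pmd, pte, init);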
602 pmd_t *pmd; in phys_pud_init() local
621 pmd = pmd_offset(pud, 0); in phys_pud_init()
622 paddr_last = phys_pmd_init(pmd, paddr, in phys_pud_init()
660 pmd = alloc_low_page(); in phys_pud_init()
661 paddr_last = phys_pmd_init(pmd, paddr, paddr_end, in phys_pud_init()
665 pud_populate_init(&init_mm, pud, pmd, init); in phys_pud_init()
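
phys_pud_init() hands each range down to phys_pmd_init(): it descends into an existing PMD table when one is present (line 621), otherwise it fills a fresh table before publishing it with pud_populate_init() (lines 660-665). In outline:

	if (!pud_none(*pud) && !pud_large(*pud)) {
		/* Existing PMD table: extend it in place. */
		pmd = pmd_offset(pud, 0);
		paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
					   page_size_mask, prot, init);
		continue;
	}

	/* (The 1GiB PG_LEVEL_1G large-page branch sits here in the
	 * real code, analogous to the 2MiB one in phys_pmd_init().) */

	/* Fresh PMD table, filled before it becomes visible. */
	pmd = alloc_low_page();
	paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
				   page_size_mask, prot, init);

	spin_lock(&init_mm.page_table_lock);
	pud_populate_init(&init_mm, pud, pmd, init);
	spin_unlock(&init_mm.page_table_lock);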
779 * The virtual and physical addresses have to be aligned on PMD level
793 * exception that it uses set_{pud,pmd}() instead of the set_{pud,pte}_safe()
852 /* Returns true if the PMD is completely unused and thus it can be freed */
923 * consecutive sections. Remember for the last added PMD where the in vmemmap_use_new_sub_pmd()
1005 static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd) in free_pte_table() argument
1017 free_pagetable(pmd_page(*pmd), 0); in free_pte_table()
1019 pmd_clear(pmd); in free_pte_table()
1025 pmd_t *pmd; in free_pmd_table() local
1029 pmd = pmd_start + i; in free_pmd_table()
1030 if (!pmd_none(*pmd)) in free_pmd_table()
1034 /* free a pmd table */ in free_pmd_table()
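
free_pte_table() (lines 1005-1019) and free_pmd_table() (lines 1025-1034) are twins: scan the table, bail out on the first live entry, otherwise free the table page and clear the parent entry. The PMD-level one, approximately:

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	/* Any live entry means the table is still needed. */
	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}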
1108 pmd_t *pmd; in remove_pmd_table() local
1110 pmd = pmd_start + pmd_index(addr); in remove_pmd_table()
1111 for (; addr < end; addr = next, pmd++) { in remove_pmd_table()
1114 if (!pmd_present(*pmd)) in remove_pmd_table()
1117 if (pmd_large(*pmd)) { in remove_pmd_table()
1121 free_hugepage_table(pmd_page(*pmd), in remove_pmd_table()
1125 pmd_clear(pmd); in remove_pmd_table()
1131 free_hugepage_table(pmd_page(*pmd), in remove_pmd_table()
1134 pmd_clear(pmd); in remove_pmd_table()
1141 pte_base = (pte_t *)pmd_page_vaddr(*pmd); in remove_pmd_table()
1143 free_pte_table(pte_base, pmd); in remove_pmd_table()
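
remove_pmd_table() separates the two mapping shapes: a large PMD is freed and cleared directly (the two free_hugepage_table() hits at lines 1121 and 1131 are the fully- and partially-covered cases), while a PTE-mapped slot is recursed into and its table freed afterwards. A condensed sketch of the loop body:

	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_large(*pmd)) {
			/* Condensed: the real code treats a range that only
			 * partially covers the 2MiB page separately. */
			free_hugepage_table(pmd_page(*pmd), altmap);
			spin_lock(&init_mm.page_table_lock);
			pmd_clear(pmd);
			spin_unlock(&init_mm.page_table_lock);
			continue;
		}

		/* 4KiB mappings: recurse, then try to free the PTE table. */
		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, direct);
		free_pte_table(pte_base, pmd);
	}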
1391 * is a full PMD. If we would align _brk_end to PAGE_SIZE we in mark_rodata_ro()
1392 * split the PMD and the remainder between _brk_end and the end in mark_rodata_ro()
1393 * of the PMD will remain mapped executable. in mark_rodata_ro()
1395 * Any PMD which was setup after the one which covers _brk_end in mark_rodata_ro()
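
The comment above motivates the statement that follows it in mark_rodata_ro(): _brk_end is rounded up to a full PMD before the NX sweep, so no executable remainder can survive in the large page that covers it. As best as can be reconstructed from the fragments, roughly:

	/* Round up so the whole large page covering _brk_end turns NX. */
	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);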
1425 pmd_t *pmd; in kern_addr_valid() local
1446 pmd = pmd_offset(pud, addr); in kern_addr_valid()
1447 if (!pmd_present(*pmd)) in kern_addr_valid()
1450 if (pmd_large(*pmd)) in kern_addr_valid()
1451 return pfn_valid(pmd_pfn(*pmd)); in kern_addr_valid()
1453 pte = pte_offset_kernel(pmd, addr); in kern_addr_valid()
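
At the PMD step of kern_addr_valid()'s page-table walk, a large entry is validated directly through its pfn; otherwise the walk drops to the PTE level:

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return 0;

	/* A 2MiB mapping carries the pfn in the PMD itself. */
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_present(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));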
1530 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
1544 pmd_t *pmd; in vmemmap_populate_hugepages() local
1561 pmd = pmd_offset(pud, addr); in vmemmap_populate_hugepages()
1562 if (pmd_none(*pmd)) { in vmemmap_populate_hugepages()
1571 set_pmd(pmd, __pmd(pte_val(entry))); in vmemmap_populate_hugepages()
1576 pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n", in vmemmap_populate_hugepages()
1593 } else if (pmd_large(*pmd)) { in vmemmap_populate_hugepages()
1594 vmemmap_verify((pte_t *)pmd, node, addr, next); in vmemmap_populate_hugepages()
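
vmemmap_populate_hugepages() tries a 2MiB block first and only falls back to base pages when that allocation fails; the helper signatures here vary across kernel versions, so the following is an approximation:

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_buf(PMD_SIZE, node);

		if (p) {
			/* Map the 2MiB buffer with a single large PMD. */
			pte_t entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
					      PAGE_KERNEL_LARGE);

			set_pmd(pmd, __pmd(pte_val(entry)));
			continue;
		}
	} else if (pmd_large(*pmd)) {
		/* Already mapped huge: just sanity-check the entry. */
		vmemmap_verify((pte_t *)pmd, node, addr, next);
		continue;
	}
	/* Fall back to 4KiB vmemmap pages. */
	if (vmemmap_populate_basepages(addr, next, node))
		return -ENOMEM;

The pr_debug() at line 1576 reports the resulting virtual-to-physical PMD ranges for debugging.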
1637 pmd_t *pmd; in register_page_bootmem_memmap() local
1667 pmd = pmd_offset(pud, addr); in register_page_bootmem_memmap()
1668 if (pmd_none(*pmd)) in register_page_bootmem_memmap()
1670 get_page_bootmem(section_nr, pmd_page(*pmd), in register_page_bootmem_memmap()
1673 pte = pte_offset_kernel(pmd, addr); in register_page_bootmem_memmap()
1681 pmd = pmd_offset(pud, addr); in register_page_bootmem_memmap()
1682 if (pmd_none(*pmd)) in register_page_bootmem_memmap()
1686 page = pmd_page(*pmd); in register_page_bootmem_memmap()
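
register_page_bootmem_memmap() appears twice because it contains two walks over the vmemmap range: a 4KiB path (lines 1667-1673) that accounts the PMD page and then each PTE page, and a 2MiB path (lines 1681-1686) that tags every page backing the huge mapping. Approximately:

	if (!boot_cpu_has(X86_FEATURE_PSE)) {
		/* 4KiB path: account the PMD page, then the PTE page. */
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			continue;
		get_page_bootmem(section_nr, pmd_page(*pmd),
				 MIX_SECTION_INFO);

		pte = pte_offset_kernel(pmd, addr);
		if (pte_none(*pte))
			continue;
		get_page_bootmem(section_nr, pte_page(*pte),
				 SECTION_INFO);
	} else {
		/* 2MiB path: tag each page of the huge mapping. */
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			continue;

		nr_pages = 1 << get_order(PMD_SIZE);
		page = pmd_page(*pmd);
		while (nr_pages--)
			get_page_bootmem(section_nr, page++,
					 SECTION_INFO);
	}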
1698 pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n", in vmemmap_populate_print_last()