| /Linux-v5.10/arch/x86/platform/efi/ |
| D | efi_64.c |
|    83  pgd = efi_pgd + pgd_index(EFI_VA_END);  in efi_alloc_page_tables()
|   126  MAYBE_BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));  in efi_sync_low_kernel_mappings()
|   130  pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);  in efi_sync_low_kernel_mappings()
|   133  num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);  in efi_sync_low_kernel_mappings()
|   143  pgd_efi = efi_pgd + pgd_index(EFI_VA_END);  in efi_sync_low_kernel_mappings()
|
| /Linux-v5.10/arch/x86/power/ |
| D | hibernate_64.c |
|    77  set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);  in set_up_temporary_text_mapping()
|    81  set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);  in set_up_temporary_text_mapping()
|
| D | hibernate_32.c |
|    89  pgd_idx = pgd_index(PAGE_OFFSET);  in resume_physical_mapping_init()
|   151  pgd = pgd_base + pgd_index(restore_jump_address);  in set_up_temporary_text_mapping()
|
| /Linux-v5.10/arch/x86/mm/ |
| D | mem_encrypt_identity.c |
|   100  pgd_p = ppd->pgd + pgd_index(ppd->vaddr);  in sme_clear_pgd()
|   112  pgd = ppd->pgd + pgd_index(ppd->vaddr);  in sme_prepare_pgd()
|   408  decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);  in sme_encrypt_kernel()
|   412  check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);  in sme_encrypt_kernel()
|
| D | init_32.c |
|   112  int pgd_idx = pgd_index(vaddr);  in populate_extra_pmd()
|   141  pgd_idx = pgd_index(vaddr);  in page_table_range_init_count()
|   223  pgd_idx = pgd_index(vaddr);  in page_table_range_init()
|   300  pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);  in kernel_physical_mapping_init()
|   477  pgd = base + pgd_index(va);  in native_pagetable_init()
|
| D | pgtable_32.c | 35 pgd = swapper_pg_dir + pgd_index(vaddr); in set_pte_vaddr()
|
| D | fault.c |
|   152  unsigned index = pgd_index(address);  in vmalloc_sync_one()
|   291  pgd_t *pgd = &base[pgd_index(address)];  in dump_pagetable()
|   356  pgd_t *pgd = base + pgd_index(address);  in dump_pagetable()
|   518  pgd += pgd_index(address);  in show_fault_oops()
|  1044  pgd = init_mm.pgd + pgd_index(address);  in spurious_kernel_fault()
|
| D | kasan_init_64.c |
|   240  pgd += pgd_index(addr);  in kasan_map_early_shadow()
|   346  set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],  in kasan_init()
|
| /Linux-v5.10/arch/arm/mm/ |
| D | pgd.c |
|    57  new_p4d = p4d_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),  in pgd_alloc()
|   139  pgd = pgd_base + pgd_index(0);  in pgd_free()
|
| D | idmap.c | 96 pgd += pgd_index(addr); in identity_mapping_add()
|
| D | ioremap.c |
|   124  sizeof(pgd_t) * (pgd_index(VMALLOC_END) -  in __check_vmalloc_seq()
|   125  pgd_index(VMALLOC_START)));  in __check_vmalloc_seq()
|
| /Linux-v5.10/arch/mips/kvm/ |
| D | mmu.c |
|   111  pgd += pgd_index(addr);  in kvm_mips_walk_pgd()
|   234  int i_min = pgd_index(start_gpa);  in kvm_mips_flush_gpa_pgd()
|   235  int i_max = pgd_index(end_gpa);  in kvm_mips_flush_gpa_pgd()
|   356  int i_min = pgd_index(start); \
|   357  int i_max = pgd_index(end); \
|   895  int i_min = pgd_index(start_gva);  in kvm_mips_flush_gva_pgd()
|   896  int i_max = pgd_index(end_gva);  in kvm_mips_flush_gva_pgd()
|
| /Linux-v5.10/tools/testing/selftests/kvm/lib/aarch64/ |
| D | processor.c |
|    24  static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)  in pgd_index() function
|   108  ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;  in _virt_pg_map()
|   155  ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;  in addr_gva2gpa()
|
| /Linux-v5.10/arch/nds32/kernel/ |
| D | pm.c | 23 L1_PPTB_mskBASE)) + pgd_index((unsigned int)cpu_resume); in nds32_suspend2ram()
|
| /Linux-v5.10/arch/mips/mm/ |
| D | pgtable-32.c | 84 pgd = swapper_pg_dir + pgd_index(vaddr); in pagetable_init()
|
| /Linux-v5.10/arch/ia64/include/asm/ |
| D | pgtable.h |
|   360  pgd_index (unsigned long address)  in pgd_index() function
|   367  #define pgd_index pgd_index  macro
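The arch/ia64 header above is one of the few places in this listing that supplies its own pgd_index() rather than using the generic helper. For orientation, the sketch below is not kernel source: it only mirrors the arithmetic of the common definition in include/linux/pgtable.h, (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1), with assumed x86-64 4-level paging constants and a sample direct-map address.

```c
/*
 * Illustrative sketch only (not kernel code). The generic pgd_index()
 * in include/linux/pgtable.h is essentially:
 *     #define pgd_index(a)  (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
 * Constants and the sample address below assume x86-64 with 4-level paging.
 */
#include <stdio.h>
#include <stdint.h>

#define PGDIR_SHIFT   39   /* assumed: each top-level entry covers 2^39 bytes (512 GiB) */
#define PTRS_PER_PGD  512  /* assumed: 512 eight-byte entries fill one 4 KiB table page */

static unsigned long pgd_index(uint64_t address)
{
	/* Select the top-level page-directory slot that maps this address. */
	return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}

int main(void)
{
	/* Assumed sample value: default x86-64 direct-map base (PAGE_OFFSET). */
	uint64_t page_offset = 0xffff888000000000ULL;

	/* Mirrors uses above such as "pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET)". */
	printf("pgd_index(%#llx) = %lu\n",
	       (unsigned long long)page_offset, pgd_index(page_offset));
	return 0;
}
```

Every hit in this listing is some variant of that computation: derive the top-level slot for a virtual address, then use it to index a pgd array.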
|
| /Linux-v5.10/arch/csky/mm/ |
| D | highmem.c | 93 pgd = swapper_pg_dir + pgd_index(vaddr); in kmap_pages_init()
|
| D | fault.c | 81 int offset = pgd_index(address); in do_page_fault()
|
| D | init.c | 160 i = pgd_index(vaddr); in fixrange_init()
|
| /Linux-v5.10/arch/powerpc/mm/ |
| D | pgtable.c |
|   296  pgd = mm->pgd + pgd_index(addr);  in assert_pte_locked()
|   361  pgdp = pgdir + pgd_index(ea);  in __find_linux_pte()
|
| /Linux-v5.10/arch/sh/mm/ |
| D | fault.c |
|    51  pgd += pgd_index(addr);  in show_pte()
|   122  unsigned index = pgd_index(address);  in vmalloc_sync_one()
|
| /Linux-v5.10/arch/x86/xen/ |
| D | mmu_pv.c |
|   448  if (offset < pgd_index(USER_LIMIT)) {  in xen_get_user_pgd()
|   613  hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);  in __xen_pgd_walk()
|   614  hole_high = pgd_index(GUARD_HOLE_END_ADDR);  in __xen_pgd_walk()
|   616  nr = pgd_index(limit) + 1;  in __xen_pgd_walk()
|  1395  user_pgd[pgd_index(VSYSCALL_ADDR)] =  in xen_pgd_alloc()
|  1713  l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);  in xen_setup_kernel_pagetable()
|  1742  i = pgd_index(xen_start_info->mfn_list);  in xen_setup_kernel_pagetable()
|  1743  if (i && i < pgd_index(__START_KERNEL_map))  in xen_setup_kernel_pagetable()
|  1821  pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *  in xen_early_virt_to_phys()
|
| /Linux-v5.10/arch/riscv/mm/ |
| D | kasan_init.c | 17 pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START); in kasan_early_init()
|
| /Linux-v5.10/arch/x86/kernel/ |
| D | head64.c |
|   176  p = pgd + pgd_index(__START_KERNEL_map);  in __startup_64()
|   336  pgd_p = &early_top_pgt[pgd_index(address)].pgd;  in __early_make_pgtable()
|
| /Linux-v5.10/arch/um/kernel/ |
| D | mem.c | 106 i = pgd_index(vaddr); in fixrange_init()
|