
Searched refs:virt_to_page (Results 1 – 25 of 300) sorted by relevance


/Linux-v5.4/arch/s390/kernel/
vdso.c
156 arch_set_page_dat(virt_to_page(segment_table), SEGMENT_ORDER); in vdso_alloc_per_cpu()
157 arch_set_page_dat(virt_to_page(page_table), 0); in vdso_alloc_per_cpu()
280 struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE); in vdso_init()
284 vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data); in vdso_init()
297 struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE); in vdso_init()
301 vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data); in vdso_init()
306 get_page(virt_to_page(vdso_data)); in vdso_init()
/Linux-v5.4/arch/powerpc/mm/
pgtable_64.c
110 return virt_to_page(pgd_page_vaddr(pgd)); in pgd_page()
120 return virt_to_page(pud_page_vaddr(pud)); in pud_page()
133 return virt_to_page(pmd_page_vaddr(pmd)); in pmd_page()
pgtable-frag.c
23 page = virt_to_page(pte_frag); in pte_frag_destroy()
111 struct page *page = virt_to_page(table); in pte_fragment_free()
pgtable_32.c
206 struct page *page = virt_to_page(_sinittext); in mark_initmem_nx()
227 page = virt_to_page(_stext); in mark_rodata_ro()
236 page = virt_to_page(__start_rodata); in mark_rodata_ro()
/Linux-v5.4/arch/s390/include/asm/
pgalloc.h
77 if (!pgtable_pmd_page_ctor(virt_to_page(table))) { in pmd_alloc_one()
86 pgtable_pmd_page_dtor(virt_to_page(pmd)); in pmd_free()
113 if (!pgtable_pmd_page_ctor(virt_to_page(table))) { in pgd_alloc()
124 pgtable_pmd_page_dtor(virt_to_page(pgd)); in pgd_free()
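The s390 pgalloc.h hits above (and the x86 pgtable.c hits below) show the usual pairing: when a PMD-level table page is allocated, pgtable_pmd_page_ctor() is run on the struct page obtained via virt_to_page(), and the matching dtor is run on that same struct page before the memory is freed. A minimal sketch of the pattern, assuming the generic v5.4 helpers (pgtable_pmd_page_ctor/pgtable_pmd_page_dtor, __get_free_page); the real s390 code sits on top of its own table-fragment allocator, so this is illustrative only:

    /* Kernel-context sketch of the ctor/dtor pairing, not the s390 implementation. */
    #include <linux/gfp.h>
    #include <linux/mm.h>

    static pmd_t *sketch_pmd_alloc_one(void)
    {
            pmd_t *table = (pmd_t *)__get_free_page(GFP_KERNEL);

            if (!table)
                    return NULL;
            /* Initialise the split-ptl/accounting state on the backing page. */
            if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
                    free_page((unsigned long)table);
                    return NULL;
            }
            return table;
    }

    static void sketch_pmd_free(pmd_t *pmd)
    {
            /* Tear that state down again before the page goes back to the allocator. */
            pgtable_pmd_page_dtor(virt_to_page(pmd));
            free_page((unsigned long)pmd);
    }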
/Linux-v5.4/arch/x86/mm/
pgtable.c
56 struct page *page = virt_to_page(pmd); in ___pmd_free_tlb()
73 paravirt_tlb_remove_table(tlb, virt_to_page(pud)); in ___pud_free_tlb()
80 paravirt_tlb_remove_table(tlb, virt_to_page(p4d)); in ___p4d_free_tlb()
88 struct page *page = virt_to_page(pgd); in pgd_list_add()
95 struct page *page = virt_to_page(pgd); in pgd_list_del()
108 virt_to_page(pgd)->pt_mm = mm; in pgd_set_mm()
212 pgtable_pmd_page_dtor(virt_to_page(pmds[i])); in free_pmds()
231 if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) { in preallocate_pmds()
/Linux-v5.4/arch/arm64/include/asm/
tlb.h
55 struct page *page = virt_to_page(pmdp); in __pmd_free_tlb()
66 tlb_remove_table(tlb, virt_to_page(pudp)); in __pud_free_tlb()
/Linux-v5.4/arch/s390/mm/
page-states.c
115 page = virt_to_page(pmd_val(*pmd)); in mark_kernel_pmd()
133 page = virt_to_page(pud_val(*pud)); in mark_kernel_pud()
154 page = virt_to_page(p4d_val(*p4d)); in mark_kernel_p4d()
176 page = virt_to_page(pgd_val(*pgd)); in mark_kernel_pgd()
/Linux-v5.4/arch/csky/mm/
init.c
72 ClearPageReserved(virt_to_page(addr)); in free_initmem()
73 init_page_count(virt_to_page(addr)); in free_initmem()
/Linux-v5.4/arch/m68k/include/asm/
page_no.h
26 #define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)) macro
29 #define pfn_to_page(pfn) virt_to_page(pfn_to_virt(pfn))
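The m68k nommu definition above is the simplest form of virt_to_page(): on a flat memory model there is one struct page per PAGE_SIZE of RAM starting at PAGE_OFFSET, so the lookup is plain array indexing into mem_map (the alpha mmzone.h hit further down expresses the same idea via __pa() and pfn_to_page()). A self-contained userspace toy of that arithmetic, with illustrative PAGE_OFFSET/PAGE_SHIFT values and a stand-in struct page, not kernel code:

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)
    #define PAGE_OFFSET 0x80000000UL        /* toy base of the linear mapping */

    struct page { unsigned long flags; };   /* stand-in for the kernel's struct page */

    static struct page mem_map[16];         /* one entry per toy page of "RAM" */

    #define virt_to_page(addr) \
            (mem_map + (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT))

    int main(void)
    {
            unsigned long kaddr = PAGE_OFFSET + 5 * PAGE_SIZE + 0x123;

            /* Any address inside the sixth toy page resolves to &mem_map[5]. */
            printf("page index = %td\n", virt_to_page(kaddr) - mem_map);
            return 0;
    }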
/Linux-v5.4/arch/um/kernel/skas/
mmu.c
39 *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT)); in init_stub_pte()
107 mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start); in uml_setup_stubs()
108 mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack); in uml_setup_stubs()
/Linux-v5.4/virt/kvm/arm/
mmu.c
102 put_page(virt_to_page(pmd)); in stage2_dissolve_pmd()
120 put_page(virt_to_page(pudp)); in stage2_dissolve_pud()
161 put_page(virt_to_page(pgd)); in clear_stage2_pgd_entry()
171 put_page(virt_to_page(pud)); in clear_stage2_pud_entry()
181 put_page(virt_to_page(pmd)); in clear_stage2_pmd_entry()
255 put_page(virt_to_page(pte)); in unmap_stage2_ptes()
281 put_page(virt_to_page(pmd)); in unmap_stage2_pmds()
308 put_page(virt_to_page(pud)); in unmap_stage2_puds()
453 put_page(virt_to_page(pgd)); in clear_hyp_pgd_entry()
462 put_page(virt_to_page(pud)); in clear_hyp_pud_entry()
[all …]
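The stage-2 code above leans on a refcounting idiom: every live entry in a page-table page holds a reference on that page, taken with get_page(virt_to_page(...)) when the entry is installed and dropped with put_page() when it is cleared, which is how empty tables become freeable. A heavily simplified kernel-context sketch of that pairing (the real code goes through kvm_set_pte() and per-level helpers):

    #include <linux/mm.h>

    /* Simplified sketch, not the KVM code: install/clear one PTE and keep
     * the table page's refcount in step with its populated entries. */
    static void sketch_install_pte(pte_t *ptep, pte_t new_pte)
    {
            WRITE_ONCE(*ptep, new_pte);
            get_page(virt_to_page(ptep));   /* the table page gains a reference */
    }

    static void sketch_clear_pte(pte_t *ptep)
    {
            WRITE_ONCE(*ptep, __pte(0));
            put_page(virt_to_page(ptep));   /* ...and drops it again on clear */
    }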
/Linux-v5.4/arch/arm/kernel/
vdso.c
205 vdso_data_page = virt_to_page(vdso_data); in vdso_init()
211 page = virt_to_page(vdso_start + i * PAGE_SIZE); in vdso_init()
337 flush_dcache_page(virt_to_page(vdso_data)); in update_vsyscall()
344 flush_dcache_page(virt_to_page(vdso_data)); in update_vsyscall_tz()
/Linux-v5.4/arch/parisc/mm/
ioremap.c
60 for (page = virt_to_page(t_addr); in __ioremap()
61 page <= virt_to_page(t_end); page++) { in __ioremap()
/Linux-v5.4/arch/riscv/kernel/
vdso.c
46 pg = virt_to_page(vdso_start + (i << PAGE_SHIFT)); in vdso_init()
49 vdso_pagelist[i] = virt_to_page(vdso_data); in vdso_init()
/Linux-v5.4/arch/nds32/mm/
mm-nds32.c
35 inc_zone_page_state(virt_to_page((unsigned long *)new_pgd), in pgd_alloc()
60 dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE); in pgd_free()
/Linux-v5.4/arch/arc/include/asm/
pgalloc.h
110 page = virt_to_page(pte_pg); in pte_alloc_one()
126 pgtable_pte_page_dtor(virt_to_page(ptep)); in pte_free()
/Linux-v5.4/fs/ubifs/
crypto.c
42 err = fscrypt_encrypt_block_inplace(inode, virt_to_page(p), pad_len, in ubifs_encrypt()
67 err = fscrypt_decrypt_block_inplace(inode, virt_to_page(&dn->data), in ubifs_decrypt()
/Linux-v5.4/arch/xtensa/mm/
kasan_init.c
28 mk_pte(virt_to_page(kasan_early_shadow_page), in kasan_early_init()
95 mk_pte(virt_to_page(kasan_early_shadow_page), in kasan_init()
/Linux-v5.4/arch/alpha/include/asm/
mmzone.h
73 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) macro
96 __xx = virt_to_page(kvirt); \
/Linux-v5.4/mm/kasan/
init.c
40 return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d)); in kasan_p4d_table()
52 return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud)); in kasan_pud_table()
64 return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd)); in kasan_pmd_table()
76 return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte)); in kasan_pte_table()
81 return pte_page(pte) == virt_to_page(lm_alias(kasan_early_shadow_page)); in kasan_early_shadow_page_entry()
/Linux-v5.4/mm/
z3fold.c
443 struct page *page = virt_to_page(zhdr); in __release_z3fold_page()
498 struct page *page = virt_to_page(zhdr); in free_pages_work()
566 struct page *page = virt_to_page(zhdr); in z3fold_compact_page()
616 page = virt_to_page(zhdr); in do_compact_page()
692 page = virt_to_page(zhdr); in __z3fold_alloc()
736 page = virt_to_page(zhdr); in __z3fold_alloc()
905 page = virt_to_page(zhdr); in z3fold_alloc()
925 page = virt_to_page(zhdr); in z3fold_alloc()
1004 page = virt_to_page(zhdr); in z3fold_free()
1269 page = virt_to_page(zhdr); in z3fold_map()
[all …]
/Linux-v5.4/kernel/events/
ring_buffer.c
595 struct page *page = virt_to_page(rb->aux_pages[idx]); in rb_free_aux_page()
684 struct page *page = virt_to_page(rb->aux_pages[0]); in rb_alloc_aux()
739 return virt_to_page(rb->user_page); in __perf_mmap_to_page()
741 return virt_to_page(rb->data_pages[pgoff - 1]); in __perf_mmap_to_page()
804 struct page *page = virt_to_page((void *)addr); in perf_mmap_free_page()
916 return virt_to_page(rb->aux_pages[aux_pgoff]); in perf_mmap_to_page()
/Linux-v5.4/arch/sparc/kernel/
leon_smp.c
250 free_reserved_page(virt_to_page(&trapbase_cpu1)); in leon_smp_done()
253 free_reserved_page(virt_to_page(&trapbase_cpu2)); in leon_smp_done()
256 free_reserved_page(virt_to_page(&trapbase_cpu3)); in leon_smp_done()
/Linux-v5.4/arch/nios2/mm/
ioremap.c
136 for (page = virt_to_page(t_addr); in __ioremap()
137 page <= virt_to_page(t_end); page++) in __ioremap()
