/Linux-v6.1/include/trace/events/ |
D | fs_dax.h
      65  struct page *zero_page,
      67  TP_ARGS(inode, vmf, zero_page, radix_entry),
      72  __field(struct page *, zero_page)
      81  __entry->zero_page = zero_page;
      91  __entry->zero_page,
      99  struct page *zero_page, void *radix_entry), \
     100  TP_ARGS(inode, vmf, zero_page, radix_entry))
|
/Linux-v6.1/arch/arm/mm/ |
D | nommu.c
     158  void *zero_page;  in paging_init() local
     164  zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);  in paging_init()
     165  if (!zero_page)  in paging_init()
     171  empty_zero_page = virt_to_page(zero_page);  in paging_init()
|
D | mmu.c
    1763  void *zero_page;  in paging_init() local
    1786  zero_page = early_alloc(PAGE_SIZE);  in paging_init()
    1790  empty_zero_page = virt_to_page(zero_page);  in paging_init()
|
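Both paging_init() variants above follow the same boot-time pattern: allocate one zero-filled page very early, then publish its struct page as empty_zero_page so that later read-only anonymous mappings can share it. A minimal, hedged sketch of that pattern (error handling and the MMU/noMMU differences are stripped; setup_zero_page() is a name of my own choosing):

/*
 * Minimal sketch of the boot-time zero-page setup shown above.
 * setup_zero_page() is a hypothetical helper name; memblock_alloc()
 * already returns zeroed memory at this stage of boot.
 */
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>

struct page *empty_zero_page;

static void __init setup_zero_page(void)
{
        void *zero_page;

        zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!zero_page)
                panic("%s: failed to allocate the zero page\n", __func__);

        /* later users reference the page through its struct page */
        empty_zero_page = virt_to_page(zero_page);
}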
/Linux-v6.1/arch/arm64/kernel/ |
D | hibernate.c
     403  void *zero_page;  in swsusp_arch_resume() local
     428  zero_page = (void *)get_safe_page(GFP_ATOMIC);  in swsusp_arch_resume()
     429  if (!zero_page) {  in swsusp_arch_resume()
     465  resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));  in swsusp_arch_resume()
|
D | machine_kexec.c
     152  kimage->arch.zero_page = __pa_symbol(empty_zero_page);  in machine_kexec_post_load()
|
D | asm-offsets.c
     177  DEFINE(KIMAGE_ARCH_ZERO_PAGE, offsetof(struct kimage, arch.zero_page));  in main()
|
/Linux-v6.1/mm/ |
D | huge_memory.c
     155  struct page *zero_page;  in get_huge_zero_page() local
     160  zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,  in get_huge_zero_page()
     162  if (!zero_page) {  in get_huge_zero_page()
     167  if (cmpxchg(&huge_zero_page, NULL, zero_page)) {  in get_huge_zero_page()
     169  __free_pages(zero_page, compound_order(zero_page));  in get_huge_zero_page()
     172  WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));  in get_huge_zero_page()
     221  struct page *zero_page = xchg(&huge_zero_page, NULL);  in shrink_huge_zero_page_scan() local
     222  BUG_ON(zero_page == NULL);  in shrink_huge_zero_page_scan()
     224  __free_pages(zero_page, compound_order(zero_page));  in shrink_huge_zero_page_scan()
     767  struct page *zero_page)  in set_huge_zero_page() argument
     [all …]
|
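The get_huge_zero_page() hits show the lazy, race-tolerant publication of the huge zero page: allocate a zeroed THP-sized page, try to install it with cmpxchg(), and free the local copy if another CPU won the race; shrink_huge_zero_page_scan() later reclaims it with xchg(). A compressed sketch of the publish-or-free step (reference counting, vmstat accounting and the shrinker are omitted; the HPAGE_PMD_ORDER argument is not visible in the truncated hit and is filled in as an assumption):

/*
 * Sketch of the cmpxchg publication pattern from get_huge_zero_page().
 * Returns true when a huge zero page is available after the call.
 * HPAGE_PMD_ORDER is an assumption for the elided order argument.
 */
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/mm.h>

static struct page *huge_zero_page;
static unsigned long huge_zero_pfn = ~0UL;

static bool get_huge_zero_page_sketch(void)
{
        struct page *zero_page;

        zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
                                HPAGE_PMD_ORDER);
        if (!zero_page)
                return false;

        /* another CPU may have installed its page first; drop ours */
        if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
                __free_pages(zero_page, compound_order(zero_page));
                return true;
        }
        WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
        return true;
}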
/Linux-v6.1/arch/arm64/include/asm/ |
D | kexec.h
     122  phys_addr_t zero_page;  member
|
D | assembler.h
     501  .macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
     502  phys_to_ttbr \tmp, \zero_page
|
/Linux-v6.1/drivers/dma/ |
D | bcm2835-dma.c
      50  dma_addr_t zero_page;  member
     750  if (buf_addr == od->zero_page && !c->is_lite_channel)  in bcm2835_dma_prep_dma_cyclic()
     852  dma_unmap_page_attrs(od->ddev.dev, od->zero_page, PAGE_SIZE,  in bcm2835_dma_free()
     937  od->zero_page = dma_map_page_attrs(od->ddev.dev, ZERO_PAGE(0), 0,  in bcm2835_dma_probe()
     940  if (dma_mapping_error(od->ddev.dev, od->zero_page)) {  in bcm2835_dma_probe()
|
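bcm2835-dma maps ZERO_PAGE(0) once at probe time and keeps the resulting dma_addr_t around, so cyclic transfers can point an unused source address at a page of zeros without allocating anything; the mapping is released again in bcm2835_dma_free(). A hedged sketch of that map/unmap pairing (the DMA_TO_DEVICE direction and DMA_ATTR_SKIP_CPU_SYNC attribute are assumptions about the driver's real arguments, not copied values):

/*
 * Sketch of the ZERO_PAGE(0) DMA-mapping pattern from the driver above.
 * Direction and attrs are illustrative assumptions, not copied values.
 */
#include <linux/dma-mapping.h>
#include <linux/mm.h>

static dma_addr_t map_zero_page(struct device *dev)
{
        dma_addr_t zero_page;

        zero_page = dma_map_page_attrs(dev, ZERO_PAGE(0), 0, PAGE_SIZE,
                                       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
        if (dma_mapping_error(dev, zero_page))
                return DMA_MAPPING_ERROR;

        return zero_page;
}

static void unmap_zero_page(struct device *dev, dma_addr_t zero_page)
{
        dma_unmap_page_attrs(dev, zero_page, PAGE_SIZE,
                             DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}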
/Linux-v6.1/fs/ |
D | dax.c
    1173  struct page *zero_page;  in dax_pmd_load_hole() local
    1178  zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);  in dax_pmd_load_hole()
    1180  if (unlikely(!zero_page))  in dax_pmd_load_hole()
    1183  pfn = page_to_pfn_t(zero_page);  in dax_pmd_load_hole()
    1203  pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);  in dax_pmd_load_hole()
    1207  trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);  in dax_pmd_load_hole()
    1213  trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);  in dax_pmd_load_hole()
|
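dax_pmd_load_hole() services a huge-page read fault over a file hole by mapping the shared huge zero page instead of allocating storage, and falls back to PTE-sized faults when that is not possible. A condensed, hedged sketch of just the entry construction; the pmd_mkhuge()/set_pmd_at() steps and the PMD_MASK rounding are assumptions drawn from the surrounding kernel code rather than from the hits listed here, and locking, mapping-entry bookkeeping and the tracepoints are omitted:

/*
 * Condensed sketch of backing a PMD-sized hole with the huge zero page.
 * Locking, the mapping-entry bookkeeping and pgtable deposit handling
 * from the real dax_pmd_load_hole() are intentionally left out.
 */
#include <linux/huge_mm.h>
#include <linux/mm.h>

static vm_fault_t dax_pmd_zero_hole_sketch(struct vm_fault *vmf)
{
        unsigned long pmd_addr = vmf->address & PMD_MASK;
        struct page *zero_page;
        pmd_t pmd_entry;

        zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
        if (unlikely(!zero_page))
                return VM_FAULT_FALLBACK;       /* retry with PTE-sized faults */

        pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
        pmd_entry = pmd_mkhuge(pmd_entry);
        set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);

        return VM_FAULT_NOPAGE;
}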
/Linux-v6.1/include/target/ |
D | target_core_fabric.h
     215  u32 length, bool zero_page, bool chainable);
|
/Linux-v6.1/drivers/nvdimm/ |
D | pfn_devs.c
     366  void *zero_page = page_address(ZERO_PAGE(0));  in nd_pfn_clear_memmap_errors() local
     404  rc = nvdimm_write_bytes(ndns, nsoff, zero_page,  in nd_pfn_clear_memmap_errors()
|
D | btt.c
     510  void *zero_page = page_address(ZERO_PAGE(0));  in arena_clear_freelist_error() local
     520  ret = arena_write_bytes(arena, nsoff, zero_page,  in arena_clear_freelist_error()
|
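Both nvdimm hits use the same trick: page_address(ZERO_PAGE(0)) gives a kernel-virtual pointer to a page that is guaranteed to be all zeros, so it can be fed straight into a byte-writing helper to clear media without allocating a scratch buffer. A hedged sketch of that pattern, with a hypothetical write_fn callback standing in for nvdimm_write_bytes()/arena_write_bytes():

/*
 * Sketch of the "borrow the zero page as a zero buffer" pattern above.
 * write_fn is a hypothetical stand-in for helpers such as
 * nvdimm_write_bytes() or arena_write_bytes(), not a kernel API.
 */
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/types.h>

typedef int (*write_fn_t)(void *ctx, u64 off, const void *buf, size_t len);

static int clear_range_with_zero_page(void *ctx, u64 off, size_t len,
                                      write_fn_t write_fn)
{
        void *zero_page = page_address(ZERO_PAGE(0));
        int rc = 0;

        while (len && !rc) {
                size_t chunk = min_t(size_t, len, PAGE_SIZE);

                rc = write_fn(ctx, off, zero_page, chunk);
                off += chunk;
                len -= chunk;
        }

        return rc;
}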
/Linux-v6.1/drivers/target/ |
D | target_core_transport.c
    2729  bool zero_page, bool chainable)  in target_alloc_sgl() argument
    2731  gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);  in target_alloc_sgl()
|
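In target_alloc_sgl() the zero_page argument only decides whether __GFP_ZERO is added to the allocation mask, so callers that need pre-zeroed scatterlist pages pay for the clearing only when they request it. A hedged sketch of that flag-to-gfp translation, reduced to a single page allocation:

/*
 * Sketch of the zero_page -> __GFP_ZERO translation shown above.
 * The real target_alloc_sgl() builds a whole scatterlist; this is
 * reduced to one page allocation for illustration.
 */
#include <linux/gfp.h>
#include <linux/types.h>

static struct page *alloc_sgl_page_sketch(bool zero_page)
{
        gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);

        return alloc_page(gfp);
}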
/Linux-v6.1/virt/kvm/ |
D | kvm_main.c
    3281  const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));  in kvm_clear_guest() local
    3288  ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, len);  in kvm_clear_guest()
|
/Linux-v6.1/arch/x86/kvm/vmx/ |
D | vmx.c
    3702  const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));  in init_rmode_tss() local
    3707  if (__copy_to_user(ua + PAGE_SIZE * i, zero_page, PAGE_SIZE))  in init_rmode_tss()
|
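The two KVM hits share one recipe: __va(page_to_phys(ZERO_PAGE(0))) yields a read-only page of zeros that is copied out repeatedly, into guest memory via kvm_write_guest_page() in kvm_clear_guest() and into a user mapping via __copy_to_user() in init_rmode_tss(). A hedged sketch of the guest-clearing loop with simplified offset handling (the real kvm_clear_guest() also copes with an unaligned start and a partial final page):

/*
 * Sketch of the zero-page-backed clearing loop in kvm_clear_guest().
 * Simplified: assumes gpa is page aligned and len is a multiple of
 * PAGE_SIZE, which the real function does not require.
 */
#include <linux/kvm_host.h>
#include <linux/mm.h>

static int clear_guest_sketch(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
        const void *zero_page = (const void *)__va(page_to_phys(ZERO_PAGE(0)));
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int ret;

        while (len) {
                ret = kvm_write_guest_page(kvm, gfn, zero_page, 0, PAGE_SIZE);
                if (ret)
                        return ret;
                ++gfn;
                len -= PAGE_SIZE;
        }

        return 0;
}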