/Linux-v5.15/mm/
khugepaged.c (all hits in collapse_huge_page())
    1068  struct page *new_page;   (local)
    1087  new_page = khugepaged_alloc_page(hpage, gfp, node);
    1088  if (!new_page) {
    1093  if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
    1097  count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
    1185  __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
    1194  __SetPageUptodate(new_page);
    1197  _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
    1202  page_add_new_anon_rmap(new_page, vma, address, true);
    1203  lru_cache_add_inactive_or_unevictable(new_page, vma);
    [all …]
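The collapse path above follows a common mm pattern: allocate the destination page, charge it to the memcg before anything becomes visible, and only then copy and map. A minimal sketch of that first step, reusing khugepaged.c's own static helper khugepaged_alloc_page(); everything else is the v5.15 API as shown in the hits:

    #include <linux/gfp.h>
    #include <linux/memcontrol.h>
    #include <linux/mm.h>

    static struct page *collapse_alloc_charged(struct page **hpage, gfp_t gfp,
                                               int node, struct mm_struct *mm)
    {
            struct page *new_page = khugepaged_alloc_page(hpage, gfp, node);

            if (!new_page)
                    return NULL;            /* collapse is abandoned */
            if (unlikely(mem_cgroup_charge(new_page, mm, gfp)))
                    return NULL;            /* over the memcg limit: back out */
            count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
            return new_page;
    }

Once the copy succeeds, the page is published via mk_huge_pmd() and page_add_new_anon_rmap() with compound = true, as lines 1197-1202 show.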
hugetlb.c (all hits in alloc_and_dissolve_huge_page())
    2608  struct page *new_page;   (local)
    2619  new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL);
    2620  if (!new_page)
    2632  SetHPageTemporary(new_page);
    2633  if (!put_page_testzero(new_page)) {
    2640  ClearHPageTemporary(new_page);
    2642  __prep_new_huge_page(h, new_page);
    2685  enqueue_huge_page(h, new_page);
    2699  set_page_refcounted(new_page);
    2700  update_and_free_page(h, new_page, false);
    [all …]
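The notable idiom is the reference freeze at lines 2632-2642: the fresh page's single reference is dropped so it can be initialized at refcount zero, with HPageTemporary set first so no other path touches it in the window. A hedged reading of those hits:

    SetHPageTemporary(new_page);        /* keep other paths off the page */
    if (!put_page_testzero(new_page)) {
            /* someone else still held a reference: give up the replacement */
    }
    ClearHPageTemporary(new_page);      /* frozen at refcount 0, safe to prep */
    __prep_new_huge_page(h, new_page);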
memory.c (hits 888-922 in copy_present_page(); 987 in page_copy_prealloc())
    888  struct page *new_page;   (local)
    906  new_page = *prealloc;
    907  if (!new_page)
    915  copy_user_highpage(new_page, page, addr, src_vma);
    916  __SetPageUptodate(new_page);
    917  page_add_new_anon_rmap(new_page, dst_vma, addr, false);
    918  lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
    919  rss[mm_counter(new_page)]++;
    922  pte = mk_pte(new_page, dst_vma->vm_page_prot);
    987  struct page *new_page;   (local)
    [all …]
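These are the fork-time COW copy steps: copy the contents, mark the page up to date, hook it into the anon rmap and LRU, then build the PTE. A sketch assuming the caller holds the page-table lock and supplies the preallocated page, as copy_present_page() does (the real code copies through src_vma and maps through dst_vma):

    #include <linux/highmem.h>
    #include <linux/mm.h>
    #include <linux/rmap.h>
    #include <linux/swap.h>

    static void install_copy(struct page *new_page, struct page *src,
                             struct vm_area_struct *vma, unsigned long addr,
                             pte_t *dst_pte)
    {
            copy_user_highpage(new_page, src, addr, vma);
            __SetPageUptodate(new_page);
            page_add_new_anon_rmap(new_page, vma, addr, false);
            lru_cache_add_inactive_or_unevictable(new_page, vma);
            set_pte_at(vma->vm_mm, addr, dst_pte,
                       mk_pte(new_page, vma->vm_page_prot));
    }

The real function additionally bumps the rss counter (line 919) and makes the PTE dirty and writable before installing it.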
ksm.c (all hits in ksm_might_need_to_copy())
    2565  struct page *new_page;   (local)
    2580  new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
    2581  if (new_page && mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL)) {
    2582  put_page(new_page);
    2583  new_page = NULL;
    2585  if (new_page) {
    2586  copy_user_highpage(new_page, page, address, vma);
    2588  SetPageDirty(new_page);
    2589  __SetPageUptodate(new_page);
    2590  __SetPageLocked(new_page);
    [all …]
slab.h
    249  gfp_t gfp, bool new_page);
    379  bool new_page)   (in memcg_alloc_page_obj_cgroups(), argument)
migrate.c (all hits in alloc_migration_target())
    1610  struct page *new_page = NULL;   (local)
    1640  new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
    1642  if (new_page && PageTransHuge(new_page))
    1643  prep_transhuge_page(new_page);
    1645  return new_page;
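alloc_migration_target() is short enough that the hits nearly reconstruct it; the point is that the allocation is order-aware, and a transparent huge page needs prep_transhuge_page() before migration can use it. A sketch of that tail:

    #include <linux/gfp.h>
    #include <linux/huge_mm.h>
    #include <linux/mm.h>
    #include <linux/nodemask.h>

    static struct page *alloc_target(gfp_t gfp_mask, unsigned int order,
                                     int nid, nodemask_t *nmask)
    {
            struct page *new_page = __alloc_pages(gfp_mask, order, nid, nmask);

            if (new_page && PageTransHuge(new_page))
                    prep_transhuge_page(new_page);  /* deferred-split setup */
            return new_page;
    }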
/Linux-v5.15/arch/s390/mm/
vmem.c (hits 173-177 in modify_pte_table(); 249-260 in modify_pmd_table())
    173  void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);   (local)
    175  if (!new_page)
    177  pte_val(*pte) = __pa(new_page) | prot;
    249  void *new_page;   (local)
    258  new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
    259  if (new_page) {
    260  pmd_val(*pmd) = __pa(new_page) | prot;
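Two granularities are visible: modify_pmd_table() first tries to back the vmemmap with one PMD_SIZE block, and modify_pte_table() is the PAGE_SIZE fallback. Condensed (a sketch, not the full table walk; s390 assigns pte_val()/pmd_val() directly, as the hits show):

    void *new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);

    if (new_page) {
            pmd_val(*pmd) = __pa(new_page) | prot;  /* one large mapping */
    } else {
            /* fall back to PAGE_SIZE blocks installed PTE by PTE */
    }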
/Linux-v5.15/kernel/events/
uprobes.c (hits 155-201 in __replace_page(); 468 in uprobe_write_opcode())
    155  struct page *old_page, struct page *new_page)   (argument)
    169  if (new_page) {
    170  err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL);
    184  if (new_page) {
    185  get_page(new_page);
    186  page_add_new_anon_rmap(new_page, vma, addr, false);
    187  lru_cache_add_inactive_or_unevictable(new_page, vma);
    199  if (new_page)
    201  mk_pte(new_page, vma->vm_page_prot));
    468  struct page *old_page, *new_page;   (local)
    [all …]
/Linux-v5.15/fs/f2fs/
namei.c (hits 932-1103 in f2fs_rename(); 1122-1159 in f2fs_cross_rename())
    932  struct page *old_page, *new_page = NULL;   (local)
    1007  &new_page);
    1009  if (IS_ERR(new_page))
    1010  err = PTR_ERR(new_page);
    1022  f2fs_set_link(new_dir, new_entry, new_page, old_inode);
    1023  new_page = NULL;
    1103  f2fs_put_page(new_page, 0);
    1122  struct page *old_page, *new_page;   (local)
    1157  new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page);
    1159  if (IS_ERR(new_page))
    [all …]
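f2fs_find_entry() hands the backing page out through new_page and encodes hard lookup failures as an ERR_PTR in that pointer, hence the IS_ERR()/PTR_ERR() pair above. A hedged sketch of the overwrite path (the bail-out label is an assumption, and clearing new_page after f2fs_set_link() suggests the helper consumes the page reference):

    new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page);
    if (!new_entry) {
            if (IS_ERR(new_page))
                    err = PTR_ERR(new_page);
            goto out;                       /* assumed error label */
    }
    f2fs_set_link(new_dir, new_entry, new_page, old_inode);
    new_page = NULL;                        /* reference now owned elsewhere */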
/Linux-v5.15/fs/sysv/
namei.c (all hits in sysv_rename())
    219  struct page * new_page;   (local)
    227  new_de = sysv_find_entry(new_dentry, &new_page);
    230  sysv_set_link(new_de, new_page, old_inode);
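This is the minimal-filesystem overwrite path; the minix, ext2, ufs, and nilfs2 entries below repeat it almost verbatim. Find the victim's directory entry (which also returns its backing page) and repoint it at the source inode. A sketch using the sysv helpers shown above, error handling elided:

    struct page *new_page;
    struct sysv_dir_entry *new_de;

    new_de = sysv_find_entry(new_dentry, &new_page);    /* entry + its page */
    if (new_de)
            sysv_set_link(new_de, new_page, old_inode); /* repoint at source */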
/Linux-v5.15/fs/minix/
namei.c (all hits in minix_rename())
    215  struct page * new_page;   (local)
    223  new_de = minix_find_entry(new_dentry, &new_page);
    226  minix_set_link(new_de, new_page, old_inode);
/Linux-v5.15/fs/ext2/
namei.c (all hits in ext2_rename())
    366  struct page *new_page;   (local)
    374  &new_page, &page_addr);
    379  ext2_set_link(new_dir, new_de, new_page, page_addr, old_inode, 1);
    380  ext2_put_page(new_page, page_addr);
/Linux-v5.15/fs/ufs/
namei.c (all hits in ufs_rename())
    273  struct page *new_page;   (local)
    281  new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page);
    284  ufs_set_link(new_dir, new_de, new_page, old_inode, 1);
/Linux-v5.15/fs/ubifs/
budget.c
    367  znodes = req->new_ino + (req->new_page << UBIFS_BLOCKS_PER_PAGE_SHIFT) +   (in calc_idx_growth())
    384  if (req->new_page)   (in calc_data_growth())
    430  ubifs_assert(c, req->new_page <= 1);   (in ubifs_budget_space())
    517  ubifs_assert(c, req->new_page <= 1);   (in ubifs_release_budget())
file.c
    198  struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };   (in release_new_page_budget())
    224  struct ubifs_budget_req req = { .new_page = 1 };   (in write_begin_slow())
    365  req.new_page = 1;   (in allocate_budget())
    1514  struct ubifs_budget_req req = { .new_page = 1 };   (in ubifs_vm_page_mkwrite())
ubifs.h
    888  unsigned int new_page:1;   (member)
    898  unsigned int new_page;   (member)
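The budget.c, file.c, and ubifs.h hits above fit one pattern: a ubifs_budget_req declares what an operation will dirty (the assertions confirm new_page is used as a 0/1 flag, matching the ubifs.h bitfield), space is reserved before the page is touched, and the same request releases the budget if the operation backs out. A minimal sketch:

    struct ubifs_budget_req req = { .new_page = 1 };
    int err;

    err = ubifs_budget_space(c, &req);  /* may commit/GC to find room */
    if (err)
            return err;                 /* nothing dirtied yet */
    /* ... dirty the page; on a later failure: ubifs_release_budget(c, &req) */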
/Linux-v5.15/drivers/net/ethernet/ti/
cpsw.c (all hits in cpsw_rx_handler())
    346  struct page *new_page, *page = token;   (local)
    378  new_page = page;
    387  new_page = page_pool_dev_alloc_pages(pool);
    388  if (unlikely(!new_page)) {
    389  new_page = page;
    441  xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
    445  dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
    446  ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
    450  page_pool_recycle_direct(pool, new_page);
cpsw_new.c (all hits in cpsw_rx_handler())
    284  struct page *new_page, *page = token;   (local)
    322  new_page = page;
    331  new_page = page_pool_dev_alloc_pages(pool);
    332  if (unlikely(!new_page)) {
    333  new_page = page;
    385  xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
    389  dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
    390  ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
    394  page_pool_recycle_direct(pool, new_page);
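Both cpsw variants refill the RX ring identically: take a fresh page from the page_pool, fall back to recycling the just-received page if the pool is empty, and return the page to the pool if the DMA queue refuses it. Condensed from the hits above (the submit call's length argument is shown truncated, so pkt_size here is an assumption):

    new_page = page_pool_dev_alloc_pages(pool);
    if (unlikely(!new_page))
            new_page = page;            /* pool empty: re-arm with old page */

    dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
    ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
                                   pkt_size, 0);
    if (ret < 0)
            page_pool_recycle_direct(pool, new_page);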
/Linux-v5.15/fs/nilfs2/
namei.c (all hits in nilfs_rename())
    377  struct page *new_page;   (local)
    385  new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page);
    388  nilfs_set_link(new_dir, new_de, new_page, old_inode);
/Linux-v5.15/drivers/net/ethernet/microsoft/mana/
mana_en.c (all hits in mana_process_rx_cqe())
    974  struct page *new_page;   (local)
    1016  new_page = alloc_page(GFP_ATOMIC);
    1018  if (new_page) {
    1019  da = dma_map_page(dev, new_page, 0, rxq->datasize,
    1023  __free_page(new_page);
    1024  new_page = NULL;
    1028  new_buf = new_page ? page_to_virt(new_page) : NULL;
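The refill allocation runs in NAPI context, so it is GFP_ATOMIC, and the DMA mapping must be checked before use; on either failure new_page ends up NULL and the old buffer is kept. Sketch (DMA_FROM_DEVICE is an assumption based on this being an RX path):

    struct page *new_page = alloc_page(GFP_ATOMIC);
    dma_addr_t da;

    if (new_page) {
            da = dma_map_page(dev, new_page, 0, rxq->datasize,
                              DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, da)) {
                    __free_page(new_page); /* unusable without a mapping */
                    new_page = NULL;
            }
    }
    new_buf = new_page ? page_to_virt(new_page) : NULL;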
/Linux-v5.15/drivers/tty/serial/
icom.c (all hits in load_code())
    348  unsigned char *new_page = NULL;   (local)
    421  new_page = dma_alloc_coherent(&dev->dev, 4096, &temp_pci, GFP_KERNEL);
    423  if (!new_page) {
    443  new_page[index] = fw->data[index];
    500  if (new_page != NULL)
    501  dma_free_coherent(&dev->dev, 4096, new_page, temp_pci);
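The firmware staging buffer is a single DMA-coherent page, filled byte for byte from the firmware blob and freed with the same size and DMA handle it was allocated with. Sketch (the error label is an assumption):

    dma_addr_t temp_pci;
    unsigned char *new_page;
    int index;

    new_page = dma_alloc_coherent(&dev->dev, 4096, &temp_pci, GFP_KERNEL);
    if (!new_page)
            goto error;

    for (index = 0; index < fw->size; index++)
            new_page[index] = fw->data[index];  /* stage the image */
    /* ... hand temp_pci to the adapter ... */
    dma_free_coherent(&dev->dev, 4096, new_page, temp_pci);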
/Linux-v5.15/fs/jbd2/
journal.c (all hits in jbd2_journal_write_metadata_buffer())
    348  struct page *new_page;   (local)
    377  new_page = virt_to_page(jh_in->b_frozen_data);
    380  new_page = jh2bh(jh_in)->b_page;
    384  mapped_data = kmap_atomic(new_page);
    424  mapped_data = kmap_atomic(new_page);
    428  new_page = virt_to_page(tmp);
    445  mapped_data = kmap_atomic(new_page);
    450  set_bh_page(new_bh, new_page, new_offset);
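new_page is whichever page currently backs the metadata (the frozen copy, an escaped copy, or the buffer_head's own page), and it is only touched under short-lived atomic mappings before the output buffer_head is pointed at it:

    mapped_data = kmap_atomic(new_page);
    /* ... check for the magic-number collision or copy the data ... */
    kunmap_atomic(mapped_data);

    set_bh_page(new_bh, new_page, new_offset);  /* bh does the journal I/O */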
/Linux-v5.15/Documentation/networking/
page_pool.rst
    156  new_page = page_pool_dev_alloc_pages(page_pool);
/Linux-v5.15/drivers/net/wireless/intel/iwlwifi/fw/
dbg.c (all hits in alloc_sgtable())
    568  struct page *new_page;   (local)
    579  new_page = alloc_page(GFP_KERNEL);
    580  if (!new_page) {
    584  new_page = sg_page(iter);
    585  if (new_page)
    586  __free_page(new_page);
    593  sg_set_page(iter, new_page, alloc_size, 0);
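Each scatterlist entry gets its own freshly allocated page; on failure the loop walks the table and frees whatever sg_page() finds already attached. The per-entry step as a helper-shaped sketch:

    #include <linux/gfp.h>
    #include <linux/scatterlist.h>

    /* attach one fresh page; on false the caller unwinds already-filled
     * entries with sg_page()/__free_page(), as alloc_sgtable() does */
    static bool sg_attach_new_page(struct scatterlist *iter, size_t alloc_size)
    {
            struct page *new_page = alloc_page(GFP_KERNEL);

            if (!new_page)
                    return false;
            sg_set_page(iter, new_page, alloc_size, 0);
            return true;
    }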
/Linux-v5.15/drivers/staging/rts5208/
xd.c (all hits in xd_copy_page())
    1102  u32 old_page, new_page;   (local)
    1116  new_page = (new_blk << xd_card->block_shift) + start_page;
    1185  xd_assign_phy_addr(chip, new_page, XD_RW_ADDR);
    1208  new_page++;
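The arithmetic at line 1116 is the whole trick: the block number shifted left by the card's pages-per-block exponent gives the block's first physical page, and the copy loop then advances one page per iteration (line 1208). As a tiny helper:

    #include <linux/types.h>

    /* physical page address of 'page' within block 'blk' */
    static u32 xd_page_addr(u32 blk, u8 block_shift, u8 page)
    {
            return (blk << block_shift) + page;
    }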