/Linux-v5.4/mm/
khugepaged.c
     954  struct page *new_page;   in collapse_huge_page() local
     974  new_page = khugepaged_alloc_page(hpage, gfp, node);   in collapse_huge_page()
     975  if (!new_page) {   in collapse_huge_page()
     980  if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {   in collapse_huge_page()
     988  mem_cgroup_cancel_charge(new_page, memcg, true);   in collapse_huge_page()
     996  mem_cgroup_cancel_charge(new_page, memcg, true);   in collapse_huge_page()
    1007  mem_cgroup_cancel_charge(new_page, memcg, true);   in collapse_huge_page()
    1075  __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);   in collapse_huge_page()
    1077  __SetPageUptodate(new_page);   in collapse_huge_page()
    1080  _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);   in collapse_huge_page()
          [all …]
|
swap_state.c
     363  struct page *found_page = NULL, *new_page = NULL;   in __read_swap_cache_async() local
     397  if (!new_page) {   in __read_swap_cache_async()
     398  new_page = alloc_page_vma(gfp_mask, vma, addr);   in __read_swap_cache_async()
     399  if (!new_page)   in __read_swap_cache_async()
     419  __SetPageLocked(new_page);   in __read_swap_cache_async()
     420  __SetPageSwapBacked(new_page);   in __read_swap_cache_async()
     421  err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);   in __read_swap_cache_async()
     424  SetPageWorkingset(new_page);   in __read_swap_cache_async()
     425  lru_cache_add_anon(new_page);   in __read_swap_cache_async()
     427  return new_page;   in __read_swap_cache_async()
          [all …]
|
migrate.c
    2001  struct page *new_page = NULL;   in migrate_misplaced_transhuge_page() local
    2005  new_page = alloc_pages_node(node,   in migrate_misplaced_transhuge_page()
    2008  if (!new_page)   in migrate_misplaced_transhuge_page()
    2010  prep_transhuge_page(new_page);   in migrate_misplaced_transhuge_page()
    2014  put_page(new_page);   in migrate_misplaced_transhuge_page()
    2019  __SetPageLocked(new_page);   in migrate_misplaced_transhuge_page()
    2021  __SetPageSwapBacked(new_page);   in migrate_misplaced_transhuge_page()
    2024  new_page->mapping = page->mapping;   in migrate_misplaced_transhuge_page()
    2025  new_page->index = page->index;   in migrate_misplaced_transhuge_page()
    2028  migrate_page_copy(new_page, page);   in migrate_misplaced_transhuge_page()
          [all …]
|
huge_memory.c
    1316  struct page *page = NULL, *new_page;   in do_huge_pmd_wp_page() local
    1366  new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);   in do_huge_pmd_wp_page()
    1368  new_page = NULL;   in do_huge_pmd_wp_page()
    1370  if (likely(new_page)) {   in do_huge_pmd_wp_page()
    1371  prep_transhuge_page(new_page);   in do_huge_pmd_wp_page()
    1388  if (unlikely(mem_cgroup_try_charge_delay(new_page, vma->vm_mm,   in do_huge_pmd_wp_page()
    1390  put_page(new_page);   in do_huge_pmd_wp_page()
    1403  clear_huge_page(new_page, vmf->address, HPAGE_PMD_NR);   in do_huge_pmd_wp_page()
    1405  copy_user_huge_page(new_page, page, vmf->address,   in do_huge_pmd_wp_page()
    1407  __SetPageUptodate(new_page);   in do_huge_pmd_wp_page()
          [all …]
|
hugetlb.c
    3684  struct page *old_page, *new_page;   in hugetlb_cow() local
    3722  new_page = alloc_huge_page(vma, haddr, outside_reserve);   in hugetlb_cow()
    3724  if (IS_ERR(new_page)) {   in hugetlb_cow()
    3749  ret = vmf_error(PTR_ERR(new_page));   in hugetlb_cow()
    3762  copy_user_huge_page(new_page, old_page, address, vma,   in hugetlb_cow()
    3764  __SetPageUptodate(new_page);   in hugetlb_cow()
    3777  ClearPagePrivate(new_page);   in hugetlb_cow()
    3783  make_huge_pte(vma, new_page, 1));   in hugetlb_cow()
    3785  hugepage_add_new_anon_rmap(new_page, vma, haddr);   in hugetlb_cow()
    3786  set_page_huge_active(new_page);   in hugetlb_cow()
          [all …]
|
memory.c
    2311  struct page *new_page = NULL;   in wp_page_copy() local
    2321  new_page = alloc_zeroed_user_highpage_movable(vma,   in wp_page_copy()
    2323  if (!new_page)   in wp_page_copy()
    2326  new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,   in wp_page_copy()
    2328  if (!new_page)   in wp_page_copy()
    2330  cow_user_page(new_page, old_page, vmf->address, vma);   in wp_page_copy()
    2333  if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))   in wp_page_copy()
    2336  __SetPageUptodate(new_page);   in wp_page_copy()
    2358  entry = mk_pte(new_page, vma->vm_page_prot);   in wp_page_copy()
    2367  page_add_new_anon_rmap(new_page, vma, vmf->address, false);   in wp_page_copy()
          [all …]
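
Read in order, the wp_page_copy() hits above trace the anonymous copy-on-write sequence: allocate a replacement page, copy (or zero) it, charge it to the memory cgroup, then build and map the new PTE. A condensed sketch of that flow, pieced together from the excerpted lines (locking, TLB flushing, and error unwinding omitted; the zero-page test and the oom labels are filled in from v5.4 context, not shown above):

	/* Condensed sketch of the CoW flow excerpted above (not verbatim v5.4). */
	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
		/* Write fault on the zero page: a fresh zeroed page suffices. */
		new_page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
		if (!new_page)
			goto oom;
	} else {
		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
		if (!new_page)
			goto oom;
		cow_user_page(new_page, old_page, vmf->address, vma); /* private copy */
	}
	if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))
		goto oom_free_new;                    /* charge before mapping */
	__SetPageUptodate(new_page);                  /* contents now valid */
	entry = mk_pte(new_page, vma->vm_page_prot);  /* PTE for the private copy */
	page_add_new_anon_rmap(new_page, vma, vmf->address, false); /* new anon rmap */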
|
ksm.c
    2563  struct page *new_page;   in ksm_might_need_to_copy() local
    2578  new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);   in ksm_might_need_to_copy()
    2579  if (new_page) {   in ksm_might_need_to_copy()
    2580  copy_user_highpage(new_page, page, address, vma);   in ksm_might_need_to_copy()
    2582  SetPageDirty(new_page);   in ksm_might_need_to_copy()
    2583  __SetPageUptodate(new_page);   in ksm_might_need_to_copy()
    2584  __SetPageLocked(new_page);   in ksm_might_need_to_copy()
    2587  return new_page;   in ksm_might_need_to_copy()
|
memory-failure.c
    1613  static struct page *new_page(struct page *p, unsigned long private)   in new_page() function
    1714  ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,   in soft_offline_huge_page()
    1804  ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,   in __soft_offline_page()
|
/Linux-v5.4/include/linux/
migrate.h
      39  struct page *new_page = NULL;   in new_page_nodemask() local
      53  new_page = __alloc_pages_nodemask(gfp_mask, order,   in new_page_nodemask()
      56  if (new_page && PageTransHuge(new_page))   in new_page_nodemask()
      57  prep_transhuge_page(new_page);   in new_page_nodemask()
      59  return new_page;   in new_page_nodemask()
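
This header helper is the stock allocation callback handed to migrate_pages() (the memory-failure.c hits above pass a wrapper with the same shape). A reconstruction of the excerpted logic; the signature and the elided hugetlb/THP adjustments are filled in from v5.4 context and should be treated as approximate:

	/* Approximate reconstruction of new_page_nodemask() from the excerpt. */
	static inline struct page *new_page_nodemask(struct page *page,
				int preferred_nid, nodemask_t *nodemask)
	{
		gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
		unsigned int order = 0;
		struct page *new_page = NULL;

		/* ... hugetlb short-circuit and THP gfp/order adjustments elided ... */

		new_page = __alloc_pages_nodemask(gfp_mask, order,
					preferred_nid, nodemask);

		if (new_page && PageTransHuge(new_page))
			prep_transhuge_page(new_page);  /* finish compound-page setup */

		return new_page;
	}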
|
/Linux-v5.4/arch/s390/mm/
vmem.c
     265  void *new_page;   in vmemmap_populate() local
     267  new_page = vmemmap_alloc_block(PMD_SIZE, node);   in vmemmap_populate()
     268  if (!new_page)   in vmemmap_populate()
     270  pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;   in vmemmap_populate()
     285  void *new_page;   in vmemmap_populate() local
     287  new_page = vmemmap_alloc_block(PAGE_SIZE, node);   in vmemmap_populate()
     288  if (!new_page)   in vmemmap_populate()
     290  pte_val(*pt_dir) = __pa(new_page) | pgt_prot;   in vmemmap_populate()
|
/Linux-v5.4/kernel/events/
uprobes.c
     155  struct page *old_page, struct page *new_page)   in __replace_page() argument
     170  if (new_page) {   in __replace_page()
     171  err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,   in __replace_page()
     183  if (new_page)   in __replace_page()
     184  mem_cgroup_cancel_charge(new_page, memcg, false);   in __replace_page()
     189  if (new_page) {   in __replace_page()
     190  get_page(new_page);   in __replace_page()
     191  page_add_new_anon_rmap(new_page, vma, addr, false);   in __replace_page()
     192  mem_cgroup_commit_charge(new_page, memcg, false, false);   in __replace_page()
     193  lru_cache_add_active_or_unevictable(new_page, vma);   in __replace_page()
          [all …]
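
The __replace_page() hits show the three-step memcg charging contract that brackets a page replacement: try_charge reserves the charge up front, cancel_charge undoes it if the old PTE cannot be found and locked, and commit_charge finalizes it once the new page is mapped. A sketch of that contract; pte_lookup_failed is a hypothetical placeholder for the page-table walk elided between the excerpted lines:

	/* Sketch of the try/cancel/commit charging contract seen above. */
	if (new_page) {
		err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
					    &memcg, false);  /* reserve the charge */
		if (err)
			return err;
	}
	/* ... walk the page tables and lock the old PTE (elided) ... */
	if (pte_lookup_failed) {                             /* hypothetical name */
		if (new_page)
			mem_cgroup_cancel_charge(new_page, memcg, false);
		return -EAGAIN;
	}
	if (new_page) {
		get_page(new_page);                          /* ref for the mapping */
		page_add_new_anon_rmap(new_page, vma, addr, false);
		mem_cgroup_commit_charge(new_page, memcg, false, false);
		lru_cache_add_active_or_unevictable(new_page, vma);
	}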
|
/Linux-v5.4/fs/f2fs/
namei.c
     853  struct page *old_page, *new_page = NULL;   in f2fs_rename() local
     915  &new_page);   in f2fs_rename()
     917  if (IS_ERR(new_page))   in f2fs_rename()
     918  err = PTR_ERR(new_page);   in f2fs_rename()
     930  f2fs_set_link(new_dir, new_entry, new_page, old_inode);   in f2fs_rename()
    1027  if (new_page)   in f2fs_rename()
    1028  f2fs_put_page(new_page, 0);   in f2fs_rename()
    1048  struct page *old_page, *new_page;   in f2fs_cross_rename() local
    1083  new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page);   in f2fs_cross_rename()
    1085  if (IS_ERR(new_page))   in f2fs_cross_rename()
          [all …]
|
/Linux-v5.4/fs/minix/
namei.c
     211  struct page * new_page;   in minix_rename() local
     219  new_de = minix_find_entry(new_dentry, &new_page);   in minix_rename()
     222  minix_set_link(new_de, new_page, old_inode);   in minix_rename()
|
/Linux-v5.4/fs/sysv/
namei.c
     216  struct page * new_page;   in sysv_rename() local
     224  new_de = sysv_find_entry(new_dentry, &new_page);   in sysv_rename()
     227  sysv_set_link(new_de, new_page, old_inode);   in sysv_rename()
|
/Linux-v5.4/fs/ufs/
namei.c
     270  struct page *new_page;   in ufs_rename() local
     278  new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page);   in ufs_rename()
     281  ufs_set_link(new_dir, new_de, new_page, old_inode, 1);   in ufs_rename()
|
/Linux-v5.4/fs/ubifs/
budget.c
     367  znodes = req->new_ino + (req->new_page << UBIFS_BLOCKS_PER_PAGE_SHIFT) +   in calc_idx_growth()
     384  if (req->new_page)   in calc_data_growth()
     430  ubifs_assert(c, req->new_page <= 1);   in ubifs_budget_space()
     517  ubifs_assert(c, req->new_page <= 1);   in ubifs_release_budget()
|
file.c
     198  struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };   in release_new_page_budget()
     224  struct ubifs_budget_req req = { .new_page = 1 };   in write_begin_slow()
     365  req.new_page = 1;   in allocate_budget()
    1521  struct ubifs_budget_req req = { .new_page = 1 };   in ubifs_vm_page_mkwrite()
|
ubifs.h
     886  unsigned int new_page:1;   member
     896  unsigned int new_page;   member
|
/Linux-v5.4/fs/ext2/
namei.c
     349  struct page *new_page;   in ext2_rename() local
     357  new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page);   in ext2_rename()
     360  ext2_set_link(new_dir, new_de, new_page, old_inode, 1);   in ext2_rename()
|
/Linux-v5.4/fs/nilfs2/
namei.c
     374  struct page *new_page;   in nilfs_rename() local
     382  new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page);   in nilfs_rename()
     385  nilfs_set_link(new_dir, new_de, new_page, old_inode);   in nilfs_rename()
|
/Linux-v5.4/fs/jbd2/
journal.c
     345  struct page *new_page;   in jbd2_journal_write_metadata_buffer() local
     374  new_page = virt_to_page(jh_in->b_frozen_data);   in jbd2_journal_write_metadata_buffer()
     377  new_page = jh2bh(jh_in)->b_page;   in jbd2_journal_write_metadata_buffer()
     381  mapped_data = kmap_atomic(new_page);   in jbd2_journal_write_metadata_buffer()
     421  mapped_data = kmap_atomic(new_page);   in jbd2_journal_write_metadata_buffer()
     425  new_page = virt_to_page(tmp);   in jbd2_journal_write_metadata_buffer()
     442  mapped_data = kmap_atomic(new_page);   in jbd2_journal_write_metadata_buffer()
     447  set_bh_page(new_bh, new_page, new_offset);   in jbd2_journal_write_metadata_buffer()
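
The jbd2_journal_write_metadata_buffer() hits show a source-page selection followed by short atomic mappings: pick either the frozen shadow copy or the live buffer page, map it briefly with kmap_atomic() to inspect (and if necessary escape) the data, then point the output buffer_head at whichever page won. A sketch under those assumptions; the magic-number escape logic between the three kmap_atomic() calls is elided:

	/* Sketch of the frozen-data vs. live-page selection seen above. */
	if (jh_in->b_frozen_data)
		new_page = virt_to_page(jh_in->b_frozen_data);  /* shadow copy */
	else
		new_page = jh2bh(jh_in)->b_page;                /* live buffer page */

	mapped_data = kmap_atomic(new_page);   /* short-lived atomic mapping */
	/* ... detect the jbd2 magic number and escape it if present ... */
	kunmap_atomic(mapped_data);

	set_bh_page(new_bh, new_page, new_offset);  /* aim the output bh at the page */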
|
/Linux-v5.4/drivers/tty/serial/
icom.c
     344  unsigned char *new_page = NULL;   in load_code() local
     417  new_page = pci_alloc_consistent(dev, 4096, &temp_pci);   in load_code()
     419  if (!new_page) {   in load_code()
     439  new_page[index] = fw->data[index];   in load_code()
     496  if (new_page != NULL)   in load_code()
     497  pci_free_consistent(dev, 4096, new_page, temp_pci);   in load_code()
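
load_code() pairs pci_alloc_consistent() with pci_free_consistent() around a one-page DMA buffer, copying firmware bytes into it in between. A minimal sketch of that legacy coherent-DMA pattern, assuming the fw and dev variables from the excerpt:

	/* Sketch of the coherent-DMA buffer lifecycle used in load_code(). */
	dma_addr_t temp_pci;
	unsigned char *new_page;
	int index;

	new_page = pci_alloc_consistent(dev, 4096, &temp_pci);
	if (!new_page)
		return -ENOMEM;            /* nothing allocated, nothing to free */

	for (index = 0; index < fw->size; index++)
		new_page[index] = fw->data[index];   /* stage firmware bytes */

	/* ... hand temp_pci (the bus address) to the adapter (elided) ... */

	pci_free_consistent(dev, 4096, new_page, temp_pci);

pci_alloc_consistent()/pci_free_consistent() are thin legacy wrappers over dma_alloc_coherent()/dma_free_coherent(), which later kernels call directly.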
|
/Linux-v5.4/drivers/net/ethernet/ti/
cpsw.c
     674  struct page *new_page, *page = token;   in cpsw_rx_handler() local
     706  new_page = page;   in cpsw_rx_handler()
     715  new_page = page_pool_dev_alloc_pages(pool);   in cpsw_rx_handler()
     716  if (unlikely(!new_page)) {   in cpsw_rx_handler()
     717  new_page = page;   in cpsw_rx_handler()
     775  xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;   in cpsw_rx_handler()
     779  dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;   in cpsw_rx_handler()
     780  ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,   in cpsw_rx_handler()
     784  page_pool_recycle_direct(pool, new_page);   in cpsw_rx_handler()
|
/Linux-v5.4/drivers/net/wireless/intel/iwlwifi/fw/
dbg.c
     629  struct page *new_page;   in alloc_sgtable() local
     640  new_page = alloc_page(GFP_KERNEL);   in alloc_sgtable()
     641  if (!new_page) {   in alloc_sgtable()
     645  new_page = sg_page(iter);   in alloc_sgtable()
     646  if (new_page)   in alloc_sgtable()
     647  __free_page(new_page);   in alloc_sgtable()
     654  sg_set_page(iter, new_page, alloc_size, 0);   in alloc_sgtable()
|
/Linux-v5.4/drivers/staging/rts5208/
xd.c
    1102  u32 old_page, new_page;   in xd_copy_page() local
    1116  new_page = (new_blk << xd_card->block_shift) + start_page;   in xd_copy_page()
    1185  xd_assign_phy_addr(chip, new_page, XD_RW_ADDR);   in xd_copy_page()
    1208  new_page++;   in xd_copy_page()
|