/Linux-v6.1/include/linux/ |
D | page_ref.h |
    29  extern void __page_ref_set(struct page *page, int v);
    30  extern void __page_ref_mod(struct page *page, int v);
    31  extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
    32  extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
    33  extern void __page_ref_mod_unless(struct page *page, int v, int u);
    34  extern void __page_ref_freeze(struct page *page, int v, int ret);
    35  extern void __page_ref_unfreeze(struct page *page, int v);
    41  static inline void __page_ref_set(struct page *page, int v)
    44  static inline void __page_ref_mod(struct page *page, int v)
    47  static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
    [all …]
|
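A note on the page_ref.h hits above: the __page_ref_* functions are the CONFIG_DEBUG_PAGE_REF tracepoint hooks, and the public page_ref_* helpers perform the atomic refcount change first, then report it when the tracepoint is active. A hedged sketch of that wrapper shape (simplified; example_* is a made-up name, the real helpers live in include/linux/page_ref.h):

    #include <linux/mm_types.h>
    #include <linux/page_ref.h>

    /* Sketch of the wrapper pattern: do the atomic refcount change first,
     * then feed the result to the debug tracepoint if it is enabled. */
    static inline int example_page_ref_dec_and_test(struct page *page)
    {
            int ret = atomic_dec_and_test(&page->_refcount);

            if (page_ref_tracepoint_active(page_ref_mod_and_test))
                    __page_ref_mod_and_test(page, -1, ret);
            return ret;
    }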
D | page-flags.h |
    214 static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
    217         return page;
    225     if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
    226         test_bit(PG_head, &page->flags)) {
    232         unsigned long head = READ_ONCE(page[1].compound_head);
    235             return (const struct page *)(head - 1);
    237     return page;
    240 static inline const struct page *page_fixed_fake_head(const struct page *page)
    242     return page;
    246 static __always_inline int page_is_fake_head(struct page *page)
    [all …]
|
D | highmem.h |
    37  static inline void *kmap(struct page *page);
    46  static inline void kunmap(struct page *page);
    54  static inline struct page *kmap_to_page(void *addr);
    96  static inline void *kmap_local_page(struct page *page);
    180 static inline void *kmap_atomic(struct page *page);
    187 static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma…
    203 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
    205     void *addr = kmap_local_page(page);
    206     clear_user_page(addr, vaddr, page);
    226 static inline struct page *
    [all …]
|
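The clear_user_highpage() lines above show the idiomatic local-kmap pattern: map, operate, unmap in reverse order. A minimal hedged sketch of the same pattern for copying a possibly-highmem page (example_copy_page is a hypothetical name; kmap_local_page() and kunmap_local() are the APIs declared in this header):

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Hypothetical helper: copy one page that may live in highmem.
     * kmap_local_page() returns a CPU-local temporary mapping that must be
     * released with kunmap_local(), innermost mapping first. */
    static void example_copy_page(struct page *dst, struct page *src)
    {
            void *vdst = kmap_local_page(dst);
            void *vsrc = kmap_local_page(src);

            memcpy(vdst, vsrc, PAGE_SIZE);

            kunmap_local(vsrc);
            kunmap_local(vdst);
    }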
D | hugetlb_cgroup.h |
    70  __hugetlb_cgroup_from_page(struct page *page, bool rsvd)
    72      VM_BUG_ON_PAGE(!PageHuge(page), page);
    74      if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
    77          return (void *)page_private(page + SUBPAGE_INDEX_CGROUP_RSVD);
    79      return (void *)page_private(page + SUBPAGE_INDEX_CGROUP);
    82  static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
    84      return __hugetlb_cgroup_from_page(page, false);
    88  hugetlb_cgroup_from_page_rsvd(struct page *page)
    90      return __hugetlb_cgroup_from_page(page, true);
    93  static inline void __set_hugetlb_cgroup(struct page *page,
    [all …]
|
D | highmem-internal.h |
    10  void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
    35  void *kmap_high(struct page *page);
    36  void kunmap_high(struct page *page);
    38  struct page *__kmap_to_page(void *addr);
    40  static inline void *kmap(struct page *page)
    45      if (!PageHighMem(page))
    46          addr = page_address(page);
    48          addr = kmap_high(page);
    53  static inline void kunmap(struct page *page)
    56      if (!PageHighMem(page))
    [all …]
|
D | balloon_compaction.h |
    58      int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
    59              struct page *page, enum migrate_mode mode);
    62  extern struct page *balloon_page_alloc(void);
    64              struct page *page);
    65  extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
    92              struct page *page)
    94      __SetPageOffline(page);
    95      __SetPageMovable(page, &balloon_mops);
    96      set_page_private(page, (unsigned long)balloon);
    97      list_add(&page->lru, &balloon->pages);
    [all …]
|
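balloon_page_insert() above is the low-level step that marks a page offline/movable and links it into the balloon; drivers usually go through balloon_page_alloc() and balloon_page_enqueue(). A hedged sketch of a typical inflate step (example_inflate_one and the hypervisor comment are illustrative):

    #include <linux/balloon_compaction.h>

    /* Hypothetical inflate step for a balloon driver: allocate a page and
     * hand it to the balloon core, which takes b_dev_info->pages_lock and
     * queues the page on b_dev_info->pages. */
    static int example_inflate_one(struct balloon_dev_info *b_dev_info)
    {
            struct page *page = balloon_page_alloc();

            if (!page)
                    return -ENOMEM;

            balloon_page_enqueue(b_dev_info, page);
            /* ... report the page to the hypervisor here ... */
            return 0;
    }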
D | mm.h |
    146 static inline void __mm_zero_struct_page(struct page *page)
    148     unsigned long *_pp = (void *)page;
    151     BUILD_BUG_ON(sizeof(struct page) & 7);
    152     BUILD_BUG_ON(sizeof(struct page) < 56);
    153     BUILD_BUG_ON(sizeof(struct page) > 80);
    155     switch (sizeof(struct page)) {
    176 #define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
    215 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
    218 #define nth_page(page,n) ((page) + (n))
    219 #define folio_page_idx(folio, p) ((p) - &(folio)->page)
    [all …]
|
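The two nth_page() definitions above exist because, with CONFIG_SPARSEMEM and no VMEMMAP, the struct pages for consecutive pfns are not guaranteed to be contiguous in memory, so the safe form round-trips through the pfn. A hedged sketch of the kind of loop that needs it (example_touch_subpages is hypothetical):

    #include <linux/mm.h>
    #include <linux/highmem.h>

    /* Hypothetical walk over the sub-pages of a higher-order allocation.
     * "head + i" is only correct when the memory model keeps struct pages
     * contiguous; nth_page() is correct in every memory model. */
    static void example_touch_subpages(struct page *head, unsigned int nr_pages)
    {
            unsigned int i;

            for (i = 0; i < nr_pages; i++) {
                    struct page *p = nth_page(head, i);

                    clear_highpage(p);
            }
    }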
D | page_owner.h |
    11  extern void __reset_page_owner(struct page *page, unsigned short order);
    12  extern void __set_page_owner(struct page *page,
    14  extern void __split_page_owner(struct page *page, unsigned int nr);
    16  extern void __set_page_owner_migrate_reason(struct page *page, int reason);
    17  extern void __dump_page_owner(const struct page *page);
    21  static inline void reset_page_owner(struct page *page, unsigned short order)
    24      __reset_page_owner(page, order);
    27  static inline void set_page_owner(struct page *page,
    31      __set_page_owner(page, order, gfp_mask);
    34  static inline void split_page_owner(struct page *page, unsigned int nr)
    [all …]
|
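The set/reset pair above is driven by the page allocator: set_page_owner() records order, gfp mask and a stack trace when a page leaves the buddy allocator, reset_page_owner() clears that record on free, and both compile to no-ops unless page_owner is enabled. A hedged, simplified sketch of the pairing (the example_* hooks stand in for the real allocator paths in mm/page_alloc.c):

    #include <linux/gfp.h>
    #include <linux/page_owner.h>

    /* Simplified stand-ins for the allocator-side hooks: the real calls sit
     * in post_alloc_hook() and free_pages_prepare(). */
    static void example_post_alloc(struct page *page, unsigned short order,
                                   gfp_t gfp_mask)
    {
            set_page_owner(page, order, gfp_mask);
    }

    static void example_pre_free(struct page *page, unsigned short order)
    {
            reset_page_owner(page, order);
    }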
D | migrate.h |
    10  typedef struct page *new_page_t(struct page *page, unsigned long private);
    11  typedef void free_page_t(struct page *page, unsigned long private);
    53      bool (*isolate_page)(struct page *, isolate_mode_t);
    54      int (*migrate_page)(struct page *dst, struct page *src,
    56      void (*putback_page)(struct page *);
    72  extern struct page *alloc_migration_target(struct page *page, unsigned long private);
    73  extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
    91  static inline struct page *alloc_migration_target(struct page *page,
    94  static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
    106 bool PageMovable(struct page *page);
    [all …]
|
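The three callbacks at lines 53–56 are the fields of struct movable_operations, which a driver that owns non-LRU movable pages implements so the migration core can isolate, migrate and put back its pages while holding the page lock. A hedged skeleton of such an implementation (all my_* names are hypothetical):

    #include <linux/migrate.h>

    /* Hypothetical non-LRU movable page owner: the callbacks are invoked by
     * the migration core with the page locked. */
    static bool my_isolate(struct page *page, isolate_mode_t mode)
    {
            /* take the page off the driver's internal lists; true = isolated */
            return true;
    }

    static int my_migrate(struct page *dst, struct page *src,
                          enum migrate_mode mode)
    {
            /* copy contents/state from src to dst, retarget driver pointers */
            return MIGRATEPAGE_SUCCESS;
    }

    static void my_putback(struct page *page)
    {
            /* isolation succeeded but migration did not: requeue the page */
    }

    static const struct movable_operations my_movable_ops = {
            .isolate_page   = my_isolate,
            .migrate_page   = my_migrate,
            .putback_page   = my_putback,
    };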
/Linux-v6.1/mm/ |
D | folio-compat.c |
    12  struct address_space *page_mapping(struct page *page)
    14      return folio_mapping(page_folio(page));
    18  void unlock_page(struct page *page)
    20      return folio_unlock(page_folio(page));
    24  void end_page_writeback(struct page *page)
    26      return folio_end_writeback(page_folio(page));
    30  void wait_on_page_writeback(struct page *page)
    32      return folio_wait_writeback(page_folio(page));
    36  void wait_for_stable_page(struct page *page)
    38      return folio_wait_stable(page_folio(page));
    [all …]
|
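Every function in mm/folio-compat.c follows the same one-line shape: resolve the struct page to its folio with page_folio() and forward to the folio API, so legacy page-based callers keep working during the folio conversion. A hedged sketch of that shape for a hypothetical wrapper:

    #include <linux/pagemap.h>
    #include <linux/swap.h>

    /* Hypothetical compat wrapper in the style of mm/folio-compat.c:
     * page_folio() maps a (possibly tail) page to its folio, and the real
     * work happens in the folio-based function. */
    void example_mark_page_accessed(struct page *page)
    {
            folio_mark_accessed(page_folio(page));
    }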
D | page_isolation.c |
    33  static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long end_pfn,
    36      struct page *page = pfn_to_page(start_pfn);
    37      struct zone *zone = page_zone(page);
    43      if (is_migrate_cma_page(page)) {
    52          return page;
    56          page = pfn_to_page(pfn);
    64          if (PageReserved(page))
    65              return page;
    81          if (PageHuge(page) || PageTransCompound(page)) {
    82              struct page *head = compound_head(page);
    [all …]
|
D | balloon_compaction.c |
    15              struct page *page)
    23      BUG_ON(!trylock_page(page));
    24      balloon_page_insert(b_dev_info, page);
    25      unlock_page(page);
    43      struct page *page, *tmp;
    48      list_for_each_entry_safe(page, tmp, pages, lru) {
    49          list_del(&page->lru);
    50          balloon_page_enqueue_one(b_dev_info, page);
    79      struct page *page, *tmp;
    84      list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
    [all …]
|
D | migrate_device.c |
    74      struct page *page;
    82          page = pmd_page(*pmdp);
    83          if (is_huge_zero_page(page)) {
    92              get_page(page);
    94              if (unlikely(!trylock_page(page)))
    97              ret = split_huge_page(page);
    98              unlock_page(page);
    99              put_page(page);
    117     struct page *page;
    141         page = pfn_swap_entry_to_page(entry);
    [all …]
|
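Lines 92–99 are the standard sequence for splitting a transparent huge page met during the walk: take a reference, lock the page, split, then drop the lock and the reference. A hedged sketch of that sequence in isolation (example_split_thp is hypothetical):

    #include <linux/huge_mm.h>
    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* Hedged sketch of the split sequence used in migrate_vma_collect_pmd():
     * the extra reference keeps the page alive, and split_huge_page()
     * requires the page to be locked by the caller. */
    static int example_split_thp(struct page *page)
    {
            int ret;

            get_page(page);
            if (!trylock_page(page)) {
                    put_page(page);
                    return -EBUSY;
            }
            ret = split_huge_page(page);
            unlock_page(page);
            put_page(page);
            return ret;
    }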
D | bootmem_info.c |
    17  void get_page_bootmem(unsigned long info, struct page *page, unsigned long type)
    19      page->index = type;
    20      SetPagePrivate(page);
    21      set_page_private(page, info);
    22      page_ref_inc(page);
    25  void put_page_bootmem(struct page *page)
    27      unsigned long type = page->index;
    32      if (page_ref_dec_return(page) == 1) {
    33          page->index = 0;
    34          ClearPagePrivate(page);
    [all …]
|
D | page_io.c |
    33      struct page *page = bio_first_page_all(bio);
    36          SetPageError(page);
    45          set_page_dirty(page);
    49          ClearPageReclaim(page);
    51      end_page_writeback(page);
    57      struct page *page = bio_first_page_all(bio);
    61          SetPageError(page);
    62          ClearPageUptodate(page);
    69      SetPageUptodate(page);
    71      unlock_page(page);
    [all …]
|
/Linux-v6.1/fs/btrfs/ |
D | subpage.c |
    66  bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct page *page)
    76      if (!page->mapping || !page->mapping->host ||
    77          is_data_inode(page->mapping->host))
    121             struct page *page, enum btrfs_subpage_type type)
    129     if (page->mapping)
    130         ASSERT(PageLocked(page));
    133     if (!btrfs_is_subpage(fs_info, page) || PagePrivate(page))
    140     attach_page_private(page, subpage);
    145             struct page *page)
    150     if (!btrfs_is_subpage(fs_info, page) || !PagePrivate(page))
    [all …]
|
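btrfs_attach_subpage() and btrfs_detach_subpage() above lean on the generic page-private helpers to hang per-page metadata off page->private. A hedged sketch of that attach/detach pairing with a stand-in structure (example_subpage replaces btrfs_subpage for illustration):

    #include <linux/pagemap.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    /* Stand-in for per-page filesystem metadata (btrfs stores a
     * struct btrfs_subpage here). */
    struct example_subpage {
            spinlock_t lock;
            /* ... per-sector state bitmaps ... */
    };

    static int example_attach(struct page *page)
    {
            struct example_subpage *sp = kzalloc(sizeof(*sp), GFP_NOFS);

            if (!sp)
                    return -ENOMEM;
            spin_lock_init(&sp->lock);
            /* sets PagePrivate, stores the pointer, takes a page reference */
            attach_page_private(page, sp);
            return 0;
    }

    static void example_detach(struct page *page)
    {
            /* clears PagePrivate, drops the reference, returns the pointer */
            struct example_subpage *sp = detach_page_private(page);

            kfree(sp);
    }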
D | subpage.h |
    77  bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct page *page);
    81          struct page *page, enum btrfs_subpage_type type);
    83          struct page *page);
    91          struct page *page);
    93          struct page *page);
    96          struct page *page, u64 start, u32 len);
    98          struct page *page, u64 start, u32 len);
    101         struct page *page, u64 start, u32 len);
    103         struct page *page, u64 start, u32 len);
    105         struct page *page, u64 start, u32 len);
    [all …]
|
/Linux-v6.1/net/core/ |
D | page_pool.c |
    223 static void page_pool_return_page(struct page_pool *pool, struct page *page);
    226 static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
    229     struct page *page;
    250         page = __ptr_ring_consume(r);
    251         if (unlikely(!page))
    254         if (likely(page_to_nid(page) == pref_nid)) {
    255             pool->alloc.cache[pool->alloc.count++] = page;
    262             page_pool_return_page(pool, page);
    264             page = NULL;
    271         page = pool->alloc.cache[--pool->alloc.count];
    [all …]
|
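page_pool_refill_alloc_cache() above is internal machinery; a network driver only sees the create/alloc/put API. A hedged sketch of typical driver-side usage (the example_* functions and parameter values are illustrative):

    #include <linux/device.h>
    #include <linux/numa.h>
    #include <net/page_pool.h>

    /* Hypothetical RX-queue setup: create a pool of order-0 pages, pull
     * pages for receive buffers, and return them on recycle. */
    static struct page_pool *example_create_pool(struct device *dev)
    {
            struct page_pool_params params = {
                    .order          = 0,
                    .pool_size      = 256,
                    .nid            = NUMA_NO_NODE,
                    .dev            = dev,
            };

            return page_pool_create(&params);       /* ERR_PTR() on failure */
    }

    static struct page *example_rx_alloc(struct page_pool *pool)
    {
            return page_pool_dev_alloc_pages(pool);
    }

    static void example_rx_free(struct page_pool *pool, struct page *page)
    {
            page_pool_put_full_page(pool, page, false);
    }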
/Linux-v6.1/fs/jfs/ |
D | jfs_metapage.c |
    49          unlock_page(mp->page);
    51          lock_page(mp->page);
    80  #define mp_anchor(page) ((struct meta_anchor *)page_private(page))
    82  static inline struct metapage *page_to_mp(struct page *page, int offset)
    84      if (!PagePrivate(page))
    86      return mp_anchor(page)->mp[offset >> L2PSIZE];
    89  static inline int insert_metapage(struct page *page, struct metapage *mp)
    95      if (PagePrivate(page))
    96          a = mp_anchor(page);
    101         set_page_private(page, (unsigned long)a);
    [all …]
|
/Linux-v6.1/fs/sysv/ |
D | dir.c |
    31  static inline void dir_put_page(struct page *page)
    33      kunmap(page);
    34      put_page(page);
    37  static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
    39      struct address_space *mapping = page->mapping;
    43      block_write_end(NULL, mapping, pos, len, len, page, NULL);
    49          err = write_one_page(page);
    51          unlock_page(page);
    55  static struct page * dir_get_page(struct inode *dir, unsigned long n)
    58      struct page *page = read_mapping_page(mapping, n, NULL);
    [all …]
|
/Linux-v6.1/sound/pci/trident/ |
D | trident_memory.c |
    22  #define __set_tlb_bus(trident,page,addr) \
    23      (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1))
    24  #define __tlb_to_addr(trident,page) \
    25      (dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))
    32  #define set_tlb_bus(trident,page,addr) __set_tlb_bus(trident,page,addr)
    34  #define set_silent_tlb(trident,page) __set_tlb_bus(trident, page, trident->tlb.silent_page->addr)
    38  #define aligned_page_offset(page) ((page) << 12)
    40  #define page_to_addr(trident,page) __tlb_to_addr(trident, page)
    47  #define aligned_page_offset(page) ((page) << 13)
    48  #define page_to_addr(trident,page) __tlb_to_addr(trident, (page) << 1)
    [all …]
|
/Linux-v6.1/fs/ecryptfs/ |
D | mmap.c |
    33  struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index)
    35      struct page *page = read_mapping_page(inode->i_mapping, index, NULL);
    36      if (!IS_ERR(page))
    37          lock_page(page);
    38      return page;
    52  static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
    56      rc = ecryptfs_encrypt_page(page);
    59          "page (upper index [0x%.16lx])\n", page->index);
    60      ClearPageUptodate(page);
    63      SetPageUptodate(page);
    [all …]
|
/Linux-v6.1/include/trace/events/ |
D | page_ref.h |
    15      TP_PROTO(struct page *page, int v),
    17      TP_ARGS(page, v),
    30          __entry->pfn = page_to_pfn(page);
    31          __entry->flags = page->flags;
    32          __entry->count = page_ref_count(page);
    33          __entry->mapcount = page_mapcount(page);
    34          __entry->mapping = page->mapping;
    35          __entry->mt = get_pageblock_migratetype(page);
    49      TP_PROTO(struct page *page, int v),
    51      TP_ARGS(page, v)
    [all …]
|
/Linux-v6.1/arch/s390/mm/ |
D | page-states.c |
    61  static inline unsigned char get_page_state(struct page *page)
    67          : "a" (page_to_phys(page)),
    72  static inline void set_page_unused(struct page *page, int order)
    79              : "a" (page_to_phys(page + i)),
    83  static inline void set_page_stable_dat(struct page *page, int order)
    90              : "a" (page_to_phys(page + i)),
    94  static inline void set_page_stable_nodat(struct page *page, int order)
    101             : "a" (page_to_phys(page + i)),
    108     struct page *page;
    116         page = phys_to_page(pmd_val(*pmd));
    [all …]
|
/Linux-v6.1/fs/nilfs2/ |
D | dir.c |
    67  static inline void nilfs_put_page(struct page *page)
    69      kunmap(page);
    70      put_page(page);
    87  static int nilfs_prepare_chunk(struct page *page, unsigned int from,
    90      loff_t pos = page_offset(page) + from;
    92      return __block_write_begin(page, pos, to - from, nilfs_get_block);
    95  static void nilfs_commit_chunk(struct page *page,
    100     loff_t pos = page_offset(page) + from;
    105     nr_dirty = nilfs_page_count_clean_buffers(page, from, to);
    106     copied = block_write_end(NULL, mapping, pos, len, len, page, NULL);
    [all …]
|
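nilfs_prepare_chunk() and nilfs_commit_chunk() bracket an in-place rewrite of part of a directory page, the same prepare/modify/commit shape seen in the sysv dir.c hit earlier in this list. A hedged sketch of how a caller strings them together; the full nilfs_commit_chunk() argument list is not visible in the snippet above, so the (page, mapping, from, to) form is an assumption, and example_update_dir_chunk and new_data are illustrative:

    /* Hedged sketch (would live inside fs/nilfs2/dir.c, where both static
     * helpers are visible). kaddr is the kmap()ed address of the page, as
     * obtained via the directory's page-lookup helper. */
    static int example_update_dir_chunk(struct page *page,
                                        struct address_space *mapping,
                                        void *kaddr, unsigned int from,
                                        unsigned int to, const void *new_data)
    {
            int err = nilfs_prepare_chunk(page, from, to);  /* __block_write_begin() */

            if (err)
                    return err;
            memcpy(kaddr + from, new_data, to - from);
            nilfs_commit_chunk(page, mapping, from, to);    /* assumed signature */
            return 0;
    }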