
Searched refs:page (results 1 – 25 of 2708), sorted by relevance.


/Linux-v5.15/include/linux/
page_ref.h
   29  extern void __page_ref_set(struct page *page, int v);
   30  extern void __page_ref_mod(struct page *page, int v);
   31  extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
   32  extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
   33  extern void __page_ref_mod_unless(struct page *page, int v, int u);
   34  extern void __page_ref_freeze(struct page *page, int v, int ret);
   35  extern void __page_ref_unfreeze(struct page *page, int v);
   41  static inline void __page_ref_set(struct page *page, int v)
   44  static inline void __page_ref_mod(struct page *page, int v)
   47  static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
  [all …]
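The __page_ref_* declarations above are tracing hooks behind the public page_ref API. A minimal sketch of the wrapper pattern, assuming v5.15 behavior with the tracepoint static-key check reduced to a plain CONFIG_DEBUG_PAGE_REF ifdef (page_ref_inc_sketch is a made-up name):

/* Sketch: a page_ref wrapper pairs the atomic refcount op with its hook. */
static inline void page_ref_inc_sketch(struct page *page)
{
    atomic_inc(&page->_refcount);       /* the actual refcount bump */
#ifdef CONFIG_DEBUG_PAGE_REF
    __page_ref_mod(page, 1);            /* report the +1 to tracing */
#endif
}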
page-flags.h
  194  static inline unsigned long _compound_head(const struct page *page)
  196      unsigned long head = READ_ONCE(page->compound_head);
  200      return (unsigned long)page;
  203  #define compound_head(page) ((typeof(page))_compound_head(page))
  205  static __always_inline int PageTail(struct page *page)
  207      return READ_ONCE(page->compound_head) & 1;
  210  static __always_inline int PageCompound(struct page *page)
  212      return test_bit(PG_head, &page->flags) || PageTail(page);
  216  static inline int PagePoisoned(const struct page *page)
  218      return page->flags == PAGE_POISON_PATTERN;
  [all …]
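Since compound_head() above is a no-op for base pages and follows the encoded head pointer for tail pages, callers can normalize any struct page before testing a flag kept only on the head. A short sketch (page_head_is_dirty is a hypothetical helper):

/* Sketch: normalize an arbitrary subpage to its head before testing
 * a flag that is only maintained on the head page. */
static inline bool page_head_is_dirty(struct page *page)
{
    return PageDirty(compound_head(page));
}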
pagemap.h
  163  void release_pages(struct page **pages, int nr);
  168  static inline struct address_space *page_mapping_file(struct page *page)
  170      if (unlikely(PageSwapCache(page)))
  172      return page_mapping(page);
  219  static inline int __page_cache_add_speculative(struct page *page, int count)
  234      VM_BUG_ON_PAGE(page_count(page) == 0, page);
  235      page_ref_add(page, count);
  238      if (unlikely(!page_ref_add_unless(page, count, 0))) {
  247      VM_BUG_ON_PAGE(PageTail(page), page);
  252  static inline int page_cache_get_speculative(struct page *page)
  [all …]
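page_cache_get_speculative() exists so lockless lookups can pin a page that may be freed concurrently. A sketch of the classic get-and-recheck pattern it supports, ignoring shadow (value) entries and compound-page details (lookup_page_sketch is illustrative):

struct page *lookup_page_sketch(struct address_space *mapping, pgoff_t index)
{
    struct page *page;

repeat:
    rcu_read_lock();
    page = xa_load(&mapping->i_pages, index);
    if (page) {
        if (!page_cache_get_speculative(page)) {
            rcu_read_unlock();
            goto repeat;    /* refcount hit zero under us */
        }
        /* ref held; verify we pinned the page still at this index */
        if (unlikely(page != xa_load(&mapping->i_pages, index))) {
            rcu_read_unlock();
            put_page(page);
            goto repeat;
        }
    }
    rcu_read_unlock();
    return page;
}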
page_idle.h
   12  static inline bool page_is_young(struct page *page)
   14      return PageYoung(page);
   17  static inline void set_page_young(struct page *page)
   19      SetPageYoung(page);
   22  static inline bool test_and_clear_page_young(struct page *page)
   24      return TestClearPageYoung(page);
   27  static inline bool page_is_idle(struct page *page)
   29      return PageIdle(page);
   32  static inline void set_page_idle(struct page *page)
   34      SetPageIdle(page);
  [all …]
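These accessors back the two-step idle-tracking protocol exposed via /sys/kernel/mm/page_idle: mark a page idle, then check later whether an intervening access cleared the bit. A sketch (both function names are made up):

/* Step 1: mark the page as an idle candidate. */
static void idle_mark_sketch(struct page *page)
{
    set_page_idle(page);
}

/* Step 2, later: still idle means nothing touched the page since
 * step 1 (an access clears PG_idle via the rmap/PTE walkers). */
static bool idle_check_sketch(struct page *page)
{
    return page_is_idle(page);
}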
balloon_compaction.h
   58      int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
   59                         struct page *page, enum migrate_mode mode);
   63  extern struct page *balloon_page_alloc(void);
   65                     struct page *page);
   66  extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
   83  extern bool balloon_page_isolate(struct page *page,
   85  extern void balloon_page_putback(struct page *page);
   87                     struct page *newpage,
   88                     struct page *page, enum migrate_mode mode);
  100                     struct page *page)
  [all …]
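A hedged sketch of the inflate step a balloon driver builds from this API, with error handling and the host notification elided (balloon_inflate_one_sketch is a made-up name):

static int balloon_inflate_one_sketch(struct balloon_dev_info *b_dev_info)
{
    struct page *page = balloon_page_alloc();

    if (!page)
        return -ENOMEM;
    /* links the page into the balloon list and marks it movable,
     * so compaction can migrate it via the migratepage callback */
    balloon_page_enqueue(b_dev_info, page);
    return 0;
}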
migrate.h
   10  typedef struct page *new_page_t(struct page *page, unsigned long private);
   11  typedef void free_page_t(struct page *page, unsigned long private);
   46          struct page *newpage, struct page *page,
   51  extern struct page *alloc_migration_target(struct page *page, unsigned long private);
   52  extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
   54  extern void migrate_page_states(struct page *newpage, struct page *page);
   55  extern void migrate_page_copy(struct page *newpage, struct page *page);
   57          struct page *newpage, struct page *page);
   59          struct page *newpage, struct page *page, int extra_count);
   67  static inline struct page *alloc_migration_target(struct page *page,
  [all …]
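new_page_t is the allocation callback migrate_pages() invokes per page; alloc_migration_target() above is the stock implementation. A sketch of a custom callback, assuming the caller passes a target node id through the private argument (alloc_on_node_sketch is illustrative):

static struct page *alloc_on_node_sketch(struct page *page, unsigned long private)
{
    int nid = (int)private;    /* target node smuggled in by the caller */

    return __alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}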
mm.h
  152  static inline void __mm_zero_struct_page(struct page *page)
  154      unsigned long *_pp = (void *)page;
  157      BUILD_BUG_ON(sizeof(struct page) & 7);
  158      BUILD_BUG_ON(sizeof(struct page) < 56);
  159      BUILD_BUG_ON(sizeof(struct page) > 80);
  161      switch (sizeof(struct page)) {
  182  #define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page)))
  224  int __add_to_page_cache_locked(struct page *page, struct address_space *mapping,
  228  #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
  230  #define nth_page(page,n) ((page) + (n))
  [all …]
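The two nth_page() variants above reflect that with SPARSEMEM and no vmemmap, the struct pages of one compound page need not be virtually contiguous, so plain pointer arithmetic is wrong. A sketch of correct subpage iteration (zero_subpages_sketch is a hypothetical helper):

static void zero_subpages_sketch(struct page *head, unsigned int nr)
{
    unsigned int i;

    for (i = 0; i < nr; i++)
        clear_highpage(nth_page(head, i));  /* safe across section gaps */
}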
page_owner.h
   11  extern void __reset_page_owner(struct page *page, unsigned int order);
   12  extern void __set_page_owner(struct page *page,
   14  extern void __split_page_owner(struct page *page, unsigned int nr);
   15  extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
   16  extern void __set_page_owner_migrate_reason(struct page *page, int reason);
   17  extern void __dump_page_owner(const struct page *page);
   21  static inline void reset_page_owner(struct page *page, unsigned int order)
   24      __reset_page_owner(page, order);
   27  static inline void set_page_owner(struct page *page,
   31      __set_page_owner(page, order, gfp_mask);
  [all …]
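The inline wrappers shown above cost nothing unless page_owner was enabled at boot; a sketch of the static-key guard they use, assumed from the page_owner_inited key this header declares (reset_page_owner_sketch is a made-up name):

DECLARE_STATIC_KEY_FALSE(page_owner_inited);

static inline void reset_page_owner_sketch(struct page *page, unsigned int order)
{
    /* patched-out branch until "page_owner=on" flips the key */
    if (static_branch_unlikely(&page_owner_inited))
        __reset_page_owner(page, order);
}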
hugetlb_cgroup.h
   63  __hugetlb_cgroup_from_page(struct page *page, bool rsvd)
   65      VM_BUG_ON_PAGE(!PageHuge(page), page);
   67      if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
   70          return (void *)page_private(page + SUBPAGE_INDEX_CGROUP_RSVD);
   72          return (void *)page_private(page + SUBPAGE_INDEX_CGROUP);
   75  static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
   77      return __hugetlb_cgroup_from_page(page, false);
   81  hugetlb_cgroup_from_page_rsvd(struct page *page)
   83      return __hugetlb_cgroup_from_page(page, true);
   86  static inline int __set_hugetlb_cgroup(struct page *page,
  [all …]
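The lookup above works because the cgroup pointer is parked in the page_private field of a fixed tail page of the huge page. The store side is symmetric; a hedged sketch (set_hugetlb_cgroup_sketch is illustrative):

/* Sketch: stash the cgroup pointer in a dedicated tail page. */
static void set_hugetlb_cgroup_sketch(struct page *head,
                                      struct hugetlb_cgroup *h_cg)
{
    set_page_private(head + SUBPAGE_INDEX_CGROUP, (unsigned long)h_cg);
}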
highmem-internal.h
   10  void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
   35  void *kmap_high(struct page *page);
   36  void kunmap_high(struct page *page);
   38  struct page *__kmap_to_page(void *addr);
   40  static inline void *kmap(struct page *page)
   45      if (!PageHighMem(page))
   46          addr = page_address(page);
   48          addr = kmap_high(page);
   53  static inline void kunmap(struct page *page)
   56      if (!PageHighMem(page))
  [all …]
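kmap() above shows the highmem split: lowmem pages already have a kernel address, highmem pages get one from kmap_high(). A usage sketch of the sleepable pair (fill_page_sketch is a made-up name):

static void fill_page_sketch(struct page *page, int c)
{
    void *addr = kmap(page);    /* may sleep; persistent mapping */

    memset(addr, c, PAGE_SIZE);
    kunmap(page);               /* release the kmap slot */
}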
highmem.h
   37  static inline void *kmap(struct page *page);
   46  static inline void kunmap(struct page *page);
   54  static inline struct page *kmap_to_page(void *addr);
   97  static inline void *kmap_local_page(struct page *page);
  110  static inline void *kmap_atomic(struct page *page);
  128  static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma…
  144  static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
  146      void *addr = kmap_atomic(page);
  147      clear_user_page(addr, vaddr, page);
  165  static inline struct page *
  [all …]
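Of the mapping flavors declared here, kmap_local_page() is the preferred short-lived one. A usage sketch; note that kunmap_local() takes the mapped address, not the page (copy_from_page_sketch is illustrative):

static void copy_from_page_sketch(void *dst, struct page *page, size_t len)
{
    void *src = kmap_local_page(page);  /* cheap, CPU-local mapping */

    memcpy(dst, src, len);
    kunmap_local(src);
}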
swap.h
  325  void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
  326  void workingset_refault(struct page *page, void *shadow);
  327  void workingset_activation(struct page *page);
  347  extern void lru_note_cost_page(struct page *);
  348  extern void lru_cache_add(struct page *);
  349  extern void mark_page_accessed(struct page *);
  368  extern void rotate_reclaimable_page(struct page *page);
  369  extern void deactivate_file_page(struct page *page);
  370  extern void deactivate_page(struct page *page);
  371  extern void mark_page_lazyfree(struct page *page);
  [all …]
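A sketch of how a freshly populated page typically meets these LRU entry points (publish_page_sketch is a made-up name):

static void publish_page_sketch(struct page *page)
{
    lru_cache_add(page);        /* queue on the per-CPU LRU batch */
    mark_page_accessed(page);   /* record the access for reclaim aging */
}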
mm_inline.h
   22  static inline int page_is_file_lru(struct page *page)
   24      return !PageSwapBacked(page);
   45  static __always_inline void __clear_page_lru_flags(struct page *page)
   47      VM_BUG_ON_PAGE(!PageLRU(page), page);
   49      __ClearPageLRU(page);
   52      if (PageActive(page) && PageUnevictable(page))
   55      __ClearPageActive(page);
   56      __ClearPageUnevictable(page);
   66  static __always_inline enum lru_list page_lru(struct page *page)
   70      VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
  [all …]
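The body of page_lru() that the excerpt truncates reduces to a small decision tree; a sketch matching the v5.15 logic (a hedged reconstruction, not a verbatim copy):

static enum lru_list page_lru_sketch(struct page *page)
{
    enum lru_list lru;

    if (PageUnevictable(page))
        return LRU_UNEVICTABLE;
    /* file vs. anon picks the base list, active shifts one slot */
    lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
    if (PageActive(page))
        lru += LRU_ACTIVE;
    return lru;
}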
/Linux-v5.15/mm/
swap.c
   80  static void __page_cache_release(struct page *page)
   82      if (PageLRU(page)) {
   86          lruvec = lock_page_lruvec_irqsave(page, &flags);
   87          del_page_from_lru_list(page, lruvec);
   88          __clear_page_lru_flags(page);
   91      __ClearPageWaiters(page);
   94  static void __put_single_page(struct page *page)
   96      __page_cache_release(page);
   97      mem_cgroup_uncharge(page);
   98      free_unref_page(page, 0);
  [all …]
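__put_single_page() above is the terminus of the release path; a sketch of how the final reference reaches it, ignoring compound and ZONE_DEVICE pages (put_page_sketch is a made-up name, conceptually living next to the code above):

static void put_page_sketch(struct page *page)
{
    if (put_page_testzero(page))    /* we dropped the last reference */
        __put_single_page(page);    /* LRU removal, uncharge, free */
}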
filemap.c
  125                      struct page *page, void *shadow)
  127      XA_STATE(xas, &mapping->i_pages, page->index);
  133      if (!PageHuge(page)) {
  134          xas_set_order(&xas, page->index, compound_order(page));
  135          nr = compound_nr(page);
  138      VM_BUG_ON_PAGE(!PageLocked(page), page);
  139      VM_BUG_ON_PAGE(PageTail(page), page);
  140      VM_BUG_ON_PAGE(nr != 1 && shadow, page);
  145      page->mapping = NULL;
  151                      struct page *page)
  [all …]
migrate.c
   61  int isolate_movable_page(struct page *page, isolate_mode_t mode)
   74      if (unlikely(!get_page_unless_zero(page)))
   82      if (unlikely(!__PageMovable(page)))
   95      if (unlikely(!trylock_page(page)))
   98      if (!PageMovable(page) || PageIsolated(page))
  101      mapping = page_mapping(page);
  102      VM_BUG_ON_PAGE(!mapping, page);
  104      if (!mapping->a_ops->isolate_page(page, mode))
  108      WARN_ON_ONCE(PageIsolated(page));
  109      __SetPageIsolated(page);
  [all …]
rmap.c
  486  struct anon_vma *page_get_anon_vma(struct page *page)
  492      anon_mapping = (unsigned long)READ_ONCE(page->mapping);
  495      if (!page_mapped(page))
  511      if (!page_mapped(page)) {
  529  struct anon_vma *page_lock_anon_vma_read(struct page *page)
  536      anon_mapping = (unsigned long)READ_ONCE(page->mapping);
  539      if (!page_mapped(page))
  550      if (!page_mapped(page)) {
  563      if (!page_mapped(page)) {
  709  unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
  [all …]
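The READ_ONCE() on page->mapping above is decoding tag bits: an anon page stores its anon_vma pointer with PAGE_MAPPING_ANON set in the low bits. A sketch of the decode (decode_anon_mapping_sketch is illustrative):

static struct anon_vma *decode_anon_mapping_sketch(struct page *page)
{
    unsigned long m = (unsigned long)READ_ONCE(page->mapping);

    if ((m & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
        return NULL;            /* file-backed or movable page */
    return (struct anon_vma *)(m - PAGE_MAPPING_ANON);
}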
page_isolation.c
   18  static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
   20      struct zone *zone = page_zone(page);
   21      struct page *unmovable;
   31      if (is_migrate_isolate_page(page)) {
   40      unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
   43          int mt = get_pageblock_migratetype(page);
   45          set_pageblock_migratetype(page, MIGRATE_ISOLATE);
   47          nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
   67  static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
   74      struct page *buddy;
  [all …]
page_io.c
   31      struct page *page = bio_first_page_all(bio);
   34          SetPageError(page);
   43          set_page_dirty(page);
   47          ClearPageReclaim(page);
   49      end_page_writeback(page);
   53  static void swap_slot_free_notify(struct page *page)
   65      if (unlikely(!PageSwapCache(page)))
   68      sis = page_swap_info(page);
   89      entry.val = page_private(page);
   95          SetPageDirty(page);
  [all …]
truncate.c
   80          struct page *page = pvec->pages[i];
   83          if (!xa_is_value(page)) {
   84              pvec->pages[j++] = page;
   93          __clear_shadow_entry(mapping, index, page);
  146  void do_invalidatepage(struct page *page, unsigned int offset,
  149      void (*invalidatepage)(struct page *, unsigned int, unsigned int);
  151      invalidatepage = page->mapping->a_ops->invalidatepage;
  157      (*invalidatepage)(page, offset, length);
  170  static void truncate_cleanup_page(struct page *page)
  172      if (page_mapped(page))
  [all …]
/Linux-v5.15/net/core/
page_pool.c
  110  static void page_pool_return_page(struct page_pool *pool, struct page *page);
  113  static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
  116      struct page *page;
  138      page = __ptr_ring_consume(r);
  139      if (unlikely(!page))
  142      if (likely(page_to_nid(page) == pref_nid)) {
  143          pool->alloc.cache[pool->alloc.count++] = page;
  150          page_pool_return_page(pool, page);
  151          page = NULL;
  158      page = pool->alloc.cache[--pool->alloc.count];
  [all …]
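page_pool_refill_alloc_cache() feeds the fast path drivers hit through the public allocator; a sketch of typical driver-side use (rx_alloc_sketch is a made-up name):

static struct page *rx_alloc_sketch(struct page_pool *pool)
{
    /* tries the per-CPU alloc cache, then the ptr_ring refill shown
     * above, then falls back to the page allocator; returned pages
     * recycle back into the pool via page_pool_put_page() */
    return page_pool_dev_alloc_pages(pool);
}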
/Linux-v5.15/fs/btrfs/
subpage.c
   67          struct page *page, enum btrfs_subpage_type type)
   76      if (page->mapping)
   77          ASSERT(PageLocked(page));
   79      if (fs_info->sectorsize == PAGE_SIZE || PagePrivate(page))
   85      attach_page_private(page, subpage);
   90          struct page *page)
   95      if (fs_info->sectorsize == PAGE_SIZE || !PagePrivate(page))
   98      subpage = (struct btrfs_subpage *)detach_page_private(page);
  138          struct page *page)
  145      ASSERT(PagePrivate(page) && page->mapping);
  [all …]
/Linux-v5.15/fs/9p/
cache.h
   30  extern int __v9fs_fscache_release_page(struct page *page, gfp_t gfp);
   31  extern void __v9fs_fscache_invalidate_page(struct page *page);
   33          struct page *page);
   38  extern void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page);
   40          struct page *page);
   42  static inline int v9fs_fscache_release_page(struct page *page,
   45      return __v9fs_fscache_release_page(page, gfp);
   48  static inline void v9fs_fscache_invalidate_page(struct page *page)
   50      __v9fs_fscache_invalidate_page(page);
   54          struct page *page)
  [all …]
/Linux-v5.15/fs/jfs/
jfs_metapage.c
   48          unlock_page(mp->page);
   50          lock_page(mp->page);
   79  #define mp_anchor(page) ((struct meta_anchor *)page_private(page))
   81  static inline struct metapage *page_to_mp(struct page *page, int offset)
   83      if (!PagePrivate(page))
   85      return mp_anchor(page)->mp[offset >> L2PSIZE];
   88  static inline int insert_metapage(struct page *page, struct metapage *mp)
   94      if (PagePrivate(page))
   95          a = mp_anchor(page);
  100      set_page_private(page, (unsigned long)a);
  [all …]
/Linux-v5.15/fs/sysv/
dir.c
   31  static inline void dir_put_page(struct page *page)
   33      kunmap(page);
   34      put_page(page);
   37  static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
   39      struct address_space *mapping = page->mapping;
   43      block_write_end(NULL, mapping, pos, len, len, page, NULL);
   49      err = write_one_page(page);
   51      unlock_page(page);
   55  static struct page *dir_get_page(struct inode *dir, unsigned long n)
   58      struct page *page = read_mapping_page(mapping, n, NULL);
  [all …]
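dir_get_page()/dir_put_page() bracket the classic pagecache directory walk: read the block through the mapping, kmap it, and undo both when done. A sketch of the read side (dir_block_sketch is illustrative):

static void *dir_block_sketch(struct inode *dir, unsigned long n,
                              struct page **p)
{
    struct page *page = read_mapping_page(dir->i_mapping, n, NULL);

    if (IS_ERR(page))
        return ERR_CAST(page);  /* propagate the error pointer */
    *p = page;
    return kmap(page);          /* undo with dir_put_page(*p) */
}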
