
Searched full:page (Results 1 – 25 of 5011) sorted by relevance


/Linux-v5.15/include/linux/
page_ref.h
7 #include <linux/page-flags.h>
29 extern void __page_ref_set(struct page *page, int v);
30 extern void __page_ref_mod(struct page *page, int v);
31 extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
32 extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
33 extern void __page_ref_mod_unless(struct page *page, int v, int u);
34 extern void __page_ref_freeze(struct page *page, int v, int ret);
35 extern void __page_ref_unfreeze(struct page *page, int v);
41 static inline void __page_ref_set(struct page *page, int v) in __page_ref_set() argument
44 static inline void __page_ref_mod(struct page *page, int v) in __page_ref_mod() argument
[all …]
page-flags.h
3 * Macros for manipulating and testing page->flags
18 * Various page->flags bits:
20 * PG_reserved is set for special pages. The "struct page" of such a page
25 * - Pages reserved or allocated early during boot (before the page allocator
27 * initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
29 * be given to the page allocator.
32 * - The zero page(s)
33 * - Pages not added to the page allocator when onlining a section because
49 * Consequently, PG_reserved for a page mapped into user space can indicate
50 * the zero page, the vDSO, MMIO pages or device memory.
[all …]
balloon_compaction.h
7 * Balloon page migration makes use of the general non-lru movable page
10 * page->private is used to reference the responsible balloon device.
11 * page->mapping is used in context of non-lru page migration to reference
12 * the address space operations for page isolation/migration/compaction.
14 * As the page isolation scanning step a compaction thread does is a lockless
15 * procedure (from a page standpoint), it might bring some racy situations while
16 * performing balloon page compaction. In order to sort out these racy scenarios
17 * and safely perform balloon's page compaction and migration we must, always,
20 * i. when updating a balloon's page ->mapping element, strictly do it under
23 * +-page_lock(page);
[all …]
pagemap.h
163 void release_pages(struct page **pages, int nr);
168 static inline struct address_space *page_mapping_file(struct page *page) in page_mapping_file() argument
170 if (unlikely(PageSwapCache(page))) in page_mapping_file()
172 return page_mapping(page); in page_mapping_file()
176 * speculatively take a reference to a page.
177 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
181 * been used to lookup the page in the pagecache radix-tree (or page table):
187 * page has been finished with, no matter what it is subsequently allocated
194 * 1. find page in radix tree
196 * 3. check the page is still in pagecache (if no, goto 1)
[all …]
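
The pagemap.h excerpt above describes the speculative (lockless) page-reference protocol used for pagecache lookups. Below is a minimal sketch of that pattern, assuming a 5.15-era kernel context; it is illustrative only, not the kernel's own find_get_page() path, and the helper name lookup_page_speculative() is hypothetical.

    /* Hypothetical sketch of the speculative-reference pattern from pagemap.h. */
    #include <linux/pagemap.h>
    #include <linux/mm.h>
    #include <linux/xarray.h>
    #include <linux/rcupdate.h>

    static struct page *lookup_page_speculative(struct address_space *mapping,
                                                pgoff_t index)
    {
        struct page *page;

        rcu_read_lock();
    repeat:
        /* 1. find the page in the pagecache (xarray, formerly a radix tree) */
        page = xa_load(&mapping->i_pages, index);
        if (!page || xa_is_value(page)) {
            page = NULL;
            goto out;
        }

        /* 2. speculatively take a reference; fails if _refcount is zero */
        if (!get_page_unless_zero(page))
            goto repeat;

        /* 3. check the page is still in the pagecache (if not, retry) */
        if (unlikely(page->mapping != mapping || page->index != index)) {
            put_page(page);
            goto repeat;
        }
    out:
        rcu_read_unlock();
        return page;
    }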
mm_inline.h
9 * page_is_file_lru - should the page be on a file LRU or anon LRU?
10 * @page: the page to test
12 * Returns 1 if @page is a regular filesystem backed page cache page or a lazily
13 * freed anonymous page (e.g. via MADV_FREE). Returns 0 if @page is a normal
14 * anonymous page, a tmpfs page or otherwise ram or swap backed page. Used by
15 * functions that manipulate the LRU lists, to sort a page onto the right LRU
18 * We would like to get this info without a page flag, but the state
19 * needs to survive until the page is last deleted from the LRU, which
22 static inline int page_is_file_lru(struct page *page) in page_is_file_lru() argument
24 return !PageSwapBacked(page); in page_is_file_lru()
[all …]
page_idle.h
6 #include <linux/page-flags.h>
12 static inline bool page_is_young(struct page *page) in page_is_young() argument
14 return PageYoung(page); in page_is_young()
17 static inline void set_page_young(struct page *page) in set_page_young() argument
19 SetPageYoung(page); in set_page_young()
22 static inline bool test_and_clear_page_young(struct page *page) in test_and_clear_page_young() argument
24 return TestClearPageYoung(page); in test_and_clear_page_young()
27 static inline bool page_is_idle(struct page *page) in page_is_idle() argument
29 return PageIdle(page); in page_is_idle()
32 static inline void set_page_idle(struct page *page) in set_page_idle() argument
[all …]
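
The page_idle.h helpers above are the building blocks of idle-page tracking. A hedged sketch of how a scanner might combine them, assuming CONFIG_PAGE_IDLE_FLAG is enabled; mark_and_test_idle() is a made-up name, not a kernel function.

    #include <linux/page_idle.h>

    /* Hypothetical helper: report whether the page stayed idle since the
     * previous scan, then re-arm the idle bit for the next interval. */
    static bool mark_and_test_idle(struct page *page)
    {
        bool was_idle;

        /* A set young bit means the page was referenced since the last scan. */
        if (test_and_clear_page_young(page))
            clear_page_idle(page);

        was_idle = page_is_idle(page);

        /* Arm the idle bit; any later access clears it again. */
        set_page_idle(page);
        return was_idle;
    }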
page_owner.h
11 extern void __reset_page_owner(struct page *page, unsigned int order);
12 extern void __set_page_owner(struct page *page,
14 extern void __split_page_owner(struct page *page, unsigned int nr);
15 extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
16 extern void __set_page_owner_migrate_reason(struct page *page, int reason);
17 extern void __dump_page_owner(const struct page *page);
21 static inline void reset_page_owner(struct page *page, unsigned int order) in reset_page_owner() argument
24 __reset_page_owner(page, order); in reset_page_owner()
27 static inline void set_page_owner(struct page *page, in set_page_owner() argument
31 __set_page_owner(page, order, gfp_mask); in set_page_owner()
[all …]
mm.h
27 #include <linux/page-flags.h>
101 #include <asm/page.h>
129 * a zero page mapping on a read fault.
132 * related to the physical page in case of virtualization.
145 /* This function must be updated when the size of struct page grows above 80
152 static inline void __mm_zero_struct_page(struct page *page) in __mm_zero_struct_page() argument
154 unsigned long *_pp = (void *)page; in __mm_zero_struct_page()
156 /* Check that struct page is either 56, 64, 72, or 80 bytes */ in __mm_zero_struct_page()
157 BUILD_BUG_ON(sizeof(struct page) & 7); in __mm_zero_struct_page()
158 BUILD_BUG_ON(sizeof(struct page) < 56); in __mm_zero_struct_page()
[all …]
migrate.h
10 typedef struct page *new_page_t(struct page *page, unsigned long private);
11 typedef void free_page_t(struct page *page, unsigned long private);
17 * - negative errno on page migration failure;
18 * - zero on page migration success;
46 struct page *newpage, struct page *page,
51 extern struct page *alloc_migration_target(struct page *page, unsigned long private);
52 extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
54 extern void migrate_page_states(struct page *newpage, struct page *page);
55 extern void migrate_page_copy(struct page *newpage, struct page *page);
57 struct page *newpage, struct page *page);
[all …]
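
The new_page_t and free_page_t typedefs above are the allocation and release callbacks that the migration core invokes for each page it moves. A minimal, hedged sketch of a conforming pair, assuming the caller packs the destination node into the private argument (the names and allocation policy are illustrative, not taken from the kernel):

    #include <linux/migrate.h>
    #include <linux/gfp.h>

    /* Hypothetical new_page_t: allocate the replacement page on a target node. */
    static struct page *demo_new_page(struct page *page, unsigned long private)
    {
        int nid = (int)private;    /* destination node packed by the caller */

        return __alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
    }

    /* Hypothetical free_page_t: drop a replacement page that ended up unused. */
    static void demo_free_page(struct page *page, unsigned long private)
    {
        __free_page(page);
    }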
huge_mm.h
28 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
48 * @pgprot: page protection to use
67 * @pgprot: page protection to use
184 void prep_transhuge_page(struct page *page);
185 void free_transhuge_page(struct page *page);
186 bool is_transparent_hugepage(struct page *page);
188 bool can_split_huge_page(struct page *page, int *pextra_pins);
189 int split_huge_page_to_list(struct page *page, struct list_head *list);
190 static inline int split_huge_page(struct page *page) in split_huge_page() argument
192 return split_huge_page_to_list(page, NULL); in split_huge_page()
[all …]
highmem.h
17 * kmap - Map a page for long term usage
18 * @page: Pointer to the page to be mapped
37 static inline void *kmap(struct page *page);
46 static inline void kunmap(struct page *page);
49 * kmap_to_page - Get the page for a kmap'ed address
52 * Returns: The page which is mapped to @addr.
54 static inline struct page *kmap_to_page(void *addr);
63 * kmap_local_page - Map a page for temporary usage
64 * @page: Pointer to the page to be mapped
93 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
[all …]
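
A short usage sketch for the temporary-mapping interface documented above, assuming 5.15's kmap_local_page()/kunmap_local() and that len does not exceed PAGE_SIZE; the helper name is hypothetical.

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Hypothetical helper: copy @len bytes (len <= PAGE_SIZE) out of a
     * possibly-highmem page through a short-lived, CPU-local mapping. */
    static void copy_from_page_demo(struct page *page, void *dst, size_t len)
    {
        void *vaddr = kmap_local_page(page);

        memcpy(dst, vaddr, len);
        kunmap_local(vaddr);    /* local mappings are released in reverse order */
    }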
hugetlb_cgroup.h
26 * Minimum page order trackable by hugetlb cgroup.
28 * The second tail page (hpage[SUBPAGE_INDEX_CGROUP]) is the fault
29 * usage cgroup. The third tail page (hpage[SUBPAGE_INDEX_CGROUP_RSVD])
63 __hugetlb_cgroup_from_page(struct page *page, bool rsvd) in __hugetlb_cgroup_from_page() argument
65 VM_BUG_ON_PAGE(!PageHuge(page), page); in __hugetlb_cgroup_from_page()
67 if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) in __hugetlb_cgroup_from_page()
70 return (void *)page_private(page + SUBPAGE_INDEX_CGROUP_RSVD); in __hugetlb_cgroup_from_page()
72 return (void *)page_private(page + SUBPAGE_INDEX_CGROUP); in __hugetlb_cgroup_from_page()
75 static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) in hugetlb_cgroup_from_page() argument
77 return __hugetlb_cgroup_from_page(page, false); in hugetlb_cgroup_from_page()
[all …]
/Linux-v5.15/mm/
swap.c
46 /* How many pages do we try to swap or page in/out together? */
80 static void __page_cache_release(struct page *page) in __page_cache_release() argument
82 if (PageLRU(page)) { in __page_cache_release()
86 lruvec = lock_page_lruvec_irqsave(page, &flags); in __page_cache_release()
87 del_page_from_lru_list(page, lruvec); in __page_cache_release()
88 __clear_page_lru_flags(page); in __page_cache_release()
91 __ClearPageWaiters(page); in __page_cache_release()
94 static void __put_single_page(struct page *page) in __put_single_page() argument
96 __page_cache_release(page); in __put_single_page()
97 mem_cgroup_uncharge(page); in __put_single_page()
[all …]
filemap.c
65 * finished 'unifying' the page and buffer cache and SMP-threaded the
66 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
125 struct page *page, void *shadow) in page_cache_delete() argument
127 XA_STATE(xas, &mapping->i_pages, page->index); in page_cache_delete()
133 if (!PageHuge(page)) { in page_cache_delete()
134 xas_set_order(&xas, page->index, compound_order(page)); in page_cache_delete()
135 nr = compound_nr(page); in page_cache_delete()
138 VM_BUG_ON_PAGE(!PageLocked(page), page); in page_cache_delete()
139 VM_BUG_ON_PAGE(PageTail(page), page); in page_cache_delete()
140 VM_BUG_ON_PAGE(nr != 1 && shadow, page); in page_cache_delete()
[all …]
migrate.c
7 * Page migration was first developed in the context of the memory hotplug
61 int isolate_movable_page(struct page *page, isolate_mode_t mode) in isolate_movable_page() argument
69 * In case we 'win' a race for a movable page being freed under us and in isolate_movable_page()
72 * release this page, thus avoiding a nasty leakage. in isolate_movable_page()
74 if (unlikely(!get_page_unless_zero(page))) in isolate_movable_page()
78 * Check PageMovable before holding a PG_lock because page's owner in isolate_movable_page()
79 * assumes anybody doesn't touch PG_lock of newly allocated page in isolate_movable_page()
80 * so unconditionally grabbing the lock ruins page's owner side. in isolate_movable_page()
82 if (unlikely(!__PageMovable(page))) in isolate_movable_page()
86 * compaction threads can race against page migration functions in isolate_movable_page()
[all …]
rmap.c
10 * Provides methods for unmapping each kind of mapped page:
26 * page->flags PG_locked (lock_page) * (see hugetlbfs below)
29 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
51 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
52 * page->flags PG_locked (lock_page)
273 * searches where page is mapped.
463 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
467 * that might have been relevant to this page.
469 * The page might have been remapped to a different anon_vma or the anon_vma
474 * ensure that any anon_vma obtained from the page will still be valid for as
[all …]
balloon_compaction.c
15 struct page *page) in balloon_page_enqueue_one() argument
18 * Block others from accessing the 'page' when we get around to in balloon_page_enqueue_one()
20 * holding a reference to the 'page' at this point. If we are not, then in balloon_page_enqueue_one()
23 BUG_ON(!trylock_page(page)); in balloon_page_enqueue_one()
24 balloon_page_insert(b_dev_info, page); in balloon_page_enqueue_one()
25 unlock_page(page); in balloon_page_enqueue_one()
30 * balloon_page_list_enqueue() - inserts a list of pages into the balloon page
32 * @b_dev_info: balloon device descriptor where we will insert a new page to
43 struct page *page, *tmp; in balloon_page_list_enqueue() local
48 list_for_each_entry_safe(page, tmp, pages, lru) { in balloon_page_list_enqueue()
[all …]
page_io.c
10 * Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
31 struct page *page = bio_first_page_all(bio); in end_swap_bio_write() local
34 SetPageError(page); in end_swap_bio_write()
36 * We failed to write the page out to swap-space. in end_swap_bio_write()
37 * Re-dirty the page in order to avoid it being reclaimed. in end_swap_bio_write()
43 set_page_dirty(page); in end_swap_bio_write()
47 ClearPageReclaim(page); in end_swap_bio_write()
49 end_page_writeback(page); in end_swap_bio_write()
53 static void swap_slot_free_notify(struct page *page) in swap_slot_free_notify() argument
60 * There is no guarantee that the page is in swap cache - the software in swap_slot_free_notify()
[all …]
/Linux-v5.15/fs/btrfs/
subpage.c
13 * - Only support 64K page size for now
14 * This is to make metadata handling easier, as 64K page would ensure
15 * all nodesize would fit inside one page, thus we don't need to handle
21 * - Metadata can't cross 64K page boundary
31 * needed range, other unrelated range in the same page will not be touched.
34 * The writeback is still for the full page, but we will only submit
35 * the dirty extent buffers in the page.
37 * This means, if we have a metadata page like this:
39 * Page offset
53 * record the status of each sector inside a page. This provides the extra
[all …]
subpage.h
9 * Maximum page size we support is 64K, minimum sector size is 4K, u16 bitmap
15 * Structure to trace status of each sector inside a page, attached to
16 * page::private for both data and metadata inodes.
27 * page.
28 * Data relies on @readers to unlock the page when last reader finished.
29 * While metadata doesn't need page unlock, it needs to prevent
30 * page::private get cleared before the last end_page_read().
57 struct page *page, enum btrfs_subpage_type type);
59 struct page *page);
61 /* Allocate additional data where page represents more than one sector */
[all …]
/Linux-v5.15/net/core/
page_pool.c
18 #include <linux/page-flags.h>
48 * DMA_BIDIRECTIONAL is for allowing page used for DMA sending, in page_pool_init()
58 /* In order to request DMA-sync-for-device the page in page_pool_init()
110 static void page_pool_return_page(struct page_pool *pool, struct page *page);
113 static struct page *page_pool_refill_alloc_cache(struct page_pool *pool) in page_pool_refill_alloc_cache()
116 struct page *page; in page_pool_refill_alloc_cache() local
138 page = __ptr_ring_consume(r); in page_pool_refill_alloc_cache()
139 if (unlikely(!page)) in page_pool_refill_alloc_cache()
142 if (likely(page_to_nid(page) == pref_nid)) { in page_pool_refill_alloc_cache()
143 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_refill_alloc_cache()
[all …]
/Linux-v5.15/Documentation/vm/
page_migration.rst
4 Page migration
7 Page migration allows moving the physical location of pages between
15 The main intent of page migration is to reduce the latency of memory accesses
19 Page migration allows a process to manually relocate the node on which its
25 Page migration functions are provided by the numactl package by Andi Kleen
28 which provides an interface similar to other NUMA functionality for page
31 proc(5) man page.
37 manual page migration support. Automatic page migration may be implemented
54 Page migration allows the preservation of the relative location of pages
60 Page migration occurs in several steps. First a high level
[all …]
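
As a user-space illustration of the manual migration this document describes, the sketch below moves one of the calling process's pages with the move_pages(2) system call via libnuma's <numaif.h>. It is a hedged example, not part of the documentation: the target node is arbitrary and a 4 KiB page size is assumed.

    /* Build with: gcc move_demo.c -lnuma (illustrative only). */
    #include <numaif.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        void *pages[1];
        int nodes[1] = { 1 };          /* desired destination node (example) */
        int status[1];

        pages[0] = aligned_alloc(4096, 4096);
        memset(pages[0], 0, 4096);     /* fault the page in before moving it */

        /* pid 0 means "the calling process"; status[0] reports the result. */
        if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) < 0)
            perror("move_pages");
        else
            printf("page is now on node %d\n", status[0]);

        free(pages[0]);
        return 0;
    }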
/Linux-v5.15/fs/jfs/
jfs_metapage.c
25 uint pagealloc; /* # of page allocations */
26 uint pagefree; /* # of page frees */
48 unlock_page(mp->page); in __lock_metapage()
50 lock_page(mp->page); in __lock_metapage()
58 * Must have mp->page locked
79 #define mp_anchor(page) ((struct meta_anchor *)page_private(page)) argument
81 static inline struct metapage *page_to_mp(struct page *page, int offset) in page_to_mp() argument
83 if (!PagePrivate(page)) in page_to_mp()
85 return mp_anchor(page)->mp[offset >> L2PSIZE]; in page_to_mp()
88 static inline int insert_metapage(struct page *page, struct metapage *mp) in insert_metapage() argument
[all …]
/Linux-v5.15/fs/sysv/
dir.c
31 static inline void dir_put_page(struct page *page) in dir_put_page() argument
33 kunmap(page); in dir_put_page()
34 put_page(page); in dir_put_page()
37 static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len) in dir_commit_chunk() argument
39 struct address_space *mapping = page->mapping; in dir_commit_chunk()
43 block_write_end(NULL, mapping, pos, len, len, page, NULL); in dir_commit_chunk()
49 err = write_one_page(page); in dir_commit_chunk()
51 unlock_page(page); in dir_commit_chunk()
55 static struct page * dir_get_page(struct inode *dir, unsigned long n) in dir_get_page()
58 struct page *page = read_mapping_page(mapping, n, NULL); in dir_get_page() local
[all …]
/Linux-v5.15/fs/9p/
vfs_addr.c
32 * v9fs_fid_readpage - read an entire page in from 9P
34 * @page: structure to page
37 static int v9fs_fid_readpage(void *data, struct page *page) in v9fs_fid_readpage() argument
40 struct inode *inode = page->mapping->host; in v9fs_fid_readpage()
41 struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE}; in v9fs_fid_readpage()
47 BUG_ON(!PageLocked(page)); in v9fs_fid_readpage()
49 retval = v9fs_readpage_from_fscache(inode, page); in v9fs_fid_readpage()
55 retval = p9_client_read(fid, page_offset(page), &to, &err); in v9fs_fid_readpage()
57 v9fs_uncache_page(inode, page); in v9fs_fid_readpage()
62 zero_user(page, retval, PAGE_SIZE - retval); in v9fs_fid_readpage()
[all …]
