| /Linux-v6.1/include/linux/ |
| D | page-flags.h | Macros for manipulating and testing page->flags … Various page->flags bits: … PG_reserved is set for special pages. The "struct page" of such a page … - Pages reserved or allocated early during boot (before the page allocator … initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much … be given to the page allocator. … - The zero page(s) - Pages not added to the page allocator when onlining a section because … Consequently, PG_reserved for a page mapped into user space can indicate the zero page, the vDSO, MMIO pages or device memory. …
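The PG_reserved description above is the kind of flag a PFN walker has to respect. A minimal hedged sketch, with an invented helper name, of testing it through the PageReserved() accessor that page-flags.h generates:

```c
#include <linux/mm.h>
#include <linux/page-flags.h>

/* Hypothetical helper (not from page-flags.h): true if the page backing
 * @pfn is ordinary memory a caller may consider for its own bookkeeping. */
static bool pfn_is_ordinary(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);

	/* PG_reserved covers the zero page, early-boot allocations,
	 * MMIO/device memory and similar; such pages must be left alone
	 * and never handed back to the page allocator. */
	return !PageReserved(page);
}
```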
|
| D | balloon_compaction.h | Balloon page migration makes use of the general non-lru movable page … page->private is used to reference the responsible balloon device. page->mapping is used in context of non-lru page migration to reference the address space operations for page isolation/migration/compaction. … As the page isolation scanning step a compaction thread does is a lockless procedure (from a page standpoint), it might bring some racy situations while performing balloon page compaction. In order to sort out these racy scenarios and safely perform balloon's page compaction and migration we must, always, … i. when updating a balloon's page ->mapping element, strictly do it under … +-page_lock(page); …
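A hedged sketch of the lock ordering this header prescribes. The function below is invented for illustration; only pages_lock and balloon_page_delete() are real names from balloon_compaction.h:

```c
#include <linux/balloon_compaction.h>
#include <linux/pagemap.h>

/* Hypothetical: drop one page from a balloon while racing with compaction.
 * The page lock is taken first, then the device's pages_lock, as required
 * for anything that touches page->mapping or page->lru of a balloon page. */
static void balloon_drop_page(struct balloon_dev_info *b_dev_info,
			      struct page *page)
{
	unsigned long flags;

	lock_page(page);
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_delete(page);	/* clears movable state, unlinks lru */
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
	unlock_page(page);
}
```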
|
| D | page_ref.h | #include <linux/page-flags.h> … extern void __page_ref_set(struct page *page, int v); extern void __page_ref_mod(struct page *page, int v); extern void __page_ref_mod_and_test(struct page *page, int v, int ret); extern void __page_ref_mod_and_return(struct page *page, int v, int ret); extern void __page_ref_mod_unless(struct page *page, int v, int u); extern void __page_ref_freeze(struct page *page, int v, int ret); extern void __page_ref_unfreeze(struct page *page, int v); … static inline void __page_ref_set(struct page *page, int v) … static inline void __page_ref_mod(struct page *page, int v) …
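These declarations are the tracepoint hooks behind the page_ref_* helpers. A small hedged example of caller-side use; the wrapper name is made up, and page_ref_add_unless() is the primitive that helpers such as get_page_unless_zero() build on:

```c
#include <linux/page_ref.h>

/* Hypothetical: take an extra reference only if the page is not already
 * on its way to being freed (refcount already zero). */
static bool my_try_get_page(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}
```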
|
| D | highmem.h | kmap - Map a page for long term usage; @page: Pointer to the page to be mapped … static inline void *kmap(struct page *page); … @page: Pointer to the page which was mapped by kmap() … static inline void kunmap(struct page *page); … kmap_to_page - Get the page for a kmap'ed address … Returns: The page which is mapped to @addr. … static inline struct page *kmap_to_page(void *addr); … kmap_local_page - Map a page for temporary usage; @page: Pointer to the page to be mapped …
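The kmap_local_page() documentation above pairs naturally with a short usage sketch. This is a hypothetical caller, not code from highmem.h (the real header offers a similar memcpy_from_page() helper); it assumes only the documented kmap_local_page()/kunmap_local() pair:

```c
#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical caller: copy @len bytes out of a (possibly highmem) page.
 * kmap_local_page() is preferred over kmap() for short-lived mappings;
 * the mapping is CPU-local and must be torn down in reverse order. */
static void copy_out_of_page(struct page *page, size_t offset,
			     void *dst, size_t len)
{
	void *vaddr = kmap_local_page(page);

	memcpy(dst, vaddr + offset, len);
	kunmap_local(vaddr);
}
```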
|
| D | hugetlb_cgroup.h | Minimum page order trackable by hugetlb cgroup. … The second tail page (hpage[SUBPAGE_INDEX_CGROUP]) is the fault usage cgroup. The third tail page (hpage[SUBPAGE_INDEX_CGROUP_RSVD]) … __hugetlb_cgroup_from_page(struct page *page, bool rsvd) … VM_BUG_ON_PAGE(!PageHuge(page), page); … if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) … return (void *)page_private(page + SUBPAGE_INDEX_CGROUP_RSVD); … return (void *)page_private(page + SUBPAGE_INDEX_CGROUP); … static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) … return __hugetlb_cgroup_from_page(page, false); …
|
| D | highmem-internal.h | void *__kmap_local_page_prot(struct page *page, pgprot_t prot); … void *kmap_high(struct page *page); void kunmap_high(struct page *page); … struct page *__kmap_to_page(void *addr); … static inline void *kmap(struct page *page) … if (!PageHighMem(page)) addr = page_address(page); … addr = kmap_high(page); … static inline void kunmap(struct page *page) … if (!PageHighMem(page)) …
|
| D | migrate.h | typedef struct page *new_page_t(struct page *page, unsigned long private); typedef void free_page_t(struct page *page, unsigned long private); … - negative errno on page migration failure; - zero on page migration success; … struct movable_operations - Driver page migration … The VM calls this function to prepare the page to be moved. The page … return ``true`` if the page is movable and ``false`` if it is not … page->lru field, so the driver must preserve any information which … @src page. The driver should copy the contents of the @src page to the @dst page and set up the fields of @dst page. …
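A hedged sketch of a driver filling in struct movable_operations as described above. All mydrv_* names are invented and the bodies only outline the expected behaviour:

```c
#include <linux/migrate.h>

/* Hypothetical callbacks for a driver that owns non-lru movable pages. */
static bool mydrv_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* Take driver locks, unhook the page from internal lists and
	 * return true only if it can be migrated right now. */
	return true;
}

static int mydrv_migrate_page(struct page *dst, struct page *src,
			      enum migrate_mode mode)
{
	/* Copy contents and driver-private state from @src to @dst,
	 * then release the driver's hold on @src. */
	return MIGRATEPAGE_SUCCESS;
}

static void mydrv_putback_page(struct page *page)
{
	/* Migration did not happen: put the page back on the lists
	 * it was isolated from. */
}

static const struct movable_operations mydrv_movable_ops = {
	.isolate_page	= mydrv_isolate_page,
	.migrate_page	= mydrv_migrate_page,
	.putback_page	= mydrv_putback_page,
};
```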
|
| D | page_owner.h | extern void __reset_page_owner(struct page *page, unsigned short order); extern void __set_page_owner(struct page *page, … extern void __split_page_owner(struct page *page, unsigned int nr); … extern void __set_page_owner_migrate_reason(struct page *page, int reason); extern void __dump_page_owner(const struct page *page); … static inline void reset_page_owner(struct page *page, unsigned short order) … __reset_page_owner(page, order); … static inline void set_page_owner(struct page *page, … __set_page_owner(page, order, gfp_mask); … static inline void split_page_owner(struct page *page, unsigned int nr) …
|
| D | mm.h | #include <linux/page-flags.h> … #include <asm/page.h> … a zero page mapping on a read fault. … related to the physical page in case of virtualization. … /* This function must be updated when the size of struct page grows above 80 … static inline void __mm_zero_struct_page(struct page *page) … unsigned long *_pp = (void *)page; … /* Check that struct page is either 56, 64, 72, or 80 bytes */ BUILD_BUG_ON(sizeof(struct page) & 7); BUILD_BUG_ON(sizeof(struct page) < 56); …
|
| /Linux-v6.1/mm/ |
| D | folio-compat.c | struct address_space *page_mapping(struct page *page) { return folio_mapping(page_folio(page)); } … void unlock_page(struct page *page) { return folio_unlock(page_folio(page)); } … void end_page_writeback(struct page *page) { return folio_end_writeback(page_folio(page)); } … void wait_on_page_writeback(struct page *page) { return folio_wait_writeback(page_folio(page)); } … void wait_for_stable_page(struct page *page) { return folio_wait_stable(page_folio(page)); } …
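A small hedged illustration of what these wrappers buy legacy callers: the page-based function goes through the compat layer above, the folio-based one does not. Both functions are hypothetical:

```c
#include <linux/pagemap.h>

/* Legacy caller: still holds a struct page. unlock_page() simply calls
 * folio_unlock(page_folio(page)) via the compatibility layer. */
static void finish_io_legacy(struct page *page)
{
	SetPageUptodate(page);
	unlock_page(page);
}

/* Folio-native equivalent that skips the compat shim entirely. */
static void finish_io_folio(struct folio *folio)
{
	folio_mark_uptodate(folio);
	folio_unlock(folio);
}
```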
|
| D | balloon_compaction.c | balloon_page_enqueue_one(…, struct page *page) … Block others from accessing the 'page' when we get around to … holding a reference to the 'page' at this point. If we are not, then … BUG_ON(!trylock_page(page)); balloon_page_insert(b_dev_info, page); unlock_page(page); … balloon_page_list_enqueue() - inserts a list of pages into the balloon page … @b_dev_info: balloon device descriptor where we will insert a new page to … struct page *page, *tmp; … list_for_each_entry_safe(page, tmp, pages, lru) { …
|
| D | page_isolation.c | #include <linux/page-isolation.h> … Returns a page without holding a reference. If the caller wants to dereference that page (e.g., dumping), it has to make sure that it … static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long end_pfn, … struct page *page = pfn_to_page(start_pfn); struct zone *zone = page_zone(page); … if (is_migrate_cma_page(page)) { … return page; … page = pfn_to_page(pfn); … if (PageReserved(page)) …
|
| D | migrate_device.c | In migrate_vma_collect_pmd(): struct page *page; … page = pmd_page(*pmdp); if (is_huge_zero_page(page)) { … get_page(page); … if (unlikely(!trylock_page(page))) … ret = split_huge_page(page); unlock_page(page); put_page(page); … struct page *page; … Only care about unaddressable device page special …
|
| D | memory-failure.c | Handles page cache pages in various states. The tricky part here is that we can access any page asynchronously in respect to … The case actually shows up as a frequent (top 10) page state in tools/vm/page-types when running a real workload. … #include <linux/page-flags.h> #include <linux/kernel-page-flags.h> … #include <linux/page-isolation.h> … 1: the page is dissolved (if needed) and taken off from buddy, 0: the page is dissolved (if needed) and not taken off from buddy, … static int __page_handle_poison(struct page *page) …
|
| /Linux-v6.1/fs/btrfs/ |
| D | subpage.c | - Only support 64K page size for now. This is to make metadata handling easier, as 64K page would ensure all nodesize would fit inside one page, thus we don't need to handle … - Metadata can't cross 64K page boundary … needed range, other unrelated range in the same page will not be touched. … The writeback is still for the full page, but we will only submit the dirty extent buffers in the page. … This means, if we have a metadata page like this: … Page offset … record the status of each sector inside a page. This provides the extra …
|
| D | subpage.h | Structure to trace status of each sector inside a page, attached to page::private for both data and metadata inodes. … page. Data relies on @readers to unlock the page when last reader finished. While metadata doesn't need page unlock, it needs to prevent page::private get cleared before the last end_page_read(). … bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct page *page); … struct page *page, enum btrfs_subpage_type type); … struct page *page); … /* Allocate additional data where page represents more than one sector */ …
|
| /Linux-v6.1/net/core/ |
| D | page_pool.c | #include <linux/page-flags.h> … DMA_BIDIRECTIONAL is for allowing page used for DMA sending, … /* In order to request DMA-sync-for-device the page … static void page_pool_return_page(struct page_pool *pool, struct page *page); … static struct page *page_pool_refill_alloc_cache(struct page_pool *pool) … struct page *page; … page = __ptr_ring_consume(r); if (unlikely(!page)) … if (likely(page_to_nid(page) == pref_nid)) { pool->alloc.cache[pool->alloc.count++] = page; …
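A hedged driver-side sketch of the page_pool API this file implements. The rx_* functions are invented, and the parameter values are only illustrative:

```c
#include <net/page_pool.h>

/* Hypothetical rx-ring setup: one pool per queue, pages DMA-mapped and
 * synced for the device by the pool itself. */
static struct page_pool *rx_pool_create(struct device *dev, int queue_len)
{
	struct page_pool_params pp = {
		.order		= 0,
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size	= queue_len,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,
		.offset		= 0,
	};

	return page_pool_create(&pp);	/* ERR_PTR() on failure */
}

/* Refill one rx descriptor: the fast path hits pool->alloc.cache, which is
 * refilled from the ptr_ring as in page_pool_refill_alloc_cache() above. */
static struct page *rx_get_page(struct page_pool *pool)
{
	return page_pool_dev_alloc_pages(pool);
}
```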
|
| /Linux-v6.1/fs/sysv/ |
| D | dir.c | static inline void dir_put_page(struct page *page) { kunmap(page); put_page(page); } … static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len) … struct address_space *mapping = page->mapping; … block_write_end(NULL, mapping, pos, len, len, page, NULL); … err = write_one_page(page); … unlock_page(page); … static struct page *dir_get_page(struct inode *dir, unsigned long n) … struct page *page = read_mapping_page(mapping, n, NULL); …
|
| /Linux-v6.1/fs/jfs/ |
| D | jfs_metapage.c | uint pagealloc; /* # of page allocations */ uint pagefree; /* # of page frees */ … unlock_page(mp->page); … lock_page(mp->page); … Must have mp->page locked … #define mp_anchor(page) ((struct meta_anchor *)page_private(page)) … static inline struct metapage *page_to_mp(struct page *page, int offset) … if (!PagePrivate(page)) … return mp_anchor(page)->mp[offset >> L2PSIZE]; … static inline int insert_metapage(struct page *page, struct metapage *mp) …
|
| /Linux-v6.1/sound/pci/trident/ |
| D | trident_memory.c | Trident 4DWave-NX memory page allocation (TLB area) … /* page arguments of these two macros are Trident page (4096 bytes), not like … #define __set_tlb_bus(trident,page,addr) \ (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)) #define __tlb_to_addr(trident,page) \ (dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1)) … /* page size == SNDRV_TRIDENT_PAGE_SIZE */ #define ALIGN_PAGE_SIZE PAGE_SIZE /* minimum page size for allocation */ … /* fill TLB entrie(s) corresponding to page with ptr */ #define set_tlb_bus(trident,page,addr) __set_tlb_bus(trident,page,addr) …
|
| /Linux-v6.1/Documentation/mm/ |
| D | page_migration.rst | Page migration … Page migration allows moving the physical location of pages between … The main intent of page migration is to reduce the latency of memory accesses … Page migration allows a process to manually relocate the node on which its … Page migration functions are provided by the numactl package by Andi Kleen … which provides an interface similar to other NUMA functionality for page … proc(5) man page. … manual page migration support. Automatic page migration may be implemented … Page migration allows the preservation of the relative location of pages … Page migration occurs in several steps. First a high level …
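A hedged userspace sketch of the manual migration interface the document describes, using the libnuma wrapper for migrate_pages(2). Node numbers and the file layout are illustrative only:

```c
/* Hypothetical demo; build with something like: gcc demo.c -lnuma */
#include <numaif.h>
#include <stdio.h>

int main(void)
{
	int pid = 0;				/* 0 = calling process  */
	unsigned long old_nodes = 1UL << 0;	/* move pages off node 0 */
	unsigned long new_nodes = 1UL << 1;	/* ... onto node 1       */

	/* Returns the number of pages that could not be moved, or -1. */
	long left = migrate_pages(pid, 8 * sizeof(unsigned long),
				  &old_nodes, &new_nodes);
	if (left < 0)
		perror("migrate_pages");
	else
		printf("pages that could not be moved: %ld\n", left);
	return 0;
}
```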
|
| /Linux-v6.1/tools/perf/pmu-events/arch/x86/snowridgex/ |
| D | virtual-memory.json | …"BriefDescription": "Counts the number of page walks due to loads that miss the PDE (Page Director… … second level hits due to a demand load that did not start a page walk. Account for all page sizes… …"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to any page… …page walks completed due to loads (including SW prefetches) whose address translations missed in a… …"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 1G pag… …page walks completed due to loads (including SW prefetches) whose address translations missed in a… …"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 2M or … …page walks completed due to loads (including SW prefetches) whose address translations missed in a… …"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 4K pag… …page walks completed due to loads (including SW prefetches) whose address translations missed in a… …
|
| /Linux-v6.1/tools/perf/pmu-events/arch/x86/elkhartlake/ |
| D | virtual-memory.json | …"BriefDescription": "Counts the number of page walks due to loads that miss the PDE (Page Director… … second level hits due to a demand load that did not start a page walk. Account for all page sizes… …"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to any page… …page walks completed due to loads (including SW prefetches) whose address translations missed in a… …"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 1G pag… …page walks completed due to loads (including SW prefetches) whose address translations missed in a… …"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 2M or … …page walks completed due to loads (including SW prefetches) whose address translations missed in a… …"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 4K pag… …page walks completed due to loads (including SW prefetches) whose address translations missed in a… …
|
| /Linux-v6.1/fs/minix/ |
| D | dir.c | static inline void dir_put_page(struct page *page) { kunmap(page); put_page(page); } … Return the offset into page `page_nr' of the last valid byte in that page, plus one. … static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len) … struct address_space *mapping = page->mapping; … block_write_end(NULL, mapping, pos, len, len, page, NULL); … err = write_one_page(page); … unlock_page(page); …
|
| /Linux-v6.1/fs/ecryptfs/ |
| D | mmap.c | #include <linux/page-flags.h> … Get one page from cache or lower f/s, return error otherwise. … Returns locked and up-to-date page (if ok), with increased … struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index) { struct page *page = read_mapping_page(inode->i_mapping, index, NULL); if (!IS_ERR(page)) lock_page(page); return page; } … @page: Page that is locked before this call is made … static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc) …
|