/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	filemap_set_wb_err(mapping, error);

	/* Also set the legacy AS_EIO / AS_ENOSPC flags */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}

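/*
 * Example (illustrative, not from this header): a filesystem's writeback
 * completion path recording a failed write so that a later fsync(2) on
 * the file reports it.  example_end_writeback() is a hypothetical helper.
 */
static inline void example_end_writeback(struct page *page, int error)
{
	if (error) {
		SetPageError(page);
		mapping_set_error(page->mapping, error);
	}
	end_page_writeback(page);
}
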
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

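/*
 * Example (illustrative): a ram-backed filesystem whose pages must never
 * be reclaimed can mark the whole mapping unevictable when it sets up an
 * inode, much as ramfs does.  example_init_inode() is hypothetical.
 */
static inline void example_init_inode(struct inode *inode)
{
	mapping_set_unevictable(inode->i_mapping);
}
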
static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

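/*
 * Example (illustrative): constrain an allocation to what the mapping
 * allows while also masking __GFP_FS off, so reclaim cannot recurse into
 * the filesystem while it holds locks.  __page_cache_alloc() is the
 * pagecache allocation helper declared elsewhere in this header;
 * example_alloc_cache_page() is hypothetical.
 */
static inline struct page *example_alloc_cache_page(struct address_space *mapping)
{
	gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS);

	return __page_cache_alloc(gfp);
}
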
/*
 * Speculatively take a reference to a page.  This must be called inside
 * the same rcu_read_lock() section that was used to look the page up in
 * the pagecache radix-tree (or page table).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol.  The lookup-side (eg. find_get_page)
 * follows this pattern:
 *   1. find page in radix tree
 *   2. conditionally increment its refcount
 *   3. check the page is still in pagecache (if not, goto 1)
 * The remove-side that cares about stability of _refcount (eg. reclaim),
 * with the i_pages lock held, atomically checks that the refcount is
 * zero (A), removes the page (B) and frees it (C).  The two critical
 * interleavings are:
 *   - 2 runs before A: A sees the elevated refcount and bails out
 *   - A runs before 2: 2 sees a zero refcount and retries; B then
 *     completes and 1 finds no page, so the lookup returns NULL
 * It is possible that between 1 and 2 the page is removed and the exact
 * same page is re-inserted at the same index.  That's OK: the old locked
 * find_get_page could equally have run before or after such a
 * re-insertion, depending on the order that locks are granted.
 */
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us: with preemption off no RCU grace period can pass,
	 * so a page found in the radix tree is already pinned.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);
#else
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;	/* page is being freed: caller retries */
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}

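/*
 * Sketch (illustrative) of the lookup-side 1-2-3 protocol above, in the
 * style of find_get_entry().  Shadow and retry entry handling is omitted
 * for brevity; example_lockless_lookup() is hypothetical.
 */
static inline struct page *example_lockless_lookup(struct address_space *mapping,
						   pgoff_t index)
{
	XA_STATE(xas, &mapping->i_pages, index);
	struct page *page;

	rcu_read_lock();
repeat:
	xas_reset(&xas);
	page = xas_load(&xas);				/* 1: find page */
	if (!page)
		goto out;
	if (!__page_cache_add_speculative(page, 1))	/* 2: grab refcount */
		goto repeat;
	if (unlikely(page != xas_reload(&xas))) {	/* 3: still there? */
		put_page(page);
		goto repeat;
	}
out:
	rcu_read_unlock();
	return page;
}
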
typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020
#define FGP_FOR_MMAP		0x00000040

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
				int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

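/*
 * Example (illustrative): test whether an index is resident in the
 * pagecache without allocating anything.  The reference taken by
 * find_get_page() must always be dropped with put_page();
 * example_page_is_cached() is hypothetical.
 */
static inline bool example_page_is_cached(struct address_space *mapping,
					  pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (!page)
		return false;
	put_page(page);
	return true;
}
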
/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

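/*
 * Example (illustrative): take a locked page at @index, creating it if
 * absent, then release it.  example_touch_page() is hypothetical.
 */
static inline int example_touch_page(struct address_space *mapping,
				     pgoff_t index)
{
	struct page *page = find_or_create_page(mapping, index,
						mapping_gfp_mask(mapping));

	if (!page)
		return -ENOMEM;
	/* ... page is locked and referenced: safe to modify here ... */
	unlock_page(page);
	put_page(page);
	return 0;
}
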
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

static inline struct page *find_subpage(struct page *page, pgoff_t offset)
{
	/* HugeTLBfs indexes the pagecache in units of hpage_size */
	if (PageHuge(page))
		return page;

	VM_BUG_ON_PAGE(PageTail(page), page);

	return page + (offset & (compound_nr(page) - 1));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);

static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}

unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages);

static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}

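/*
 * Example (illustrative): visit every dirty page in @mapping a batch at
 * a time, as a simple writeback loop would.  PAGECACHE_TAG_DIRTY is the
 * xarray mark maintained by the page-dirtying paths (see linux/fs.h);
 * example_walk_dirty_pages() is hypothetical.
 */
static inline void example_walk_dirty_pages(struct address_space *mapping)
{
	struct page *pages[16];
	pgoff_t index = 0;
	unsigned int i, nr;

	while ((nr = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
					ARRAY_SIZE(pages), pages))) {
		for (i = 0; i < nr; i++) {
			/* ... write pages[i] back ... */
			put_page(pages[i]);
		}
	}
}
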
struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
							pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	return read_cache_page(mapping, index, NULL, data);
}

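/*
 * Example (illustrative): read one page of an inode through the
 * pagecache.  With a NULL filler, read_cache_page() falls back to the
 * mapping's ->readpage and returns an ERR_PTR() on failure; on success
 * the page is uptodate and referenced.  example_read_one_page() is
 * hypothetical.
 */
static inline struct page *example_read_one_page(struct address_space *mapping,
						 pgoff_t index)
{
	struct page *page = read_mapping_page(mapping, index, NULL);

	if (IS_ERR(page))
		return page;	/* propagate -EIO etc. */
	/* ... kmap(page), use the data, kunmap(page) ... */
	return page;		/* caller calls put_page() when done */
}
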
/*
 * Get the index of the page within the radix-tree.
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Get the offset in PAGE_SIZE, even for hugetlb pages.
 * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;

	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

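/*
 * Worked example (illustrative): for a VMA with vm_start = 0x700000000000
 * and vm_pgoff = 16 (the mapping starts 16 pages into the file), a fault
 * at address 0x700000003000 with 4KiB pages yields
 * pgoff = (0x3000 >> 12) + 16 = 3 + 16, i.e. file page 19.
 */
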
static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

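/*
 * Example (illustrative): lock a page while remaining killable, the
 * usual pattern for paths that may block on I/O for a long time.
 * example_with_locked_page() is hypothetical.
 */
static inline int example_with_locked_page(struct page *page)
{
	int err = lock_page_killable(page);

	if (err)
		return err;	/* -EINTR: a fatal signal arrived first */
	/* ... the page is now locked: stable against truncation ... */
	unlock_page(page);
	return 0;
}
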
/*
 * Fault in a userspace address range.  Writing zeroes into userspace
 * here is OK, because we know that if the zero gets there, we'll be
 * overwriting it shortly afterwards.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __get_user(c, end);

	(void)c;
	return 0;
}

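/*
 * Example (illustrative): the classic write(2) deadlock avoidance.  The
 * user buffer is faulted in *before* the destination pagecache page is
 * locked, because faulting it in afterwards could recurse on the very
 * page lock we hold.  example_prefault() is hypothetical.
 */
static inline int example_prefault(const char __user *buf, size_t count)
{
	if (fault_in_pages_readable(buf, min_t(size_t, count, PAGE_SIZE)))
		return -EFAULT;
	/* ... lock the pagecache page, copy with pagefaults disabled ... */
	return 0;
}
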
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}

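/*
 * Example (illustrative): allocate a fresh page and insert it at @index,
 * the way simple read paths populate the cache.  On success the page is
 * returned locked, referenced and on the LRU; on -EEXIST someone else
 * raced us in.  example_add_new_page() is hypothetical.
 */
static inline struct page *example_add_new_page(struct address_space *mapping,
						pgoff_t index)
{
	struct page *page = __page_cache_alloc(mapping_gfp_mask(mapping));
	int err;

	if (!page)
		return ERR_PTR(-ENOMEM);
	err = add_to_page_cache_lru(page, mapping, index,
				    mapping_gfp_constraint(mapping, GFP_KERNEL));
	if (err) {
		put_page(page);
		return ERR_PTR(err);
	}
	return page;	/* locked: caller fills it, then unlock_page() */
}
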
static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

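/*
 * Worked example (illustrative): with 4KiB pages, an inode with
 * i_size = 5000 needs (5000 + 4095) >> 12 = 2 pagecache pages.
 */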