
 * - Only support 64K page size for now
 *   This is to make metadata handling easier, as a 64K page ensures that
 *   any valid nodesize fits inside one page, thus we don't need to handle
 *   cases where a tree block crosses several pages.
 *
 * - Metadata can't cross 64K page boundary
 *   needed range; other unrelated ranges in the same page will not be touched.
 *
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *   This means, if we have a metadata page like this:
 *
 *   Page offset
 *   0         16K         32K         48K        64K
 *   |/////////|           |///////////|
 *        \- Tree block A        \- Tree block B
 *
 * - Common
 *   Both metadata and data will use a new structure, btrfs_subpage, to
 *   record the status of each sector inside a page. This provides the extra
 *   granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on page
 *   locking anymore, or we will have greatly reduced concurrency or even
 *   deadlocks (hold one tree lock while trying to lock another tree lock in
 *   the same page).
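
/*
 * The per-sector state described above is easiest to picture as a handful of
 * bitmaps hung off the page.  The struct below is a minimal sketch of that
 * idea, NOT the kernel's actual struct btrfs_subpage; field names and widths
 * are illustrative, assuming a 4K sectorsize inside a 64K page (16 bits per
 * state).
 */
struct subpage_state_sketch {
	spinlock_t lock;	/* serializes updates to the bitmaps below */
	u16 uptodate;		/* bit N set: sector N is uptodate */
	u16 dirty;		/* bit N set: sector N is dirty */
	u16 writeback;		/* bit N set: sector N is under writeback */
};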
bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct page *page)
	/*
	 * Only data pages (either through DIO or compression) can have no
	 * mapping. And if page->mapping->host is a data inode, it's subpage.
	 */
	if (!page->mapping || !page->mapping->host ||
	    is_data_inode(page->mapping->host))
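
/*
 * Assembled from the fragments above, the predicate plausibly reads as below.
 * The sectorsize and nodesize checks are assumptions about the omitted lines,
 * not part of the excerpt.
 */
bool btrfs_is_subpage_sketch(const struct btrfs_fs_info *fs_info,
			     struct page *page)
{
	/* Regular case: one sector fills (or exceeds) a page. */
	if (fs_info->sectorsize >= PAGE_SIZE)
		return false;

	/* Data pages (no mapping, or a data inode as host) go subpage. */
	if (!page->mapping || !page->mapping->host ||
	    is_data_inode(page->mapping->host))
		return true;

	/* Metadata only goes the subpage route when nodesize < PAGE_SIZE. */
	return fs_info->nodesize < PAGE_SIZE;
}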
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct page *page, enum btrfs_subpage_type type)
	/* We have cases like a dummy extent buffer page, which is not mapped. */
	if (page->mapping)
		ASSERT(PageLocked(page));

	/* Either not subpage, or the page already has private attached */
	if (!btrfs_is_subpage(fs_info, page) || PagePrivate(page))

	attach_page_private(page, subpage);
void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
			  struct page *page)
	if (!btrfs_is_subpage(fs_info, page) || !PagePrivate(page))

	subpage = detach_page_private(page);
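
/*
 * The attach/detach pair above leans on the generic page-private helpers
 * from <linux/pagemap.h>.  A sketch of that pattern, with a hypothetical
 * my_state type standing in for struct btrfs_subpage:
 */
struct my_state { int example; };	/* hypothetical stand-in */

static void attach_example(struct page *page, struct my_state *state)
{
	/*
	 * attach_page_private() sets PG_private, stores the pointer in
	 * page->private and takes an extra reference on the page.
	 */
	attach_page_private(page, state);
}

static struct my_state *detach_example(struct page *page)
{
	/* detach_page_private() undoes all of the above and returns the pointer. */
	return detach_page_private(page);
}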
 * of the same page.
 * With the eb_refs increased before the eb is inserted into the radix tree,
 * detach_extent_buffer_page() won't detach the page private while we're still
 * attaching extent buffers to the page.

void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
	if (!btrfs_is_subpage(fs_info, page))

	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
	if (!btrfs_is_subpage(fs_info, page))

	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
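
/*
 * The refcount operation itself is not among the excerpted lines.  A sketch
 * of the likely pattern, assuming subpage->eb_refs is an atomic_t guarded by
 * the lockdep assertion above:
 */
static void inc_eb_refs_sketch(struct btrfs_subpage *subpage)
{
	atomic_inc(&subpage->eb_refs);
}

static void dec_eb_refs_sketch(struct btrfs_subpage *subpage)
{
	/* Must never go below zero: every dec pairs with a prior inc. */
	ASSERT(atomic_read(&subpage->eb_refs) > 0);
	atomic_dec(&subpage->eb_refs);
}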
static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
	ASSERT(PagePrivate(page) && page->private);

	/*
	 * The range check only works for mapped pages, as we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (page->mapping)
		ASSERT(page_offset(page) <= start &&
		       start + len <= page_offset(page) + PAGE_SIZE);
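
/*
 * The alignment half of the assertion is not in the excerpt; it plausibly
 * requires both @start and @len to be sector aligned, something like:
 */
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));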
void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;

	btrfs_subpage_assert(fs_info, page, start, len);
void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
			      struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;

	btrfs_subpage_assert(fs_info, page, start, len);
	is_data = is_data_inode(page->mapping->host);

	/* For data we need to unlock the page if the last read has finished. */
		unlock_page(page);
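
/*
 * A sketch of the reader accounting implied by this pair, assuming
 * subpage->readers is an atomic_t counted in sectors covered by the range:
 */
static void start_reader_sketch(const struct btrfs_fs_info *fs_info,
				struct btrfs_subpage *subpage, u32 len)
{
	const int nbits = len >> fs_info->sectorsize_bits;

	atomic_add(nbits, &subpage->readers);
}

static bool end_reader_sketch(const struct btrfs_fs_info *fs_info,
			      struct btrfs_subpage *subpage, u32 len)
{
	const int nbits = len >> fs_info->sectorsize_bits;

	/* True when this call dropped the count to zero: the last reader. */
	return atomic_sub_and_test(nbits, &subpage->readers);
}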
static void btrfs_subpage_clamp_range(struct page *page, u64 *start, u32 *len)
	*start = max_t(u64, page_offset(page), orig_start);

	if (page_offset(page) >= orig_start + orig_len)

	*len = min_t(u64, page_offset(page) + PAGE_SIZE,
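
/*
 * Assembled from the fragments, the clamp plausibly reads as below; the
 * zero-length early path and the second min_t() operand are assumptions
 * based on the visible lines:
 */
static void clamp_range_sketch(struct page *page, u64 *start, u32 *len)
{
	u64 orig_start = *start;
	u32 orig_len = *len;

	/* Clamp the start of the range up to the start of this page. */
	*start = max_t(u64, page_offset(page), orig_start);
	/* If the original range ends before this page, nothing is left. */
	if (page_offset(page) >= orig_start + orig_len)
		*len = 0;
	else
		*len = min_t(u64, page_offset(page) + PAGE_SIZE,
			     orig_start + orig_len) - *start;
}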
void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;

	btrfs_subpage_assert(fs_info, page, start, len);
bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
				       struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;

	btrfs_subpage_assert(fs_info, page, start, len);
/*
 * Lock a page for delalloc page writeback.
 *
 * Return -EAGAIN if the page is not properly initialized.
 * Return 0 with the page locked, and writer counter updated.
 *
 * Even with 0 returned, the page still needs extra checks to make sure
 * it's really the correct page, as the caller is using
 * filemap_get_folios_contig(), which can race with page invalidation.
 */
int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {
		lock_page(page);

	lock_page(page);
	if (!PagePrivate(page) || !page->private) {
		unlock_page(page);

	btrfs_subpage_clamp_range(page, &start, &len);
	btrfs_subpage_start_writer(fs_info, page, start, len);
void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))
		return unlock_page(page);
	btrfs_subpage_clamp_range(page, &start, &len);
	if (btrfs_subpage_end_and_test_writer(fs_info, page, start, len))
		unlock_page(page);
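
/*
 * A hypothetical caller pattern for this lock/unlock pair, following the
 * -EAGAIN contract documented above; process_one_page_sketch() and the
 * surrounding flow are illustrative, not from the source.
 */
static int process_one_page_sketch(const struct btrfs_fs_info *fs_info,
				   struct page *page, u64 start, u32 len)
{
	int ret;

	ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
	if (ret == -EAGAIN) {
		/* The page was invalidated under us; the caller retries. */
		return ret;
	}

	/* ... do delalloc writeback work on [start, start + len) ... */

	btrfs_page_end_writer_lock(fs_info, page, start, len);
	return 0;
}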
#define subpage_calc_start_bit(fs_info, page, name, start, len)	\
	btrfs_subpage_assert(fs_info, page, start, len);		\
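
/*
 * The bit arithmetic itself is not in the excerpt.  A plausible
 * reconstruction, assuming per-type bitmaps packed into one shared area with
 * a per-name offset (the subpage_info->name##_offset field is an assumption):
 */
#define subpage_calc_start_bit_sketch(fs_info, page, name, start, len)	\
({									\
	unsigned int start_bit;						\
									\
	btrfs_subpage_assert(fs_info, page, start, len);		\
	/* Sector index of @start within this page... */		\
	start_bit = offset_in_page(start) >> (fs_info)->sectorsize_bits; \
	/* ...shifted to where the @name bitmap begins. */		\
	start_bit += (fs_info)->subpage_info->name##_offset;		\
	start_bit;							\
})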
void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							uptodate, start, len);

		SetPageUptodate(page);
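
/*
 * All the set/clear helpers in this file follow one pattern: take the subpage
 * spinlock, flip the per-sector bits, and mirror the aggregate state into the
 * page flag.  A sketch assembled for the uptodate case; bitmap_set() and the
 * all-set test stand in for the omitted middle and are assumptions:
 */
void btrfs_subpage_set_uptodate_sketch(const struct btrfs_fs_info *fs_info,
				       struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	/* The page flag goes uptodate only once every sector is uptodate. */
	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
		SetPageUptodate(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}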
void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							uptodate, start, len);

		ClearPageUptodate(page);

void btrfs_subpage_set_error(const struct btrfs_fs_info *fs_info,
			     struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							error, start, len);

		SetPageError(page);

void btrfs_subpage_clear_error(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							error, start, len);

		ClearPageError(page);
void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							dirty, start, len);

	set_page_dirty(page);

/* NOTE: Callers should manually clear page dirty for the true case, as we
 * have extra handling for tree blocks. */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							dirty, start, len);

void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, start, len);
	if (last)
		clear_page_dirty_for_io(page);
void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							writeback, start, len);

		set_page_writeback(page);

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							writeback, start, len);

		ASSERT(PageWriteback(page));
		end_page_writeback(page);
void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							ordered, start, len);

		SetPageOrdered(page);

void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							ordered, start, len);

		ClearPageOrdered(page);
void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							checked, start, len);

		SetPageChecked(page);

void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							checked, start, len);

		ClearPageChecked(page);
/*
 * Unlike set/clear, which depend on each page's status, for test all bits
 * are tested in the same way.
 */
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
			       struct page *page, u64 start, u32 len)	\
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; \
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,	\
							name, start, len); \
void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,	\
			   struct page *page, u64 start, u32 len)	\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		set_page_func(page);					\
	btrfs_subpage_set_##name(fs_info, page, start, len);		\

void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,	\
			     struct page *page, u64 start, u32 len)	\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		clear_page_func(page);					\
	btrfs_subpage_clear_##name(fs_info, page, start, len);		\

bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,	\
			    struct page *page, u64 start, u32 len)	\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))	\
		return test_page_func(page);				\
	return btrfs_subpage_test_##name(fs_info, page, start, len);	\

void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
				 struct page *page, u64 start, u32 len)	\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		set_page_func(page);					\
	btrfs_subpage_clamp_range(page, &start, &len);			\
	btrfs_subpage_set_##name(fs_info, page, start, len);		\

void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
				   struct page *page, u64 start, u32 len) \
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		clear_page_func(page);					\
	btrfs_subpage_clamp_range(page, &start, &len);			\
	btrfs_subpage_clear_##name(fs_info, page, start, len);		\

bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info,	\
				  struct page *page, u64 start, u32 len) \
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))	\
		return test_page_func(page);				\
	btrfs_subpage_clamp_range(page, &start, &len);			\
	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
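
/*
 * Caller side, these generated btrfs_page_*() wrappers give one API for both
 * the regular and the subpage case.  A hypothetical caller, assuming the ops
 * are instantiated for uptodate and error as the helpers above suggest:
 */
static void finish_read_sketch(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
{
	/* Falls back to SetPageUptodate(page) when not subpage. */
	btrfs_page_set_uptodate(fs_info, page, start, len);

	/* Clamp variants trim the range to this page before updating bits. */
	btrfs_page_clamp_clear_error(fs_info, page, start, len);
}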
/*
 * Make sure that not only the page dirty bit is cleared, but the subpage
 * dirty bits are cleared as well.
 */
void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info,
				 struct page *page)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;

	ASSERT(!PageDirty(page));
	if (!btrfs_is_subpage(fs_info, page))

	ASSERT(PagePrivate(page) && page->private);
/*
 * Handle different locked pages with different page sizes:
 *
 * - Page locked by plain lock_page()
 *   This is the most common locked page for __extent_writepage() called
 *   inside extent_write_cache_pages().
 *
 * - Page locked by lock_delalloc_pages()
 */
void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page,
			      u64 start, u32 len)
	ASSERT(PageLocked(page));
	/* For the non-subpage case, we just unlock the page */
	if (!btrfs_is_subpage(fs_info, page))
		return unlock_page(page);

	ASSERT(PagePrivate(page) && page->private);
	subpage = (struct btrfs_subpage *)page->private;

	/*
	 * For the subpage case, there are two types of locked pages: with or
	 * without a writers count.
	 *
	 * Since we own the page lock, no one else could touch subpage::writers
	 * and we are safe to do several atomic operations without spinlock.
	 */
	if (atomic_read(&subpage->writers) == 0)
		/* No writers, locked by plain lock_page() */
		return unlock_page(page);

	btrfs_page_end_writer_lock(fs_info, page, start, len);