Annotated excerpts from fs/btrfs/subpage.c (btrfs subpage support):

 * - Only support 64K page size for now
 *   This is to make metadata handling easier, as a 64K page ensures
 *   any nodesize fits inside one page, thus we don't need to handle
 *   the case where a tree block crosses page boundaries.
 *
 * - Metadata can't cross 64K page boundary
 *   Only the needed range is handled; other unrelated ranges in the same
 *   page will not be touched.
 *
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *   This means, if we have a metadata page like this:
 *
 *   Page offset
 *   (diagram of tree blocks inside the 64K page elided in this excerpt)
 *   A new structure, btrfs_subpage, records the status of each sector
 *   inside a page. This provides the extra granularity needed.
 *   Since we have multiple tree blocks inside one page, we can't rely on
 *   page locking alone (the page lock would be shared by every tree block
 *   in the same page).
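
To make the per-sector tracking concrete, below is a minimal sketch of such a structure, assuming a simplified layout; it is not the verbatim declaration from subpage.h:

	/* Illustrative sketch only; field layout simplified. */
	struct btrfs_subpage_sketch {
		spinlock_t lock;	/* protects the bitmaps below */
		atomic_t readers;	/* outstanding per-sector read units */
		atomic_t writers;	/* outstanding per-sector write units */
		/*
		 * One bit per sector for each tracked state (uptodate,
		 * dirty, writeback, ordered, checked), packed back to back
		 * into a single flexible array.
		 */
		unsigned long bitmaps[];
	};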
bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct page *page)
	/*
	 * ... mapping. And if page->mapping->host is a data inode, it's
	 * subpage.
	 */
	if (!page->mapping || !page->mapping->host ||
	    is_data_inode(page->mapping->host))
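
The remainder of the function is elided above. Below is a hedged sketch of the decision it encodes; the early-return ordering and the nodesize check are assumptions, only the quoted condition is verbatim:

	/* Sketch of the overall decision, not the verbatim function. */
	if (fs_info->sectorsize == PAGE_SIZE)
		return false;			/* regular, non-subpage case */
	if (!page->mapping || !page->mapping->host ||
	    is_data_inode(page->mapping->host))
		return true;			/* data inodes always go subpage */
	return fs_info->nodesize < PAGE_SIZE;	/* metadata depends on nodesize */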
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct page *page, enum btrfs_subpage_type type)
	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (page->mapping)
		ASSERT(PageLocked(page));

	/* Either not subpage, or the page already has private attached */
	if (!btrfs_is_subpage(fs_info, page) || PagePrivate(page))
		return 0;

	attach_page_private(page, subpage);
void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
			  struct page *page)
	if (!btrfs_is_subpage(fs_info, page) || !PagePrivate(page))
		return;

	subpage = detach_page_private(page);
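
attach_page_private() and detach_page_private() are the generic page-cache helpers used above: they stash a pointer in page->private, set or clear PagePrivate, and take or drop a page reference. A hypothetical pairing example, where everything except those two helpers is invented for illustration:

	/* Hypothetical pairing example. */
	struct my_state *state = kzalloc(sizeof(*state), GFP_NOFS);

	if (!state)
		return -ENOMEM;
	attach_page_private(page, state);	/* sets PagePrivate, grabs a page ref */
	/* ... page->private now carries our state ... */
	state = detach_page_private(page);	/* clears PagePrivate, drops the ref */
	kfree(state);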
 * ... of the same page.
 *
 * detach_extent_buffer_page() won't detach the page private while we're
 * still holding a raised eb_refs count.
void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
	if (!btrfs_is_subpage(fs_info, page))
		return;

	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
	if (!btrfs_is_subpage(fs_info, page))
		return;

	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
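
The increment and decrement statements themselves are elided above; presumably they are plain atomic operations on subpage->eb_refs. A sketch of the idea, with the exact shape assumed:

	/* Sketch: eb_refs pins page->private while extent buffers exist. */
	atomic_inc(&subpage->eb_refs);			/* inc path */

	ASSERT(atomic_read(&subpage->eb_refs) > 0);	/* dec path */
	atomic_dec(&subpage->eb_refs);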
static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
	ASSERT(PagePrivate(page) && page->private);

	/*
	 * The range check only works for mapped pages; we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (page->mapping)
		ASSERT(page_offset(page) <= start &&
		       start + len <= page_offset(page) + PAGE_SIZE);
void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;

	btrfs_subpage_assert(fs_info, page, start, len);
void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
			      struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;

	btrfs_subpage_assert(fs_info, page, start, len);
	is_data = is_data_inode(page->mapping->host);
	/*
	 * For data we need to unlock the page if the last read has finished.
	 */
		unlock_page(page);
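
The counter manipulation is elided above. A sketch of the accounting it implies, one unit per sector of the range; the helper-free form below is an assumption:

	/* Sketch: per-sector reader accounting. */
	const int nbits = len >> fs_info->sectorsize_bits;

	atomic_add(nbits, &subpage->readers);		/* start_reader */

	/* end_reader: the last reader out unlocks a data page. */
	if (atomic_sub_and_test(nbits, &subpage->readers) && is_data)
		unlock_page(page);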
static void btrfs_subpage_clamp_range(struct page *page, u64 *start, u32 *len)
	*start = max_t(u64, page_offset(page), orig_start);

	if (page_offset(page) >= orig_start + orig_len)
		*len = 0;
	else
		*len = min_t(u64, page_offset(page) + PAGE_SIZE,
			     orig_start + orig_len) - *start;
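
To see the clamp arithmetic in isolation, here is a self-contained user-space rendition with a worked example; the 64K page constant and the helper name are assumptions for illustration:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SZ (64 * 1024ULL)

	/* Clamp [start, start+len) to the page at byte offset page_off. */
	static void clamp_range(uint64_t page_off, uint64_t *start, uint32_t *len)
	{
		uint64_t orig_start = *start;
		uint64_t orig_end = *start + *len;

		if (orig_start < page_off)
			*start = page_off;
		/* new end = min(page end, original end) */
		*len = (orig_end < page_off + PAGE_SZ ? orig_end
						      : page_off + PAGE_SZ) - *start;
	}

	int main(void)
	{
		/* Range [60K, 72K) against the page covering [64K, 128K). */
		uint64_t start = 60 * 1024;
		uint32_t len = 12 * 1024;

		clamp_range(64 * 1024, &start, &len);
		/* Prints start=65536 len=8192: only the overlap survives. */
		printf("start=%llu len=%u\n", (unsigned long long)start, len);
		return 0;
	}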
void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;

	btrfs_subpage_assert(fs_info, page, start, len);
bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
				       struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;

	btrfs_subpage_assert(fs_info, page, start, len);
 * Lock a page for delalloc page writeback.
 *
 * Return -EAGAIN if the page is not properly initialized.
 * Return 0 with the page locked, and the writer counter updated.
 *
 * Even with 0 returned, the page still needs extra checks to make sure
 * it's really the correct page, as the caller is using
 * filemap_get_folios_contig(), which can race with page invalidation.
int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {
		lock_page(page);
		return 0;
	}
	lock_page(page);
	if (!PagePrivate(page) || !page->private) {
		unlock_page(page);
		return -EAGAIN;
	}
	btrfs_subpage_clamp_range(page, &start, &len);
	btrfs_subpage_start_writer(fs_info, page, start, len);
	return 0;
void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))
		return unlock_page(page);
	btrfs_subpage_clamp_range(page, &start, &len);
	if (btrfs_subpage_end_and_test_writer(fs_info, page, start, len))
		unlock_page(page);
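
A hedged usage sketch of this lock/unlock pair, in the spirit of the comment above; the surrounding loop and the put_page() call are hypothetical, only the two btrfs_page_*_writer_lock helpers come from this file:

	/* Hypothetical caller locking one page of a delalloc range. */
	ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
	if (ret == -EAGAIN) {
		/* Raced with invalidation; skip this page. */
		put_page(page);
		continue;
	}
	/* ... write back the dirty sectors in [start, start + len) ... */
	btrfs_page_end_writer_lock(fs_info, page, start, len);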
#define subpage_calc_start_bit(fs_info, page, name, start, len)	\
({									\
	unsigned int start_bit;						\
									\
	btrfs_subpage_assert(fs_info, page, start, len);		\
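
The rest of the macro computes where the named bitmap's range begins: conceptually, the sector index within the page plus the named bitmap's offset into the packed bitmap array. A sketch with an assumed per-bitmap offset field:

	/* Sketch: bit index = sector within the page + offset of the bitmap
	 * selected by "name" (here: dirty) inside the packed array. */
	start_bit = offset_in_page(start) >> fs_info->sectorsize_bits;
	start_bit += dirty_offset;	/* assumed per-bitmap offset field */

For example, with 4K sectors in a 64K page there are 16 bits per bitmap; byte 8192 is sector 2, so if the dirty bitmap starts at bit 16, the result is bit 18.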
void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							uptodate, start, len);
		SetPageUptodate(page);
void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							uptodate, start, len);
	ClearPageUptodate(page);
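
The elided bodies of these set/clear helpers share one pattern: take subpage->lock, flip the sector bits, and promote the state to the full-page flag when the whole page qualifies. A sketch of the set side; the all-set test helper named below is an assumption:

	/* Sketch: set the per-sector bits, lift to the page flag when full. */
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate)) /* assumed helper */
		SetPageUptodate(page);
	spin_unlock_irqrestore(&subpage->lock, flags);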
void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							dirty, start, len);
	set_page_dirty(page);
 * NOTE: Callers should manually clear the page dirty flag for the true case,
 * as we have ...
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							dirty, start, len);
void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, start, len);
	if (last)
		clear_page_dirty_for_io(page);
void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							writeback, start, len);
	set_page_writeback(page);
void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							writeback, start, len);
		ASSERT(PageWriteback(page));
		end_page_writeback(page);
void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							ordered, start, len);
	SetPageOrdered(page);
void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							ordered, start, len);
	ClearPageOrdered(page);
void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							checked, start, len);
	SetPageChecked(page);
void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							checked, start, len);
	ClearPageChecked(page);
/*
 * Unlike set/clear, which depend on each page's status, for test all bits
 * are tested in the same way.
 */
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
			       struct page *page, u64 start, u32 len)	\
{									\
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; \
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,	\
							name, start, len); \
void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,	\
			   struct page *page, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		set_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, page, start, len);		\
}									\
void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,	\
			     struct page *page, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		clear_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
}									\
bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,	\
			    struct page *page, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))	\
		return test_page_func(page);				\
	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
}									\
void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
				 struct page *page, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		set_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(page, &start, &len);			\
	btrfs_subpage_set_##name(fs_info, page, start, len);		\
}									\
void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
				   struct page *page, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		clear_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(page, &start, &len);			\
	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
}									\
bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info,	\
				  struct page *page, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))	\
		return test_page_func(page);				\
	btrfs_subpage_clamp_range(page, &start, &len);			\
	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
}
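
Generators like these are instantiated once per tracked state. A hedged example of what such an instantiation looks like, assuming the generator macro is named IMPLEMENT_BTRFS_PAGE_OPS as in recent kernels:

	/* Wires the subpage "dirty" helpers to the classic full-page
	 * primitives; the non-subpage paths fall through to them. */
	IMPLEMENT_BTRFS_PAGE_OPS(dirty, set_page_dirty, clear_page_dirty_for_io,
				 PageDirty);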
/*
 * Make sure that not only the page dirty bit is cleared, but also the
 * subpage dirty bit is cleared.
 */
void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info,
				 struct page *page)
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;

	ASSERT(!PageDirty(page));
	if (!btrfs_is_subpage(fs_info, page))
		return;

	ASSERT(PagePrivate(page) && page->private);
/*
 * Handle different locked pages with different page sizes:
 *
 * - Page locked by plain lock_page()
 *   ...
 *   This is the most common locked page for __extent_writepage() called
 *   inside extent_write_cache_pages().
 *
 * - Page locked by lock_delalloc_pages()
 *   ...
 */
void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page,
			      u64 start, u32 len)
	ASSERT(PageLocked(page));
	/* For the non-subpage case, we just unlock the page */
	if (!btrfs_is_subpage(fs_info, page))
		return unlock_page(page);

	ASSERT(PagePrivate(page) && page->private);
	subpage = (struct btrfs_subpage *)page->private;

	/*
	 * For the subpage case, there are two types of locked pages: with or
	 * without a writers counter.
	 *
	 * Since we own the page lock, no one else could touch subpage::writers
	 * and we are safe to do several atomic operations without spinlock.
	 */
		return unlock_page(page);

	btrfs_page_end_writer_lock(fs_info, page, start, len);
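
The condition guarding that early unlock is elided; presumably it checks whether any writers were ever accounted on the subpage. A sketch of that branch, with the exact form assumed:

	/* Sketch: no writers accounted means plain lock_page() locked it. */
	if (atomic_read(&subpage->writers) == 0)
		/* No writers; unlock directly instead of via the counter. */
		return unlock_page(page);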
void btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
	ASSERT(PagePrivate(page) && page->private);

	subpage = (struct btrfs_subpage *)page->private;

	dump_page(page, "btrfs subpage dump");
	btrfs_warn(fs_info,
"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl error=%*pbl dirty=%*pbl writeback=%*pbl ordere…
		   start, len, page_offset(page),
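
For reference, %*pbl is the standard kernel vsprintf extension used in the (truncated) format string above: it consumes an int bit-count plus a pointer to a bitmap and prints the set bits as a compact range list. A self-contained illustration:

	/* %*pbl takes (int nbits, const unsigned long *bitmap). */
	unsigned long map = 0x10f;	/* bits 0-3 and bit 8 set */

	printk("%*pbl\n", 16, &map);	/* prints "0-3,8" */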