Lines matching references to 'b' (struct drbd_bitmap *)

127 	struct drbd_bitmap *b = device->bitmap;  in __bm_print_lock_info()  local
132 func, b->bm_why ?: "?", in __bm_print_lock_info()
133 b->bm_task->comm, task_pid_nr(b->bm_task)); in __bm_print_lock_info()
138 struct drbd_bitmap *b = device->bitmap; in drbd_bm_lock() local
141 if (!b) { in drbd_bm_lock()
146 trylock_failed = !mutex_trylock(&b->bm_change); in drbd_bm_lock()
151 why, b->bm_why ?: "?", in drbd_bm_lock()
152 b->bm_task->comm, task_pid_nr(b->bm_task)); in drbd_bm_lock()
153 mutex_lock(&b->bm_change); in drbd_bm_lock()
155 if (BM_LOCKED_MASK & b->bm_flags) in drbd_bm_lock()
157 b->bm_flags |= flags & BM_LOCKED_MASK; in drbd_bm_lock()
159 b->bm_why = why; in drbd_bm_lock()
160 b->bm_task = current; in drbd_bm_lock()
165 struct drbd_bitmap *b = device->bitmap; in drbd_bm_unlock() local
166 if (!b) { in drbd_bm_unlock()
174 b->bm_flags &= ~BM_LOCKED_MASK; in drbd_bm_unlock()
175 b->bm_why = NULL; in drbd_bm_unlock()
176 b->bm_task = NULL; in drbd_bm_unlock()
177 mutex_unlock(&b->bm_change); in drbd_bm_unlock()
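Read together, the drbd_bm_lock()/drbd_bm_unlock() references above show the intended calling pattern: the caller takes the bm_change mutex, records why/who for diagnostics, and sets the requested bits of BM_LOCKED_MASK in bm_flags; the unlock side undoes all of it. A minimal usage sketch (the reason string and flag choice are placeholders, not taken from the listing):

/* Illustrative usage only; the "why" string and flag value are placeholders. */
drbd_bm_lock(device, "example: bulk bitmap update", BM_LOCKED_MASK);
/* ... operate on device->bitmap; bm_flags now forbids the locked-out operations ... */
drbd_bm_unlock(device);  /* clears the BM_LOCKED_MASK bits, bm_why, bm_task; releases bm_change */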
225 struct drbd_bitmap *b = device->bitmap; in bm_page_lock_io() local
226 void *addr = &page_private(b->bm_pages[page_nr]); in bm_page_lock_io()
227 wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr)); in bm_page_lock_io()
232 struct drbd_bitmap *b = device->bitmap; in bm_page_unlock_io() local
233 void *addr = &page_private(b->bm_pages[page_nr]); in bm_page_unlock_io()
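bm_page_lock_io() serializes I/O on one bitmap page by using that page's page_private() word as a bit lock: it sleeps on bm_io_wait until it wins test_and_set_bit(BM_PAGE_IO_LOCK). Only the address computation of the unlock side is listed; a plausible counterpart, with the clear/wake calls assumed, would be:

/* Assumed unlock counterpart; only the addr line appears in the listing above. */
static void bm_page_unlock_io(struct drbd_device *device, int page_nr)
{
	struct drbd_bitmap *b = device->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);

	clear_bit_unlock(BM_PAGE_IO_LOCK, addr);  /* assumption: drop the per-page IO lock */
	wake_up(&b->bm_io_wait);                  /* assumption: let the next waiter retry */
}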
268 struct drbd_bitmap *b = device->bitmap; in drbd_bm_mark_for_writeout() local
276 BUG_ON(b->n_bitmap_hints >= ARRAY_SIZE(b->al_bitmap_hints)); in drbd_bm_mark_for_writeout()
278 b->al_bitmap_hints[b->n_bitmap_hints++] = page_nr; in drbd_bm_mark_for_writeout()
308 static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr) in bm_word_to_page_idx() argument
312 BUG_ON(page_nr >= b->bm_number_of_pages); in bm_word_to_page_idx()
316 static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr) in bm_bit_to_page_idx() argument
320 BUG_ON(page_nr >= b->bm_number_of_pages); in bm_bit_to_page_idx()
324 static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) in __bm_map_pidx() argument
326 struct page *page = b->bm_pages[idx]; in __bm_map_pidx()
330 static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) in bm_map_pidx() argument
332 return __bm_map_pidx(b, idx); in bm_map_pidx()
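bm_word_to_page_idx() and bm_bit_to_page_idx() reduce a word or bit number to the index of the bitmap page that holds it, and __bm_map_pidx()/bm_map_pidx() then map that page into the kernel address space. The exact shift constants are not shown; the arithmetic presumably amounts to the following, assuming PAGE_SIZE-byte bitmap pages:

/* Illustrative arithmetic only, not the literal kernel source:
 * one bitmap page holds PAGE_SIZE bytes = 8 * PAGE_SIZE bits
 *                                       = PAGE_SIZE / sizeof(long) words. */
word_page_nr = long_nr / (PAGE_SIZE / sizeof(long));
bit_page_nr  = bitnr >> (PAGE_SHIFT + 3);   /* i.e. bitnr / (8 * PAGE_SIZE) */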
390 static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want) in bm_realloc_pages() argument
392 struct page **old_pages = b->bm_pages; in bm_realloc_pages()
395 unsigned long have = b->bm_number_of_pages; in bm_realloc_pages()
449 struct drbd_bitmap *b = device->bitmap; in drbd_bm_init() local
450 WARN_ON(b != NULL); in drbd_bm_init()
451 b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL); in drbd_bm_init()
452 if (!b) in drbd_bm_init()
454 spin_lock_init(&b->bm_lock); in drbd_bm_init()
455 mutex_init(&b->bm_change); in drbd_bm_init()
456 init_waitqueue_head(&b->bm_io_wait); in drbd_bm_init()
458 device->bitmap = b; in drbd_bm_init()
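drbd_bm_init() is visible here almost in full: it allocates a zeroed struct drbd_bitmap and initializes the three synchronization primitives the rest of the file relies on. A sketch of those same steps, with the error/return values assumed:

int drbd_bm_init(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;

	WARN_ON(b != NULL);                   /* a bitmap must not already exist */
	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
	if (!b)
		return -ENOMEM;               /* assumed error path */
	spin_lock_init(&b->bm_lock);          /* protects counters and bm_flags */
	mutex_init(&b->bm_change);            /* serializes bitmap-changing operations */
	init_waitqueue_head(&b->bm_io_wait);  /* waiters for the per-page IO lock */
	device->bitmap = b;
	return 0;
}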
496 static int bm_clear_surplus(struct drbd_bitmap *b) in bm_clear_surplus() argument
504 tmp = (b->bm_bits & BITS_PER_PAGE_MASK); in bm_clear_surplus()
511 p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); in bm_clear_surplus()
533 static void bm_set_surplus(struct drbd_bitmap *b) in bm_set_surplus() argument
540 tmp = (b->bm_bits & BITS_PER_PAGE_MASK); in bm_set_surplus()
547 p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); in bm_set_surplus()
568 static unsigned long bm_count_bits(struct drbd_bitmap *b) in bm_count_bits() argument
572 unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1; in bm_count_bits()
576 for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) { in bm_count_bits()
577 p_addr = __bm_map_pidx(b, idx); in bm_count_bits()
583 last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL; in bm_count_bits()
584 p_addr = __bm_map_pidx(b, idx); in bm_count_bits()
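bm_count_bits() recomputes the cached weight bm_set from scratch: it sums the set bits of every full page, then treats the last page specially, counting whole words only up to last_word and masking the final partial word with mask (both computed as shown above). The per-page accounting below is an assumed reconstruction:

/* Assumed shape of the recount; the unmap step and weight calls are guesses. */
bits = 0;
for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
	p_addr = __bm_map_pidx(b, idx);
	bits += bitmap_weight(p_addr, PAGE_SIZE * 8);     /* whole page */
	/* unmap p_addr (counterpart of __bm_map_pidx, not shown in the listing) */
}
last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
p_addr = __bm_map_pidx(b, idx);
bits += bitmap_weight(p_addr, last_word * BITS_PER_LONG); /* full words of the last page */
bits += hweight_long(p_addr[last_word] & mask);           /* masked partial word */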
596 static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) in bm_memset() argument
604 if (end > b->bm_words) { in bm_memset()
611 idx = bm_word_to_page_idx(b, offset); in bm_memset()
612 p_addr = bm_map_pidx(b, idx); in bm_memset()
620 bm_set_page_need_writeout(b->bm_pages[idx]); in bm_memset()
646 struct drbd_bitmap *b = device->bitmap; in drbd_bm_resize() local
653 if (!expect(b)) in drbd_bm_resize()
661 if (capacity == b->bm_dev_capacity) in drbd_bm_resize()
665 spin_lock_irq(&b->bm_lock); in drbd_bm_resize()
666 opages = b->bm_pages; in drbd_bm_resize()
667 onpages = b->bm_number_of_pages; in drbd_bm_resize()
668 owords = b->bm_words; in drbd_bm_resize()
669 b->bm_pages = NULL; in drbd_bm_resize()
670 b->bm_number_of_pages = in drbd_bm_resize()
671 b->bm_set = in drbd_bm_resize()
672 b->bm_bits = in drbd_bm_resize()
673 b->bm_words = in drbd_bm_resize()
674 b->bm_dev_capacity = 0; in drbd_bm_resize()
675 spin_unlock_irq(&b->bm_lock); in drbd_bm_resize()
701 have = b->bm_number_of_pages; in drbd_bm_resize()
703 D_ASSERT(device, b->bm_pages != NULL); in drbd_bm_resize()
704 npages = b->bm_pages; in drbd_bm_resize()
709 npages = bm_realloc_pages(b, want); in drbd_bm_resize()
717 spin_lock_irq(&b->bm_lock); in drbd_bm_resize()
718 opages = b->bm_pages; in drbd_bm_resize()
719 owords = b->bm_words; in drbd_bm_resize()
720 obits = b->bm_bits; in drbd_bm_resize()
724 bm_set_surplus(b); in drbd_bm_resize()
726 b->bm_pages = npages; in drbd_bm_resize()
727 b->bm_number_of_pages = want; in drbd_bm_resize()
728 b->bm_bits = bits; in drbd_bm_resize()
729 b->bm_words = words; in drbd_bm_resize()
730 b->bm_dev_capacity = capacity; in drbd_bm_resize()
734 bm_memset(b, owords, 0xff, words-owords); in drbd_bm_resize()
735 b->bm_set += bits - obits; in drbd_bm_resize()
737 bm_memset(b, owords, 0x00, words-owords); in drbd_bm_resize()
746 (void)bm_clear_surplus(b); in drbd_bm_resize()
748 spin_unlock_irq(&b->bm_lock); in drbd_bm_resize()
752 b->bm_set = bm_count_bits(b); in drbd_bm_resize()
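Taken in order, the drbd_bm_resize() references outline the whole procedure: bail out early if the capacity is unchanged, tear everything down under bm_lock when the new capacity is zero, otherwise (re)allocate the page array with bm_realloc_pages() outside the lock, and then swap in the new geometry. A condensed outline of the second half, with the branch conditions and the growing/set_new_bits flags assumed rather than shown:

spin_lock_irq(&b->bm_lock);
/* remember opages/owords/obits, mark surplus bits of the old last word */
bm_set_surplus(b);
b->bm_pages = npages;                     /* npages obtained from bm_realloc_pages(b, want) */
b->bm_number_of_pages = want;
b->bm_bits = bits;
b->bm_words = words;
b->bm_dev_capacity = capacity;
if (growing && set_new_bits) {            /* assumed: new area starts as "out of sync" */
	bm_memset(b, owords, 0xff, words - owords);
	b->bm_set += bits - obits;
} else if (growing) {
	bm_memset(b, owords, 0x00, words - owords);
}
(void)bm_clear_surplus(b);                /* trim bits beyond the new bm_bits */
spin_unlock_irq(&b->bm_lock);
if (!growing)                             /* assumed: recount only when not growing */
	b->bm_set = bm_count_bits(b);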
770 struct drbd_bitmap *b = device->bitmap; in _drbd_bm_total_weight() local
774 if (!expect(b)) in _drbd_bm_total_weight()
776 if (!expect(b->bm_pages)) in _drbd_bm_total_weight()
779 spin_lock_irqsave(&b->bm_lock, flags); in _drbd_bm_total_weight()
780 s = b->bm_set; in _drbd_bm_total_weight()
781 spin_unlock_irqrestore(&b->bm_lock, flags); in _drbd_bm_total_weight()
799 struct drbd_bitmap *b = device->bitmap; in drbd_bm_words() local
800 if (!expect(b)) in drbd_bm_words()
802 if (!expect(b->bm_pages)) in drbd_bm_words()
805 return b->bm_words; in drbd_bm_words()
810 struct drbd_bitmap *b = device->bitmap; in drbd_bm_bits() local
811 if (!expect(b)) in drbd_bm_bits()
814 return b->bm_bits; in drbd_bm_bits()
825 struct drbd_bitmap *b = device->bitmap; in drbd_bm_merge_lel() local
833 if (!expect(b)) in drbd_bm_merge_lel()
835 if (!expect(b->bm_pages)) in drbd_bm_merge_lel()
839 WARN_ON(offset >= b->bm_words); in drbd_bm_merge_lel()
840 WARN_ON(end > b->bm_words); in drbd_bm_merge_lel()
842 spin_lock_irq(&b->bm_lock); in drbd_bm_merge_lel()
845 idx = bm_word_to_page_idx(b, offset); in drbd_bm_merge_lel()
846 p_addr = bm_map_pidx(b, idx); in drbd_bm_merge_lel()
853 b->bm_set += hweight_long(word) - bits; in drbd_bm_merge_lel()
856 bm_set_page_need_writeout(b->bm_pages[idx]); in drbd_bm_merge_lel()
863 if (end == b->bm_words) in drbd_bm_merge_lel()
864 b->bm_set -= bm_clear_surplus(b); in drbd_bm_merge_lel()
865 spin_unlock_irq(&b->bm_lock); in drbd_bm_merge_lel()
874 struct drbd_bitmap *b = device->bitmap; in drbd_bm_get_lel() local
880 if (!expect(b)) in drbd_bm_get_lel()
882 if (!expect(b->bm_pages)) in drbd_bm_get_lel()
885 spin_lock_irq(&b->bm_lock); in drbd_bm_get_lel()
886 if ((offset >= b->bm_words) || in drbd_bm_get_lel()
887 (end > b->bm_words) || in drbd_bm_get_lel()
892 (unsigned long) b->bm_words); in drbd_bm_get_lel()
896 p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset)); in drbd_bm_get_lel()
904 spin_unlock_irq(&b->bm_lock); in drbd_bm_get_lel()
910 struct drbd_bitmap *b = device->bitmap; in drbd_bm_set_all() local
911 if (!expect(b)) in drbd_bm_set_all()
913 if (!expect(b->bm_pages)) in drbd_bm_set_all()
916 spin_lock_irq(&b->bm_lock); in drbd_bm_set_all()
917 bm_memset(b, 0, 0xff, b->bm_words); in drbd_bm_set_all()
918 (void)bm_clear_surplus(b); in drbd_bm_set_all()
919 b->bm_set = b->bm_bits; in drbd_bm_set_all()
920 spin_unlock_irq(&b->bm_lock); in drbd_bm_set_all()
926 struct drbd_bitmap *b = device->bitmap; in drbd_bm_clear_all() local
927 if (!expect(b)) in drbd_bm_clear_all()
929 if (!expect(b->bm_pages)) in drbd_bm_clear_all()
932 spin_lock_irq(&b->bm_lock); in drbd_bm_clear_all()
933 bm_memset(b, 0, 0, b->bm_words); in drbd_bm_clear_all()
934 b->bm_set = 0; in drbd_bm_clear_all()
935 spin_unlock_irq(&b->bm_lock); in drbd_bm_clear_all()
955 struct drbd_bitmap *b = device->bitmap; in drbd_bm_endio() local
959 !bm_test_page_unchanged(b->bm_pages[idx])) in drbd_bm_endio()
966 bm_set_page_io_err(b->bm_pages[idx]); in drbd_bm_endio()
973 bm_clear_page_io_err(b->bm_pages[idx]); in drbd_bm_endio()
995 struct drbd_bitmap *b = device->bitmap; in bm_page_io_async() local
1014 bm_set_page_unchanged(b->bm_pages[page_nr]); in bm_page_io_async()
1019 copy_highpage(page, b->bm_pages[page_nr]); in bm_page_io_async()
1022 page = b->bm_pages[page_nr]; in bm_page_io_async()
1048 struct drbd_bitmap *b = device->bitmap; in bm_rw() local
1086 WARN_ON(!(BM_LOCKED_MASK & b->bm_flags)); in bm_rw()
1092 num_pages = b->bm_number_of_pages; in bm_rw()
1108 for (hint = 0; hint < b->n_bitmap_hints; hint++) { in bm_rw()
1109 i = b->al_bitmap_hints[hint]; in bm_rw()
1114 &page_private(b->bm_pages[i]))) in bm_rw()
1117 if (bm_test_page_unchanged(b->bm_pages[i])) in bm_rw()
1129 bm_test_page_unchanged(b->bm_pages[i])) { in bm_rw()
1136 !bm_test_page_lazy_writeout(b->bm_pages[i])) { in bm_rw()
1181 b->bm_set = bm_count_bits(b); in bm_rw()
1185 now = b->bm_set; in bm_rw()
1272 struct drbd_bitmap *b = device->bitmap; in __bm_find_next() local
1278 if (bm_fo > b->bm_bits) { in __bm_find_next()
1279 drbd_err(device, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits); in __bm_find_next()
1282 while (bm_fo < b->bm_bits) { in __bm_find_next()
1285 p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo)); in __bm_find_next()
1297 if (bm_fo >= b->bm_bits) in __bm_find_next()
1312 struct drbd_bitmap *b = device->bitmap; in bm_find_next() local
1315 if (!expect(b)) in bm_find_next()
1317 if (!expect(b->bm_pages)) in bm_find_next()
1320 spin_lock_irq(&b->bm_lock); in bm_find_next()
1321 if (BM_DONT_TEST & b->bm_flags) in bm_find_next()
1326 spin_unlock_irq(&b->bm_lock); in bm_find_next()
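__bm_find_next() scans the bitmap page by page from the start offset bm_fo: map the page holding bm_fo, search it, and either return the absolute bit number found or restart at the next page boundary; bm_find_next() is just the bm_lock-protected wrapper that refuses to run while BM_DONT_TEST is set. A rough sketch of the inner loop, with the search helper and the end-of-bitmap sentinel assumed:

/* Rough reconstruction; find_next_bit_le() and DRBD_END_OF_BITMAP are assumptions. */
while (bm_fo < b->bm_bits) {
	unsigned long bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;  /* first bit of this page */

	p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo));
	i = find_next_bit_le(p_addr, PAGE_SIZE * 8, bm_fo & BITS_PER_PAGE_MASK);
	/* unmap p_addr (counterpart of __bm_map_pidx, not shown) */
	if (i < PAGE_SIZE * 8) {
		bm_fo = bit_offset + i;                           /* absolute bit number */
		break;
	}
	bm_fo = bit_offset + PAGE_SIZE * 8;                       /* continue on the next page */
}
if (bm_fo >= b->bm_bits)
	bm_fo = DRBD_END_OF_BITMAP;                               /* assumed "not found" value */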
1366 struct drbd_bitmap *b = device->bitmap; in __bm_change_bits_to() local
1373 if (e >= b->bm_bits) { in __bm_change_bits_to()
1375 s, e, b->bm_bits); in __bm_change_bits_to()
1376 e = b->bm_bits ? b->bm_bits -1 : 0; in __bm_change_bits_to()
1379 unsigned int page_nr = bm_bit_to_page_idx(b, bitnr); in __bm_change_bits_to()
1384 bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); in __bm_change_bits_to()
1386 bm_set_page_need_writeout(b->bm_pages[last_page_nr]); in __bm_change_bits_to()
1389 p_addr = __bm_map_pidx(b, page_nr); in __bm_change_bits_to()
1400 bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); in __bm_change_bits_to()
1402 bm_set_page_need_writeout(b->bm_pages[last_page_nr]); in __bm_change_bits_to()
1404 b->bm_set += changed_total; in __bm_change_bits_to()
1416 struct drbd_bitmap *b = device->bitmap; in bm_change_bits_to() local
1419 if (!expect(b)) in bm_change_bits_to()
1421 if (!expect(b->bm_pages)) in bm_change_bits_to()
1424 spin_lock_irqsave(&b->bm_lock, flags); in bm_change_bits_to()
1425 if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags) in bm_change_bits_to()
1430 spin_unlock_irqrestore(&b->bm_lock, flags); in bm_change_bits_to()
1448 static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b, in bm_set_full_words_within_one_page() argument
1454 unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]); in bm_set_full_words_within_one_page()
1468 bm_set_page_lazy_writeout(b->bm_pages[page_nr]); in bm_set_full_words_within_one_page()
1469 b->bm_set += changed; in bm_set_full_words_within_one_page()
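bm_set_full_words_within_one_page() is the fast path for setting many bits at once: it maps one page with kmap_atomic(), overwrites whole words with ~0UL while counting how many bits actually flipped, marks the page for lazy writeout (bm_set_page_lazy_writeout, as shown above), and adds the delta to bm_set. A sketch of that body, with the local variables and loop bounds assumed:

unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);
int i, changed = 0;

for (i = first_word; i < last_word; i++) {               /* bounds assumed from the name */
	changed += BITS_PER_LONG - hweight_long(paddr[i]);    /* bits that were still clear */
	paddr[i] = ~0UL;                                      /* set the whole word */
}
kunmap_atomic(paddr);
if (changed) {
	bm_set_page_lazy_writeout(b->bm_pages[page_nr]);      /* lazy writeout is sufficient here */
	b->bm_set += changed;                                 /* keep the cached weight in sync */
}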
1488 struct drbd_bitmap *b = device->bitmap; in _drbd_bm_set_bits() local
1499 spin_lock_irq(&b->bm_lock); in _drbd_bm_set_bits()
1501 spin_unlock_irq(&b->bm_lock); in _drbd_bm_set_bits()
1507 spin_lock_irq(&b->bm_lock); in _drbd_bm_set_bits()
1524 spin_unlock_irq(&b->bm_lock); in _drbd_bm_set_bits()
1527 spin_lock_irq(&b->bm_lock); in _drbd_bm_set_bits()
1548 spin_unlock_irq(&b->bm_lock); in _drbd_bm_set_bits()
1561 struct drbd_bitmap *b = device->bitmap; in drbd_bm_test_bit() local
1565 if (!expect(b)) in drbd_bm_test_bit()
1567 if (!expect(b->bm_pages)) in drbd_bm_test_bit()
1570 spin_lock_irqsave(&b->bm_lock, flags); in drbd_bm_test_bit()
1571 if (BM_DONT_TEST & b->bm_flags) in drbd_bm_test_bit()
1573 if (bitnr < b->bm_bits) { in drbd_bm_test_bit()
1574 p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr)); in drbd_bm_test_bit()
1577 } else if (bitnr == b->bm_bits) { in drbd_bm_test_bit()
1580 drbd_err(device, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits); in drbd_bm_test_bit()
1584 spin_unlock_irqrestore(&b->bm_lock, flags); in drbd_bm_test_bit()
1592 struct drbd_bitmap *b = device->bitmap; in drbd_bm_count_bits() local
1602 if (!expect(b)) in drbd_bm_count_bits()
1604 if (!expect(b->bm_pages)) in drbd_bm_count_bits()
1607 spin_lock_irqsave(&b->bm_lock, flags); in drbd_bm_count_bits()
1608 if (BM_DONT_TEST & b->bm_flags) in drbd_bm_count_bits()
1611 unsigned int idx = bm_bit_to_page_idx(b, bitnr); in drbd_bm_count_bits()
1616 p_addr = bm_map_pidx(b, idx); in drbd_bm_count_bits()
1618 if (expect(bitnr < b->bm_bits)) in drbd_bm_count_bits()
1621 drbd_err(device, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits); in drbd_bm_count_bits()
1625 spin_unlock_irqrestore(&b->bm_lock, flags); in drbd_bm_count_bits()
1646 struct drbd_bitmap *b = device->bitmap; in drbd_bm_e_weight() local
1651 if (!expect(b)) in drbd_bm_e_weight()
1653 if (!expect(b->bm_pages)) in drbd_bm_e_weight()
1656 spin_lock_irqsave(&b->bm_lock, flags); in drbd_bm_e_weight()
1657 if (BM_DONT_TEST & b->bm_flags) in drbd_bm_e_weight()
1661 e = min((size_t)S2W(enr+1), b->bm_words); in drbd_bm_e_weight()
1663 if (s < b->bm_words) { in drbd_bm_e_weight()
1665 p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s)); in drbd_bm_e_weight()
1672 spin_unlock_irqrestore(&b->bm_lock, flags); in drbd_bm_e_weight()
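drbd_bm_e_weight() reports how many bits are set within one activity-log extent: it clamps the word range [S2W(enr), S2W(enr+1)) to bm_words, maps the page that holds the first word, and counts the set bits of those words. A hedged sketch of the counting step (the in-page word offset and the weight call are assumptions):

/* Assumed counting step; the in-page offset math and bitmap_weight() are guesses. */
if (s < b->bm_words) {
	int n = e - s;                                     /* words in this extent */
	p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
	count = bitmap_weight(p_addr + (s % (PAGE_SIZE / sizeof(long))),
			      n * BITS_PER_LONG);          /* set bits in those words */
	/* unmap p_addr (counterpart of bm_map_pidx, not shown) */
}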