Lines matching refs:b: every line in the dm-bufio driver source that references the struct dm_buffer pointer b, listed with its source line number and enclosing function ("argument" and "local" mark the lines where b is declared).
233 static void buffer_record_stack(struct dm_buffer *b) in buffer_record_stack() argument
235 b->stack_trace.nr_entries = 0; in buffer_record_stack()
236 b->stack_trace.max_entries = MAX_STACK; in buffer_record_stack()
237 b->stack_trace.entries = b->stack_entries; in buffer_record_stack()
238 b->stack_trace.skip = 2; in buffer_record_stack()
239 save_stack_trace(&b->stack_trace); in buffer_record_stack()
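
The buffer_record_stack() fragments above use the stacktrace API of this kernel generation (replaced by stack_trace_save() in later kernels): a caller-owned struct stack_trace is pointed at a fixed entries[] array and filled by save_stack_trace(). A minimal sketch of that pattern; MAX_STACK and the helper name are illustrative, not the driver's own:

#include <linux/stacktrace.h>

#define MAX_STACK 10	/* illustrative capture depth */

/* Record the current call chain into a caller-provided array. */
static void record_stack(struct stack_trace *trace, unsigned long *entries)
{
	trace->nr_entries = 0;		/* filled in by save_stack_trace() */
	trace->max_entries = MAX_STACK;
	trace->entries = entries;
	trace->skip = 2;		/* drop the innermost capture frames */
	save_stack_trace(trace);
}
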
249 struct dm_buffer *b; in __find() local
252 b = container_of(n, struct dm_buffer, node); in __find()
254 if (b->block == block) in __find()
255 return b; in __find()
257 n = (b->block < block) ? n->rb_right : n->rb_left; in __find()
263 static void __insert(struct dm_bufio_client *c, struct dm_buffer *b) in __insert() argument
271 if (found->block == b->block) { in __insert()
272 BUG_ON(found != b); in __insert()
277 new = (found->block < b->block) ? in __insert()
281 rb_link_node(&b->node, parent, new); in __insert()
282 rb_insert_color(&b->node, &c->buffer_tree); in __insert()
285 static void __remove(struct dm_bufio_client *c, struct dm_buffer *b) in __remove() argument
287 rb_erase(&b->node, &c->buffer_tree); in __remove()
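
The __find()/__insert()/__remove() fragments above keep every buffer in a per-client red-black tree keyed by block number. A minimal sketch of that lookup/insert pattern using the standard <linux/rbtree.h> helpers; the struct and function names here are illustrative:

#include <linux/rbtree.h>
#include <linux/types.h>

struct blk_node {
	struct rb_node node;
	sector_t block;		/* key: block number on the device */
};

/* Descend the tree: an equal key is a hit, smaller keys live to the left. */
static struct blk_node *blk_find(struct rb_root *root, sector_t block)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct blk_node *e = rb_entry(n, struct blk_node, node);

		if (e->block == block)
			return e;
		n = (e->block < block) ? n->rb_right : n->rb_left;
	}
	return NULL;
}

/* Find the insertion point, then link the node and rebalance. */
static void blk_insert(struct rb_root *root, struct blk_node *new_node)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct blk_node *e = rb_entry(*link, struct blk_node, node);

		parent = *link;
		link = (e->block < new_node->block) ?
			&(*link)->rb_right : &(*link)->rb_left;
	}
	rb_link_node(&new_node->node, parent, link);
	rb_insert_color(&new_node->node, root);
}

Removal is just rb_erase() on the embedded rb_node, as the __remove() fragment shows; the driver's __insert() additionally returns early (with a BUG_ON) when the block is already present.
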
425 struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask); in alloc_buffer() local
427 if (!b) in alloc_buffer()
430 b->c = c; in alloc_buffer()
432 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode); in alloc_buffer()
433 if (!b->data) { in alloc_buffer()
434 kmem_cache_free(c->slab_buffer, b); in alloc_buffer()
438 adjust_total_allocated(b->data_mode, (long)c->block_size); in alloc_buffer()
441 memset(&b->stack_trace, 0, sizeof(b->stack_trace)); in alloc_buffer()
443 return b; in alloc_buffer()
449 static void free_buffer(struct dm_buffer *b) in free_buffer() argument
451 struct dm_bufio_client *c = b->c; in free_buffer()
453 adjust_total_allocated(b->data_mode, -(long)c->block_size); in free_buffer()
455 free_buffer_data(c, b->data, b->data_mode); in free_buffer()
456 kmem_cache_free(c->slab_buffer, b); in free_buffer()
462 static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty) in __link_buffer() argument
464 struct dm_bufio_client *c = b->c; in __link_buffer()
467 b->block = block; in __link_buffer()
468 b->list_mode = dirty; in __link_buffer()
469 list_add(&b->lru_list, &c->lru[dirty]); in __link_buffer()
470 __insert(b->c, b); in __link_buffer()
471 b->last_accessed = jiffies; in __link_buffer()
477 static void __unlink_buffer(struct dm_buffer *b) in __unlink_buffer() argument
479 struct dm_bufio_client *c = b->c; in __unlink_buffer()
481 BUG_ON(!c->n_buffers[b->list_mode]); in __unlink_buffer()
483 c->n_buffers[b->list_mode]--; in __unlink_buffer()
484 __remove(b->c, b); in __unlink_buffer()
485 list_del(&b->lru_list); in __unlink_buffer()
491 static void __relink_lru(struct dm_buffer *b, int dirty) in __relink_lru() argument
493 struct dm_bufio_client *c = b->c; in __relink_lru()
495 BUG_ON(!c->n_buffers[b->list_mode]); in __relink_lru()
497 c->n_buffers[b->list_mode]--; in __relink_lru()
499 b->list_mode = dirty; in __relink_lru()
500 list_move(&b->lru_list, &c->lru[dirty]); in __relink_lru()
501 b->last_accessed = jiffies; in __relink_lru()
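
__link_buffer(), __unlink_buffer() and __relink_lru() above maintain two LRU lists per client (LIST_CLEAN and LIST_DIRTY), a per-list buffer count, and a last-access timestamp. A small sketch of the relink step under those assumptions; the increment of the destination count is not visible in the fragments above and is assumed, and the type names are illustrative:

#include <linux/jiffies.h>
#include <linux/list.h>

struct lru_cache {
	struct list_head lru[2];	/* 0 = clean, 1 = dirty */
	unsigned long n_entries[2];
};

struct lru_entry {
	struct list_head lru_list;
	int list_mode;			/* which list the entry is on */
	unsigned long last_accessed;	/* jiffies at last use */
};

/* Move an entry to the head of the requested list and refresh its age. */
static void lru_relink(struct lru_cache *c, struct lru_entry *e, int dirty)
{
	c->n_entries[e->list_mode]--;
	c->n_entries[dirty]++;
	e->list_mode = dirty;
	list_move(&e->lru_list, &c->lru[dirty]);
	e->last_accessed = jiffies;
}
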
528 struct dm_buffer *b = context; in dmio_complete() local
530 b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0); in dmio_complete()
533 static void use_dmio(struct dm_buffer *b, int rw, sector_t sector, in use_dmio() argument
541 .notify.context = b, in use_dmio()
542 .client = b->c->dm_io, in use_dmio()
545 .bdev = b->c->bdev, in use_dmio()
550 if (b->data_mode != DATA_MODE_VMALLOC) { in use_dmio()
552 io_req.mem.ptr.addr = (char *)b->data + offset; in use_dmio()
555 io_req.mem.ptr.vma = (char *)b->data + offset; in use_dmio()
560 b->end_io(b, errno_to_blk_status(r)); in use_dmio()
565 struct dm_buffer *b = bio->bi_private; in bio_complete() local
568 b->end_io(b, status); in bio_complete()
571 static void use_bio(struct dm_buffer *b, int rw, sector_t sector, in use_bio() argument
578 vec_size = b->c->block_size >> PAGE_SHIFT; in use_bio()
579 if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT)) in use_bio()
585 use_dmio(b, rw, sector, n_sectors, offset); in use_bio()
590 bio_set_dev(bio, b->c->bdev); in use_bio()
593 bio->bi_private = b; in use_bio()
595 ptr = (char *)b->data + offset; in use_bio()
613 static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t)) in submit_io() argument
619 b->end_io = end_io; in submit_io()
621 if (likely(b->c->sectors_per_block_bits >= 0)) in submit_io()
622 sector = b->block << b->c->sectors_per_block_bits; in submit_io()
624 sector = b->block * (b->c->block_size >> SECTOR_SHIFT); in submit_io()
625 sector += b->c->start; in submit_io()
628 n_sectors = b->c->block_size >> SECTOR_SHIFT; in submit_io()
631 if (b->c->write_callback) in submit_io()
632 b->c->write_callback(b); in submit_io()
633 offset = b->write_start; in submit_io()
634 end = b->write_end; in submit_io()
638 if (unlikely(end > b->c->block_size)) in submit_io()
639 end = b->c->block_size; in submit_io()
645 if (b->data_mode != DATA_MODE_VMALLOC) in submit_io()
646 use_bio(b, rw, sector, n_sectors, offset); in submit_io()
648 use_dmio(b, rw, sector, n_sectors, offset); in submit_io()
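
submit_io() above converts a block number into a 512-byte sector: a shift when the block size is a power of two (sectors_per_block_bits >= 0), a multiply otherwise, plus the client's start offset. A sketch of that translation; the helper name is illustrative:

#include <linux/blkdev.h>	/* sector_t, SECTOR_SHIFT */

static sector_t block_to_sector(sector_t block, unsigned int block_size,
				int sectors_per_block_bits, sector_t start)
{
	sector_t sector;

	if (sectors_per_block_bits >= 0)
		sector = block << sectors_per_block_bits;	/* power-of-two block size */
	else
		sector = block * (block_size >> SECTOR_SHIFT);	/* any 512-byte multiple */

	return sector + start;	/* offset of the data area on the device */
}

For example, with a 4 KiB block size sectors_per_block_bits is 3 (8 sectors per block), so block 10 starts at sector 80 plus start.
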
661 static void write_endio(struct dm_buffer *b, blk_status_t status) in write_endio() argument
663 b->write_error = status; in write_endio()
665 struct dm_bufio_client *c = b->c; in write_endio()
671 BUG_ON(!test_bit(B_WRITING, &b->state)); in write_endio()
674 clear_bit(B_WRITING, &b->state); in write_endio()
677 wake_up_bit(&b->state, B_WRITING); in write_endio()
689 static void __write_dirty_buffer(struct dm_buffer *b, in __write_dirty_buffer() argument
692 if (!test_bit(B_DIRTY, &b->state)) in __write_dirty_buffer()
695 clear_bit(B_DIRTY, &b->state); in __write_dirty_buffer()
696 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in __write_dirty_buffer()
698 b->write_start = b->dirty_start; in __write_dirty_buffer()
699 b->write_end = b->dirty_end; in __write_dirty_buffer()
702 submit_io(b, REQ_OP_WRITE, write_endio); in __write_dirty_buffer()
704 list_add_tail(&b->write_list, write_list); in __write_dirty_buffer()
712 struct dm_buffer *b = in __flush_write_list() local
714 list_del(&b->write_list); in __flush_write_list()
715 submit_io(b, REQ_OP_WRITE, write_endio); in __flush_write_list()
726 static void __make_buffer_clean(struct dm_buffer *b) in __make_buffer_clean() argument
728 BUG_ON(b->hold_count); in __make_buffer_clean()
730 if (!b->state) /* fast case */ in __make_buffer_clean()
733 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); in __make_buffer_clean()
734 __write_dirty_buffer(b, NULL); in __make_buffer_clean()
735 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in __make_buffer_clean()
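
The B_READING/B_WRITING handling above (wait_on_bit_lock_io() before submitting a write, clear_bit() plus wake_up_bit() in the endio paths, wait_on_bit_io() in __make_buffer_clean()) is the kernel's bit-waitqueue pattern for per-buffer I/O state. A sketch of the three roles, assuming a single illustrative state bit:

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

#define B_BUSY 0	/* illustrative state bit, standing in for B_READING/B_WRITING */

/* Submitter: take the bit as a lock before starting the I/O. */
static void start_io(unsigned long *state)
{
	wait_on_bit_lock_io(state, B_BUSY, TASK_UNINTERRUPTIBLE);
	/* ... submit the request; completion calls end_io() ... */
}

/* Completion: clear the bit, then wake anyone sleeping on it. */
static void end_io(unsigned long *state)
{
	smp_mb__before_atomic();	/* order earlier stores before the clear */
	clear_bit(B_BUSY, state);
	smp_mb__after_atomic();		/* order the clear before the wakeup */
	wake_up_bit(state, B_BUSY);
}

/* A waiter needing the buffer idle sleeps until the bit is cleared. */
static void wait_idle(unsigned long *state)
{
	wait_on_bit_io(state, B_BUSY, TASK_UNINTERRUPTIBLE);
}
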
744 struct dm_buffer *b; in __get_unclaimed_buffer() local
746 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) { in __get_unclaimed_buffer()
747 BUG_ON(test_bit(B_WRITING, &b->state)); in __get_unclaimed_buffer()
748 BUG_ON(test_bit(B_DIRTY, &b->state)); in __get_unclaimed_buffer()
750 if (!b->hold_count) { in __get_unclaimed_buffer()
751 __make_buffer_clean(b); in __get_unclaimed_buffer()
752 __unlink_buffer(b); in __get_unclaimed_buffer()
753 return b; in __get_unclaimed_buffer()
758 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) { in __get_unclaimed_buffer()
759 BUG_ON(test_bit(B_READING, &b->state)); in __get_unclaimed_buffer()
761 if (!b->hold_count) { in __get_unclaimed_buffer()
762 __make_buffer_clean(b); in __get_unclaimed_buffer()
763 __unlink_buffer(b); in __get_unclaimed_buffer()
764 return b; in __get_unclaimed_buffer()
809 struct dm_buffer *b; in __alloc_buffer_wait_no_callback() local
827 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); in __alloc_buffer_wait_no_callback()
828 if (b) in __alloc_buffer_wait_no_callback()
829 return b; in __alloc_buffer_wait_no_callback()
837 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); in __alloc_buffer_wait_no_callback()
839 if (b) in __alloc_buffer_wait_no_callback()
840 return b; in __alloc_buffer_wait_no_callback()
845 b = list_entry(c->reserved_buffers.next, in __alloc_buffer_wait_no_callback()
847 list_del(&b->lru_list); in __alloc_buffer_wait_no_callback()
850 return b; in __alloc_buffer_wait_no_callback()
853 b = __get_unclaimed_buffer(c); in __alloc_buffer_wait_no_callback()
854 if (b) in __alloc_buffer_wait_no_callback()
855 return b; in __alloc_buffer_wait_no_callback()
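
__alloc_buffer_wait_no_callback() above tries allocation in stages: an opportunistic GFP_NOWAIT attempt, then GFP_NOIO, and finally a buffer taken from the client's reserved list or reclaimed via __get_unclaimed_buffer(). A sketch of that fallback ladder for a generic slab cache; the helper name is illustrative:

#include <linux/gfp.h>
#include <linux/slab.h>

/*
 * Try cheap, non-intrusive allocation first; escalate to GFP_NOIO, which
 * may reclaim memory but will not start new I/O while doing so.  NULL
 * tells the caller to fall back to a pre-allocated reserve instead.
 */
static void *alloc_with_fallback(struct kmem_cache *cache)
{
	void *p;

	p = kmem_cache_alloc(cache, GFP_NOWAIT | __GFP_NORETRY |
				    __GFP_NOMEMALLOC | __GFP_NOWARN);
	if (p)
		return p;

	return kmem_cache_alloc(cache, GFP_NOIO | __GFP_NORETRY |
				       __GFP_NOMEMALLOC | __GFP_NOWARN);
}
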
863 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf); in __alloc_buffer_wait() local
865 if (!b) in __alloc_buffer_wait()
869 c->alloc_callback(b); in __alloc_buffer_wait()
871 return b; in __alloc_buffer_wait()
877 static void __free_buffer_wake(struct dm_buffer *b) in __free_buffer_wake() argument
879 struct dm_bufio_client *c = b->c; in __free_buffer_wake()
882 free_buffer(b); in __free_buffer_wake()
884 list_add(&b->lru_list, &c->reserved_buffers); in __free_buffer_wake()
894 struct dm_buffer *b, *tmp; in __write_dirty_buffers_async() local
896 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in __write_dirty_buffers_async()
897 BUG_ON(test_bit(B_READING, &b->state)); in __write_dirty_buffers_async()
899 if (!test_bit(B_DIRTY, &b->state) && in __write_dirty_buffers_async()
900 !test_bit(B_WRITING, &b->state)) { in __write_dirty_buffers_async()
901 __relink_lru(b, LIST_CLEAN); in __write_dirty_buffers_async()
905 if (no_wait && test_bit(B_WRITING, &b->state)) in __write_dirty_buffers_async()
908 __write_dirty_buffer(b, write_list); in __write_dirty_buffers_async()
958 struct dm_buffer *b = __get_unclaimed_buffer(c); in __check_watermark() local
960 if (!b) in __check_watermark()
963 __free_buffer_wake(b); in __check_watermark()
979 struct dm_buffer *b, *new_b = NULL; in __bufio_new() local
983 b = __find(c, block); in __bufio_new()
984 if (b) in __bufio_new()
998 b = __find(c, block); in __bufio_new()
999 if (b) { in __bufio_new()
1006 b = new_b; in __bufio_new()
1007 b->hold_count = 1; in __bufio_new()
1008 b->read_error = 0; in __bufio_new()
1009 b->write_error = 0; in __bufio_new()
1010 __link_buffer(b, block, LIST_CLEAN); in __bufio_new()
1013 b->state = 0; in __bufio_new()
1014 return b; in __bufio_new()
1017 b->state = 1 << B_READING; in __bufio_new()
1020 return b; in __bufio_new()
1032 if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state))) in __bufio_new()
1035 b->hold_count++; in __bufio_new()
1036 __relink_lru(b, test_bit(B_DIRTY, &b->state) || in __bufio_new()
1037 test_bit(B_WRITING, &b->state)); in __bufio_new()
1038 return b; in __bufio_new()
1045 static void read_endio(struct dm_buffer *b, blk_status_t status) in read_endio() argument
1047 b->read_error = status; in read_endio()
1049 BUG_ON(!test_bit(B_READING, &b->state)); in read_endio()
1052 clear_bit(B_READING, &b->state); in read_endio()
1055 wake_up_bit(&b->state, B_READING); in read_endio()
1068 struct dm_buffer *b; in new_read() local
1073 b = __bufio_new(c, block, nf, &need_submit, &write_list); in new_read()
1075 if (b && b->hold_count == 1) in new_read()
1076 buffer_record_stack(b); in new_read()
1082 if (!b) in new_read()
1086 submit_io(b, REQ_OP_READ, read_endio); in new_read()
1088 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); in new_read()
1090 if (b->read_error) { in new_read()
1091 int error = blk_status_to_errno(b->read_error); in new_read()
1093 dm_bufio_release(b); in new_read()
1098 *bp = b; in new_read()
1100 return b->data; in new_read()
1142 struct dm_buffer *b; in dm_bufio_prefetch() local
1143 b = __bufio_new(c, block, NF_PREFETCH, &need_submit, in dm_bufio_prefetch()
1152 if (unlikely(b != NULL)) { in dm_bufio_prefetch()
1156 submit_io(b, REQ_OP_READ, read_endio); in dm_bufio_prefetch()
1157 dm_bufio_release(b); in dm_bufio_prefetch()
1174 void dm_bufio_release(struct dm_buffer *b) in dm_bufio_release() argument
1176 struct dm_bufio_client *c = b->c; in dm_bufio_release()
1180 BUG_ON(!b->hold_count); in dm_bufio_release()
1182 b->hold_count--; in dm_bufio_release()
1183 if (!b->hold_count) { in dm_bufio_release()
1191 if ((b->read_error || b->write_error) && in dm_bufio_release()
1192 !test_bit(B_READING, &b->state) && in dm_bufio_release()
1193 !test_bit(B_WRITING, &b->state) && in dm_bufio_release()
1194 !test_bit(B_DIRTY, &b->state)) { in dm_bufio_release()
1195 __unlink_buffer(b); in dm_bufio_release()
1196 __free_buffer_wake(b); in dm_bufio_release()
1204 void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, in dm_bufio_mark_partial_buffer_dirty() argument
1207 struct dm_bufio_client *c = b->c; in dm_bufio_mark_partial_buffer_dirty()
1210 BUG_ON(end > b->c->block_size); in dm_bufio_mark_partial_buffer_dirty()
1214 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_mark_partial_buffer_dirty()
1216 if (!test_and_set_bit(B_DIRTY, &b->state)) { in dm_bufio_mark_partial_buffer_dirty()
1217 b->dirty_start = start; in dm_bufio_mark_partial_buffer_dirty()
1218 b->dirty_end = end; in dm_bufio_mark_partial_buffer_dirty()
1219 __relink_lru(b, LIST_DIRTY); in dm_bufio_mark_partial_buffer_dirty()
1221 if (start < b->dirty_start) in dm_bufio_mark_partial_buffer_dirty()
1222 b->dirty_start = start; in dm_bufio_mark_partial_buffer_dirty()
1223 if (end > b->dirty_end) in dm_bufio_mark_partial_buffer_dirty()
1224 b->dirty_end = end; in dm_bufio_mark_partial_buffer_dirty()
1231 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b) in dm_bufio_mark_buffer_dirty() argument
1233 dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size); in dm_bufio_mark_buffer_dirty()
1261 struct dm_buffer *b, *tmp; in dm_bufio_write_dirty_buffers() local
1272 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in dm_bufio_write_dirty_buffers()
1278 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_write_dirty_buffers()
1280 if (test_bit(B_WRITING, &b->state)) { in dm_bufio_write_dirty_buffers()
1283 b->hold_count++; in dm_bufio_write_dirty_buffers()
1285 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_write_dirty_buffers()
1288 b->hold_count--; in dm_bufio_write_dirty_buffers()
1290 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_write_dirty_buffers()
1294 if (!test_bit(B_DIRTY, &b->state) && in dm_bufio_write_dirty_buffers()
1295 !test_bit(B_WRITING, &b->state)) in dm_bufio_write_dirty_buffers()
1296 __relink_lru(b, LIST_CLEAN); in dm_bufio_write_dirty_buffers()
1365 void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block) in dm_bufio_release_move() argument
1367 struct dm_bufio_client *c = b->c; in dm_bufio_release_move()
1391 BUG_ON(!b->hold_count); in dm_bufio_release_move()
1392 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_release_move()
1394 __write_dirty_buffer(b, NULL); in dm_bufio_release_move()
1395 if (b->hold_count == 1) { in dm_bufio_release_move()
1396 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_release_move()
1398 set_bit(B_DIRTY, &b->state); in dm_bufio_release_move()
1399 b->dirty_start = 0; in dm_bufio_release_move()
1400 b->dirty_end = c->block_size; in dm_bufio_release_move()
1401 __unlink_buffer(b); in dm_bufio_release_move()
1402 __link_buffer(b, new_block, LIST_DIRTY); in dm_bufio_release_move()
1405 wait_on_bit_lock_io(&b->state, B_WRITING, in dm_bufio_release_move()
1414 old_block = b->block; in dm_bufio_release_move()
1415 __unlink_buffer(b); in dm_bufio_release_move()
1416 __link_buffer(b, new_block, b->list_mode); in dm_bufio_release_move()
1417 submit_io(b, REQ_OP_WRITE, write_endio); in dm_bufio_release_move()
1418 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_release_move()
1420 __unlink_buffer(b); in dm_bufio_release_move()
1421 __link_buffer(b, old_block, b->list_mode); in dm_bufio_release_move()
1425 dm_bufio_release(b); in dm_bufio_release_move()
1437 struct dm_buffer *b; in dm_bufio_forget() local
1441 b = __find(c, block); in dm_bufio_forget()
1442 if (b && likely(!b->hold_count) && likely(!b->state)) { in dm_bufio_forget()
1443 __unlink_buffer(b); in dm_bufio_forget()
1444 __free_buffer_wake(b); in dm_bufio_forget()
1474 sector_t dm_bufio_get_block_number(struct dm_buffer *b) in dm_bufio_get_block_number() argument
1476 return b->block; in dm_bufio_get_block_number()
1480 void *dm_bufio_get_block_data(struct dm_buffer *b) in dm_bufio_get_block_data() argument
1482 return b->data; in dm_bufio_get_block_data()
1486 void *dm_bufio_get_aux_data(struct dm_buffer *b) in dm_bufio_get_aux_data() argument
1488 return b + 1; in dm_bufio_get_aux_data()
1492 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b) in dm_bufio_get_client() argument
1494 return b->c; in dm_bufio_get_client()
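
The exported accessors above (dm_bufio_get_block_number(), dm_bufio_get_block_data(), dm_bufio_get_aux_data(), dm_bufio_get_client()) round out the client-facing API that the rest of this listing implements. A hedged usage sketch of a typical read-modify-write cycle; the dm_bufio_client_create() parameter list shown is the pre-"flags" form and is an assumption about this kernel generation, and the function and block numbers are illustrative:

#include <linux/blkdev.h>
#include <linux/dm-bufio.h>
#include <linux/err.h>
#include <linux/string.h>

/* Read block 5 through the cache, modify it, and flush it back. */
static int bufio_example(struct block_device *bdev)
{
	struct dm_bufio_client *c;
	struct dm_buffer *b;
	void *data;
	int r;

	c = dm_bufio_client_create(bdev, 4096 /* block size */,
				   1 /* reserved buffers */, 0 /* aux size */,
				   NULL /* alloc_callback */,
				   NULL /* write_callback */);
	if (IS_ERR(c))
		return PTR_ERR(c);

	data = dm_bufio_read(c, 5, &b);		/* pins the buffer (hold_count) */
	if (IS_ERR(data)) {
		r = PTR_ERR(data);
		goto out;
	}

	memset(data, 0, 16);			/* touch the cached block */
	dm_bufio_mark_buffer_dirty(b);		/* queue it on LIST_DIRTY */
	dm_bufio_release(b);			/* drop the hold */

	r = dm_bufio_write_dirty_buffers(c);	/* write back and wait */
out:
	dm_bufio_client_destroy(c);
	return r;
}
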
1500 struct dm_buffer *b; in drop_buffers() local
1513 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
1514 __free_buffer_wake(b); in drop_buffers()
1517 list_for_each_entry(b, &c->lru[i], lru_list) { in drop_buffers()
1521 (unsigned long long)b->block, b->hold_count, i); in drop_buffers()
1523 print_stack_trace(&b->stack_trace, 1); in drop_buffers()
1524 b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */ in drop_buffers()
1529 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
1530 __free_buffer_wake(b); in drop_buffers()
1547 static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp) in __try_evict_buffer() argument
1550 if (test_bit(B_READING, &b->state) || in __try_evict_buffer()
1551 test_bit(B_WRITING, &b->state) || in __try_evict_buffer()
1552 test_bit(B_DIRTY, &b->state)) in __try_evict_buffer()
1556 if (b->hold_count) in __try_evict_buffer()
1559 __make_buffer_clean(b); in __try_evict_buffer()
1560 __unlink_buffer(b); in __try_evict_buffer()
1561 __free_buffer_wake(b); in __try_evict_buffer()
1580 struct dm_buffer *b, *tmp; in __scan() local
1587 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { in __scan()
1588 if (__try_evict_buffer(b, gfp_mask)) in __scan()
1705 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL); in dm_bufio_client_create() local
1707 if (!b) { in dm_bufio_client_create()
1711 __free_buffer_wake(b); in dm_bufio_client_create()
1732 struct dm_buffer *b = list_entry(c->reserved_buffers.next, in dm_bufio_client_create() local
1734 list_del(&b->lru_list); in dm_bufio_client_create()
1735 free_buffer(b); in dm_bufio_client_create()
1772 struct dm_buffer *b = list_entry(c->reserved_buffers.next, in dm_bufio_client_destroy() local
1774 list_del(&b->lru_list); in dm_bufio_client_destroy()
1775 free_buffer(b); in dm_bufio_client_destroy()
1809 static bool older_than(struct dm_buffer *b, unsigned long age_hz) in older_than() argument
1811 return time_after_eq(jiffies, b->last_accessed + age_hz); in older_than()
1816 struct dm_buffer *b, *tmp; in __evict_old_buffers() local
1831 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) { in __evict_old_buffers()
1835 if (!older_than(b, age_hz)) in __evict_old_buffers()
1838 if (__try_evict_buffer(b, 0)) in __evict_old_buffers()
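
older_than()/__evict_old_buffers() at the end of the listing age out clean buffers by comparing jiffies against last_accessed plus a maximum age. A tiny sketch of the wraparound-safe comparison that pattern relies on; the helper name is illustrative:

#include <linux/jiffies.h>
#include <linux/types.h>

/*
 * True once the entry has been idle for at least age_hz jiffies.
 * time_after_eq() handles jiffies wraparound, unlike a plain
 * "jiffies >= last_accessed + age_hz" comparison.
 */
static bool idle_at_least(unsigned long last_accessed, unsigned long age_hz)
{
	return time_after_eq(jiffies, last_accessed + age_hz);
}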