Lines matching references to b (struct dm_buffer *) in drivers/md/dm-bufio.c
255 static void buffer_record_stack(struct dm_buffer *b) in buffer_record_stack() argument
257 b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2); in buffer_record_stack()
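The fragment above saves an acquisition stack trace into the buffer's debug fields; drop_buffers() later prints it for buffers that were leaked. A minimal sketch of the same stacktrace API, assuming fields that mirror the dm_buffer debug members (MAX_STACK and the entries array are stand-ins, not the exact kernel layout):

#include <linux/stacktrace.h>

#define MAX_STACK 10

struct traced_object {
	unsigned long stack_entries[MAX_STACK];
	unsigned int stack_len;
};

static void record_stack(struct traced_object *o)
{
	/* skip 2 frames so the trace starts at the interesting caller */
	o->stack_len = stack_trace_save(o->stack_entries, MAX_STACK, 2);
}

static void print_stack(const struct traced_object *o)
{
	/* indent each frame by one space, as drop_buffers() does */
	stack_trace_print(o->stack_entries, o->stack_len, 1);
}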
267 struct dm_buffer *b; in __find() local
270 b = container_of(n, struct dm_buffer, node); in __find()
272 if (b->block == block) in __find()
273 return b; in __find()
275 n = block < b->block ? n->rb_left : n->rb_right; in __find()
284 struct dm_buffer *b; in __find_next() local
288 b = container_of(n, struct dm_buffer, node); in __find_next()
290 if (b->block == block) in __find_next()
291 return b; in __find_next()
293 if (block <= b->block) { in __find_next()
295 best = b; in __find_next()
304 static void __insert(struct dm_bufio_client *c, struct dm_buffer *b) in __insert() argument
312 if (found->block == b->block) { in __insert()
313 BUG_ON(found != b); in __insert()
318 new = b->block < found->block ? in __insert()
322 rb_link_node(&b->node, parent, new); in __insert()
323 rb_insert_color(&b->node, &c->buffer_tree); in __insert()
326 static void __remove(struct dm_bufio_client *c, struct dm_buffer *b) in __remove() argument
328 rb_erase(&b->node, &c->buffer_tree); in __remove()
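Lines 267-328 above are the per-client red-black tree that indexes buffers by block number. The following is a condensed sketch of that lookup/insert pattern over the kernel rbtree API; the struct is a stand-in for struct dm_buffer, and the duplicate-key check done by __insert() is omitted:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rbtree.h>

/* Stand-in for struct dm_buffer: just the node and the key. */
struct buf {
	struct rb_node node;
	sector_t block;
};

static struct buf *buf_find(struct rb_root *root, sector_t block)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct buf *b = container_of(n, struct buf, node);

		if (b->block == block)
			return b;
		n = block < b->block ? n->rb_left : n->rb_right;
	}
	return NULL;
}

static void buf_insert(struct rb_root *root, struct buf *b)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	while (*new) {
		struct buf *found = container_of(*new, struct buf, node);

		parent = *new;
		new = b->block < found->block ?
			&found->node.rb_left : &found->node.rb_right;
	}
	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, root);
}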
333 static void adjust_total_allocated(struct dm_buffer *b, bool unlink) in adjust_total_allocated() argument
344 data_mode = b->data_mode; in adjust_total_allocated()
345 diff = (long)b->c->block_size; in adjust_total_allocated()
358 b->accessed = 1; in adjust_total_allocated()
361 list_add(&b->global_list, &global_queue); in adjust_total_allocated()
366 list_del(&b->global_list); in adjust_total_allocated()
483 struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask); in alloc_buffer() local
485 if (!b) in alloc_buffer()
488 b->c = c; in alloc_buffer()
490 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode); in alloc_buffer()
491 if (!b->data) { in alloc_buffer()
492 kmem_cache_free(c->slab_buffer, b); in alloc_buffer()
497 b->stack_len = 0; in alloc_buffer()
499 return b; in alloc_buffer()
505 static void free_buffer(struct dm_buffer *b) in free_buffer() argument
507 struct dm_bufio_client *c = b->c; in free_buffer()
509 free_buffer_data(c, b->data, b->data_mode); in free_buffer()
510 kmem_cache_free(c->slab_buffer, b); in free_buffer()
516 static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty) in __link_buffer() argument
518 struct dm_bufio_client *c = b->c; in __link_buffer()
521 b->block = block; in __link_buffer()
522 b->list_mode = dirty; in __link_buffer()
523 list_add(&b->lru_list, &c->lru[dirty]); in __link_buffer()
524 __insert(b->c, b); in __link_buffer()
525 b->last_accessed = jiffies; in __link_buffer()
527 adjust_total_allocated(b, false); in __link_buffer()
533 static void __unlink_buffer(struct dm_buffer *b) in __unlink_buffer() argument
535 struct dm_bufio_client *c = b->c; in __unlink_buffer()
537 BUG_ON(!c->n_buffers[b->list_mode]); in __unlink_buffer()
539 c->n_buffers[b->list_mode]--; in __unlink_buffer()
540 __remove(b->c, b); in __unlink_buffer()
541 list_del(&b->lru_list); in __unlink_buffer()
543 adjust_total_allocated(b, true); in __unlink_buffer()
549 static void __relink_lru(struct dm_buffer *b, int dirty) in __relink_lru() argument
551 struct dm_bufio_client *c = b->c; in __relink_lru()
553 b->accessed = 1; in __relink_lru()
555 BUG_ON(!c->n_buffers[b->list_mode]); in __relink_lru()
557 c->n_buffers[b->list_mode]--; in __relink_lru()
559 b->list_mode = dirty; in __relink_lru()
560 list_move(&b->lru_list, &c->lru[dirty]); in __relink_lru()
561 b->last_accessed = jiffies; in __relink_lru()
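__link_buffer(), __unlink_buffer() and __relink_lru() above keep each buffer on one of two per-client LRU lists (LIST_CLEAN or LIST_DIRTY) together with a per-list counter. A minimal sketch of the relink step, assuming the same two-list layout (the real code also sets the accessed flag consumed by the global shrinker):

#include <linux/list.h>
#include <linux/jiffies.h>

enum { LIST_CLEAN, LIST_DIRTY };

struct cache {
	struct list_head lru[2];
	unsigned long n_buffers[2];
};

struct entry {
	struct list_head lru_list;
	int list_mode;
	unsigned long last_accessed;
};

/* Move an entry to the head of the other LRU and fix the counters. */
static void relink_lru(struct cache *c, struct entry *e, int dirty)
{
	c->n_buffers[e->list_mode]--;
	c->n_buffers[dirty]++;
	e->list_mode = dirty;
	list_move(&e->lru_list, &c->lru[dirty]);
	e->last_accessed = jiffies;
}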
588 struct dm_buffer *b = context; in dmio_complete() local
590 b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0); in dmio_complete()
593 static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector, in use_dmio() argument
600 .notify.context = b, in use_dmio()
601 .client = b->c->dm_io, in use_dmio()
604 .bdev = b->c->bdev, in use_dmio()
609 if (b->data_mode != DATA_MODE_VMALLOC) { in use_dmio()
611 io_req.mem.ptr.addr = (char *)b->data + offset; in use_dmio()
614 io_req.mem.ptr.vma = (char *)b->data + offset; in use_dmio()
619 b->end_io(b, errno_to_blk_status(r)); in use_dmio()
624 struct dm_buffer *b = bio->bi_private; in bio_complete() local
628 b->end_io(b, status); in bio_complete()
631 static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector, in use_bio() argument
638 vec_size = b->c->block_size >> PAGE_SHIFT; in use_bio()
639 if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT)) in use_bio()
645 use_dmio(b, op, sector, n_sectors, offset); in use_bio()
648 bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, op); in use_bio()
651 bio->bi_private = b; in use_bio()
653 ptr = (char *)b->data + offset; in use_bio()
684 static void submit_io(struct dm_buffer *b, enum req_op op, in submit_io() argument
691 b->end_io = end_io; in submit_io()
693 sector = block_to_sector(b->c, b->block); in submit_io()
696 n_sectors = b->c->block_size >> SECTOR_SHIFT; in submit_io()
699 if (b->c->write_callback) in submit_io()
700 b->c->write_callback(b); in submit_io()
701 offset = b->write_start; in submit_io()
702 end = b->write_end; in submit_io()
706 if (unlikely(end > b->c->block_size)) in submit_io()
707 end = b->c->block_size; in submit_io()
713 if (b->data_mode != DATA_MODE_VMALLOC) in submit_io()
714 use_bio(b, op, sector, n_sectors, offset); in submit_io()
716 use_dmio(b, op, sector, n_sectors, offset); in submit_io()
729 static void write_endio(struct dm_buffer *b, blk_status_t status) in write_endio() argument
731 b->write_error = status; in write_endio()
733 struct dm_bufio_client *c = b->c; in write_endio()
739 BUG_ON(!test_bit(B_WRITING, &b->state)); in write_endio()
742 clear_bit(B_WRITING, &b->state); in write_endio()
745 wake_up_bit(&b->state, B_WRITING); in write_endio()
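write_endio() pairs with the wait_on_bit_io() calls elsewhere in the file: the completion side clears the state bit and wakes waiters, the consumer side sleeps until the bit drops. A small sketch of that bit-wait handshake (B_WRITING's actual bit number is not shown in the listing, so the value here is illustrative):

#include <linux/wait_bit.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
#include <linux/sched.h>

#define B_WRITING 1	/* illustrative bit number */

/* Completion side, as in write_endio(): clear the bit, then wake.
 * The barrier orders the clear_bit() before the wakeup. */
static void io_done(unsigned long *state)
{
	clear_bit(B_WRITING, state);
	smp_mb__after_atomic();
	wake_up_bit(state, B_WRITING);
}

/* Waiter side, as in __make_buffer_clean(): sleep until the bit clears. */
static void io_wait(unsigned long *state)
{
	wait_on_bit_io(state, B_WRITING, TASK_UNINTERRUPTIBLE);
}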
757 static void __write_dirty_buffer(struct dm_buffer *b, in __write_dirty_buffer() argument
760 if (!test_bit(B_DIRTY, &b->state)) in __write_dirty_buffer()
763 clear_bit(B_DIRTY, &b->state); in __write_dirty_buffer()
764 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in __write_dirty_buffer()
766 b->write_start = b->dirty_start; in __write_dirty_buffer()
767 b->write_end = b->dirty_end; in __write_dirty_buffer()
770 submit_io(b, REQ_OP_WRITE, write_endio); in __write_dirty_buffer()
772 list_add_tail(&b->write_list, write_list); in __write_dirty_buffer()
780 struct dm_buffer *b = in __flush_write_list() local
782 list_del(&b->write_list); in __flush_write_list()
783 submit_io(b, REQ_OP_WRITE, write_endio); in __flush_write_list()
794 static void __make_buffer_clean(struct dm_buffer *b) in __make_buffer_clean() argument
796 BUG_ON(b->hold_count); in __make_buffer_clean()
799 if (!smp_load_acquire(&b->state)) /* fast case */ in __make_buffer_clean()
802 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); in __make_buffer_clean()
803 __write_dirty_buffer(b, NULL); in __make_buffer_clean()
804 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in __make_buffer_clean()
813 struct dm_buffer *b; in __get_unclaimed_buffer() local
815 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) { in __get_unclaimed_buffer()
816 BUG_ON(test_bit(B_WRITING, &b->state)); in __get_unclaimed_buffer()
817 BUG_ON(test_bit(B_DIRTY, &b->state)); in __get_unclaimed_buffer()
820 unlikely(test_bit_acquire(B_READING, &b->state))) in __get_unclaimed_buffer()
823 if (!b->hold_count) { in __get_unclaimed_buffer()
824 __make_buffer_clean(b); in __get_unclaimed_buffer()
825 __unlink_buffer(b); in __get_unclaimed_buffer()
826 return b; in __get_unclaimed_buffer()
834 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) { in __get_unclaimed_buffer()
835 BUG_ON(test_bit(B_READING, &b->state)); in __get_unclaimed_buffer()
837 if (!b->hold_count) { in __get_unclaimed_buffer()
838 __make_buffer_clean(b); in __get_unclaimed_buffer()
839 __unlink_buffer(b); in __get_unclaimed_buffer()
840 return b; in __get_unclaimed_buffer()
885 struct dm_buffer *b; in __alloc_buffer_wait_no_callback() local
903 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); in __alloc_buffer_wait_no_callback()
904 if (b) in __alloc_buffer_wait_no_callback()
905 return b; in __alloc_buffer_wait_no_callback()
913 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); in __alloc_buffer_wait_no_callback()
915 if (b) in __alloc_buffer_wait_no_callback()
916 return b; in __alloc_buffer_wait_no_callback()
921 b = list_entry(c->reserved_buffers.next, in __alloc_buffer_wait_no_callback()
923 list_del(&b->lru_list); in __alloc_buffer_wait_no_callback()
926 return b; in __alloc_buffer_wait_no_callback()
929 b = __get_unclaimed_buffer(c); in __alloc_buffer_wait_no_callback()
930 if (b) in __alloc_buffer_wait_no_callback()
931 return b; in __alloc_buffer_wait_no_callback()
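__alloc_buffer_wait_no_callback() escalates through allocation strategies: a GFP_NOWAIT attempt that must not sleep or touch reserves, then a GFP_NOIO attempt that may reclaim (but not issue I/O), and only then the reserved-buffer list or eviction of an unclaimed buffer. A hedged sketch of the two GFP steps (the reserve/eviction fallbacks are left out):

#include <linux/slab.h>
#include <linux/gfp.h>

static void *alloc_with_fallback(size_t size)
{
	void *p;

	/* opportunistic: no sleeping, no retries, no warnings */
	p = kmalloc(size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC |
		    __GFP_NOWARN);
	if (p)
		return p;

	/* stronger attempt: may sleep and reclaim, but must not recurse
	 * into the I/O path (GFP_NOIO) */
	return kmalloc(size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC |
		       __GFP_NOWARN);
}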
939 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf); in __alloc_buffer_wait() local
941 if (!b) in __alloc_buffer_wait()
945 c->alloc_callback(b); in __alloc_buffer_wait()
947 return b; in __alloc_buffer_wait()
953 static void __free_buffer_wake(struct dm_buffer *b) in __free_buffer_wake() argument
955 struct dm_bufio_client *c = b->c; in __free_buffer_wake()
958 free_buffer(b); in __free_buffer_wake()
960 list_add(&b->lru_list, &c->reserved_buffers); in __free_buffer_wake()
970 struct dm_buffer *b, *tmp; in __write_dirty_buffers_async() local
972 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in __write_dirty_buffers_async()
973 BUG_ON(test_bit(B_READING, &b->state)); in __write_dirty_buffers_async()
975 if (!test_bit(B_DIRTY, &b->state) && in __write_dirty_buffers_async()
976 !test_bit(B_WRITING, &b->state)) { in __write_dirty_buffers_async()
977 __relink_lru(b, LIST_CLEAN); in __write_dirty_buffers_async()
981 if (no_wait && test_bit(B_WRITING, &b->state)) in __write_dirty_buffers_async()
984 __write_dirty_buffer(b, write_list); in __write_dirty_buffers_async()
1009 struct dm_buffer *b, *new_b = NULL; in __bufio_new() local
1013 b = __find(c, block); in __bufio_new()
1014 if (b) in __bufio_new()
1028 b = __find(c, block); in __bufio_new()
1029 if (b) { in __bufio_new()
1036 b = new_b; in __bufio_new()
1037 b->hold_count = 1; in __bufio_new()
1038 b->read_error = 0; in __bufio_new()
1039 b->write_error = 0; in __bufio_new()
1040 __link_buffer(b, block, LIST_CLEAN); in __bufio_new()
1043 b->state = 0; in __bufio_new()
1044 return b; in __bufio_new()
1047 b->state = 1 << B_READING; in __bufio_new()
1050 return b; in __bufio_new()
1062 if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) in __bufio_new()
1065 b->hold_count++; in __bufio_new()
1066 __relink_lru(b, test_bit(B_DIRTY, &b->state) || in __bufio_new()
1067 test_bit(B_WRITING, &b->state)); in __bufio_new()
1068 return b; in __bufio_new()
1075 static void read_endio(struct dm_buffer *b, blk_status_t status) in read_endio() argument
1077 b->read_error = status; in read_endio()
1079 BUG_ON(!test_bit(B_READING, &b->state)); in read_endio()
1082 clear_bit(B_READING, &b->state); in read_endio()
1085 wake_up_bit(&b->state, B_READING); in read_endio()
1098 struct dm_buffer *b; in new_read() local
1103 b = __bufio_new(c, block, nf, &need_submit, &write_list); in new_read()
1105 if (b && b->hold_count == 1) in new_read()
1106 buffer_record_stack(b); in new_read()
1112 if (!b) in new_read()
1116 submit_io(b, REQ_OP_READ, read_endio); in new_read()
1118 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); in new_read()
1120 if (b->read_error) { in new_read()
1121 int error = blk_status_to_errno(b->read_error); in new_read()
1123 dm_bufio_release(b); in new_read()
1128 *bp = b; in new_read()
1130 return b->data; in new_read()
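new_read() is the common implementation behind the exported dm_bufio_get()/dm_bufio_read()/dm_bufio_new() calls: it returns the block's data pointer, stores the struct dm_buffer through *bp, and leaves a hold_count reference that the caller must drop. A minimal usage sketch, assuming a dm_bufio_client "c" that was already created:

#include <linux/err.h>
#include <linux/dm-bufio.h>

/* Read one block, look at its contents, then release the reference. */
static int read_one_block(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *buf;
	void *data;

	data = dm_bufio_read(c, block, &buf);
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* ... inspect data, which is block_size bytes ... */

	dm_bufio_release(buf);
	return 0;
}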
1172 struct dm_buffer *b; in dm_bufio_prefetch() local
1173 b = __bufio_new(c, block, NF_PREFETCH, &need_submit, in dm_bufio_prefetch()
1182 if (unlikely(b != NULL)) { in dm_bufio_prefetch()
1186 submit_io(b, REQ_OP_READ, read_endio); in dm_bufio_prefetch()
1187 dm_bufio_release(b); in dm_bufio_prefetch()
1204 void dm_bufio_release(struct dm_buffer *b) in dm_bufio_release() argument
1206 struct dm_bufio_client *c = b->c; in dm_bufio_release()
1210 BUG_ON(!b->hold_count); in dm_bufio_release()
1212 b->hold_count--; in dm_bufio_release()
1213 if (!b->hold_count) { in dm_bufio_release()
1221 if ((b->read_error || b->write_error) && in dm_bufio_release()
1222 !test_bit_acquire(B_READING, &b->state) && in dm_bufio_release()
1223 !test_bit(B_WRITING, &b->state) && in dm_bufio_release()
1224 !test_bit(B_DIRTY, &b->state)) { in dm_bufio_release()
1225 __unlink_buffer(b); in dm_bufio_release()
1226 __free_buffer_wake(b); in dm_bufio_release()
1234 void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, in dm_bufio_mark_partial_buffer_dirty() argument
1237 struct dm_bufio_client *c = b->c; in dm_bufio_mark_partial_buffer_dirty()
1240 BUG_ON(end > b->c->block_size); in dm_bufio_mark_partial_buffer_dirty()
1244 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_mark_partial_buffer_dirty()
1246 if (!test_and_set_bit(B_DIRTY, &b->state)) { in dm_bufio_mark_partial_buffer_dirty()
1247 b->dirty_start = start; in dm_bufio_mark_partial_buffer_dirty()
1248 b->dirty_end = end; in dm_bufio_mark_partial_buffer_dirty()
1249 __relink_lru(b, LIST_DIRTY); in dm_bufio_mark_partial_buffer_dirty()
1251 if (start < b->dirty_start) in dm_bufio_mark_partial_buffer_dirty()
1252 b->dirty_start = start; in dm_bufio_mark_partial_buffer_dirty()
1253 if (end > b->dirty_end) in dm_bufio_mark_partial_buffer_dirty()
1254 b->dirty_end = end; in dm_bufio_mark_partial_buffer_dirty()
1261 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b) in dm_bufio_mark_buffer_dirty() argument
1263 dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size); in dm_bufio_mark_buffer_dirty()
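dm_bufio_mark_buffer_dirty() is simply the whole-block case of dm_bufio_mark_partial_buffer_dirty(); the partial variant records a byte range so writeback can be limited to the bytes that actually changed, and repeated calls widen dirty_start/dirty_end. An illustrative use on a buffer the caller already holds:

#include <linux/string.h>
#include <linux/dm-bufio.h>

/* Overwrite bytes [off, off + len) of the block and mark only that
 * range dirty. Assumes "b" is currently held by this caller. */
static void update_range(struct dm_buffer *b, unsigned int off,
			 unsigned int len, const void *src)
{
	memcpy((char *)dm_bufio_get_block_data(b) + off, src, len);
	dm_bufio_mark_partial_buffer_dirty(b, off, off + len);
}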
1291 struct dm_buffer *b, *tmp; in dm_bufio_write_dirty_buffers() local
1302 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in dm_bufio_write_dirty_buffers()
1308 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_write_dirty_buffers()
1310 if (test_bit(B_WRITING, &b->state)) { in dm_bufio_write_dirty_buffers()
1313 b->hold_count++; in dm_bufio_write_dirty_buffers()
1315 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_write_dirty_buffers()
1318 b->hold_count--; in dm_bufio_write_dirty_buffers()
1320 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_write_dirty_buffers()
1324 if (!test_bit(B_DIRTY, &b->state) && in dm_bufio_write_dirty_buffers()
1325 !test_bit(B_WRITING, &b->state)) in dm_bufio_write_dirty_buffers()
1326 __relink_lru(b, LIST_CLEAN); in dm_bufio_write_dirty_buffers()
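dm_bufio_write_dirty_buffers() walks the dirty LRU, submits the writes and waits for completion. Callers that need the data durable on media typically follow it with dm_bufio_issue_flush(); a hedged sketch of that commit-style sequence:

#include <linux/dm-bufio.h>

static int commit_buffers(struct dm_bufio_client *c)
{
	int r;

	/* write out everything marked dirty and wait for it */
	r = dm_bufio_write_dirty_buffers(c);
	if (r)
		return r;

	/* then flush the device's volatile write cache */
	return dm_bufio_issue_flush(c);
}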
1417 void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block) in dm_bufio_release_move() argument
1419 struct dm_bufio_client *c = b->c; in dm_bufio_release_move()
1443 BUG_ON(!b->hold_count); in dm_bufio_release_move()
1444 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_release_move()
1446 __write_dirty_buffer(b, NULL); in dm_bufio_release_move()
1447 if (b->hold_count == 1) { in dm_bufio_release_move()
1448 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_release_move()
1450 set_bit(B_DIRTY, &b->state); in dm_bufio_release_move()
1451 b->dirty_start = 0; in dm_bufio_release_move()
1452 b->dirty_end = c->block_size; in dm_bufio_release_move()
1453 __unlink_buffer(b); in dm_bufio_release_move()
1454 __link_buffer(b, new_block, LIST_DIRTY); in dm_bufio_release_move()
1457 wait_on_bit_lock_io(&b->state, B_WRITING, in dm_bufio_release_move()
1466 old_block = b->block; in dm_bufio_release_move()
1467 __unlink_buffer(b); in dm_bufio_release_move()
1468 __link_buffer(b, new_block, b->list_mode); in dm_bufio_release_move()
1469 submit_io(b, REQ_OP_WRITE, write_endio); in dm_bufio_release_move()
1470 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_release_move()
1472 __unlink_buffer(b); in dm_bufio_release_move()
1473 __link_buffer(b, old_block, b->list_mode); in dm_bufio_release_move()
1477 dm_bufio_release(b); in dm_bufio_release_move()
1481 static void forget_buffer_locked(struct dm_buffer *b) in forget_buffer_locked() argument
1483 if (likely(!b->hold_count) && likely(!smp_load_acquire(&b->state))) { in forget_buffer_locked()
1484 __unlink_buffer(b); in forget_buffer_locked()
1485 __free_buffer_wake(b); in forget_buffer_locked()
1497 struct dm_buffer *b; in dm_bufio_forget() local
1501 b = __find(c, block); in dm_bufio_forget()
1502 if (b) in dm_bufio_forget()
1503 forget_buffer_locked(b); in dm_bufio_forget()
1511 struct dm_buffer *b; in dm_bufio_forget_buffers() local
1517 b = __find_next(c, block); in dm_bufio_forget_buffers()
1518 if (b) { in dm_bufio_forget_buffers()
1519 block = b->block + 1; in dm_bufio_forget_buffers()
1520 forget_buffer_locked(b); in dm_bufio_forget_buffers()
1525 if (!b) in dm_bufio_forget_buffers()
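dm_bufio_forget() drops a single cached block without writing it back, and dm_bufio_forget_buffers() does the same for a block range by repeatedly calling __find_next(). A small illustrative wrapper, e.g. for blocks that were just freed on disk and whose cached contents are therefore stale:

#include <linux/dm-bufio.h>

static void invalidate_blocks(struct dm_bufio_client *c,
			      sector_t block, sector_t n_blocks)
{
	if (n_blocks == 1)
		dm_bufio_forget(c, block);
	else
		dm_bufio_forget_buffers(c, block, n_blocks);
}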
1565 sector_t dm_bufio_get_block_number(struct dm_buffer *b) in dm_bufio_get_block_number() argument
1567 return b->block; in dm_bufio_get_block_number()
1571 void *dm_bufio_get_block_data(struct dm_buffer *b) in dm_bufio_get_block_data() argument
1573 return b->data; in dm_bufio_get_block_data()
1577 void *dm_bufio_get_aux_data(struct dm_buffer *b) in dm_bufio_get_aux_data() argument
1579 return b + 1; in dm_bufio_get_aux_data()
1583 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b) in dm_bufio_get_client() argument
1585 return b->c; in dm_bufio_get_client()
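dm_bufio_get_aux_data() returns the client-private area placed immediately after struct dm_buffer ("return b + 1"); its size is the aux_size argument given to dm_bufio_client_create(). A sketch with a hypothetical aux structure:

#include <linux/types.h>
#include <linux/dm-bufio.h>

/* Hypothetical per-buffer client state; the aux_size passed when the
 * client was created must be at least sizeof(struct my_aux). */
struct my_aux {
	u32 validated;
};

static void mark_validated(struct dm_buffer *b)
{
	struct my_aux *aux = dm_bufio_get_aux_data(b);

	aux->validated = 1;
}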
1591 struct dm_buffer *b; in drop_buffers() local
1604 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
1605 __free_buffer_wake(b); in drop_buffers()
1608 list_for_each_entry(b, &c->lru[i], lru_list) { in drop_buffers()
1612 (unsigned long long)b->block, b->hold_count, i); in drop_buffers()
1614 stack_trace_print(b->stack_entries, b->stack_len, 1); in drop_buffers()
1616 b->hold_count = 0; in drop_buffers()
1621 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
1622 __free_buffer_wake(b); in drop_buffers()
1639 static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp) in __try_evict_buffer() argument
1642 (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) { in __try_evict_buffer()
1643 if (test_bit_acquire(B_READING, &b->state) || in __try_evict_buffer()
1644 test_bit(B_WRITING, &b->state) || in __try_evict_buffer()
1645 test_bit(B_DIRTY, &b->state)) in __try_evict_buffer()
1649 if (b->hold_count) in __try_evict_buffer()
1652 __make_buffer_clean(b); in __try_evict_buffer()
1653 __unlink_buffer(b); in __try_evict_buffer()
1654 __free_buffer_wake(b); in __try_evict_buffer()
1672 struct dm_buffer *b, *tmp; in __scan() local
1679 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { in __scan()
1684 if (__try_evict_buffer(b, GFP_KERNEL)) { in __scan()
1820 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL); in dm_bufio_client_create() local
1822 if (!b) { in dm_bufio_client_create()
1826 __free_buffer_wake(b); in dm_bufio_client_create()
1851 struct dm_buffer *b = list_entry(c->reserved_buffers.next, in dm_bufio_client_create() local
1853 list_del(&b->lru_list); in dm_bufio_client_create()
1854 free_buffer(b); in dm_bufio_client_create()
1894 struct dm_buffer *b = list_entry(c->reserved_buffers.next, in dm_bufio_client_destroy() local
1896 list_del(&b->lru_list); in dm_bufio_client_destroy()
1897 free_buffer(b); in dm_bufio_client_destroy()
1933 static bool older_than(struct dm_buffer *b, unsigned long age_hz) in older_than() argument
1935 return time_after_eq(jiffies, b->last_accessed + age_hz); in older_than()
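older_than() compares jiffies timestamps with time_after_eq(), which stays correct across counter wraparound; age_hz is the maximum age already converted to jiffies. A small sketch of the conversion and check (names illustrative):

#include <linux/types.h>
#include <linux/jiffies.h>

/* True if a jiffies timestamp is at least max_age_secs old. */
static bool older_than_secs(unsigned long last_accessed,
			    unsigned int max_age_secs)
{
	unsigned long age_hz = msecs_to_jiffies(max_age_secs * 1000u);

	return time_after_eq(jiffies, last_accessed + age_hz);
}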
1940 struct dm_buffer *b, *tmp; in __evict_old_buffers() local
1955 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) { in __evict_old_buffers()
1959 if (!older_than(b, age_hz)) in __evict_old_buffers()
1962 if (__try_evict_buffer(b, 0)) in __evict_old_buffers()
1975 struct dm_buffer *b; in do_global_cleanup() local
1996 b = list_entry(global_queue.prev, struct dm_buffer, global_list); in do_global_cleanup()
1998 if (b->accessed) { in do_global_cleanup()
1999 b->accessed = 0; in do_global_cleanup()
2000 list_move(&b->global_list, &global_queue); in do_global_cleanup()
2007 current_client = b->c; in do_global_cleanup()
2024 if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) { in do_global_cleanup()
2026 list_move(&b->global_list, &global_queue); in do_global_cleanup()
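do_global_cleanup() evicts from the tail of the global LRU but gives recently used buffers a second chance: when b->accessed is set, the bit is cleared and the buffer is rotated back to the head instead of being evicted, a clock-style approximation of LRU. A minimal sketch of that victim selection, assuming the same accessed flag and list (the real loop also has iteration limits and per-client locking):

#include <linux/list.h>

struct gbuf {
	struct list_head global_list;
	unsigned char accessed;
};

/* Pick an eviction victim from the tail of "queue", rotating entries
 * with the accessed bit set back to the head (second chance). */
static struct gbuf *pick_victim(struct list_head *queue)
{
	while (!list_empty(queue)) {
		struct gbuf *b = list_entry(queue->prev, struct gbuf,
					    global_list);

		if (b->accessed) {
			b->accessed = 0;
			list_move(&b->global_list, queue);
			continue;
		}
		return b;
	}
	return NULL;
}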