Lines Matching +full:cache +full:- +full:block

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2009-2011 Red Hat, Inc.
10 #include <linux/dm-bufio.h>
12 #include <linux/device-mapper.h>
13 #include <linux/dm-io.h>
65 * dm_buffer->list_mode
71 /*--------------------------------------------------------------*/
99 /*--------------*/
103 lru->cursor = NULL; in lru_init()
104 lru->count = 0; in lru_init()
105 INIT_LIST_HEAD(&lru->iterators); in lru_init()
110 WARN_ON_ONCE(lru->cursor); in lru_destroy()
111 WARN_ON_ONCE(!list_empty(&lru->iterators)); in lru_destroy()
123 atomic_set(&le->referenced, 0); in lru_insert()
125 if (lru->cursor) { in lru_insert()
126 list_add_tail(&le->list, lru->cursor); in lru_insert()
128 INIT_LIST_HEAD(&le->list); in lru_insert()
129 lru->cursor = &le->list; in lru_insert()
131 lru->count++; in lru_insert()
134 /*--------------*/
149 it->lru = lru; in lru_iter_begin()
150 it->stop = lru->cursor ? to_le(lru->cursor->prev) : NULL; in lru_iter_begin()
151 it->e = lru->cursor ? to_le(lru->cursor) : NULL; in lru_iter_begin()
152 list_add(&it->list, &lru->iterators); in lru_iter_begin()
160 list_del(&it->list); in lru_iter_end()
176 while (it->e) { in lru_iter_next()
177 e = it->e; in lru_iter_next()
180 if (it->e == it->stop) in lru_iter_next()
181 it->e = NULL; in lru_iter_next()
183 it->e = to_le(it->e->list.next); in lru_iter_next()
200 list_for_each_entry(it, &lru->iterators, list) { in lru_iter_invalidate()
201 /* Move it->e forwards if necessary. */ in lru_iter_invalidate()
202 if (it->e == e) { in lru_iter_invalidate()
203 it->e = to_le(it->e->list.next); in lru_iter_invalidate()
204 if (it->e == e) in lru_iter_invalidate()
205 it->e = NULL; in lru_iter_invalidate()
208 /* Move it->stop backwards if necessary. */ in lru_iter_invalidate()
209 if (it->stop == e) { in lru_iter_invalidate()
210 it->stop = to_le(it->stop->list.prev); in lru_iter_invalidate()
211 if (it->stop == e) in lru_iter_invalidate()
212 it->stop = NULL; in lru_iter_invalidate()
217 /*--------------*/
225 if (lru->count == 1) { in lru_remove()
226 lru->cursor = NULL; in lru_remove()
228 if (lru->cursor == &le->list) in lru_remove()
229 lru->cursor = lru->cursor->next; in lru_remove()
230 list_del(&le->list); in lru_remove()
232 lru->count--; in lru_remove()
240 atomic_set(&le->referenced, 1); in lru_reference()
243 /*--------------*/
260 struct list_head *h = lru->cursor;
270 while (tested < lru->count) {
273 if (atomic_read(&le->referenced)) {
274 atomic_set(&le->referenced, 0);
283 lru->cursor = le->list.next;
291 lru->cursor = le->list.next;
296 h = h->next;
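Taken together, the referenced flag set in lru_reference() and cleared as lru_evict() sweeps from the cursor give the list second-chance (CLOCK-style) behaviour: a buffer touched since the last sweep is spared once before becoming an eviction candidate. A minimal standalone sketch of that policy (illustrative userspace C, not the kernel code):

    /* Second-chance (CLOCK) eviction over a fixed table -- illustrative only. */
    #include <stdbool.h>
    #include <stddef.h>

    struct slot { bool in_use; bool referenced; };

    /* Returns the slot to evict and advances the hand, or -1 if nothing is evictable. */
    static int clock_evict(struct slot *s, size_t n, size_t *hand)
    {
            size_t tested;

            for (tested = 0; tested < 2 * n; tested++) {    /* every slot gets one second chance */
                    size_t i = (*hand)++ % n;

                    if (!s[i].in_use)
                            continue;
                    if (s[i].referenced) {
                            s[i].referenced = false;        /* spare it this pass */
                            continue;
                    }
                    return (int)i;                          /* untouched since the last pass: evict */
            }
            return -1;
    }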
304 /*--------------------------------------------------------------*/
314 * Describes how the block was allocated:
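For context, the allocation modes this comment refers to are defined a few lines further down in the same file; roughly (names as in dm-bufio.c, exact values may differ between kernel versions):

    #define DATA_MODE_SLAB            0   /* from a per-client kmem_cache */
    #define DATA_MODE_GET_FREE_PAGES  1   /* from __get_free_pages() */
    #define DATA_MODE_VMALLOC         2   /* from __vmalloc() */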
330 sector_t block; member
364 /*--------------------------------------------------------------*/
367 * The buffer cache manages buffers, particularly:
368 * - inc/dec of holder count
369 * - setting the last_accessed field
370 * - maintains clean/dirty state along with lru
371 * - selecting buffers that match predicates
374 * - allocation/freeing of buffers.
375 * - IO
376 * - Eviction or cache sizing.
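To make the fragments that follow easier to read: the cache spreads buffers across several rb-trees, each guarded by its own rw_semaphore, plus one LRU per list mode. A simplified sketch of that layout (field names as in dm-bufio.c; details vary between kernel versions):

    struct buffer_tree {
            struct rw_semaphore lock;   /* one lock stripe per tree */
            struct rb_root root;        /* buffers indexed by block number */
    };

    struct dm_buffer_cache {
            struct lru lru[LIST_SIZE];  /* LIST_CLEAN and LIST_DIRTY */
            unsigned int num_locks;     /* number of trees / lock stripes */
            struct buffer_tree trees[]; /* flexible array, sized in cache_init() */
    };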
399 static inline unsigned int cache_index(sector_t block, unsigned int num_locks) in cache_index() argument
401 return dm_hash_locks_index(block, num_locks); in cache_index()
404 static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block) in cache_read_lock() argument
406 down_read(&bc->trees[cache_index(block, bc->num_locks)].lock); in cache_read_lock()
409 static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block) in cache_read_unlock() argument
411 up_read(&bc->trees[cache_index(block, bc->num_locks)].lock); in cache_read_unlock()
414 static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block) in cache_write_lock() argument
416 down_write(&bc->trees[cache_index(block, bc->num_locks)].lock); in cache_write_lock()
419 static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block) in cache_write_unlock() argument
421 up_write(&bc->trees[cache_index(block, bc->num_locks)].lock); in cache_write_unlock()
429 struct dm_buffer_cache *cache; member
435 static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool write) in lh_init() argument
437 lh->cache = cache; in lh_init()
438 lh->write = write; in lh_init()
439 lh->no_previous = cache->num_locks; in lh_init()
440 lh->previous = lh->no_previous; in lh_init()
445 if (lh->write) in __lh_lock()
446 down_write(&lh->cache->trees[index].lock); in __lh_lock()
448 down_read(&lh->cache->trees[index].lock); in __lh_lock()
453 if (lh->write) in __lh_unlock()
454 up_write(&lh->cache->trees[index].lock); in __lh_unlock()
456 up_read(&lh->cache->trees[index].lock); in __lh_unlock()
464 if (lh->previous != lh->no_previous) { in lh_exit()
465 __lh_unlock(lh, lh->previous); in lh_exit()
466 lh->previous = lh->no_previous; in lh_exit()
476 unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */ in lh_next()
478 if (lh->previous != lh->no_previous) { in lh_next()
479 if (lh->previous != index) { in lh_next()
480 __lh_unlock(lh, lh->previous); in lh_next()
482 lh->previous = index; in lh_next()
486 lh->previous = index; in lh_next()
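lock_history lets code that visits buffers in block order keep holding the lock for the current stripe and only switch locks when the next block hashes to a different tree. A hedged usage sketch (not taken verbatim from the file; bc, begin and end are assumed to come from the caller):

    struct lock_history lh;
    sector_t block;

    lh_init(&lh, bc, false);            /* false: take the stripes as read locks */
    for (block = begin; block < end; block++) {
            lh_next(&lh, block);        /* lock block's stripe, dropping the previous
                                           stripe only if it is a different one */
            /* ... inspect the buffer for this block ... */
    }
    lh_exit(&lh);                       /* drop whichever stripe is still held */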
509 bc->num_locks = num_locks; in cache_init()
511 for (i = 0; i < bc->num_locks; i++) { in cache_init()
512 init_rwsem(&bc->trees[i].lock); in cache_init()
513 bc->trees[i].root = RB_ROOT; in cache_init()
516 lru_init(&bc->lru[LIST_CLEAN]); in cache_init()
517 lru_init(&bc->lru[LIST_DIRTY]); in cache_init()
524 for (i = 0; i < bc->num_locks; i++) in cache_destroy()
525 WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root)); in cache_destroy()
527 lru_destroy(&bc->lru[LIST_CLEAN]); in cache_destroy()
528 lru_destroy(&bc->lru[LIST_DIRTY]); in cache_destroy()
531 /*--------------*/
538 return bc->lru[list_mode].count; in cache_count()
546 /*--------------*/
549 * Gets a specific buffer, indexed by block.
555 static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block) in __cache_get() argument
557 struct rb_node *n = root->rb_node; in __cache_get()
563 if (b->block == block) in __cache_get()
566 n = block < b->block ? n->rb_left : n->rb_right; in __cache_get()
574 atomic_inc(&b->hold_count); in __cache_inc_buffer()
575 WRITE_ONCE(b->last_accessed, jiffies); in __cache_inc_buffer()
578 static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block) in cache_get() argument
582 cache_read_lock(bc, block); in cache_get()
583 b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block); in cache_get()
585 lru_reference(&b->lru); in cache_get()
588 cache_read_unlock(bc, block); in cache_get()
593 /*--------------*/
603 cache_read_lock(bc, b->block); in cache_put()
604 BUG_ON(!atomic_read(&b->hold_count)); in cache_put()
605 r = atomic_dec_and_test(&b->hold_count); in cache_put()
606 cache_read_unlock(bc, b->block); in cache_put()
611 /*--------------*/
635 lh_next(w->lh, b->block); in __evict_pred()
637 if (atomic_read(&b->hold_count)) in __evict_pred()
640 return w->pred(b, w->context); in __evict_pred()
651 le = lru_evict(&bc->lru[list_mode], __evict_pred, &w); in __cache_evict()
657 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root); in __cache_evict()
675 /*--------------*/
682 cache_write_lock(bc, b->block); in cache_mark()
683 if (list_mode != b->list_mode) { in cache_mark()
684 lru_remove(&bc->lru[b->list_mode], &b->lru); in cache_mark()
685 b->list_mode = list_mode; in cache_mark()
686 lru_insert(&bc->lru[b->list_mode], &b->lru); in cache_mark()
688 cache_write_unlock(bc, b->block); in cache_mark()
691 /*--------------*/
705 le = lru_evict(&bc->lru[old_mode], __evict_pred, &w); in __cache_mark_many()
710 b->list_mode = new_mode; in __cache_mark_many()
711 lru_insert(&bc->lru[b->list_mode], &b->lru); in __cache_mark_many()
725 /*--------------*/
746 struct lru *lru = &bc->lru[list_mode];
749 if (!lru->cursor)
752 first = le = to_le(lru->cursor);
756 lh_next(lh, b->block);
767 le = to_le(le->list.next);
781 /*--------------*/
784 * Passes ownership of the buffer to the cache. Returns false if the
794 struct rb_node **new = &root->rb_node, *parent = NULL; in __cache_insert()
800 if (found->block == b->block) in __cache_insert()
804 new = b->block < found->block ? in __cache_insert()
805 &found->node.rb_left : &found->node.rb_right; in __cache_insert()
808 rb_link_node(&b->node, parent, new); in __cache_insert()
809 rb_insert_color(&b->node, root); in __cache_insert()
818 if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE)) in cache_insert()
821 cache_write_lock(bc, b->block); in cache_insert()
822 BUG_ON(atomic_read(&b->hold_count) != 1); in cache_insert()
823 r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b); in cache_insert()
825 lru_insert(&bc->lru[b->list_mode], &b->lru); in cache_insert()
826 cache_write_unlock(bc, b->block); in cache_insert()
831 /*--------------*/
834 * Removes buffer from cache, ownership of the buffer passes back to the caller.
843 cache_write_lock(bc, b->block); in cache_remove()
845 if (atomic_read(&b->hold_count) != 1) { in cache_remove()
849 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root); in cache_remove()
850 lru_remove(&bc->lru[b->list_mode], &b->lru); in cache_remove()
853 cache_write_unlock(bc, b->block); in cache_remove()
858 /*--------------*/
862 static struct dm_buffer *__find_next(struct rb_root *root, sector_t block) in __find_next() argument
864 struct rb_node *n = root->rb_node; in __find_next()
871 if (b->block == block) in __find_next()
874 if (block <= b->block) { in __find_next()
875 n = n->rb_left; in __find_next()
878 n = n->rb_right; in __find_next()
896 if (!b || (b->block >= end)) in __remove_range()
899 begin = b->block + 1; in __remove_range()
901 if (atomic_read(&b->hold_count)) in __remove_range()
905 rb_erase(&b->node, root); in __remove_range()
906 lru_remove(&bc->lru[b->list_mode], &b->lru); in __remove_range()
918 for (i = 0; i < bc->num_locks; i++) { in cache_remove_range()
919 down_write(&bc->trees[i].lock); in cache_remove_range()
920 __remove_range(bc, &bc->trees[i].root, begin, end, pred, release); in cache_remove_range()
921 up_write(&bc->trees[i].lock); in cache_remove_range()
925 /*----------------------------------------------------------------*/
938 * context), so some clean-not-writing buffers can be held on
979 struct dm_buffer_cache cache; /* must be last member */ member
984 /*----------------------------------------------------------------*/
986 #define dm_bufio_in_request() (!!current->bio_list)
990 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) in dm_bufio_lock()
991 spin_lock_bh(&c->spinlock); in dm_bufio_lock()
993 mutex_lock_nested(&c->lock, dm_bufio_in_request()); in dm_bufio_lock()
998 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) in dm_bufio_unlock()
999 spin_unlock_bh(&c->spinlock); in dm_bufio_unlock()
1001 mutex_unlock(&c->lock); in dm_bufio_unlock()
1004 /*----------------------------------------------------------------*/
1007 * Default cache size: available memory divided by the ratio.
1012 * Total cache size set by the user.
1018 * at any time. If it disagrees, the user has changed cache size.
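As a worked example (assuming the in-tree default of DM_BUFIO_MEMORY_PERCENT == 2, see dm_bufio_init() near the end of the file): a machine with 4 GiB of direct-mapped memory gets a default cache size of roughly 4096 MiB * 2 / 100 ≈ 82 MiB, unless the user overrides it through the max_cache_size_bytes module parameter.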
1036 /*----------------------------------------------------------------*/
1061 b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2); in buffer_record_stack()
1065 /*----------------------------------------------------------------*/
1078 data_mode = b->data_mode; in adjust_total_allocated()
1079 diff = (long)b->c->block_size; in adjust_total_allocated()
1081 diff = -diff; in adjust_total_allocated()
1101 * Change the number of clients and recalculate per-client limit.
1113 * Use default if set to 0 and report the actual cache size used. in __cache_size_refresh()
1146 if (unlikely(c->slab_cache != NULL)) { in alloc_buffer_data()
1148 return kmem_cache_alloc(c->slab_cache, gfp_mask); in alloc_buffer_data()
1151 if (c->block_size <= KMALLOC_MAX_SIZE && in alloc_buffer_data()
1155 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT)); in alloc_buffer_data()
1160 return __vmalloc(c->block_size, gfp_mask); in alloc_buffer_data()
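Pieced together, these lines implement a three-tier allocation fallback; a simplified sketch of the decision (reconstructed, not verbatim — the real function also records *data_mode and adjusts gfp flags):

    if (c->slab_cache)                          /* a dedicated slab exists for this block size */
            return kmem_cache_alloc(c->slab_cache, gfp_mask);

    if (c->block_size <= KMALLOC_MAX_SIZE)      /* power-of-two block that fits a page order */
            return (void *)__get_free_pages(gfp_mask,
                            c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));

    return __vmalloc(c->block_size, gfp_mask);  /* last resort; such buffers go through dm-io */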
1171 kmem_cache_free(c->slab_cache, data); in free_buffer_data()
1176 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT)); in free_buffer_data()
1195 struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask); in alloc_buffer()
1200 b->c = c; in alloc_buffer()
1202 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode); in alloc_buffer()
1203 if (!b->data) { in alloc_buffer()
1204 kmem_cache_free(c->slab_buffer, b); in alloc_buffer()
1210 b->stack_len = 0; in alloc_buffer()
1220 struct dm_bufio_client *c = b->c; in free_buffer()
1223 free_buffer_data(c, b->data, b->data_mode); in free_buffer()
1224 kmem_cache_free(c->slab_buffer, b); in free_buffer()
1228 *--------------------------------------------------------------------------
1233 * memory-consumption per buffer, so it is not viable);
1235 * the memory must be direct-mapped, not vmalloced;
1241 * rejects the bio because it is too large, use dm-io layer to do the I/O.
1242 * The dm-io layer splits the I/O into multiple requests, avoiding the above
1244 *--------------------------------------------------------------------------
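In short, submit_io() (below) chooses the mechanism per buffer, and use_bio() itself falls back to use_dmio() if it cannot build the bio. A hedged sketch of that dispatch (simplified from the surrounding lines):

    if (b->data_mode != DATA_MODE_VMALLOC)
            use_bio(b, op, sector, n_sectors, offset);   /* direct-mapped data: single inline bvec */
    else
            use_dmio(b, op, sector, n_sectors, offset);  /* vmalloc'd data: let dm-io split it */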
1248 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
1255 b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0); in dmio_complete()
1266 .client = b->c->dm_io, in use_dmio()
1269 .bdev = b->c->bdev, in use_dmio()
1274 if (b->data_mode != DATA_MODE_VMALLOC) { in use_dmio()
1276 io_req.mem.ptr.addr = (char *)b->data + offset; in use_dmio()
1279 io_req.mem.ptr.vma = (char *)b->data + offset; in use_dmio()
1284 b->end_io(b, errno_to_blk_status(r)); in use_dmio()
1289 struct dm_buffer *b = bio->bi_private; in bio_complete()
1290 blk_status_t status = bio->bi_status; in bio_complete()
1294 b->end_io(b, status); in bio_complete()
1309 bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op); in use_bio()
1310 bio->bi_iter.bi_sector = sector; in use_bio()
1311 bio->bi_end_io = bio_complete; in use_bio()
1312 bio->bi_private = b; in use_bio()
1314 ptr = (char *)b->data + offset; in use_bio()
1322 static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block) in block_to_sector() argument
1326 if (likely(c->sectors_per_block_bits >= 0)) in block_to_sector()
1327 sector = block << c->sectors_per_block_bits; in block_to_sector()
1329 sector = block * (c->block_size >> SECTOR_SHIFT); in block_to_sector()
1330 sector += c->start; in block_to_sector()
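For example, with 4 KiB blocks (sectors_per_block_bits == 3) and c->start == 0, block 100 maps to sector 100 << 3 = 800; for a block size that is not a power of two, sectors_per_block_bits is negative and the multiply branch is used instead.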
1342 b->end_io = end_io; in submit_io()
1344 sector = block_to_sector(b->c, b->block); in submit_io()
1347 n_sectors = b->c->block_size >> SECTOR_SHIFT; in submit_io()
1350 if (b->c->write_callback) in submit_io()
1351 b->c->write_callback(b); in submit_io()
1352 offset = b->write_start; in submit_io()
1353 end = b->write_end; in submit_io()
1354 offset &= -DM_BUFIO_WRITE_ALIGN; in submit_io()
1355 end += DM_BUFIO_WRITE_ALIGN - 1; in submit_io()
1356 end &= -DM_BUFIO_WRITE_ALIGN; in submit_io()
1357 if (unlikely(end > b->c->block_size)) in submit_io()
1358 end = b->c->block_size; in submit_io()
1361 n_sectors = (end - offset) >> SECTOR_SHIFT; in submit_io()
1364 if (b->data_mode != DATA_MODE_VMALLOC) in submit_io()
1371 *--------------------------------------------------------------
1373 *--------------------------------------------------------------
1384 b->write_error = status; in write_endio()
1386 struct dm_bufio_client *c = b->c; in write_endio()
1388 (void)cmpxchg(&c->async_write_error, 0, in write_endio()
1392 BUG_ON(!test_bit(B_WRITING, &b->state)); in write_endio()
1395 clear_bit(B_WRITING, &b->state); in write_endio()
1398 wake_up_bit(&b->state, B_WRITING); in write_endio()
1404 * - If the buffer is not dirty, exit.
1405 * - If there is a previous write going on, wait for it to finish (we can't
1407 * - Submit our write and don't wait on it. We set B_WRITING indicating
1413 if (!test_bit(B_DIRTY, &b->state)) in __write_dirty_buffer()
1416 clear_bit(B_DIRTY, &b->state); in __write_dirty_buffer()
1417 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in __write_dirty_buffer()
1419 b->write_start = b->dirty_start; in __write_dirty_buffer()
1420 b->write_end = b->dirty_end; in __write_dirty_buffer()
1425 list_add_tail(&b->write_list, write_list); in __write_dirty_buffer()
1435 list_entry(write_list->next, struct dm_buffer, write_list); in __flush_write_list()
1436 list_del(&b->write_list); in __flush_write_list()
1450 BUG_ON(atomic_read(&b->hold_count)); in __make_buffer_clean()
1453 if (!smp_load_acquire(&b->state)) /* fast case */ in __make_buffer_clean()
1456 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); in __make_buffer_clean()
1458 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in __make_buffer_clean()
1466 if (WARN_ON_ONCE(test_bit(B_WRITING, &b->state))) in is_clean()
1468 if (WARN_ON_ONCE(test_bit(B_DIRTY, &b->state))) in is_clean()
1470 if (WARN_ON_ONCE(b->list_mode != LIST_CLEAN)) in is_clean()
1473 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep && in is_clean()
1474 unlikely(test_bit(B_READING, &b->state))) in is_clean()
1483 if (WARN_ON_ONCE(test_bit(B_READING, &b->state))) in is_dirty()
1485 if (WARN_ON_ONCE(b->list_mode != LIST_DIRTY)) in is_dirty()
1499 b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c); in __get_unclaimed_buffer()
1506 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) in __get_unclaimed_buffer()
1509 b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL); in __get_unclaimed_buffer()
1522 * This function is entered with c->lock held, drops it and regains it
1529 add_wait_queue(&c->free_buffer_wait, &wait); in __wait_for_free_buffer()
1535 * hold c->lock when wake_up is called. So we have a timeout here, in __wait_for_free_buffer()
1540 remove_wait_queue(&c->free_buffer_wait, &wait); in __wait_for_free_buffer()
1564 * dm-bufio is resistant to allocation failures (it just keeps in __alloc_buffer_wait_no_callback()
1573 * For debugging, if we set the cache size to 1, no new buffers will in __alloc_buffer_wait_no_callback()
1595 if (!list_empty(&c->reserved_buffers)) { in __alloc_buffer_wait_no_callback()
1596 b = list_to_buffer(c->reserved_buffers.next); in __alloc_buffer_wait_no_callback()
1597 list_del(&b->lru.list); in __alloc_buffer_wait_no_callback()
1598 c->need_reserved_buffers++; in __alloc_buffer_wait_no_callback()
1618 if (c->alloc_callback) in __alloc_buffer_wait()
1619 c->alloc_callback(b); in __alloc_buffer_wait()
1629 struct dm_bufio_client *c = b->c; in __free_buffer_wake()
1631 b->block = -1; in __free_buffer_wake()
1632 if (!c->need_reserved_buffers) in __free_buffer_wake()
1635 list_add(&b->lru.list, &c->reserved_buffers); in __free_buffer_wake()
1636 c->need_reserved_buffers--; in __free_buffer_wake()
1643 if (unlikely(waitqueue_active(&c->free_buffer_wait))) in __free_buffer_wake()
1644 wake_up(&c->free_buffer_wait); in __free_buffer_wake()
1649 if (WARN_ON_ONCE(test_bit(B_READING, &b->state))) in cleaned()
1652 if (test_bit(B_DIRTY, &b->state) || test_bit(B_WRITING, &b->state)) in cleaned()
1660 cache_mark_many(&c->cache, LIST_DIRTY, LIST_CLEAN, cleaned, NULL); in __move_clean_buffers()
1672 if (wc->no_wait && test_bit(B_WRITING, &b->state)) in write_one()
1675 __write_dirty_buffer(b, wc->write_list); in write_one()
1685 cache_iterate(&c->cache, LIST_DIRTY, write_one, &wc); in __write_dirty_buffers_async()
1691 * If we're over "limit_buffers", block until we get under the limit.
1696 if (cache_count(&c->cache, LIST_DIRTY) > in __check_watermark()
1697 cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO) in __check_watermark()
1702 *--------------------------------------------------------------
1704 *--------------------------------------------------------------
1713 if (cache_put(&c->cache, b) && in cache_put_and_wake()
1714 unlikely(waitqueue_active(&c->free_buffer_wait))) in cache_put_and_wake()
1715 wake_up(&c->free_buffer_wait); in cache_put_and_wake()
1719 * This assumes you have already checked the cache to see if the buffer
1722 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block, in __bufio_new() argument
1742 b = cache_get(&c->cache, block); in __bufio_new()
1751 atomic_set(&b->hold_count, 1); in __bufio_new()
1752 WRITE_ONCE(b->last_accessed, jiffies); in __bufio_new()
1753 b->block = block; in __bufio_new()
1754 b->read_error = 0; in __bufio_new()
1755 b->write_error = 0; in __bufio_new()
1756 b->list_mode = LIST_CLEAN; in __bufio_new()
1759 b->state = 0; in __bufio_new()
1761 b->state = 1 << B_READING; in __bufio_new()
1766 * We mustn't insert into the cache until the B_READING state in __bufio_new()
1770 cache_insert(&c->cache, b); in __bufio_new()
1787 if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) { in __bufio_new()
1801 b->read_error = status; in read_endio()
1803 BUG_ON(!test_bit(B_READING, &b->state)); in read_endio()
1806 clear_bit(B_READING, &b->state); in read_endio()
1809 wake_up_bit(&b->state, B_READING); in read_endio()
1818 static void *new_read(struct dm_bufio_client *c, sector_t block, in new_read() argument
1829 * Fast path, hopefully the block is already in the cache. No need in new_read()
1832 b = cache_get(&c->cache, block); in new_read()
1846 if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) { in new_read()
1857 b = __bufio_new(c, block, nf, &need_submit, &write_list); in new_read()
1862 if (b && (atomic_read(&b->hold_count) == 1)) in new_read()
1874 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); in new_read()
1876 if (b->read_error) { in new_read()
1877 int error = blk_status_to_errno(b->read_error); in new_read()
1886 return b->data; in new_read()
1889 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block, in dm_bufio_get() argument
1892 return new_read(c, block, NF_GET, bp); in dm_bufio_get()
1896 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block, in dm_bufio_read() argument
1900 return ERR_PTR(-EINVAL); in dm_bufio_read()
1902 return new_read(c, block, NF_READ, bp); in dm_bufio_read()
1906 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block, in dm_bufio_new() argument
1910 return ERR_PTR(-EINVAL); in dm_bufio_new()
1912 return new_read(c, block, NF_FRESH, bp); in dm_bufio_new()
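For context, a typical caller of this get/read/new trio looks roughly like the following (hedged sketch; error handling trimmed):

    struct dm_buffer *bp;
    void *data;

    data = dm_bufio_read(c, block, &bp);     /* reads the block if it is not already cached */
    if (IS_ERR(data))
            return PTR_ERR(data);

    /* ... examine or modify the block_size bytes at data ... */
    dm_bufio_mark_buffer_dirty(bp);          /* only if the data was modified */
    dm_bufio_release(bp);                    /* drop the hold count */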
1917 sector_t block, unsigned int n_blocks) in dm_bufio_prefetch() argument
1928 for (; n_blocks--; block++) { in dm_bufio_prefetch()
1932 b = cache_get(&c->cache, block); in dm_bufio_prefetch()
1934 /* already in cache */ in dm_bufio_prefetch()
1940 b = __bufio_new(c, block, NF_PREFETCH, &need_submit, in dm_bufio_prefetch()
1972 struct dm_bufio_client *c = b->c; in dm_bufio_release()
1979 if ((b->read_error || b->write_error) && in dm_bufio_release()
1980 !test_bit_acquire(B_READING, &b->state) && in dm_bufio_release()
1981 !test_bit(B_WRITING, &b->state) && in dm_bufio_release()
1982 !test_bit(B_DIRTY, &b->state)) { in dm_bufio_release()
1985 /* cache remove can fail if there are other holders */ in dm_bufio_release()
1986 if (cache_remove(&c->cache, b)) { in dm_bufio_release()
2002 struct dm_bufio_client *c = b->c; in dm_bufio_mark_partial_buffer_dirty()
2005 BUG_ON(end > b->c->block_size); in dm_bufio_mark_partial_buffer_dirty()
2009 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_mark_partial_buffer_dirty()
2011 if (!test_and_set_bit(B_DIRTY, &b->state)) { in dm_bufio_mark_partial_buffer_dirty()
2012 b->dirty_start = start; in dm_bufio_mark_partial_buffer_dirty()
2013 b->dirty_end = end; in dm_bufio_mark_partial_buffer_dirty()
2014 cache_mark(&c->cache, b, LIST_DIRTY); in dm_bufio_mark_partial_buffer_dirty()
2016 if (start < b->dirty_start) in dm_bufio_mark_partial_buffer_dirty()
2017 b->dirty_start = start; in dm_bufio_mark_partial_buffer_dirty()
2018 if (end > b->dirty_end) in dm_bufio_mark_partial_buffer_dirty()
2019 b->dirty_end = end; in dm_bufio_mark_partial_buffer_dirty()
2028 dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size); in dm_bufio_mark_buffer_dirty()
2048 * and simultaneously (so that the block layer can merge the writes) and then
2051 * Finally, we flush hardware disk cache.
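A client that needs its metadata on stable storage therefore typically relies on a single call (illustrative sketch; per the comment above, the function writes the buffers back, waits, and then flushes the disk cache itself):

    int r = dm_bufio_write_dirty_buffers(c);
    if (unlikely(r))
            return r;                        /* an async write error or the flush failed */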
2057 return test_bit(B_WRITING, &b->state); in is_writing()
2075 nr_buffers = cache_count(&c->cache, LIST_DIRTY); in dm_bufio_write_dirty_buffers()
2076 lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it); in dm_bufio_write_dirty_buffers()
2081 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_write_dirty_buffers()
2084 nr_buffers--; in dm_bufio_write_dirty_buffers()
2086 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in dm_bufio_write_dirty_buffers()
2089 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in dm_bufio_write_dirty_buffers()
2092 if (!test_bit(B_DIRTY, &b->state) && !test_bit(B_WRITING, &b->state)) in dm_bufio_write_dirty_buffers()
2093 cache_mark(&c->cache, b, LIST_CLEAN); in dm_bufio_write_dirty_buffers()
2101 wake_up(&c->free_buffer_wait); in dm_bufio_write_dirty_buffers()
2104 a = xchg(&c->async_write_error, 0); in dm_bufio_write_dirty_buffers()
2114 * Use dm-io to send an empty barrier to flush the device.
2122 .client = c->dm_io, in dm_bufio_issue_flush()
2125 .bdev = c->bdev, in dm_bufio_issue_flush()
2131 return -EINVAL; in dm_bufio_issue_flush()
2138 * Use dm-io to send a discard request to the device.
2140 int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count) in dm_bufio_issue_discard() argument
2146 .client = c->dm_io, in dm_bufio_issue_discard()
2149 .bdev = c->bdev, in dm_bufio_issue_discard()
2150 .sector = block_to_sector(c, block), in dm_bufio_issue_discard()
2155 return -EINVAL; /* discards are optional */ in dm_bufio_issue_discard()
2161 static bool forget_buffer(struct dm_bufio_client *c, sector_t block) in forget_buffer() argument
2165 b = cache_get(&c->cache, block); in forget_buffer()
2167 if (likely(!smp_load_acquire(&b->state))) { in forget_buffer()
2168 if (cache_remove(&c->cache, b)) in forget_buffer()
2186 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block) in dm_bufio_forget() argument
2189 forget_buffer(c, block); in dm_bufio_forget()
2196 return b->state ? ER_DONT_EVICT : ER_EVICT; in idle()
2199 void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks) in dm_bufio_forget_buffers() argument
2202 cache_remove_range(&c->cache, block, block + n_blocks, idle, __free_buffer_wake); in dm_bufio_forget_buffers()
2209 c->minimum_buffers = n; in dm_bufio_set_minimum_buffers()
2215 return c->block_size; in dm_bufio_get_block_size()
2221 sector_t s = bdev_nr_sectors(c->bdev); in dm_bufio_get_device_size()
2223 if (s >= c->start) in dm_bufio_get_device_size()
2224 s -= c->start; in dm_bufio_get_device_size()
2227 if (likely(c->sectors_per_block_bits >= 0)) in dm_bufio_get_device_size()
2228 s >>= c->sectors_per_block_bits; in dm_bufio_get_device_size()
2230 sector_div(s, c->block_size >> SECTOR_SHIFT); in dm_bufio_get_device_size()
2237 return c->dm_io; in dm_bufio_get_dm_io_client()
2243 return b->block; in dm_bufio_get_block_number()
2249 return b->data; in dm_bufio_get_block_data()
2261 return b->c; in dm_bufio_get_client()
2272 (unsigned long long)b->block, atomic_read(&b->hold_count), b->list_mode); in warn_leak()
2274 stack_trace_print(b->stack_entries, b->stack_len, 1); in warn_leak()
2276 atomic_set(&b->hold_count, 0); in warn_leak()
2290 * An optimization so that the buffers are not written one-by-one. in drop_buffers()
2302 cache_iterate(&c->cache, i, warn_leak, &warned); in drop_buffers()
2311 WARN_ON(cache_count(&c->cache, i)); in drop_buffers()
2320 if (likely(c->sectors_per_block_bits >= 0)) in get_retain_buffers()
2321 retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT; in get_retain_buffers()
2323 retain_bytes /= c->block_size; in get_retain_buffers()
2334 unsigned long count = cache_total(&c->cache); in __scan()
2338 if (count - freed <= retain_target) in __scan()
2339 atomic_long_set(&c->need_shrink, 0); in __scan()
2340 if (!atomic_long_read(&c->need_shrink)) in __scan()
2343 b = cache_evict(&c->cache, l, in __scan()
2351 atomic_long_dec(&c->need_shrink); in __scan()
2372 atomic_long_add(sc->nr_to_scan, &c->need_shrink); in dm_bufio_shrink_scan()
2373 queue_work(dm_bufio_wq, &c->shrink_work); in dm_bufio_shrink_scan()
2375 return sc->nr_to_scan; in dm_bufio_shrink_scan()
2381 unsigned long count = cache_total(&c->cache); in dm_bufio_shrink_count()
2383 unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink); in dm_bufio_shrink_count()
2388 count -= retain_target; in dm_bufio_shrink_count()
2393 count -= queued_for_cleanup; in dm_bufio_shrink_count()
2412 if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) { in dm_bufio_client_create()
2413 DMERR("%s: block size not specified or is not multiple of 512b", __func__); in dm_bufio_client_create()
2414 r = -EINVAL; in dm_bufio_client_create()
2421 r = -ENOMEM; in dm_bufio_client_create()
2424 cache_init(&c->cache, num_locks); in dm_bufio_client_create()
2426 c->bdev = bdev; in dm_bufio_client_create()
2427 c->block_size = block_size; in dm_bufio_client_create()
2429 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT; in dm_bufio_client_create()
2431 c->sectors_per_block_bits = -1; in dm_bufio_client_create()
2433 c->alloc_callback = alloc_callback; in dm_bufio_client_create()
2434 c->write_callback = write_callback; in dm_bufio_client_create()
2437 c->no_sleep = true; in dm_bufio_client_create()
2441 mutex_init(&c->lock); in dm_bufio_client_create()
2442 spin_lock_init(&c->spinlock); in dm_bufio_client_create()
2443 INIT_LIST_HEAD(&c->reserved_buffers); in dm_bufio_client_create()
2444 c->need_reserved_buffers = reserved_buffers; in dm_bufio_client_create()
2448 init_waitqueue_head(&c->free_buffer_wait); in dm_bufio_client_create()
2449 c->async_write_error = 0; in dm_bufio_client_create()
2451 c->dm_io = dm_io_client_create(); in dm_bufio_client_create()
2452 if (IS_ERR(c->dm_io)) { in dm_bufio_client_create()
2453 r = PTR_ERR(c->dm_io); in dm_bufio_client_create()
2461 snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u", block_size); in dm_bufio_client_create()
2462 c->slab_cache = kmem_cache_create(slab_name, block_size, align, in dm_bufio_client_create()
2464 if (!c->slab_cache) { in dm_bufio_client_create()
2465 r = -ENOMEM; in dm_bufio_client_create()
2470 snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u", aux_size); in dm_bufio_client_create()
2473 c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size, in dm_bufio_client_create()
2475 if (!c->slab_buffer) { in dm_bufio_client_create()
2476 r = -ENOMEM; in dm_bufio_client_create()
2480 while (c->need_reserved_buffers) { in dm_bufio_client_create()
2484 r = -ENOMEM; in dm_bufio_client_create()
2490 INIT_WORK(&c->shrink_work, shrink_work); in dm_bufio_client_create()
2491 atomic_long_set(&c->need_shrink, 0); in dm_bufio_client_create()
2493 c->shrinker.count_objects = dm_bufio_shrink_count; in dm_bufio_client_create()
2494 c->shrinker.scan_objects = dm_bufio_shrink_scan; in dm_bufio_client_create()
2495 c->shrinker.seeks = 1; in dm_bufio_client_create()
2496 c->shrinker.batch = 0; in dm_bufio_client_create()
2497 r = register_shrinker(&c->shrinker, "dm-bufio:(%u:%u)", in dm_bufio_client_create()
2498 MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); in dm_bufio_client_create()
2504 list_add(&c->client_list, &dm_bufio_all_clients); in dm_bufio_client_create()
2511 while (!list_empty(&c->reserved_buffers)) { in dm_bufio_client_create()
2512 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next); in dm_bufio_client_create()
2514 list_del(&b->lru.list); in dm_bufio_client_create()
2517 kmem_cache_destroy(c->slab_cache); in dm_bufio_client_create()
2518 kmem_cache_destroy(c->slab_buffer); in dm_bufio_client_create()
2519 dm_io_client_destroy(c->dm_io); in dm_bufio_client_create()
2521 mutex_destroy(&c->lock); in dm_bufio_client_create()
2522 if (c->no_sleep) in dm_bufio_client_create()
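For reference, a caller sets a client up roughly like this (hedged sketch; the trailing flags argument exists only on kernels that provide DM_BUFIO_CLIENT_NO_SLEEP):

    c = dm_bufio_client_create(bdev, 4096 /* block_size */, 1 /* reserved_buffers */,
                               0 /* aux_size */, NULL, NULL, 0);
    if (IS_ERR(c))
            return PTR_ERR(c);
    /* ... use the client ... */
    dm_bufio_client_destroy(c);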
2540 unregister_shrinker(&c->shrinker); in dm_bufio_client_destroy()
2541 flush_work(&c->shrink_work); in dm_bufio_client_destroy()
2545 list_del(&c->client_list); in dm_bufio_client_destroy()
2546 dm_bufio_client_count--; in dm_bufio_client_destroy()
2551 WARN_ON(c->need_reserved_buffers); in dm_bufio_client_destroy()
2553 while (!list_empty(&c->reserved_buffers)) { in dm_bufio_client_destroy()
2554 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next); in dm_bufio_client_destroy()
2556 list_del(&b->lru.list); in dm_bufio_client_destroy()
2561 if (cache_count(&c->cache, i)) in dm_bufio_client_destroy()
2562 DMERR("leaked buffer count %d: %lu", i, cache_count(&c->cache, i)); in dm_bufio_client_destroy()
2565 WARN_ON(cache_count(&c->cache, i)); in dm_bufio_client_destroy()
2567 cache_destroy(&c->cache); in dm_bufio_client_destroy()
2568 kmem_cache_destroy(c->slab_cache); in dm_bufio_client_destroy()
2569 kmem_cache_destroy(c->slab_buffer); in dm_bufio_client_destroy()
2570 dm_io_client_destroy(c->dm_io); in dm_bufio_client_destroy()
2571 mutex_destroy(&c->lock); in dm_bufio_client_destroy()
2572 if (c->no_sleep) in dm_bufio_client_destroy()
2581 flush_work(&c->shrink_work); in dm_bufio_client_reset()
2587 c->start = start; in dm_bufio_set_sector_offset()
2591 /*--------------------------------------------------------------*/
2605 return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz); in older_than()
2632 if (!(params->gfp & __GFP_FS) || in select_for_evict()
2633 (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) { in select_for_evict()
2634 if (test_bit_acquire(B_READING, &b->state) || in select_for_evict()
2635 test_bit(B_WRITING, &b->state) || in select_for_evict()
2636 test_bit(B_DIRTY, &b->state)) in select_for_evict()
2640 return older_than(b, params->age_hz) ? ER_EVICT : ER_STOP; in select_for_evict()
2652 b = cache_evict(&c->cache, list_mode, select_for_evict, params); in __evict_many()
2656 last_accessed = READ_ONCE(b->last_accessed); in __evict_many()
2657 if (time_after_eq(params->last_accessed, last_accessed)) in __evict_many()
2658 params->last_accessed = last_accessed; in __evict_many()
2685 count = cache_total(&c->cache); in evict_old_buffers()
2687 __evict_many(c, &params, LIST_CLEAN, count - retain); in evict_old_buffers()
2715 /*--------------------------------------------------------------*/
2746 if (time_after_eq(c->oldest_buffer, new_client->oldest_buffer)) in __insert_client()
2748 h = h->next; in __insert_client()
2751 list_add_tail(&new_client->client_list, h); in __insert_client()
2774 c->oldest_buffer = params.last_accessed; in __evict_a_few()
2798 unsigned long threshold = dm_bufio_cache_size - in evict_old()
2817 *--------------------------------------------------------------
2819 *--------------------------------------------------------------
2835 mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(), in dm_bufio_init()
2854 return -ENOMEM; in dm_bufio_init()
2905 MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
2926 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
2928 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");