Lines matching references to bc (the struct dm_buffer_cache * argument)
404 static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block) in cache_read_lock() argument
406 down_read(&bc->trees[cache_index(block, bc->num_locks)].lock); in cache_read_lock()
409 static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block) in cache_read_unlock() argument
411 up_read(&bc->trees[cache_index(block, bc->num_locks)].lock); in cache_read_unlock()
414 static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block) in cache_write_lock() argument
416 down_write(&bc->trees[cache_index(block, bc->num_locks)].lock); in cache_write_lock()
419 static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block) in cache_write_unlock() argument
421 up_write(&bc->trees[cache_index(block, bc->num_locks)].lock); in cache_write_unlock()
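Every lock helper above routes through cache_index(block, bc->num_locks), so a given block always lands on the same rwsem/rb-tree pair. A minimal sketch of the layout those lines imply and of one plausible index helper; the field names (lru, num_locks, trees, lock, root) come from the listing, while the struct lru contents and the hashing are assumptions rather than dm-bufio's actual code:

#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/types.h>

/*
 * Sketch of the layout implied by the listing; only the field names are
 * taken from the lines above, the struct lru contents are assumed.
 */
struct lru {
	struct list_head *cursor;	/* next eviction candidate, NULL when empty */
	unsigned long count;		/* entries on this list */
};

struct buffer_tree {
	struct rw_semaphore lock;	/* taken by cache_read/write_lock() */
	struct rb_root root;		/* buffers whose blocks hash to this slot */
};

struct dm_buffer_cache {
	struct lru lru[2];		/* indexed by LIST_CLEAN / LIST_DIRTY */
	unsigned int num_locks;
	struct buffer_tree trees[];	/* flexible array, num_locks entries */
};

/*
 * Hypothetical index helper: map a block to one lock/tree pair.  Assumes
 * num_locks is a power of two; the hash dm-bufio actually uses may differ.
 */
static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
{
	return block & (num_locks - 1);
}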
505 static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks) in cache_init() argument
509 bc->num_locks = num_locks; in cache_init()
511 for (i = 0; i < bc->num_locks; i++) { in cache_init()
512 init_rwsem(&bc->trees[i].lock); in cache_init()
513 bc->trees[i].root = RB_ROOT; in cache_init()
516 lru_init(&bc->lru[LIST_CLEAN]); in cache_init()
517 lru_init(&bc->lru[LIST_DIRTY]); in cache_init()
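cache_init() above sets up one rwsem/rb-tree pair per lock plus the two LRU lists. Under the struct lru shape sketched earlier, lru_init() would only need to zero the bookkeeping; a hedged guess:

/* Sketch of lru_init(), matching the struct lru fields assumed above. */
static void lru_init(struct lru *lru)
{
	lru->cursor = NULL;	/* nothing to evict yet */
	lru->count = 0;		/* cache_count() reads this further down */
}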
520 static void cache_destroy(struct dm_buffer_cache *bc) in cache_destroy() argument
524 for (i = 0; i < bc->num_locks; i++) in cache_destroy()
525 WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root)); in cache_destroy()
527 lru_destroy(&bc->lru[LIST_CLEAN]); in cache_destroy()
528 lru_destroy(&bc->lru[LIST_DIRTY]); in cache_destroy()
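cache_init() and cache_destroy() bracket the cache's lifetime, and cache_destroy() only warns about non-empty trees rather than freeing buffers. A hypothetical caller pairing the two, assuming the flexible trees[] layout sketched above (cache_alloc/cache_free are illustration names, not dm-bufio functions):

#include <linux/slab.h>

/* Hypothetical constructor: size the flexible trees[] array, then init. */
static struct dm_buffer_cache *cache_alloc(unsigned int num_locks)
{
	struct dm_buffer_cache *bc;

	bc = kzalloc(struct_size(bc, trees, num_locks), GFP_KERNEL);
	if (!bc)
		return NULL;

	cache_init(bc, num_locks);
	return bc;
}

static void cache_free(struct dm_buffer_cache *bc)
{
	cache_destroy(bc);	/* WARNs if any rb-tree still holds buffers */
	kfree(bc);
}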
536 static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode) in cache_count() argument
538 return bc->lru[list_mode].count; in cache_count()
541 static inline unsigned long cache_total(struct dm_buffer_cache *bc) in cache_total() argument
543 return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY); in cache_total()
578 static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block) in cache_get() argument
582 cache_read_lock(bc, block); in cache_get()
583 b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block); in cache_get()
588 cache_read_unlock(bc, block); in cache_get()
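cache_get() only takes the shared per-tree lock and leaves the lookup to __cache_get(). A sketch of that lookup as a standard rb-tree walk keyed by b->block; the reference-count bump that cache_get() presumably performs while the lock is held is omitted here:

/* Sketch: rb-tree lookup keyed by block number. */
static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct dm_buffer *b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = block < b->block ? n->rb_left : n->rb_right;
	}

	return NULL;
}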
599 static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b) in cache_put() argument
603 cache_read_lock(bc, b->block); in cache_put()
606 cache_read_unlock(bc, b->block); in cache_put()
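cache_put() likewise gets by with the shared lock, which only works if dropping the per-buffer reference is itself concurrency-safe. A sketch assuming an atomic hold count (the hold_count field name is an assumption); it returns true when the last reference is gone:

#include <linux/atomic.h>

/* Sketch; assumes struct dm_buffer carries an atomic_t hold_count. */
static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	cache_read_lock(bc, b->block);
	/* Atomic so concurrent holders may drop references under the shared lock. */
	r = atomic_dec_and_test(&b->hold_count);
	cache_read_unlock(bc, b->block);

	return r;
}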
643 static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode, in __cache_evict() argument
651 le = lru_evict(&bc->lru[list_mode], __evict_pred, &w); in __cache_evict()
657 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root); in __cache_evict()
662 static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode, in cache_evict() argument
668 lh_init(&lh, bc, true); in cache_evict()
669 b = __cache_evict(bc, list_mode, pred, context, &lh); in cache_evict()
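cache_evict() wraps the caller's predicate (via __evict_pred and the lock history set up by lh_init), lets the LRU pick a victim, erases it from its rb-tree and returns it. A hypothetical caller reclaiming one clean, unreferenced buffer; the evict_result values and the predicate signature are inferred from the pred/context parameters, not taken from the listing:

/* Assumed predicate contract: decide per buffer whether to evict it. */
enum evict_result {
	ER_EVICT,
	ER_DONT_EVICT,
	ER_STOP,
};

static enum evict_result is_unheld(struct dm_buffer *b, void *context)
{
	/* Hypothetical check: only buffers nobody holds may be reclaimed. */
	return atomic_read(&b->hold_count) ? ER_DONT_EVICT : ER_EVICT;
}

static struct dm_buffer *reclaim_one_clean(struct dm_buffer_cache *bc)
{
	return cache_evict(bc, LIST_CLEAN, is_unheld, NULL);
}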
680 static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode) in cache_mark() argument
682 cache_write_lock(bc, b->block); in cache_mark()
684 lru_remove(&bc->lru[b->list_mode], &b->lru); in cache_mark()
686 lru_insert(&bc->lru[b->list_mode], &b->lru); in cache_mark()
688 cache_write_unlock(bc, b->block); in cache_mark()
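In cache_mark() the key detail sits between the two lru calls shown: b->list_mode is presumably updated after lru_remove(), so that line 686 inserts into the new list. A sketch under that assumption:

static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
{
	cache_write_lock(bc, b->block);
	lru_remove(&bc->lru[b->list_mode], &b->lru);	/* off the old list */
	b->list_mode = list_mode;			/* assumed step between lines 684 and 686 */
	lru_insert(&bc->lru[b->list_mode], &b->lru);	/* onto the new list */
	cache_write_unlock(bc, b->block);
}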
697 static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode, in __cache_mark_many() argument
705 le = lru_evict(&bc->lru[old_mode], __evict_pred, &w); in __cache_mark_many()
711 lru_insert(&bc->lru[b->list_mode], &b->lru); in __cache_mark_many()
715 static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode, in cache_mark_many() argument
720 lh_init(&lh, bc, true); in cache_mark_many()
721 __cache_mark_many(bc, old_mode, new_mode, pred, context, &lh); in cache_mark_many()
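__cache_mark_many() is the batched form: it keeps evicting buffers of old_mode that satisfy the predicate and reinserts them under new_mode. A sketch consistent with the lru_evict()/lru_insert() calls above; the evict_wrapper layout, le_to_buffer() helper and __evict_pred() body are assumptions mirroring how __cache_evict() appears to use them:

typedef enum evict_result (*b_predicate)(struct dm_buffer *b, void *context);

/* Assumed wrapper handed to lru_evict() as the __evict_pred context. */
struct evict_wrapper {
	struct lock_history *lh;
	b_predicate pred;
	void *context;
};

/* Assumed: recover the buffer from its embedded LRU entry. */
static struct dm_buffer *le_to_buffer(struct lru_entry *le)
{
	return container_of(le, struct dm_buffer, lru);
}

/* Assumed glue; the real helper presumably also updates the lock history. */
static enum evict_result __evict_pred(struct lru_entry *le, void *context)
{
	struct evict_wrapper *w = context;

	return w->pred(le_to_buffer(le), w->context);
}

static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
			      b_predicate pred, void *context, struct lock_history *lh)
{
	struct evict_wrapper w = { .lh = lh, .pred = pred, .context = context };
	struct lru_entry *le;
	struct dm_buffer *b;

	while (true) {
		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w);
		if (!le)
			break;

		b = le_to_buffer(le);
		b->list_mode = new_mode;
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	}
}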
743 static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
746 struct lru *lru = &bc->lru[list_mode];
771 static void cache_iterate(struct dm_buffer_cache *bc, int list_mode, in cache_iterate() argument
776 lh_init(&lh, bc, false); in cache_iterate()
777 __cache_iterate(bc, list_mode, fn, context, &lh); in cache_iterate()
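cache_iterate() walks one LRU list and calls fn on every buffer, with lh_init(..., false) suggesting shared locks are sufficient. A hypothetical callback that counts dirty buffers; the it_action enum and iterator typedef are guesses at the contract implied by the fn/context parameters:

/* Assumed iterator contract for cache_iterate(). */
enum it_action {
	IT_NEXT,	/* keep walking */
	IT_COMPLETE,	/* stop the iteration */
};

typedef enum it_action (*iter_fn)(struct dm_buffer *b, void *context);

static enum it_action count_buffer(struct dm_buffer *b, void *context)
{
	unsigned long *nr = context;

	(*nr)++;
	return IT_NEXT;
}

/* Hypothetical helper: count everything currently on the dirty list. */
static unsigned long count_dirty(struct dm_buffer_cache *bc)
{
	unsigned long nr = 0;

	cache_iterate(bc, LIST_DIRTY, count_buffer, &nr);
	return nr;
}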
814 static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b) in cache_insert() argument
821 cache_write_lock(bc, b->block); in cache_insert()
823 r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b); in cache_insert()
825 lru_insert(&bc->lru[b->list_mode], &b->lru); in cache_insert()
826 cache_write_unlock(bc, b->block); in cache_insert()
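cache_insert() takes the exclusive lock, tries __cache_insert() on the block's rb-tree and, on success, adds the buffer to its LRU list. A sketch of __cache_insert() as a standard keyed insert that refuses duplicates:

/* Sketch: rb-tree insert keyed by block; returns false if already cached. */
static bool __cache_insert(struct rb_root *root, struct dm_buffer *b)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	while (*new) {
		struct dm_buffer *found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block)
			return false;

		parent = *new;
		new = b->block < found->block ? &(*new)->rb_left : &(*new)->rb_right;
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, root);
	return true;
}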
839 static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b) in cache_remove() argument
843 cache_write_lock(bc, b->block); in cache_remove()
849 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root); in cache_remove()
850 lru_remove(&bc->lru[b->list_mode], &b->lru); in cache_remove()
853 cache_write_unlock(bc, b->block); in cache_remove()
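cache_remove() returns a bool, and the gap between lines 843 and 849 suggests the erase is conditional, presumably refusing to drop a buffer that is still held by someone other than the caller. A sketch under that assumption:

static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	cache_write_lock(bc, b->block);

	if (atomic_read(&b->hold_count) != 1) {
		/* Assumed guard: someone besides the caller still holds it. */
		r = false;
	} else {
		r = true;
		rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
		lru_remove(&bc->lru[b->list_mode], &b->lru);
	}

	cache_write_unlock(bc, b->block);

	return r;
}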
885 static void __remove_range(struct dm_buffer_cache *bc, in __remove_range() argument
906 lru_remove(&bc->lru[b->list_mode], &b->lru); in __remove_range()
912 static void cache_remove_range(struct dm_buffer_cache *bc, in cache_remove_range() argument
918 for (i = 0; i < bc->num_locks; i++) { in cache_remove_range()
919 down_write(&bc->trees[i].lock); in cache_remove_range()
920 __remove_range(bc, &bc->trees[i].root, begin, end, pred, release); in cache_remove_range()
921 up_write(&bc->trees[i].lock); in cache_remove_range()
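cache_remove_range() takes each tree's write lock in turn, so a block range can be purged without a global lock. A sketch of what the per-tree __remove_range() walk might look like; __find_next() is a hypothetical helper, and the release callback type is assumed from the call at line 920:

/* Hypothetical: first buffer in this tree with block >= the search key. */
static struct dm_buffer *__find_next(struct rb_root *root, sector_t block)
{
	struct rb_node *n = root->rb_node;
	struct dm_buffer *best = NULL;

	while (n) {
		struct dm_buffer *b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		if (block < b->block) {
			best = b;
			n = n->rb_left;
		} else {
			n = n->rb_right;
		}
	}

	return best;
}

/* Sketch of the per-tree walk; the real code presumably also skips held buffers. */
static void __remove_range(struct dm_buffer_cache *bc, struct rb_root *root,
			   sector_t begin, sector_t end,
			   b_predicate pred, void (*release)(struct dm_buffer *))
{
	struct dm_buffer *b;

	while (true) {
		b = __find_next(root, begin);
		if (!b || b->block >= end)
			break;

		begin = b->block + 1;

		if (pred(b, NULL) == ER_EVICT) {
			rb_erase(&b->node, root);
			lru_remove(&bc->lru[b->list_mode], &b->lru);
			release(b);
		}
	}
}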