Lines matching "trees"
392 * We spread entries across multiple trees to reduce contention
396 struct buffer_tree trees[]; member
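Read together, the comment and the flexible-array member suggest a sharded cache: each tree pairs its own rwsem with its own rb-tree root, and the cache ends in an array with one such shard per lock. A minimal sketch of that layout, with the outer struct name and any fields not shown in the listing being assumptions:

struct buffer_tree {
        struct rw_semaphore lock;       /* protects this shard's tree only */
        struct rb_root root;            /* buffers whose blocks hash to this shard */
};

struct dm_buffer_cache {                /* outer struct name is an assumption */
        unsigned int num_locks;         /* number of shards/trees */
        struct buffer_tree trees[];     /* flexible array member, one per shard */
};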
406 down_read(&bc->trees[cache_index(block, bc->num_locks)].lock); in cache_read_lock()
411 up_read(&bc->trees[cache_index(block, bc->num_locks)].lock); in cache_read_unlock()
416 down_write(&bc->trees[cache_index(block, bc->num_locks)].lock); in cache_write_lock()
421 up_write(&bc->trees[cache_index(block, bc->num_locks)].lock); in cache_write_unlock()
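The four helpers differ only in read versus write semantics; all of them hash the block number to a shard with cache_index() and take that shard's rwsem. cache_index() itself is not shown in the listing, so the hash below (hash_64() folded into [0, num_locks)) is an assumption, standing in for whatever stable block-to-shard mapping the real code uses:

static unsigned int cache_index(sector_t block, unsigned int num_locks)
{
        return hash_64(block, 32) % num_locks;  /* assumed mapping; any stable hash works */
}

static void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
{
        down_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
}

static void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
{
        down_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
}

/* cache_read_unlock()/cache_write_unlock() mirror these with up_read()/up_write(). */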
446 down_write(&lh->cache->trees[index].lock); in __lh_lock()
448 down_read(&lh->cache->trees[index].lock); in __lh_lock()
454 up_write(&lh->cache->trees[index].lock); in __lh_unlock()
456 up_read(&lh->cache->trees[index].lock); in __lh_unlock()
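__lh_lock()/__lh_unlock() choose between the write and read variants at run time, which implies a small lock-history object recording the cache pointer and whether the caller holds shards for write. A sketch under that assumption (the struct name and fields are guesses drawn from the lh identifier):

struct lock_history {                   /* name and layout assumed */
        struct dm_buffer_cache *cache;
        bool write;                     /* take shards for write or for read? */
};

static void __lh_lock(struct lock_history *lh, unsigned int index)
{
        if (lh->write)
                down_write(&lh->cache->trees[index].lock);
        else
                down_read(&lh->cache->trees[index].lock);
}

static void __lh_unlock(struct lock_history *lh, unsigned int index)
{
        if (lh->write)
                up_write(&lh->cache->trees[index].lock);
        else
                up_read(&lh->cache->trees[index].lock);
}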
512 init_rwsem(&bc->trees[i].lock); in cache_init()
513 bc->trees[i].root = RB_ROOT; in cache_init()
525 WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root)); in cache_destroy()
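Setup and teardown simply walk the shard array: cache_init() initialises every rwsem and empties every rb-tree root, and cache_destroy() warns if any shard still holds entries. The signatures are assumed; the loop bodies are the lines above:

static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks)
{
        unsigned int i;

        bc->num_locks = num_locks;
        for (i = 0; i < bc->num_locks; i++) {
                init_rwsem(&bc->trees[i].lock);
                bc->trees[i].root = RB_ROOT;
        }
}

static void cache_destroy(struct dm_buffer_cache *bc)
{
        unsigned int i;

        /* every shard should have been emptied before the cache goes away */
        for (i = 0; i < bc->num_locks; i++)
                WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));
}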
583 b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block); in cache_get()
657 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root); in __cache_evict()
823 r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b); in cache_insert()
849 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root); in cache_remove()
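Lookup, insertion, eviction and removal all follow the same pattern: hash the block to a shard, lock that shard (read lock for lookups, write lock for mutations), then operate on that shard's rb-tree alone. A simplified sketch of the lookup side, omitting the reference/hold-count handling the real code surely needs (__cache_get() is the per-tree rb-tree search named in the listing):

static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
{
        struct dm_buffer *b;

        cache_read_lock(bc, block);
        b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
        /* the real code pins the buffer here, before the shard lock is dropped */
        cache_read_unlock(bc, block);

        return b;
}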
919 down_write(&bc->trees[i].lock); in cache_remove_range()
920 __remove_range(bc, &bc->trees[i].root, begin, end, pred, release); in cache_remove_range()
921 up_write(&bc->trees[i].lock); in cache_remove_range()
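cache_remove_range() is the one operation that cannot stay within a single shard: a block range may hash anywhere, so it visits every tree, taking each shard's write lock in turn. A sketch with an assumed signature (the predicate and release callback types are placeholders):

static void cache_remove_range(struct dm_buffer_cache *bc,
                               sector_t begin, sector_t end,
                               b_predicate pred, b_release release)
{
        unsigned int i;

        for (i = 0; i < bc->num_locks; i++) {
                down_write(&bc->trees[i].lock);
                __remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
                up_write(&bc->trees[i].lock);
        }
}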