/Linux-v4.19/drivers/md/bcache/ |
D | btree.h |
    128  struct cache_set *c;
    192  static inline void set_gc_sectors(struct cache_set *c) in set_gc_sectors()
    197  void bkey_put(struct cache_set *c, struct bkey *k);
    246  struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
    249  struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
    255  int bch_btree_insert(struct cache_set *c, struct keylist *keys,
    258  int bch_gc_thread_start(struct cache_set *c);
    259  void bch_initial_gc_finish(struct cache_set *c);
    260  void bch_moving_gc(struct cache_set *c);
    261  int bch_btree_check(struct cache_set *c);
    [all …]
|
D | bcache.h |
    254  struct cache_set *c;
    404  struct cache_set *set;
    502  struct cache_set { struct
    748  static inline size_t sector_to_bucket(struct cache_set *c, sector_t s) in sector_to_bucket()
    753  static inline sector_t bucket_to_sector(struct cache_set *c, size_t b) in bucket_to_sector()
    758  static inline sector_t bucket_remainder(struct cache_set *c, sector_t s) in bucket_remainder()
    763  static inline struct cache *PTR_CACHE(struct cache_set *c, in PTR_CACHE()
    770  static inline size_t PTR_BUCKET_NR(struct cache_set *c, in PTR_BUCKET_NR()
    777  static inline struct bucket *PTR_BUCKET(struct cache_set *c, in PTR_BUCKET()
    791  static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k, in ptr_stale()
    [all …]
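The sector/bucket helpers listed above are plain shift-and-mask arithmetic over the cache set's bucket geometry. Below is a minimal userspace sketch of that arithmetic, assuming (as these v4.19 helpers do) a power-of-two bucket size counted in 512-byte sectors with bucket_bits == log2(bucket_size); struct toy_cache_set and main() are illustrative stand-ins, not the kernel's definitions.

    /* Userspace model of bcache's sector<->bucket conversions (illustrative only). */
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;              /* 512-byte sectors, as in the kernel */

    struct toy_cache_set {                  /* hypothetical stand-in for struct cache_set */
            unsigned bucket_bits;           /* log2(bucket size in sectors) */
            unsigned bucket_size;           /* bucket size in sectors (power of two) */
    };

    static size_t sector_to_bucket(const struct toy_cache_set *c, sector_t s)
    {
            return s >> c->bucket_bits;             /* which bucket a sector falls in */
    }

    static sector_t bucket_to_sector(const struct toy_cache_set *c, size_t b)
    {
            return (sector_t)b << c->bucket_bits;   /* first sector of a bucket */
    }

    static sector_t bucket_remainder(const struct toy_cache_set *c, sector_t s)
    {
            return s & (c->bucket_size - 1);        /* offset of a sector within its bucket */
    }

    int main(void)
    {
            struct toy_cache_set c = { .bucket_bits = 10, .bucket_size = 1 << 10 };
            sector_t s = 3 * (1 << 10) + 17;        /* sector 17 of bucket 3 */

            assert(sector_to_bucket(&c, s) == 3);
            assert(bucket_remainder(&c, s) == 17);
            assert(bucket_to_sector(&c, 3) + 17 == s);
            printf("sector %llu -> bucket %zu + %llu\n",
                   (unsigned long long)s, sector_to_bucket(&c, s),
                   (unsigned long long)bucket_remainder(&c, s));
            return 0;
    }

PTR_BUCKET_NR() and PTR_BUCKET() build on the same conversion: they take the sector offset stored in a key's pointer and turn it into a bucket index, then into the per-cache struct bucket that tracks that bucket's priority and generation.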
|
D | journal.h |
    97   struct cache_set *c;
    166  struct cache_set;
    170  atomic_t *bch_journal(struct cache_set *c,
    174  void bch_journal_mark(struct cache_set *c, struct list_head *list);
    175  void bch_journal_meta(struct cache_set *c, struct closure *cl);
    176  int bch_journal_read(struct cache_set *c, struct list_head *list);
    177  int bch_journal_replay(struct cache_set *c, struct list_head *list);
    179  void bch_journal_free(struct cache_set *c);
    180  int bch_journal_alloc(struct cache_set *c);
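The journal API above is an intent log: bch_journal() appends keys and hands the caller a reference ("pin") on the journal entry, and space is only reused once the pinned entries have been persisted in the btree and their pins dropped. A toy userspace model of that pin-then-reclaim idea follows; the ring layout, names, and fixed size are assumptions for illustration, not the kernel's journal structures.

    /* Userspace model of the journal "pin then reclaim" idea (illustrative only). */
    #include <assert.h>
    #include <stdio.h>

    #define NR_ENTRIES 8                      /* hypothetical ring size */

    struct toy_journal {
            int pin[NR_ENTRIES];              /* outstanding references per entry */
            unsigned head, tail;              /* append at head, reclaim at tail */
    };

    /* Append an entry and hand back a reference to drop once the matching
     * btree update is persistent (roughly what bch_journal() models). */
    static int *journal_append(struct toy_journal *j)
    {
            int *pin = &j->pin[j->head % NR_ENTRIES];

            assert(j->head - j->tail < NR_ENTRIES); /* caller must reclaim when full */
            *pin = 1;
            j->head++;
            return pin;
    }

    /* Free ring space whose entries are no longer pinned (what journal_reclaim() models). */
    static void journal_reclaim(struct toy_journal *j)
    {
            while (j->tail != j->head && j->pin[j->tail % NR_ENTRIES] == 0)
                    j->tail++;
    }

    int main(void)
    {
            struct toy_journal j = { { 0 } };
            int *a = journal_append(&j), *b = journal_append(&j);

            journal_reclaim(&j);
            assert(j.tail == 0);              /* nothing reclaimable while pinned */
            *a = 0;                           /* keys from the first entry hit the btree */
            journal_reclaim(&j);
            assert(j.tail == 1);              /* oldest entry freed, next still pinned */
            *b = 0;
            journal_reclaim(&j);
            assert(j.tail == 2);
            printf("head=%u tail=%u\n", j.head, j.tail);
            return 0;
    }

bch_journal_read() and bch_journal_replay() are the recovery half of the same design: read surviving entries off disk, then reinsert their keys into the btree before the cache set goes live.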
|
D | stats.h |
    42  struct cache_set;
    56  void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
    58  void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d);
    59  void bch_mark_cache_miss_collision(struct cache_set *c,
    61  void bch_mark_sectors_bypassed(struct cache_set *c,
|
D | extents.h |
    9   struct cache_set;
    12  bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k);
    13  bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k);
|
D | io.c |
    17   void bch_bbio_free(struct bio *bio, struct cache_set *c) in bch_bbio_free()
    24   struct bio *bch_bbio_alloc(struct cache_set *c) in bch_bbio_alloc()
    34   void __bch_submit_bbio(struct bio *bio, struct cache_set *c) in __bch_submit_bbio()
    45   void bch_submit_bbio(struct bio *bio, struct cache_set *c, in bch_submit_bbio()
    124  void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, in bch_bbio_count_io_errors()
    154  void bch_bbio_endio(struct cache_set *c, struct bio *bio, in bch_bbio_endio()
|
D | debug.h |
    7   struct cache_set;
    30  void bch_debug_init_cache_set(struct cache_set *c);
    32  static inline void bch_debug_init_cache_set(struct cache_set *c) {} in bch_debug_init_cache_set()
|
D | journal.c |
    146  int bch_journal_read(struct cache_set *c, struct list_head *list) in bch_journal_read()
    274  void bch_journal_mark(struct cache_set *c, struct list_head *list) in bch_journal_mark()
    320  int bch_journal_replay(struct cache_set *s, struct list_head *list) in bch_journal_replay()
    380  static void btree_flush_write(struct cache_set *c) in btree_flush_write()
    492  static void journal_reclaim(struct cache_set *c) in journal_reclaim()
    601  struct cache_set *c = container_of(cl, struct cache_set, journal.io); in journal_write_unlock()
    610  struct cache_set *c = container_of(cl, struct cache_set, journal.io); in journal_write_unlocked()
    688  struct cache_set *c = container_of(cl, struct cache_set, journal.io); in journal_write()
    694  static void journal_try_write(struct cache_set *c) in journal_try_write()
    710  static struct journal_write *journal_wait_for_write(struct cache_set *c, in journal_wait_for_write()
    [all …]
|
D | super.c |
    279  struct cache_set *c = container_of(cl, struct cache_set, sb_write); in bcache_write_super_unlock()
    284  void bcache_write_super(struct cache_set *c) in bcache_write_super()
    321  struct cache_set *c = container_of(cl, struct cache_set, uuid_write); in uuid_endio()
    330  struct cache_set *c = container_of(cl, struct cache_set, uuid_write); in uuid_io_unlock()
    335  static void uuid_io(struct cache_set *c, int op, unsigned long op_flags, in uuid_io()
    376  static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) in uuid_read()
    417  static int __uuid_write(struct cache_set *c) in __uuid_write()
    437  int bch_uuid_write(struct cache_set *c) in bch_uuid_write()
    447  static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) in uuid_find()
    459  static struct uuid_entry *uuid_find_empty(struct cache_set *c) in uuid_find_empty()
    [all …]
|
D | sysfs.c |
    253  struct cache_set *c; in STORE()
    514  static int bch_bset_print_stats(struct cache_set *c, char *buf) in bch_bset_print_stats()
    540  static unsigned int bch_root_usage(struct cache_set *c) in bch_root_usage()
    564  static size_t bch_cache_size(struct cache_set *c) in bch_cache_size()
    577  static unsigned int bch_cache_max_chain(struct cache_set *c) in bch_cache_max_chain()
    600  static unsigned int bch_btree_used(struct cache_set *c) in bch_btree_used()
    606  static unsigned int bch_average_key_size(struct cache_set *c) in bch_average_key_size()
    615  struct cache_set *c = container_of(kobj, struct cache_set, kobj); in SHOW()
    689  struct cache_set *c = container_of(kobj, struct cache_set, kobj); in STORE()
    797  struct cache_set *c = container_of(kobj, struct cache_set, internal); in SHOW()
    [all …]
|
D | request.h |
    7   struct cache_set *c;
    36  unsigned int bch_get_congested(struct cache_set *c);
|
D | movinggc.c |
    24   struct cache_set *c = container_of(buf, struct cache_set, in moving_pred()
    126  static void read_moving(struct cache_set *c) in read_moving()
    197  void bch_moving_gc(struct cache_set *c) in bch_moving_gc()
    250  void bch_moving_init_cache_set(struct cache_set *c) in bch_moving_init_cache_set()
|
D | btree.c |
    184  void bkey_put(struct cache_set *c, struct bkey *k) in bkey_put()
    607  static struct btree *mca_bucket_alloc(struct cache_set *c, in mca_bucket_alloc()
    672  struct cache_set *c = container_of(shrink, struct cache_set, shrink); in bch_mca_scan()
    739  struct cache_set *c = container_of(shrink, struct cache_set, shrink); in bch_mca_count()
    750  void bch_btree_cache_free(struct cache_set *c) in bch_btree_cache_free()
    793  int bch_btree_cache_alloc(struct cache_set *c) in bch_btree_cache_alloc()
    833  static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k) in mca_hash()
    838  static struct btree *mca_find(struct cache_set *c, struct bkey *k) in mca_find()
    852  static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op) in mca_cannibalize_lock()
    867  static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, in mca_cannibalize()
    [all …]
|
D | stats.c |
    199  void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d, in bch_mark_cache_accounting()
    208  void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d) in bch_mark_cache_readahead()
    216  void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d) in bch_mark_cache_miss_collision()
    224  void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc, in bch_mark_sectors_bypassed()
|
D | alloc.c |
    86   void bch_rescale_priorities(struct cache_set *c, int sectors) in bch_rescale_priorities()
    472  void bch_bucket_free(struct cache_set *c, struct bkey *k) in bch_bucket_free()
    481  int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, in __bch_bucket_alloc_set()
    514  int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, in bch_bucket_alloc_set()
    559  static struct open_bucket *pick_data_bucket(struct cache_set *c, in pick_data_bucket()
    600  bool bch_alloc_sectors(struct cache_set *c, in bch_alloc_sectors()
    691  void bch_open_buckets_free(struct cache_set *c) in bch_open_buckets_free()
    703  int bch_open_buckets_alloc(struct cache_set *c) in bch_open_buckets_alloc()
|
D | extents.c |
    47   static bool __ptr_invalid(struct cache_set *c, const struct bkey *k) in __ptr_invalid()
    68   static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k) in bch_ptr_status()
    149  bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k) in __bch_btree_ptr_invalid()
    314  struct cache_set *c, in bch_subtract_dirty()
    328  struct cache_set *c = container_of(b, struct btree, keys)->c; in bch_extent_insert_fixup()
    480  bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k) in __bch_extent_invalid()
|
D | debug.c |
    162  struct cache_set *c;
    209  struct cache_set *c = inode->i_private; in bch_dump_open()
    237  void bch_debug_init_cache_set(struct cache_set *c) in bch_debug_init_cache_set()
|
D | writeback.c |
    23   struct cache_set *c = dc->disk.c; in __calc_target_rate()
    113  static bool set_at_max_writeback_rate(struct cache_set *c, in set_at_max_writeback_rate()
    162  struct cache_set *c = dc->disk.c; in update_writeback_rate()
    505  void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode, in bcache_dev_sectors_dirty_add()
    644  struct cache_set *c = dc->disk.c; in bch_writeback_thread()
|
D | writeback.h |
    99  void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
|
D | request.c |
    103   struct cache_set *c) in bch_keylist_realloc()
    334   unsigned int bch_get_congested(struct cache_set *c) in bch_get_congested()
    377   struct cache_set *c = dc->disk.c; in check_should_bypass()
    1111  static void quit_max_writeback_rate(struct cache_set *c, in quit_max_writeback_rate()
|
/Linux-v4.19/include/trace/events/ |
D | bcache.h |
    152  TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
    194  DECLARE_EVENT_CLASS(cache_set,
    195  TP_PROTO(struct cache_set *c),
    214  DEFINE_EVENT(cache_set, bcache_journal_full,
    215  TP_PROTO(struct cache_set *c),
    219  DEFINE_EVENT(cache_set, bcache_journal_entry_full,
    220  TP_PROTO(struct cache_set *c),
    231  DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
    232  TP_PROTO(struct cache_set *c),
    265  DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
    [all …]
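These hits show the kernel's event-class tracepoint pattern: DECLARE_EVENT_CLASS(cache_set, ...) defines one template taking a struct cache_set *, and each DEFINE_EVENT stamps out a named event (bcache_journal_full, bcache_btree_cache_cannibalize, and so on) that shares it and is fired from the driver as trace_<name>(c). Below is a tiny userspace analogue of that "one class, many named events" macro pattern; the toy macros and the handler body are illustrative assumptions, not the kernel's tracepoint implementation.

    /* Userspace analogue of DECLARE_EVENT_CLASS/DEFINE_EVENT (illustrative only). */
    #include <stdio.h>

    struct cache_set { unsigned long long sb_uuid_word; };   /* toy stand-in */

    /* "Event class": one handler shared by every event defined against it. */
    #define DECLARE_EVENT_CLASS(class, type)                                  \
            static void class##_handler(const char *event, type arg)         \
            {                                                                 \
                    printf("%s: set %llx\n", event, arg->sb_uuid_word);       \
            }

    /* "Event instance": a named wrapper forwarding to the class handler,
     * playing the role of trace_bcache_journal_full(c) and friends. */
    #define DEFINE_EVENT(class, name, type)                                   \
            static void trace_##name(type arg)                                \
            {                                                                 \
                    class##_handler(#name, arg);                              \
            }

    DECLARE_EVENT_CLASS(cache_set, struct cache_set *)
    DEFINE_EVENT(cache_set, bcache_journal_full, struct cache_set *)
    DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize, struct cache_set *)

    int main(void)
    {
            struct cache_set c = { 0xdeadbeef };

            trace_bcache_journal_full(&c);              /* both events share one handler */
            trace_bcache_btree_cache_cannibalize(&c);
            return 0;
    }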
|