| /Linux-v5.4/drivers/md/bcache/ |
| D | writeback.h |
      43   static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,   in bcache_dev_stripe_dirty()
      64   static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,   in should_writeback()
      90   static inline void bch_writeback_queue(struct cached_dev *dc)   in bch_writeback_queue()
      96   static inline void bch_writeback_add(struct cached_dev *dc)   in bch_writeback_add()
     114   void bch_cached_dev_writeback_init(struct cached_dev *dc);
     115   int bch_cached_dev_writeback_start(struct cached_dev *dc);
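The inline helpers above decide, per bio, whether a write should be absorbed as dirty data by the writeback cache before the writeback thread is queued. A heavily simplified sketch of that kind of gating logic follows; the field names, thresholds, and cutoff below are illustrative assumptions for this sketch, not the kernel's actual layout.

```c
#include <stdbool.h>

/* Illustrative stand-ins; not the kernel's actual struct cached_dev fields. */
enum cache_mode { CACHE_MODE_WRITETHROUGH, CACHE_MODE_WRITEBACK };

struct cached_dev_sketch {
	enum cache_mode	mode;		/* assumed: configured caching mode */
	bool		detaching;	/* assumed: device is being detached */
	unsigned int	cache_in_use;	/* assumed: %% of cache occupied */
	unsigned int	cutoff;		/* assumed: stop caching writes past this */
};

/* Should this write be cached dirty (writeback) rather than passed through? */
bool sketch_should_writeback(const struct cached_dev_sketch *dc,
			     bool bio_would_bypass)
{
	if (dc->mode != CACHE_MODE_WRITEBACK)
		return false;		/* only writeback mode keeps dirty data */
	if (dc->detaching)
		return false;		/* don't create new dirty data mid-detach */
	if (dc->cache_in_use > dc->cutoff)
		return false;		/* cache too full: write through instead */
	return !bio_would_bypass;
}
```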
|
| D | request.c |
      30   static unsigned int cache_mode(struct cached_dev *dc)   in cache_mode()
      35   static bool verify(struct cached_dev *dc)   in verify()
     370   static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)   in iohash()
     375   static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)   in check_should_bypass()
     586   struct cached_dev *dc;   in cache_lookup()
     612   dc = container_of(s->d, struct cached_dev, disk);   in cache_lookup()
     647   struct cached_dev *dc = container_of(s->d,   in backing_request_endio()
     648   struct cached_dev, disk);   in backing_request_endio()
     758   struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);   in cached_dev_bio_complete()
     825   struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);   in cached_dev_read_done()
     [all …]
|
| D | writeback.c |
      30   static uint64_t __calc_target_rate(struct cached_dev *dc)   in __calc_target_rate()
      61   static void __update_writeback_rate(struct cached_dev *dc)   in __update_writeback_rate()
     123   struct cached_dev *dc)   in set_at_max_writeback_rate()
     171   struct cached_dev *dc = container_of(to_delayed_work(work),   in update_writeback_rate()
     172   struct cached_dev,   in update_writeback_rate()
     231   static unsigned int writeback_delay(struct cached_dev *dc,   in writeback_delay()
     243   struct cached_dev *dc;
     274   struct cached_dev *dc = io->dc;   in write_dirty_finish()
     326   struct cached_dev *dc = io->dc;   in write_dirty()
     393   static void read_dirty(struct cached_dev *dc)   in read_dirty()
     [all …]
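`__update_writeback_rate()` runs periodically (via the delayed work seen at line 171) and recomputes how fast dirty data should be flushed so the amount of dirty cache converges on the target from `__calc_target_rate()`. Below is a toy proportional-integral sketch of that style of controller; the gains, units, and fields are assumptions for illustration only and do not mirror the real computation in writeback.c.

```c
#include <stdint.h>

/* Illustrative controller state; not the kernel's struct layout. */
struct wb_rate_sketch {
	int64_t	target_dirty;		/* sectors we want to stay dirty */
	int64_t	integral;		/* accumulated error term */
	int64_t	rate;			/* resulting writeback rate, sectors/s */
};

/* Recompute the writeback rate from how far we are above/below target. */
void sketch_update_writeback_rate(struct wb_rate_sketch *wb,
				  int64_t dirty_sectors)
{
	const int64_t p_gain = 40;	/* assumed proportional damping */
	const int64_t i_gain = 10000;	/* assumed integral damping */

	int64_t error = dirty_sectors - wb->target_dirty;

	wb->integral += error;
	wb->rate = error / p_gain + wb->integral / i_gain;

	if (wb->rate < 1)
		wb->rate = 1;		/* always make some forward progress */
}
```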
|
| D | stats.c |
     202   struct cached_dev *dc = container_of(d, struct cached_dev, disk);   in bch_mark_cache_accounting()
     210   struct cached_dev *dc = container_of(d, struct cached_dev, disk);   in bch_mark_cache_readahead()
     218   struct cached_dev *dc = container_of(d, struct cached_dev, disk);   in bch_mark_cache_miss_collision()
     224   void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc,   in bch_mark_sectors_bypassed()
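Each of these hits recovers the `struct cached_dev` from a generic `struct bcache_device *d` with `container_of()`, which works because `struct cached_dev` embeds the generic device as its `disk` member (the only detail taken from the listing). A small self-contained userspace illustration of that idiom, with simplified stand-in structs:

```c
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures. */
struct bcache_device { int id; };
struct cached_dev { long flags; struct bcache_device disk; };

/* Userspace equivalent of the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct cached_dev dc = { .flags = 1, .disk = { .id = 7 } };
	struct bcache_device *d = &dc.disk;	/* what callers pass around */

	/* Recover the enclosing cached_dev from the embedded member. */
	struct cached_dev *back = container_of(d, struct cached_dev, disk);
	printf("flags=%ld id=%d\n", back->flags, back->disk.id);
	return 0;
}
```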
|
| D | debug.h |
       6   struct cached_dev;
      12   void bch_data_verify(struct cached_dev *dc, struct bio *bio);
      21   static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}   in bch_data_verify()
|
| D | bcache.h |
     298   struct cached_dev {   struct
     871   static inline void cached_dev_put(struct cached_dev *dc)   in cached_dev_put()
     877   static inline bool cached_dev_get(struct cached_dev *dc)   in cached_dev_get()
     944   void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
     975   bool bch_cached_dev_error(struct cached_dev *dc);
     981   void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
    1004   int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
    1006   void bch_cached_dev_detach(struct cached_dev *dc);
    1007   int bch_cached_dev_run(struct cached_dev *dc);
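The `cached_dev_get()` / `cached_dev_put()` pair handles the lifetime of a backing device: the last put defers teardown to the `detach` work item (that member is visible in the super.c hit at line 1016). A minimal sketch of that refcounting idiom, assuming a `refcount_t count` field inside the structure; this is not the verbatim kernel code.

```c
/* Sketch of the cached_dev refcounting idiom, not the verbatim kernel code. */
#include <linux/refcount.h>
#include <linux/workqueue.h>

struct cached_dev_sketch {
	refcount_t		count;	/* assumed: users holding the device */
	struct work_struct	detach;	/* deferred teardown work (see super.c:1016) */
};

/* Take a reference; fails if the device is already being torn down. */
static inline bool sketch_cached_dev_get(struct cached_dev_sketch *dc)
{
	return refcount_inc_not_zero(&dc->count);
}

/* Drop a reference; the last put schedules the detach work. */
static inline void sketch_cached_dev_put(struct cached_dev_sketch *dc)
{
	if (refcount_dec_and_test(&dc->count))
		schedule_work(&dc->detach);
}
```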
|
| D | stats.h |
      43   struct cached_dev;
      62   struct cached_dev *dc,
|
| D | super.c |
     201   struct cached_dev *dc = bio->bi_private;   in write_bdev_super_endio()
     246   struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);   in bch_write_bdev_super_unlock()
     251   void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)   in bch_write_bdev_super()
     874   struct cached_dev *dc;   in calc_cached_dev_sectors()
     885   struct cached_dev *dc = arg;   in cached_dev_status_update()
     920   int bch_cached_dev_run(struct cached_dev *dc)   in bch_cached_dev_run()
     996   static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)   in cancel_writeback_rate_update_dwork()
    1016   struct cached_dev *dc = container_of(w, struct cached_dev, detach);   in cached_dev_detach_finish()
    1056   void bch_cached_dev_detach(struct cached_dev *dc)   in bch_cached_dev_detach()
    1077   int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,   in bch_cached_dev_attach()
     [all …]
|
| D | request.h | 39 void bch_cached_dev_request_init(struct cached_dev *dc);
|
| D | sysfs.c |
     158   struct cached_dev *dc = container_of(kobj, struct cached_dev,   in SHOW()
     270   struct cached_dev *dc = container_of(kobj, struct cached_dev,   in STORE()
     417   struct cached_dev *dc = container_of(kobj, struct cached_dev,   in STORE()
|
| D | io.c | 55 void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio) in bch_count_backing_io_errors()
|
| D | debug.c | 108 void bch_data_verify(struct cached_dev *dc, struct bio *bio) in bch_data_verify()
|
| D | btree.c |
    1779   struct cached_dev *dc;   in bch_btree_gc_finish()
    1785   dc = container_of(d, struct cached_dev, disk);   in bch_btree_gc_finish()
|
| /Linux-v5.4/arch/x86/kernel/ |
| D | quirks.c |
     177   static struct pci_dev *cached_dev;   variable
     190   if (!force_hpet_address || !cached_dev)   in old_ich_force_hpet_resume()
     193   pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);   in old_ich_force_hpet_resume()
     197   pci_write_config_dword(cached_dev, 0xD0, gen_cntl);   in old_ich_force_hpet_resume()
     198   pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);   in old_ich_force_hpet_resume()
     248   cached_dev = dev;   in old_ich_force_enable_hpet()
     286   if (!force_hpet_address || !cached_dev)   in vt8237_force_hpet_resume()
     290   pci_write_config_dword(cached_dev, 0x68, val);   in vt8237_force_hpet_resume()
     292   pci_read_config_dword(cached_dev, 0x68, &val);   in vt8237_force_hpet_resume()
     335   cached_dev = dev;   in vt8237_force_enable_hpet()
     [all …]
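Here `cached_dev` is a file-scope pointer to the `struct pci_dev` that was used to force-enable the HPET at boot, kept so the resume hook can replay the same config-space write after suspend. A hedged sketch of that pattern, modelled on the `old_ich_force_hpet_resume()` hits above; the 0xD0 offset comes from the listing, while the enable bit and the verification logic are assumptions for illustration.

```c
/* Sketch of the "cache the pci_dev, replay the enable on resume" quirk. */
#include <linux/pci.h>
#include <linux/printk.h>

static struct pci_dev *cached_dev;		/* set by the force-enable quirk */
static unsigned long force_hpet_address;	/* HPET MMIO base found at boot */

static void sketch_hpet_force_resume(void)
{
	u32 gen_cntl;

	if (!force_hpet_address || !cached_dev)
		return;				/* quirk never fired at boot */

	/* Read-modify-write the LPC GEN_CNTL register to re-enable the HPET. */
	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
	gen_cntl |= (1u << 17);			/* assumed: HPET enable bit */
	pci_write_config_dword(cached_dev, 0xD0, gen_cntl);

	/* Read back and confirm the write stuck. */
	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
	if (gen_cntl & (1u << 17))
		pr_debug("HPET re-enabled at resume, base 0x%lx\n",
			 force_hpet_address);
}
```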
|
| /Linux-v5.4/net/packet/ |
| D | internal.h | 132 struct net_device __rcu *cached_dev; member
|
| D | af_packet.c |
     248   dev = rcu_dereference(po->cached_dev);   in packet_cached_dev_get()
     259   rcu_assign_pointer(po->cached_dev, dev);   in packet_cached_dev_assign()
     264   RCU_INIT_POINTER(po->cached_dev, NULL);   in packet_cached_dev_reset()
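These three hits outline an RCU-protected cache of the socket's bound net_device: readers dereference it under `rcu_read_lock()` and pin it with a reference, the bind path publishes a new pointer with `rcu_assign_pointer()`, and teardown clears it with `RCU_INIT_POINTER()`. A sketch of those helpers, assuming a packet-socket structure with the `cached_dev` member shown in internal.h:132; this is not the verbatim kernel code.

```c
#include <linux/rcupdate.h>
#include <linux/netdevice.h>

struct packet_sock_sketch {
	struct net_device __rcu	*cached_dev;	/* member as in internal.h:132 */
};

/* Reader: grab the cached device and pin it with a reference. */
static struct net_device *sketch_cached_dev_get(struct packet_sock_sketch *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	if (dev)
		dev_hold(dev);		/* keep it alive past the RCU section */
	rcu_read_unlock();

	return dev;			/* caller must dev_put() when done */
}

/* Writer: publish a new device so concurrent readers see a full pointer. */
static void sketch_cached_dev_assign(struct packet_sock_sketch *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

/* Teardown: no readers can race here, so the cheap initializer is enough. */
static void sketch_cached_dev_reset(struct packet_sock_sketch *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}
```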
|