Lines Matching full:cache (device-mapper cache target, drivers/md/dm-cache-target.c)

10 #include "dm-cache-metadata.h"
22 #define DM_MSG_PREFIX "cache"
25 "A percentage of time allocated for copying to and/or from cache");
33 * cblock: index of a cache block
34 * promotion: movement of a block from origin to cache
35 * demotion: movement of a block from cache to origin
36 * migration: movement of a block between the origin and cache device,
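Note: these terms map directly onto the code matched below: dm_oblock_t, dm_cblock_t and dm_dblock_t (discard blocks) are distinct typed indexes converted with the from_*()/to_*() helpers, and the copy() fragment further down issues the actual data movement via dm_kcopyd_copy(), origin region to cache region for a promotion and the reverse for a demotion.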
317 * The block size of the device holding cache data must be
332 * dirty. If you lose the cache device you will lose data.
338 * Data is written to both cache and origin. Blocks are never
344 * A degraded mode useful for various cache coherency situations
346 * origin. If a write goes to a cached oblock, then the cache
373 struct cache { struct
407 * Size of the cache device in blocks.
464 * Cache features such as write-through.
499 struct cache *cache; argument
511 static bool writethrough_mode(struct cache *cache) in writethrough_mode() argument
513 return cache->features.io_mode == CM_IO_WRITETHROUGH; in writethrough_mode()
516 static bool writeback_mode(struct cache *cache) in writeback_mode() argument
518 return cache->features.io_mode == CM_IO_WRITEBACK; in writeback_mode()
521 static inline bool passthrough_mode(struct cache *cache) in passthrough_mode() argument
523 return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH); in passthrough_mode()
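Note: the three predicates above all test cache->features.io_mode against the CM_IO_* constants. The enum itself is declared earlier in the file and is not among the matched lines; judging only from the constants visible here, its shape is roughly as follows (a sketch, not the verbatim declaration):

enum cache_io_mode {
	CM_IO_WRITEBACK,	/* dirty cache blocks are written back to the origin later */
	CM_IO_WRITETHROUGH,	/* writes go to both the cache and the origin */
	CM_IO_PASSTHROUGH,	/* degraded mode: I/O is served by the origin and writes
				 * invalidate any cached copy */
};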
528 static void wake_deferred_bio_worker(struct cache *cache) in wake_deferred_bio_worker() argument
530 queue_work(cache->wq, &cache->deferred_bio_worker); in wake_deferred_bio_worker()
533 static void wake_migration_worker(struct cache *cache) in wake_migration_worker() argument
535 if (passthrough_mode(cache)) in wake_migration_worker()
538 queue_work(cache->wq, &cache->migration_worker); in wake_migration_worker()
543 static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache) in alloc_prison_cell() argument
545 return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO); in alloc_prison_cell()
548 static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell) in free_prison_cell() argument
550 dm_bio_prison_free_cell_v2(cache->prison, cell); in free_prison_cell()
553 static struct dm_cache_migration *alloc_migration(struct cache *cache) in alloc_migration() argument
557 mg = mempool_alloc(&cache->migration_pool, GFP_NOIO); in alloc_migration()
561 mg->cache = cache; in alloc_migration()
562 atomic_inc(&cache->nr_allocated_migrations); in alloc_migration()
569 struct cache *cache = mg->cache; in free_migration() local
571 if (atomic_dec_and_test(&cache->nr_allocated_migrations)) in free_migration()
572 wake_up(&cache->migration_wait); in free_migration()
574 mempool_free(mg, &cache->migration_pool); in free_migration()
631 static void defer_bio(struct cache *cache, struct bio *bio) in defer_bio() argument
635 spin_lock_irqsave(&cache->lock, flags); in defer_bio()
636 bio_list_add(&cache->deferred_bios, bio); in defer_bio()
637 spin_unlock_irqrestore(&cache->lock, flags); in defer_bio()
639 wake_deferred_bio_worker(cache); in defer_bio()
642 static void defer_bios(struct cache *cache, struct bio_list *bios) in defer_bios() argument
646 spin_lock_irqsave(&cache->lock, flags); in defer_bios()
647 bio_list_merge(&cache->deferred_bios, bios); in defer_bios()
649 spin_unlock_irqrestore(&cache->lock, flags); in defer_bios()
651 wake_deferred_bio_worker(cache); in defer_bios()
656 static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio) in bio_detain_shared() argument
664 cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */ in bio_detain_shared()
667 r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell); in bio_detain_shared()
672 free_prison_cell(cache, cell_prealloc); in bio_detain_shared()
677 free_prison_cell(cache, cell_prealloc); in bio_detain_shared()
687 static bool is_dirty(struct cache *cache, dm_cblock_t b) in is_dirty() argument
689 return test_bit(from_cblock(b), cache->dirty_bitset); in is_dirty()
692 static void set_dirty(struct cache *cache, dm_cblock_t cblock) in set_dirty() argument
694 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) { in set_dirty()
695 atomic_inc(&cache->nr_dirty); in set_dirty()
696 policy_set_dirty(cache->policy, cblock); in set_dirty()
704 static void force_set_dirty(struct cache *cache, dm_cblock_t cblock) in force_set_dirty() argument
706 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) in force_set_dirty()
707 atomic_inc(&cache->nr_dirty); in force_set_dirty()
708 policy_set_dirty(cache->policy, cblock); in force_set_dirty()
711 static void force_clear_dirty(struct cache *cache, dm_cblock_t cblock) in force_clear_dirty() argument
713 if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) { in force_clear_dirty()
714 if (atomic_dec_return(&cache->nr_dirty) == 0) in force_clear_dirty()
715 dm_table_event(cache->ti->table); in force_clear_dirty()
718 policy_clear_dirty(cache->policy, cblock); in force_clear_dirty()
723 static bool block_size_is_power_of_two(struct cache *cache) in block_size_is_power_of_two() argument
725 return cache->sectors_per_block_shift >= 0; in block_size_is_power_of_two()
739 static dm_block_t oblocks_per_dblock(struct cache *cache) in oblocks_per_dblock() argument
741 dm_block_t oblocks = cache->discard_block_size; in oblocks_per_dblock()
743 if (block_size_is_power_of_two(cache)) in oblocks_per_dblock()
744 oblocks >>= cache->sectors_per_block_shift; in oblocks_per_dblock()
746 oblocks = block_div(oblocks, cache->sectors_per_block); in oblocks_per_dblock()
751 static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) in oblock_to_dblock() argument
754 oblocks_per_dblock(cache))); in oblock_to_dblock()
757 static void set_discard(struct cache *cache, dm_dblock_t b) in set_discard() argument
761 BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks)); in set_discard()
762 atomic_inc(&cache->stats.discard_count); in set_discard()
764 spin_lock_irqsave(&cache->lock, flags); in set_discard()
765 set_bit(from_dblock(b), cache->discard_bitset); in set_discard()
766 spin_unlock_irqrestore(&cache->lock, flags); in set_discard()
769 static void clear_discard(struct cache *cache, dm_dblock_t b) in clear_discard() argument
773 spin_lock_irqsave(&cache->lock, flags); in clear_discard()
774 clear_bit(from_dblock(b), cache->discard_bitset); in clear_discard()
775 spin_unlock_irqrestore(&cache->lock, flags); in clear_discard()
778 static bool is_discarded(struct cache *cache, dm_dblock_t b) in is_discarded() argument
783 spin_lock_irqsave(&cache->lock, flags); in is_discarded()
784 r = test_bit(from_dblock(b), cache->discard_bitset); in is_discarded()
785 spin_unlock_irqrestore(&cache->lock, flags); in is_discarded()
790 static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b) in is_discarded_oblock() argument
795 spin_lock_irqsave(&cache->lock, flags); in is_discarded_oblock()
796 r = test_bit(from_dblock(oblock_to_dblock(cache, b)), in is_discarded_oblock()
797 cache->discard_bitset); in is_discarded_oblock()
798 spin_unlock_irqrestore(&cache->lock, flags); in is_discarded_oblock()
806 static void remap_to_origin(struct cache *cache, struct bio *bio) in remap_to_origin() argument
808 bio_set_dev(bio, cache->origin_dev->bdev); in remap_to_origin()
811 static void remap_to_cache(struct cache *cache, struct bio *bio, in remap_to_cache() argument
817 bio_set_dev(bio, cache->cache_dev->bdev); in remap_to_cache()
818 if (!block_size_is_power_of_two(cache)) in remap_to_cache()
820 (block * cache->sectors_per_block) + in remap_to_cache()
821 sector_div(bi_sector, cache->sectors_per_block); in remap_to_cache()
824 (block << cache->sectors_per_block_shift) | in remap_to_cache()
825 (bi_sector & (cache->sectors_per_block - 1)); in remap_to_cache()
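Note: remap_to_cache() rewrites bio->bi_iter.bi_sector so the I/O lands inside the chosen cache block: shift-and-mask when the block size is a power of two, multiply plus remainder otherwise (the kernel's sector_div() divides in place and returns the remainder). The same arithmetic as a small self-contained C sketch, with an illustrative helper name rather than the driver's:

/* Sector on the cache device for an I/O hitting cache block 'cblock'.
 * Mirrors the two branches of remap_to_cache() above. */
static unsigned long long cache_sector(unsigned long long bio_sector,
				       unsigned long long cblock,
				       unsigned long long sectors_per_block,
				       int pow2_shift)	/* -1 if block size is not a power of two */
{
	if (pow2_shift >= 0)
		return (cblock << pow2_shift) | (bio_sector & (sectors_per_block - 1));

	return cblock * sectors_per_block + bio_sector % sectors_per_block;
}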
828 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) in check_if_tick_bio_needed() argument
833 spin_lock_irqsave(&cache->lock, flags); in check_if_tick_bio_needed()
834 if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) && in check_if_tick_bio_needed()
838 cache->need_tick_bio = false; in check_if_tick_bio_needed()
840 spin_unlock_irqrestore(&cache->lock, flags); in check_if_tick_bio_needed()
843 static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, in __remap_to_origin_clear_discard() argument
847 check_if_tick_bio_needed(cache, bio); in __remap_to_origin_clear_discard()
848 remap_to_origin(cache, bio); in __remap_to_origin_clear_discard()
850 clear_discard(cache, oblock_to_dblock(cache, oblock)); in __remap_to_origin_clear_discard()
853 static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, in remap_to_origin_clear_discard() argument
857 __remap_to_origin_clear_discard(cache, bio, oblock, true); in remap_to_origin_clear_discard()
860 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, in remap_to_cache_dirty() argument
863 check_if_tick_bio_needed(cache, bio); in remap_to_cache_dirty()
864 remap_to_cache(cache, bio, cblock); in remap_to_cache_dirty()
866 set_dirty(cache, cblock); in remap_to_cache_dirty()
867 clear_discard(cache, oblock_to_dblock(cache, oblock)); in remap_to_cache_dirty()
871 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) in get_bio_block() argument
875 if (!block_size_is_power_of_two(cache)) in get_bio_block()
876 (void) sector_div(block_nr, cache->sectors_per_block); in get_bio_block()
878 block_nr >>= cache->sectors_per_block_shift; in get_bio_block()
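Note: get_bio_block() is the inverse mapping, from a bio's sector to its origin block. A worked example with illustrative numbers: with 512-sector (256 KiB) blocks, sectors_per_block_shift is 9, so a bio at sector 1048580 belongs to oblock 1048580 >> 9 = 2048 with an in-block offset of 4; if the policy has that oblock cached in cblock 7, remap_to_cache() above sends it to cache-device sector (7 << 9) | 4 = 3588.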
883 static bool accountable_bio(struct cache *cache, struct bio *bio) in accountable_bio() argument
888 static void accounted_begin(struct cache *cache, struct bio *bio) in accounted_begin() argument
892 if (accountable_bio(cache, bio)) { in accounted_begin()
895 iot_io_begin(&cache->tracker, pb->len); in accounted_begin()
899 static void accounted_complete(struct cache *cache, struct bio *bio) in accounted_complete() argument
903 iot_io_end(&cache->tracker, pb->len); in accounted_complete()
906 static void accounted_request(struct cache *cache, struct bio *bio) in accounted_request() argument
908 accounted_begin(cache, bio); in accounted_request()
914 struct cache *cache = context; in issue_op() local
915 accounted_request(cache, bio); in issue_op()
920 * to both the cache and origin devices. Clone the bio and send them in parallel.
922 static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio, in remap_to_origin_and_cache() argument
925 struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, &cache->bs); in remap_to_origin_and_cache()
934 __remap_to_origin_clear_discard(cache, origin_bio, oblock, false); in remap_to_origin_and_cache()
937 remap_to_cache(cache, bio, cblock); in remap_to_origin_and_cache()
943 static enum cache_metadata_mode get_cache_mode(struct cache *cache) in get_cache_mode() argument
945 return cache->features.mode; in get_cache_mode()
948 static const char *cache_device_name(struct cache *cache) in cache_device_name() argument
950 return dm_device_name(dm_table_get_md(cache->ti->table)); in cache_device_name()
953 static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode) in notify_mode_switch() argument
961 dm_table_event(cache->ti->table); in notify_mode_switch()
962 DMINFO("%s: switching cache to %s mode", in notify_mode_switch()
963 cache_device_name(cache), descs[(int)mode]); in notify_mode_switch()
966 static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode) in set_cache_mode() argument
969 enum cache_metadata_mode old_mode = get_cache_mode(cache); in set_cache_mode()
971 if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) { in set_cache_mode()
973 cache_device_name(cache)); in set_cache_mode()
978 DMERR("%s: unable to switch cache to write mode until repaired.", in set_cache_mode()
979 cache_device_name(cache)); in set_cache_mode()
993 dm_cache_metadata_set_read_only(cache->cmd); in set_cache_mode()
997 dm_cache_metadata_set_read_write(cache->cmd); in set_cache_mode()
1001 cache->features.mode = new_mode; in set_cache_mode()
1004 notify_mode_switch(cache, new_mode); in set_cache_mode()
1007 static void abort_transaction(struct cache *cache) in abort_transaction() argument
1009 const char *dev_name = cache_device_name(cache); in abort_transaction()
1011 if (get_cache_mode(cache) >= CM_READ_ONLY) in abort_transaction()
1014 if (dm_cache_metadata_set_needs_check(cache->cmd)) { in abort_transaction()
1016 set_cache_mode(cache, CM_FAIL); in abort_transaction()
1020 if (dm_cache_metadata_abort(cache->cmd)) { in abort_transaction()
1022 set_cache_mode(cache, CM_FAIL); in abort_transaction()
1026 static void metadata_operation_failed(struct cache *cache, const char *op, int r) in metadata_operation_failed() argument
1029 cache_device_name(cache), op, r); in metadata_operation_failed()
1030 abort_transaction(cache); in metadata_operation_failed()
1031 set_cache_mode(cache, CM_READ_ONLY); in metadata_operation_failed()
1036 static void load_stats(struct cache *cache) in load_stats() argument
1040 dm_cache_metadata_get_stats(cache->cmd, &stats); in load_stats()
1041 atomic_set(&cache->stats.read_hit, stats.read_hits); in load_stats()
1042 atomic_set(&cache->stats.read_miss, stats.read_misses); in load_stats()
1043 atomic_set(&cache->stats.write_hit, stats.write_hits); in load_stats()
1044 atomic_set(&cache->stats.write_miss, stats.write_misses); in load_stats()
1047 static void save_stats(struct cache *cache) in save_stats() argument
1051 if (get_cache_mode(cache) >= CM_READ_ONLY) in save_stats()
1054 stats.read_hits = atomic_read(&cache->stats.read_hit); in save_stats()
1055 stats.read_misses = atomic_read(&cache->stats.read_miss); in save_stats()
1056 stats.write_hits = atomic_read(&cache->stats.write_hit); in save_stats()
1057 stats.write_misses = atomic_read(&cache->stats.write_miss); in save_stats()
1059 dm_cache_metadata_set_stats(cache->cmd, &stats); in save_stats()
1082 * Migration covers moving data from the origin device to the cache, or
1086 static void inc_io_migrations(struct cache *cache) in inc_io_migrations() argument
1088 atomic_inc(&cache->nr_io_migrations); in inc_io_migrations()
1091 static void dec_io_migrations(struct cache *cache) in dec_io_migrations() argument
1093 atomic_dec(&cache->nr_io_migrations); in dec_io_migrations()
1101 static void calc_discard_block_range(struct cache *cache, struct bio *bio, in calc_discard_block_range() argument
1107 *b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size)); in calc_discard_block_range()
1109 if (se - sb < cache->discard_block_size) in calc_discard_block_range()
1112 *e = to_dblock(block_div(se, cache->discard_block_size)); in calc_discard_block_range()
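Note: calc_discard_block_range() rounds the start sector up and the end sector down so that only discard blocks wholly covered by the bio are marked (the branch matched at 1109 above handles bios smaller than one discard block by producing an empty range). A worked example, assuming the caller treats *e as an exclusive bound as the loop in process_discard_bio() further down suggests: with discard_block_size = 128 sectors, a discard of sectors 100..899 gives b = ceil(100/128) = 1 and e = floor(900/128) = 7, so discard blocks 1 through 6 (sectors 128..895) are marked and the partial blocks at either end are left untouched.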
1117 static void prevent_background_work(struct cache *cache) in prevent_background_work() argument
1120 down_write(&cache->background_work_lock); in prevent_background_work()
1124 static void allow_background_work(struct cache *cache) in allow_background_work() argument
1127 up_write(&cache->background_work_lock); in allow_background_work()
1131 static bool background_work_begin(struct cache *cache) in background_work_begin() argument
1136 r = down_read_trylock(&cache->background_work_lock); in background_work_begin()
1142 static void background_work_end(struct cache *cache) in background_work_end() argument
1145 up_read(&cache->background_work_lock); in background_work_end()
1151 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) in bio_writes_complete_block() argument
1154 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); in bio_writes_complete_block()
1157 static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block) in optimisable_bio() argument
1159 return writeback_mode(cache) && in optimisable_bio()
1160 (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio)); in optimisable_bio()
1167 dm_cell_quiesce_v2(mg->cache->prison, mg->cell, &mg->k.ws); in quiesce()
1183 queue_continuation(mg->cache->wq, &mg->k); in copy_complete()
1189 struct cache *cache = mg->cache; in copy() local
1191 o_region.bdev = cache->origin_dev->bdev; in copy()
1192 o_region.sector = from_oblock(mg->op->oblock) * cache->sectors_per_block; in copy()
1193 o_region.count = cache->sectors_per_block; in copy()
1195 c_region.bdev = cache->cache_dev->bdev; in copy()
1196 c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block; in copy()
1197 c_region.count = cache->sectors_per_block; in copy()
1200 dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k); in copy()
1202 dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k); in copy()
1205 static void bio_drop_shared_lock(struct cache *cache, struct bio *bio) in bio_drop_shared_lock() argument
1209 if (pb->cell && dm_cell_put_v2(cache->prison, pb->cell)) in bio_drop_shared_lock()
1210 free_prison_cell(cache, pb->cell); in bio_drop_shared_lock()
1217 struct cache *cache = mg->cache; in overwrite_endio() local
1225 queue_continuation(cache->wq, &mg->k); in overwrite_endio()
1241 remap_to_cache(mg->cache, bio, mg->op->cblock); in overwrite()
1243 remap_to_origin(mg->cache, bio); in overwrite()
1246 accounted_request(mg->cache, bio); in overwrite()
1263 struct cache *cache = mg->cache; in mg_complete() local
1268 update_stats(&cache->stats, op->op); in mg_complete()
1272 clear_discard(cache, oblock_to_dblock(cache, op->oblock)); in mg_complete()
1273 policy_complete_background_work(cache->policy, op, success); in mg_complete()
1277 force_set_dirty(cache, cblock); in mg_complete()
1285 force_clear_dirty(cache, cblock); in mg_complete()
1286 dec_io_migrations(cache); in mg_complete()
1295 force_clear_dirty(cache, cblock); in mg_complete()
1296 policy_complete_background_work(cache->policy, op, success); in mg_complete()
1297 dec_io_migrations(cache); in mg_complete()
1302 force_clear_dirty(cache, cblock); in mg_complete()
1303 policy_complete_background_work(cache->policy, op, success); in mg_complete()
1304 dec_io_migrations(cache); in mg_complete()
1310 if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios)) in mg_complete()
1311 free_prison_cell(cache, mg->cell); in mg_complete()
1315 defer_bios(cache, &bios); in mg_complete()
1316 wake_migration_worker(cache); in mg_complete()
1318 background_work_end(cache); in mg_complete()
1331 struct cache *cache = mg->cache; in mg_update_metadata() local
1336 r = dm_cache_insert_mapping(cache->cmd, op->cblock, op->oblock); in mg_update_metadata()
1339 cache_device_name(cache)); in mg_update_metadata()
1340 metadata_operation_failed(cache, "dm_cache_insert_mapping", r); in mg_update_metadata()
1349 r = dm_cache_remove_mapping(cache->cmd, op->cblock); in mg_update_metadata()
1352 cache_device_name(cache)); in mg_update_metadata()
1353 metadata_operation_failed(cache, "dm_cache_remove_mapping", r); in mg_update_metadata()
1364 * - vblock x in a cache block in mg_update_metadata()
1366 * - cache block gets reallocated and over written in mg_update_metadata()
1369 * When we recover, because there was no commit the cache will in mg_update_metadata()
1370 * rollback to having the data for vblock x in the cache block. in mg_update_metadata()
1371 * But the cache block has since been overwritten, so it'll end in mg_update_metadata()
1379 continue_after_commit(&cache->committer, &mg->k); in mg_update_metadata()
1380 schedule_commit(&cache->committer); in mg_update_metadata()
1417 r = dm_cell_lock_promote_v2(mg->cache->prison, mg->cell, in mg_upgrade_lock()
1433 struct cache *cache = mg->cache; in mg_full_copy() local
1437 if ((!is_policy_promote && !is_dirty(cache, op->cblock)) || in mg_full_copy()
1438 is_discarded_oblock(cache, op->oblock)) { in mg_full_copy()
1457 if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) { in mg_copy()
1461 bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio); in mg_copy()
1464 inc_io_migrations(mg->cache); in mg_copy()
1486 struct cache *cache = mg->cache; in mg_lock_writes() local
1489 prealloc = alloc_prison_cell(cache); in mg_lock_writes()
1497 r = dm_cell_lock_v2(cache->prison, &key, in mg_lock_writes()
1501 free_prison_cell(cache, prealloc); in mg_lock_writes()
1507 free_prison_cell(cache, prealloc); in mg_lock_writes()
1517 static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio) in mg_start() argument
1521 if (!background_work_begin(cache)) { in mg_start()
1522 policy_complete_background_work(cache->policy, op, false); in mg_start()
1526 mg = alloc_migration(cache); in mg_start()
1532 inc_io_migrations(cache); in mg_start()
1544 struct cache *cache = mg->cache; in invalidate_complete() local
1547 if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios)) in invalidate_complete()
1548 free_prison_cell(cache, mg->cell); in invalidate_complete()
1554 defer_bios(cache, &bios); in invalidate_complete()
1556 background_work_end(cache); in invalidate_complete()
1565 static int invalidate_cblock(struct cache *cache, dm_cblock_t cblock) in invalidate_cblock() argument
1567 int r = policy_invalidate_mapping(cache->policy, cblock); in invalidate_cblock()
1569 r = dm_cache_remove_mapping(cache->cmd, cblock); in invalidate_cblock()
1572 cache_device_name(cache)); in invalidate_cblock()
1573 metadata_operation_failed(cache, "dm_cache_remove_mapping", r); in invalidate_cblock()
1583 DMERR("%s: policy_invalidate_mapping failed", cache_device_name(cache)); in invalidate_cblock()
1592 struct cache *cache = mg->cache; in invalidate_remove() local
1594 r = invalidate_cblock(cache, mg->invalidate_cblock); in invalidate_remove()
1601 continue_after_commit(&cache->committer, &mg->k); in invalidate_remove()
1602 remap_to_origin_clear_discard(cache, mg->overwrite_bio, mg->invalidate_oblock); in invalidate_remove()
1604 schedule_commit(&cache->committer); in invalidate_remove()
1611 struct cache *cache = mg->cache; in invalidate_lock() local
1614 prealloc = alloc_prison_cell(cache); in invalidate_lock()
1617 r = dm_cell_lock_v2(cache->prison, &key, in invalidate_lock()
1620 free_prison_cell(cache, prealloc); in invalidate_lock()
1626 free_prison_cell(cache, prealloc); in invalidate_lock()
1637 queue_work(cache->wq, &mg->k.ws); in invalidate_lock()
1643 static int invalidate_start(struct cache *cache, dm_cblock_t cblock, in invalidate_start() argument
1648 if (!background_work_begin(cache)) in invalidate_start()
1651 mg = alloc_migration(cache); in invalidate_start()
1669 static enum busy spare_migration_bandwidth(struct cache *cache) in spare_migration_bandwidth() argument
1671 bool idle = iot_idle_for(&cache->tracker, HZ); in spare_migration_bandwidth()
1672 sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) * in spare_migration_bandwidth()
1673 cache->sectors_per_block; in spare_migration_bandwidth()
1675 if (idle && current_volume <= cache->migration_threshold) in spare_migration_bandwidth()
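Note: spare_migration_bandwidth() is the throttle for background migrations: new work is allowed only when I/O has been idle for at least a second (HZ) and the in-flight copy volume, counted in sectors and including the prospective migration, stays at or below migration_threshold. Worked example, treating the default threshold of 2048 sectors as an assumption (it is set elsewhere in the file and not matched here): with 512-sector (256 KiB) cache blocks, (nr_io_migrations + 1) * 512 <= 2048 permits up to four concurrent block copies; a fifth would make the function report BUSY.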
1681 static void inc_hit_counter(struct cache *cache, struct bio *bio) in inc_hit_counter() argument
1684 &cache->stats.read_hit : &cache->stats.write_hit); in inc_hit_counter()
1687 static void inc_miss_counter(struct cache *cache, struct bio *bio) in inc_miss_counter() argument
1690 &cache->stats.read_miss : &cache->stats.write_miss); in inc_miss_counter()
1695 static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block, in map_bio() argument
1704 rb = bio_detain_shared(cache, block, bio); in map_bio()
1718 if (optimisable_bio(cache, bio, block)) { in map_bio()
1721 r = policy_lookup_with_work(cache->policy, block, &cblock, data_dir, true, &op); in map_bio()
1724 cache_device_name(cache), r); in map_bio()
1730 bio_drop_shared_lock(cache, bio); in map_bio()
1732 mg_start(cache, op, bio); in map_bio()
1736 r = policy_lookup(cache->policy, block, &cblock, data_dir, false, &background_queued); in map_bio()
1739 cache_device_name(cache), r); in map_bio()
1745 wake_migration_worker(cache); in map_bio()
1754 inc_miss_counter(cache, bio); in map_bio()
1756 accounted_begin(cache, bio); in map_bio()
1757 remap_to_origin_clear_discard(cache, bio, block); in map_bio()
1770 inc_hit_counter(cache, bio); in map_bio()
1774 * cache blocks that are written to. in map_bio()
1776 if (passthrough_mode(cache)) { in map_bio()
1778 bio_drop_shared_lock(cache, bio); in map_bio()
1779 atomic_inc(&cache->stats.demotion); in map_bio()
1780 invalidate_start(cache, cblock, block, bio); in map_bio()
1782 remap_to_origin_clear_discard(cache, bio, block); in map_bio()
1784 if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) && in map_bio()
1785 !is_dirty(cache, cblock)) { in map_bio()
1786 remap_to_origin_and_cache(cache, bio, block, cblock); in map_bio()
1787 accounted_begin(cache, bio); in map_bio()
1789 remap_to_cache_dirty(cache, bio, block, cblock); in map_bio()
1801 accounted_complete(cache, bio); in map_bio()
1802 issue_after_commit(&cache->committer, bio); in map_bio()
1810 static bool process_bio(struct cache *cache, struct bio *bio) in process_bio() argument
1814 if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED) in process_bio()
1823 static int commit(struct cache *cache, bool clean_shutdown) in commit() argument
1827 if (get_cache_mode(cache) >= CM_READ_ONLY) in commit()
1830 atomic_inc(&cache->stats.commit_count); in commit()
1831 r = dm_cache_commit(cache->cmd, clean_shutdown); in commit()
1833 metadata_operation_failed(cache, "dm_cache_commit", r); in commit()
1843 struct cache *cache = context; in commit_op() local
1845 if (dm_cache_changed_this_transaction(cache->cmd)) in commit_op()
1846 return errno_to_blk_status(commit(cache, false)); in commit_op()
1853 static bool process_flush_bio(struct cache *cache, struct bio *bio) in process_flush_bio() argument
1858 remap_to_origin(cache, bio); in process_flush_bio()
1860 remap_to_cache(cache, bio, 0); in process_flush_bio()
1862 issue_after_commit(&cache->committer, bio); in process_flush_bio()
1866 static bool process_discard_bio(struct cache *cache, struct bio *bio) in process_discard_bio() argument
1873 calc_discard_block_range(cache, bio, &b, &e); in process_discard_bio()
1875 set_discard(cache, b); in process_discard_bio()
1879 if (cache->features.discard_passdown) { in process_discard_bio()
1880 remap_to_origin(cache, bio); in process_discard_bio()
1890 struct cache *cache = container_of(ws, struct cache, deferred_bio_worker); in process_deferred_bios() local
1899 spin_lock_irqsave(&cache->lock, flags); in process_deferred_bios()
1900 bio_list_merge(&bios, &cache->deferred_bios); in process_deferred_bios()
1901 bio_list_init(&cache->deferred_bios); in process_deferred_bios()
1902 spin_unlock_irqrestore(&cache->lock, flags); in process_deferred_bios()
1906 commit_needed = process_flush_bio(cache, bio) || commit_needed; in process_deferred_bios()
1909 commit_needed = process_discard_bio(cache, bio) || commit_needed; in process_deferred_bios()
1912 commit_needed = process_bio(cache, bio) || commit_needed; in process_deferred_bios()
1916 schedule_commit(&cache->committer); in process_deferred_bios()
1923 static void requeue_deferred_bios(struct cache *cache) in requeue_deferred_bios() argument
1929 bio_list_merge(&bios, &cache->deferred_bios); in requeue_deferred_bios()
1930 bio_list_init(&cache->deferred_bios); in requeue_deferred_bios()
1944 struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker); in do_waker() local
1946 policy_tick(cache->policy, true); in do_waker()
1947 wake_migration_worker(cache); in do_waker()
1948 schedule_commit(&cache->committer); in do_waker()
1949 queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD); in do_waker()
1956 struct cache *cache = container_of(ws, struct cache, migration_worker); in check_migrations() local
1960 b = spare_migration_bandwidth(cache); in check_migrations()
1962 r = policy_get_background_work(cache->policy, b == IDLE, &op); in check_migrations()
1968 cache_device_name(cache)); in check_migrations()
1972 r = mg_start(cache, op, NULL); in check_migrations()
1986 static void destroy(struct cache *cache) in destroy() argument
1990 mempool_exit(&cache->migration_pool); in destroy()
1992 if (cache->prison) in destroy()
1993 dm_bio_prison_destroy_v2(cache->prison); in destroy()
1995 if (cache->wq) in destroy()
1996 destroy_workqueue(cache->wq); in destroy()
1998 if (cache->dirty_bitset) in destroy()
1999 free_bitset(cache->dirty_bitset); in destroy()
2001 if (cache->discard_bitset) in destroy()
2002 free_bitset(cache->discard_bitset); in destroy()
2004 if (cache->copier) in destroy()
2005 dm_kcopyd_client_destroy(cache->copier); in destroy()
2007 if (cache->cmd) in destroy()
2008 dm_cache_metadata_close(cache->cmd); in destroy()
2010 if (cache->metadata_dev) in destroy()
2011 dm_put_device(cache->ti, cache->metadata_dev); in destroy()
2013 if (cache->origin_dev) in destroy()
2014 dm_put_device(cache->ti, cache->origin_dev); in destroy()
2016 if (cache->cache_dev) in destroy()
2017 dm_put_device(cache->ti, cache->cache_dev); in destroy()
2019 if (cache->policy) in destroy()
2020 dm_cache_policy_destroy(cache->policy); in destroy()
2022 for (i = 0; i < cache->nr_ctr_args ; i++) in destroy()
2023 kfree(cache->ctr_args[i]); in destroy()
2024 kfree(cache->ctr_args); in destroy()
2026 bioset_exit(&cache->bs); in destroy()
2028 kfree(cache); in destroy()
2033 struct cache *cache = ti->private; in cache_dtr() local
2035 destroy(cache); in cache_dtr()
2046 * Construct a cache device mapping.
2048 * cache <metadata dev> <cache dev> <origin dev> <block size>
2053 * cache dev : fast device holding cached data blocks
2055 * block size : cache unit size in sectors
2065 * See cache-policies.txt for details.
2068 * writethrough : write through caching that prohibits cache block
2071 * back cache block contents later for performance reasons,
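Note: putting the constructor arguments above together, a typical table line looks like the following sketch (device names, sizes and the trailing policy section are illustrative; see the kernel's device-mapper cache documentation for the authoritative format):

dmsetup create my-cache --table '0 41943040 cache /dev/mapper/cache-meta /dev/mapper/cache-ssd /dev/mapper/origin 512 1 writeback default 0'

i.e. a 20 GiB origin, 256 KiB (512-sector) cache blocks, one feature argument (writeback) and the default policy with no policy arguments.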
2154 *error = "Error opening cache device"; in parse_cache_dev()
2203 *error = "Data block size is larger than the cache device"; in parse_block_size()
2224 {0, 3, "Invalid number of cache feature arguments"}, in parse_features()
2263 *error = "Unrecognised cache feature requested"; in parse_features()
2269 *error = "Duplicate cache io_mode features requested"; in parse_features()
2342 static int process_config_option(struct cache *cache, const char *key, const char *value) in process_config_option() argument
2350 cache->migration_threshold = tmp; in process_config_option()
2357 static int set_config_value(struct cache *cache, const char *key, const char *value) in set_config_value() argument
2359 int r = process_config_option(cache, key, value); in set_config_value()
2362 r = policy_set_config_value(cache->policy, key, value); in set_config_value()
2370 static int set_config_values(struct cache *cache, int argc, const char **argv) in set_config_values() argument
2380 r = set_config_value(cache, argv[0], argv[1]); in set_config_values()
2391 static int create_cache_policy(struct cache *cache, struct cache_args *ca, in create_cache_policy() argument
2395 cache->cache_size, in create_cache_policy()
2396 cache->origin_sectors, in create_cache_policy()
2397 cache->sectors_per_block); in create_cache_policy()
2399 *error = "Error creating cache's policy"; in create_cache_policy()
2402 cache->policy = p; in create_cache_policy()
2403 BUG_ON(!cache->policy); in create_cache_policy()
2409 * We want the discard block size to be at least the size of the cache
2434 static void set_cache_size(struct cache *cache, dm_cblock_t size) in set_cache_size() argument
2438 if (nr_blocks > (1 << 20) && cache->cache_size != size) in set_cache_size()
2439 DMWARN_LIMIT("You have created a cache device with a lot of individual cache blocks (%llu)\n" in set_cache_size()
2441 "Please consider increasing the cache block size to reduce the overall cache block count.", in set_cache_size()
2444 cache->cache_size = size; in set_cache_size()
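Note: the DMWARN_LIMIT above triggers once the cache holds more than 1 << 20 (about a million) cache blocks. Illustrative arithmetic: a 1 TiB cache device carved into 256 KiB blocks yields 4 * 2^20 blocks and will warn; raising the block size to 1 MiB or larger keeps the count below 2^20, at the cost of coarser-grained caching.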
2455 struct cache *cache = container_of(cb, struct cache, callbacks); in cache_is_congested() local
2457 return is_congested(cache->origin_dev, bdi_bits) || in cache_is_congested()
2458 is_congested(cache->cache_dev, bdi_bits); in cache_is_congested()
2463 static int cache_create(struct cache_args *ca, struct cache **result) in cache_create()
2467 struct cache *cache; in cache_create() local
2473 cache = kzalloc(sizeof(*cache), GFP_KERNEL); in cache_create()
2474 if (!cache) in cache_create()
2477 cache->ti = ca->ti; in cache_create()
2478 ti->private = cache; in cache_create()
2487 cache->features = ca->features; in cache_create()
2488 if (writethrough_mode(cache)) { in cache_create()
2490 r = bioset_init(&cache->bs, BIO_POOL_SIZE, 0, 0); in cache_create()
2495 cache->callbacks.congested_fn = cache_is_congested; in cache_create()
2496 dm_table_add_target_callbacks(ti->table, &cache->callbacks); in cache_create()
2498 cache->metadata_dev = ca->metadata_dev; in cache_create()
2499 cache->origin_dev = ca->origin_dev; in cache_create()
2500 cache->cache_dev = ca->cache_dev; in cache_create()
2504 origin_blocks = cache->origin_sectors = ca->origin_sectors; in cache_create()
2506 cache->origin_blocks = to_oblock(origin_blocks); in cache_create()
2508 cache->sectors_per_block = ca->block_size; in cache_create()
2509 if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) { in cache_create()
2517 cache->sectors_per_block_shift = -1; in cache_create()
2519 set_cache_size(cache, to_cblock(cache_size)); in cache_create()
2521 cache->sectors_per_block_shift = __ffs(ca->block_size); in cache_create()
2522 set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift)); in cache_create()
2525 r = create_cache_policy(cache, ca, error); in cache_create()
2529 cache->policy_nr_args = ca->policy_argc; in cache_create()
2530 cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD; in cache_create()
2532 r = set_config_values(cache, ca->policy_argc, ca->policy_argv); in cache_create()
2534 *error = "Error setting cache policy's config values"; in cache_create()
2538 cmd = dm_cache_metadata_open(cache->metadata_dev->bdev, in cache_create()
2540 dm_cache_policy_get_hint_size(cache->policy), in cache_create()
2547 cache->cmd = cmd; in cache_create()
2548 set_cache_mode(cache, CM_WRITE); in cache_create()
2549 if (get_cache_mode(cache) != CM_WRITE) { in cache_create()
2555 if (passthrough_mode(cache)) { in cache_create()
2558 r = dm_cache_metadata_all_clean(cache->cmd, &all_clean); in cache_create()
2570 policy_allow_migrations(cache->policy, false); in cache_create()
2573 spin_lock_init(&cache->lock); in cache_create()
2574 bio_list_init(&cache->deferred_bios); in cache_create()
2575 atomic_set(&cache->nr_allocated_migrations, 0); in cache_create()
2576 atomic_set(&cache->nr_io_migrations, 0); in cache_create()
2577 init_waitqueue_head(&cache->migration_wait); in cache_create()
2580 atomic_set(&cache->nr_dirty, 0); in cache_create()
2581 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); in cache_create()
2582 if (!cache->dirty_bitset) { in cache_create()
2586 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); in cache_create()
2588 cache->discard_block_size = in cache_create()
2589 calculate_discard_block_size(cache->sectors_per_block, in cache_create()
2590 cache->origin_sectors); in cache_create()
2591 cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors, in cache_create()
2592 cache->discard_block_size)); in cache_create()
2593 cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks)); in cache_create()
2594 if (!cache->discard_bitset) { in cache_create()
2598 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); in cache_create()
2600 cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); in cache_create()
2601 if (IS_ERR(cache->copier)) { in cache_create()
2603 r = PTR_ERR(cache->copier); in cache_create()
2607 cache->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0); in cache_create()
2608 if (!cache->wq) { in cache_create()
2612 INIT_WORK(&cache->deferred_bio_worker, process_deferred_bios); in cache_create()
2613 INIT_WORK(&cache->migration_worker, check_migrations); in cache_create()
2614 INIT_DELAYED_WORK(&cache->waker, do_waker); in cache_create()
2616 cache->prison = dm_bio_prison_create_v2(cache->wq); in cache_create()
2617 if (!cache->prison) { in cache_create()
2622 r = mempool_init_slab_pool(&cache->migration_pool, MIGRATION_POOL_SIZE, in cache_create()
2625 *error = "Error creating cache's migration mempool"; in cache_create()
2629 cache->need_tick_bio = true; in cache_create()
2630 cache->sized = false; in cache_create()
2631 cache->invalidate = false; in cache_create()
2632 cache->commit_requested = false; in cache_create()
2633 cache->loaded_mappings = false; in cache_create()
2634 cache->loaded_discards = false; in cache_create()
2636 load_stats(cache); in cache_create()
2638 atomic_set(&cache->stats.demotion, 0); in cache_create()
2639 atomic_set(&cache->stats.promotion, 0); in cache_create()
2640 atomic_set(&cache->stats.copies_avoided, 0); in cache_create()
2641 atomic_set(&cache->stats.cache_cell_clash, 0); in cache_create()
2642 atomic_set(&cache->stats.commit_count, 0); in cache_create()
2643 atomic_set(&cache->stats.discard_count, 0); in cache_create()
2645 spin_lock_init(&cache->invalidation_lock); in cache_create()
2646 INIT_LIST_HEAD(&cache->invalidation_requests); in cache_create()
2648 batcher_init(&cache->committer, commit_op, cache, in cache_create()
2649 issue_op, cache, cache->wq); in cache_create()
2650 iot_init(&cache->tracker); in cache_create()
2652 init_rwsem(&cache->background_work_lock); in cache_create()
2653 prevent_background_work(cache); in cache_create()
2655 *result = cache; in cache_create()
2658 destroy(cache); in cache_create()
2662 static int copy_ctr_args(struct cache *cache, int argc, const char **argv) in copy_ctr_args() argument
2680 cache->nr_ctr_args = argc; in copy_ctr_args()
2681 cache->ctr_args = copy; in copy_ctr_args()
2690 struct cache *cache = NULL; in cache_ctr() local
2694 ti->error = "Error allocating memory for cache"; in cache_ctr()
2703 r = cache_create(ca, &cache); in cache_ctr()
2707 r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3); in cache_ctr()
2709 destroy(cache); in cache_ctr()
2713 ti->private = cache; in cache_ctr()
2723 struct cache *cache = ti->private; in cache_map() local
2727 dm_oblock_t block = get_bio_block(cache, bio); in cache_map()
2730 if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) { in cache_map()
2733 * the end of the origin device. We don't cache these. in cache_map()
2736 remap_to_origin(cache, bio); in cache_map()
2737 accounted_begin(cache, bio); in cache_map()
2742 defer_bio(cache, bio); in cache_map()
2746 r = map_bio(cache, bio, block, &commit_needed); in cache_map()
2748 schedule_commit(&cache->committer); in cache_map()
2755 struct cache *cache = ti->private; in cache_end_io() local
2760 policy_tick(cache->policy, false); in cache_end_io()
2762 spin_lock_irqsave(&cache->lock, flags); in cache_end_io()
2763 cache->need_tick_bio = true; in cache_end_io()
2764 spin_unlock_irqrestore(&cache->lock, flags); in cache_end_io()
2767 bio_drop_shared_lock(cache, bio); in cache_end_io()
2768 accounted_complete(cache, bio); in cache_end_io()
2773 static int write_dirty_bitset(struct cache *cache) in write_dirty_bitset() argument
2777 if (get_cache_mode(cache) >= CM_READ_ONLY) in write_dirty_bitset()
2780 r = dm_cache_set_dirty_bits(cache->cmd, from_cblock(cache->cache_size), cache->dirty_bitset); in write_dirty_bitset()
2782 metadata_operation_failed(cache, "dm_cache_set_dirty_bits", r); in write_dirty_bitset()
2787 static int write_discard_bitset(struct cache *cache) in write_discard_bitset() argument
2791 if (get_cache_mode(cache) >= CM_READ_ONLY) in write_discard_bitset()
2794 r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size, in write_discard_bitset()
2795 cache->discard_nr_blocks); in write_discard_bitset()
2797 DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache)); in write_discard_bitset()
2798 metadata_operation_failed(cache, "dm_cache_discard_bitset_resize", r); in write_discard_bitset()
2802 for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) { in write_discard_bitset()
2803 r = dm_cache_set_discard(cache->cmd, to_dblock(i), in write_discard_bitset()
2804 is_discarded(cache, to_dblock(i))); in write_discard_bitset()
2806 metadata_operation_failed(cache, "dm_cache_set_discard", r); in write_discard_bitset()
2814 static int write_hints(struct cache *cache) in write_hints() argument
2818 if (get_cache_mode(cache) >= CM_READ_ONLY) in write_hints()
2821 r = dm_cache_write_hints(cache->cmd, cache->policy); in write_hints()
2823 metadata_operation_failed(cache, "dm_cache_write_hints", r); in write_hints()
2833 static bool sync_metadata(struct cache *cache) in sync_metadata() argument
2837 r1 = write_dirty_bitset(cache); in sync_metadata()
2839 DMERR("%s: could not write dirty bitset", cache_device_name(cache)); in sync_metadata()
2841 r2 = write_discard_bitset(cache); in sync_metadata()
2843 DMERR("%s: could not write discard bitset", cache_device_name(cache)); in sync_metadata()
2845 save_stats(cache); in sync_metadata()
2847 r3 = write_hints(cache); in sync_metadata()
2849 DMERR("%s: could not write hints", cache_device_name(cache)); in sync_metadata()
2856 r4 = commit(cache, !r1 && !r2 && !r3); in sync_metadata()
2858 DMERR("%s: could not write cache metadata", cache_device_name(cache)); in sync_metadata()
2865 struct cache *cache = ti->private; in cache_postsuspend() local
2867 prevent_background_work(cache); in cache_postsuspend()
2868 BUG_ON(atomic_read(&cache->nr_io_migrations)); in cache_postsuspend()
2870 cancel_delayed_work(&cache->waker); in cache_postsuspend()
2871 flush_workqueue(cache->wq); in cache_postsuspend()
2872 WARN_ON(cache->tracker.in_flight); in cache_postsuspend()
2878 requeue_deferred_bios(cache); in cache_postsuspend()
2880 if (get_cache_mode(cache) == CM_WRITE) in cache_postsuspend()
2881 (void) sync_metadata(cache); in cache_postsuspend()
2888 struct cache *cache = context; in load_mapping() local
2891 set_bit(from_cblock(cblock), cache->dirty_bitset); in load_mapping()
2892 atomic_inc(&cache->nr_dirty); in load_mapping()
2894 clear_bit(from_cblock(cblock), cache->dirty_bitset); in load_mapping()
2896 r = policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid); in load_mapping()
2910 struct cache *cache; member
2920 static void discard_load_info_init(struct cache *cache, in discard_load_info_init() argument
2923 li->cache = cache; in discard_load_info_init()
2943 b = dm_sector_div_up(b, li->cache->discard_block_size); in set_discard_range()
2944 sector_div(e, li->cache->discard_block_size); in set_discard_range()
2950 if (e > from_dblock(li->cache->discard_nr_blocks)) in set_discard_range()
2951 e = from_dblock(li->cache->discard_nr_blocks); in set_discard_range()
2954 set_discard(li->cache, to_dblock(b)); in set_discard_range()
2987 static dm_cblock_t get_cache_dev_size(struct cache *cache) in get_cache_dev_size() argument
2989 sector_t size = get_dev_size(cache->cache_dev); in get_cache_dev_size()
2990 (void) sector_div(size, cache->sectors_per_block); in get_cache_dev_size()
2994 static bool can_resize(struct cache *cache, dm_cblock_t new_size) in can_resize() argument
2996 if (from_cblock(new_size) > from_cblock(cache->cache_size)) { in can_resize()
2997 if (cache->sized) { in can_resize()
2998 DMERR("%s: unable to extend cache due to missing cache table reload", in can_resize()
2999 cache_device_name(cache)); in can_resize()
3005 * We can't drop a dirty block when shrinking the cache. in can_resize()
3007 while (from_cblock(new_size) < from_cblock(cache->cache_size)) { in can_resize()
3009 if (is_dirty(cache, new_size)) { in can_resize()
3010 DMERR("%s: unable to shrink cache; cache block %llu is dirty", in can_resize()
3011 cache_device_name(cache), in can_resize()
3020 static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size) in resize_cache_dev() argument
3024 r = dm_cache_resize(cache->cmd, new_size); in resize_cache_dev()
3026 DMERR("%s: could not resize cache metadata", cache_device_name(cache)); in resize_cache_dev()
3027 metadata_operation_failed(cache, "dm_cache_resize", r); in resize_cache_dev()
3031 set_cache_size(cache, new_size); in resize_cache_dev()
3039 struct cache *cache = ti->private; in cache_preresume() local
3040 dm_cblock_t csize = get_cache_dev_size(cache); in cache_preresume()
3043 * Check to see if the cache has resized. in cache_preresume()
3045 if (!cache->sized) { in cache_preresume()
3046 r = resize_cache_dev(cache, csize); in cache_preresume()
3050 cache->sized = true; in cache_preresume()
3052 } else if (csize != cache->cache_size) { in cache_preresume()
3053 if (!can_resize(cache, csize)) in cache_preresume()
3056 r = resize_cache_dev(cache, csize); in cache_preresume()
3061 if (!cache->loaded_mappings) { in cache_preresume()
3062 r = dm_cache_load_mappings(cache->cmd, cache->policy, in cache_preresume()
3063 load_mapping, cache); in cache_preresume()
3065 DMERR("%s: could not load cache mappings", cache_device_name(cache)); in cache_preresume()
3066 metadata_operation_failed(cache, "dm_cache_load_mappings", r); in cache_preresume()
3070 cache->loaded_mappings = true; in cache_preresume()
3073 if (!cache->loaded_discards) { in cache_preresume()
3081 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); in cache_preresume()
3083 discard_load_info_init(cache, &li); in cache_preresume()
3084 r = dm_cache_load_discards(cache->cmd, load_discard, &li); in cache_preresume()
3086 DMERR("%s: could not load origin discards", cache_device_name(cache)); in cache_preresume()
3087 metadata_operation_failed(cache, "dm_cache_load_discards", r); in cache_preresume()
3092 cache->loaded_discards = true; in cache_preresume()
3100 struct cache *cache = ti->private; in cache_resume() local
3102 cache->need_tick_bio = true; in cache_resume()
3103 allow_background_work(cache); in cache_resume()
3104 do_waker(&cache->waker.work); in cache_resume()
3107 static void emit_flags(struct cache *cache, char *result, in emit_flags() argument
3111 struct cache_features *cf = &cache->features; in emit_flags()
3119 if (writethrough_mode(cache)) in emit_flags()
3122 else if (passthrough_mode(cache)) in emit_flags()
3125 else if (writeback_mode(cache)) in emit_flags()
3131 cache_device_name(cache), (int) cf->io_mode); in emit_flags()
3144 * <cache block size> <#used cache blocks>/<#total cache blocks>
3149 * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
3160 struct cache *cache = ti->private; in cache_status() local
3166 if (get_cache_mode(cache) == CM_FAIL) { in cache_status()
3173 (void) commit(cache, false); in cache_status()
3175 r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata); in cache_status()
3178 cache_device_name(cache), r); in cache_status()
3182 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata); in cache_status()
3185 cache_device_name(cache), r); in cache_status()
3189 residency = policy_residency(cache->policy); in cache_status()
3195 (unsigned long long)cache->sectors_per_block, in cache_status()
3197 (unsigned long long) from_cblock(cache->cache_size), in cache_status()
3198 (unsigned) atomic_read(&cache->stats.read_hit), in cache_status()
3199 (unsigned) atomic_read(&cache->stats.read_miss), in cache_status()
3200 (unsigned) atomic_read(&cache->stats.write_hit), in cache_status()
3201 (unsigned) atomic_read(&cache->stats.write_miss), in cache_status()
3202 (unsigned) atomic_read(&cache->stats.demotion), in cache_status()
3203 (unsigned) atomic_read(&cache->stats.promotion), in cache_status()
3204 (unsigned long) atomic_read(&cache->nr_dirty)); in cache_status()
3206 emit_flags(cache, result, maxlen, &sz); in cache_status()
3208 DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold); in cache_status()
3210 DMEMIT("%s ", dm_cache_policy_get_name(cache->policy)); in cache_status()
3212 r = policy_emit_config_values(cache->policy, result, maxlen, &sz); in cache_status()
3215 cache_device_name(cache), r); in cache_status()
3218 if (get_cache_mode(cache) == CM_READ_ONLY) in cache_status()
3223 r = dm_cache_metadata_needs_check(cache->cmd, &needs_check); in cache_status()
3233 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev); in cache_status()
3235 format_dev_t(buf, cache->cache_dev->bdev->bd_dev); in cache_status()
3237 format_dev_t(buf, cache->origin_dev->bdev->bd_dev); in cache_status()
3240 for (i = 0; i < cache->nr_ctr_args - 1; i++) in cache_status()
3241 DMEMIT(" %s", cache->ctr_args[i]); in cache_status()
3242 if (cache->nr_ctr_args) in cache_status()
3243 DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]); in cache_status()
3262 * A cache block range can take two forms:
3267 static int parse_cblock_range(struct cache *cache, const char *str, in parse_cblock_range() argument
3300 DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str); in parse_cblock_range()
3304 static int validate_cblock_range(struct cache *cache, struct cblock_range *range) in validate_cblock_range() argument
3308 uint64_t n = from_cblock(cache->cache_size); in validate_cblock_range()
3312 cache_device_name(cache), b, n); in validate_cblock_range()
3318 cache_device_name(cache), e, n); in validate_cblock_range()
3324 cache_device_name(cache), b, e); in validate_cblock_range()
3336 static int request_invalidation(struct cache *cache, struct cblock_range *range) in request_invalidation() argument
3347 r = invalidate_cblock(cache, range->begin); in request_invalidation()
3354 cache->commit_requested = true; in request_invalidation()
3358 static int process_invalidate_cblocks_message(struct cache *cache, unsigned count, in process_invalidate_cblocks_message() argument
3365 if (!passthrough_mode(cache)) { in process_invalidate_cblocks_message()
3366 DMERR("%s: cache has to be in passthrough mode for invalidation", in process_invalidate_cblocks_message()
3367 cache_device_name(cache)); in process_invalidate_cblocks_message()
3372 r = parse_cblock_range(cache, cblock_ranges[i], &range); in process_invalidate_cblocks_message()
3376 r = validate_cblock_range(cache, &range); in process_invalidate_cblocks_message()
3383 r = request_invalidation(cache, &range); in process_invalidate_cblocks_message()
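Note: process_invalidate_cblocks_message() is reached via the 'invalidate_cblocks' target message and, as the check above shows, only works while the cache is in passthrough mode. A usage sketch with illustrative device name and block numbers:

dmsetup message my-cache 0 invalidate_cblocks 2345 3456-4567

Single cblocks and begin-end ranges may be mixed freely, matching the two forms accepted by parse_cblock_range() above.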
3397 * The key migration_threshold is supported by the cache target core.
3402 struct cache *cache = ti->private; in cache_message() local
3407 if (get_cache_mode(cache) >= CM_READ_ONLY) { in cache_message()
3408 DMERR("%s: unable to service cache target messages in READ_ONLY or FAIL mode", in cache_message()
3409 cache_device_name(cache)); in cache_message()
3414 return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1); in cache_message()
3419 return set_config_value(cache, argv[0], argv[1]); in cache_message()
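Note: any other <key> <value> message falls through to set_config_value(), so runtime tunables can be adjusted the same way, for example (illustrative): dmsetup message my-cache 0 migration_threshold 204800. Keys the core does not handle in process_config_option() are passed on to the policy via policy_set_config_value().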
3426 struct cache *cache = ti->private; in cache_iterate_devices() local
3428 r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data); in cache_iterate_devices()
3430 r = fn(ti, cache->origin_dev, 0, ti->len, data); in cache_iterate_devices()
3446 static void disable_passdown_if_not_supported(struct cache *cache) in disable_passdown_if_not_supported() argument
3448 struct block_device *origin_bdev = cache->origin_dev->bdev; in disable_passdown_if_not_supported()
3453 if (!cache->features.discard_passdown) in disable_passdown_if_not_supported()
3459 else if (origin_limits->max_discard_sectors < cache->sectors_per_block) in disable_passdown_if_not_supported()
3465 cache->features.discard_passdown = false; in disable_passdown_if_not_supported()
3469 static void set_discard_limits(struct cache *cache, struct queue_limits *limits) in set_discard_limits() argument
3471 struct block_device *origin_bdev = cache->origin_dev->bdev; in set_discard_limits()
3474 if (!cache->features.discard_passdown) { in set_discard_limits()
3476 limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024, in set_discard_limits()
3477 cache->origin_sectors); in set_discard_limits()
3478 limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; in set_discard_limits()
3495 struct cache *cache = ti->private; in cache_io_hints() local
3500 * cache's blocksize (io_opt is a factor) do not override them. in cache_io_hints()
3502 if (io_opt_sectors < cache->sectors_per_block || in cache_io_hints()
3503 do_div(io_opt_sectors, cache->sectors_per_block)) { in cache_io_hints()
3504 blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT); in cache_io_hints()
3505 blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT); in cache_io_hints()
3508 disable_passdown_if_not_supported(cache); in cache_io_hints()
3509 set_discard_limits(cache, limits); in cache_io_hints()
3515 .name = "cache",
3541 DMERR("cache target registration failed: %d", r); in dm_cache_init()
3558 MODULE_DESCRIPTION(DM_NAME " cache target");