Lines Matching +full:cell +full:- +full:count (drivers/md/dm-thin.c)

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2011-2012 Red Hat UK.
8 #include "dm-thin-metadata.h"
9 #include "dm-bio-prison-v1.h"
12 #include <linux/device-mapper.h>
13 #include <linux/dm-io.h>
14 #include <linux/dm-kcopyd.h>
51 #define MAX_DEV_ID ((1 << 24) - 1)
57 * We use a standard copy-on-write btree to store the mappings for the
58 * devices (note I'm talking about copy-on-write of the metadata here, not
95 * - The origin mapping will point to the old origin block (the shared
99 * - The snap mapping still points to the old block. As it would after
111 /*----------------------------------------------------------------*/
124 key->virtual = (ls == VIRTUAL); in build_key()
125 key->dev = dm_thin_dev_id(td); in build_key()
126 key->block_begin = b; in build_key()
127 key->block_end = e; in build_key()
144 /*----------------------------------------------------------------*/
156 init_rwsem(&t->lock); in throttle_init()
157 t->throttle_applied = false; in throttle_init()
162 t->threshold = jiffies + THROTTLE_THRESHOLD; in throttle_work_start()
167 if (!t->throttle_applied && time_is_before_jiffies(t->threshold)) { in throttle_work_update()
168 down_write(&t->lock); in throttle_work_update()
169 t->throttle_applied = true; in throttle_work_update()
175 if (t->throttle_applied) { in throttle_work_complete()
176 t->throttle_applied = false; in throttle_work_complete()
177 up_write(&t->lock); in throttle_work_complete()
183 down_read(&t->lock); in throttle_lock()
188 up_read(&t->lock); in throttle_unlock()
191 /*----------------------------------------------------------------*/
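The throttle fragments above (throttle_init/throttle_work_start/throttle_work_update/throttle_work_complete plus throttle_lock/throttle_unlock) form a simple back-pressure mechanism: the worker records a deadline when it starts a pass, and once it has been running past THROTTLE_THRESHOLD it takes the rwsem for write, so submitters calling throttle_lock() (a read lock) block until throttle_work_complete() drops it. Below is a minimal userspace sketch of the same pattern, an analogue that substitutes pthread rwlocks and wall-clock seconds for the kernel's rw_semaphore and jiffies; none of these names are dm-thin.c code.

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

#define THROTTLE_THRESHOLD_SEC 2	/* analogue of the kernel's THROTTLE_THRESHOLD */

struct throttle {
	pthread_rwlock_t lock;
	bool throttle_applied;
	time_t threshold;
};

static void throttle_init(struct throttle *t)
{
	pthread_rwlock_init(&t->lock, NULL);
	t->throttle_applied = false;
}

/* Called when the worker begins a pass. */
static void throttle_work_start(struct throttle *t)
{
	t->threshold = time(NULL) + THROTTLE_THRESHOLD_SEC;
}

/* Called periodically by the worker; applies back pressure once it has run too long. */
static void throttle_work_update(struct throttle *t)
{
	if (!t->throttle_applied && time(NULL) >= t->threshold) {
		pthread_rwlock_wrlock(&t->lock);	/* submitters now block in throttle_lock() */
		t->throttle_applied = true;
	}
}

/* Called when the worker finishes its pass; releases any waiting submitters. */
static void throttle_work_complete(struct throttle *t)
{
	if (t->throttle_applied) {
		t->throttle_applied = false;
		pthread_rwlock_unlock(&t->lock);
	}
}

/* Submission path: cheap shared lock, only contended while the worker is behind. */
static void throttle_lock(struct throttle *t)   { pthread_rwlock_rdlock(&t->lock); }
static void throttle_unlock(struct throttle *t) { pthread_rwlock_unlock(&t->lock); }

The worker would call throttle_work_start() at the top of its pass, throttle_work_update() periodically while draining work, and throttle_work_complete() at the end, mirroring the call sites visible in do_worker() further down (lines 2423-2433).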
227 typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
294 return pool->pf.mode; in get_pool_mode()
301 "out-of-data-space", in notify_of_pool_mode_change()
302 "read-only", in notify_of_pool_mode_change()
303 "read-only", in notify_of_pool_mode_change()
310 if (!pool->pf.error_if_no_space) in notify_of_pool_mode_change()
316 dm_table_event(pool->ti->table); in notify_of_pool_mode_change()
318 dm_device_name(pool->pool_md), in notify_of_pool_mode_change()
365 /*----------------------------------------------------------------*/
369 return pool->sectors_per_block_shift >= 0; in block_size_is_power_of_two()
375 (b << pool->sectors_per_block_shift) : in block_to_sectors()
376 (b * pool->sectors_per_block); in block_to_sectors()
379 /*----------------------------------------------------------------*/
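block_size_is_power_of_two() and block_to_sectors() (lines 369-376 above) rely on pool->sectors_per_block_shift, which pool_create() (lines 2965-2968 further down) sets to -1 when the block size is not a power of two, so the hot path can choose between a shift and a multiply. A small standalone sketch of that convention follows; the names are illustrative, and __builtin_ctzl() stands in for the kernel's __ffs().

#include <assert.h>

/* Illustrative stand-ins for pool->sectors_per_block{,_shift}; not dm-thin.c code. */
struct blk_geom {
	unsigned long sectors_per_block;
	int sectors_per_block_shift;	/* -1 when the block size is not a power of two */
};

static void blk_geom_init(struct blk_geom *g, unsigned long block_size)
{
	g->sectors_per_block = block_size;
	/* x & (x - 1) clears the lowest set bit: it is zero iff x is a power of two. */
	if (block_size & (block_size - 1))
		g->sectors_per_block_shift = -1;
	else
		g->sectors_per_block_shift = __builtin_ctzl(block_size);	/* log2, cf. __ffs() */
}

static unsigned long block_to_sectors(const struct blk_geom *g, unsigned long b)
{
	return g->sectors_per_block_shift >= 0 ?
		(b << g->sectors_per_block_shift) :
		(b * g->sectors_per_block);
}

int main(void)
{
	struct blk_geom g;

	blk_geom_init(&g, 128);			/* power of two: shift = 7 */
	assert(block_to_sectors(&g, 3) == 384);

	blk_geom_init(&g, 96);			/* not a power of two: falls back to multiply */
	assert(block_to_sectors(&g, 3) == 288);
	return 0;
}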
392 op->tc = tc; in begin_discard()
393 blk_start_plug(&op->plug); in begin_discard()
394 op->parent_bio = parent; in begin_discard()
395 op->bio = NULL; in begin_discard()
400 struct thin_c *tc = op->tc; in issue_discard()
401 sector_t s = block_to_sectors(tc->pool, data_b); in issue_discard()
402 sector_t len = block_to_sectors(tc->pool, data_e - data_b); in issue_discard()
404 return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOIO, &op->bio); in issue_discard()
409 if (op->bio) { in end_discard()
414 bio_chain(op->bio, op->parent_bio); in end_discard()
415 op->bio->bi_opf = REQ_OP_DISCARD; in end_discard()
416 submit_bio(op->bio); in end_discard()
419 blk_finish_plug(&op->plug); in end_discard()
425 if (r && !op->parent_bio->bi_status) in end_discard()
426 op->parent_bio->bi_status = errno_to_blk_status(r); in end_discard()
427 bio_endio(op->parent_bio); in end_discard()
430 /*----------------------------------------------------------------*/
438 queue_work(pool->wq, &pool->worker); in wake_worker()
441 /*----------------------------------------------------------------*/
450 * Allocate a cell from the prison's mempool. in bio_detain()
453 cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO); in bio_detain()
455 r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result); in bio_detain()
458 * We reused an old cell; we can get rid of in bio_detain()
461 dm_bio_prison_free_cell(pool->prison, cell_prealloc); in bio_detain()
467 struct dm_bio_prison_cell *cell, in cell_release() argument
470 dm_cell_release(pool->prison, cell, bios); in cell_release()
471 dm_bio_prison_free_cell(pool->prison, cell); in cell_release()
477 struct dm_bio_prison_cell *cell) in cell_visit_release() argument
479 dm_cell_visit_release(pool->prison, fn, context, cell); in cell_visit_release()
480 dm_bio_prison_free_cell(pool->prison, cell); in cell_visit_release()
484 struct dm_bio_prison_cell *cell, in cell_release_no_holder() argument
487 dm_cell_release_no_holder(pool->prison, cell, bios); in cell_release_no_holder()
488 dm_bio_prison_free_cell(pool->prison, cell); in cell_release_no_holder()
492 struct dm_bio_prison_cell *cell, blk_status_t error_code) in cell_error_with_code() argument
494 dm_cell_error(pool->prison, cell, error_code); in cell_error_with_code()
495 dm_bio_prison_free_cell(pool->prison, cell); in cell_error_with_code()
500 return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR; in get_pool_io_error_code()
503 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell) in cell_error() argument
505 cell_error_with_code(pool, cell, get_pool_io_error_code(pool)); in cell_error()
508 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell) in cell_success() argument
510 cell_error_with_code(pool, cell, 0); in cell_success()
513 static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell) in cell_requeue() argument
515 cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE); in cell_requeue()
518 /*----------------------------------------------------------------*/
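bio_detain() (lines 450-461 above) allocates a cell with GFP_NOIO before calling dm_bio_detain(), because the allocation may block and so cannot happen under the prison's lock; if the bio ends up being added to an already existing cell, the preallocated one is simply returned to the mempool. Below is a toy sketch of that "preallocate outside the lock, free if unused" idiom; the one-slot-per-key table and every name in it are illustrative assumptions, not the real bio prison.

#include <pthread.h>
#include <stdlib.h>

#define NKEYS 64

/* Toy "prison": one slot per key, protected by a single mutex (not the real data structure). */
static pthread_mutex_t prison_lock = PTHREAD_MUTEX_INITIALIZER;
static void *prison_cells[NKEYS];

/*
 * Returns 0 if a new cell was installed for the key, 1 if an existing cell was
 * found (the preallocation is then freed, mirroring the
 * dm_bio_prison_free_cell(pool->prison, cell_prealloc) path above), -1 on OOM.
 */
static int detain(unsigned int key)
{
	/* Allocate before taking the lock: allocation may sleep or fail, the lock must stay cheap. */
	void *prealloc = malloc(64);	/* stand-in for a prison cell */
	int reused;

	if (!prealloc)
		return -1;

	pthread_mutex_lock(&prison_lock);
	reused = prison_cells[key % NKEYS] != NULL;
	if (!reused)
		prison_cells[key % NKEYS] = prealloc;
	pthread_mutex_unlock(&prison_lock);

	if (reused)
		free(prealloc);		/* a cell for this key already existed */
	return reused;
}

Calling detain(k) twice with the same k installs a cell the first time and frees the preallocation the second time, which is the shape of the reuse path commented at line 458 above.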
542 list_add(&pool->list, &dm_thin_pool_table.pools); in __pool_table_insert()
548 list_del(&pool->list); in __pool_table_remove()
558 if (tmp->pool_md == md) { in __pool_table_lookup()
574 if (tmp->md_dev == md_dev) { in __pool_table_lookup_metadata_dev()
583 /*----------------------------------------------------------------*/
591 struct dm_bio_prison_cell *cell; member
605 bio->bi_status = error; in error_bio_list()
617 spin_lock_irq(&tc->lock); in error_thin_bio_list()
619 spin_unlock_irq(&tc->lock); in error_thin_bio_list()
626 struct pool *pool = tc->pool; in requeue_deferred_cells()
628 struct dm_bio_prison_cell *cell, *tmp; in requeue_deferred_cells() local
632 spin_lock_irq(&tc->lock); in requeue_deferred_cells()
633 list_splice_init(&tc->deferred_cells, &cells); in requeue_deferred_cells()
634 spin_unlock_irq(&tc->lock); in requeue_deferred_cells()
636 list_for_each_entry_safe(cell, tmp, &cells, user_list) in requeue_deferred_cells()
637 cell_requeue(pool, cell); in requeue_deferred_cells()
646 spin_lock_irq(&tc->lock); in requeue_io()
647 __merge_bio_list(&bios, &tc->deferred_bio_list); in requeue_io()
648 __merge_bio_list(&bios, &tc->retry_on_resume_list); in requeue_io()
649 spin_unlock_irq(&tc->lock); in requeue_io()
660 list_for_each_entry_rcu(tc, &pool->active_thins, list) in error_retry_list_with_code()
661 error_thin_bio_list(tc, &tc->retry_on_resume_list, error); in error_retry_list_with_code()
673 * but most is exclusively called from the thin target rather than the thin-pool
679 struct pool *pool = tc->pool; in get_bio_block()
680 sector_t block_nr = bio->bi_iter.bi_sector; in get_bio_block()
683 block_nr >>= pool->sectors_per_block_shift; in get_bio_block()
685 (void) sector_div(block_nr, pool->sectors_per_block); in get_bio_block()
696 struct pool *pool = tc->pool; in get_bio_block_range()
697 sector_t b = bio->bi_iter.bi_sector; in get_bio_block_range()
698 sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT); in get_bio_block_range()
700 b += pool->sectors_per_block - 1ull; /* so we round up */ in get_bio_block_range()
703 b >>= pool->sectors_per_block_shift; in get_bio_block_range()
704 e >>= pool->sectors_per_block_shift; in get_bio_block_range()
706 (void) sector_div(b, pool->sectors_per_block); in get_bio_block_range()
707 (void) sector_div(e, pool->sectors_per_block); in get_bio_block_range()
720 struct pool *pool = tc->pool; in remap()
721 sector_t bi_sector = bio->bi_iter.bi_sector; in remap()
723 bio_set_dev(bio, tc->pool_dev->bdev); in remap()
725 bio->bi_iter.bi_sector = in remap()
726 (block << pool->sectors_per_block_shift) | in remap()
727 (bi_sector & (pool->sectors_per_block - 1)); in remap()
729 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + in remap()
730 sector_div(bi_sector, pool->sectors_per_block); in remap()
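get_bio_block_range() (lines 696-707 above) rounds the start of a discard up and its end down to pool block boundaries, so only blocks fully covered by the bio are discarded, while remap() (lines 720-730) turns a virtual sector into a data-device sector by combining the looked-up data block with the offset inside the block. A small worked sketch of both calculations, assuming a power-of-two block size of 128 sectors; the names and numbers are illustrative only.

#include <assert.h>

#define SECTORS_PER_BLOCK_SHIFT 7		/* 128 sectors (64 KiB) per block, as an example */
#define SECTORS_PER_BLOCK (1UL << SECTORS_PER_BLOCK_SHIFT)

/* Compute the whole blocks fully covered by [begin_sector, begin_sector + len_sectors). */
static void bio_block_range(unsigned long begin_sector, unsigned long len_sectors,
			    unsigned long *block_begin, unsigned long *block_end)
{
	unsigned long b = begin_sector + SECTORS_PER_BLOCK - 1;	/* round the start up */
	unsigned long e = begin_sector + len_sectors;		/* the end rounds down via the shift */

	*block_begin = b >> SECTORS_PER_BLOCK_SHIFT;
	*block_end = e >> SECTORS_PER_BLOCK_SHIFT;
	if (*block_end < *block_begin)				/* bio covered no whole block */
		*block_end = *block_begin;
}

/* Remap a virtual sector to the data device: mapped data block plus offset within the block. */
static unsigned long remap_sector(unsigned long data_block, unsigned long virt_sector)
{
	return (data_block << SECTORS_PER_BLOCK_SHIFT) |
	       (virt_sector & (SECTORS_PER_BLOCK - 1));
}

int main(void)
{
	unsigned long b, e;

	/* A discard of sectors [100, 900) only fully covers blocks 1..6, i.e. sectors [128, 896). */
	bio_block_range(100, 800, &b, &e);
	assert(b == 1 && e == 7);			/* end is exclusive */

	/* Sector 300 of a thin device whose block maps to data block 9. */
	assert(remap_sector(9, 300) == 9 * 128 + (300 % 128));
	return 0;
}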
735 bio_set_dev(bio, tc->origin_dev->bdev); in remap_to_origin()
740 return op_is_flush(bio->bi_opf) && in bio_triggers_commit()
741 dm_thin_changed_this_transaction(tc->td); in bio_triggers_commit()
752 h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds); in inc_all_io_entry()
757 struct pool *pool = tc->pool; in issue()
769 if (dm_thin_aborted_changes(tc->td)) { in issue()
778 spin_lock_irq(&pool->lock); in issue()
779 bio_list_add(&pool->deferred_flush_bios, bio); in issue()
780 spin_unlock_irq(&pool->lock); in issue()
796 /*----------------------------------------------------------------*/
818 struct dm_bio_prison_cell *cell; member
823 * still be in the cell, so care has to be taken to avoid issuing
832 struct pool *pool = m->tc->pool; in __complete_mapping_preparation()
834 if (atomic_dec_and_test(&m->prepare_actions)) { in __complete_mapping_preparation()
835 list_add_tail(&m->list, &pool->prepared_mappings); in __complete_mapping_preparation()
843 struct pool *pool = m->tc->pool; in complete_mapping_preparation()
845 spin_lock_irqsave(&pool->lock, flags); in complete_mapping_preparation()
847 spin_unlock_irqrestore(&pool->lock, flags); in complete_mapping_preparation()
854 m->status = read_err || write_err ? BLK_STS_IOERR : 0; in copy_complete()
861 struct dm_thin_new_mapping *m = h->overwrite_mapping; in overwrite_endio()
863 bio->bi_end_io = m->saved_bi_end_io; in overwrite_endio()
865 m->status = bio->bi_status; in overwrite_endio()
869 /*----------------------------------------------------------------*/
880 * This sends the bios in the cell, except the original holder, back
883 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell) in cell_defer_no_holder() argument
885 struct pool *pool = tc->pool; in cell_defer_no_holder()
890 cell_release_no_holder(pool, cell, &bios); in cell_defer_no_holder()
893 spin_lock_irqsave(&tc->lock, flags); in cell_defer_no_holder()
894 bio_list_merge(&tc->deferred_bio_list, &bios); in cell_defer_no_holder()
895 spin_unlock_irqrestore(&tc->lock, flags); in cell_defer_no_holder()
909 struct dm_bio_prison_cell *cell) in __inc_remap_and_issue_cell() argument
914 while ((bio = bio_list_pop(&cell->bios))) { in __inc_remap_and_issue_cell()
915 if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) in __inc_remap_and_issue_cell()
916 bio_list_add(&info->defer_bios, bio); in __inc_remap_and_issue_cell()
918 inc_all_io_entry(info->tc->pool, bio); in __inc_remap_and_issue_cell()
925 bio_list_add(&info->issue_bios, bio); in __inc_remap_and_issue_cell()
931 struct dm_bio_prison_cell *cell, in inc_remap_and_issue_cell() argument
943 * before the cell is released, and avoid a race with new bios in inc_remap_and_issue_cell()
944 * being added to the cell. in inc_remap_and_issue_cell()
946 cell_visit_release(tc->pool, __inc_remap_and_issue_cell, in inc_remap_and_issue_cell()
947 &info, cell); in inc_remap_and_issue_cell()
958 cell_error(m->tc->pool, m->cell); in process_prepared_mapping_fail()
959 list_del(&m->list); in process_prepared_mapping_fail()
960 mempool_free(m, &m->tc->pool->mapping_pool); in process_prepared_mapping_fail()
965 struct pool *pool = tc->pool; in complete_overwrite_bio()
981 if (dm_thin_aborted_changes(tc->td)) { in complete_overwrite_bio()
990 spin_lock_irq(&pool->lock); in complete_overwrite_bio()
991 bio_list_add(&pool->deferred_flush_completions, bio); in complete_overwrite_bio()
992 spin_unlock_irq(&pool->lock); in complete_overwrite_bio()
997 struct thin_c *tc = m->tc; in process_prepared_mapping()
998 struct pool *pool = tc->pool; in process_prepared_mapping()
999 struct bio *bio = m->bio; in process_prepared_mapping()
1002 if (m->status) { in process_prepared_mapping()
1003 cell_error(pool, m->cell); in process_prepared_mapping()
1012 r = dm_thin_insert_block(tc->td, m->virt_begin, m->data_block); in process_prepared_mapping()
1015 cell_error(pool, m->cell); in process_prepared_mapping()
1023 * the bios in the cell. in process_prepared_mapping()
1026 inc_remap_and_issue_cell(tc, m->cell, m->data_block); in process_prepared_mapping()
1029 inc_all_io_entry(tc->pool, m->cell->holder); in process_prepared_mapping()
1030 remap_and_issue(tc, m->cell->holder, m->data_block); in process_prepared_mapping()
1031 inc_remap_and_issue_cell(tc, m->cell, m->data_block); in process_prepared_mapping()
1035 list_del(&m->list); in process_prepared_mapping()
1036 mempool_free(m, &pool->mapping_pool); in process_prepared_mapping()
1039 /*----------------------------------------------------------------*/
1043 struct thin_c *tc = m->tc; in free_discard_mapping()
1045 if (m->cell) in free_discard_mapping()
1046 cell_defer_no_holder(tc, m->cell); in free_discard_mapping()
1047 mempool_free(m, &tc->pool->mapping_pool); in free_discard_mapping()
1052 bio_io_error(m->bio); in process_prepared_discard_fail()
1058 bio_endio(m->bio); in process_prepared_discard_success()
1065 struct thin_c *tc = m->tc; in process_prepared_discard_no_passdown()
1067 r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end); in process_prepared_discard_no_passdown()
1069 metadata_operation_failed(tc->pool, "dm_thin_remove_range", r); in process_prepared_discard_no_passdown()
1070 bio_io_error(m->bio); in process_prepared_discard_no_passdown()
1072 bio_endio(m->bio); in process_prepared_discard_no_passdown()
1074 cell_defer_no_holder(tc, m->cell); in process_prepared_discard_no_passdown()
1075 mempool_free(m, &tc->pool->mapping_pool); in process_prepared_discard_no_passdown()
1078 /*----------------------------------------------------------------*/
1089 struct thin_c *tc = m->tc; in passdown_double_checking_shared_status()
1090 struct pool *pool = tc->pool; in passdown_double_checking_shared_status()
1091 dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin; in passdown_double_checking_shared_status()
1098 r = dm_pool_block_is_shared(pool->pmd, b, &shared); in passdown_double_checking_shared_status()
1111 r = dm_pool_block_is_shared(pool->pmd, e, &shared); in passdown_double_checking_shared_status()
1132 struct pool *pool = m->tc->pool; in queue_passdown_pt2()
1134 spin_lock_irqsave(&pool->lock, flags); in queue_passdown_pt2()
1135 list_add_tail(&m->list, &pool->prepared_discards_pt2); in queue_passdown_pt2()
1136 spin_unlock_irqrestore(&pool->lock, flags); in queue_passdown_pt2()
1146 queue_passdown_pt2(bio->bi_private); in passdown_endio()
1153 struct thin_c *tc = m->tc; in process_prepared_discard_passdown_pt1()
1154 struct pool *pool = tc->pool; in process_prepared_discard_passdown_pt1()
1156 dm_block_t data_end = m->data_block + (m->virt_end - m->virt_begin); in process_prepared_discard_passdown_pt1()
1163 r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end); in process_prepared_discard_passdown_pt1()
1166 bio_io_error(m->bio); in process_prepared_discard_passdown_pt1()
1167 cell_defer_no_holder(tc, m->cell); in process_prepared_discard_passdown_pt1()
1168 mempool_free(m, &pool->mapping_pool); in process_prepared_discard_passdown_pt1()
1176 r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end); in process_prepared_discard_passdown_pt1()
1179 bio_io_error(m->bio); in process_prepared_discard_passdown_pt1()
1180 cell_defer_no_holder(tc, m->cell); in process_prepared_discard_passdown_pt1()
1181 mempool_free(m, &pool->mapping_pool); in process_prepared_discard_passdown_pt1()
1186 discard_parent->bi_end_io = passdown_endio; in process_prepared_discard_passdown_pt1()
1187 discard_parent->bi_private = m; in process_prepared_discard_passdown_pt1()
1188 if (m->maybe_shared) in process_prepared_discard_passdown_pt1()
1194 r = issue_discard(&op, m->data_block, data_end); in process_prepared_discard_passdown_pt1()
1202 struct thin_c *tc = m->tc; in process_prepared_discard_passdown_pt2()
1203 struct pool *pool = tc->pool; in process_prepared_discard_passdown_pt2()
1209 r = dm_pool_dec_data_range(pool->pmd, m->data_block, in process_prepared_discard_passdown_pt2()
1210 m->data_block + (m->virt_end - m->virt_begin)); in process_prepared_discard_passdown_pt2()
1213 bio_io_error(m->bio); in process_prepared_discard_passdown_pt2()
1215 bio_endio(m->bio); in process_prepared_discard_passdown_pt2()
1217 cell_defer_no_holder(tc, m->cell); in process_prepared_discard_passdown_pt2()
1218 mempool_free(m, &pool->mapping_pool); in process_prepared_discard_passdown_pt2()
1228 spin_lock_irq(&pool->lock); in process_prepared()
1230 spin_unlock_irq(&pool->lock); in process_prepared()
1241 return bio->bi_iter.bi_size == in io_overlaps_block()
1242 (pool->sectors_per_block << SECTOR_SHIFT); in io_overlaps_block()
1254 *save = bio->bi_end_io; in save_and_set_endio()
1255 bio->bi_end_io = fn; in save_and_set_endio()
1260 if (pool->next_mapping) in ensure_next_mapping()
1263 pool->next_mapping = mempool_alloc(&pool->mapping_pool, GFP_ATOMIC); in ensure_next_mapping()
1265 return pool->next_mapping ? 0 : -ENOMEM; in ensure_next_mapping()
1270 struct dm_thin_new_mapping *m = pool->next_mapping; in get_next_mapping()
1272 BUG_ON(!pool->next_mapping); in get_next_mapping()
1275 INIT_LIST_HEAD(&m->list); in get_next_mapping()
1276 m->bio = NULL; in get_next_mapping()
1278 pool->next_mapping = NULL; in get_next_mapping()
1288 to.bdev = tc->pool_dev->bdev; in ll_zero()
1290 to.count = end - begin; in ll_zero()
1292 dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m); in ll_zero()
1299 struct pool *pool = tc->pool; in remap_and_issue_overwrite()
1302 h->overwrite_mapping = m; in remap_and_issue_overwrite()
1303 m->bio = bio; in remap_and_issue_overwrite()
1304 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); in remap_and_issue_overwrite()
1315 struct dm_bio_prison_cell *cell, struct bio *bio, in schedule_copy() argument
1318 struct pool *pool = tc->pool; in schedule_copy()
1321 m->tc = tc; in schedule_copy()
1322 m->virt_begin = virt_block; in schedule_copy()
1323 m->virt_end = virt_block + 1u; in schedule_copy()
1324 m->data_block = data_dest; in schedule_copy()
1325 m->cell = cell; in schedule_copy()
1332 atomic_set(&m->prepare_actions, 3); in schedule_copy()
1334 if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list)) in schedule_copy()
1348 from.bdev = origin->bdev; in schedule_copy()
1349 from.sector = data_origin * pool->sectors_per_block; in schedule_copy()
1350 from.count = len; in schedule_copy()
1352 to.bdev = tc->pool_dev->bdev; in schedule_copy()
1353 to.sector = data_dest * pool->sectors_per_block; in schedule_copy()
1354 to.count = len; in schedule_copy()
1356 dm_kcopyd_copy(pool->copier, &from, 1, &to, in schedule_copy()
1362 if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) { in schedule_copy()
1363 atomic_inc(&m->prepare_actions); in schedule_copy()
1365 data_dest * pool->sectors_per_block + len, in schedule_copy()
1366 (data_dest + 1) * pool->sectors_per_block); in schedule_copy()
1375 struct dm_bio_prison_cell *cell, struct bio *bio) in schedule_internal_copy() argument
1377 schedule_copy(tc, virt_block, tc->pool_dev, in schedule_internal_copy()
1378 data_origin, data_dest, cell, bio, in schedule_internal_copy()
1379 tc->pool->sectors_per_block); in schedule_internal_copy()
1383 dm_block_t data_block, struct dm_bio_prison_cell *cell, in schedule_zero() argument
1386 struct pool *pool = tc->pool; in schedule_zero()
1389 atomic_set(&m->prepare_actions, 1); /* no need to quiesce */ in schedule_zero()
1390 m->tc = tc; in schedule_zero()
1391 m->virt_begin = virt_block; in schedule_zero()
1392 m->virt_end = virt_block + 1u; in schedule_zero()
1393 m->data_block = data_block; in schedule_zero()
1394 m->cell = cell; in schedule_zero()
1398 * zeroing pre-existing data, we can issue the bio immediately. in schedule_zero()
1401 if (pool->pf.zero_new_blocks) { in schedule_zero()
1405 ll_zero(tc, m, data_block * pool->sectors_per_block, in schedule_zero()
1406 (data_block + 1) * pool->sectors_per_block); in schedule_zero()
1413 struct dm_bio_prison_cell *cell, struct bio *bio) in schedule_external_copy() argument
1415 struct pool *pool = tc->pool; in schedule_external_copy()
1416 sector_t virt_block_begin = virt_block * pool->sectors_per_block; in schedule_external_copy()
1417 sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block; in schedule_external_copy()
1419 if (virt_block_end <= tc->origin_size) in schedule_external_copy()
1420 schedule_copy(tc, virt_block, tc->origin_dev, in schedule_external_copy()
1421 virt_block, data_dest, cell, bio, in schedule_external_copy()
1422 pool->sectors_per_block); in schedule_external_copy()
1424 else if (virt_block_begin < tc->origin_size) in schedule_external_copy()
1425 schedule_copy(tc, virt_block, tc->origin_dev, in schedule_external_copy()
1426 virt_block, data_dest, cell, bio, in schedule_external_copy()
1427 tc->origin_size - virt_block_begin); in schedule_external_copy()
1430 schedule_zero(tc, virt_block, data_dest, cell, bio); in schedule_external_copy()
1453 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free); in check_for_metadata_space()
1473 r = dm_pool_get_free_block_count(pool->pmd, &nr_free); in check_for_data_space()
1484 * A non-zero return indicates read_only or fail_io mode.
1492 return -EINVAL; in commit()
1494 r = dm_pool_commit_metadata(pool->pmd); in commit()
1507 if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) { in check_low_water_mark()
1509 dm_device_name(pool->pool_md)); in check_low_water_mark()
1510 spin_lock_irq(&pool->lock); in check_low_water_mark()
1511 pool->low_water_triggered = true; in check_low_water_mark()
1512 spin_unlock_irq(&pool->lock); in check_low_water_mark()
1513 dm_table_event(pool->ti->table); in check_low_water_mark()
1521 struct pool *pool = tc->pool; in alloc_data_block()
1524 return -EINVAL; in alloc_data_block()
1526 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); in alloc_data_block()
1543 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); in alloc_data_block()
1551 return -ENOSPC; in alloc_data_block()
1555 r = dm_pool_alloc_data_block(pool->pmd, result); in alloc_data_block()
1557 if (r == -ENOSPC) in alloc_data_block()
1564 r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks); in alloc_data_block()
1587 struct thin_c *tc = h->tc; in retry_on_resume()
1589 spin_lock_irq(&tc->lock); in retry_on_resume()
1590 bio_list_add(&tc->retry_on_resume_list, bio); in retry_on_resume()
1591 spin_unlock_irq(&tc->lock); in retry_on_resume()
1605 return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0; in should_error_unserviceable_bio()
1623 bio->bi_status = error; in handle_unserviceable_bio()
1629 static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell) in retry_bios_on_resume() argument
1637 cell_error_with_code(pool, cell, error); in retry_bios_on_resume()
1642 cell_release(pool, cell, &bios); in retry_bios_on_resume()
1651 struct pool *pool = tc->pool; in process_discard_cell_no_passdown()
1658 m->tc = tc; in process_discard_cell_no_passdown()
1659 m->virt_begin = virt_cell->key.block_begin; in process_discard_cell_no_passdown()
1660 m->virt_end = virt_cell->key.block_end; in process_discard_cell_no_passdown()
1661 m->cell = virt_cell; in process_discard_cell_no_passdown()
1662 m->bio = virt_cell->holder; in process_discard_cell_no_passdown()
1664 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) in process_discard_cell_no_passdown()
1665 pool->process_prepared_discard(m); in process_discard_cell_no_passdown()
1671 struct pool *pool = tc->pool; in break_up_discard_bio()
1682 r = dm_thin_find_mapped_range(tc->td, begin, end, &virt_begin, &virt_end, in break_up_discard_bio()
1692 data_end = data_begin + (virt_end - virt_begin); in break_up_discard_bio()
1704 len = min_t(sector_t, data_end - data_begin, next_boundary - data_begin); in break_up_discard_bio()
1707 (void) build_key(tc->td, PHYSICAL, data_begin, data_begin + len, &data_key); in break_up_discard_bio()
1708 if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) { in break_up_discard_bio()
1719 m->tc = tc; in break_up_discard_bio()
1720 m->maybe_shared = maybe_shared; in break_up_discard_bio()
1721 m->virt_begin = virt_begin; in break_up_discard_bio()
1722 m->virt_end = virt_begin + len; in break_up_discard_bio()
1723 m->data_block = data_begin; in break_up_discard_bio()
1724 m->cell = data_cell; in break_up_discard_bio()
1725 m->bio = bio; in break_up_discard_bio()
1731 * This per-mapping bi_remaining increment is paired with in break_up_discard_bio()
1736 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) in break_up_discard_bio()
1737 pool->process_prepared_discard(m); in break_up_discard_bio()
1749 struct bio *bio = virt_cell->holder; in process_discard_cell_passdown()
1757 h->cell = virt_cell; in process_discard_cell_passdown()
1758 break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio); in process_discard_cell_passdown()
1783 if (unlikely(!build_key(tc->td, VIRTUAL, begin, end, &virt_key))) { in process_discard_bio()
1789 if (bio_detain(tc->pool, &virt_key, bio, &virt_cell)) { in process_discard_bio()
1795 * cell will never be granted. in process_discard_bio()
1800 tc->pool->process_discard_cell(tc, virt_cell); in process_discard_bio()
1806 struct dm_bio_prison_cell *cell) in break_sharing() argument
1810 struct pool *pool = tc->pool; in break_sharing()
1815 schedule_internal_copy(tc, block, lookup_result->block, in break_sharing()
1816 data_block, cell, bio); in break_sharing()
1819 case -ENOSPC: in break_sharing()
1820 retry_bios_on_resume(pool, cell); in break_sharing()
1826 cell_error(pool, cell); in break_sharing()
1832 struct dm_bio_prison_cell *cell) in __remap_and_issue_shared_cell() argument
1837 while ((bio = bio_list_pop(&cell->bios))) { in __remap_and_issue_shared_cell()
1838 if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) || in __remap_and_issue_shared_cell()
1840 bio_list_add(&info->defer_bios, bio); in __remap_and_issue_shared_cell()
1844 h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds); in __remap_and_issue_shared_cell()
1845 inc_all_io_entry(info->tc->pool, bio); in __remap_and_issue_shared_cell()
1846 bio_list_add(&info->issue_bios, bio); in __remap_and_issue_shared_cell()
1852 struct dm_bio_prison_cell *cell, in remap_and_issue_shared_cell() argument
1862 cell_visit_release(tc->pool, __remap_and_issue_shared_cell, in remap_and_issue_shared_cell()
1863 &info, cell); in remap_and_issue_shared_cell()
1878 struct pool *pool = tc->pool; in process_shared_bio()
1882 * If cell is already occupied, then sharing is already in the process in process_shared_bio()
1885 build_data_key(tc->td, lookup_result->block, &key); in process_shared_bio()
1891 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) { in process_shared_bio()
1897 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds); in process_shared_bio()
1899 remap_and_issue(tc, bio, lookup_result->block); in process_shared_bio()
1901 remap_and_issue_shared_cell(tc, data_cell, lookup_result->block); in process_shared_bio()
1902 remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block); in process_shared_bio()
1907 struct dm_bio_prison_cell *cell) in provision_block() argument
1911 struct pool *pool = tc->pool; in provision_block()
1916 if (!bio->bi_iter.bi_size) { in provision_block()
1918 cell_defer_no_holder(tc, cell); in provision_block()
1929 cell_defer_no_holder(tc, cell); in provision_block()
1937 if (tc->origin_dev) in provision_block()
1938 schedule_external_copy(tc, block, data_block, cell, bio); in provision_block()
1940 schedule_zero(tc, block, data_block, cell, bio); in provision_block()
1943 case -ENOSPC: in provision_block()
1944 retry_bios_on_resume(pool, cell); in provision_block()
1950 cell_error(pool, cell); in provision_block()
1955 static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell) in process_cell() argument
1958 struct pool *pool = tc->pool; in process_cell()
1959 struct bio *bio = cell->holder; in process_cell()
1963 if (tc->requeue_mode) { in process_cell()
1964 cell_requeue(pool, cell); in process_cell()
1968 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); in process_cell()
1972 process_shared_bio(tc, bio, block, &lookup_result, cell); in process_cell()
1976 inc_remap_and_issue_cell(tc, cell, lookup_result.block); in process_cell()
1980 case -ENODATA: in process_cell()
1981 if (bio_data_dir(bio) == READ && tc->origin_dev) { in process_cell()
1983 cell_defer_no_holder(tc, cell); in process_cell()
1985 if (bio_end_sector(bio) <= tc->origin_size) in process_cell()
1988 else if (bio->bi_iter.bi_sector < tc->origin_size) { in process_cell()
1990 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT; in process_cell()
1998 provision_block(tc, bio, block, cell); in process_cell()
2004 cell_defer_no_holder(tc, cell); in process_cell()
2012 struct pool *pool = tc->pool; in process_bio()
2014 struct dm_bio_prison_cell *cell; in process_bio() local
2018 * If cell is already occupied, then the block is already in process_bio()
2021 build_virtual_key(tc->td, block, &key); in process_bio()
2022 if (bio_detain(pool, &key, bio, &cell)) in process_bio()
2025 process_cell(tc, cell); in process_bio()
2029 struct dm_bio_prison_cell *cell) in __process_bio_read_only() argument
2036 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); in __process_bio_read_only()
2039 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) { in __process_bio_read_only()
2040 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
2041 if (cell) in __process_bio_read_only()
2042 cell_defer_no_holder(tc, cell); in __process_bio_read_only()
2044 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
2046 if (cell) in __process_bio_read_only()
2047 inc_remap_and_issue_cell(tc, cell, lookup_result.block); in __process_bio_read_only()
2051 case -ENODATA: in __process_bio_read_only()
2052 if (cell) in __process_bio_read_only()
2053 cell_defer_no_holder(tc, cell); in __process_bio_read_only()
2055 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
2059 if (tc->origin_dev) { in __process_bio_read_only()
2060 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
2072 if (cell) in __process_bio_read_only()
2073 cell_defer_no_holder(tc, cell); in __process_bio_read_only()
2084 static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell) in process_cell_read_only() argument
2086 __process_bio_read_only(tc, cell->holder, cell); in process_cell_read_only()
2099 static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell) in process_cell_success() argument
2101 cell_success(tc->pool, cell); in process_cell_success()
2104 static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell) in process_cell_fail() argument
2106 cell_error(tc->pool, cell); in process_cell_fail()
2115 return !time_in_range(jiffies, pool->last_commit_jiffies, in need_commit_due_to_time()
2116 pool->last_commit_jiffies + COMMIT_PERIOD); in need_commit_due_to_time()
2126 sector_t bi_sector = bio->bi_iter.bi_sector; in __thin_bio_rb_add()
2128 rbp = &tc->sort_bio_list.rb_node; in __thin_bio_rb_add()
2134 if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector) in __thin_bio_rb_add()
2135 rbp = &(*rbp)->rb_left; in __thin_bio_rb_add()
2137 rbp = &(*rbp)->rb_right; in __thin_bio_rb_add()
2141 rb_link_node(&pbd->rb_node, parent, rbp); in __thin_bio_rb_add()
2142 rb_insert_color(&pbd->rb_node, &tc->sort_bio_list); in __thin_bio_rb_add()
2151 for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) { in __extract_sorted_bios()
2155 bio_list_add(&tc->deferred_bio_list, bio); in __extract_sorted_bios()
2156 rb_erase(&pbd->rb_node, &tc->sort_bio_list); in __extract_sorted_bios()
2159 WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list)); in __extract_sorted_bios()
2168 bio_list_merge(&bios, &tc->deferred_bio_list); in __sort_thin_deferred_bios()
2169 bio_list_init(&tc->deferred_bio_list); in __sort_thin_deferred_bios()
2171 /* Sort deferred_bio_list using rb-tree */ in __sort_thin_deferred_bios()
2185 struct pool *pool = tc->pool; in process_thin_deferred_bios()
2189 unsigned int count = 0; in process_thin_deferred_bios() local
2191 if (tc->requeue_mode) { in process_thin_deferred_bios()
2192 error_thin_bio_list(tc, &tc->deferred_bio_list, in process_thin_deferred_bios()
2199 spin_lock_irq(&tc->lock); in process_thin_deferred_bios()
2201 if (bio_list_empty(&tc->deferred_bio_list)) { in process_thin_deferred_bios()
2202 spin_unlock_irq(&tc->lock); in process_thin_deferred_bios()
2208 bio_list_merge(&bios, &tc->deferred_bio_list); in process_thin_deferred_bios()
2209 bio_list_init(&tc->deferred_bio_list); in process_thin_deferred_bios()
2211 spin_unlock_irq(&tc->lock); in process_thin_deferred_bios()
2221 spin_lock_irq(&tc->lock); in process_thin_deferred_bios()
2222 bio_list_add(&tc->deferred_bio_list, bio); in process_thin_deferred_bios()
2223 bio_list_merge(&tc->deferred_bio_list, &bios); in process_thin_deferred_bios()
2224 spin_unlock_irq(&tc->lock); in process_thin_deferred_bios()
2229 pool->process_discard(tc, bio); in process_thin_deferred_bios()
2231 pool->process_bio(tc, bio); in process_thin_deferred_bios()
2233 if ((count++ & 127) == 0) { in process_thin_deferred_bios()
2234 throttle_work_update(&pool->throttle); in process_thin_deferred_bios()
2235 dm_pool_issue_prefetches(pool->pmd); in process_thin_deferred_bios()
2247 BUG_ON(!lhs_cell->holder); in cmp_cells()
2248 BUG_ON(!rhs_cell->holder); in cmp_cells()
2250 if (lhs_cell->holder->bi_iter.bi_sector < rhs_cell->holder->bi_iter.bi_sector) in cmp_cells()
2251 return -1; in cmp_cells()
2253 if (lhs_cell->holder->bi_iter.bi_sector > rhs_cell->holder->bi_iter.bi_sector) in cmp_cells()
2261 unsigned int count = 0; in sort_cells() local
2262 struct dm_bio_prison_cell *cell, *tmp; in sort_cells() local
2264 list_for_each_entry_safe(cell, tmp, cells, user_list) { in sort_cells()
2265 if (count >= CELL_SORT_ARRAY_SIZE) in sort_cells()
2268 pool->cell_sort_array[count++] = cell; in sort_cells()
2269 list_del(&cell->user_list); in sort_cells()
2272 sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL); in sort_cells()
2274 return count; in sort_cells()
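cmp_cells() and sort_cells() (lines 2247-2274 above) pull up to CELL_SORT_ARRAY_SIZE deferred cells into pool->cell_sort_array and sort them by the holder bio's starting sector, so the worker processes them in roughly ascending disk order. A userspace sketch of the same comparator idea using qsort(); the struct below is an illustrative stand-in, not the real dm_bio_prison_cell.

#include <stdlib.h>

/* Stand-in for a prison cell: only the holder bio's start sector matters for ordering. */
struct cell {
	unsigned long holder_sector;
};

/* qsort comparator ordering cells by ascending holder sector (cf. cmp_cells()). */
static int cmp_cells(const void *lhs, const void *rhs)
{
	const struct cell *l = *(const struct cell * const *)lhs;
	const struct cell *r = *(const struct cell * const *)rhs;

	if (l->holder_sector < r->holder_sector)
		return -1;
	if (l->holder_sector > r->holder_sector)
		return 1;
	return 0;
}

/* Sort an array of cell pointers in place, as sort_cells() does with pool->cell_sort_array. */
static void sort_cells(struct cell **cells, size_t count)
{
	qsort(cells, count, sizeof(*cells), cmp_cells);
}

Processing the cells in ascending sector order keeps the resulting I/O to the data device roughly sequential.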
2279 struct pool *pool = tc->pool; in process_thin_deferred_cells()
2281 struct dm_bio_prison_cell *cell; in process_thin_deferred_cells() local
2282 unsigned int i, j, count; in process_thin_deferred_cells() local
2286 spin_lock_irq(&tc->lock); in process_thin_deferred_cells()
2287 list_splice_init(&tc->deferred_cells, &cells); in process_thin_deferred_cells()
2288 spin_unlock_irq(&tc->lock); in process_thin_deferred_cells()
2294 count = sort_cells(tc->pool, &cells); in process_thin_deferred_cells()
2296 for (i = 0; i < count; i++) { in process_thin_deferred_cells()
2297 cell = pool->cell_sort_array[i]; in process_thin_deferred_cells()
2298 BUG_ON(!cell->holder); in process_thin_deferred_cells()
2306 for (j = i; j < count; j++) in process_thin_deferred_cells()
2307 list_add(&pool->cell_sort_array[j]->user_list, &cells); in process_thin_deferred_cells()
2309 spin_lock_irq(&tc->lock); in process_thin_deferred_cells()
2310 list_splice(&cells, &tc->deferred_cells); in process_thin_deferred_cells()
2311 spin_unlock_irq(&tc->lock); in process_thin_deferred_cells()
2315 if (bio_op(cell->holder) == REQ_OP_DISCARD) in process_thin_deferred_cells()
2316 pool->process_discard_cell(tc, cell); in process_thin_deferred_cells()
2318 pool->process_cell(tc, cell); in process_thin_deferred_cells()
2337 if (!list_empty(&pool->active_thins)) { in get_first_thin()
2338 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list); in get_first_thin()
2351 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) { in get_next_thin()
2383 spin_lock_irq(&pool->lock); in process_deferred_bios()
2384 bio_list_merge(&bios, &pool->deferred_flush_bios); in process_deferred_bios()
2385 bio_list_init(&pool->deferred_flush_bios); in process_deferred_bios()
2387 bio_list_merge(&bio_completions, &pool->deferred_flush_completions); in process_deferred_bios()
2388 bio_list_init(&pool->deferred_flush_completions); in process_deferred_bios()
2389 spin_unlock_irq(&pool->lock); in process_deferred_bios()
2392 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) in process_deferred_bios()
2402 pool->last_commit_jiffies = jiffies; in process_deferred_bios()
2412 if (bio->bi_opf & REQ_PREFLUSH) in process_deferred_bios()
2423 throttle_work_start(&pool->throttle); in do_worker()
2424 dm_pool_issue_prefetches(pool->pmd); in do_worker()
2425 throttle_work_update(&pool->throttle); in do_worker()
2426 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping); in do_worker()
2427 throttle_work_update(&pool->throttle); in do_worker()
2428 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard); in do_worker()
2429 throttle_work_update(&pool->throttle); in do_worker()
2430 process_prepared(pool, &pool->prepared_discards_pt2, &pool->process_prepared_discard_pt2); in do_worker()
2431 throttle_work_update(&pool->throttle); in do_worker()
2433 throttle_work_complete(&pool->throttle); in do_worker()
2445 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); in do_waker()
2458 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { in do_no_space_timeout()
2459 pool->pf.error_if_no_space = true; in do_no_space_timeout()
2465 /*----------------------------------------------------------------*/
2479 complete(&pw->complete); in pool_work_complete()
2485 INIT_WORK_ONSTACK(&pw->worker, fn); in pool_work_wait()
2486 init_completion(&pw->complete); in pool_work_wait()
2487 queue_work(pool->wq, &pw->worker); in pool_work_wait()
2488 wait_for_completion(&pw->complete); in pool_work_wait()
2491 /*----------------------------------------------------------------*/
2507 w->tc->requeue_mode = true; in do_noflush_start()
2508 requeue_io(w->tc); in do_noflush_start()
2509 pool_work_complete(&w->pw); in do_noflush_start()
2516 w->tc->requeue_mode = false; in do_noflush_stop()
2517 pool_work_complete(&w->pw); in do_noflush_stop()
2525 pool_work_wait(&w.pw, tc->pool, fn); in noflush_work()
2528 /*----------------------------------------------------------------*/
2532 struct pool_c *pt = pool->ti->private; in set_discard_callbacks()
2534 if (pt->adjusted_pf.discard_passdown) { in set_discard_callbacks()
2535 pool->process_discard_cell = process_discard_cell_passdown; in set_discard_callbacks()
2536 pool->process_prepared_discard = process_prepared_discard_passdown_pt1; in set_discard_callbacks()
2537 pool->process_prepared_discard_pt2 = process_prepared_discard_passdown_pt2; in set_discard_callbacks()
2539 pool->process_discard_cell = process_discard_cell_no_passdown; in set_discard_callbacks()
2540 pool->process_prepared_discard = process_prepared_discard_no_passdown; in set_discard_callbacks()
2546 struct pool_c *pt = pool->ti->private; in set_pool_mode()
2547 bool needs_check = dm_pool_metadata_needs_check(pool->pmd); in set_pool_mode()
2557 dm_device_name(pool->pool_md)); in set_pool_mode()
2573 dm_pool_metadata_read_only(pool->pmd); in set_pool_mode()
2574 pool->process_bio = process_bio_fail; in set_pool_mode()
2575 pool->process_discard = process_bio_fail; in set_pool_mode()
2576 pool->process_cell = process_cell_fail; in set_pool_mode()
2577 pool->process_discard_cell = process_cell_fail; in set_pool_mode()
2578 pool->process_prepared_mapping = process_prepared_mapping_fail; in set_pool_mode()
2579 pool->process_prepared_discard = process_prepared_discard_fail; in set_pool_mode()
2586 dm_pool_metadata_read_only(pool->pmd); in set_pool_mode()
2587 pool->process_bio = process_bio_read_only; in set_pool_mode()
2588 pool->process_discard = process_bio_success; in set_pool_mode()
2589 pool->process_cell = process_cell_read_only; in set_pool_mode()
2590 pool->process_discard_cell = process_cell_success; in set_pool_mode()
2591 pool->process_prepared_mapping = process_prepared_mapping_fail; in set_pool_mode()
2592 pool->process_prepared_discard = process_prepared_discard_success; in set_pool_mode()
2606 pool->out_of_data_space = true; in set_pool_mode()
2607 pool->process_bio = process_bio_read_only; in set_pool_mode()
2608 pool->process_discard = process_discard_bio; in set_pool_mode()
2609 pool->process_cell = process_cell_read_only; in set_pool_mode()
2610 pool->process_prepared_mapping = process_prepared_mapping; in set_pool_mode()
2613 if (!pool->pf.error_if_no_space && no_space_timeout) in set_pool_mode()
2614 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout); in set_pool_mode()
2619 cancel_delayed_work_sync(&pool->no_space_timeout); in set_pool_mode()
2620 pool->out_of_data_space = false; in set_pool_mode()
2621 pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space; in set_pool_mode()
2622 dm_pool_metadata_read_write(pool->pmd); in set_pool_mode()
2623 pool->process_bio = process_bio; in set_pool_mode()
2624 pool->process_discard = process_discard_bio; in set_pool_mode()
2625 pool->process_cell = process_cell; in set_pool_mode()
2626 pool->process_prepared_mapping = process_prepared_mapping; in set_pool_mode()
2631 pool->pf.mode = new_mode; in set_pool_mode()
2636 pt->adjusted_pf.mode = new_mode; in set_pool_mode()
2644 const char *dev_name = dm_device_name(pool->pool_md); in abort_transaction()
2647 if (dm_pool_abort_metadata(pool->pmd)) { in abort_transaction()
2652 if (dm_pool_metadata_set_needs_check(pool->pmd)) { in abort_transaction()
2661 dm_device_name(pool->pool_md), op, r); in metadata_operation_failed()
2667 /*----------------------------------------------------------------*/
2678 struct pool *pool = tc->pool; in thin_defer_bio()
2680 spin_lock_irq(&tc->lock); in thin_defer_bio()
2681 bio_list_add(&tc->deferred_bio_list, bio); in thin_defer_bio()
2682 spin_unlock_irq(&tc->lock); in thin_defer_bio()
2689 struct pool *pool = tc->pool; in thin_defer_bio_with_throttle()
2691 throttle_lock(&pool->throttle); in thin_defer_bio_with_throttle()
2693 throttle_unlock(&pool->throttle); in thin_defer_bio_with_throttle()
2696 static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell) in thin_defer_cell() argument
2698 struct pool *pool = tc->pool; in thin_defer_cell()
2700 throttle_lock(&pool->throttle); in thin_defer_cell()
2701 spin_lock_irq(&tc->lock); in thin_defer_cell()
2702 list_add_tail(&cell->user_list, &tc->deferred_cells); in thin_defer_cell()
2703 spin_unlock_irq(&tc->lock); in thin_defer_cell()
2704 throttle_unlock(&pool->throttle); in thin_defer_cell()
2713 h->tc = tc; in thin_hook_bio()
2714 h->shared_read_entry = NULL; in thin_hook_bio()
2715 h->all_io_entry = NULL; in thin_hook_bio()
2716 h->overwrite_mapping = NULL; in thin_hook_bio()
2717 h->cell = NULL; in thin_hook_bio()
2721 * Non-blocking function called from the thin target's map function.
2726 struct thin_c *tc = ti->private; in thin_bio_map()
2728 struct dm_thin_device *td = tc->td; in thin_bio_map()
2735 if (tc->requeue_mode) { in thin_bio_map()
2736 bio->bi_status = BLK_STS_DM_REQUEUE; in thin_bio_map()
2741 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_bio_map()
2746 if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) { in thin_bio_map()
2752 * We must hold the virtual cell before doing the lookup, otherwise in thin_bio_map()
2755 build_virtual_key(tc->td, block, &key); in thin_bio_map()
2756 if (bio_detain(tc->pool, &key, bio, &virt_cell)) in thin_bio_map()
2785 build_data_key(tc->td, result.block, &key); in thin_bio_map()
2786 if (bio_detain(tc->pool, &key, bio, &data_cell)) { in thin_bio_map()
2791 inc_all_io_entry(tc->pool, bio); in thin_bio_map()
2798 case -ENODATA: in thin_bio_map()
2799 case -EWOULDBLOCK: in thin_bio_map()
2806 * dm_thin_find_block can fail with -EINVAL if the in thin_bio_map()
2807 * pool is switched to fail-io mode. in thin_bio_map()
2820 list_for_each_entry_rcu(tc, &pool->active_thins, list) { in requeue_bios()
2821 spin_lock_irq(&tc->lock); in requeue_bios()
2822 bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list); in requeue_bios()
2823 bio_list_init(&tc->retry_on_resume_list); in requeue_bios()
2824 spin_unlock_irq(&tc->lock); in requeue_bios()
2830 *--------------------------------------------------------------
2832 *--------------------------------------------------------------
2845 struct pool *pool = pt->pool; in disable_discard_passdown_if_not_supported()
2846 struct block_device *data_bdev = pt->data_dev->bdev; in disable_discard_passdown_if_not_supported()
2847 struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits; in disable_discard_passdown_if_not_supported()
2850 if (!pt->adjusted_pf.discard_passdown) in disable_discard_passdown_if_not_supported()
2853 if (!bdev_max_discard_sectors(pt->data_dev->bdev)) in disable_discard_passdown_if_not_supported()
2856 else if (data_limits->max_discard_sectors < pool->sectors_per_block) in disable_discard_passdown_if_not_supported()
2861 pt->adjusted_pf.discard_passdown = false; in disable_discard_passdown_if_not_supported()
2867 struct pool_c *pt = ti->private; in bind_control_target()
2873 enum pool_mode new_mode = pt->adjusted_pf.mode; in bind_control_target()
2880 pt->adjusted_pf.mode = old_mode; in bind_control_target()
2882 pool->ti = ti; in bind_control_target()
2883 pool->pf = pt->adjusted_pf; in bind_control_target()
2884 pool->low_water_blocks = pt->low_water_blocks; in bind_control_target()
2893 if (pool->ti == ti) in unbind_control_target()
2894 pool->ti = NULL; in unbind_control_target()
2898 *--------------------------------------------------------------
2900 *--------------------------------------------------------------
2905 pf->mode = PM_WRITE; in pool_features_init()
2906 pf->zero_new_blocks = true; in pool_features_init()
2907 pf->discard_enabled = true; in pool_features_init()
2908 pf->discard_passdown = true; in pool_features_init()
2909 pf->error_if_no_space = false; in pool_features_init()
2916 vfree(pool->cell_sort_array); in __pool_destroy()
2917 if (dm_pool_metadata_close(pool->pmd) < 0) in __pool_destroy()
2920 dm_bio_prison_destroy(pool->prison); in __pool_destroy()
2921 dm_kcopyd_client_destroy(pool->copier); in __pool_destroy()
2923 cancel_delayed_work_sync(&pool->waker); in __pool_destroy()
2924 cancel_delayed_work_sync(&pool->no_space_timeout); in __pool_destroy()
2925 if (pool->wq) in __pool_destroy()
2926 destroy_workqueue(pool->wq); in __pool_destroy()
2928 if (pool->next_mapping) in __pool_destroy()
2929 mempool_free(pool->next_mapping, &pool->mapping_pool); in __pool_destroy()
2930 mempool_exit(&pool->mapping_pool); in __pool_destroy()
2931 dm_deferred_set_destroy(pool->shared_read_ds); in __pool_destroy()
2932 dm_deferred_set_destroy(pool->all_io_ds); in __pool_destroy()
2959 err_p = ERR_PTR(-ENOMEM); in pool_create()
2963 pool->pmd = pmd; in pool_create()
2964 pool->sectors_per_block = block_size; in pool_create()
2965 if (block_size & (block_size - 1)) in pool_create()
2966 pool->sectors_per_block_shift = -1; in pool_create()
2968 pool->sectors_per_block_shift = __ffs(block_size); in pool_create()
2969 pool->low_water_blocks = 0; in pool_create()
2970 pool_features_init(&pool->pf); in pool_create()
2971 pool->prison = dm_bio_prison_create(); in pool_create()
2972 if (!pool->prison) { in pool_create()
2974 err_p = ERR_PTR(-ENOMEM); in pool_create()
2978 pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); in pool_create()
2979 if (IS_ERR(pool->copier)) { in pool_create()
2980 r = PTR_ERR(pool->copier); in pool_create()
2990 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); in pool_create()
2991 if (!pool->wq) { in pool_create()
2993 err_p = ERR_PTR(-ENOMEM); in pool_create()
2997 throttle_init(&pool->throttle); in pool_create()
2998 INIT_WORK(&pool->worker, do_worker); in pool_create()
2999 INIT_DELAYED_WORK(&pool->waker, do_waker); in pool_create()
3000 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); in pool_create()
3001 spin_lock_init(&pool->lock); in pool_create()
3002 bio_list_init(&pool->deferred_flush_bios); in pool_create()
3003 bio_list_init(&pool->deferred_flush_completions); in pool_create()
3004 INIT_LIST_HEAD(&pool->prepared_mappings); in pool_create()
3005 INIT_LIST_HEAD(&pool->prepared_discards); in pool_create()
3006 INIT_LIST_HEAD(&pool->prepared_discards_pt2); in pool_create()
3007 INIT_LIST_HEAD(&pool->active_thins); in pool_create()
3008 pool->low_water_triggered = false; in pool_create()
3009 pool->suspended = true; in pool_create()
3010 pool->out_of_data_space = false; in pool_create()
3012 pool->shared_read_ds = dm_deferred_set_create(); in pool_create()
3013 if (!pool->shared_read_ds) { in pool_create()
3015 err_p = ERR_PTR(-ENOMEM); in pool_create()
3019 pool->all_io_ds = dm_deferred_set_create(); in pool_create()
3020 if (!pool->all_io_ds) { in pool_create()
3022 err_p = ERR_PTR(-ENOMEM); in pool_create()
3026 pool->next_mapping = NULL; in pool_create()
3027 r = mempool_init_slab_pool(&pool->mapping_pool, MAPPING_POOL_SIZE, in pool_create()
3035 pool->cell_sort_array = in pool_create()
3037 sizeof(*pool->cell_sort_array))); in pool_create()
3038 if (!pool->cell_sort_array) { in pool_create()
3039 *error = "Error allocating cell sort array"; in pool_create()
3040 err_p = ERR_PTR(-ENOMEM); in pool_create()
3044 pool->ref_count = 1; in pool_create()
3045 pool->last_commit_jiffies = jiffies; in pool_create()
3046 pool->pool_md = pool_md; in pool_create()
3047 pool->md_dev = metadata_dev; in pool_create()
3048 pool->data_dev = data_dev; in pool_create()
3054 mempool_exit(&pool->mapping_pool); in pool_create()
3056 dm_deferred_set_destroy(pool->all_io_ds); in pool_create()
3058 dm_deferred_set_destroy(pool->shared_read_ds); in pool_create()
3060 destroy_workqueue(pool->wq); in pool_create()
3062 dm_kcopyd_client_destroy(pool->copier); in pool_create()
3064 dm_bio_prison_destroy(pool->prison); in pool_create()
3077 pool->ref_count++; in __pool_inc()
3083 BUG_ON(!pool->ref_count); in __pool_dec()
3084 if (!--pool->ref_count) in __pool_dec()
3097 if (pool->pool_md != pool_md) { in __pool_find()
3099 return ERR_PTR(-EBUSY); in __pool_find()
3101 if (pool->data_dev != data_dev) { in __pool_find()
3103 return ERR_PTR(-EBUSY); in __pool_find()
3110 if (pool->md_dev != metadata_dev || pool->data_dev != data_dev) { in __pool_find()
3112 return ERR_PTR(-EINVAL); in __pool_find()
3126 *--------------------------------------------------------------
3128 *--------------------------------------------------------------
3132 struct pool_c *pt = ti->private; in pool_dtr()
3136 unbind_control_target(pt->pool, ti); in pool_dtr()
3137 __pool_dec(pt->pool); in pool_dtr()
3138 dm_put_device(ti, pt->metadata_dev); in pool_dtr()
3139 dm_put_device(ti, pt->data_dev); in pool_dtr()
3159 if (!as->argc) in parse_pool_features()
3162 r = dm_read_arg_group(_args, as, &argc, &ti->error); in parse_pool_features()
3164 return -EINVAL; in parse_pool_features()
3168 argc--; in parse_pool_features()
3171 pf->zero_new_blocks = false; in parse_pool_features()
3174 pf->discard_enabled = false; in parse_pool_features()
3177 pf->discard_passdown = false; in parse_pool_features()
3180 pf->mode = PM_READ_ONLY; in parse_pool_features()
3183 pf->error_if_no_space = true; in parse_pool_features()
3186 ti->error = "Unrecognised pool feature requested"; in parse_pool_features()
3187 r = -EINVAL; in parse_pool_features()
3200 dm_device_name(pool->pool_md)); in metadata_low_callback()
3202 dm_table_event(pool->ti->table); in metadata_low_callback()
3209 * properly written to non-volatile storage and won't be lost in case of a
3220 return blkdev_issue_flush(pool->data_dev); in metadata_pre_commit_callback()
3269 dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4; in calc_metadata_threshold()
3275 * thin-pool <metadata dev> <data dev>
3281 * skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
3306 ti->error = "Invalid argument count"; in pool_ctr()
3307 r = -EINVAL; in pool_ctr()
3316 ti->error = "Error setting metadata or data device"; in pool_ctr()
3317 r = -EINVAL; in pool_ctr()
3335 ti->error = "Error opening metadata block device"; in pool_ctr()
3338 warn_if_metadata_device_too_big(metadata_dev->bdev); in pool_ctr()
3342 ti->error = "Error getting data device"; in pool_ctr()
3349 block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) { in pool_ctr()
3350 ti->error = "Invalid block size"; in pool_ctr()
3351 r = -EINVAL; in pool_ctr()
3356 ti->error = "Invalid low water mark"; in pool_ctr()
3357 r = -EINVAL; in pool_ctr()
3363 r = -ENOMEM; in pool_ctr()
3367 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, data_dev->bdev, in pool_ctr()
3368 block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created); in pool_ctr()
3380 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) { in pool_ctr()
3381 ti->error = "Discard support cannot be disabled once enabled"; in pool_ctr()
3382 r = -EINVAL; in pool_ctr()
3386 pt->pool = pool; in pool_ctr()
3387 pt->ti = ti; in pool_ctr()
3388 pt->metadata_dev = metadata_dev; in pool_ctr()
3389 pt->data_dev = data_dev; in pool_ctr()
3390 pt->low_water_blocks = low_water_blocks; in pool_ctr()
3391 pt->adjusted_pf = pt->requested_pf = pf; in pool_ctr()
3392 ti->num_flush_bios = 1; in pool_ctr()
3393 ti->limit_swap_bios = true; in pool_ctr()
3401 ti->num_discard_bios = 1; in pool_ctr()
3407 ti->discards_supported = true; in pool_ctr()
3408 ti->max_discard_granularity = true; in pool_ctr()
3410 ti->private = pt; in pool_ctr()
3412 r = dm_pool_register_metadata_threshold(pt->pool->pmd, in pool_ctr()
3417 ti->error = "Error registering metadata threshold"; in pool_ctr()
3421 dm_pool_register_pre_commit_callback(pool->pmd, in pool_ctr()
3444 struct pool_c *pt = ti->private; in pool_map()
3445 struct pool *pool = pt->pool; in pool_map()
3448 * As this is a singleton target, ti->begin is always zero. in pool_map()
3450 spin_lock_irq(&pool->lock); in pool_map()
3451 bio_set_dev(bio, pt->data_dev->bdev); in pool_map()
3452 spin_unlock_irq(&pool->lock); in pool_map()
3460 struct pool_c *pt = ti->private; in maybe_resize_data_dev()
3461 struct pool *pool = pt->pool; in maybe_resize_data_dev()
3462 sector_t data_size = ti->len; in maybe_resize_data_dev()
3467 (void) sector_div(data_size, pool->sectors_per_block); in maybe_resize_data_dev()
3469 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size); in maybe_resize_data_dev()
3472 dm_device_name(pool->pool_md)); in maybe_resize_data_dev()
3478 dm_device_name(pool->pool_md), in maybe_resize_data_dev()
3480 return -EINVAL; in maybe_resize_data_dev()
3483 if (dm_pool_metadata_needs_check(pool->pmd)) { in maybe_resize_data_dev()
3485 dm_device_name(pool->pool_md)); in maybe_resize_data_dev()
3491 dm_device_name(pool->pool_md), in maybe_resize_data_dev()
3493 r = dm_pool_resize_data_dev(pool->pmd, data_size); in maybe_resize_data_dev()
3508 struct pool_c *pt = ti->private; in maybe_resize_metadata_dev()
3509 struct pool *pool = pt->pool; in maybe_resize_metadata_dev()
3514 metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev); in maybe_resize_metadata_dev()
3516 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size); in maybe_resize_metadata_dev()
3519 dm_device_name(pool->pool_md)); in maybe_resize_metadata_dev()
3525 dm_device_name(pool->pool_md), in maybe_resize_metadata_dev()
3527 return -EINVAL; in maybe_resize_metadata_dev()
3530 if (dm_pool_metadata_needs_check(pool->pmd)) { in maybe_resize_metadata_dev()
3532 dm_device_name(pool->pool_md)); in maybe_resize_metadata_dev()
3536 warn_if_metadata_device_too_big(pool->md_dev); in maybe_resize_metadata_dev()
3538 dm_device_name(pool->pool_md), in maybe_resize_metadata_dev()
3544 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); in maybe_resize_metadata_dev()
3563 * -and-
3571 struct pool_c *pt = ti->private; in pool_preresume()
3572 struct pool *pool = pt->pool; in pool_preresume()
3593 * When a thin-pool is PM_FAIL, it cannot be rebuilt if in pool_preresume()
3610 dm_internal_suspend_noflush(tc->thin_md); in pool_suspend_active_thins()
3622 dm_internal_resume(tc->thin_md); in pool_resume_active_thins()
3629 struct pool_c *pt = ti->private; in pool_resume()
3630 struct pool *pool = pt->pool; in pool_resume()
3639 spin_lock_irq(&pool->lock); in pool_resume()
3640 pool->low_water_triggered = false; in pool_resume()
3641 pool->suspended = false; in pool_resume()
3642 spin_unlock_irq(&pool->lock); in pool_resume()
3644 do_waker(&pool->waker.work); in pool_resume()
3649 struct pool_c *pt = ti->private; in pool_presuspend()
3650 struct pool *pool = pt->pool; in pool_presuspend()
3652 spin_lock_irq(&pool->lock); in pool_presuspend()
3653 pool->suspended = true; in pool_presuspend()
3654 spin_unlock_irq(&pool->lock); in pool_presuspend()
3661 struct pool_c *pt = ti->private; in pool_presuspend_undo()
3662 struct pool *pool = pt->pool; in pool_presuspend_undo()
3666 spin_lock_irq(&pool->lock); in pool_presuspend_undo()
3667 pool->suspended = false; in pool_presuspend_undo()
3668 spin_unlock_irq(&pool->lock); in pool_presuspend_undo()
3673 struct pool_c *pt = ti->private; in pool_postsuspend()
3674 struct pool *pool = pt->pool; in pool_postsuspend()
3676 cancel_delayed_work_sync(&pool->waker); in pool_postsuspend()
3677 cancel_delayed_work_sync(&pool->no_space_timeout); in pool_postsuspend()
3678 flush_workqueue(pool->wq); in pool_postsuspend()
3687 return -EINVAL; in check_arg_count()
3702 return -EINVAL; in read_dev_id()
3718 r = dm_pool_create_thin(pool->pmd, dev_id); in process_create_thin_mesg()
3720 DMWARN("Creation of new thinly-provisioned device with id %s failed.", in process_create_thin_mesg()
3746 r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id); in process_create_snap_mesg()
3769 r = dm_pool_delete_thin_device(pool->pmd, dev_id); in process_delete_mesg()
3787 return -EINVAL; in process_set_transaction_id_mesg()
3792 return -EINVAL; in process_set_transaction_id_mesg()
3795 r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id); in process_set_transaction_id_mesg()
3815 r = dm_pool_reserve_metadata_snap(pool->pmd); in process_reserve_metadata_snap_mesg()
3830 r = dm_pool_release_metadata_snap(pool->pmd); in process_release_metadata_snap_mesg()
3849 int r = -EINVAL; in pool_message()
3850 struct pool_c *pt = ti->private; in pool_message()
3851 struct pool *pool = pt->pool; in pool_message()
3855 dm_device_name(pool->pool_md)); in pool_message()
3856 return -EOPNOTSUPP; in pool_message()
3889 unsigned int count = !pf->zero_new_blocks + !pf->discard_enabled + in emit_flags() local
3890 !pf->discard_passdown + (pf->mode == PM_READ_ONLY) + in emit_flags()
3891 pf->error_if_no_space; in emit_flags()
3892 DMEMIT("%u ", count); in emit_flags()
3894 if (!pf->zero_new_blocks) in emit_flags()
3897 if (!pf->discard_enabled) in emit_flags()
3900 if (!pf->discard_passdown) in emit_flags()
3903 if (pf->mode == PM_READ_ONLY) in emit_flags()
3906 if (pf->error_if_no_space) in emit_flags()
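	/*
	 * Example (assuming the usual thin-pool feature keywords emitted by
	 * the elided DMEMIT calls): a pool configured with block zeroing
	 * disabled and error_if_no_space set, but with discards and
	 * passdown enabled in write mode, gives count = 2 and emits
	 * "2 skip_block_zeroing error_if_no_space".
	 */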
3930 struct pool_c *pt = ti->private; in pool_status()
3931 struct pool *pool = pt->pool; in pool_status()
3940 /* Commit to ensure statistics aren't out-of-date */ in pool_status()
3944 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id); in pool_status()
3947 dm_device_name(pool->pool_md), r); in pool_status()
3951 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata); in pool_status()
3954 dm_device_name(pool->pool_md), r); in pool_status()
3958 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata); in pool_status()
3961 dm_device_name(pool->pool_md), r); in pool_status()
3965 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data); in pool_status()
3968 dm_device_name(pool->pool_md), r); in pool_status()
3972 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data); in pool_status()
3975 dm_device_name(pool->pool_md), r); in pool_status()
3979 r = dm_pool_get_metadata_snap(pool->pmd, &held_root); in pool_status()
3982 dm_device_name(pool->pool_md), r); in pool_status()
3988 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata), in pool_status()
3990 (unsigned long long)(nr_blocks_data - nr_free_blocks_data), in pool_status()
3996 DMEMIT("- "); in pool_status()
4006 if (!pool->pf.discard_enabled) in pool_status()
4008 else if (pool->pf.discard_passdown) in pool_status()
4013 if (pool->pf.error_if_no_space) in pool_status()
4018 if (dm_pool_metadata_needs_check(pool->pmd)) in pool_status()
4021 DMEMIT("- "); in pool_status()
4029 format_dev_t(buf, pt->metadata_dev->bdev->bd_dev), in pool_status()
4030 format_dev_t(buf2, pt->data_dev->bdev->bd_dev), in pool_status()
4031 (unsigned long)pool->sectors_per_block, in pool_status()
4032 (unsigned long long)pt->low_water_blocks); in pool_status()
4033 emit_flags(&pt->requested_pf, result, sz, maxlen); in pool_status()
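	/*
	 * Illustrative output (all values hypothetical): STATUSTYPE_INFO
	 * might produce
	 * "1 933/4161600 10240/1638400 - rw discard_passdown queue_if_no_space - 1024"
	 * i.e. transaction id, used/total metadata blocks, used/total data
	 * blocks, held metadata root, pool mode, discard handling, no-space
	 * policy, needs_check flag and metadata low-water mark, while the
	 * STATUSTYPE_TABLE branch above re-emits the constructor line, e.g.
	 * "253:1 253:2 128 32768 1 skip_block_zeroing".
	 */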
4049 struct pool_c *pt = ti->private; in pool_iterate_devices()
4051 return fn(ti, pt->data_dev, 0, ti->len, data); in pool_iterate_devices()
4056 struct pool_c *pt = ti->private; in pool_io_hints()
4057 struct pool *pool = pt->pool; in pool_io_hints()
4058 sector_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT; in pool_io_hints()
4061 * If max_sectors is smaller than pool->sectors_per_block adjust it in pool_io_hints()
4062 * to the highest possible power-of-2 factor of pool->sectors_per_block. in pool_io_hints()
4064 * device that has a full stripe width that matches pool->sectors_per_block in pool_io_hints()
4065 * -- because even though partial RAID stripe-sized IOs will be issued to a in pool_io_hints()
4069 if (limits->max_sectors < pool->sectors_per_block) { in pool_io_hints()
4070 while (!is_factor(pool->sectors_per_block, limits->max_sectors)) { in pool_io_hints()
4071 if ((limits->max_sectors & (limits->max_sectors - 1)) == 0) in pool_io_hints()
4072 limits->max_sectors--; in pool_io_hints()
4073 limits->max_sectors = rounddown_pow_of_two(limits->max_sectors); in pool_io_hints()
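	/*
	 * Worked example of the loop above (hypothetical values): with
	 * pool->sectors_per_block = 384 and an initial max_sectors of 256,
	 * 384 % 256 != 0 so the loop runs; 256 is already a power of two,
	 * so it is decremented to 255 and then rounded down to 128.  Since
	 * 384 % 128 == 0 the loop stops, leaving max_sectors at 128, the
	 * largest power-of-2 factor of the block size not exceeding the
	 * original limit.
	 */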
4078 * If the system-determined stacked limits are compatible with the in pool_io_hints()
4081 if (io_opt_sectors < pool->sectors_per_block || in pool_io_hints()
4082 !is_factor(io_opt_sectors, pool->sectors_per_block)) { in pool_io_hints()
4083 if (is_factor(pool->sectors_per_block, limits->max_sectors)) in pool_io_hints()
4084 blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT); in pool_io_hints()
4086 blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT); in pool_io_hints()
4087 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT); in pool_io_hints()
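	/*
	 * Example (hypothetical stacked limits): if the devices below
	 * report io_opt of 192 KiB but the pool uses 256 KiB blocks, io_opt
	 * is smaller than the block size (and not a multiple of it), so
	 * io_opt is overridden to the 256 KiB block size; io_min is set to
	 * max_sectors when that still divides the block size, otherwise to
	 * the block size as well.
	 */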
4091 * pt->adjusted_pf is a staging area for the actual features to use. in pool_io_hints()
4096 if (pt->adjusted_pf.discard_enabled) { in pool_io_hints()
4098 if (!pt->adjusted_pf.discard_passdown) in pool_io_hints()
4099 limits->max_discard_sectors = 0; in pool_io_hints()
4109 limits->discard_granularity = 0; in pool_io_hints()
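	/*
	 * With discards disabled altogether, leaving discard_granularity
	 * untouched would let the block layer stack discard support up from
	 * the pool's data device, so it is explicitly zeroed here instead.
	 */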
4114 .name = "thin-pool",
4134  *--------------------------------------------------------------
4135  * Thin target methods
4136  *--------------------------------------------------------------
4140 refcount_inc(&tc->refcount); in thin_get()
4145 if (refcount_dec_and_test(&tc->refcount)) in thin_put()
4146 complete(&tc->can_destroy); in thin_put()
4151 struct thin_c *tc = ti->private; in thin_dtr()
4153 spin_lock_irq(&tc->pool->lock); in thin_dtr()
4154 list_del_rcu(&tc->list); in thin_dtr()
4155 spin_unlock_irq(&tc->pool->lock); in thin_dtr()
4159 wait_for_completion(&tc->can_destroy); in thin_dtr()
4163 __pool_dec(tc->pool); in thin_dtr()
4164 dm_pool_close_thin_device(tc->td); in thin_dtr()
4165 dm_put_device(ti, tc->pool_dev); in thin_dtr()
4166 if (tc->origin_dev) in thin_dtr()
4167 dm_put_device(ti, tc->origin_dev); in thin_dtr()
4195 ti->error = "Invalid argument count"; in thin_ctr()
4196 r = -EINVAL; in thin_ctr()
4200 tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL); in thin_ctr()
4202 ti->error = "Out of memory"; in thin_ctr()
4203 r = -ENOMEM; in thin_ctr()
4206 tc->thin_md = dm_table_get_md(ti->table); in thin_ctr()
4207 spin_lock_init(&tc->lock); in thin_ctr()
4208 INIT_LIST_HEAD(&tc->deferred_cells); in thin_ctr()
4209 bio_list_init(&tc->deferred_bio_list); in thin_ctr()
4210 bio_list_init(&tc->retry_on_resume_list); in thin_ctr()
4211 tc->sort_bio_list = RB_ROOT; in thin_ctr()
4215 ti->error = "Error setting origin device"; in thin_ctr()
4216 r = -EINVAL; in thin_ctr()
4222 ti->error = "Error opening origin device"; in thin_ctr()
4225 tc->origin_dev = origin_dev; in thin_ctr()
4228 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev); in thin_ctr()
4230 ti->error = "Error opening pool device"; in thin_ctr()
4233 tc->pool_dev = pool_dev; in thin_ctr()
4235 if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) { in thin_ctr()
4236 ti->error = "Invalid device id"; in thin_ctr()
4237 r = -EINVAL; in thin_ctr()
4241 pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev); in thin_ctr()
4243 ti->error = "Couldn't get pool mapped device"; in thin_ctr()
4244 r = -EINVAL; in thin_ctr()
4248 tc->pool = __pool_table_lookup(pool_md); in thin_ctr()
4249 if (!tc->pool) { in thin_ctr()
4250 ti->error = "Couldn't find pool object"; in thin_ctr()
4251 r = -EINVAL; in thin_ctr()
4254 __pool_inc(tc->pool); in thin_ctr()
4256 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_ctr()
4257 		ti->error = "Couldn't open thin device; pool is in fail mode"; in thin_ctr()
4258 r = -EINVAL; in thin_ctr()
4262 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td); in thin_ctr()
4264 ti->error = "Couldn't open thin internal device"; in thin_ctr()
4268 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block); in thin_ctr()
4272 ti->num_flush_bios = 1; in thin_ctr()
4273 ti->limit_swap_bios = true; in thin_ctr()
4274 ti->flush_supported = true; in thin_ctr()
4275 ti->accounts_remapped_io = true; in thin_ctr()
4276 ti->per_io_data_size = sizeof(struct dm_thin_endio_hook); in thin_ctr()
4279 if (tc->pool->pf.discard_enabled) { in thin_ctr()
4280 ti->discards_supported = true; in thin_ctr()
4281 ti->num_discard_bios = 1; in thin_ctr()
4282 ti->max_discard_granularity = true; in thin_ctr()
4287 spin_lock_irq(&tc->pool->lock); in thin_ctr()
4288 if (tc->pool->suspended) { in thin_ctr()
4289 spin_unlock_irq(&tc->pool->lock); in thin_ctr()
4291 ti->error = "Unable to activate thin device while pool is suspended"; in thin_ctr()
4292 r = -EINVAL; in thin_ctr()
4295 refcount_set(&tc->refcount, 1); in thin_ctr()
4296 init_completion(&tc->can_destroy); in thin_ctr()
4297 list_add_tail_rcu(&tc->list, &tc->pool->active_thins); in thin_ctr()
4298 spin_unlock_irq(&tc->pool->lock); in thin_ctr()
4312 dm_pool_close_thin_device(tc->td); in thin_ctr()
4314 __pool_dec(tc->pool); in thin_ctr()
4318 dm_put_device(ti, tc->pool_dev); in thin_ctr()
4320 if (tc->origin_dev) in thin_ctr()
4321 dm_put_device(ti, tc->origin_dev); in thin_ctr()
4332 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); in thin_map()
4344 struct pool *pool = h->tc->pool; in thin_endio()
4346 if (h->shared_read_entry) { in thin_endio()
4348 dm_deferred_entry_dec(h->shared_read_entry, &work); in thin_endio()
4350 spin_lock_irqsave(&pool->lock, flags); in thin_endio()
4352 list_del(&m->list); in thin_endio()
4355 spin_unlock_irqrestore(&pool->lock, flags); in thin_endio()
4358 if (h->all_io_entry) { in thin_endio()
4360 dm_deferred_entry_dec(h->all_io_entry, &work); in thin_endio()
4362 spin_lock_irqsave(&pool->lock, flags); in thin_endio()
4364 list_add_tail(&m->list, &pool->prepared_discards); in thin_endio()
4365 spin_unlock_irqrestore(&pool->lock, flags); in thin_endio()
4370 if (h->cell) in thin_endio()
4371 cell_defer_no_holder(h->tc, h->cell); in thin_endio()
4378 struct thin_c *tc = ti->private; in thin_presuspend()
4386 struct thin_c *tc = ti->private; in thin_postsuspend()
4397 struct thin_c *tc = ti->private; in thin_preresume()
4399 if (tc->origin_dev) in thin_preresume()
4400 tc->origin_size = get_dev_size(tc->origin_dev->bdev); in thin_preresume()
4415 struct thin_c *tc = ti->private; in thin_status()
4417 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_status()
4422 if (!tc->td) in thin_status()
4423 DMEMIT("-"); in thin_status()
4427 r = dm_thin_get_mapped_count(tc->td, &mapped); in thin_status()
4433 r = dm_thin_get_highest_mapped_block(tc->td, &highest); in thin_status()
4439 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block); in thin_status()
4442 tc->pool->sectors_per_block) - 1); in thin_status()
4444 DMEMIT("-"); in thin_status()
4449 format_dev_t(buf, tc->pool_dev->bdev->bd_dev), in thin_status()
4450 (unsigned long) tc->dev_id); in thin_status()
4451 if (tc->origin_dev) in thin_status()
4452 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev)); in thin_status()
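	/*
	 * Illustrative status (hypothetical values): with 64 KiB blocks
	 * (128 sectors), 100 mapped blocks and a highest mapped block of
	 * 511, STATUSTYPE_INFO reports "12800 65535" - the number of mapped
	 * sectors followed by the last sector of the highest mapped block -
	 * while the STATUSTYPE_TABLE branch re-emits the thin constructor
	 * arguments, e.g. "253:0 1".
	 */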
4471 struct thin_c *tc = ti->private; in thin_iterate_devices()
4472 struct pool *pool = tc->pool; in thin_iterate_devices()
4478 if (!pool->ti) in thin_iterate_devices()
4481 blocks = pool->ti->len; in thin_iterate_devices()
4482 (void) sector_div(blocks, pool->sectors_per_block); in thin_iterate_devices()
4484 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data); in thin_iterate_devices()
4491 struct thin_c *tc = ti->private; in thin_io_hints()
4492 struct pool *pool = tc->pool; in thin_io_hints()
4494 if (pool->pf.discard_enabled) { in thin_io_hints()
4495 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; in thin_io_hints()
4496 limits->max_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE; in thin_io_hints()
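	/*
	 * Example (hypothetical 64 KiB block size, and assuming
	 * BIO_PRISON_MAX_RANGE is 1024 as defined in dm-bio-prison-v1.h):
	 * discard_granularity becomes 65536 bytes and max_discard_sectors
	 * becomes 128 * 1024 = 131072 sectors (64 MiB), so a single discard
	 * bio can cover at most one bio-prison range worth of blocks.
	 */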
4516 /*----------------------------------------------------------------*/
4520 int r = -ENOMEM; in dm_thin_init()
4563 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");