/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison-v1.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/jiffies.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX "thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define COMMIT_PERIOD HZ
#define NO_SPACE_TIMEOUT_SECS 60

static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data). When you take an internal snapshot you clone the root node
 * of the origin btree. After this there is no concept of an origin or a
 * snapshot. They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic. If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin. The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block. Obviously
 * including all devices that share this block. (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block. This step can be
 * missed out if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping). This act of inserting breaks some
 * sharing of btree nodes between the two devices. Breaking sharing only
 * affects the btree of that specific device. Btrees for the other
 * devices that share the block never change. The btree for the origin
 * device as it was after the last commit is untouched, ie. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues. We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one). This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block. As it would after
 * the commit.
 *
 * The downside of this scheme is the timestamp magic isn't perfect, and
 * will continue to think that data block in the snapshot device is shared
 * even after the write to the origin has broken sharing. I suspect data
 * blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block. At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
enum lock_space {
	VIRTUAL,
	PHYSICAL
};
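
/*
 * A VIRTUAL key locks a range of a thin device's logical blocks; a
 * PHYSICAL key locks a range of the pool's data blocks. Keys in the two
 * spaces never conflict with one another.
 */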

static void build_key(struct dm_thin_device *td, enum lock_space ls,
		      dm_block_t b, dm_block_t e, struct dm_cell_key *key)
{
	key->virtual = (ls == VIRTUAL);
	key->dev = dm_thin_dev_id(td);
	key->block_begin = b;
	key->block_end = e;
}

static void build_data_key(struct dm_thin_device *td, dm_block_t b,
			   struct dm_cell_key *key)
{
	build_key(td, PHYSICAL, b, b + 1llu, key);
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	build_key(td, VIRTUAL, b, b + 1llu, key);
}

/*----------------------------------------------------------------*/

#define THROTTLE_THRESHOLD (1 * HZ)

struct throttle {
	struct rw_semaphore lock;
	unsigned long threshold;
	bool throttle_applied;
};
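
/*
 * The worker opens a throttle window with throttle_work_start(). If it
 * is still busy once THROTTLE_THRESHOLD has expired,
 * throttle_work_update() takes the semaphore for writing, forcing paths
 * that take it for reading via throttle_lock() to wait until the worker
 * calls throttle_work_complete().
 */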

static void throttle_init(struct throttle *t)
{
	init_rwsem(&t->lock);
	t->throttle_applied = false;
}

static void throttle_work_start(struct throttle *t)
{
	t->threshold = jiffies + THROTTLE_THRESHOLD;
}

static void throttle_work_update(struct throttle *t)
{
	if (!t->throttle_applied && jiffies > t->threshold) {
		down_write(&t->lock);
		t->throttle_applied = true;
	}
}

static void throttle_work_complete(struct throttle *t)
{
	if (t->throttle_applied) {
		t->throttle_applied = false;
		up_write(&t->lock);
	}
}

static void throttle_lock(struct throttle *t)
{
	down_read(&t->lock);
}

static void throttle_unlock(struct throttle *t)
{
	up_read(&t->lock);
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device. It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in various modes. Ordered in degraded order for comparisons.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */

	/*
	 * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY.
	 */
	PM_OUT_OF_METADATA_SPACE,
	PM_READ_ONLY,		/* metadata may not be changed */

	PM_FAIL,		/* all I/O fails */
};

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

#define CELL_SORT_ARRAY_SIZE 8192

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */
	bool suspended:1;
	bool out_of_data_space:1;

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct work_struct worker;
	struct workqueue_struct *wq;
	struct throttle throttle;
	struct delayed_work waker;
	struct delayed_work no_space_timeout;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;
	struct list_head prepared_discards_pt2;
	struct list_head active_thins;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_cell_fn process_cell;
	process_cell_fn process_discard_cell;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;
	process_mapping_fn process_prepared_discard_pt2;

	struct dm_bio_prison_cell **cell_sort_array;

	mempool_t mapping_pool;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct list_head list;
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	sector_t origin_size;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
	struct mapped_device *thin_md;

	bool requeue_mode:1;
	spinlock_t lock;
	struct list_head deferred_cells;
	struct bio_list deferred_bio_list;
	struct bio_list retry_on_resume_list;
	struct rb_root sort_bio_list; /* sorted list of deferred bios */

	/*
	 * Ensures the thin is not destroyed until the worker has finished
	 * iterating the active_thins list.
	 */
	atomic_t refcount;
	struct completion can_destroy;
};

/*----------------------------------------------------------------*/

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

static sector_t block_to_sectors(struct pool *pool, dm_block_t b)
{
	return block_size_is_power_of_two(pool) ?
		(b << pool->sectors_per_block_shift) :
		(b * pool->sectors_per_block);
}

/*----------------------------------------------------------------*/

struct discard_op {
	struct thin_c *tc;
	struct blk_plug plug;
	struct bio *parent_bio;
	struct bio *bio;
};

static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
{
	BUG_ON(!parent);

	op->tc = tc;
	blk_start_plug(&op->plug);
	op->parent_bio = parent;
	op->bio = NULL;
}

static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t data_e)
{
	struct thin_c *tc = op->tc;
	sector_t s = block_to_sectors(tc->pool, data_b);
	sector_t len = block_to_sectors(tc->pool, data_e - data_b);

	return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
				      GFP_NOWAIT, 0, &op->bio);
}

static void end_discard(struct discard_op *op, int r)
{
	if (op->bio) {
		/*
		 * Even if one of the calls to issue_discard failed, we
		 * need to wait for the chain to complete.
		 */
		bio_chain(op->bio, op->parent_bio);
		bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0);
		submit_bio(op->bio);
	}

	blk_finish_plug(&op->plug);

	/*
	 * Even if r is set, there could be sub discards in flight that we
	 * need to wait for.
	 */
	if (r && !op->parent_bio->bi_status)
		op->parent_bio->bi_status = errno_to_blk_status(r);
	bio_endio(op->parent_bio);
}

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_visit_release(struct pool *pool,
			       void (*fn)(void *, struct dm_bio_prison_cell *),
			       void *context,
			       struct dm_bio_prison_cell *cell)
{
	dm_cell_visit_release(pool->prison, fn, context, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error_with_code(struct pool *pool,
		struct dm_bio_prison_cell *cell, blk_status_t error_code)
{
	dm_cell_error(pool->prison, cell, error_code);
	dm_bio_prison_free_cell(pool->prison, cell);
}
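/*
 * When the pool has run out of data space, failed bios get BLK_STS_NOSPC
 * rather than BLK_STS_IOERR so that upper layers can tell a full pool
 * apart from a genuine I/O error.
 */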
static blk_status_t get_pool_io_error_code(struct pool *pool)
{
	return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR;
}

static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, get_pool_io_error_code(pool));
}

static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, 0);
}

static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void pool_table_exit(void)
{
	mutex_destroy(&dm_thin_pool_table.mutex);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

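/*
 * One of these is stashed in each bio's per-bio data. The deferred-set
 * entries record the bio in the pool's shared-read and all-io sets (used
 * to quiesce in-flight io), and overwrite_mapping links a bio that
 * overwrites a whole block back to the mapping it is preparing.
 */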
struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
	struct rb_node rb_node;
	struct dm_bio_prison_cell *cell;
};

static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
{
	bio_list_merge(bios, master);
	bio_list_init(master);
}

static void error_bio_list(struct bio_list *bios, blk_status_t error)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bios))) {
		bio->bi_status = error;
		bio_endio(bio);
	}
}

static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
		blk_status_t error)
{
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	__merge_bio_list(&bios, master);
	spin_unlock_irqrestore(&tc->lock, flags);

	error_bio_list(&bios, error);
}

static void requeue_deferred_cells(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct list_head cells;
	struct dm_bio_prison_cell *cell, *tmp;

	INIT_LIST_HEAD(&cells);

	spin_lock_irqsave(&tc->lock, flags);
	list_splice_init(&tc->deferred_cells, &cells);
	spin_unlock_irqrestore(&tc->lock, flags);

	list_for_each_entry_safe(cell, tmp, &cells, user_list)
		cell_requeue(pool, cell);
}

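/*
 * Hand all deferred bios, retry-on-resume bios and deferred cells back
 * to the dm core with BLK_STS_DM_REQUEUE, so they are reissued once the
 * device is resumed.
 */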
static void requeue_io(struct thin_c *tc)
{
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	__merge_bio_list(&bios, &tc->deferred_bio_list);
	__merge_bio_list(&bios, &tc->retry_on_resume_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	error_bio_list(&bios, BLK_STS_DM_REQUEUE);
	requeue_deferred_cells(tc);
}

static void error_retry_list_with_code(struct pool *pool, blk_status_t error)
{
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list)
		error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
	rcu_read_unlock();
}

static void error_retry_list(struct pool *pool)
{
	error_retry_list_with_code(pool, get_pool_io_error_code(pool));
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

/*
 * Returns the _complete_ blocks that this bio covers.
 */
static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
				dm_block_t *begin, dm_block_t *end)
{
	struct pool *pool = tc->pool;
	sector_t b = bio->bi_iter.bi_sector;
	sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT);

	b += pool->sectors_per_block - 1ull; /* so we round up */

	if (block_size_is_power_of_two(pool)) {
		b >>= pool->sectors_per_block_shift;
		e >>= pool->sectors_per_block_shift;
	} else {
		(void) sector_div(b, pool->sectors_per_block);
		(void) sector_div(e, pool->sectors_per_block);
	}

	if (e < b)
		/* Can happen if the bio is within a single block. */
		e = b;

	*begin = b;
	*end = e;
}

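/*
 * Remap a bio to the pool's data device: the new sector is the start of
 * data block 'block' plus the bio's offset within its original block.
 */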
static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio_set_dev(bio, tc->pool_dev->bdev);
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				sector_div(bi_sector, pool->sectors_per_block);
}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio_set_dev(bio, tc->origin_dev->bdev);
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return op_is_flush(bio->bi_opf) &&
		dm_thin_changed_this_transaction(tc->td);
}

static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio_op(bio) == REQ_OP_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed e.g, due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	bool pass_discard:1;
	bool maybe_shared:1;

	/*
	 * Track quiescing, copying and zeroing preparation actions. When this
	 * counter hits zero the block is prepared and can be inserted into the
	 * btree.
	 */
	atomic_t prepare_actions;

	blk_status_t status;
	struct thin_c *tc;
	dm_block_t virt_begin, virt_end;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying. Instead this bio is hooked. The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (atomic_dec_and_test(&m->prepare_actions)) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	unsigned long flags;
	struct pool *pool = m->tc->pool;

	spin_lock_irqsave(&pool->lock, flags);
	__complete_mapping_preparation(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	struct dm_thin_new_mapping *m = context;

	m->status = read_err || write_err ? BLK_STS_IOERR : 0;
	complete_mapping_preparation(m);
}

static void overwrite_endio(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;

	bio->bi_end_io = m->saved_bi_end_io;

	m->status = bio->bi_status;
	complete_mapping_preparation(m);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell, except the original holder, back
 * to the deferred_bios list.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void thin_defer_bio(struct thin_c *tc, struct bio *bio);

struct remap_info {
	struct thin_c *tc;
	struct bio_list defer_bios;
	struct bio_list issue_bios;
};

static void __inc_remap_and_issue_cell(void *context,
				       struct dm_bio_prison_cell *cell)
{
	struct remap_info *info = context;
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
		if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)
			bio_list_add(&info->defer_bios, bio);
		else {
			inc_all_io_entry(info->tc->pool, bio);

			/*
			 * We can't issue the bios with the bio prison lock
			 * held, so we add them to a list to issue on
			 * return from this function.
			 */
			bio_list_add(&info->issue_bios, bio);
		}
	}
}

static void inc_remap_and_issue_cell(struct thin_c *tc,
				     struct dm_bio_prison_cell *cell,
				     dm_block_t block)
{
	struct bio *bio;
	struct remap_info info;

	info.tc = tc;
	bio_list_init(&info.defer_bios);
	bio_list_init(&info.issue_bios);

	/*
	 * We have to be careful to inc any bios we're about to issue
	 * before the cell is released, and avoid a race with new bios
	 * being added to the cell.
	 */
	cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
			   &info, cell);

	while ((bio = bio_list_pop(&info.defer_bios)))
		thin_defer_bio(tc, bio);

	while ((bio = bio_list_pop(&info.issue_bios)))
		remap_and_issue(info.tc, bio, block);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, &m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio = m->bio;
	int r;

	if (m->status) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_begin, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
		bio_endio(bio);
	} else {
		inc_all_io_entry(tc->pool, m->cell->holder);
		remap_and_issue(tc, m->cell->holder, m->data_block);
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
	}

out:
	list_del(&m->list);
	mempool_free(m, &pool->mapping_pool);
}

/*----------------------------------------------------------------*/

static void free_discard_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	if (m->cell)
		cell_defer_no_holder(tc, m->cell);
	mempool_free(m, &tc->pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	bio_io_error(m->bio);
	free_discard_mapping(m);
}

static void process_prepared_discard_success(struct dm_thin_new_mapping *m)
{
	bio_endio(m->bio);
	free_discard_mapping(m);
}

static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end);
	if (r) {
		metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
		bio_io_error(m->bio);
	} else
		bio_endio(m->bio);

	cell_defer_no_holder(tc, m->cell);
	mempool_free(m, &tc->pool->mapping_pool);
}

/*----------------------------------------------------------------*/

static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m,
						   struct bio *discard_parent)
{
	/*
	 * We've already unmapped this range of blocks, but before we
	 * passdown we have to check that these blocks are now unused.
	 */
	int r = 0;
	bool used = true;
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
	struct discard_op op;

	begin_discard(&op, tc, discard_parent);
	while (b != end) {
		/* find start of unmapped run */
		for (; b < end; b++) {
			r = dm_pool_block_is_used(pool->pmd, b, &used);
			if (r)
				goto out;

			if (!used)
				break;
		}

		if (b == end)
			break;

		/* find end of run */
		for (e = b + 1; e != end; e++) {
			r = dm_pool_block_is_used(pool->pmd, e, &used);
			if (r)
				goto out;

			if (used)
				break;
		}

		r = issue_discard(&op, b, e);
		if (r)
			goto out;

		b = e;
	}
out:
	end_discard(&op, r);
}

static void queue_passdown_pt2(struct dm_thin_new_mapping *m)
{
	unsigned long flags;
	struct pool *pool = m->tc->pool;

	spin_lock_irqsave(&pool->lock, flags);
	list_add_tail(&m->list, &pool->prepared_discards_pt2);
	spin_unlock_irqrestore(&pool->lock, flags);
	wake_worker(pool);
}

static void passdown_endio(struct bio *bio)
{
	/*
	 * It doesn't matter if the passdown discard failed, we still want
	 * to unmap (we ignore err).
	 */
	queue_passdown_pt2(bio->bi_private);
	bio_put(bio);
}

static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *discard_parent;
	dm_block_t data_end = m->data_block + (m->virt_end - m->virt_begin);

	/*
	 * Only this thread allocates blocks, so we can be sure that the
	 * newly unmapped blocks will not be allocated before the end of
	 * the function.
	 */
	r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_remove_range", r);
		bio_io_error(m->bio);
		cell_defer_no_holder(tc, m->cell);
		mempool_free(m, &pool->mapping_pool);
		return;
	}

	/*
	 * Increment the unmapped blocks. This prevents a race between the
	 * passdown io and reallocation of freed blocks.
	 */
	r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
		bio_io_error(m->bio);
		cell_defer_no_holder(tc, m->cell);
		mempool_free(m, &pool->mapping_pool);
		return;
	}

	discard_parent = bio_alloc(GFP_NOIO, 1);
	if (!discard_parent) {
		DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
		       dm_device_name(tc->pool->pool_md));
		queue_passdown_pt2(m);

	} else {
		discard_parent->bi_end_io = passdown_endio;
		discard_parent->bi_private = m;

		if (m->maybe_shared)
			passdown_double_checking_shared_status(m, discard_parent);
		else {
			struct discard_op op;

			begin_discard(&op, tc, discard_parent);
			r = issue_discard(&op, m->data_block, data_end);
			end_discard(&op, r);
		}
	}
}

static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;

	/*
	 * The passdown has completed, so now we can decrement all those
	 * unmapped blocks.
	 */
	r = dm_pool_dec_data_range(pool->pmd, m->data_block,
				   m->data_block + (m->virt_end - m->virt_begin));
	if (r) {
		metadata_operation_failed(pool, "dm_pool_dec_data_range", r);
		bio_io_error(m->bio);
	} else
		bio_endio(m->bio);

	cell_defer_no_holder(tc, m->cell);
	mempool_free(m, &pool->mapping_pool);
}

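/*
 * Splice the prepared list off the pool under the spinlock, then run the
 * handler on each mapping with the lock dropped, since handlers may
 * block or issue io.
 */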
static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

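/*
 * Allocation of a mapping is split in two: ensure_next_mapping() may
 * fail and is called where failure can still be handled, whereas
 * get_next_mapping() just hands over the preallocated mapping and
 * cannot fail.
 */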
static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(&pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
		    sector_t begin, sector_t end)
{
	struct dm_io_region to;

	to.bdev = tc->pool_dev->bdev;
	to.sector = begin;
	to.count = end - begin;

	dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
}

static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
				      dm_block_t data_begin,
				      struct dm_thin_new_mapping *m)
{
	struct pool *pool = tc->pool;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

	h->overwrite_mapping = m;
	m->bio = bio;
	save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
	inc_all_io_entry(pool, bio);
	remap_and_issue(tc, bio, data_begin);
}

/*
 * A partial copy also needs to zero the uncopied region.
 */
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio,
			  sector_t len)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_begin = virt_block;
	m->virt_end = virt_block + 1u;
	m->data_block = data_dest;
	m->cell = cell;

	/*
	 * quiesce action + copy action + an extra reference held for the
	 * duration of this function (we may need to inc later for a
	 * partial zero).
	 */
	atomic_set(&m->prepare_actions, 3);

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		complete_mapping_preparation(m); /* already quiesced */

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio))
		remap_and_issue_overwrite(tc, bio, data_dest, m);
	else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = len;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = len;

		dm_kcopyd_copy(pool->copier, &from, 1, &to,
			       0, copy_complete, m);

		/*
		 * Do we need to zero a tail region?
		 */
		if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
			atomic_inc(&m->prepare_actions);
			ll_zero(tc, m,
				data_dest * pool->sectors_per_block + len,
				(data_dest + 1) * pool->sectors_per_block);
		}
	}

	complete_mapping_preparation(m); /* drop our ref */
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio,
		      tc->pool->sectors_per_block);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
	m->tc = tc;
	m->virt_begin = virt_block;
	m->virt_end = virt_block + 1u;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (pool->pf.zero_new_blocks) {
		if (io_overwrites_block(pool, bio))
			remap_and_issue_overwrite(tc, bio, data_block, m);
		else
			ll_zero(tc, m, data_block * pool->sectors_per_block,
				(data_block + 1) * pool->sectors_per_block);
	} else
		process_prepared_mapping(m);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t virt_block_begin = virt_block * pool->sectors_per_block;
	sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;

	if (virt_block_end <= tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      pool->sectors_per_block);

	else if (virt_block_begin < tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      tc->origin_size - virt_block_begin);

	else
		schedule_zero(tc, virt_block, data_dest, cell, bio);
}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

static void requeue_bios(struct pool *pool);

static bool is_read_only_pool_mode(enum pool_mode mode)
{
	return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
}

static bool is_read_only(struct pool *pool)
{
	return is_read_only_pool_mode(get_pool_mode(pool));
}

static void check_for_metadata_space(struct pool *pool)
{
	int r;
	const char *ooms_reason = NULL;
	dm_block_t nr_free;

	r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
	if (r)
		ooms_reason = "Could not get free metadata blocks";
	else if (!nr_free)
		ooms_reason = "No free metadata blocks";

	if (ooms_reason && !is_read_only(pool)) {
		DMERR("%s", ooms_reason);
		set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
	}
}

static void check_for_data_space(struct pool *pool)
{
	int r;
	dm_block_t nr_free;

	if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
		return;

	r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
	if (r)
		return;

	if (nr_free) {
		set_pool_mode(pool, PM_WRITE);
		requeue_bios(pool);
	}
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
	else {
		check_for_metadata_space(pool);
		check_for_data_space(pool);
	}

	return r;
}

static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
	unsigned long flags;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = true;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}
}

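/*
 * Allocate a new data block. If the pool looks full we commit first, in
 * case that frees some space; a definite -ENOSPC switches the pool to
 * PM_OUT_OF_DATA_SPACE. We also commit before the last of the metadata
 * reserve gets used up.
 */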
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	struct pool *pool = tc->pool;

	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
		return -EINVAL;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
		return r;
	}

	check_low_water_mark(pool, free_blocks);

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
			return r;
		}

		if (!free_blocks) {
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		if (r == -ENOSPC)
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
		else
			metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
		return r;
	}

	r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
		return r;
	}

	if (!free_blocks) {
		/* Let's commit before we use up the metadata reserve. */
		r = commit(pool);
		if (r)
			return r;
	}

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_add(&tc->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&tc->lock, flags);
}

static blk_status_t should_error_unserviceable_bio(struct pool *pool)
{
	enum pool_mode m = get_pool_mode(pool);

	switch (m) {
	case PM_WRITE:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
		return BLK_STS_IOERR;

	case PM_OUT_OF_DATA_SPACE:
		return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;

	case PM_OUT_OF_METADATA_SPACE:
	case PM_READ_ONLY:
	case PM_FAIL:
		return BLK_STS_IOERR;
	default:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
		return BLK_STS_IOERR;
	}
}

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
	blk_status_t error = should_error_unserviceable_bio(pool);

	if (error) {
		bio->bi_status = error;
		bio_endio(bio);
	} else
		retry_on_resume(bio);
}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;
	blk_status_t error;

	error = should_error_unserviceable_bio(pool);
	if (error) {
		cell_error_with_code(pool, cell, error);
		return;
	}

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);

	while ((bio = bio_list_pop(&bios)))
		retry_on_resume(bio);
}

static void process_discard_cell_no_passdown(struct thin_c *tc,
					     struct dm_bio_prison_cell *virt_cell)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	/*
	 * We don't need to lock the data blocks, since there's no
	 * passdown. We only lock data blocks for allocation and breaking sharing.
	 */
	m->tc = tc;
	m->virt_begin = virt_cell->key.block_begin;
	m->virt_end = virt_cell->key.block_end;
	m->cell = virt_cell;
	m->bio = virt_cell->holder;

	if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
		pool->process_prepared_discard(m);
}

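/*
 * Walk the virtual range of a discard, carving it into runs of
 * contiguously mapped blocks. Each run has its data range locked in the
 * bio prison and gets its own dm_thin_new_mapping, processed once any
 * in-flight io to those blocks has quiesced.
 */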
break_up_discard_bio(struct thin_c * tc,dm_block_t begin,dm_block_t end,struct bio * bio)1614 static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end,
1615 struct bio *bio)
1616 {
1617 struct pool *pool = tc->pool;
1618
1619 int r;
1620 bool maybe_shared;
1621 struct dm_cell_key data_key;
1622 struct dm_bio_prison_cell *data_cell;
1623 struct dm_thin_new_mapping *m;
1624 dm_block_t virt_begin, virt_end, data_begin;
1625
1626 while (begin != end) {
1627 r = ensure_next_mapping(pool);
1628 if (r)
1629 /* we did our best */
1630 return;
1631
1632 r = dm_thin_find_mapped_range(tc->td, begin, end, &virt_begin, &virt_end,
1633 &data_begin, &maybe_shared);
1634 if (r)
1635 /*
1636 * Silently fail, letting any mappings we've
1637 * created complete.
1638 */
1639 break;
1640
1641 build_key(tc->td, PHYSICAL, data_begin, data_begin + (virt_end - virt_begin), &data_key);
1642 if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) {
1643 /* contention, we'll give up with this range */
1644 begin = virt_end;
1645 continue;
1646 }
1647
1648 /*
1649 * IO may still be going to the destination block. We must
1650 * quiesce before we can do the removal.
1651 */
1652 m = get_next_mapping(pool);
1653 m->tc = tc;
1654 m->maybe_shared = maybe_shared;
1655 m->virt_begin = virt_begin;
1656 m->virt_end = virt_end;
1657 m->data_block = data_begin;
1658 m->cell = data_cell;
1659 m->bio = bio;
1660
1661 /*
1662 * The parent bio must not complete before sub discard bios are
1663 * chained to it (see end_discard's bio_chain)!
1664 *
1665 * This per-mapping bi_remaining increment is paired with
1666 * the implicit decrement that occurs via bio_endio() in
1667 * end_discard().
1668 */
1669 bio_inc_remaining(bio);
1670 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
1671 pool->process_prepared_discard(m);
1672
1673 begin = virt_end;
1674 }
1675 }
1676
process_discard_cell_passdown(struct thin_c * tc,struct dm_bio_prison_cell * virt_cell)1677 static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_prison_cell *virt_cell)
1678 {
1679 struct bio *bio = virt_cell->holder;
1680 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1681
1682 /*
1683 * The virt_cell will only get freed once the origin bio completes.
1684 * This means it will remain locked while all the individual
1685 * passdown bios are in flight.
1686 */
1687 h->cell = virt_cell;
1688 break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio);
1689
1690 /*
1691 * We complete the bio now, knowing that the bi_remaining field
1692 * will prevent completion until the sub range discards have
1693 * completed.
1694 */
1695 bio_endio(bio);
1696 }
1697
process_discard_bio(struct thin_c * tc,struct bio * bio)1698 static void process_discard_bio(struct thin_c *tc, struct bio *bio)
1699 {
1700 dm_block_t begin, end;
1701 struct dm_cell_key virt_key;
1702 struct dm_bio_prison_cell *virt_cell;
1703
1704 get_bio_block_range(tc, bio, &begin, &end);
1705 if (begin == end) {
1706 /*
1707 * The discard covers less than a block.
1708 */
1709 bio_endio(bio);
1710 return;
1711 }
1712
1713 build_key(tc->td, VIRTUAL, begin, end, &virt_key);
1714 if (bio_detain(tc->pool, &virt_key, bio, &virt_cell))
1715 /*
1716 * Potential starvation issue: We're relying on the
1717 * fs/application being well behaved, and not trying to
1718 * send IO to a region at the same time as discarding it.
1719 * If they do this persistently then it's possible this
1720 * cell will never be granted.
1721 */
1722 return;
1723
1724 tc->pool->process_discard_cell(tc, virt_cell);
1725 }
1726
break_sharing(struct thin_c * tc,struct bio * bio,dm_block_t block,struct dm_cell_key * key,struct dm_thin_lookup_result * lookup_result,struct dm_bio_prison_cell * cell)1727 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1728 struct dm_cell_key *key,
1729 struct dm_thin_lookup_result *lookup_result,
1730 struct dm_bio_prison_cell *cell)
1731 {
1732 int r;
1733 dm_block_t data_block;
1734 struct pool *pool = tc->pool;
1735
1736 r = alloc_data_block(tc, &data_block);
1737 switch (r) {
1738 case 0:
1739 schedule_internal_copy(tc, block, lookup_result->block,
1740 data_block, cell, bio);
1741 break;
1742
1743 case -ENOSPC:
1744 retry_bios_on_resume(pool, cell);
1745 break;
1746
1747 default:
1748 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1749 __func__, r);
1750 cell_error(pool, cell);
1751 break;
1752 }
1753 }
1754
__remap_and_issue_shared_cell(void * context,struct dm_bio_prison_cell * cell)1755 static void __remap_and_issue_shared_cell(void *context,
1756 struct dm_bio_prison_cell *cell)
1757 {
1758 struct remap_info *info = context;
1759 struct bio *bio;
1760
1761 while ((bio = bio_list_pop(&cell->bios))) {
1762 if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) ||
1763 bio_op(bio) == REQ_OP_DISCARD)
1764 bio_list_add(&info->defer_bios, bio);
1765 else {
1766 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1767
1768 h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
1769 inc_all_io_entry(info->tc->pool, bio);
1770 bio_list_add(&info->issue_bios, bio);
1771 }
1772 }
1773 }
1774
remap_and_issue_shared_cell(struct thin_c * tc,struct dm_bio_prison_cell * cell,dm_block_t block)1775 static void remap_and_issue_shared_cell(struct thin_c *tc,
1776 struct dm_bio_prison_cell *cell,
1777 dm_block_t block)
1778 {
1779 struct bio *bio;
1780 struct remap_info info;
1781
1782 info.tc = tc;
1783 bio_list_init(&info.defer_bios);
1784 bio_list_init(&info.issue_bios);
1785
1786 cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
1787 &info, cell);
1788
1789 while ((bio = bio_list_pop(&info.defer_bios)))
1790 thin_defer_bio(tc, bio);
1791
1792 while ((bio = bio_list_pop(&info.issue_bios)))
1793 remap_and_issue(tc, bio, block);
1794 }
1795
1796 static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1797 dm_block_t block,
1798 struct dm_thin_lookup_result *lookup_result,
1799 struct dm_bio_prison_cell *virt_cell)
1800 {
1801 struct dm_bio_prison_cell *data_cell;
1802 struct pool *pool = tc->pool;
1803 struct dm_cell_key key;
1804
1805 /*
1806 * If cell is already occupied, then sharing is already in the process
1807 * of being broken so we have nothing further to do here.
1808 */
1809 build_data_key(tc->td, lookup_result->block, &key);
1810 if (bio_detain(pool, &key, bio, &data_cell)) {
1811 cell_defer_no_holder(tc, virt_cell);
1812 return;
1813 }
1814
1815 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
1816 break_sharing(tc, bio, block, &key, lookup_result, data_cell);
1817 cell_defer_no_holder(tc, virt_cell);
1818 } else {
1819 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1820
1821 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
1822 inc_all_io_entry(pool, bio);
1823 remap_and_issue(tc, bio, lookup_result->block);
1824
1825 remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
1826 remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
1827 }
1828 }
1829
1830 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1831 struct dm_bio_prison_cell *cell)
1832 {
1833 int r;
1834 dm_block_t data_block;
1835 struct pool *pool = tc->pool;
1836
1837 /*
1838 * Remap empty bios (flushes) immediately, without provisioning.
1839 */
1840 if (!bio->bi_iter.bi_size) {
1841 inc_all_io_entry(pool, bio);
1842 cell_defer_no_holder(tc, cell);
1843
1844 remap_and_issue(tc, bio, 0);
1845 return;
1846 }
1847
1848 /*
1849 * Fill read bios with zeroes and complete them immediately.
1850 */
1851 if (bio_data_dir(bio) == READ) {
1852 zero_fill_bio(bio);
1853 cell_defer_no_holder(tc, cell);
1854 bio_endio(bio);
1855 return;
1856 }
1857
1858 r = alloc_data_block(tc, &data_block);
1859 switch (r) {
1860 case 0:
1861 if (tc->origin_dev)
1862 schedule_external_copy(tc, block, data_block, cell, bio);
1863 else
1864 schedule_zero(tc, block, data_block, cell, bio);
1865 break;
1866
1867 case -ENOSPC:
1868 retry_bios_on_resume(pool, cell);
1869 break;
1870
1871 default:
1872 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1873 __func__, r);
1874 cell_error(pool, cell);
1875 break;
1876 }
1877 }
1878
1879 static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1880 {
1881 int r;
1882 struct pool *pool = tc->pool;
1883 struct bio *bio = cell->holder;
1884 dm_block_t block = get_bio_block(tc, bio);
1885 struct dm_thin_lookup_result lookup_result;
1886
1887 if (tc->requeue_mode) {
1888 cell_requeue(pool, cell);
1889 return;
1890 }
1891
1892 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1893 switch (r) {
1894 case 0:
1895 if (lookup_result.shared)
1896 process_shared_bio(tc, bio, block, &lookup_result, cell);
1897 else {
1898 inc_all_io_entry(pool, bio);
1899 remap_and_issue(tc, bio, lookup_result.block);
1900 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
1901 }
1902 break;
1903
1904 case -ENODATA:
1905 if (bio_data_dir(bio) == READ && tc->origin_dev) {
1906 inc_all_io_entry(pool, bio);
1907 cell_defer_no_holder(tc, cell);
1908
1909 if (bio_end_sector(bio) <= tc->origin_size)
1910 remap_to_origin_and_issue(tc, bio);
1911
1912 else if (bio->bi_iter.bi_sector < tc->origin_size) {
1913 zero_fill_bio(bio);
1914 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
1915 remap_to_origin_and_issue(tc, bio);
1916
1917 } else {
1918 zero_fill_bio(bio);
1919 bio_endio(bio);
1920 }
1921 } else
1922 provision_block(tc, bio, block, cell);
1923 break;
1924
1925 default:
1926 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1927 __func__, r);
1928 cell_defer_no_holder(tc, cell);
1929 bio_io_error(bio);
1930 break;
1931 }
1932 }
1933
1934 static void process_bio(struct thin_c *tc, struct bio *bio)
1935 {
1936 struct pool *pool = tc->pool;
1937 dm_block_t block = get_bio_block(tc, bio);
1938 struct dm_bio_prison_cell *cell;
1939 struct dm_cell_key key;
1940
1941 /*
1942 * If cell is already occupied, then the block is already
1943 * being provisioned so we have nothing further to do here.
1944 */
1945 build_virtual_key(tc->td, block, &key);
1946 if (bio_detain(pool, &key, bio, &cell))
1947 return;
1948
1949 process_cell(tc, cell);
1950 }
1951
1952 static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
1953 struct dm_bio_prison_cell *cell)
1954 {
1955 int r;
1956 int rw = bio_data_dir(bio);
1957 dm_block_t block = get_bio_block(tc, bio);
1958 struct dm_thin_lookup_result lookup_result;
1959
1960 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1961 switch (r) {
1962 case 0:
1963 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
1964 handle_unserviceable_bio(tc->pool, bio);
1965 if (cell)
1966 cell_defer_no_holder(tc, cell);
1967 } else {
1968 inc_all_io_entry(tc->pool, bio);
1969 remap_and_issue(tc, bio, lookup_result.block);
1970 if (cell)
1971 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
1972 }
1973 break;
1974
1975 case -ENODATA:
1976 if (cell)
1977 cell_defer_no_holder(tc, cell);
1978 if (rw != READ) {
1979 handle_unserviceable_bio(tc->pool, bio);
1980 break;
1981 }
1982
1983 if (tc->origin_dev) {
1984 inc_all_io_entry(tc->pool, bio);
1985 remap_to_origin_and_issue(tc, bio);
1986 break;
1987 }
1988
1989 zero_fill_bio(bio);
1990 bio_endio(bio);
1991 break;
1992
1993 default:
1994 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1995 __func__, r);
1996 if (cell)
1997 cell_defer_no_holder(tc, cell);
1998 bio_io_error(bio);
1999 break;
2000 }
2001 }
2002
2003 static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
2004 {
2005 __process_bio_read_only(tc, bio, NULL);
2006 }
2007
2008 static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2009 {
2010 __process_bio_read_only(tc, cell->holder, cell);
2011 }
2012
2013 static void process_bio_success(struct thin_c *tc, struct bio *bio)
2014 {
2015 bio_endio(bio);
2016 }
2017
2018 static void process_bio_fail(struct thin_c *tc, struct bio *bio)
2019 {
2020 bio_io_error(bio);
2021 }
2022
2023 static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2024 {
2025 cell_success(tc->pool, cell);
2026 }
2027
2028 static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2029 {
2030 cell_error(tc->pool, cell);
2031 }
2032
2033 /*
2034 * FIXME: should we also commit due to size of transaction, measured in
2035 * metadata blocks?
2036 */
2037 static int need_commit_due_to_time(struct pool *pool)
2038 {
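	/*
	 * COMMIT_PERIOD is HZ, so this returns true once roughly a
	 * second's worth of jiffies has elapsed since the last commit.
	 */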
2039 return !time_in_range(jiffies, pool->last_commit_jiffies,
2040 pool->last_commit_jiffies + COMMIT_PERIOD);
2041 }
2042
2043 #define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
2044 #define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))
2045
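/*
 * Insert the bio's per-bio hook data into the rb-tree, keyed by the bio's
 * start sector, so deferred bios can later be issued in sector order.
 */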
2046 static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
2047 {
2048 struct rb_node **rbp, *parent;
2049 struct dm_thin_endio_hook *pbd;
2050 sector_t bi_sector = bio->bi_iter.bi_sector;
2051
2052 rbp = &tc->sort_bio_list.rb_node;
2053 parent = NULL;
2054 while (*rbp) {
2055 parent = *rbp;
2056 pbd = thin_pbd(parent);
2057
2058 if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
2059 rbp = &(*rbp)->rb_left;
2060 else
2061 rbp = &(*rbp)->rb_right;
2062 }
2063
2064 pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
2065 rb_link_node(&pbd->rb_node, parent, rbp);
2066 rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
2067 }
2068
2069 static void __extract_sorted_bios(struct thin_c *tc)
2070 {
2071 struct rb_node *node;
2072 struct dm_thin_endio_hook *pbd;
2073 struct bio *bio;
2074
2075 for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
2076 pbd = thin_pbd(node);
2077 bio = thin_bio(pbd);
2078
2079 bio_list_add(&tc->deferred_bio_list, bio);
2080 rb_erase(&pbd->rb_node, &tc->sort_bio_list);
2081 }
2082
2083 WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
2084 }
2085
2086 static void __sort_thin_deferred_bios(struct thin_c *tc)
2087 {
2088 struct bio *bio;
2089 struct bio_list bios;
2090
2091 bio_list_init(&bios);
2092 bio_list_merge(&bios, &tc->deferred_bio_list);
2093 bio_list_init(&tc->deferred_bio_list);
2094
2095 /* Sort deferred_bio_list using rb-tree */
2096 while ((bio = bio_list_pop(&bios)))
2097 __thin_bio_rb_add(tc, bio);
2098
2099 /*
2100 * Transfer the sorted bios in sort_bio_list back to
2101 * deferred_bio_list to allow lockless submission of
2102 * all bios.
2103 */
2104 __extract_sorted_bios(tc);
2105 }
2106
2107 static void process_thin_deferred_bios(struct thin_c *tc)
2108 {
2109 struct pool *pool = tc->pool;
2110 unsigned long flags;
2111 struct bio *bio;
2112 struct bio_list bios;
2113 struct blk_plug plug;
2114 unsigned count = 0;
2115
2116 if (tc->requeue_mode) {
2117 error_thin_bio_list(tc, &tc->deferred_bio_list,
2118 BLK_STS_DM_REQUEUE);
2119 return;
2120 }
2121
2122 bio_list_init(&bios);
2123
2124 spin_lock_irqsave(&tc->lock, flags);
2125
2126 if (bio_list_empty(&tc->deferred_bio_list)) {
2127 spin_unlock_irqrestore(&tc->lock, flags);
2128 return;
2129 }
2130
2131 __sort_thin_deferred_bios(tc);
2132
2133 bio_list_merge(&bios, &tc->deferred_bio_list);
2134 bio_list_init(&tc->deferred_bio_list);
2135
2136 spin_unlock_irqrestore(&tc->lock, flags);
2137
2138 blk_start_plug(&plug);
2139 while ((bio = bio_list_pop(&bios))) {
2140 /*
2141 * If we've got no free new_mapping structs, and processing
2142 * this bio might require one, we pause until there are some
2143 * prepared mappings to process.
2144 */
2145 if (ensure_next_mapping(pool)) {
2146 spin_lock_irqsave(&tc->lock, flags);
2147 bio_list_add(&tc->deferred_bio_list, bio);
2148 bio_list_merge(&tc->deferred_bio_list, &bios);
2149 spin_unlock_irqrestore(&tc->lock, flags);
2150 break;
2151 }
2152
2153 if (bio_op(bio) == REQ_OP_DISCARD)
2154 pool->process_discard(tc, bio);
2155 else
2156 pool->process_bio(tc, bio);
2157
2158 if ((count++ & 127) == 0) {
2159 throttle_work_update(&pool->throttle);
2160 dm_pool_issue_prefetches(pool->pmd);
2161 }
2162 }
2163 blk_finish_plug(&plug);
2164 }
2165
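/*
 * Comparator for sort(): orders prison cells by the start sector of
 * their holder bio.
 */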
2166 static int cmp_cells(const void *lhs, const void *rhs)
2167 {
2168 struct dm_bio_prison_cell *lhs_cell = *((struct dm_bio_prison_cell **) lhs);
2169 struct dm_bio_prison_cell *rhs_cell = *((struct dm_bio_prison_cell **) rhs);
2170
2171 BUG_ON(!lhs_cell->holder);
2172 BUG_ON(!rhs_cell->holder);
2173
2174 if (lhs_cell->holder->bi_iter.bi_sector < rhs_cell->holder->bi_iter.bi_sector)
2175 return -1;
2176
2177 if (lhs_cell->holder->bi_iter.bi_sector > rhs_cell->holder->bi_iter.bi_sector)
2178 return 1;
2179
2180 return 0;
2181 }
2182
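/*
 * Move up to CELL_SORT_ARRAY_SIZE cells from the list into
 * pool->cell_sort_array and sort them by their holder bio's start
 * sector.  Note that sizeof(cell) below is the size of a cell
 * _pointer_, which is what the array holds.  Returns the number of
 * cells transferred.
 */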
2183 static unsigned sort_cells(struct pool *pool, struct list_head *cells)
2184 {
2185 unsigned count = 0;
2186 struct dm_bio_prison_cell *cell, *tmp;
2187
2188 list_for_each_entry_safe(cell, tmp, cells, user_list) {
2189 if (count >= CELL_SORT_ARRAY_SIZE)
2190 break;
2191
2192 pool->cell_sort_array[count++] = cell;
2193 list_del(&cell->user_list);
2194 }
2195
2196 sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);
2197
2198 return count;
2199 }
2200
2201 static void process_thin_deferred_cells(struct thin_c *tc)
2202 {
2203 struct pool *pool = tc->pool;
2204 unsigned long flags;
2205 struct list_head cells;
2206 struct dm_bio_prison_cell *cell;
2207 unsigned i, j, count;
2208
2209 INIT_LIST_HEAD(&cells);
2210
2211 spin_lock_irqsave(&tc->lock, flags);
2212 list_splice_init(&tc->deferred_cells, &cells);
2213 spin_unlock_irqrestore(&tc->lock, flags);
2214
2215 if (list_empty(&cells))
2216 return;
2217
2218 do {
2219 count = sort_cells(tc->pool, &cells);
2220
2221 for (i = 0; i < count; i++) {
2222 cell = pool->cell_sort_array[i];
2223 BUG_ON(!cell->holder);
2224
2225 /*
2226 * If we've got no free new_mapping structs, and processing
2227 * this bio might require one, we pause until there are some
2228 * prepared mappings to process.
2229 */
2230 if (ensure_next_mapping(pool)) {
2231 for (j = i; j < count; j++)
2232 list_add(&pool->cell_sort_array[j]->user_list, &cells);
2233
2234 spin_lock_irqsave(&tc->lock, flags);
2235 list_splice(&cells, &tc->deferred_cells);
2236 spin_unlock_irqrestore(&tc->lock, flags);
2237 return;
2238 }
2239
2240 if (bio_op(cell->holder) == REQ_OP_DISCARD)
2241 pool->process_discard_cell(tc, cell);
2242 else
2243 pool->process_cell(tc, cell);
2244 }
2245 } while (!list_empty(&cells));
2246 }
2247
2248 static void thin_get(struct thin_c *tc);
2249 static void thin_put(struct thin_c *tc);
2250
2251 /*
2252 * We can't hold rcu_read_lock() around code that can block. So we
2253 * find a thin with the rcu lock held; bump a refcount; then drop
2254 * the lock.
2255 */
2256 static struct thin_c *get_first_thin(struct pool *pool)
2257 {
2258 struct thin_c *tc = NULL;
2259
2260 rcu_read_lock();
2261 if (!list_empty(&pool->active_thins)) {
2262 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
2263 thin_get(tc);
2264 }
2265 rcu_read_unlock();
2266
2267 return tc;
2268 }
2269
2270 static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
2271 {
2272 struct thin_c *old_tc = tc;
2273
2274 rcu_read_lock();
2275 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
2276 thin_get(tc);
2277 thin_put(old_tc);
2278 rcu_read_unlock();
2279 return tc;
2280 }
2281 thin_put(old_tc);
2282 rcu_read_unlock();
2283
2284 return NULL;
2285 }
2286
2287 static void process_deferred_bios(struct pool *pool)
2288 {
2289 unsigned long flags;
2290 struct bio *bio;
2291 struct bio_list bios;
2292 struct thin_c *tc;
2293
2294 tc = get_first_thin(pool);
2295 while (tc) {
2296 process_thin_deferred_cells(tc);
2297 process_thin_deferred_bios(tc);
2298 tc = get_next_thin(pool, tc);
2299 }
2300
2301 /*
2302 * If there are any deferred flush bios, we must commit
2303 * the metadata before issuing them.
2304 */
2305 bio_list_init(&bios);
2306 spin_lock_irqsave(&pool->lock, flags);
2307 bio_list_merge(&bios, &pool->deferred_flush_bios);
2308 bio_list_init(&pool->deferred_flush_bios);
2309 spin_unlock_irqrestore(&pool->lock, flags);
2310
2311 if (bio_list_empty(&bios) &&
2312 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
2313 return;
2314
2315 if (commit(pool)) {
2316 while ((bio = bio_list_pop(&bios)))
2317 bio_io_error(bio);
2318 return;
2319 }
2320 pool->last_commit_jiffies = jiffies;
2321
2322 while ((bio = bio_list_pop(&bios)))
2323 generic_make_request(bio);
2324 }
2325
2326 static void do_worker(struct work_struct *ws)
2327 {
2328 struct pool *pool = container_of(ws, struct pool, worker);
2329
2330 throttle_work_start(&pool->throttle);
2331 dm_pool_issue_prefetches(pool->pmd);
2332 throttle_work_update(&pool->throttle);
2333 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
2334 throttle_work_update(&pool->throttle);
2335 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
2336 throttle_work_update(&pool->throttle);
2337 process_prepared(pool, &pool->prepared_discards_pt2, &pool->process_prepared_discard_pt2);
2338 throttle_work_update(&pool->throttle);
2339 process_deferred_bios(pool);
2340 throttle_work_complete(&pool->throttle);
2341 }
2342
2343 /*
2344 * We want to commit periodically so that not too much
2345 * unwritten data builds up.
2346 */
2347 static void do_waker(struct work_struct *ws)
2348 {
2349 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
2350 wake_worker(pool);
2351 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
2352 }
2353
2354 static void notify_of_pool_mode_change_to_oods(struct pool *pool);
2355
2356 /*
2357 * We're holding onto IO to allow userland time to react. After the
2358 * timeout either the pool will have been resized (and thus back in
2359 * PM_WRITE mode), or we degrade to PM_OUT_OF_DATA_SPACE w/ error_if_no_space.
2360 */
2361 static void do_no_space_timeout(struct work_struct *ws)
2362 {
2363 struct pool *pool = container_of(to_delayed_work(ws), struct pool,
2364 no_space_timeout);
2365
2366 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
2367 pool->pf.error_if_no_space = true;
2368 notify_of_pool_mode_change_to_oods(pool);
2369 error_retry_list_with_code(pool, BLK_STS_NOSPC);
2370 }
2371 }
2372
2373 /*----------------------------------------------------------------*/
2374
2375 struct pool_work {
2376 struct work_struct worker;
2377 struct completion complete;
2378 };
2379
2380 static struct pool_work *to_pool_work(struct work_struct *ws)
2381 {
2382 return container_of(ws, struct pool_work, worker);
2383 }
2384
2385 static void pool_work_complete(struct pool_work *pw)
2386 {
2387 complete(&pw->complete);
2388 }
2389
2390 static void pool_work_wait(struct pool_work *pw, struct pool *pool,
2391 void (*fn)(struct work_struct *))
2392 {
2393 INIT_WORK_ONSTACK(&pw->worker, fn);
2394 init_completion(&pw->complete);
2395 queue_work(pool->wq, &pw->worker);
2396 wait_for_completion(&pw->complete);
2397 }
2398
2399 /*----------------------------------------------------------------*/
2400
2401 struct noflush_work {
2402 struct pool_work pw;
2403 struct thin_c *tc;
2404 };
2405
2406 static struct noflush_work *to_noflush(struct work_struct *ws)
2407 {
2408 return container_of(to_pool_work(ws), struct noflush_work, pw);
2409 }
2410
2411 static void do_noflush_start(struct work_struct *ws)
2412 {
2413 struct noflush_work *w = to_noflush(ws);
2414 w->tc->requeue_mode = true;
2415 requeue_io(w->tc);
2416 pool_work_complete(&w->pw);
2417 }
2418
2419 static void do_noflush_stop(struct work_struct *ws)
2420 {
2421 struct noflush_work *w = to_noflush(ws);
2422 w->tc->requeue_mode = false;
2423 pool_work_complete(&w->pw);
2424 }
2425
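/*
 * Queue fn on the pool's workqueue and block until it has run.
 * Illustrative usage (a sketch of how the thin target's suspend and
 * resume paths are expected to drive requeue mode):
 *
 *	noflush_work(tc, do_noflush_start);	// enter requeue mode
 *	...
 *	noflush_work(tc, do_noflush_stop);	// leave requeue mode
 */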
2426 static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
2427 {
2428 struct noflush_work w;
2429
2430 w.tc = tc;
2431 pool_work_wait(&w.pw, tc->pool, fn);
2432 }
2433
2434 /*----------------------------------------------------------------*/
2435
2436 static enum pool_mode get_pool_mode(struct pool *pool)
2437 {
2438 return pool->pf.mode;
2439 }
2440
2441 static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
2442 {
2443 dm_table_event(pool->ti->table);
2444 DMINFO("%s: switching pool to %s mode",
2445 dm_device_name(pool->pool_md), new_mode);
2446 }
2447
2448 static void notify_of_pool_mode_change_to_oods(struct pool *pool)
2449 {
2450 if (!pool->pf.error_if_no_space)
2451 notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
2452 else
2453 notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
2454 }
2455
2456 static bool passdown_enabled(struct pool_c *pt)
2457 {
2458 return pt->adjusted_pf.discard_passdown;
2459 }
2460
2461 static void set_discard_callbacks(struct pool *pool)
2462 {
2463 struct pool_c *pt = pool->ti->private;
2464
2465 if (passdown_enabled(pt)) {
2466 pool->process_discard_cell = process_discard_cell_passdown;
2467 pool->process_prepared_discard = process_prepared_discard_passdown_pt1;
2468 pool->process_prepared_discard_pt2 = process_prepared_discard_passdown_pt2;
2469 } else {
2470 pool->process_discard_cell = process_discard_cell_no_passdown;
2471 pool->process_prepared_discard = process_prepared_discard_no_passdown;
2472 }
2473 }
2474
2475 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
2476 {
2477 struct pool_c *pt = pool->ti->private;
2478 bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
2479 enum pool_mode old_mode = get_pool_mode(pool);
2480 unsigned long no_space_timeout = READ_ONCE(no_space_timeout_secs) * HZ;
2481
2482 /*
2483 * Never allow the pool to transition to PM_WRITE mode if user
2484 * intervention is required to verify metadata and data consistency.
2485 */
2486 if (new_mode == PM_WRITE && needs_check) {
2487 DMERR("%s: unable to switch pool to write mode until repaired.",
2488 dm_device_name(pool->pool_md));
2489 if (old_mode != new_mode)
2490 new_mode = old_mode;
2491 else
2492 new_mode = PM_READ_ONLY;
2493 }
2494 /*
2495 * If we were in PM_FAIL mode, rollback of metadata failed. We're
2496 * not going to recover without a thin_repair. So we never let the
2497 * pool move out of the old mode.
2498 */
2499 if (old_mode == PM_FAIL)
2500 new_mode = old_mode;
2501
2502 switch (new_mode) {
2503 case PM_FAIL:
2504 if (old_mode != new_mode)
2505 notify_of_pool_mode_change(pool, "failure");
2506 dm_pool_metadata_read_only(pool->pmd);
2507 pool->process_bio = process_bio_fail;
2508 pool->process_discard = process_bio_fail;
2509 pool->process_cell = process_cell_fail;
2510 pool->process_discard_cell = process_cell_fail;
2511 pool->process_prepared_mapping = process_prepared_mapping_fail;
2512 pool->process_prepared_discard = process_prepared_discard_fail;
2513
2514 error_retry_list(pool);
2515 break;
2516
2517 case PM_OUT_OF_METADATA_SPACE:
2518 case PM_READ_ONLY:
2519 if (!is_read_only_pool_mode(old_mode))
2520 notify_of_pool_mode_change(pool, "read-only");
2521 dm_pool_metadata_read_only(pool->pmd);
2522 pool->process_bio = process_bio_read_only;
2523 pool->process_discard = process_bio_success;
2524 pool->process_cell = process_cell_read_only;
2525 pool->process_discard_cell = process_cell_success;
2526 pool->process_prepared_mapping = process_prepared_mapping_fail;
2527 pool->process_prepared_discard = process_prepared_discard_success;
2528
2529 error_retry_list(pool);
2530 break;
2531
2532 case PM_OUT_OF_DATA_SPACE:
2533 /*
2534 * Ideally we'd never hit this state; the low water mark
2535 * would trigger userland to extend the pool before we
2536 * completely run out of data space. However, many small
2537 * IOs to unprovisioned space can consume data space at an
2538 * alarming rate. Adjust your low water mark if you're
2539 * frequently seeing this mode.
2540 */
2541 if (old_mode != new_mode)
2542 notify_of_pool_mode_change_to_oods(pool);
2543 pool->out_of_data_space = true;
2544 pool->process_bio = process_bio_read_only;
2545 pool->process_discard = process_discard_bio;
2546 pool->process_cell = process_cell_read_only;
2547 pool->process_prepared_mapping = process_prepared_mapping;
2548 set_discard_callbacks(pool);
2549
2550 if (!pool->pf.error_if_no_space && no_space_timeout)
2551 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
2552 break;
2553
2554 case PM_WRITE:
2555 if (old_mode != new_mode)
2556 notify_of_pool_mode_change(pool, "write");
2557 if (old_mode == PM_OUT_OF_DATA_SPACE)
2558 cancel_delayed_work_sync(&pool->no_space_timeout);
2559 pool->out_of_data_space = false;
2560 pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
2561 dm_pool_metadata_read_write(pool->pmd);
2562 pool->process_bio = process_bio;
2563 pool->process_discard = process_discard_bio;
2564 pool->process_cell = process_cell;
2565 pool->process_prepared_mapping = process_prepared_mapping;
2566 set_discard_callbacks(pool);
2567 break;
2568 }
2569
2570 pool->pf.mode = new_mode;
2571 /*
2572 * The pool mode may have changed, sync it so bind_control_target()
2573 * doesn't cause an unexpected mode transition on resume.
2574 */
2575 pt->adjusted_pf.mode = new_mode;
2576 }
2577
2578 static void abort_transaction(struct pool *pool)
2579 {
2580 const char *dev_name = dm_device_name(pool->pool_md);
2581
2582 DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
2583 if (dm_pool_abort_metadata(pool->pmd)) {
2584 DMERR("%s: failed to abort metadata transaction", dev_name);
2585 set_pool_mode(pool, PM_FAIL);
2586 }
2587
2588 if (dm_pool_metadata_set_needs_check(pool->pmd)) {
2589 DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
2590 set_pool_mode(pool, PM_FAIL);
2591 }
2592 }
2593
2594 static void metadata_operation_failed(struct pool *pool, const char *op, int r)
2595 {
2596 DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
2597 dm_device_name(pool->pool_md), op, r);
2598
2599 abort_transaction(pool);
2600 set_pool_mode(pool, PM_READ_ONLY);
2601 }
2602
2603 /*----------------------------------------------------------------*/
2604
2605 /*
2606 * Mapping functions.
2607 */
2608
2609 /*
2610 * Called only while mapping a thin bio to hand it over to the workqueue.
2611 */
2612 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
2613 {
2614 unsigned long flags;
2615 struct pool *pool = tc->pool;
2616
2617 spin_lock_irqsave(&tc->lock, flags);
2618 bio_list_add(&tc->deferred_bio_list, bio);
2619 spin_unlock_irqrestore(&tc->lock, flags);
2620
2621 wake_worker(pool);
2622 }
2623
2624 static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
2625 {
2626 struct pool *pool = tc->pool;
2627
2628 throttle_lock(&pool->throttle);
2629 thin_defer_bio(tc, bio);
2630 throttle_unlock(&pool->throttle);
2631 }
2632
2633 static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2634 {
2635 unsigned long flags;
2636 struct pool *pool = tc->pool;
2637
2638 throttle_lock(&pool->throttle);
2639 spin_lock_irqsave(&tc->lock, flags);
2640 list_add_tail(&cell->user_list, &tc->deferred_cells);
2641 spin_unlock_irqrestore(&tc->lock, flags);
2642 throttle_unlock(&pool->throttle);
2643
2644 wake_worker(pool);
2645 }
2646
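/*
 * Initialise the per-bio hook data; called at the start of
 * thin_bio_map() before the bio enters any of the deferred paths above.
 */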
2647 static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
2648 {
2649 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
2650
2651 h->tc = tc;
2652 h->shared_read_entry = NULL;
2653 h->all_io_entry = NULL;
2654 h->overwrite_mapping = NULL;
2655 h->cell = NULL;
2656 }
2657
2658 /*
2659 * Non-blocking function called from the thin target's map function.
2660 */
2661 static int thin_bio_map(struct dm_target *ti, struct bio *bio)
2662 {
2663 int r;
2664 struct thin_c *tc = ti->private;
2665 dm_block_t block = get_bio_block(tc, bio);
2666 struct dm_thin_device *td = tc->td;
2667 struct dm_thin_lookup_result result;
2668 struct dm_bio_prison_cell *virt_cell, *data_cell;
2669 struct dm_cell_key key;
2670
2671 thin_hook_bio(tc, bio);
2672
2673 if (tc->requeue_mode) {
2674 bio->bi_status = BLK_STS_DM_REQUEUE;
2675 bio_endio(bio);
2676 return DM_MAPIO_SUBMITTED;
2677 }
2678
2679 if (get_pool_mode(tc->pool) == PM_FAIL) {
2680 bio_io_error(bio);
2681 return DM_MAPIO_SUBMITTED;
2682 }
2683
2684 if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) {
2685 thin_defer_bio_with_throttle(tc, bio);
2686 return DM_MAPIO_SUBMITTED;
2687 }
2688
2689 /*
2690 * We must hold the virtual cell before doing the lookup, otherwise
2691 * there's a race with discard.
2692 */
2693 build_virtual_key(tc->td, block, &key);
2694 if (bio_detain(tc->pool, &key, bio, &virt_cell))
2695 return DM_MAPIO_SUBMITTED;
2696
2697 r = dm_thin_find_block(td, block, 0, &result);
2698
2699 /*
2700 * Note that we defer readahead too.
2701 */
2702 switch (r) {
2703 case 0:
2704 if (unlikely(result.shared)) {
2705 /*
2706 * We have a race condition here between the
2707 * result.shared value returned by the lookup and
2708 * snapshot creation, which may cause new
2709 * sharing.
2710 *
2711 * To avoid this always quiesce the origin before
2712 * taking the snap. You want to do this anyway to
2713 * ensure a consistent application view
2714 * (i.e. lockfs).
2715 *
2716 * More distant ancestors are irrelevant. The
2717 * shared flag will be set in their case.
2718 */
2719 thin_defer_cell(tc, virt_cell);
2720 return DM_MAPIO_SUBMITTED;
2721 }
2722
2723 build_data_key(tc->td, result.block, &key);
2724 if (bio_detain(tc->pool, &key, bio, &data_cell)) {
2725 cell_defer_no_holder(tc, virt_cell);
2726 return DM_MAPIO_SUBMITTED;
2727 }
2728
2729 inc_all_io_entry(tc->pool, bio);
2730 cell_defer_no_holder(tc, data_cell);
2731 cell_defer_no_holder(tc, virt_cell);
2732
2733 remap(tc, bio, result.block);
2734 return DM_MAPIO_REMAPPED;
2735
2736 case -ENODATA:
2737 case -EWOULDBLOCK:
2738 thin_defer_cell(tc, virt_cell);
2739 return DM_MAPIO_SUBMITTED;
2740
2741 default:
2742 /*
2743 * Must always call bio_io_error on failure.
2744 * dm_thin_find_block can fail with -EINVAL if the
2745 * pool is switched to fail-io mode.
2746 */
2747 bio_io_error(bio);
2748 cell_defer_no_holder(tc, virt_cell);
2749 return DM_MAPIO_SUBMITTED;
2750 }
2751 }
2752
2753 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
2754 {
2755 struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
2756 struct request_queue *q;
2757
2758 if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
2759 return 1;
2760
2761 q = bdev_get_queue(pt->data_dev->bdev);
2762 return bdi_congested(q->backing_dev_info, bdi_bits);
2763 }
2764
2765 static void requeue_bios(struct pool *pool)
2766 {
2767 unsigned long flags;
2768 struct thin_c *tc;
2769
2770 rcu_read_lock();
2771 list_for_each_entry_rcu(tc, &pool->active_thins, list) {
2772 spin_lock_irqsave(&tc->lock, flags);
2773 bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
2774 bio_list_init(&tc->retry_on_resume_list);
2775 spin_unlock_irqrestore(&tc->lock, flags);
2776 }
2777 rcu_read_unlock();
2778 }
2779
2780 /*----------------------------------------------------------------
2781 * Binding of control targets to a pool object
2782 *--------------------------------------------------------------*/
2783 static bool data_dev_supports_discard(struct pool_c *pt)
2784 {
2785 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2786
2787 return q && blk_queue_discard(q);
2788 }
2789
2790 static bool is_factor(sector_t block_size, uint32_t n)
2791 {
2792 return !sector_div(block_size, n);
2793 }
2794
2795 /*
2796 * If discard_passdown was enabled verify that the data device
2797 * supports discards. Disable discard_passdown if not.
2798 */
2799 static void disable_passdown_if_not_supported(struct pool_c *pt)
2800 {
2801 struct pool *pool = pt->pool;
2802 struct block_device *data_bdev = pt->data_dev->bdev;
2803 struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
2804 const char *reason = NULL;
2805 char buf[BDEVNAME_SIZE];
2806
2807 if (!pt->adjusted_pf.discard_passdown)
2808 return;
2809
2810 if (!data_dev_supports_discard(pt))
2811 reason = "discard unsupported";
2812
2813 else if (data_limits->max_discard_sectors < pool->sectors_per_block)
2814 reason = "max discard sectors smaller than a block";
2815
2816 if (reason) {
2817 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
2818 pt->adjusted_pf.discard_passdown = false;
2819 }
2820 }
2821
2822 static int bind_control_target(struct pool *pool, struct dm_target *ti)
2823 {
2824 struct pool_c *pt = ti->private;
2825
2826 /*
2827 * We want to make sure that a pool in PM_FAIL mode is never upgraded.
2828 */
2829 enum pool_mode old_mode = get_pool_mode(pool);
2830 enum pool_mode new_mode = pt->adjusted_pf.mode;
2831
2832 /*
2833 * Don't change the pool's mode until set_pool_mode() below.
2834 * Otherwise the pool's process_* function pointers may
2835 * not match the desired pool mode.
2836 */
2837 pt->adjusted_pf.mode = old_mode;
2838
2839 pool->ti = ti;
2840 pool->pf = pt->adjusted_pf;
2841 pool->low_water_blocks = pt->low_water_blocks;
2842
2843 set_pool_mode(pool, new_mode);
2844
2845 return 0;
2846 }
2847
2848 static void unbind_control_target(struct pool *pool, struct dm_target *ti)
2849 {
2850 if (pool->ti == ti)
2851 pool->ti = NULL;
2852 }
2853
2854 /*----------------------------------------------------------------
2855 * Pool creation
2856 *--------------------------------------------------------------*/
2857 /* Initialize pool features. */
2858 static void pool_features_init(struct pool_features *pf)
2859 {
2860 pf->mode = PM_WRITE;
2861 pf->zero_new_blocks = true;
2862 pf->discard_enabled = true;
2863 pf->discard_passdown = true;
2864 pf->error_if_no_space = false;
2865 }
2866
2867 static void __pool_destroy(struct pool *pool)
2868 {
2869 __pool_table_remove(pool);
2870
2871 vfree(pool->cell_sort_array);
2872 if (dm_pool_metadata_close(pool->pmd) < 0)
2873 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2874
2875 dm_bio_prison_destroy(pool->prison);
2876 dm_kcopyd_client_destroy(pool->copier);
2877
2878 if (pool->wq)
2879 destroy_workqueue(pool->wq);
2880
2881 if (pool->next_mapping)
2882 mempool_free(pool->next_mapping, &pool->mapping_pool);
2883 mempool_exit(&pool->mapping_pool);
2884 dm_deferred_set_destroy(pool->shared_read_ds);
2885 dm_deferred_set_destroy(pool->all_io_ds);
2886 kfree(pool);
2887 }
2888
2889 static struct kmem_cache *_new_mapping_cache;
2890
2891 static struct pool *pool_create(struct mapped_device *pool_md,
2892 struct block_device *metadata_dev,
2893 unsigned long block_size,
2894 int read_only, char **error)
2895 {
2896 int r;
2897 void *err_p;
2898 struct pool *pool;
2899 struct dm_pool_metadata *pmd;
2900 	bool format_device = !read_only;
2901
2902 pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
2903 if (IS_ERR(pmd)) {
2904 *error = "Error creating metadata object";
2905 return (struct pool *)pmd;
2906 }
2907
2908 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
2909 if (!pool) {
2910 *error = "Error allocating memory for pool";
2911 err_p = ERR_PTR(-ENOMEM);
2912 goto bad_pool;
2913 }
2914
2915 pool->pmd = pmd;
2916 pool->sectors_per_block = block_size;
2917 if (block_size & (block_size - 1))
2918 pool->sectors_per_block_shift = -1;
2919 else
2920 pool->sectors_per_block_shift = __ffs(block_size);
2921 pool->low_water_blocks = 0;
2922 pool_features_init(&pool->pf);
2923 pool->prison = dm_bio_prison_create();
2924 if (!pool->prison) {
2925 *error = "Error creating pool's bio prison";
2926 err_p = ERR_PTR(-ENOMEM);
2927 goto bad_prison;
2928 }
2929
2930 pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2931 if (IS_ERR(pool->copier)) {
2932 r = PTR_ERR(pool->copier);
2933 *error = "Error creating pool's kcopyd client";
2934 err_p = ERR_PTR(r);
2935 goto bad_kcopyd_client;
2936 }
2937
2938 /*
2939 * Create singlethreaded workqueue that will service all devices
2940 * that use this metadata.
2941 */
2942 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2943 if (!pool->wq) {
2944 *error = "Error creating pool's workqueue";
2945 err_p = ERR_PTR(-ENOMEM);
2946 goto bad_wq;
2947 }
2948
2949 throttle_init(&pool->throttle);
2950 INIT_WORK(&pool->worker, do_worker);
2951 INIT_DELAYED_WORK(&pool->waker, do_waker);
2952 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
2953 spin_lock_init(&pool->lock);
2954 bio_list_init(&pool->deferred_flush_bios);
2955 INIT_LIST_HEAD(&pool->prepared_mappings);
2956 INIT_LIST_HEAD(&pool->prepared_discards);
2957 INIT_LIST_HEAD(&pool->prepared_discards_pt2);
2958 INIT_LIST_HEAD(&pool->active_thins);
2959 pool->low_water_triggered = false;
2960 pool->suspended = true;
2961 pool->out_of_data_space = false;
2962
2963 pool->shared_read_ds = dm_deferred_set_create();
2964 if (!pool->shared_read_ds) {
2965 *error = "Error creating pool's shared read deferred set";
2966 err_p = ERR_PTR(-ENOMEM);
2967 goto bad_shared_read_ds;
2968 }
2969
2970 pool->all_io_ds = dm_deferred_set_create();
2971 if (!pool->all_io_ds) {
2972 *error = "Error creating pool's all io deferred set";
2973 err_p = ERR_PTR(-ENOMEM);
2974 goto bad_all_io_ds;
2975 }
2976
2977 pool->next_mapping = NULL;
2978 r = mempool_init_slab_pool(&pool->mapping_pool, MAPPING_POOL_SIZE,
2979 _new_mapping_cache);
2980 if (r) {
2981 *error = "Error creating pool's mapping mempool";
2982 err_p = ERR_PTR(r);
2983 goto bad_mapping_pool;
2984 }
2985
2986 pool->cell_sort_array =
2987 vmalloc(array_size(CELL_SORT_ARRAY_SIZE,
2988 sizeof(*pool->cell_sort_array)));
2989 if (!pool->cell_sort_array) {
2990 *error = "Error allocating cell sort array";
2991 err_p = ERR_PTR(-ENOMEM);
2992 goto bad_sort_array;
2993 }
2994
2995 pool->ref_count = 1;
2996 pool->last_commit_jiffies = jiffies;
2997 pool->pool_md = pool_md;
2998 pool->md_dev = metadata_dev;
2999 __pool_table_insert(pool);
3000
3001 return pool;
3002
3003 bad_sort_array:
3004 mempool_exit(&pool->mapping_pool);
3005 bad_mapping_pool:
3006 dm_deferred_set_destroy(pool->all_io_ds);
3007 bad_all_io_ds:
3008 dm_deferred_set_destroy(pool->shared_read_ds);
3009 bad_shared_read_ds:
3010 destroy_workqueue(pool->wq);
3011 bad_wq:
3012 dm_kcopyd_client_destroy(pool->copier);
3013 bad_kcopyd_client:
3014 dm_bio_prison_destroy(pool->prison);
3015 bad_prison:
3016 kfree(pool);
3017 bad_pool:
3018 if (dm_pool_metadata_close(pmd))
3019 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
3020
3021 return err_p;
3022 }
3023
3024 static void __pool_inc(struct pool *pool)
3025 {
3026 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
3027 pool->ref_count++;
3028 }
3029
3030 static void __pool_dec(struct pool *pool)
3031 {
3032 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
3033 BUG_ON(!pool->ref_count);
3034 if (!--pool->ref_count)
3035 __pool_destroy(pool);
3036 }
3037
3038 static struct pool *__pool_find(struct mapped_device *pool_md,
3039 struct block_device *metadata_dev,
3040 unsigned long block_size, int read_only,
3041 char **error, int *created)
3042 {
3043 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
3044
3045 if (pool) {
3046 if (pool->pool_md != pool_md) {
3047 *error = "metadata device already in use by a pool";
3048 return ERR_PTR(-EBUSY);
3049 }
3050 __pool_inc(pool);
3051
3052 } else {
3053 pool = __pool_table_lookup(pool_md);
3054 if (pool) {
3055 if (pool->md_dev != metadata_dev) {
3056 *error = "different pool cannot replace a pool";
3057 return ERR_PTR(-EINVAL);
3058 }
3059 __pool_inc(pool);
3060
3061 } else {
3062 pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
3063 *created = 1;
3064 }
3065 }
3066
3067 return pool;
3068 }
3069
3070 /*----------------------------------------------------------------
3071 * Pool target methods
3072 *--------------------------------------------------------------*/
3073 static void pool_dtr(struct dm_target *ti)
3074 {
3075 struct pool_c *pt = ti->private;
3076
3077 mutex_lock(&dm_thin_pool_table.mutex);
3078
3079 unbind_control_target(pt->pool, ti);
3080 __pool_dec(pt->pool);
3081 dm_put_device(ti, pt->metadata_dev);
3082 dm_put_device(ti, pt->data_dev);
3083 kfree(pt);
3084
3085 mutex_unlock(&dm_thin_pool_table.mutex);
3086 }
3087
3088 static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
3089 struct dm_target *ti)
3090 {
3091 int r;
3092 unsigned argc;
3093 const char *arg_name;
3094
3095 static const struct dm_arg _args[] = {
3096 {0, 4, "Invalid number of pool feature arguments"},
3097 };
3098
3099 /*
3100 * No feature arguments supplied.
3101 */
3102 if (!as->argc)
3103 return 0;
3104
3105 r = dm_read_arg_group(_args, as, &argc, &ti->error);
3106 if (r)
3107 return -EINVAL;
3108
3109 while (argc && !r) {
3110 arg_name = dm_shift_arg(as);
3111 argc--;
3112
3113 if (!strcasecmp(arg_name, "skip_block_zeroing"))
3114 pf->zero_new_blocks = false;
3115
3116 else if (!strcasecmp(arg_name, "ignore_discard"))
3117 pf->discard_enabled = false;
3118
3119 else if (!strcasecmp(arg_name, "no_discard_passdown"))
3120 pf->discard_passdown = false;
3121
3122 else if (!strcasecmp(arg_name, "read_only"))
3123 pf->mode = PM_READ_ONLY;
3124
3125 else if (!strcasecmp(arg_name, "error_if_no_space"))
3126 pf->error_if_no_space = true;
3127
3128 else {
3129 ti->error = "Unrecognised pool feature requested";
3130 r = -EINVAL;
3131 break;
3132 }
3133 }
3134
3135 return r;
3136 }
3137
3138 static void metadata_low_callback(void *context)
3139 {
3140 struct pool *pool = context;
3141
3142 DMWARN("%s: reached low water mark for metadata device: sending event.",
3143 dm_device_name(pool->pool_md));
3144
3145 dm_table_event(pool->ti->table);
3146 }
3147
3148 static sector_t get_dev_size(struct block_device *bdev)
3149 {
3150 return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
3151 }
3152
3153 static void warn_if_metadata_device_too_big(struct block_device *bdev)
3154 {
3155 sector_t metadata_dev_size = get_dev_size(bdev);
3156 char buffer[BDEVNAME_SIZE];
3157
3158 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
3159 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
3160 bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
3161 }
3162
3163 static sector_t get_metadata_dev_size(struct block_device *bdev)
3164 {
3165 sector_t metadata_dev_size = get_dev_size(bdev);
3166
3167 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
3168 metadata_dev_size = THIN_METADATA_MAX_SECTORS;
3169
3170 return metadata_dev_size;
3171 }
3172
3173 static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
3174 {
3175 sector_t metadata_dev_size = get_metadata_dev_size(bdev);
3176
3177 sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
3178
3179 return metadata_dev_size;
3180 }
3181
3182 /*
3183 * When a metadata threshold is crossed a dm event is triggered, and
3184 * userland should respond by growing the metadata device. We could let
3185 * userland set the threshold, like we do with the data threshold, but I'm
3186 * not sure they know enough to do this well.
3187 */
3188 static dm_block_t calc_metadata_threshold(struct pool_c *pt)
3189 {
3190 /*
3191 * 4M is ample for all ops with the possible exception of thin
3192 * device deletion which is harmless if it fails (just retry the
3193 * delete after you've grown the device).
3194 */
3195 dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
3196 return min((dm_block_t)1024ULL /* 4M */, quarter);
3197 }
3198
3199 /*
3200 * thin-pool <metadata dev> <data dev>
3201 * <data block size (sectors)>
3202 * <low water mark (blocks)>
3203 * [<#feature args> [<arg>]*]
3204 *
3205 * Optional feature arguments are:
3206 * skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
3207 * ignore_discard: disable discard
3208 * no_discard_passdown: don't pass discards down to the data device
3209 * read_only: Don't allow any changes to be made to the pool metadata.
3210 * error_if_no_space: error IOs, instead of queueing, if no space.
3211 */
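/*
 * Illustrative example (device names and sizes are assumptions, not
 * part of this driver): a 20GiB pool built from /dev/sdb (metadata)
 * and /dev/sdc (data), with 64KB (128-sector) blocks, a low water
 * mark of 1024 blocks and block zeroing disabled:
 *
 *	dmsetup create pool --table \
 *	  "0 41943040 thin-pool /dev/sdb /dev/sdc 128 1024 1 skip_block_zeroing"
 */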
3212 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
3213 {
3214 int r, pool_created = 0;
3215 struct pool_c *pt;
3216 struct pool *pool;
3217 struct pool_features pf;
3218 struct dm_arg_set as;
3219 struct dm_dev *data_dev;
3220 unsigned long block_size;
3221 dm_block_t low_water_blocks;
3222 struct dm_dev *metadata_dev;
3223 fmode_t metadata_mode;
3224
3225 /*
3226 * FIXME Remove validation from scope of lock.
3227 */
3228 mutex_lock(&dm_thin_pool_table.mutex);
3229
3230 if (argc < 4) {
3231 ti->error = "Invalid argument count";
3232 r = -EINVAL;
3233 goto out_unlock;
3234 }
3235
3236 as.argc = argc;
3237 as.argv = argv;
3238
3239 /*
3240 * Set default pool features.
3241 */
3242 pool_features_init(&pf);
3243
3244 dm_consume_args(&as, 4);
3245 r = parse_pool_features(&as, &pf, ti);
3246 if (r)
3247 goto out_unlock;
3248
3249 metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
3250 r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
3251 if (r) {
3252 ti->error = "Error opening metadata block device";
3253 goto out_unlock;
3254 }
3255 warn_if_metadata_device_too_big(metadata_dev->bdev);
3256
3257 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
3258 if (r) {
3259 ti->error = "Error getting data device";
3260 goto out_metadata;
3261 }
3262
3263 if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
3264 block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
3265 block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
3266 block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
3267 ti->error = "Invalid block size";
3268 r = -EINVAL;
3269 goto out;
3270 }
3271
3272 if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
3273 ti->error = "Invalid low water mark";
3274 r = -EINVAL;
3275 goto out;
3276 }
3277
3278 pt = kzalloc(sizeof(*pt), GFP_KERNEL);
3279 if (!pt) {
3280 r = -ENOMEM;
3281 goto out;
3282 }
3283
3284 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
3285 block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
3286 if (IS_ERR(pool)) {
3287 r = PTR_ERR(pool);
3288 goto out_free_pt;
3289 }
3290
3291 /*
3292 * 'pool_created' reflects whether this is the first table load.
3293 * Top level discard support is not allowed to be changed after
3294 * initial load. This would require a pool reload to trigger thin
3295 * device changes.
3296 */
3297 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
3298 ti->error = "Discard support cannot be disabled once enabled";
3299 r = -EINVAL;
3300 goto out_flags_changed;
3301 }
3302
3303 pt->pool = pool;
3304 pt->ti = ti;
3305 pt->metadata_dev = metadata_dev;
3306 pt->data_dev = data_dev;
3307 pt->low_water_blocks = low_water_blocks;
3308 pt->adjusted_pf = pt->requested_pf = pf;
3309 ti->num_flush_bios = 1;
3310
3311 /*
3312 * Only need to enable discards if the pool should pass
3313 * them down to the data device. The thin device's discard
3314 * processing will cause mappings to be removed from the btree.
3315 */
3316 if (pf.discard_enabled && pf.discard_passdown) {
3317 ti->num_discard_bios = 1;
3318
3319 /*
3320 * Setting 'discards_supported' circumvents the normal
3321 * stacking of discard limits (this keeps the pool and
3322 * thin devices' discard limits consistent).
3323 */
3324 ti->discards_supported = true;
3325 }
3326 ti->private = pt;
3327
3328 r = dm_pool_register_metadata_threshold(pt->pool->pmd,
3329 calc_metadata_threshold(pt),
3330 metadata_low_callback,
3331 pool);
3332 if (r)
3333 goto out_flags_changed;
3334
3335 pt->callbacks.congested_fn = pool_is_congested;
3336 dm_table_add_target_callbacks(ti->table, &pt->callbacks);
3337
3338 mutex_unlock(&dm_thin_pool_table.mutex);
3339
3340 return 0;
3341
3342 out_flags_changed:
3343 __pool_dec(pool);
3344 out_free_pt:
3345 kfree(pt);
3346 out:
3347 dm_put_device(ti, data_dev);
3348 out_metadata:
3349 dm_put_device(ti, metadata_dev);
3350 out_unlock:
3351 mutex_unlock(&dm_thin_pool_table.mutex);
3352
3353 return r;
3354 }
3355
3356 static int pool_map(struct dm_target *ti, struct bio *bio)
3357 {
3358 int r;
3359 struct pool_c *pt = ti->private;
3360 struct pool *pool = pt->pool;
3361 unsigned long flags;
3362
3363 /*
3364 * As this is a singleton target, ti->begin is always zero.
3365 */
3366 spin_lock_irqsave(&pool->lock, flags);
3367 bio_set_dev(bio, pt->data_dev->bdev);
3368 r = DM_MAPIO_REMAPPED;
3369 spin_unlock_irqrestore(&pool->lock, flags);
3370
3371 return r;
3372 }
3373
3374 static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
3375 {
3376 int r;
3377 struct pool_c *pt = ti->private;
3378 struct pool *pool = pt->pool;
3379 sector_t data_size = ti->len;
3380 dm_block_t sb_data_size;
3381
3382 *need_commit = false;
3383
3384 (void) sector_div(data_size, pool->sectors_per_block);
3385
3386 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
3387 if (r) {
3388 DMERR("%s: failed to retrieve data device size",
3389 dm_device_name(pool->pool_md));
3390 return r;
3391 }
3392
3393 if (data_size < sb_data_size) {
3394 DMERR("%s: pool target (%llu blocks) too small: expected %llu",
3395 dm_device_name(pool->pool_md),
3396 (unsigned long long)data_size, sb_data_size);
3397 return -EINVAL;
3398
3399 } else if (data_size > sb_data_size) {
3400 if (dm_pool_metadata_needs_check(pool->pmd)) {
3401 DMERR("%s: unable to grow the data device until repaired.",
3402 dm_device_name(pool->pool_md));
3403 return 0;
3404 }
3405
3406 if (sb_data_size)
3407 DMINFO("%s: growing the data device from %llu to %llu blocks",
3408 dm_device_name(pool->pool_md),
3409 sb_data_size, (unsigned long long)data_size);
3410 r = dm_pool_resize_data_dev(pool->pmd, data_size);
3411 if (r) {
3412 metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
3413 return r;
3414 }
3415
3416 *need_commit = true;
3417 }
3418
3419 return 0;
3420 }
3421
3422 static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
3423 {
3424 int r;
3425 struct pool_c *pt = ti->private;
3426 struct pool *pool = pt->pool;
3427 dm_block_t metadata_dev_size, sb_metadata_dev_size;
3428
3429 *need_commit = false;
3430
3431 metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
3432
3433 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
3434 if (r) {
3435 DMERR("%s: failed to retrieve metadata device size",
3436 dm_device_name(pool->pool_md));
3437 return r;
3438 }
3439
3440 if (metadata_dev_size < sb_metadata_dev_size) {
3441 DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
3442 dm_device_name(pool->pool_md),
3443 metadata_dev_size, sb_metadata_dev_size);
3444 return -EINVAL;
3445
3446 } else if (metadata_dev_size > sb_metadata_dev_size) {
3447 if (dm_pool_metadata_needs_check(pool->pmd)) {
3448 DMERR("%s: unable to grow the metadata device until repaired.",
3449 dm_device_name(pool->pool_md));
3450 return 0;
3451 }
3452
3453 warn_if_metadata_device_too_big(pool->md_dev);
3454 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
3455 dm_device_name(pool->pool_md),
3456 sb_metadata_dev_size, metadata_dev_size);
3457
3458 if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
3459 set_pool_mode(pool, PM_WRITE);
3460
3461 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
3462 if (r) {
3463 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
3464 return r;
3465 }
3466
3467 *need_commit = true;
3468 }
3469
3470 return 0;
3471 }
3472
3473 /*
3474 * Retrieves the number of blocks of the data device from
3475 * the superblock and compares it to the actual device size,
3476 * thus resizing the data device in case it has grown.
3477 *
3478 * This both copes with opening preallocated data devices in the ctr
3479 * being followed by a resume
3480 * -and-
3481 * calling the resume method individually after userspace has
3482 * grown the data device in reaction to a table event.
3483 */
3484 static int pool_preresume(struct dm_target *ti)
3485 {
3486 int r;
3487 bool need_commit1, need_commit2;
3488 struct pool_c *pt = ti->private;
3489 struct pool *pool = pt->pool;
3490
3491 /*
3492 * Take control of the pool object.
3493 */
3494 r = bind_control_target(pool, ti);
3495 if (r)
3496 return r;
3497
3498 r = maybe_resize_data_dev(ti, &need_commit1);
3499 if (r)
3500 return r;
3501
3502 r = maybe_resize_metadata_dev(ti, &need_commit2);
3503 if (r)
3504 return r;
3505
3506 if (need_commit1 || need_commit2)
3507 (void) commit(pool);
3508
3509 return 0;
3510 }
3511
3512 static void pool_suspend_active_thins(struct pool *pool)
3513 {
3514 struct thin_c *tc;
3515
3516 /* Suspend all active thin devices */
3517 tc = get_first_thin(pool);
3518 while (tc) {
3519 dm_internal_suspend_noflush(tc->thin_md);
3520 tc = get_next_thin(pool, tc);
3521 }
3522 }
3523
3524 static void pool_resume_active_thins(struct pool *pool)
3525 {
3526 struct thin_c *tc;
3527
3528 /* Resume all active thin devices */
3529 tc = get_first_thin(pool);
3530 while (tc) {
3531 dm_internal_resume(tc->thin_md);
3532 tc = get_next_thin(pool, tc);
3533 }
3534 }
3535
3536 static void pool_resume(struct dm_target *ti)
3537 {
3538 struct pool_c *pt = ti->private;
3539 struct pool *pool = pt->pool;
3540 unsigned long flags;
3541
3542 /*
3543 * Must requeue active_thins' bios and then resume
3544 * active_thins _before_ clearing 'suspend' flag.
3545 */
3546 requeue_bios(pool);
3547 pool_resume_active_thins(pool);
3548
3549 spin_lock_irqsave(&pool->lock, flags);
3550 pool->low_water_triggered = false;
3551 pool->suspended = false;
3552 spin_unlock_irqrestore(&pool->lock, flags);
3553
3554 do_waker(&pool->waker.work);
3555 }
3556
3557 static void pool_presuspend(struct dm_target *ti)
3558 {
3559 struct pool_c *pt = ti->private;
3560 struct pool *pool = pt->pool;
3561 unsigned long flags;
3562
3563 spin_lock_irqsave(&pool->lock, flags);
3564 pool->suspended = true;
3565 spin_unlock_irqrestore(&pool->lock, flags);
3566
3567 pool_suspend_active_thins(pool);
3568 }
3569
3570 static void pool_presuspend_undo(struct dm_target *ti)
3571 {
3572 struct pool_c *pt = ti->private;
3573 struct pool *pool = pt->pool;
3574 unsigned long flags;
3575
3576 pool_resume_active_thins(pool);
3577
3578 spin_lock_irqsave(&pool->lock, flags);
3579 pool->suspended = false;
3580 spin_unlock_irqrestore(&pool->lock, flags);
3581 }
3582
3583 static void pool_postsuspend(struct dm_target *ti)
3584 {
3585 struct pool_c *pt = ti->private;
3586 struct pool *pool = pt->pool;
3587
3588 cancel_delayed_work_sync(&pool->waker);
3589 cancel_delayed_work_sync(&pool->no_space_timeout);
3590 flush_workqueue(pool->wq);
3591 (void) commit(pool);
3592 }
3593
3594 static int check_arg_count(unsigned argc, unsigned args_required)
3595 {
3596 if (argc != args_required) {
3597 DMWARN("Message received with %u arguments instead of %u.",
3598 argc, args_required);
3599 return -EINVAL;
3600 }
3601
3602 return 0;
3603 }
3604
3605 static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
3606 {
3607 if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
3608 *dev_id <= MAX_DEV_ID)
3609 return 0;
3610
3611 if (warning)
3612 DMWARN("Message received with invalid device id: %s", arg);
3613
3614 return -EINVAL;
3615 }
3616
3617 static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
3618 {
3619 dm_thin_id dev_id;
3620 int r;
3621
3622 r = check_arg_count(argc, 2);
3623 if (r)
3624 return r;
3625
3626 r = read_dev_id(argv[1], &dev_id, 1);
3627 if (r)
3628 return r;
3629
3630 r = dm_pool_create_thin(pool->pmd, dev_id);
3631 if (r) {
3632 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
3633 argv[1]);
3634 return r;
3635 }
3636
3637 return 0;
3638 }
3639
3640 static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3641 {
3642 dm_thin_id dev_id;
3643 dm_thin_id origin_dev_id;
3644 int r;
3645
3646 r = check_arg_count(argc, 3);
3647 if (r)
3648 return r;
3649
3650 r = read_dev_id(argv[1], &dev_id, 1);
3651 if (r)
3652 return r;
3653
3654 r = read_dev_id(argv[2], &origin_dev_id, 1);
3655 if (r)
3656 return r;
3657
3658 r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
3659 if (r) {
3660 DMWARN("Creation of new snapshot %s of device %s failed.",
3661 argv[1], argv[2]);
3662 return r;
3663 }
3664
3665 return 0;
3666 }
3667
3668 static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
3669 {
3670 dm_thin_id dev_id;
3671 int r;
3672
3673 r = check_arg_count(argc, 2);
3674 if (r)
3675 return r;
3676
3677 r = read_dev_id(argv[1], &dev_id, 1);
3678 if (r)
3679 return r;
3680
3681 r = dm_pool_delete_thin_device(pool->pmd, dev_id);
3682 if (r)
3683 DMWARN("Deletion of thin device %s failed.", argv[1]);
3684
3685 return r;
3686 }
3687
3688 static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
3689 {
3690 dm_thin_id old_id, new_id;
3691 int r;
3692
3693 r = check_arg_count(argc, 3);
3694 if (r)
3695 return r;
3696
3697 if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
3698 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
3699 return -EINVAL;
3700 }
3701
3702 if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
3703 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
3704 return -EINVAL;
3705 }
3706
3707 r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
3708 if (r) {
3709 DMWARN("Failed to change transaction id from %s to %s.",
3710 argv[1], argv[2]);
3711 return r;
3712 }
3713
3714 return 0;
3715 }
3716
3717 static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3718 {
3719 int r;
3720
3721 r = check_arg_count(argc, 1);
3722 if (r)
3723 return r;
3724
3725 (void) commit(pool);
3726
3727 r = dm_pool_reserve_metadata_snap(pool->pmd);
3728 if (r)
3729 DMWARN("reserve_metadata_snap message failed.");
3730
3731 return r;
3732 }
3733
3734 static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3735 {
3736 int r;
3737
3738 r = check_arg_count(argc, 1);
3739 if (r)
3740 return r;
3741
3742 r = dm_pool_release_metadata_snap(pool->pmd);
3743 if (r)
3744 DMWARN("release_metadata_snap message failed.");
3745
3746 return r;
3747 }
3748
3749 /*
3750 * Messages supported:
3751 * create_thin <dev_id>
3752 * create_snap <dev_id> <origin_id>
3753 * delete <dev_id>
3754 * set_transaction_id <current_trans_id> <new_trans_id>
3755 * reserve_metadata_snap
3756 * release_metadata_snap
3757 */
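/*
 * Illustrative usage (device name assumed, not part of this file): these
 * messages are sent to an active pool target with dmsetup, e.g.
 *
 *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *   dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
 *   dmsetup message /dev/mapper/pool 0 "delete 1"
 */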
3758 static int pool_message(struct dm_target *ti, unsigned argc, char **argv,
3759 char *result, unsigned maxlen)
3760 {
3761 int r = -EINVAL;
3762 struct pool_c *pt = ti->private;
3763 struct pool *pool = pt->pool;
3764
3765 if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
3766 DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
3767 dm_device_name(pool->pool_md));
3768 return -EOPNOTSUPP;
3769 }
3770
3771 if (!strcasecmp(argv[0], "create_thin"))
3772 r = process_create_thin_mesg(argc, argv, pool);
3773
3774 else if (!strcasecmp(argv[0], "create_snap"))
3775 r = process_create_snap_mesg(argc, argv, pool);
3776
3777 else if (!strcasecmp(argv[0], "delete"))
3778 r = process_delete_mesg(argc, argv, pool);
3779
3780 else if (!strcasecmp(argv[0], "set_transaction_id"))
3781 r = process_set_transaction_id_mesg(argc, argv, pool);
3782
3783 else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
3784 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
3785
3786 else if (!strcasecmp(argv[0], "release_metadata_snap"))
3787 r = process_release_metadata_snap_mesg(argc, argv, pool);
3788
3789 else
3790 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
3791
3792 if (!r)
3793 (void) commit(pool);
3794
3795 return r;
3796 }
3797
3798 static void emit_flags(struct pool_features *pf, char *result,
3799 unsigned sz, unsigned maxlen)
3800 {
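/*
 * count must equal the number of feature strings emitted below; each
 * negated/true condition in the sum contributes exactly one argument.
 */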
3801 unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
3802 !pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
3803 pf->error_if_no_space;
3804 DMEMIT("%u ", count);
3805
3806 if (!pf->zero_new_blocks)
3807 DMEMIT("skip_block_zeroing ");
3808
3809 if (!pf->discard_enabled)
3810 DMEMIT("ignore_discard ");
3811
3812 if (!pf->discard_passdown)
3813 DMEMIT("no_discard_passdown ");
3814
3815 if (pf->mode == PM_READ_ONLY)
3816 DMEMIT("read_only ");
3817
3818 if (pf->error_if_no_space)
3819 DMEMIT("error_if_no_space ");
3820 }
3821
3822 /*
3823 * Status line is:
3824 * <transaction id> <used metadata sectors>/<total metadata sectors>
3825 * <used data sectors>/<total data sectors> <held metadata root>
3826 * <pool mode> <discard config> <no space config> <needs_check>
 * <metadata low watermark>
3827 */
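/*
 * Example status line (illustrative numbers only): a writable pool with
 * discard passdown, queueing when out of space, no held metadata root and
 * no needs_check flag might report:
 *
 *   2 141/4161600 10240/838860 - rw discard_passdown queue_if_no_space - 1024
 */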
3828 static void pool_status(struct dm_target *ti, status_type_t type,
3829 unsigned status_flags, char *result, unsigned maxlen)
3830 {
3831 int r;
3832 unsigned sz = 0;
3833 uint64_t transaction_id;
3834 dm_block_t nr_free_blocks_data;
3835 dm_block_t nr_free_blocks_metadata;
3836 dm_block_t nr_blocks_data;
3837 dm_block_t nr_blocks_metadata;
3838 dm_block_t held_root;
3839 enum pool_mode mode;
3840 char buf[BDEVNAME_SIZE];
3841 char buf2[BDEVNAME_SIZE];
3842 struct pool_c *pt = ti->private;
3843 struct pool *pool = pt->pool;
3844
3845 switch (type) {
3846 case STATUSTYPE_INFO:
3847 if (get_pool_mode(pool) == PM_FAIL) {
3848 DMEMIT("Fail");
3849 break;
3850 }
3851
3852 /* Commit to ensure statistics aren't out-of-date */
3853 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
3854 (void) commit(pool);
3855
3856 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
3857 if (r) {
3858 DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
3859 dm_device_name(pool->pool_md), r);
3860 goto err;
3861 }
3862
3863 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
3864 if (r) {
3865 DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
3866 dm_device_name(pool->pool_md), r);
3867 goto err;
3868 }
3869
3870 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
3871 if (r) {
3872 DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
3873 dm_device_name(pool->pool_md), r);
3874 goto err;
3875 }
3876
3877 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
3878 if (r) {
3879 DMERR("%s: dm_pool_get_free_block_count returned %d",
3880 dm_device_name(pool->pool_md), r);
3881 goto err;
3882 }
3883
3884 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
3885 if (r) {
3886 DMERR("%s: dm_pool_get_data_dev_size returned %d",
3887 dm_device_name(pool->pool_md), r);
3888 goto err;
3889 }
3890
3891 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
3892 if (r) {
3893 DMERR("%s: dm_pool_get_metadata_snap returned %d",
3894 dm_device_name(pool->pool_md), r);
3895 goto err;
3896 }
3897
3898 DMEMIT("%llu %llu/%llu %llu/%llu ",
3899 (unsigned long long)transaction_id,
3900 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
3901 (unsigned long long)nr_blocks_metadata,
3902 (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
3903 (unsigned long long)nr_blocks_data);
3904
3905 if (held_root)
3906 DMEMIT("%llu ", held_root);
3907 else
3908 DMEMIT("- ");
3909
3910 mode = get_pool_mode(pool);
3911 if (mode == PM_OUT_OF_DATA_SPACE)
3912 DMEMIT("out_of_data_space ");
3913 else if (is_read_only_pool_mode(mode))
3914 DMEMIT("ro ");
3915 else
3916 DMEMIT("rw ");
3917
3918 if (!pool->pf.discard_enabled)
3919 DMEMIT("ignore_discard ");
3920 else if (pool->pf.discard_passdown)
3921 DMEMIT("discard_passdown ");
3922 else
3923 DMEMIT("no_discard_passdown ");
3924
3925 if (pool->pf.error_if_no_space)
3926 DMEMIT("error_if_no_space ");
3927 else
3928 DMEMIT("queue_if_no_space ");
3929
3930 if (dm_pool_metadata_needs_check(pool->pmd))
3931 DMEMIT("needs_check ");
3932 else
3933 DMEMIT("- ");
3934
3935 DMEMIT("%llu ", (unsigned long long)calc_metadata_threshold(pt));
3936
3937 break;
3938
3939 case STATUSTYPE_TABLE:
3940 DMEMIT("%s %s %lu %llu ",
3941 format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
3942 format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
3943 (unsigned long)pool->sectors_per_block,
3944 (unsigned long long)pt->low_water_blocks);
3945 emit_flags(&pt->requested_pf, result, sz, maxlen);
3946 break;
3947 }
3948 return;
3949
3950 err:
3951 DMEMIT("Error");
3952 }
3953
3954 static int pool_iterate_devices(struct dm_target *ti,
3955 iterate_devices_callout_fn fn, void *data)
3956 {
3957 struct pool_c *pt = ti->private;
3958
3959 return fn(ti, pt->data_dev, 0, ti->len, data);
3960 }
3961
3962 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
3963 {
3964 struct pool_c *pt = ti->private;
3965 struct pool *pool = pt->pool;
3966 sector_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
3967
3968 /*
3969 * If max_sectors is smaller than pool->sectors_per_block, adjust it
3970 * to the highest possible power-of-2 factor of pool->sectors_per_block.
3971 * This is especially beneficial when the pool's data device is a RAID
3972 * device that has a full stripe width that matches pool->sectors_per_block
3973 * -- because even though partial RAID stripe-sized IOs will be issued to a
3974 * single RAID stripe, when aggregated they will end on a full RAID stripe
3975 * boundary, which avoids additional partial RAID stripe writes cascading.
3976 */
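/*
 * Worked example (hypothetical numbers): with sectors_per_block = 768 and
 * a stacked max_sectors of 520, the loop below first rounds 520 down to
 * 512; 512 does not divide 768 and is a power of two, so it is decremented
 * to 511 and rounded down to 256, which divides 768 evenly, ending the loop.
 */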
3977 if (limits->max_sectors < pool->sectors_per_block) {
3978 while (!is_factor(pool->sectors_per_block, limits->max_sectors)) {
3979 if ((limits->max_sectors & (limits->max_sectors - 1)) == 0)
3980 limits->max_sectors--;
3981 limits->max_sectors = rounddown_pow_of_two(limits->max_sectors);
3982 }
3983 }
3984
3985 /*
3986 * If the system-determined stacked limits are compatible with the
3987 * pool's blocksize (io_opt is a factor) do not override them.
3988 */
3989 if (io_opt_sectors < pool->sectors_per_block ||
3990 !is_factor(io_opt_sectors, pool->sectors_per_block)) {
3991 if (is_factor(pool->sectors_per_block, limits->max_sectors))
3992 blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);
3993 else
3994 blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
3995 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
3996 }
3997
3998 /*
3999 * pt->adjusted_pf is a staging area for the actual features to use.
4000 * They get transferred to the live pool in bind_control_target()
4001 * called from pool_preresume().
4002 */
4003 if (!pt->adjusted_pf.discard_enabled) {
4004 /*
4005 * Must explicitly disallow stacking discard limits otherwise the
4006 * block layer will stack them if pool's data device has support.
4007 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
4008 * user to see that, so make sure to set all discard limits to 0.
4009 */
4010 limits->discard_granularity = 0;
4011 return;
4012 }
4013
4014 disable_passdown_if_not_supported(pt);
4015
4016 /*
4017 * The pool uses the same discard limits as the underlying data
4018 * device. DM core has already set this up.
4019 */
4020 }
4021
4022 static struct target_type pool_target = {
4023 .name = "thin-pool",
4024 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
4025 DM_TARGET_IMMUTABLE,
4026 .version = {1, 20, 0},
4027 .module = THIS_MODULE,
4028 .ctr = pool_ctr,
4029 .dtr = pool_dtr,
4030 .map = pool_map,
4031 .presuspend = pool_presuspend,
4032 .presuspend_undo = pool_presuspend_undo,
4033 .postsuspend = pool_postsuspend,
4034 .preresume = pool_preresume,
4035 .resume = pool_resume,
4036 .message = pool_message,
4037 .status = pool_status,
4038 .iterate_devices = pool_iterate_devices,
4039 .io_hints = pool_io_hints,
4040 };
4041
4042 /*----------------------------------------------------------------
4043 * Thin target methods
4044 *--------------------------------------------------------------*/
4045 static void thin_get(struct thin_c *tc)
4046 {
4047 atomic_inc(&tc->refcount);
4048 }
4049
4050 static void thin_put(struct thin_c *tc)
4051 {
4052 if (atomic_dec_and_test(&tc->refcount))
4053 complete(&tc->can_destroy);
4054 }
4055
4056 static void thin_dtr(struct dm_target *ti)
4057 {
4058 struct thin_c *tc = ti->private;
4059 unsigned long flags;
4060
4061 spin_lock_irqsave(&tc->pool->lock, flags);
4062 list_del_rcu(&tc->list);
4063 spin_unlock_irqrestore(&tc->pool->lock, flags);
4064 synchronize_rcu();
4065
4066 thin_put(tc);
4067 wait_for_completion(&tc->can_destroy);
4068
4069 mutex_lock(&dm_thin_pool_table.mutex);
4070
4071 __pool_dec(tc->pool);
4072 dm_pool_close_thin_device(tc->td);
4073 dm_put_device(ti, tc->pool_dev);
4074 if (tc->origin_dev)
4075 dm_put_device(ti, tc->origin_dev);
4076 kfree(tc);
4077
4078 mutex_unlock(&dm_thin_pool_table.mutex);
4079 }
4080
4081 /*
4082 * Thin target parameters:
4083 *
4084 * <pool_dev> <dev_id> [origin_dev]
4085 *
4086 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
4087 * dev_id: the internal device identifier
4088 * origin_dev: a device external to the pool that should act as the origin
4089 *
4090 * If the pool device has discards disabled, they get disabled for the thin
4091 * device as well.
4092 */
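/*
 * Illustrative table line (names and sizes assumed): a 1GiB thin volume
 * backed by device id 0 of an existing pool could be activated with
 *
 *   dmsetup create thin0 --table "0 2097152 thin /dev/mapper/pool 0"
 */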
4093 static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
4094 {
4095 int r;
4096 struct thin_c *tc;
4097 struct dm_dev *pool_dev, *origin_dev;
4098 struct mapped_device *pool_md;
4099 unsigned long flags;
4100
4101 mutex_lock(&dm_thin_pool_table.mutex);
4102
4103 if (argc != 2 && argc != 3) {
4104 ti->error = "Invalid argument count";
4105 r = -EINVAL;
4106 goto out_unlock;
4107 }
4108
4109 tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
4110 if (!tc) {
4111 ti->error = "Out of memory";
4112 r = -ENOMEM;
4113 goto out_unlock;
4114 }
4115 tc->thin_md = dm_table_get_md(ti->table);
4116 spin_lock_init(&tc->lock);
4117 INIT_LIST_HEAD(&tc->deferred_cells);
4118 bio_list_init(&tc->deferred_bio_list);
4119 bio_list_init(&tc->retry_on_resume_list);
4120 tc->sort_bio_list = RB_ROOT;
4121
4122 if (argc == 3) {
4123 r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
4124 if (r) {
4125 ti->error = "Error opening origin device";
4126 goto bad_origin_dev;
4127 }
4128 tc->origin_dev = origin_dev;
4129 }
4130
4131 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
4132 if (r) {
4133 ti->error = "Error opening pool device";
4134 goto bad_pool_dev;
4135 }
4136 tc->pool_dev = pool_dev;
4137
4138 if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
4139 ti->error = "Invalid device id";
4140 r = -EINVAL;
4141 goto bad_common;
4142 }
4143
4144 pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
4145 if (!pool_md) {
4146 ti->error = "Couldn't get pool mapped device";
4147 r = -EINVAL;
4148 goto bad_common;
4149 }
4150
4151 tc->pool = __pool_table_lookup(pool_md);
4152 if (!tc->pool) {
4153 ti->error = "Couldn't find pool object";
4154 r = -EINVAL;
4155 goto bad_pool_lookup;
4156 }
4157 __pool_inc(tc->pool);
4158
4159 if (get_pool_mode(tc->pool) == PM_FAIL) {
4160 ti->error = "Couldn't open thin device: pool is in fail mode";
4161 r = -EINVAL;
4162 goto bad_pool;
4163 }
4164
4165 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
4166 if (r) {
4167 ti->error = "Couldn't open thin internal device";
4168 goto bad_pool;
4169 }
4170
4171 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
4172 if (r)
4173 goto bad;
4174
4175 ti->num_flush_bios = 1;
4176 ti->flush_supported = true;
4177 ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
4178
4179 /* If the pool supports discards, pass them on. */
4180 if (tc->pool->pf.discard_enabled) {
4181 ti->discards_supported = true;
4182 ti->num_discard_bios = 1;
4183 ti->split_discard_bios = false;
4184 }
4185
4186 mutex_unlock(&dm_thin_pool_table.mutex);
4187
4188 spin_lock_irqsave(&tc->pool->lock, flags);
4189 if (tc->pool->suspended) {
4190 spin_unlock_irqrestore(&tc->pool->lock, flags);
4191 mutex_lock(&dm_thin_pool_table.mutex); /* reacquire for __pool_dec */
4192 ti->error = "Unable to activate thin device while pool is suspended";
4193 r = -EINVAL;
4194 goto bad;
4195 }
4196 atomic_set(&tc->refcount, 1);
4197 init_completion(&tc->can_destroy);
4198 list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
4199 spin_unlock_irqrestore(&tc->pool->lock, flags);
4200 /*
4201 * This synchronize_rcu() call is needed here, otherwise we risk a
4202 * wake_worker() call finding no bios to process (because the newly
4203 * added tc isn't yet visible). So this reduces latency since we
4204 * aren't then dependent on the periodic commit to wake_worker().
4205 */
4206 synchronize_rcu();
4207
4208 dm_put(pool_md);
4209
4210 return 0;
4211
4212 bad:
4213 dm_pool_close_thin_device(tc->td);
4214 bad_pool:
4215 __pool_dec(tc->pool);
4216 bad_pool_lookup:
4217 dm_put(pool_md);
4218 bad_common:
4219 dm_put_device(ti, tc->pool_dev);
4220 bad_pool_dev:
4221 if (tc->origin_dev)
4222 dm_put_device(ti, tc->origin_dev);
4223 bad_origin_dev:
4224 kfree(tc);
4225 out_unlock:
4226 mutex_unlock(&dm_thin_pool_table.mutex);
4227
4228 return r;
4229 }
4230
4231 static int thin_map(struct dm_target *ti, struct bio *bio)
4232 {
4233 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
4234
4235 return thin_bio_map(ti, bio);
4236 }
4237
4238 static int thin_endio(struct dm_target *ti, struct bio *bio,
4239 blk_status_t *err)
4240 {
4241 unsigned long flags;
4242 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
4243 struct list_head work;
4244 struct dm_thin_new_mapping *m, *tmp;
4245 struct pool *pool = h->tc->pool;
4246
4247 if (h->shared_read_entry) {
4248 INIT_LIST_HEAD(&work);
4249 dm_deferred_entry_dec(h->shared_read_entry, &work);
4250
4251 spin_lock_irqsave(&pool->lock, flags);
4252 list_for_each_entry_safe(m, tmp, &work, list) {
4253 list_del(&m->list);
4254 __complete_mapping_preparation(m);
4255 }
4256 spin_unlock_irqrestore(&pool->lock, flags);
4257 }
4258
4259 if (h->all_io_entry) {
4260 INIT_LIST_HEAD(&work);
4261 dm_deferred_entry_dec(h->all_io_entry, &work);
4262 if (!list_empty(&work)) {
4263 spin_lock_irqsave(&pool->lock, flags);
4264 list_for_each_entry_safe(m, tmp, &work, list)
4265 list_add_tail(&m->list, &pool->prepared_discards);
4266 spin_unlock_irqrestore(&pool->lock, flags);
4267 wake_worker(pool);
4268 }
4269 }
4270
4271 if (h->cell)
4272 cell_defer_no_holder(h->tc, h->cell);
4273
4274 return DM_ENDIO_DONE;
4275 }
4276
4277 static void thin_presuspend(struct dm_target *ti)
4278 {
4279 struct thin_c *tc = ti->private;
4280
4281 if (dm_noflush_suspending(ti))
4282 noflush_work(tc, do_noflush_start);
4283 }
4284
4285 static void thin_postsuspend(struct dm_target *ti)
4286 {
4287 struct thin_c *tc = ti->private;
4288
4289 /*
4290 * The dm_noflush_suspending flag has been cleared by now, so
4291 * unfortunately we must always run this.
4292 */
4293 noflush_work(tc, do_noflush_stop);
4294 }
4295
4296 static int thin_preresume(struct dm_target *ti)
4297 {
4298 struct thin_c *tc = ti->private;
4299
4300 if (tc->origin_dev)
4301 tc->origin_size = get_dev_size(tc->origin_dev->bdev);
4302
4303 return 0;
4304 }
4305
4306 /*
4307 * <nr mapped sectors> <highest mapped sector>
4308 */
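/*
 * Example (illustrative): a fully provisioned 1GiB thin device on a pool
 * with 2MiB blocks reports "2097152 2097151"; a device with no mappings
 * reports "0 -".
 */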
4309 static void thin_status(struct dm_target *ti, status_type_t type,
4310 unsigned status_flags, char *result, unsigned maxlen)
4311 {
4312 int r;
4313 ssize_t sz = 0;
4314 dm_block_t mapped, highest;
4315 char buf[BDEVNAME_SIZE];
4316 struct thin_c *tc = ti->private;
4317
4318 if (get_pool_mode(tc->pool) == PM_FAIL) {
4319 DMEMIT("Fail");
4320 return;
4321 }
4322
4323 if (!tc->td)
4324 DMEMIT("-");
4325 else {
4326 switch (type) {
4327 case STATUSTYPE_INFO:
4328 r = dm_thin_get_mapped_count(tc->td, &mapped);
4329 if (r) {
4330 DMERR("dm_thin_get_mapped_count returned %d", r);
4331 goto err;
4332 }
4333
4334 r = dm_thin_get_highest_mapped_block(tc->td, &highest);
4335 if (r < 0) {
4336 DMERR("dm_thin_get_highest_mapped_block returned %d", r);
4337 goto err;
4338 }
4339
4340 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
4341 if (r)
4342 DMEMIT("%llu", ((highest + 1) *
4343 tc->pool->sectors_per_block) - 1);
4344 else
4345 DMEMIT("-");
4346 break;
4347
4348 case STATUSTYPE_TABLE:
4349 DMEMIT("%s %lu",
4350 format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
4351 (unsigned long) tc->dev_id);
4352 if (tc->origin_dev)
4353 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
4354 break;
4355 }
4356 }
4357
4358 return;
4359
4360 err:
4361 DMEMIT("Error");
4362 }
4363
4364 static int thin_iterate_devices(struct dm_target *ti,
4365 iterate_devices_callout_fn fn, void *data)
4366 {
4367 sector_t blocks;
4368 struct thin_c *tc = ti->private;
4369 struct pool *pool = tc->pool;
4370
4371 /*
4372 * We can't call dm_pool_get_data_dev_size() since that blocks. So
4373 * we follow a more convoluted path through to the pool's target.
4374 */
4375 if (!pool->ti)
4376 return 0; /* nothing is bound */
4377
4378 blocks = pool->ti->len;
4379 (void) sector_div(blocks, pool->sectors_per_block);
4380 if (blocks)
4381 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
4382
4383 return 0;
4384 }
4385
4386 static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
4387 {
4388 struct thin_c *tc = ti->private;
4389 struct pool *pool = tc->pool;
4390
4391 if (!pool->pf.discard_enabled)
4392 return;
4393
4394 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
4395 limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
4396 }
4397
4398 static struct target_type thin_target = {
4399 .name = "thin",
4400 .version = {1, 20, 0},
4401 .module = THIS_MODULE,
4402 .ctr = thin_ctr,
4403 .dtr = thin_dtr,
4404 .map = thin_map,
4405 .end_io = thin_endio,
4406 .preresume = thin_preresume,
4407 .presuspend = thin_presuspend,
4408 .postsuspend = thin_postsuspend,
4409 .status = thin_status,
4410 .iterate_devices = thin_iterate_devices,
4411 .io_hints = thin_io_hints,
4412 };
4413
4414 /*----------------------------------------------------------------*/
4415
4416 static int __init dm_thin_init(void)
4417 {
4418 int r = -ENOMEM;
4419
4420 pool_table_init();
4421
4422 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
4423 if (!_new_mapping_cache)
4424 return r;
4425
4426 r = dm_register_target(&thin_target);
4427 if (r)
4428 goto bad_new_mapping_cache;
4429
4430 r = dm_register_target(&pool_target);
4431 if (r)
4432 goto bad_thin_target;
4433
4434 return 0;
4435
4436 bad_thin_target:
4437 dm_unregister_target(&thin_target);
4438 bad_new_mapping_cache:
4439 kmem_cache_destroy(_new_mapping_cache);
4440
4441 return r;
4442 }
4443
4444 static void dm_thin_exit(void)
4445 {
4446 dm_unregister_target(&thin_target);
4447 dm_unregister_target(&pool_target);
4448
4449 kmem_cache_destroy(_new_mapping_cache);
4450
4451 pool_table_exit();
4452 }
4453
4454 module_init(dm_thin_init);
4455 module_exit(dm_thin_exit);
4456
4457 module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
4458 MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
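/*
 * Runtime tuning example (sysfs path assumed from the module name):
 *
 *   echo 120 > /sys/module/dm_thin_pool/parameters/no_space_timeout
 *
 * A value of 0 disables the timeout, so queued bios wait indefinitely.
 */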
4459
4460 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
4461 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
4462 MODULE_LICENSE("GPL");
4463