Lines matching references to identifier `dd` (cross-reference listing; format: source line number, code fragment, enclosing function)
77 static int dust_remove_block(struct dust_device *dd, unsigned long long block) in dust_remove_block() argument
82 spin_lock_irqsave(&dd->dust_lock, flags); in dust_remove_block()
83 bblock = dust_rb_search(&dd->badblocklist, block); in dust_remove_block()
86 if (!dd->quiet_mode) { in dust_remove_block()
90 spin_unlock_irqrestore(&dd->dust_lock, flags); in dust_remove_block()
94 rb_erase(&bblock->node, &dd->badblocklist); in dust_remove_block()
95 dd->badblock_count--; in dust_remove_block()
96 if (!dd->quiet_mode) in dust_remove_block()
99 spin_unlock_irqrestore(&dd->dust_lock, flags); in dust_remove_block()
104 static int dust_add_block(struct dust_device *dd, unsigned long long block) in dust_add_block() argument
111 if (!dd->quiet_mode) in dust_add_block()
116 spin_lock_irqsave(&dd->dust_lock, flags); in dust_add_block()
118 if (!dust_rb_insert(&dd->badblocklist, bblock)) { in dust_add_block()
119 if (!dd->quiet_mode) { in dust_add_block()
123 spin_unlock_irqrestore(&dd->dust_lock, flags); in dust_add_block()
128 dd->badblock_count++; in dust_add_block()
129 if (!dd->quiet_mode) in dust_add_block()
131 spin_unlock_irqrestore(&dd->dust_lock, flags); in dust_add_block()
136 static int dust_query_block(struct dust_device *dd, unsigned long long block) in dust_query_block() argument
141 spin_lock_irqsave(&dd->dust_lock, flags); in dust_query_block()
142 bblock = dust_rb_search(&dd->badblocklist, block); in dust_query_block()
147 spin_unlock_irqrestore(&dd->dust_lock, flags); in dust_query_block()
152 static int __dust_map_read(struct dust_device *dd, sector_t thisblock) in __dust_map_read() argument
154 struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock); in __dust_map_read()
162 static int dust_map_read(struct dust_device *dd, sector_t thisblock, in dust_map_read() argument
169 thisblock >>= dd->sect_per_block_shift; in dust_map_read()
170 spin_lock_irqsave(&dd->dust_lock, flags); in dust_map_read()
171 ret = __dust_map_read(dd, thisblock); in dust_map_read()
172 spin_unlock_irqrestore(&dd->dust_lock, flags); in dust_map_read()
178 static void __dust_map_write(struct dust_device *dd, sector_t thisblock) in __dust_map_write() argument
180 struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock); in __dust_map_write()
183 rb_erase(&bblk->node, &dd->badblocklist); in __dust_map_write()
184 dd->badblock_count--; in __dust_map_write()
186 if (!dd->quiet_mode) { in __dust_map_write()
187 sector_div(thisblock, dd->sect_per_block); in __dust_map_write()
194 static int dust_map_write(struct dust_device *dd, sector_t thisblock, in dust_map_write() argument
200 thisblock >>= dd->sect_per_block_shift; in dust_map_write()
201 spin_lock_irqsave(&dd->dust_lock, flags); in dust_map_write()
202 __dust_map_write(dd, thisblock); in dust_map_write()
203 spin_unlock_irqrestore(&dd->dust_lock, flags); in dust_map_write()
211 struct dust_device *dd = ti->private; in dust_map() local
214 bio_set_dev(bio, dd->dev->bdev); in dust_map()
215 bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector); in dust_map()
218 ret = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb); in dust_map()
220 ret = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb); in dust_map()
249 static int dust_clear_badblocks(struct dust_device *dd) in dust_clear_badblocks() argument
255 spin_lock_irqsave(&dd->dust_lock, flags); in dust_clear_badblocks()
256 badblocklist = dd->badblocklist; in dust_clear_badblocks()
257 badblock_count = dd->badblock_count; in dust_clear_badblocks()
258 dd->badblocklist = RB_ROOT; in dust_clear_badblocks()
259 dd->badblock_count = 0; in dust_clear_badblocks()
260 spin_unlock_irqrestore(&dd->dust_lock, flags); in dust_clear_badblocks()
281 struct dust_device *dd; in dust_ctr() local
321 dd = kzalloc(sizeof(struct dust_device), GFP_KERNEL); in dust_ctr()
322 if (dd == NULL) { in dust_ctr()
327 if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dd->dev)) { in dust_ctr()
329 kfree(dd); in dust_ctr()
333 dd->sect_per_block = sect_per_block; in dust_ctr()
334 dd->blksz = blksz; in dust_ctr()
335 dd->start = tmp; in dust_ctr()
337 dd->sect_per_block_shift = __ffs(sect_per_block); in dust_ctr()
343 dd->fail_read_on_bb = false; in dust_ctr()
348 dd->badblocklist = RB_ROOT; in dust_ctr()
349 dd->badblock_count = 0; in dust_ctr()
350 spin_lock_init(&dd->dust_lock); in dust_ctr()
352 dd->quiet_mode = false; in dust_ctr()
354 BUG_ON(dm_set_target_max_io_len(ti, dd->sect_per_block) != 0); in dust_ctr()
358 ti->private = dd; in dust_ctr()
365 struct dust_device *dd = ti->private; in dust_dtr() local
367 __dust_clear_badblocks(&dd->badblocklist, dd->badblock_count); in dust_dtr()
368 dm_put_device(ti, dd->dev); in dust_dtr()
369 kfree(dd); in dust_dtr()
375 struct dust_device *dd = ti->private; in dust_message() local
376 sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT; in dust_message()
390 dd->fail_read_on_bb = false; in dust_message()
394 dd->fail_read_on_bb = true; in dust_message()
397 spin_lock_irqsave(&dd->dust_lock, flags); in dust_message()
399 dd->badblock_count); in dust_message()
400 spin_unlock_irqrestore(&dd->dust_lock, flags); in dust_message()
403 result = dust_clear_badblocks(dd); in dust_message()
405 if (!dd->quiet_mode) in dust_message()
406 dd->quiet_mode = true; in dust_message()
408 dd->quiet_mode = false; in dust_message()
418 sector_div(size, dd->sect_per_block); in dust_message()
425 result = dust_add_block(dd, block); in dust_message()
427 result = dust_remove_block(dd, block); in dust_message()
429 result = dust_query_block(dd, block); in dust_message()
445 struct dust_device *dd = ti->private; in dust_status() local
450 DMEMIT("%s %s %s", dd->dev->name, in dust_status()
451 dd->fail_read_on_bb ? "fail_read_on_bad_block" : "bypass", in dust_status()
452 dd->quiet_mode ? "quiet" : "verbose"); in dust_status()
456 DMEMIT("%s %llu %u", dd->dev->name, in dust_status()
457 (unsigned long long)dd->start, dd->blksz); in dust_status()
464 struct dust_device *dd = ti->private; in dust_prepare_ioctl() local
465 struct dm_dev *dev = dd->dev; in dust_prepare_ioctl()
472 if (dd->start || in dust_prepare_ioctl()
482 struct dust_device *dd = ti->private; in dust_iterate_devices() local
484 return fn(ti, dd->dev, dd->start, ti->len, data); in dust_iterate_devices()