Lines matching refs:rs — cross-reference hits for the identifier rs in drivers/md/dm-raid.c (source line number, matching line, enclosing function)
259 static void rs_config_backup(struct raid_set *rs, struct rs_layout *l) in rs_config_backup() argument
261 struct mddev *mddev = &rs->md; in rs_config_backup()
268 static void rs_config_restore(struct raid_set *rs, struct rs_layout *l) in rs_config_restore() argument
270 struct mddev *mddev = &rs->md; in rs_config_restore()
399 static bool rs_is_raid0(struct raid_set *rs) in rs_is_raid0() argument
401 return !rs->md.level; in rs_is_raid0()
405 static bool rs_is_raid1(struct raid_set *rs) in rs_is_raid1() argument
407 return rs->md.level == 1; in rs_is_raid1()
411 static bool rs_is_raid10(struct raid_set *rs) in rs_is_raid10() argument
413 return rs->md.level == 10; in rs_is_raid10()
417 static bool rs_is_raid6(struct raid_set *rs) in rs_is_raid6() argument
419 return rs->md.level == 6; in rs_is_raid6()
423 static bool rs_is_raid456(struct raid_set *rs) in rs_is_raid456() argument
425 return __within_range(rs->md.level, 4, 6); in rs_is_raid456()
430 static bool rs_is_reshapable(struct raid_set *rs) in rs_is_reshapable() argument
432 return rs_is_raid456(rs) || in rs_is_reshapable()
433 (rs_is_raid10(rs) && !__is_raid10_far(rs->md.new_layout)); in rs_is_reshapable()
437 static bool rs_is_recovering(struct raid_set *rs) in rs_is_recovering() argument
439 return rs->md.recovery_cp < rs->md.dev_sectors; in rs_is_recovering()
443 static bool rs_is_reshaping(struct raid_set *rs) in rs_is_reshaping() argument
445 return rs->md.reshape_position != MaxSector; in rs_is_reshaping()
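
These rs_is_*() helpers all key off the raid level stored in the embedded struct mddev. A minimal userspace sketch of the same predicate pattern follows; the two-field structs are hypothetical stand-ins for the kernel types:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical, simplified stand-ins for struct mddev / struct raid_set. */
    struct mddev { int level; };
    struct raid_set { struct mddev md; };

    static bool rs_is_raid0(struct raid_set *rs) { return !rs->md.level; }
    static bool rs_is_raid1(struct raid_set *rs) { return rs->md.level == 1; }

    static bool __within_range(long v, long min, long max)
    {
        return v >= min && v <= max;
    }

    /* raid4, raid5 and raid6 share one range test, as in the listing. */
    static bool rs_is_raid456(struct raid_set *rs)
    {
        return __within_range(rs->md.level, 4, 6);
    }

    int main(void)
    {
        struct raid_set rs = { .md = { .level = 5 } };

        printf("raid0=%d raid1=%d raid456=%d\n",
               rs_is_raid0(&rs), rs_is_raid1(&rs), rs_is_raid456(&rs));
        return 0;
    }
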
490 static unsigned long __valid_flags(struct raid_set *rs) in __valid_flags() argument
492 if (rt_is_raid0(rs->raid_type)) in __valid_flags()
494 else if (rt_is_raid1(rs->raid_type)) in __valid_flags()
496 else if (rt_is_raid10(rs->raid_type)) in __valid_flags()
498 else if (rt_is_raid45(rs->raid_type)) in __valid_flags()
500 else if (rt_is_raid6(rs->raid_type)) in __valid_flags()
511 static int rs_check_for_valid_flags(struct raid_set *rs) in rs_check_for_valid_flags() argument
513 if (rs->ctr_flags & ~__valid_flags(rs)) { in rs_check_for_valid_flags()
514 rs->ti->error = "Invalid flags combination"; in rs_check_for_valid_flags()
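
__valid_flags() returns a per-raid-type whitelist and rs_check_for_valid_flags() rejects any constructor flag outside it. A compilable sketch of that mask check, with invented placeholder flag values (the real CTR_FLAG_* masks differ):

    #include <stdio.h>

    /* Hypothetical flag bits mirroring the __CTR_FLAG_* scheme. */
    #define CTR_FLAG_SYNC          (1UL << 0)
    #define CTR_FLAG_NOSYNC        (1UL << 1)
    #define CTR_FLAG_REBUILD       (1UL << 2)
    #define CTR_FLAG_RAID10_COPIES (1UL << 3)

    /* Per-type whitelist, as __valid_flags() returns for each raid level. */
    static unsigned long valid_flags(int level)
    {
        switch (level) {
        case 0:  return CTR_FLAG_SYNC;   /* raid0 accepts almost nothing */
        case 10: return CTR_FLAG_SYNC | CTR_FLAG_NOSYNC |
                        CTR_FLAG_REBUILD | CTR_FLAG_RAID10_COPIES;
        default: return CTR_FLAG_SYNC | CTR_FLAG_NOSYNC | CTR_FLAG_REBUILD;
        }
    }

    /* Reject any constructor flag outside the whitelist, as
     * rs_check_for_valid_flags() does ("Invalid flags combination"). */
    static int check_flags(int level, unsigned long ctr_flags)
    {
        return (ctr_flags & ~valid_flags(level)) ? -22 /* -EINVAL */ : 0;
    }

    int main(void)
    {
        printf("%d\n", check_flags(0, CTR_FLAG_RAID10_COPIES));  /* -22 */
        printf("%d\n", check_flags(10, CTR_FLAG_RAID10_COPIES)); /* 0   */
        return 0;
    }
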
598 static int raid10_format_to_md_layout(struct raid_set *rs, in raid10_format_to_md_layout() argument
619 if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) in raid10_format_to_md_layout()
624 if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) in raid10_format_to_md_layout()
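
md encodes the raid10 geometry in a single layout word: near copies in the low byte, far copies in the next byte, and an offset-mode bit above them. raid10_format_to_md_layout() builds that word from the requested format plus copy count. A sketch of the packing (bit positions follow md's raid10 convention; the "use near sets" bit tested above is omitted here):

    #include <stdio.h>

    #define RAID10_OFFSET (1 << 16)  /* offset mode instead of plain far */

    static int to_md_layout(int near, int far, int use_offset)
    {
        return (use_offset ? RAID10_OFFSET : 0) | (far << 8) | near;
    }

    static int near_copies(int layout) { return layout & 0xFF; }
    static int far_copies(int layout)  { return (layout >> 8) & 0xFF; }

    int main(void)
    {
        int l = to_md_layout(1, 2, 0);  /* "far" format, 2 copies */

        printf("layout=0x%x near=%d far=%d\n",
               l, near_copies(l), far_copies(l));
        return 0;
    }
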
682 static void rs_set_rdev_sectors(struct raid_set *rs) in rs_set_rdev_sectors() argument
684 struct mddev *mddev = &rs->md; in rs_set_rdev_sectors()
699 static void rs_set_capacity(struct raid_set *rs) in rs_set_capacity() argument
701 struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table)); in rs_set_capacity()
703 set_capacity_and_notify(gendisk, rs->md.array_sectors); in rs_set_capacity()
710 static void rs_set_cur(struct raid_set *rs) in rs_set_cur() argument
712 struct mddev *mddev = &rs->md; in rs_set_cur()
723 static void rs_set_new(struct raid_set *rs) in rs_set_new() argument
725 struct mddev *mddev = &rs->md; in rs_set_new()
730 mddev->raid_disks = rs->raid_disks; in rs_set_new()
738 struct raid_set *rs; in raid_set_alloc() local
745 rs = kzalloc(struct_size(rs, dev, raid_devs), GFP_KERNEL); in raid_set_alloc()
746 if (!rs) { in raid_set_alloc()
751 mddev_init(&rs->md); in raid_set_alloc()
753 rs->raid_disks = raid_devs; in raid_set_alloc()
754 rs->delta_disks = 0; in raid_set_alloc()
756 rs->ti = ti; in raid_set_alloc()
757 rs->raid_type = raid_type; in raid_set_alloc()
758 rs->stripe_cache_entries = 256; in raid_set_alloc()
759 rs->md.raid_disks = raid_devs; in raid_set_alloc()
760 rs->md.level = raid_type->level; in raid_set_alloc()
761 rs->md.new_level = rs->md.level; in raid_set_alloc()
762 rs->md.layout = raid_type->algorithm; in raid_set_alloc()
763 rs->md.new_layout = rs->md.layout; in raid_set_alloc()
764 rs->md.delta_disks = 0; in raid_set_alloc()
765 rs->md.recovery_cp = MaxSector; in raid_set_alloc()
768 md_rdev_init(&rs->dev[i].rdev); in raid_set_alloc()
779 return rs; in raid_set_alloc()
783 static void raid_set_free(struct raid_set *rs) in raid_set_free() argument
787 if (rs->journal_dev.dev) { in raid_set_free()
788 md_rdev_clear(&rs->journal_dev.rdev); in raid_set_free()
789 dm_put_device(rs->ti, rs->journal_dev.dev); in raid_set_free()
792 for (i = 0; i < rs->raid_disks; i++) { in raid_set_free()
793 if (rs->dev[i].meta_dev) in raid_set_free()
794 dm_put_device(rs->ti, rs->dev[i].meta_dev); in raid_set_free()
795 md_rdev_clear(&rs->dev[i].rdev); in raid_set_free()
796 if (rs->dev[i].data_dev) in raid_set_free()
797 dm_put_device(rs->ti, rs->dev[i].data_dev); in raid_set_free()
800 kfree(rs); in raid_set_free()
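
raid_set_alloc() makes a single kzalloc() sized with struct_size() for the header plus a flexible array of per-leg device slots, and raid_set_free() walks that same array to drop device references before freeing the set. A userspace sketch of the allocation pattern, with simplified types (the kernel's struct_size() additionally guards the multiplication against overflow):

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-in; the kernel slot holds md_rdev, dm_dev, etc. */
    struct raid_dev { int raid_disk; };

    struct raid_set {
        unsigned int raid_disks;
        struct raid_dev dev[];  /* flexible array member, one slot per leg */
    };

    static struct raid_set *raid_set_alloc(unsigned int raid_devs)
    {
        struct raid_set *rs = calloc(1, sizeof(*rs) +
                                     raid_devs * sizeof(rs->dev[0]));
        if (!rs)
            return NULL;
        rs->raid_disks = raid_devs;
        for (unsigned int i = 0; i < raid_devs; i++)
            rs->dev[i].raid_disk = i;
        return rs;
    }

    /* Teardown walks the same array before freeing the set itself,
     * mirroring raid_set_free()'s dm_put_device()/md_rdev_clear() loop. */
    static void raid_set_free(struct raid_set *rs)
    {
        free(rs);
    }

    int main(void)
    {
        struct raid_set *rs = raid_set_alloc(4);

        if (!rs)
            return 1;
        printf("disks=%u last=%d\n", rs->raid_disks, rs->dev[3].raid_disk);
        raid_set_free(rs);
        return 0;
    }
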
819 static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as) in parse_dev_params() argument
832 for (i = 0; i < rs->raid_disks; i++) { in parse_dev_params()
833 rs->dev[i].rdev.raid_disk = i; in parse_dev_params()
835 rs->dev[i].meta_dev = NULL; in parse_dev_params()
836 rs->dev[i].data_dev = NULL; in parse_dev_params()
842 rs->dev[i].rdev.data_offset = 0; in parse_dev_params()
843 rs->dev[i].rdev.new_data_offset = 0; in parse_dev_params()
844 rs->dev[i].rdev.mddev = &rs->md; in parse_dev_params()
851 r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table), in parse_dev_params()
852 &rs->dev[i].meta_dev); in parse_dev_params()
854 rs->ti->error = "RAID metadata device lookup failure"; in parse_dev_params()
858 rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL); in parse_dev_params()
859 if (!rs->dev[i].rdev.sb_page) { in parse_dev_params()
860 rs->ti->error = "Failed to allocate superblock page"; in parse_dev_params()
870 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) && in parse_dev_params()
871 (!rs->dev[i].rdev.recovery_offset)) { in parse_dev_params()
872 rs->ti->error = "Drive designated for rebuild not specified"; in parse_dev_params()
876 if (rs->dev[i].meta_dev) { in parse_dev_params()
877 rs->ti->error = "No data device supplied with metadata device"; in parse_dev_params()
884 r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table), in parse_dev_params()
885 &rs->dev[i].data_dev); in parse_dev_params()
887 rs->ti->error = "RAID device lookup failure"; in parse_dev_params()
891 if (rs->dev[i].meta_dev) { in parse_dev_params()
893 rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev; in parse_dev_params()
895 rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev; in parse_dev_params()
896 list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks); in parse_dev_params()
897 if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) in parse_dev_params()
901 if (rs->journal_dev.dev) in parse_dev_params()
902 list_add_tail(&rs->journal_dev.rdev.same_set, &rs->md.disks); in parse_dev_params()
905 rs->md.external = 0; in parse_dev_params()
906 rs->md.persistent = 1; in parse_dev_params()
907 rs->md.major_version = 2; in parse_dev_params()
908 } else if (rebuild && !rs->md.recovery_cp) { in parse_dev_params()
920 rs->ti->error = "Unable to rebuild drive while array is not in-sync"; in parse_dev_params()
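
Device tuples arrive as "<meta_dev> <data_dev>" pairs, with "-" meaning no device; a metadata device without a matching data device is rejected. A small sketch of that pairing rule (the helper name and return-by-string convention here are illustrative only):

    #include <stdio.h>
    #include <string.h>

    /* "-" stands for no device, as parse_dev_params() interprets it. */
    static const char *parse_pair(const char *meta, const char *data,
                                  int *has_meta, int *has_data)
    {
        *has_meta = strcmp(meta, "-") != 0;
        *has_data = strcmp(data, "-") != 0;

        if (!*has_data && *has_meta)
            return "No data device supplied with metadata device";
        return NULL;
    }

    int main(void)
    {
        int m, d;
        const char *e;

        e = parse_pair("/dev/sdb1", "-", &m, &d);
        printf("%s\n", e ? e : "ok");  /* rejected */
        e = parse_pair("-", "/dev/sdc1", &m, &d);
        printf("%s\n", e ? e : "ok");  /* ok: leg without its own superblock */
        return 0;
    }
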
937 static int validate_region_size(struct raid_set *rs, unsigned long region_size) in validate_region_size() argument
939 unsigned long min_region_size = rs->ti->len / (1 << 21); in validate_region_size()
941 if (rs_is_raid0(rs)) in validate_region_size()
961 if (region_size > rs->ti->len) { in validate_region_size()
962 rs->ti->error = "Supplied region size is too large"; in validate_region_size()
969 rs->ti->error = "Supplied region size is too small"; in validate_region_size()
974 rs->ti->error = "Region size is not a power of 2"; in validate_region_size()
978 if (region_size < rs->md.chunk_sectors) { in validate_region_size()
979 rs->ti->error = "Region size is smaller than the chunk size"; in validate_region_size()
987 rs->md.bitmap_info.chunksize = to_bytes(region_size); in validate_region_size()
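
validate_region_size() bounds the bitmap region size against the target length, requires a power of two, and refuses regions smaller than the chunk size. A compilable sketch of the same ordering of checks; it omits the defaulting logic the kernel applies when no region size was supplied:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    static int is_power_of_2(unsigned long n)
    {
        return n != 0 && (n & (n - 1)) == 0;
    }

    static const char *check_region_size(sector_t ti_len,
                                         sector_t chunk_sectors,
                                         unsigned long region_size)
    {
        /* Cap the region count at 2^21, as the listing's divisor shows. */
        unsigned long min_region_size = ti_len / (1 << 21);

        if (region_size > ti_len)
            return "Supplied region size is too large";
        if (region_size < min_region_size)
            return "Supplied region size is too small";
        if (!is_power_of_2(region_size))
            return "Region size is not a power of 2";
        if (region_size < chunk_sectors)
            return "Region size is smaller than the chunk size";
        return NULL;  /* ok: becomes the bitmap chunksize */
    }

    int main(void)
    {
        /* 1 TiB target in 512-byte sectors, 128-sector chunks */
        const char *e = check_region_size(1ULL << 31, 128, 4096);

        printf("%s\n", e ? e : "ok");
        return 0;
    }
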
1001 static int validate_raid_redundancy(struct raid_set *rs) in validate_raid_redundancy() argument
1007 for (i = 0; i < rs->md.raid_disks; i++) in validate_raid_redundancy()
1008 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) || in validate_raid_redundancy()
1009 !rs->dev[i].rdev.sb_page) in validate_raid_redundancy()
1012 switch (rs->md.level) { in validate_raid_redundancy()
1016 if (rebuild_cnt >= rs->md.raid_disks) in validate_raid_redundancy()
1022 if (rebuild_cnt > rs->raid_type->parity_devs) in validate_raid_redundancy()
1026 copies = raid10_md_layout_to_copies(rs->md.new_layout); in validate_raid_redundancy()
1049 if (__is_raid10_near(rs->md.new_layout)) { in validate_raid_redundancy()
1050 for (i = 0; i < rs->md.raid_disks; i++) { in validate_raid_redundancy()
1053 if ((!rs->dev[i].rdev.sb_page || in validate_raid_redundancy()
1054 !test_bit(In_sync, &rs->dev[i].rdev.flags)) && in validate_raid_redundancy()
1073 group_size = (rs->md.raid_disks / copies); in validate_raid_redundancy()
1074 last_group_start = (rs->md.raid_disks / group_size) - 1; in validate_raid_redundancy()
1076 for (i = 0; i < rs->md.raid_disks; i++) { in validate_raid_redundancy()
1079 if ((!rs->dev[i].rdev.sb_page || in validate_raid_redundancy()
1080 !test_bit(In_sync, &rs->dev[i].rdev.flags)) && in validate_raid_redundancy()
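
For raid10 "near", copies of a block live on adjacent devices, so devices i .. i+copies-1 (i a multiple of copies) form one mirror group, and redundancy holds only while every group keeps at least one in-sync member. A sketch of that group walk as validate_raid_redundancy() performs it for the near layout:

    #include <stdbool.h>
    #include <stdio.h>

    static bool near_groups_ok(const bool *in_sync, int raid_disks, int copies)
    {
        int i, rebuilds_per_group = 0;

        for (i = 0; i < raid_disks; i++) {
            if (!(i % copies))
                rebuilds_per_group = 0;  /* new mirror group starts */
            if (!in_sync[i] && ++rebuilds_per_group >= copies)
                return false;            /* whole group would be lost */
        }
        return true;
    }

    int main(void)
    {
        bool ok[]  = { true, false, true, false };  /* 4 disks, 2 copies */
        bool bad[] = { false, false, true, true };  /* group 0 fully out */

        printf("%d %d\n", near_groups_ok(ok, 4, 2), near_groups_ok(bad, 4, 2));
        return 0;
    }
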
1121 static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as, in parse_raid_params() argument
1131 struct raid_type *rt = rs->raid_type; in parse_raid_params()
1137 rs->ti->error = "Bad numerical argument given for chunk_size"; in parse_raid_params()
1150 rs->ti->error = "Chunk size must be a power of 2"; in parse_raid_params()
1153 rs->ti->error = "Chunk size value is too small"; in parse_raid_params()
1157 rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; in parse_raid_params()
1176 for (i = 0; i < rs->raid_disks; i++) { in parse_raid_params()
1177 set_bit(In_sync, &rs->dev[i].rdev.flags); in parse_raid_params()
1178 rs->dev[i].rdev.recovery_offset = MaxSector; in parse_raid_params()
1187 rs->ti->error = "Not enough raid parameters given"; in parse_raid_params()
1192 if (test_and_set_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) { in parse_raid_params()
1193 rs->ti->error = "Only one 'nosync' argument allowed"; in parse_raid_params()
1199 if (test_and_set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) { in parse_raid_params()
1200 rs->ti->error = "Only one 'sync' argument allowed"; in parse_raid_params()
1206 if (test_and_set_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) { in parse_raid_params()
1207 rs->ti->error = "Only one 'raid10_use_new_sets' argument allowed"; in parse_raid_params()
1216 rs->ti->error = "Wrong number of raid parameters given"; in parse_raid_params()
1225 if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) { in parse_raid_params()
1226 rs->ti->error = "Only one 'raid10_format' argument pair allowed"; in parse_raid_params()
1230 rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type"; in parse_raid_params()
1235 rs->ti->error = "Invalid 'raid10_format' value given"; in parse_raid_params()
1246 if (test_and_set_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) { in parse_raid_params()
1247 rs->ti->error = "Only one raid4/5/6 set journaling device allowed"; in parse_raid_params()
1251 rs->ti->error = "'journal_dev' is an invalid parameter for this RAID type"; in parse_raid_params()
1254 r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table), in parse_raid_params()
1255 &rs->journal_dev.dev); in parse_raid_params()
1257 rs->ti->error = "raid4/5/6 journal device lookup failure"; in parse_raid_params()
1260 jdev = &rs->journal_dev.rdev; in parse_raid_params()
1262 jdev->mddev = &rs->md; in parse_raid_params()
1263 jdev->bdev = rs->journal_dev.dev->bdev; in parse_raid_params()
1266 rs->ti->error = "No space for raid4/5/6 journal"; in parse_raid_params()
1269 rs->journal_dev.mode = R5C_JOURNAL_MODE_WRITE_THROUGH; in parse_raid_params()
1278 if (!test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) { in parse_raid_params()
1279 rs->ti->error = "raid4/5/6 'journal_mode' is invalid without 'journal_dev'"; in parse_raid_params()
1282 if (test_and_set_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) { in parse_raid_params()
1283 rs->ti->error = "Only one raid4/5/6 'journal_mode' argument allowed"; in parse_raid_params()
1288 rs->ti->error = "Invalid 'journal_mode' argument"; in parse_raid_params()
1291 rs->journal_dev.mode = r; in parse_raid_params()
1299 rs->ti->error = "Bad numerical argument given in raid params"; in parse_raid_params()
1309 if (!__within_range(value, 0, rs->raid_disks - 1)) { in parse_raid_params()
1310 rs->ti->error = "Invalid rebuild index given"; in parse_raid_params()
1314 if (test_and_set_bit(value, (void *) rs->rebuild_disks)) { in parse_raid_params()
1315 rs->ti->error = "rebuild for this index already given"; in parse_raid_params()
1319 rd = rs->dev + value; in parse_raid_params()
1323 set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags); in parse_raid_params()
1326 rs->ti->error = "write_mostly option is only valid for RAID1"; in parse_raid_params()
1330 if (!__within_range(value, 0, rs->md.raid_disks - 1)) { in parse_raid_params()
1331 rs->ti->error = "Invalid write_mostly index given"; in parse_raid_params()
1336 set_bit(WriteMostly, &rs->dev[value].rdev.flags); in parse_raid_params()
1337 set_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags); in parse_raid_params()
1340 rs->ti->error = "max_write_behind option is only valid for RAID1"; in parse_raid_params()
1344 if (test_and_set_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) { in parse_raid_params()
1345 rs->ti->error = "Only one max_write_behind argument pair allowed"; in parse_raid_params()
1354 rs->ti->error = "Max write-behind limit out of range"; in parse_raid_params()
1358 rs->md.bitmap_info.max_write_behind = value / 2; in parse_raid_params()
1360 if (test_and_set_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) { in parse_raid_params()
1361 rs->ti->error = "Only one daemon_sleep argument pair allowed"; in parse_raid_params()
1365 rs->ti->error = "daemon sleep period out of range"; in parse_raid_params()
1368 rs->md.bitmap_info.daemon_sleep = value; in parse_raid_params()
1371 if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) { in parse_raid_params()
1372 rs->ti->error = "Only one data_offset argument pair allowed"; in parse_raid_params()
1378 rs->ti->error = "Bogus data_offset value"; in parse_raid_params()
1381 rs->data_offset = value; in parse_raid_params()
1384 if (test_and_set_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) { in parse_raid_params()
1385 rs->ti->error = "Only one delta_disks argument pair allowed"; in parse_raid_params()
1390 rs->ti->error = "Too many delta_disk requested"; in parse_raid_params()
1394 rs->delta_disks = value; in parse_raid_params()
1396 if (test_and_set_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) { in parse_raid_params()
1397 rs->ti->error = "Only one stripe_cache argument pair allowed"; in parse_raid_params()
1402 rs->ti->error = "Inappropriate argument: stripe_cache"; in parse_raid_params()
1407 rs->ti->error = "Bogus stripe cache entries value"; in parse_raid_params()
1410 rs->stripe_cache_entries = value; in parse_raid_params()
1412 if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) { in parse_raid_params()
1413 rs->ti->error = "Only one min_recovery_rate argument pair allowed"; in parse_raid_params()
1418 rs->ti->error = "min_recovery_rate out of range"; in parse_raid_params()
1421 rs->md.sync_speed_min = value; in parse_raid_params()
1423 if (test_and_set_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) { in parse_raid_params()
1424 rs->ti->error = "Only one max_recovery_rate argument pair allowed"; in parse_raid_params()
1429 rs->ti->error = "max_recovery_rate out of range"; in parse_raid_params()
1432 rs->md.sync_speed_max = value; in parse_raid_params()
1434 if (test_and_set_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) { in parse_raid_params()
1435 rs->ti->error = "Only one region_size argument pair allowed"; in parse_raid_params()
1440 rs->requested_bitmap_chunk_sectors = value; in parse_raid_params()
1442 if (test_and_set_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) { in parse_raid_params()
1443 rs->ti->error = "Only one raid10_copies argument pair allowed"; in parse_raid_params()
1447 if (!__within_range(value, 2, rs->md.raid_disks)) { in parse_raid_params()
1448 rs->ti->error = "Bad value for 'raid10_copies'"; in parse_raid_params()
1455 rs->ti->error = "Unable to parse RAID parameter"; in parse_raid_params()
1460 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) && in parse_raid_params()
1461 test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) { in parse_raid_params()
1462 rs->ti->error = "sync and nosync are mutually exclusive"; in parse_raid_params()
1466 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && in parse_raid_params()
1467 (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) || in parse_raid_params()
1468 test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))) { in parse_raid_params()
1469 rs->ti->error = "sync/nosync and rebuild are mutually exclusive"; in parse_raid_params()
1473 if (write_mostly >= rs->md.raid_disks) { in parse_raid_params()
1474 rs->ti->error = "Can't set all raid1 devices to write_mostly"; in parse_raid_params()
1478 if (rs->md.sync_speed_max && in parse_raid_params()
1479 rs->md.sync_speed_min > rs->md.sync_speed_max) { in parse_raid_params()
1480 rs->ti->error = "Bogus recovery rates"; in parse_raid_params()
1484 if (validate_region_size(rs, region_size)) in parse_raid_params()
1487 if (rs->md.chunk_sectors) in parse_raid_params()
1488 max_io_len = rs->md.chunk_sectors; in parse_raid_params()
1492 if (dm_set_target_max_io_len(rs->ti, max_io_len)) in parse_raid_params()
1496 if (raid10_copies > rs->md.raid_disks) { in parse_raid_params()
1497 rs->ti->error = "Not enough devices to satisfy specification"; in parse_raid_params()
1501 rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies); in parse_raid_params()
1502 if (rs->md.new_layout < 0) { in parse_raid_params()
1503 rs->ti->error = "Error getting raid10 format"; in parse_raid_params()
1504 return rs->md.new_layout; in parse_raid_params()
1507 rt = get_raid_type_by_ll(10, rs->md.new_layout); in parse_raid_params()
1509 rs->ti->error = "Failed to recognize new raid10 layout"; in parse_raid_params()
1515 test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) { in parse_raid_params()
1516 rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible"; in parse_raid_params()
1521 rs->raid10_copies = raid10_copies; in parse_raid_params()
1524 rs->md.persistent = 0; in parse_raid_params()
1525 rs->md.external = 1; in parse_raid_params()
1528 return rs_check_for_valid_flags(rs); in parse_raid_params()
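
Nearly every optional argument above is guarded by the same idiom: test_and_set_bit() on the ctr_flags word, where a second occurrence trips the bit test and yields the "Only one ... allowed" error. A userspace sketch of that idiom (flag names are invented; the kernel helper is additionally atomic):

    #include <stdio.h>

    /* Non-atomic userspace stand-in for the kernel's test_and_set_bit(). */
    static int test_and_set_bit(int nr, unsigned long *addr)
    {
        unsigned long mask = 1UL << nr;
        int old = !!(*addr & mask);

        *addr |= mask;
        return old;
    }

    enum { FLAG_NOSYNC, FLAG_SYNC, FLAG_REGION_SIZE };

    /* Each optional argument may appear once; the second occurrence sees
     * the bit already set, exactly the pattern parse_raid_params() repeats
     * per option. */
    static const char *take_arg(int flag, unsigned long *ctr_flags)
    {
        if (test_and_set_bit(flag, ctr_flags))
            return "Only one such argument allowed";
        return NULL;
    }

    int main(void)
    {
        unsigned long ctr_flags = 0;
        const char *e;

        e = take_arg(FLAG_SYNC, &ctr_flags);
        printf("%s\n", e ? e : "ok");  /* first use: ok */
        e = take_arg(FLAG_SYNC, &ctr_flags);
        printf("%s\n", e ? e : "ok");  /* duplicate: error */
        return 0;
    }
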
1532 static int rs_set_raid456_stripe_cache(struct raid_set *rs) in rs_set_raid456_stripe_cache() argument
1536 struct mddev *mddev = &rs->md; in rs_set_raid456_stripe_cache()
1538 uint32_t nr_stripes = rs->stripe_cache_entries; in rs_set_raid456_stripe_cache()
1540 if (!rt_is_raid456(rs->raid_type)) { in rs_set_raid456_stripe_cache()
1541 rs->ti->error = "Inappropriate raid level; cannot change stripe_cache size"; in rs_set_raid456_stripe_cache()
1553 rs->ti->error = "Cannot change stripe_cache size on inactive RAID set"; in rs_set_raid456_stripe_cache()
1561 rs->ti->error = "Failed to set raid4/5/6 stripe cache size"; in rs_set_raid456_stripe_cache()
1572 static unsigned int mddev_data_stripes(struct raid_set *rs) in mddev_data_stripes() argument
1574 return rs->md.raid_disks - rs->raid_type->parity_devs; in mddev_data_stripes()
1578 static unsigned int rs_data_stripes(struct raid_set *rs) in rs_data_stripes() argument
1580 return rs->raid_disks - rs->raid_type->parity_devs; in rs_data_stripes()
1587 static sector_t __rdev_sectors(struct raid_set *rs) in __rdev_sectors() argument
1591 for (i = 0; i < rs->md.raid_disks; i++) { in __rdev_sectors()
1592 struct md_rdev *rdev = &rs->dev[i].rdev; in __rdev_sectors()
1603 static int _check_data_dev_sectors(struct raid_set *rs) in _check_data_dev_sectors() argument
1608 rdev_for_each(rdev, &rs->md) in _check_data_dev_sectors()
1611 if (ds < rs->md.dev_sectors) { in _check_data_dev_sectors()
1612 rs->ti->error = "Component device(s) too small"; in _check_data_dev_sectors()
1621 static int rs_set_dev_and_array_sectors(struct raid_set *rs, sector_t sectors, bool use_mddev) in rs_set_dev_and_array_sectors() argument
1626 struct mddev *mddev = &rs->md; in rs_set_dev_and_array_sectors()
1630 data_stripes = mddev_data_stripes(rs); in rs_set_dev_and_array_sectors()
1632 delta_disks = rs->delta_disks; in rs_set_dev_and_array_sectors()
1633 data_stripes = rs_data_stripes(rs); in rs_set_dev_and_array_sectors()
1637 if (rt_is_raid1(rs->raid_type)) in rs_set_dev_and_array_sectors()
1639 else if (rt_is_raid10(rs->raid_type)) { in rs_set_dev_and_array_sectors()
1640 if (rs->raid10_copies < 2 || in rs_set_dev_and_array_sectors()
1642 rs->ti->error = "Bogus raid10 data copies or delta disks"; in rs_set_dev_and_array_sectors()
1646 dev_sectors *= rs->raid10_copies; in rs_set_dev_and_array_sectors()
1651 if (sector_div(array_sectors, rs->raid10_copies)) in rs_set_dev_and_array_sectors()
1663 rs_set_rdev_sectors(rs); in rs_set_dev_and_array_sectors()
1665 return _check_data_dev_sectors(rs); in rs_set_dev_and_array_sectors()
1667 rs->ti->error = "Target length not divisible by number of data devices"; in rs_set_dev_and_array_sectors()
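
For striped layouts, rs_set_dev_and_array_sectors() splits the target length evenly across the data stripes and derives the array size back from the per-device size; a nonzero remainder from sector_div() produces the "not divisible" error above. A sketch of the striped case (raid1 and raid10 take different branches):

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* sector_div(a, b): divide *a by b in place, return the remainder --
     * the semantics of the kernel helper. */
    static unsigned int sector_div(sector_t *a, unsigned int b)
    {
        unsigned int rem = *a % b;

        *a /= b;
        return rem;
    }

    static int size_striped(sector_t target_len, unsigned int raid_disks,
                            unsigned int parity_devs,
                            sector_t *dev_sectors, sector_t *array_sectors)
    {
        /* Data stripes = total disks minus parity, per rs_data_stripes(). */
        unsigned int data_stripes = raid_disks - parity_devs;
        sector_t per_dev = target_len;

        if (sector_div(&per_dev, data_stripes))
            return -1;  /* "Target length not divisible by number of data devices" */

        *dev_sectors = per_dev;
        *array_sectors = (sector_t)data_stripes * per_dev;
        return 0;
    }

    int main(void)
    {
        sector_t dev, arr;

        /* raid6 with 6 disks: 4 data stripes; 1 GiB target in sectors */
        if (!size_striped(2097152, 6, 2, &dev, &arr))
            printf("dev=%llu array=%llu\n", dev, arr);  /* 524288, 2097152 */
        return 0;
    }
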
1672 static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors) in rs_setup_recovery() argument
1675 if (rs_is_raid0(rs)) in rs_setup_recovery()
1676 rs->md.recovery_cp = MaxSector; in rs_setup_recovery()
1682 else if (rs_is_raid6(rs)) in rs_setup_recovery()
1683 rs->md.recovery_cp = dev_sectors; in rs_setup_recovery()
1689 rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) in rs_setup_recovery()
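
recovery_cp is the resync checkpoint: MaxSector means fully in sync, 0 means resync from the start. rs_setup_recovery() picks it by level, with raid0 needing no sync, raid6 forcing one, and the rest honouring 'nosync'. A condensed sketch of that selection:

    #include <stdio.h>

    typedef unsigned long long sector_t;
    #define MaxSector (~(sector_t)0)

    static sector_t pick_recovery_cp(int level, int nosync,
                                     sector_t dev_sectors)
    {
        if (level == 0)
            return MaxSector;        /* raid0: nothing to sync */
        if (level == 6)
            return dev_sectors;      /* raid6: enforce parity resync */
        return nosync ? MaxSector : 0;  /* otherwise the caller decides */
    }

    int main(void)
    {
        printf("%llu\n", pick_recovery_cp(6, 1, 1024)); /* 1024: nosync ignored */
        return 0;
    }
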
1695 struct raid_set *rs = container_of(ws, struct raid_set, md.event_work); in do_table_event() local
1698 if (!rs_is_reshaping(rs)) { in do_table_event()
1699 if (rs_is_raid10(rs)) in do_table_event()
1700 rs_set_rdev_sectors(rs); in do_table_event()
1701 rs_set_capacity(rs); in do_table_event()
1703 dm_table_event(rs->ti->table); in do_table_event()
1712 static int rs_check_takeover(struct raid_set *rs) in rs_check_takeover() argument
1714 struct mddev *mddev = &rs->md; in rs_check_takeover()
1717 if (rs->md.degraded) { in rs_check_takeover()
1718 rs->ti->error = "Can't takeover degraded raid set"; in rs_check_takeover()
1722 if (rs_is_reshaping(rs)) { in rs_check_takeover()
1723 rs->ti->error = "Can't takeover reshaping raid set"; in rs_check_takeover()
1736 !(rs->raid_disks % mddev->raid_disks)) in rs_check_takeover()
1862 rs->ti->error = "takeover not possible"; in rs_check_takeover()
1867 static bool rs_takeover_requested(struct raid_set *rs) in rs_takeover_requested() argument
1869 return rs->md.new_level != rs->md.level; in rs_takeover_requested()
1873 static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev) in rs_is_layout_change() argument
1875 return (use_mddev ? rs->md.delta_disks : rs->delta_disks) || in rs_is_layout_change()
1876 rs->md.new_layout != rs->md.layout || in rs_is_layout_change()
1877 rs->md.new_chunk_sectors != rs->md.chunk_sectors; in rs_is_layout_change()
1881 static bool rs_reshape_requested(struct raid_set *rs) in rs_reshape_requested() argument
1884 struct mddev *mddev = &rs->md; in rs_reshape_requested()
1886 if (rs_takeover_requested(rs)) in rs_reshape_requested()
1889 if (rs_is_raid0(rs)) in rs_reshape_requested()
1892 change = rs_is_layout_change(rs, false); in rs_reshape_requested()
1895 if (rs_is_raid1(rs)) { in rs_reshape_requested()
1896 if (rs->delta_disks) in rs_reshape_requested()
1897 return !!rs->delta_disks; in rs_reshape_requested()
1900 mddev->raid_disks != rs->raid_disks; in rs_reshape_requested()
1903 if (rs_is_raid10(rs)) in rs_reshape_requested()
1906 rs->delta_disks >= 0; in rs_reshape_requested()
2010 static int rs_check_reshape(struct raid_set *rs) in rs_check_reshape() argument
2012 struct mddev *mddev = &rs->md; in rs_check_reshape()
2015 rs->ti->error = "Reshape not supported"; in rs_check_reshape()
2017 rs->ti->error = "Can't reshape degraded raid set"; in rs_check_reshape()
2018 else if (rs_is_recovering(rs)) in rs_check_reshape()
2019 rs->ti->error = "Convert request on recovering raid set prohibited"; in rs_check_reshape()
2020 else if (rs_is_reshaping(rs)) in rs_check_reshape()
2021 rs->ti->error = "raid set already reshaping!"; in rs_check_reshape()
2022 else if (!(rs_is_raid1(rs) || rs_is_raid10(rs) || rs_is_raid456(rs))) in rs_check_reshape()
2023 rs->ti->error = "Reshaping only supported for raid1/4/5/6/10"; in rs_check_reshape()
2085 struct raid_set *rs = container_of(mddev, struct raid_set, md); in super_sync() local
2097 for (i = 0; i < rs->raid_disks; i++) in super_sync()
2098 if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) { in super_sync()
2206 static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) in super_init_validation() argument
2210 struct mddev *mddev = &rs->md; in super_init_validation()
2247 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) { in super_init_validation()
2259 rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout); in super_init_validation()
2269 if (rs_takeover_requested(rs)) { in super_init_validation()
2276 } else if (rs_reshape_requested(rs)) { in super_init_validation()
2289 if (rs->delta_disks) in super_init_validation()
2291 mddev->raid_disks, mddev->raid_disks + rs->delta_disks); in super_init_validation()
2292 if (rs_is_raid10(rs)) { in super_init_validation()
2306 if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) in super_init_validation()
2344 if (new_devs == rs->raid_disks || !rebuilds) { in super_init_validation()
2346 if (new_devs == rs->raid_disks) { in super_init_validation()
2350 new_devs != rs->delta_disks) { in super_init_validation()
2365 } else if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && rs_is_recovering(rs)) { in super_init_validation()
2369 } else if (rs_is_reshaping(rs)) { in super_init_validation()
2398 if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) { in super_init_validation()
2400 rs->raid_disks % rs->raid10_copies) { in super_init_validation()
2401 rs->ti->error = in super_init_validation()
2408 } else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) && in super_init_validation()
2409 !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) && in super_init_validation()
2410 !rt_is_raid1(rs->raid_type)) { in super_init_validation()
2411 rs->ti->error = "Cannot change device positions in raid set"; in super_init_validation()
2430 static int super_validate(struct raid_set *rs, struct md_rdev *rdev) in super_validate() argument
2432 struct mddev *mddev = &rs->md; in super_validate()
2435 if (rs_is_raid0(rs) || !rdev->sb_page || rdev->raid_disk < 0) in super_validate()
2444 if (!mddev->events && super_init_validation(rs, rdev)) in super_validate()
2449 rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags"; in super_validate()
2454 rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet"; in super_validate()
2459 …mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(40… in super_validate()
2478 else if (!rs_is_reshaping(rs)) in super_validate()
2501 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) in analyse_superblocks() argument
2505 struct mddev *mddev = &rs->md; in analyse_superblocks()
2532 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) in analyse_superblocks()
2549 if (rs_is_raid0(rs)) in analyse_superblocks()
2572 rs->ti->error = "Unable to assemble array: Invalid superblocks"; in analyse_superblocks()
2573 if (super_validate(rs, freshest)) in analyse_superblocks()
2576 if (validate_raid_redundancy(rs)) { in analyse_superblocks()
2577 rs->ti->error = "Insufficient redundancy to activate array"; in analyse_superblocks()
2584 super_validate(rs, rdev)) in analyse_superblocks()
2597 static int rs_adjust_data_offsets(struct raid_set *rs) in rs_adjust_data_offsets() argument
2603 if (!test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) { in rs_adjust_data_offsets()
2604 if (!rs_is_reshapable(rs)) in rs_adjust_data_offsets()
2611 rdev = &rs->dev[0].rdev; in rs_adjust_data_offsets()
2613 if (rs->delta_disks < 0) { in rs_adjust_data_offsets()
2623 new_data_offset = rs->data_offset; in rs_adjust_data_offsets()
2625 } else if (rs->delta_disks > 0) { in rs_adjust_data_offsets()
2634 data_offset = rs->data_offset; in rs_adjust_data_offsets()
2656 data_offset = rs->data_offset ? rdev->data_offset : 0; in rs_adjust_data_offsets()
2657 new_data_offset = data_offset ? 0 : rs->data_offset; in rs_adjust_data_offsets()
2658 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in rs_adjust_data_offsets()
2664 if (rs->data_offset && in rs_adjust_data_offsets()
2665 to_sector(i_size_read(rdev->bdev->bd_inode)) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) { in rs_adjust_data_offsets()
2666 rs->ti->error = data_offset ? "No space for forward reshape" : in rs_adjust_data_offsets()
2675 if (rs->md.recovery_cp < rs->md.dev_sectors) in rs_adjust_data_offsets()
2676 rs->md.recovery_cp += rs->dev[0].rdev.data_offset; in rs_adjust_data_offsets()
2679 rdev_for_each(rdev, &rs->md) { in rs_adjust_data_offsets()
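
For an out-of-place reshape, rs_adjust_data_offsets() alternates the data area between offset zero and the reserved head space: whichever end currently holds data stays as data_offset, and new_data_offset points at the other end. A sketch of that swap, mirroring the two assignments at lines 2656-2657 above (the offsets struct is a simplified stand-in for struct md_rdev):

    #include <stdio.h>

    typedef unsigned long long sector_t;

    struct offsets { sector_t data_offset, new_data_offset; };

    static void adjust(struct offsets *o, sector_t cur, sector_t requested)
    {
        /* With a requested offset, data stays put; the reshape targets
         * the opposite end of the reserved space. */
        o->data_offset = requested ? cur : 0;
        o->new_data_offset = o->data_offset ? 0 : requested;
    }

    int main(void)
    {
        struct offsets a, b;

        adjust(&a, 0, 8192);     /* data at 0    -> reshape into 8192 */
        adjust(&b, 8192, 8192);  /* data at 8192 -> reshape into 0    */
        printf("a: %llu -> %llu\n", a.data_offset, a.new_data_offset);
        printf("b: %llu -> %llu\n", b.data_offset, b.new_data_offset);
        return 0;
    }
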
2690 static void __reorder_raid_disk_indexes(struct raid_set *rs) in __reorder_raid_disk_indexes() argument
2695 rdev_for_each(rdev, &rs->md) { in __reorder_raid_disk_indexes()
2706 static int rs_setup_takeover(struct raid_set *rs) in rs_setup_takeover() argument
2708 struct mddev *mddev = &rs->md; in rs_setup_takeover()
2710 unsigned int d = mddev->raid_disks = rs->raid_disks; in rs_setup_takeover()
2711 sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset; in rs_setup_takeover()
2713 if (rt_is_raid10(rs->raid_type)) { in rs_setup_takeover()
2714 if (rs_is_raid0(rs)) { in rs_setup_takeover()
2716 __reorder_raid_disk_indexes(rs); in rs_setup_takeover()
2719 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR, in rs_setup_takeover()
2720 rs->raid10_copies); in rs_setup_takeover()
2721 } else if (rs_is_raid1(rs)) in rs_setup_takeover()
2723 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR, in rs_setup_takeover()
2724 rs->raid_disks); in rs_setup_takeover()
2734 rdev = &rs->dev[d].rdev; in rs_setup_takeover()
2736 if (test_bit(d, (void *) rs->rebuild_disks)) { in rs_setup_takeover()
2751 static int rs_prepare_reshape(struct raid_set *rs) in rs_prepare_reshape() argument
2754 struct mddev *mddev = &rs->md; in rs_prepare_reshape()
2756 if (rs_is_raid10(rs)) { in rs_prepare_reshape()
2757 if (rs->raid_disks != mddev->raid_disks && in rs_prepare_reshape()
2759 rs->raid10_copies && in rs_prepare_reshape()
2760 rs->raid10_copies != __raid10_near_copies(mddev->layout)) { in rs_prepare_reshape()
2767 if (rs->raid_disks % rs->raid10_copies) { in rs_prepare_reshape()
2768 rs->ti->error = "Can't reshape raid10 mirror groups"; in rs_prepare_reshape()
2773 __reorder_raid_disk_indexes(rs); in rs_prepare_reshape()
2774 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR, in rs_prepare_reshape()
2775 rs->raid10_copies); in rs_prepare_reshape()
2781 } else if (rs_is_raid456(rs)) in rs_prepare_reshape()
2784 else if (rs_is_raid1(rs)) { in rs_prepare_reshape()
2785 if (rs->delta_disks) { in rs_prepare_reshape()
2787 mddev->degraded = rs->delta_disks < 0 ? -rs->delta_disks : rs->delta_disks; in rs_prepare_reshape()
2791 mddev->raid_disks = rs->raid_disks; in rs_prepare_reshape()
2795 rs->ti->error = "Called with bogus raid type"; in rs_prepare_reshape()
2800 set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags); in rs_prepare_reshape()
2801 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in rs_prepare_reshape()
2802 } else if (mddev->raid_disks < rs->raid_disks) in rs_prepare_reshape()
2804 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in rs_prepare_reshape()
2810 static sector_t _get_reshape_sectors(struct raid_set *rs) in _get_reshape_sectors() argument
2815 rdev_for_each(rdev, &rs->md) in _get_reshape_sectors()
2823 return max(reshape_sectors, (sector_t) rs->data_offset); in _get_reshape_sectors()
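
_get_reshape_sectors() reports how much per-device space an out-of-place reshape has to work with: the gap between the current and new data offsets on any non-journal member, with the requested data_offset as a floor via max(). A sketch:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    static sector_t get_reshape_sectors(sector_t data_offset,
                                        sector_t new_data_offset,
                                        sector_t requested)
    {
        /* Absolute gap between the two offsets on one member device. */
        sector_t gap = data_offset > new_data_offset ?
                       data_offset - new_data_offset :
                       new_data_offset - data_offset;

        return gap > requested ? gap : requested;
    }

    int main(void)
    {
        printf("%llu\n", get_reshape_sectors(8192, 0, 4096));  /* 8192 */
        return 0;
    }
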
2833 static int rs_setup_reshape(struct raid_set *rs) in rs_setup_reshape() argument
2837 sector_t reshape_sectors = _get_reshape_sectors(rs); in rs_setup_reshape()
2838 struct mddev *mddev = &rs->md; in rs_setup_reshape()
2841 mddev->delta_disks = rs->delta_disks; in rs_setup_reshape()
2847 DMINFO("Ignoring invalid layout change with delta_disks=%d", rs->delta_disks); in rs_setup_reshape()
2874 if (rs->delta_disks > 0) { in rs_setup_reshape()
2876 for (d = cur_raid_devs; d < rs->raid_disks; d++) { in rs_setup_reshape()
2877 rdev = &rs->dev[d].rdev; in rs_setup_reshape()
2888 rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector; in rs_setup_reshape()
2894 } else if (rs->delta_disks < 0) { in rs_setup_reshape()
2895 r = rs_set_dev_and_array_sectors(rs, rs->ti->len, true); in rs_setup_reshape()
2921 mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1; in rs_setup_reshape()
2929 rdev_for_each(rdev, &rs->md) in rs_setup_reshape()
2941 static void rs_reset_inconclusive_reshape(struct raid_set *rs) in rs_reset_inconclusive_reshape() argument
2943 if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) { in rs_reset_inconclusive_reshape()
2944 rs_set_cur(rs); in rs_reset_inconclusive_reshape()
2945 rs->md.delta_disks = 0; in rs_reset_inconclusive_reshape()
2946 rs->md.reshape_backwards = 0; in rs_reset_inconclusive_reshape()
2954 static void configure_discard_support(struct raid_set *rs) in configure_discard_support() argument
2958 struct dm_target *ti = rs->ti; in configure_discard_support()
2963 raid456 = rs_is_raid456(rs); in configure_discard_support()
2965 for (i = 0; i < rs->raid_disks; i++) { in configure_discard_support()
2968 if (!rs->dev[i].rdev.bdev) in configure_discard_support()
2971 q = bdev_get_queue(rs->dev[i].rdev.bdev); in configure_discard_support()
3007 struct raid_set *rs = NULL; in raid_ctr() local
3044 rs = raid_set_alloc(ti, rt, num_raid_devs); in raid_ctr()
3045 if (IS_ERR(rs)) in raid_ctr()
3046 return PTR_ERR(rs); in raid_ctr()
3048 r = parse_raid_params(rs, &as, num_raid_params); in raid_ctr()
3052 r = parse_dev_params(rs, &as); in raid_ctr()
3056 rs->md.sync_super = super_sync; in raid_ctr()
3064 r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false); in raid_ctr()
3069 rs->array_sectors = rs->md.array_sectors; in raid_ctr()
3070 rs->dev_sectors = rs->md.dev_sectors; in raid_ctr()
3077 rs_config_backup(rs, &rs_layout); in raid_ctr()
3079 r = analyse_superblocks(ti, rs); in raid_ctr()
3084 sb_array_sectors = rs->md.array_sectors; in raid_ctr()
3085 rdev_sectors = __rdev_sectors(rs); in raid_ctr()
3093 reshape_sectors = _get_reshape_sectors(rs); in raid_ctr()
3094 if (rs->dev_sectors != rdev_sectors) { in raid_ctr()
3095 resize = (rs->dev_sectors != rdev_sectors - reshape_sectors); in raid_ctr()
3096 if (rs->dev_sectors > rdev_sectors - reshape_sectors) in raid_ctr()
3097 set_bit(RT_FLAG_RS_GROW, &rs->runtime_flags); in raid_ctr()
3100 INIT_WORK(&rs->md.event_work, do_table_event); in raid_ctr()
3101 ti->private = rs; in raid_ctr()
3105 rs_config_restore(rs, &rs_layout); in raid_ctr()
3113 if (test_bit(MD_ARRAY_FIRST_USE, &rs->md.flags)) { in raid_ctr()
3115 if (rs_is_raid6(rs) && in raid_ctr()
3116 test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) { in raid_ctr()
3121 rs_setup_recovery(rs, 0); in raid_ctr()
3122 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in raid_ctr()
3123 rs_set_new(rs); in raid_ctr()
3124 } else if (rs_is_recovering(rs)) { in raid_ctr()
3127 } else if (rs_is_reshaping(rs)) { in raid_ctr()
3135 } else if (rs_takeover_requested(rs)) { in raid_ctr()
3136 if (rs_is_reshaping(rs)) { in raid_ctr()
3143 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) { in raid_ctr()
3157 r = rs_check_takeover(rs); in raid_ctr()
3161 r = rs_setup_takeover(rs); in raid_ctr()
3165 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in raid_ctr()
3167 rs_setup_recovery(rs, MaxSector); in raid_ctr()
3168 rs_set_new(rs); in raid_ctr()
3169 } else if (rs_reshape_requested(rs)) { in raid_ctr()
3171 clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags); in raid_ctr()
3179 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) { in raid_ctr()
3186 if (reshape_sectors || rs_is_raid1(rs)) { in raid_ctr()
3194 r = rs_prepare_reshape(rs); in raid_ctr()
3199 rs_setup_recovery(rs, MaxSector); in raid_ctr()
3201 rs_set_cur(rs); in raid_ctr()
3205 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) { in raid_ctr()
3206 clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags); in raid_ctr()
3207 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in raid_ctr()
3208 rs_setup_recovery(rs, MaxSector); in raid_ctr()
3209 } else if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) { in raid_ctr()
3214 r = rs_set_dev_and_array_sectors(rs, sb_array_sectors, false); in raid_ctr()
3218 …rs_setup_recovery(rs, rs->md.recovery_cp < rs->md.dev_sectors ? rs->md.recovery_cp : rs->md.dev_se… in raid_ctr()
3221 r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false); in raid_ctr()
3225 if (sb_array_sectors > rs->array_sectors) in raid_ctr()
3226 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in raid_ctr()
3228 rs_set_cur(rs); in raid_ctr()
3232 r = rs_adjust_data_offsets(rs); in raid_ctr()
3237 rs_reset_inconclusive_reshape(rs); in raid_ctr()
3240 rs->md.ro = 1; in raid_ctr()
3241 rs->md.in_sync = 1; in raid_ctr()
3244 set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); in raid_ctr()
3247 mddev_lock_nointr(&rs->md); in raid_ctr()
3248 r = md_run(&rs->md); in raid_ctr()
3249 rs->md.in_sync = 0; /* Assume already marked dirty */ in raid_ctr()
3252 mddev_unlock(&rs->md); in raid_ctr()
3256 r = md_start(&rs->md); in raid_ctr()
3259 mddev_unlock(&rs->md); in raid_ctr()
3264 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) { in raid_ctr()
3265 r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode); in raid_ctr()
3268 mddev_unlock(&rs->md); in raid_ctr()
3273 mddev_suspend(&rs->md); in raid_ctr()
3274 set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags); in raid_ctr()
3277 if (rs_is_raid456(rs)) { in raid_ctr()
3278 r = rs_set_raid456_stripe_cache(rs); in raid_ctr()
3284 if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) { in raid_ctr()
3285 r = rs_check_reshape(rs); in raid_ctr()
3290 rs_config_restore(rs, &rs_layout); in raid_ctr()
3292 if (rs->md.pers->start_reshape) { in raid_ctr()
3293 r = rs->md.pers->check_reshape(&rs->md); in raid_ctr()
3302 configure_discard_support(rs); in raid_ctr()
3304 mddev_unlock(&rs->md); in raid_ctr()
3311 md_stop(&rs->md); in raid_ctr()
3313 raid_set_free(rs); in raid_ctr()
3320 struct raid_set *rs = ti->private; in raid_dtr() local
3322 md_stop(&rs->md); in raid_dtr()
3323 raid_set_free(rs); in raid_dtr()
3328 struct raid_set *rs = ti->private; in raid_map() local
3329 struct mddev *mddev = &rs->md; in raid_map()
3406 static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev) in __raid_dev_status() argument
3413 return (rs->journal_dev.mode == R5C_JOURNAL_MODE_WRITE_THROUGH) ? "A" : "a"; in __raid_dev_status()
3414 else if (test_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags) || in __raid_dev_status()
3415 (!test_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags) && in __raid_dev_status()
3423 static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery, in rs_get_progress() argument
3427 struct mddev *mddev = &rs->md; in rs_get_progress()
3429 clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); in rs_get_progress()
3430 clear_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); in rs_get_progress()
3432 if (rs_is_raid0(rs)) { in rs_get_progress()
3434 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); in rs_get_progress()
3448 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); in rs_get_progress()
3465 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); in rs_get_progress()
3473 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); in rs_get_progress()
3480 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); in rs_get_progress()
3491 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); in rs_get_progress()
3495 clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); in rs_get_progress()
3513 struct raid_set *rs = ti->private; in raid_status() local
3514 struct mddev *mddev = &rs->md; in raid_status()
3537 resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ? in raid_status()
3539 recovery = rs->md.recovery; in raid_status()
3541 progress = rs_get_progress(rs, recovery, state, resync_max_sectors); in raid_status()
3546 for (i = 0; i < rs->raid_disks; i++) in raid_status()
3547 DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev)); in raid_status()
3592 DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset); in raid_status()
3597 DMEMIT(" %s", test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ? in raid_status()
3598 __raid_dev_status(rs, &rs->journal_dev.rdev) : "-"); in raid_status()
3608 for (i = 0; i < rs->raid_disks; i++) { in raid_status()
3609 rebuild_writemostly_count += (test_bit(i, (void *) rs->rebuild_disks) ? 2 : 0) + in raid_status()
3610 (test_bit(WriteMostly, &rs->dev[i].rdev.flags) ? 2 : 0); in raid_status()
3612 rebuild_writemostly_count -= (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) ? 2 : 0) + in raid_status()
3613 (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags) ? 2 : 0); in raid_status()
3616 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) + in raid_status()
3617 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2; in raid_status()
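
The status-line parameter count is simple arithmetic: two words per rebuild/write_mostly entry (tallied above), one word per set no-argument option, and two per set one-argument option, counted via population count over the flag masks. A sketch of the tally with invented masks (the listing omits the seed value the kernel's count starts from):

    #include <stdio.h>

    /* Userspace stand-in for the kernel's hweight32() population count. */
    static unsigned int hweight32(unsigned int w)
    {
        return (unsigned int)__builtin_popcount(w);
    }

    int main(void)
    {
        /* Invented example masks: three no-arg options and two one-arg
         * options set, plus one rebuild and one write_mostly entry. */
        unsigned int no_arg_set  = 0x07;
        unsigned int one_arg_set = 0x18;
        unsigned int rebuild_writemostly_count = 2 + 2;

        unsigned int raid_param_cnt = rebuild_writemostly_count +
                                      hweight32(no_arg_set) +
                                      hweight32(one_arg_set) * 2;

        printf("raid_param_cnt=%u\n", raid_param_cnt);  /* 4 + 3 + 4 = 11 */
        return 0;
    }
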
3620 DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors); in raid_status()
3621 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) in raid_status()
3623 if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) in raid_status()
3625 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) in raid_status()
3626 for (i = 0; i < rs->raid_disks; i++) in raid_status()
3627 if (test_bit(i, (void *) rs->rebuild_disks)) in raid_status()
3629 if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) in raid_status()
3632 if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) in raid_status()
3635 if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) in raid_status()
3638 if (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags)) in raid_status()
3639 for (i = 0; i < rs->raid_disks; i++) in raid_status()
3640 if (test_bit(WriteMostly, &rs->dev[i].rdev.flags)) in raid_status()
3642 rs->dev[i].rdev.raid_disk); in raid_status()
3643 if (test_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) in raid_status()
3646 if (test_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) in raid_status()
3649 if (test_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) in raid_status()
3652 if (test_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) in raid_status()
3655 if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) in raid_status()
3658 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) in raid_status()
3660 max(rs->delta_disks, mddev->delta_disks)); in raid_status()
3661 if (test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) in raid_status()
3663 (unsigned long long) rs->data_offset); in raid_status()
3664 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) in raid_status()
3666 __get_dev_name(rs->journal_dev.dev)); in raid_status()
3667 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) in raid_status()
3669 md_journal_mode_to_dm_raid(rs->journal_dev.mode)); in raid_status()
3670 DMEMIT(" %d", rs->raid_disks); in raid_status()
3671 for (i = 0; i < rs->raid_disks; i++) in raid_status()
3672 DMEMIT(" %s %s", __get_dev_name(rs->dev[i].meta_dev), in raid_status()
3673 __get_dev_name(rs->dev[i].data_dev)); in raid_status()
3686 recovery = rs->md.recovery; in raid_status()
3690 for (i = 0; i < rs->raid_disks; i++) { in raid_status()
3692 DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev)); in raid_status()
3697 switch (rs->journal_dev.mode) { in raid_status()
3719 struct raid_set *rs = ti->private; in raid_message() local
3720 struct mddev *mddev = &rs->md; in raid_message()
3770 struct raid_set *rs = ti->private; in raid_iterate_devices() local
3774 for (i = 0; !r && i < rs->md.raid_disks; i++) in raid_iterate_devices()
3775 if (rs->dev[i].data_dev) in raid_iterate_devices()
3777 rs->dev[i].data_dev, in raid_iterate_devices()
3779 rs->md.dev_sectors, in raid_iterate_devices()
3787 struct raid_set *rs = ti->private; in raid_io_hints() local
3788 unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors); in raid_io_hints()
3791 blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs)); in raid_io_hints()
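
raid_io_hints() advertises a full stripe as the optimal I/O size: the chunk size in bytes times the number of data stripes. A worked example:

    #include <stdio.h>

    int main(void)
    {
        /* A 6-disk raid6 has 4 data stripes; with 128-sector (64 KiB)
         * chunks, one full stripe -- the advertised io_opt -- is 256 KiB. */
        unsigned int chunk_size_bytes = 128 * 512;
        unsigned int data_stripes = 6 - 2;

        printf("io_opt=%u bytes\n", chunk_size_bytes * data_stripes); /* 262144 */
        return 0;
    }
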
3796 struct raid_set *rs = ti->private; in raid_postsuspend() local
3798 if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) { in raid_postsuspend()
3800 if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery)) in raid_postsuspend()
3801 md_stop_writes(&rs->md); in raid_postsuspend()
3803 mddev_lock_nointr(&rs->md); in raid_postsuspend()
3804 mddev_suspend(&rs->md); in raid_postsuspend()
3805 mddev_unlock(&rs->md); in raid_postsuspend()
3809 static void attempt_restore_of_faulty_devices(struct raid_set *rs) in attempt_restore_of_faulty_devices() argument
3816 struct mddev *mddev = &rs->md; in attempt_restore_of_faulty_devices()
3826 r = &rs->dev[i].rdev; in attempt_restore_of_faulty_devices()
3835 rs->raid_type->name, i); in attempt_restore_of_faulty_devices()
3877 rdev_for_each(r, &rs->md) { in attempt_restore_of_faulty_devices()
3892 static int __load_dirty_region_bitmap(struct raid_set *rs) in __load_dirty_region_bitmap() argument
3897 if (!rs_is_raid0(rs) && in __load_dirty_region_bitmap()
3898 !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) { in __load_dirty_region_bitmap()
3899 r = md_bitmap_load(&rs->md); in __load_dirty_region_bitmap()
3908 static void rs_update_sbs(struct raid_set *rs) in rs_update_sbs() argument
3910 struct mddev *mddev = &rs->md; in rs_update_sbs()
3926 static int rs_start_reshape(struct raid_set *rs) in rs_start_reshape() argument
3929 struct mddev *mddev = &rs->md; in rs_start_reshape()
3935 r = rs_setup_reshape(rs); in rs_start_reshape()
3946 rs->ti->error = "pers->check_reshape() failed"; in rs_start_reshape()
3957 rs->ti->error = "pers->start_reshape() failed"; in rs_start_reshape()
3967 rs_update_sbs(rs); in rs_start_reshape()
3975 struct raid_set *rs = ti->private; in raid_preresume() local
3976 struct mddev *mddev = &rs->md; in raid_preresume()
3979 if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags)) in raid_preresume()
3988 if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags)) in raid_preresume()
3989 rs_update_sbs(rs); in raid_preresume()
3992 r = __load_dirty_region_bitmap(rs); in raid_preresume()
3997 if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) { in raid_preresume()
3998 mddev->array_sectors = rs->array_sectors; in raid_preresume()
3999 mddev->dev_sectors = rs->dev_sectors; in raid_preresume()
4000 rs_set_rdev_sectors(rs); in raid_preresume()
4001 rs_set_capacity(rs); in raid_preresume()
4005 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap && in raid_preresume()
4006 (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) || in raid_preresume()
4007 (rs->requested_bitmap_chunk_sectors && in raid_preresume()
4008 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) { in raid_preresume()
4009 int chunksize = to_bytes(rs->requested_bitmap_chunk_sectors) ?: mddev->bitmap_info.chunksize; in raid_preresume()
4022 if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) in raid_preresume()
4027 if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) { in raid_preresume()
4029 rs_set_rdev_sectors(rs); in raid_preresume()
4031 r = rs_start_reshape(rs); in raid_preresume()
4043 struct raid_set *rs = ti->private; in raid_resume() local
4044 struct mddev *mddev = &rs->md; in raid_resume()
4046 if (test_and_set_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) { in raid_resume()
4052 attempt_restore_of_faulty_devices(rs); in raid_resume()
4055 if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) { in raid_resume()
4058 rs_set_capacity(rs); in raid_resume()