Lines matching refs:mddev (drivers/md/raid5.c)

198 md_wakeup_thread(conf->mddev->thread); in raid5_wakeup_stripe_thread()
271 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
277 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
359 md_wakeup_thread(conf->mddev->thread); in release_inactive_stripe_list()
405 if (unlikely(!conf->mddev->thread) || in raid5_release_stripe()
410 md_wakeup_thread(conf->mddev->thread); in raid5_release_stripe()
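
The md_wakeup_thread() entries above all follow one pattern: queue stripe work, then kick the array's per-mddev raid5d thread. raid5_release_stripe() (lines 405-410) is the lock-free variant; below is a hedged reconstruction of its fast path based on mainline. The slow path, which takes conf->device_lock and calls do_release_stripe() (lines 271-277), is elided.

void raid5_release_stripe(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;

	/* Drop all but the last reference without touching any lists. */
	if (atomic_add_unless(&sh->count, -1, 1))
		return;

	/* No thread (array stopping) or already queued: fall back. */
	if (unlikely(!conf->mddev->thread) ||
	    test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
		goto slow_path;

	/* Lock-free hand-off: raid5d() drains conf->released_stripes. */
	if (llist_add(&sh->release_list, &conf->released_stripes))
		md_wakeup_thread(conf->mddev->thread);
	return;

slow_path:
	/* ... spin_lock_irqsave(&conf->device_lock), do_release_stripe() ... */
	;
}
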
748 int degraded = conf->mddev->degraded; in has_failed()
750 if (test_bit(MD_BROKEN, &conf->mddev->flags)) in has_failed()
753 if (conf->mddev->reshape_position != MaxSector) in has_failed()
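
The three has_failed() fragments above (lines 748-753) assemble into a short predicate; a reconstruction consistent with them (the exact body may vary across kernel versions):

static bool has_failed(struct r5conf *conf)
{
	int degraded = conf->mddev->degraded;

	/* An array marked broken can never accept writes again. */
	if (test_bit(MD_BROKEN, &conf->mddev->flags))
		return true;

	/* During a reshape ->degraded can be stale for the geometry
	 * being written, so recompute it. */
	if (conf->mddev->reshape_position != MaxSector)
		degraded = raid5_calc_degraded(conf);

	return degraded > conf->max_degraded;
}
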
991 md_wakeup_thread(conf->mddev->thread); in stripe_add_to_batch_list()
1230 if (!conf->mddev->external && in ops_run_io()
1231 conf->mddev->sb_flags) { in ops_run_io()
1236 md_check_recovery(conf->mddev); in ops_run_io()
1244 md_wait_for_blocked_rdev(rdev, conf->mddev); in ops_run_io()
1247 rdev_dec_pending(rdev, conf->mddev); in ops_run_io()
1306 if (conf->mddev->gendisk) in ops_run_io()
1308 disk_devt(conf->mddev->gendisk), in ops_run_io()
1353 if (conf->mddev->gendisk) in ops_run_io()
1355 disk_devt(conf->mddev->gendisk), in ops_run_io()
2433 if (conf->mddev->gendisk) in grow_stripes()
2435 "raid%d-%s", conf->level, mdname(conf->mddev)); in grow_stripes()
2438 "raid%d-%p", conf->level, conf->mddev); in grow_stripes()
2510 mddev_suspend(conf->mddev); in resize_chunks()
2524 mddev_resume(conf->mddev); in resize_chunks()
2565 md_allow_write(conf->mddev); in resize_stripes()
2749 static struct md_rdev *rdev_mdlock_deref(struct mddev *mddev, in rdev_mdlock_deref() argument
2753 lockdep_is_held(&mddev->reconfig_mutex)); in rdev_mdlock_deref()
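
Lines 2749-2753 show all but the middle of rdev_mdlock_deref(); filling in the obvious body gives (a reconstruction, not a verbatim quote):

/* Dereference an __rcu rdev pointer that is protected by the mddev's
 * reconfig_mutex rather than by an RCU read-side section; lockdep
 * checks that the caller really holds the mutex. */
static struct md_rdev *rdev_mdlock_deref(struct mddev *mddev,
					 struct md_rdev __rcu *rdev)
{
	return rcu_dereference_protected(rdev,
			lockdep_is_held(&mddev->reconfig_mutex));
}
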
2798 mdname(conf->mddev), RAID5_STRIPE_SECTORS(conf), in raid5_end_read_request()
2826 mdname(conf->mddev), in raid5_end_read_request()
2829 else if (conf->mddev->degraded >= conf->max_degraded) { in raid5_end_read_request()
2833 mdname(conf->mddev), in raid5_end_read_request()
2841 mdname(conf->mddev), in raid5_end_read_request()
2848 mdname(conf->mddev), in raid5_end_read_request()
2852 mdname(conf->mddev), rdev->bdev); in raid5_end_read_request()
2874 md_error(conf->mddev, rdev); in raid5_end_read_request()
2877 rdev_dec_pending(rdev, conf->mddev); in raid5_end_read_request()
2922 md_error(conf->mddev, rdev); in raid5_end_write_request()
2934 &rdev->mddev->recovery); in raid5_end_write_request()
2947 rdev_dec_pending(rdev, conf->mddev); in raid5_end_write_request()
2962 static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) in raid5_error() argument
2964 struct r5conf *conf = mddev->private; in raid5_error()
2969 mdname(mddev), rdev->bdev); in raid5_error()
2974 mddev->degraded = raid5_calc_degraded(conf); in raid5_error()
2977 set_bit(MD_BROKEN, &conf->mddev->flags); in raid5_error()
2978 conf->recovery_disabled = mddev->recovery_disabled; in raid5_error()
2981 mdname(mddev), mddev->degraded, conf->raid_disks); in raid5_error()
2984 mdname(mddev), conf->raid_disks - mddev->degraded); in raid5_error()
2988 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in raid5_error()
2991 set_mask_bits(&mddev->sb_flags, 0, in raid5_error()
2993 r5c_update_on_rdev_error(mddev, rdev); in raid5_error()
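
The raid5_error() fragments (lines 2962-2993) outline md's ->error_handler for raid456: mark the device Faulty, recompute ->degraded, and flag the whole array MD_BROKEN once more devices have failed than parity can cover. A reconstruction from those fragments (locking details and log text follow mainline and may differ in other trees):

static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r5conf *conf = mddev->private;
	unsigned long flags;

	pr_crit("md/raid:%s: Disk failure on %pg, disabling device.\n",
		mdname(mddev), rdev->bdev);

	spin_lock_irqsave(&conf->device_lock, flags);
	set_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	mddev->degraded = raid5_calc_degraded(conf);

	if (has_failed(conf)) {
		set_bit(MD_BROKEN, &conf->mddev->flags);
		conf->recovery_disabled = mddev->recovery_disabled;
		pr_crit("md/raid:%s: Cannot continue operation (%d/%d failed).\n",
			mdname(mddev), mddev->degraded, conf->raid_disks);
	} else {
		pr_crit("md/raid:%s: Operation continuing on %d devices.\n",
			mdname(mddev), conf->raid_disks - mddev->degraded);
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);

	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_mask_bits(&mddev->sb_flags, 0,
		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
	r5c_update_on_rdev_error(mddev, rdev);
}
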
3316 mdname(conf->mddev)); in raid5_compute_blocknr()
3573 md_write_inc(conf->mddev, bi); in __add_stripe_bio()
3594 if (conf->mddev->bitmap && firstwrite) { in __add_stripe_bio()
3609 md_bitmap_startwrite(conf->mddev->bitmap, sh->sector, in __add_stripe_bio()
3684 md_error(conf->mddev, rdev); in handle_failed_stripe()
3685 rdev_dec_pending(rdev, conf->mddev); in handle_failed_stripe()
3706 md_write_end(conf->mddev); in handle_failed_stripe()
3711 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3727 md_write_end(conf->mddev); in handle_failed_stripe()
3757 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3769 md_wakeup_thread(conf->mddev->thread); in handle_failed_stripe()
3792 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { in handle_failed_sync()
3816 conf->mddev->recovery_disabled; in handle_failed_sync()
3818 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), !abort); in handle_failed_sync()
3832 || rdev->mddev->recovery_cp <= sh->sector)) in want_replace()
3925 sh->sector < sh->raid_conf->mddev->recovery_cp) in need_this_block()
4105 md_write_end(conf->mddev); in handle_stripe_clean_event()
4109 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_stripe_clean_event()
4166 md_wakeup_thread(conf->mddev->thread); in handle_stripe_clean_event()
4193 sector_t recovery_cp = conf->mddev->recovery_cp; in handle_stripe_dirtying()
4244 if (conf->mddev->queue) in handle_stripe_dirtying()
4245 blk_add_trace_msg(conf->mddev->queue, in handle_stripe_dirtying()
4324 if (rcw && conf->mddev->queue) in handle_stripe_dirtying()
4325 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", in handle_stripe_dirtying()
4413 atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches); in handle_parity_checks5()
4414 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { in handle_parity_checks5()
4418 "%llu-%llu\n", mdname(conf->mddev), in handle_parity_checks5()
4540 mdname(conf->mddev), in handle_parity_checks6()
4578 atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches); in handle_parity_checks6()
4579 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { in handle_parity_checks6()
4583 "%llu-%llu\n", mdname(conf->mddev), in handle_parity_checks6()
4880 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
4881 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) in analyse_stripe()
5048 test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) { in handle_stripe()
5060 rdev_dec_pending(s.blocked_rdev, conf->mddev); in handle_stripe()
5248 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1); in handle_stripe()
5257 if (s.failed <= conf->max_degraded && !conf->mddev->ro) in handle_stripe()
5315 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1); in handle_stripe()
5325 if (conf->mddev->external) in handle_stripe()
5327 conf->mddev); in handle_stripe()
5334 conf->mddev); in handle_stripe()
5346 md_error(conf->mddev, rdev); in handle_stripe()
5347 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5353 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5362 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5379 md_wakeup_thread(conf->mddev->thread); in handle_stripe()
5420 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) in in_chunk_boundary() argument
5422 struct r5conf *conf = mddev->private; in in_chunk_boundary()
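
in_chunk_boundary() (lines 5420-5422) decides whether a read bio fits inside a single chunk and may therefore bypass the stripe cache via raid5_read_one_chunk(). A reconstruction assuming mainline behaviour:

static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
	struct r5conf *conf = mddev->private;
	sector_t sector = bio->bi_iter.bi_sector;
	unsigned int chunk_sectors;
	unsigned int bio_sectors = bio_sectors(bio);

	/* Two chunk sizes can be live during a reshape; be conservative
	 * and test against the smaller one. */
	chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
	return  chunk_sectors >=
		((sector & (chunk_sectors - 1)) + bio_sectors);
}
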
5446 md_wakeup_thread(conf->mddev->thread); in add_bio_to_retry()
5480 struct mddev *mddev = rdev->mddev; in raid5_align_endio() local
5481 struct r5conf *conf = mddev->private; in raid5_align_endio()
5486 rdev_dec_pending(rdev, conf->mddev); in raid5_align_endio()
5500 static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) in raid5_read_one_chunk() argument
5502 struct r5conf *conf = mddev->private; in raid5_read_one_chunk()
5509 if (!in_chunk_boundary(mddev, raid_bio)) { in raid5_read_one_chunk()
5539 rdev_dec_pending(rdev, mddev); in raid5_read_one_chunk()
5543 md_account_bio(mddev, &raid_bio); in raid5_read_one_chunk()
5547 &mddev->bio_set); in raid5_read_one_chunk()
5574 if (mddev->gendisk) in raid5_read_one_chunk()
5575 trace_block_bio_remap(align_bio, disk_devt(mddev->gendisk), in raid5_read_one_chunk()
5585 static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio) in chunk_aligned_read() argument
5589 unsigned chunk_sects = mddev->chunk_sectors; in chunk_aligned_read()
5593 struct r5conf *conf = mddev->private; in chunk_aligned_read()
5600 if (!raid5_read_one_chunk(mddev, raid_bio)) in chunk_aligned_read()
5720 struct mddev *mddev = cb->cb.data; in raid5_unplug() local
5721 struct r5conf *conf = mddev->private; in raid5_unplug()
5749 if (mddev->queue) in raid5_unplug()
5750 trace_block_unplug(mddev->queue, cnt, !from_schedule); in raid5_unplug()
5754 static void release_stripe_plug(struct mddev *mddev, in release_stripe_plug() argument
5758 raid5_unplug, mddev, in release_stripe_plug()
5782 static void make_discard_request(struct mddev *mddev, struct bio *bi) in make_discard_request() argument
5784 struct r5conf *conf = mddev->private; in make_discard_request()
5793 if (mddev->reshape_position != MaxSector) in make_discard_request()
5847 md_write_inc(mddev, bi); in make_discard_request()
5851 if (conf->mddev->bitmap) { in make_discard_request()
5855 md_bitmap_startwrite(mddev->bitmap, in make_discard_request()
5867 release_stripe_plug(mddev, sh); in make_discard_request()
5873 static bool ahead_of_reshape(struct mddev *mddev, sector_t sector, in ahead_of_reshape() argument
5876 return mddev->reshape_backwards ? sector < reshape_sector : in ahead_of_reshape()
5880 static bool range_ahead_of_reshape(struct mddev *mddev, sector_t min, in range_ahead_of_reshape() argument
5883 return mddev->reshape_backwards ? max < reshape_sector : in range_ahead_of_reshape()
5887 static bool stripe_ahead_of_reshape(struct mddev *mddev, struct r5conf *conf, in stripe_ahead_of_reshape() argument
5904 if (!range_ahead_of_reshape(mddev, min_sector, max_sector, in stripe_ahead_of_reshape()
5963 static bool reshape_inprogress(struct mddev *mddev) in reshape_inprogress() argument
5965 return test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in reshape_inprogress()
5966 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && in reshape_inprogress()
5967 !test_bit(MD_RECOVERY_DONE, &mddev->recovery) && in reshape_inprogress()
5968 !test_bit(MD_RECOVERY_INTR, &mddev->recovery); in reshape_inprogress()
5971 static bool reshape_disabled(struct mddev *mddev) in reshape_disabled() argument
5973 return is_md_suspended(mddev) || !md_is_rdwr(mddev); in reshape_disabled()
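
Lines 5873-5973 above belong to a family of small reshape predicates whose bodies are almost fully visible; completing them (a reconstruction):

static bool ahead_of_reshape(struct mddev *mddev, sector_t sector,
			     sector_t reshape_sector)
{
	return mddev->reshape_backwards ? sector < reshape_sector :
					  sector >= reshape_sector;
}

static bool range_ahead_of_reshape(struct mddev *mddev, sector_t min,
				   sector_t max, sector_t reshape_sector)
{
	return mddev->reshape_backwards ? max < reshape_sector :
					  min >= reshape_sector;
}

/* A reshape counts as in progress only while the sync thread is still
 * running it and it has neither completed nor been interrupted. */
static bool reshape_inprogress(struct mddev *mddev)
{
	return test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	       test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
	       !test_bit(MD_RECOVERY_DONE, &mddev->recovery) &&
	       !test_bit(MD_RECOVERY_INTR, &mddev->recovery);
}

static bool reshape_disabled(struct mddev *mddev)
{
	return is_md_suspended(mddev) || !md_is_rdwr(mddev);
}
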
5976 static enum stripe_result make_stripe_request(struct mddev *mddev, in make_stripe_request() argument
6000 if (ahead_of_reshape(mddev, logical_sector, in make_stripe_request()
6004 if (ahead_of_reshape(mddev, logical_sector, in make_stripe_request()
6031 stripe_ahead_of_reshape(mddev, conf, sh)) { in make_stripe_request()
6056 md_wakeup_thread(mddev->thread); in make_stripe_request()
6082 release_stripe_plug(mddev, sh); in make_stripe_request()
6088 if (ret == STRIPE_SCHEDULE_AND_RETRY && !reshape_inprogress(mddev) && in make_stripe_request()
6089 reshape_disabled(mddev)) { in make_stripe_request()
6093 mdname(mddev)); in make_stripe_request()
6131 static bool raid5_make_request(struct mddev *mddev, struct bio * bi) in raid5_make_request() argument
6134 struct r5conf *conf = mddev->private; in raid5_make_request()
6147 if (md_flush_request(mddev, bi)) in raid5_make_request()
6158 if (!md_write_start(mddev, bi)) in raid5_make_request()
6165 if (rw == READ && mddev->degraded == 0 && in raid5_make_request()
6166 mddev->reshape_position == MaxSector) { in raid5_make_request()
6167 bi = chunk_aligned_read(mddev, bi); in raid5_make_request()
6173 make_discard_request(mddev, bi); in raid5_make_request()
6174 md_write_end(mddev); in raid5_make_request()
6193 !ahead_of_reshape(mddev, logical_sector, conf->reshape_progress) && in raid5_make_request()
6194 ahead_of_reshape(mddev, logical_sector, conf->reshape_safe)) { in raid5_make_request()
6197 md_write_end(mddev); in raid5_make_request()
6200 md_account_bio(mddev, &bi); in raid5_make_request()
6215 res = make_stripe_request(mddev, conf, &ctx, logical_sector, in raid5_make_request()
6254 md_write_end(mddev); in raid5_make_request()
6259 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
6261 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) in reshape_request() argument
6272 struct r5conf *conf = mddev->private; in reshape_request()
6289 if (mddev->reshape_backwards && in reshape_request()
6290 conf->reshape_progress < raid5_size(mddev, 0, 0)) { in reshape_request()
6291 sector_nr = raid5_size(mddev, 0, 0) in reshape_request()
6293 } else if (mddev->reshape_backwards && in reshape_request()
6297 } else if (!mddev->reshape_backwards && in reshape_request()
6302 mddev->curr_resync_completed = sector_nr; in reshape_request()
6303 sysfs_notify_dirent_safe(mddev->sysfs_completed); in reshape_request()
6329 if (mddev->reshape_backwards) { in reshape_request()
6347 if (mddev->reshape_backwards) { in reshape_request()
6350 BUG_ON((mddev->dev_sectors & in reshape_request()
6385 if ((mddev->reshape_backwards in reshape_request()
6392 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
6395 mddev->reshape_position = conf->reshape_progress; in reshape_request()
6396 mddev->curr_resync_completed = sector_nr; in reshape_request()
6397 if (!mddev->reshape_backwards) in reshape_request()
6399 rdev_for_each(rdev, mddev) in reshape_request()
6407 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in reshape_request()
6408 md_wakeup_thread(mddev->thread); in reshape_request()
6409 wait_event(mddev->sb_wait, mddev->sb_flags == 0 || in reshape_request()
6410 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
6411 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in reshape_request()
6414 conf->reshape_safe = mddev->reshape_position; in reshape_request()
6417 sysfs_notify_dirent_safe(mddev->sysfs_completed); in reshape_request()
6439 if (s < raid5_size(mddev, 0, 0)) { in reshape_request()
6454 if (mddev->reshape_backwards) in reshape_request()
6471 if (last_sector >= mddev->dev_sectors) in reshape_request()
6472 last_sector = mddev->dev_sectors - 1; in reshape_request()
6495 if (mddev->curr_resync_completed > mddev->resync_max || in reshape_request()
6496 (sector_nr - mddev->curr_resync_completed) * 2 in reshape_request()
6497 >= mddev->resync_max - mddev->curr_resync_completed) { in reshape_request()
6501 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
6504 mddev->reshape_position = conf->reshape_progress; in reshape_request()
6505 mddev->curr_resync_completed = sector_nr; in reshape_request()
6506 if (!mddev->reshape_backwards) in reshape_request()
6508 rdev_for_each(rdev, mddev) in reshape_request()
6515 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in reshape_request()
6516 md_wakeup_thread(mddev->thread); in reshape_request()
6517 wait_event(mddev->sb_wait, in reshape_request()
6518 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) in reshape_request()
6519 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
6520 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in reshape_request()
6523 conf->reshape_safe = mddev->reshape_position; in reshape_request()
6526 sysfs_notify_dirent_safe(mddev->sysfs_completed); in reshape_request()
6532 static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr, in raid5_sync_request() argument
6535 struct r5conf *conf = mddev->private; in raid5_sync_request()
6537 sector_t max_sector = mddev->dev_sectors; in raid5_sync_request()
6545 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { in raid5_sync_request()
6550 if (mddev->curr_resync < max_sector) /* aborted */ in raid5_sync_request()
6551 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync, in raid5_sync_request()
6555 md_bitmap_close_sync(mddev->bitmap); in raid5_sync_request()
6563 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in raid5_sync_request()
6564 return reshape_request(mddev, sector_nr, skipped); in raid5_sync_request()
6576 if (mddev->degraded >= conf->max_degraded && in raid5_sync_request()
6577 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in raid5_sync_request()
6578 sector_t rv = mddev->dev_sectors - sector_nr; in raid5_sync_request()
6582 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in raid5_sync_request()
6584 !md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && in raid5_sync_request()
6593 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, false); in raid5_sync_request()
6617 md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); in raid5_sync_request()
6745 struct mddev *mddev = conf->mddev; in raid5_do_work() local
6766 wait_event_lock_irq(mddev->sb_wait, in raid5_do_work()
6767 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags), in raid5_do_work()
6793 struct mddev *mddev = thread->mddev; in raid5d() local
6794 struct r5conf *conf = mddev->private; in raid5d()
6800 md_check_recovery(mddev); in raid5d()
6819 md_bitmap_unplug(mddev->bitmap); in raid5d()
6842 if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) { in raid5d()
6844 md_check_recovery(mddev); in raid5d()
6855 wait_event_lock_irq(mddev->sb_wait, in raid5d()
6856 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags), in raid5d()
6883 raid5_show_stripe_cache_size(struct mddev *mddev, char *page) in raid5_show_stripe_cache_size() argument
6887 spin_lock(&mddev->lock); in raid5_show_stripe_cache_size()
6888 conf = mddev->private; in raid5_show_stripe_cache_size()
6891 spin_unlock(&mddev->lock); in raid5_show_stripe_cache_size()
6896 raid5_set_cache_size(struct mddev *mddev, int size) in raid5_set_cache_size() argument
6899 struct r5conf *conf = mddev->private; in raid5_set_cache_size()
6911 md_allow_write(mddev); in raid5_set_cache_size()
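
raid5_set_cache_size() (lines 6896-6911) shrinks or grows the stripe cache to the requested size; md_allow_write() sits between the two phases because growing allocates with GFP_KERNEL. A sketch based on mainline (the size bounds and cache_size_mutex usage are assumptions if your tree differs):

int raid5_set_cache_size(struct mddev *mddev, int size)
{
	int result = 0;
	struct r5conf *conf = mddev->private;

	if (size <= 16 || size > 32768)
		return -EINVAL;

	conf->min_nr_stripes = size;
	mutex_lock(&conf->cache_size_mutex);
	while (size < conf->max_nr_stripes &&
	       drop_one_stripe(conf))
		;
	mutex_unlock(&conf->cache_size_mutex);

	md_allow_write(mddev);

	mutex_lock(&conf->cache_size_mutex);
	while (size > conf->max_nr_stripes)
		if (!grow_one_stripe(conf, GFP_KERNEL)) {
			conf->min_nr_stripes = conf->max_nr_stripes;
			result = -ENOMEM;
			break;
		}
	mutex_unlock(&conf->cache_size_mutex);

	return result;
}
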
6927 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) in raid5_store_stripe_cache_size() argument
6937 err = mddev_lock(mddev); in raid5_store_stripe_cache_size()
6940 conf = mddev->private; in raid5_store_stripe_cache_size()
6944 err = raid5_set_cache_size(mddev, new); in raid5_store_stripe_cache_size()
6945 mddev_unlock(mddev); in raid5_store_stripe_cache_size()
6956 raid5_show_rmw_level(struct mddev *mddev, char *page) in raid5_show_rmw_level() argument
6958 struct r5conf *conf = mddev->private; in raid5_show_rmw_level()
6966 raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len) in raid5_store_rmw_level() argument
6968 struct r5conf *conf = mddev->private; in raid5_store_rmw_level()
6998 raid5_show_stripe_size(struct mddev *mddev, char *page) in raid5_show_stripe_size() argument
7003 spin_lock(&mddev->lock); in raid5_show_stripe_size()
7004 conf = mddev->private; in raid5_show_stripe_size()
7007 spin_unlock(&mddev->lock); in raid5_show_stripe_size()
7013 raid5_store_stripe_size(struct mddev *mddev, const char *page, size_t len) in raid5_store_stripe_size() argument
7035 err = mddev_lock(mddev); in raid5_store_stripe_size()
7039 conf = mddev->private; in raid5_store_stripe_size()
7051 if (mddev->sync_thread || in raid5_store_stripe_size()
7052 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in raid5_store_stripe_size()
7053 mddev->reshape_position != MaxSector || in raid5_store_stripe_size()
7054 mddev->sysfs_active) { in raid5_store_stripe_size()
7059 mddev_suspend(mddev); in raid5_store_stripe_size()
7070 mdname(mddev)); in raid5_store_stripe_size()
7074 mddev_resume(mddev); in raid5_store_stripe_size()
7077 mddev_unlock(mddev); in raid5_store_stripe_size()
7093 raid5_show_preread_threshold(struct mddev *mddev, char *page) in raid5_show_preread_threshold() argument
7097 spin_lock(&mddev->lock); in raid5_show_preread_threshold()
7098 conf = mddev->private; in raid5_show_preread_threshold()
7101 spin_unlock(&mddev->lock); in raid5_show_preread_threshold()
7106 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) in raid5_store_preread_threshold() argument
7117 err = mddev_lock(mddev); in raid5_store_preread_threshold()
7120 conf = mddev->private; in raid5_store_preread_threshold()
7127 mddev_unlock(mddev); in raid5_store_preread_threshold()
7138 raid5_show_skip_copy(struct mddev *mddev, char *page) in raid5_show_skip_copy() argument
7142 spin_lock(&mddev->lock); in raid5_show_skip_copy()
7143 conf = mddev->private; in raid5_show_skip_copy()
7146 spin_unlock(&mddev->lock); in raid5_show_skip_copy()
7151 raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) in raid5_store_skip_copy() argument
7163 err = mddev_lock(mddev); in raid5_store_skip_copy()
7166 conf = mddev->private; in raid5_store_skip_copy()
7170 struct request_queue *q = mddev->queue; in raid5_store_skip_copy()
7172 mddev_suspend(mddev); in raid5_store_skip_copy()
7178 mddev_resume(mddev); in raid5_store_skip_copy()
7180 mddev_unlock(mddev); in raid5_store_skip_copy()
7190 stripe_cache_active_show(struct mddev *mddev, char *page) in stripe_cache_active_show() argument
7192 struct r5conf *conf = mddev->private; in stripe_cache_active_show()
7203 raid5_show_group_thread_cnt(struct mddev *mddev, char *page) in raid5_show_group_thread_cnt() argument
7207 spin_lock(&mddev->lock); in raid5_show_group_thread_cnt()
7208 conf = mddev->private; in raid5_show_group_thread_cnt()
7211 spin_unlock(&mddev->lock); in raid5_show_group_thread_cnt()
7219 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) in raid5_store_group_thread_cnt() argument
7235 err = mddev_lock(mddev); in raid5_store_group_thread_cnt()
7238 conf = mddev->private; in raid5_store_group_thread_cnt()
7242 mddev_suspend(mddev); in raid5_store_group_thread_cnt()
7260 mddev_resume(mddev); in raid5_store_group_thread_cnt()
7262 mddev_unlock(mddev); in raid5_store_group_thread_cnt()
7343 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid5_size() argument
7345 struct r5conf *conf = mddev->private; in raid5_size()
7348 sectors = mddev->dev_sectors; in raid5_size()
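
raid5_size() (lines 7343-7348) computes the exported array capacity; only its head is visible above. A reconstruction assuming mainline behaviour (round each device down to a whole chunk of both the old and new geometry, then multiply by the number of data disks):

static sector_t raid5_size(struct mddev *mddev, sector_t sectors,
			   int raid_disks)
{
	struct r5conf *conf = mddev->private;

	if (!sectors)
		sectors = mddev->dev_sectors;
	if (!raid_disks)
		/* size is defined by the smallest of previous and new size */
		raid_disks = min(conf->raid_disks, conf->previous_raid_disks);

	sectors &= ~((sector_t)conf->chunk_sectors - 1);
	sectors &= ~((sector_t)conf->prev_chunk_sectors - 1);
	return sectors * (raid_disks - conf->max_degraded);
}
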
7488 static struct r5conf *setup_conf(struct mddev *mddev) in setup_conf() argument
7500 if (mddev->new_level != 5 in setup_conf()
7501 && mddev->new_level != 4 in setup_conf()
7502 && mddev->new_level != 6) { in setup_conf()
7504 mdname(mddev), mddev->new_level); in setup_conf()
7507 if ((mddev->new_level == 5 in setup_conf()
7508 && !algorithm_valid_raid5(mddev->new_layout)) || in setup_conf()
7509 (mddev->new_level == 6 in setup_conf()
7510 && !algorithm_valid_raid6(mddev->new_layout))) { in setup_conf()
7512 mdname(mddev), mddev->new_layout); in setup_conf()
7515 if (mddev->new_level == 6 && mddev->raid_disks < 4) { in setup_conf()
7517 mdname(mddev), mddev->raid_disks); in setup_conf()
7521 if (!mddev->new_chunk_sectors || in setup_conf()
7522 (mddev->new_chunk_sectors << 9) % PAGE_SIZE || in setup_conf()
7523 !is_power_of_2(mddev->new_chunk_sectors)) { in setup_conf()
7525 mdname(mddev), mddev->new_chunk_sectors << 9); in setup_conf()
7572 rdev_for_each(rdev, mddev) { in setup_conf()
7582 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
7584 conf->raid_disks = mddev->raid_disks; in setup_conf()
7585 if (mddev->reshape_position == MaxSector) in setup_conf()
7586 conf->previous_raid_disks = mddev->raid_disks; in setup_conf()
7588 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; in setup_conf()
7606 conf->mddev = mddev; in setup_conf()
7635 conf->level = mddev->new_level; in setup_conf()
7636 conf->chunk_sectors = mddev->new_chunk_sectors; in setup_conf()
7641 pr_debug("raid456: run(%s) called.\n", mdname(mddev)); in setup_conf()
7644 rdev_for_each(rdev, mddev) { in setup_conf()
7663 mdname(mddev), rdev->bdev, raid_disk); in setup_conf()
7669 conf->level = mddev->new_level; in setup_conf()
7680 conf->algorithm = mddev->new_layout; in setup_conf()
7681 conf->reshape_progress = mddev->reshape_position; in setup_conf()
7683 conf->prev_chunk_sectors = mddev->chunk_sectors; in setup_conf()
7684 conf->prev_algo = mddev->layout; in setup_conf()
7691 if (mddev->reshape_position != MaxSector) { in setup_conf()
7693 ((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4, in setup_conf()
7694 ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4); in setup_conf()
7698 mdname(mddev), conf->min_nr_stripes); in setup_conf()
7705 mdname(mddev), memory); in setup_conf()
7709 pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory); in setup_conf()
7720 ret = register_shrinker(&conf->shrinker, "md-raid5:%s", mdname(mddev)); in setup_conf()
7723 mdname(mddev)); in setup_conf()
7727 sprintf(pers_name, "raid%d", mddev->new_level); in setup_conf()
7729 md_register_thread(raid5d, mddev, pers_name)); in setup_conf()
7732 mdname(mddev)); in setup_conf()
7773 blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) * in raid5_set_io_opt()
7777 static int raid5_run(struct mddev *mddev) in raid5_run() argument
7788 if (mddev_init_writes_pending(mddev) < 0) in raid5_run()
7791 if (mddev->recovery_cp != MaxSector) in raid5_run()
7793 mdname(mddev)); in raid5_run()
7795 rdev_for_each(rdev, mddev) { in raid5_run()
7808 } else if (mddev->reshape_backwards && in raid5_run()
7811 else if (!mddev->reshape_backwards && in raid5_run()
7816 if ((test_bit(MD_HAS_JOURNAL, &mddev->flags) || journal_dev) && in raid5_run()
7817 (mddev->bitmap_info.offset || mddev->bitmap_info.file)) { in raid5_run()
7819 mdname(mddev)); in raid5_run()
7823 if (mddev->reshape_position != MaxSector) { in raid5_run()
7838 int max_degraded = (mddev->level == 6 ? 2 : 1); in raid5_run()
7844 mdname(mddev)); in raid5_run()
7848 if (mddev->new_level != mddev->level) { in raid5_run()
7850 mdname(mddev)); in raid5_run()
7853 old_disks = mddev->raid_disks - mddev->delta_disks; in raid5_run()
7861 here_new = mddev->reshape_position; in raid5_run()
7862 chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors); in raid5_run()
7863 new_data_disks = mddev->raid_disks - max_degraded; in raid5_run()
7866 mdname(mddev)); in raid5_run()
7871 here_old = mddev->reshape_position; in raid5_run()
7875 if (mddev->delta_disks == 0) { in raid5_run()
7883 if (abs(min_offset_diff) >= mddev->chunk_sectors && in raid5_run()
7884 abs(min_offset_diff) >= mddev->new_chunk_sectors) in raid5_run()
7886 else if (mddev->ro == 0) { in raid5_run()
7888 mdname(mddev)); in raid5_run()
7891 } else if (mddev->reshape_backwards in raid5_run()
7898 mdname(mddev)); in raid5_run()
7901 pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev)); in raid5_run()
7904 BUG_ON(mddev->level != mddev->new_level); in raid5_run()
7905 BUG_ON(mddev->layout != mddev->new_layout); in raid5_run()
7906 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); in raid5_run()
7907 BUG_ON(mddev->delta_disks != 0); in raid5_run()
7910 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && in raid5_run()
7911 test_bit(MD_HAS_PPL, &mddev->flags)) { in raid5_run()
7913 mdname(mddev)); in raid5_run()
7914 clear_bit(MD_HAS_PPL, &mddev->flags); in raid5_run()
7915 clear_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags); in raid5_run()
7918 if (mddev->private == NULL) in raid5_run()
7919 conf = setup_conf(mddev); in raid5_run()
7921 conf = mddev->private; in raid5_run()
7926 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { in raid5_run()
7929 mdname(mddev)); in raid5_run()
7930 mddev->ro = 1; in raid5_run()
7931 set_disk_ro(mddev->gendisk, 1); in raid5_run()
7932 } else if (mddev->recovery_cp == MaxSector) in raid5_run()
7933 set_bit(MD_JOURNAL_CLEAN, &mddev->flags); in raid5_run()
7937 rcu_assign_pointer(mddev->thread, conf->thread); in raid5_run()
7939 mddev->private = conf; in raid5_run()
7943 rdev = rdev_mdlock_deref(mddev, conf->disks[i].rdev); in raid5_run()
7946 rdev = rdev_mdlock_deref(mddev, in raid5_run()
7971 if (mddev->major_version == 0 && in raid5_run()
7972 mddev->minor_version > 90) in raid5_run()
7994 mddev->degraded = raid5_calc_degraded(conf); in raid5_run()
7998 mdname(mddev), mddev->degraded, conf->raid_disks); in raid5_run()
8003 mddev->dev_sectors &= ~((sector_t)mddev->chunk_sectors - 1); in raid5_run()
8004 mddev->resync_max_sectors = mddev->dev_sectors; in raid5_run()
8006 if (mddev->degraded > dirty_parity_disks && in raid5_run()
8007 mddev->recovery_cp != MaxSector) { in raid5_run()
8008 if (test_bit(MD_HAS_PPL, &mddev->flags)) in raid5_run()
8010 mdname(mddev)); in raid5_run()
8011 else if (mddev->ok_start_degraded) in raid5_run()
8013 mdname(mddev)); in raid5_run()
8016 mdname(mddev)); in raid5_run()
8022 mdname(mddev), conf->level, in raid5_run()
8023 mddev->raid_disks-mddev->degraded, mddev->raid_disks, in raid5_run()
8024 mddev->new_layout); in raid5_run()
8031 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid5_run()
8032 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid5_run()
8033 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid5_run()
8034 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in raid5_run()
8035 rcu_assign_pointer(mddev->sync_thread, in raid5_run()
8036 md_register_thread(md_do_sync, mddev, "reshape")); in raid5_run()
8037 if (!mddev->sync_thread) in raid5_run()
8042 if (mddev->to_remove == &raid5_attrs_group) in raid5_run()
8043 mddev->to_remove = NULL; in raid5_run()
8044 else if (mddev->kobj.sd && in raid5_run()
8045 sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) in raid5_run()
8047 mdname(mddev)); in raid5_run()
8048 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); in raid5_run()
8050 if (mddev->queue) { in raid5_run()
8058 ((mddev->chunk_sectors << 9) / PAGE_SIZE); in raid5_run()
8060 chunk_size = mddev->chunk_sectors << 9; in raid5_run()
8061 blk_queue_io_min(mddev->queue, chunk_size); in raid5_run()
8063 mddev->queue->limits.raid_partial_stripes_expensive = 1; in raid5_run()
8070 mddev->queue->limits.discard_granularity = stripe; in raid5_run()
8072 blk_queue_max_write_zeroes_sectors(mddev->queue, 0); in raid5_run()
8074 rdev_for_each(rdev, mddev) { in raid5_run()
8075 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid5_run()
8077 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid5_run()
8097 mddev->queue->limits.max_discard_sectors < (stripe >> 9) || in raid5_run()
8098 mddev->queue->limits.discard_granularity < stripe) in raid5_run()
8099 blk_queue_max_discard_sectors(mddev->queue, 0); in raid5_run()
8105 blk_queue_max_hw_sectors(mddev->queue, in raid5_run()
8109 blk_queue_max_segments(mddev->queue, USHRT_MAX); in raid5_run()
8117 md_unregister_thread(mddev, &mddev->thread); in raid5_run()
8120 mddev->private = NULL; in raid5_run()
8121 pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev)); in raid5_run()
8125 static void raid5_free(struct mddev *mddev, void *priv) in raid5_free() argument
8130 mddev->to_remove = &raid5_attrs_group; in raid5_free()
8133 static void raid5_status(struct seq_file *seq, struct mddev *mddev) in raid5_status() argument
8135 struct r5conf *conf = mddev->private; in raid5_status()
8138 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, in raid5_status()
8139 conf->chunk_sectors / 2, mddev->layout); in raid5_status()
8140 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); in raid5_status()
8162 conf->raid_disks - conf->mddev->degraded); in print_raid5_conf()
8175 static int raid5_spare_active(struct mddev *mddev) in raid5_spare_active() argument
8178 struct r5conf *conf = mddev->private; in raid5_spare_active()
8184 rdev = rdev_mdlock_deref(mddev, conf->disks[i].rdev); in raid5_spare_active()
8185 replacement = rdev_mdlock_deref(mddev, in raid5_spare_active()
8214 mddev->degraded = raid5_calc_degraded(conf); in raid5_spare_active()
8220 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) in raid5_remove_disk() argument
8222 struct r5conf *conf = mddev->private; in raid5_remove_disk()
8268 mddev->recovery_disabled != conf->recovery_disabled && in raid5_remove_disk()
8278 lockdep_assert_held(&mddev->reconfig_mutex); in raid5_remove_disk()
8313 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) in raid5_add_disk() argument
8315 struct r5conf *conf = mddev->private; in raid5_add_disk()
8342 if (mddev->recovery_disabled == conf->recovery_disabled) in raid5_add_disk()
8377 tmp = rdev_mdlock_deref(mddev, p->rdev); in raid5_add_disk()
8379 mddev->reshape_position == MaxSector && in raid5_add_disk()
8395 static int raid5_resize(struct mddev *mddev, sector_t sectors) in raid5_resize() argument
8405 struct r5conf *conf = mddev->private; in raid5_resize()
8410 newsize = raid5_size(mddev, sectors, mddev->raid_disks); in raid5_resize()
8411 if (mddev->external_size && in raid5_resize()
8412 mddev->array_sectors > newsize) in raid5_resize()
8414 if (mddev->bitmap) { in raid5_resize()
8415 int ret = md_bitmap_resize(mddev->bitmap, sectors, 0, 0); in raid5_resize()
8419 md_set_array_sectors(mddev, newsize); in raid5_resize()
8420 if (sectors > mddev->dev_sectors && in raid5_resize()
8421 mddev->recovery_cp > mddev->dev_sectors) { in raid5_resize()
8422 mddev->recovery_cp = mddev->dev_sectors; in raid5_resize()
8423 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid5_resize()
8425 mddev->dev_sectors = sectors; in raid5_resize()
8426 mddev->resync_max_sectors = sectors; in raid5_resize()
8430 static int check_stripe_cache(struct mddev *mddev) in check_stripe_cache() argument
8440 struct r5conf *conf = mddev->private; in check_stripe_cache()
8441 if (((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4 in check_stripe_cache()
8443 ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4 in check_stripe_cache()
8446 mdname(mddev), in check_stripe_cache()
8447 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) in check_stripe_cache()
8454 static int check_reshape(struct mddev *mddev) in check_reshape() argument
8456 struct r5conf *conf = mddev->private; in check_reshape()
8460 if (mddev->delta_disks == 0 && in check_reshape()
8461 mddev->new_layout == mddev->layout && in check_reshape()
8462 mddev->new_chunk_sectors == mddev->chunk_sectors) in check_reshape()
8466 if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) { in check_reshape()
8473 if (mddev->level == 6) in check_reshape()
8475 if (mddev->raid_disks + mddev->delta_disks < min) in check_reshape()
8479 if (!check_stripe_cache(mddev)) in check_reshape()
8482 if (mddev->new_chunk_sectors > mddev->chunk_sectors || in check_reshape()
8483 mddev->delta_disks > 0) in check_reshape()
8486 + max(0, mddev->delta_disks), in check_reshape()
8487 max(mddev->new_chunk_sectors, in check_reshape()
8488 mddev->chunk_sectors) in check_reshape()
8492 if (conf->previous_raid_disks + mddev->delta_disks <= conf->pool_size) in check_reshape()
8495 + mddev->delta_disks)); in check_reshape()
8498 static int raid5_start_reshape(struct mddev *mddev) in raid5_start_reshape() argument
8500 struct r5conf *conf = mddev->private; in raid5_start_reshape()
8506 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in raid5_start_reshape()
8509 if (!check_stripe_cache(mddev)) in raid5_start_reshape()
8516 if (mddev->recovery_cp < MaxSector) in raid5_start_reshape()
8519 if (rdev_mdlock_deref(mddev, conf->disks[i].replacement)) in raid5_start_reshape()
8522 rdev_for_each(rdev, mddev) { in raid5_start_reshape()
8528 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) in raid5_start_reshape()
8538 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) in raid5_start_reshape()
8539 < mddev->array_sectors) { in raid5_start_reshape()
8541 mdname(mddev)); in raid5_start_reshape()
8549 conf->raid_disks += mddev->delta_disks; in raid5_start_reshape()
8551 conf->chunk_sectors = mddev->new_chunk_sectors; in raid5_start_reshape()
8553 conf->algorithm = mddev->new_layout; in raid5_start_reshape()
8559 if (mddev->reshape_backwards) in raid5_start_reshape()
8560 conf->reshape_progress = raid5_size(mddev, 0, 0); in raid5_start_reshape()
8571 mddev_suspend(mddev); in raid5_start_reshape()
8572 mddev_resume(mddev); in raid5_start_reshape()
8581 if (mddev->delta_disks >= 0) { in raid5_start_reshape()
8582 rdev_for_each(rdev, mddev) in raid5_start_reshape()
8585 if (raid5_add_disk(mddev, rdev) == 0) { in raid5_start_reshape()
8593 sysfs_link_rdev(mddev, rdev); in raid5_start_reshape()
8606 mddev->degraded = raid5_calc_degraded(conf); in raid5_start_reshape()
8609 mddev->raid_disks = conf->raid_disks; in raid5_start_reshape()
8610 mddev->reshape_position = conf->reshape_progress; in raid5_start_reshape()
8611 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in raid5_start_reshape()
8613 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid5_start_reshape()
8614 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid5_start_reshape()
8615 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in raid5_start_reshape()
8616 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid5_start_reshape()
8617 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in raid5_start_reshape()
8618 rcu_assign_pointer(mddev->sync_thread, in raid5_start_reshape()
8619 md_register_thread(md_do_sync, mddev, "reshape")); in raid5_start_reshape()
8620 if (!mddev->sync_thread) { in raid5_start_reshape()
8621 mddev->recovery = 0; in raid5_start_reshape()
8624 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; in raid5_start_reshape()
8625 mddev->new_chunk_sectors = in raid5_start_reshape()
8627 mddev->new_layout = conf->algorithm = conf->prev_algo; in raid5_start_reshape()
8628 rdev_for_each(rdev, mddev) in raid5_start_reshape()
8633 mddev->reshape_position = MaxSector; in raid5_start_reshape()
8639 md_wakeup_thread(mddev->sync_thread); in raid5_start_reshape()
8650 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in end_reshape()
8655 md_finish_reshape(conf->mddev); in end_reshape()
8658 conf->mddev->reshape_position = MaxSector; in end_reshape()
8659 rdev_for_each(rdev, conf->mddev) in end_reshape()
8667 if (conf->mddev->queue) in end_reshape()
8675 static void raid5_finish_reshape(struct mddev *mddev) in raid5_finish_reshape() argument
8677 struct r5conf *conf = mddev->private; in raid5_finish_reshape()
8680 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in raid5_finish_reshape()
8682 if (mddev->delta_disks <= 0) { in raid5_finish_reshape()
8685 mddev->degraded = raid5_calc_degraded(conf); in raid5_finish_reshape()
8688 d < conf->raid_disks - mddev->delta_disks; in raid5_finish_reshape()
8690 rdev = rdev_mdlock_deref(mddev, in raid5_finish_reshape()
8694 rdev = rdev_mdlock_deref(mddev, in raid5_finish_reshape()
8700 mddev->layout = conf->algorithm; in raid5_finish_reshape()
8701 mddev->chunk_sectors = conf->chunk_sectors; in raid5_finish_reshape()
8702 mddev->reshape_position = MaxSector; in raid5_finish_reshape()
8703 mddev->delta_disks = 0; in raid5_finish_reshape()
8704 mddev->reshape_backwards = 0; in raid5_finish_reshape()
8708 static void raid5_quiesce(struct mddev *mddev, int quiesce) in raid5_quiesce() argument
8710 struct r5conf *conf = mddev->private; in raid5_quiesce()
8743 static void *raid45_takeover_raid0(struct mddev *mddev, int level) in raid45_takeover_raid0() argument
8745 struct r0conf *raid0_conf = mddev->private; in raid45_takeover_raid0()
8751 mdname(mddev)); in raid45_takeover_raid0()
8757 mddev->dev_sectors = sectors; in raid45_takeover_raid0()
8758 mddev->new_level = level; in raid45_takeover_raid0()
8759 mddev->new_layout = ALGORITHM_PARITY_N; in raid45_takeover_raid0()
8760 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid45_takeover_raid0()
8761 mddev->raid_disks += 1; in raid45_takeover_raid0()
8762 mddev->delta_disks = 1; in raid45_takeover_raid0()
8764 mddev->recovery_cp = MaxSector; in raid45_takeover_raid0()
8766 return setup_conf(mddev); in raid45_takeover_raid0()
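
raid45_takeover_raid0() (lines 8743-8766) converts a single-zone RAID0 into RAID4/5 by declaring one extra, missing parity disk (delta_disks = 1) and letting setup_conf() build the resulting degraded array. A reconstruction of the elided middle (the single-zone check and per-device size computation follow mainline):

static void *raid45_takeover_raid0(struct mddev *mddev, int level)
{
	struct r0conf *raid0_conf = mddev->private;
	sector_t sectors;

	/* for raid0 takeover only one zone is supported */
	if (raid0_conf->nr_strip_zones > 1) {
		pr_warn("md/raid:%s: cannot takeover raid0 with more than one zone.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	sectors = raid0_conf->strip_zone[0].zone_end;
	sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
	mddev->dev_sectors = sectors;
	mddev->new_level = level;
	mddev->new_layout = ALGORITHM_PARITY_N;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks += 1;
	mddev->delta_disks = 1;
	/* make sure the array will not be marked dirty */
	mddev->recovery_cp = MaxSector;

	return setup_conf(mddev);
}
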
8769 static void *raid5_takeover_raid1(struct mddev *mddev) in raid5_takeover_raid1() argument
8774 if (mddev->raid_disks != 2 || in raid5_takeover_raid1()
8775 mddev->degraded > 1) in raid5_takeover_raid1()
8783 while (chunksect && (mddev->array_sectors & (chunksect-1))) in raid5_takeover_raid1()
8786 if ((chunksect<<9) < RAID5_STRIPE_SIZE((struct r5conf *)mddev->private)) in raid5_takeover_raid1()
8790 mddev->new_level = 5; in raid5_takeover_raid1()
8791 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; in raid5_takeover_raid1()
8792 mddev->new_chunk_sectors = chunksect; in raid5_takeover_raid1()
8794 ret = setup_conf(mddev); in raid5_takeover_raid1()
8796 mddev_clear_unsupported_flags(mddev, in raid5_takeover_raid1()
8801 static void *raid5_takeover_raid6(struct mddev *mddev) in raid5_takeover_raid6() argument
8805 switch (mddev->layout) { in raid5_takeover_raid6()
8827 mddev->new_level = 5; in raid5_takeover_raid6()
8828 mddev->new_layout = new_layout; in raid5_takeover_raid6()
8829 mddev->delta_disks = -1; in raid5_takeover_raid6()
8830 mddev->raid_disks -= 1; in raid5_takeover_raid6()
8831 return setup_conf(mddev); in raid5_takeover_raid6()
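
raid5_takeover_raid6() (lines 8801-8831) only succeeds for RAID6 layouts that keep Q on the last device (the *_6 variants and PARITY_N), mapping each to its RAID5 counterpart before dropping one disk. A reconstruction of the switch, with layout constants as defined in mainline raid5.h:

static void *raid5_takeover_raid6(struct mddev *mddev)
{
	int new_layout;

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
		break;
	case ALGORITHM_LEFT_SYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_SYMMETRIC;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
		break;
	case ALGORITHM_PARITY_0_6:
		new_layout = ALGORITHM_PARITY_0;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 5;
	mddev->new_layout = new_layout;
	mddev->delta_disks = -1;
	mddev->raid_disks -= 1;
	return setup_conf(mddev);
}
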
8834 static int raid5_check_reshape(struct mddev *mddev) in raid5_check_reshape() argument
8841 struct r5conf *conf = mddev->private; in raid5_check_reshape()
8842 int new_chunk = mddev->new_chunk_sectors; in raid5_check_reshape()
8844 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout)) in raid5_check_reshape()
8851 if (mddev->array_sectors & (new_chunk-1)) in raid5_check_reshape()
8858 if (mddev->raid_disks == 2) { in raid5_check_reshape()
8860 if (mddev->new_layout >= 0) { in raid5_check_reshape()
8861 conf->algorithm = mddev->new_layout; in raid5_check_reshape()
8862 mddev->layout = mddev->new_layout; in raid5_check_reshape()
8866 mddev->chunk_sectors = new_chunk; in raid5_check_reshape()
8868 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in raid5_check_reshape()
8869 md_wakeup_thread(mddev->thread); in raid5_check_reshape()
8871 return check_reshape(mddev); in raid5_check_reshape()
8874 static int raid6_check_reshape(struct mddev *mddev) in raid6_check_reshape() argument
8876 int new_chunk = mddev->new_chunk_sectors; in raid6_check_reshape()
8878 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout)) in raid6_check_reshape()
8885 if (mddev->array_sectors & (new_chunk-1)) in raid6_check_reshape()
8891 return check_reshape(mddev); in raid6_check_reshape()
8894 static void *raid5_takeover(struct mddev *mddev) in raid5_takeover() argument
8902 if (mddev->level == 0) in raid5_takeover()
8903 return raid45_takeover_raid0(mddev, 5); in raid5_takeover()
8904 if (mddev->level == 1) in raid5_takeover()
8905 return raid5_takeover_raid1(mddev); in raid5_takeover()
8906 if (mddev->level == 4) { in raid5_takeover()
8907 mddev->new_layout = ALGORITHM_PARITY_N; in raid5_takeover()
8908 mddev->new_level = 5; in raid5_takeover()
8909 return setup_conf(mddev); in raid5_takeover()
8911 if (mddev->level == 6) in raid5_takeover()
8912 return raid5_takeover_raid6(mddev); in raid5_takeover()
8917 static void *raid4_takeover(struct mddev *mddev) in raid4_takeover() argument
8923 if (mddev->level == 0) in raid4_takeover()
8924 return raid45_takeover_raid0(mddev, 4); in raid4_takeover()
8925 if (mddev->level == 5 && in raid4_takeover()
8926 mddev->layout == ALGORITHM_PARITY_N) { in raid4_takeover()
8927 mddev->new_layout = 0; in raid4_takeover()
8928 mddev->new_level = 4; in raid4_takeover()
8929 return setup_conf(mddev); in raid4_takeover()
8936 static void *raid6_takeover(struct mddev *mddev) in raid6_takeover() argument
8944 if (mddev->pers != &raid5_personality) in raid6_takeover()
8946 if (mddev->degraded > 1) in raid6_takeover()
8948 if (mddev->raid_disks > 253) in raid6_takeover()
8950 if (mddev->raid_disks < 3) in raid6_takeover()
8953 switch (mddev->layout) { in raid6_takeover()
8975 mddev->new_level = 6; in raid6_takeover()
8976 mddev->new_layout = new_layout; in raid6_takeover()
8977 mddev->delta_disks = 1; in raid6_takeover()
8978 mddev->raid_disks += 1; in raid6_takeover()
8979 return setup_conf(mddev); in raid6_takeover()
8982 static int raid5_change_consistency_policy(struct mddev *mddev, const char *buf) in raid5_change_consistency_policy() argument
8987 err = mddev_lock(mddev); in raid5_change_consistency_policy()
8990 conf = mddev->private; in raid5_change_consistency_policy()
8992 mddev_unlock(mddev); in raid5_change_consistency_policy()
9003 mddev_suspend(mddev); in raid5_change_consistency_policy()
9005 mddev_resume(mddev); in raid5_change_consistency_policy()
9012 mddev_suspend(mddev); in raid5_change_consistency_policy()
9014 mddev_resume(mddev); in raid5_change_consistency_policy()
9016 } else if (test_bit(MD_HAS_JOURNAL, &conf->mddev->flags) && in raid5_change_consistency_policy()
9021 rdev_for_each(rdev, mddev) in raid5_change_consistency_policy()
9028 mddev_suspend(mddev); in raid5_change_consistency_policy()
9029 clear_bit(MD_HAS_JOURNAL, &mddev->flags); in raid5_change_consistency_policy()
9030 mddev_resume(mddev); in raid5_change_consistency_policy()
9040 md_update_sb(mddev, 1); in raid5_change_consistency_policy()
9042 mddev_unlock(mddev); in raid5_change_consistency_policy()
9047 static int raid5_start(struct mddev *mddev) in raid5_start() argument
9049 struct r5conf *conf = mddev->private; in raid5_start()
9054 static void raid5_prepare_suspend(struct mddev *mddev) in raid5_prepare_suspend() argument
9056 struct r5conf *conf = mddev->private; in raid5_prepare_suspend()
9058 wait_event(mddev->sb_wait, !reshape_inprogress(mddev) || in raid5_prepare_suspend()
9059 percpu_ref_is_zero(&mddev->active_io)); in raid5_prepare_suspend()
9060 if (percpu_ref_is_zero(&mddev->active_io)) in raid5_prepare_suspend()