Lines matching references to mddev (identifier search over drivers/md/md.c)

88 static int remove_and_add_spares(struct mddev *mddev,
90 static void mddev_detach(struct mddev *mddev);
91 static void export_rdev(struct md_rdev *rdev, struct mddev *mddev);
117 static inline int speed_min(struct mddev *mddev) in speed_min() argument
119 return mddev->sync_speed_min ? in speed_min()
120 mddev->sync_speed_min : sysctl_speed_limit_min; in speed_min()
123 static inline int speed_max(struct mddev *mddev) in speed_max() argument
125 return mddev->sync_speed_max ? in speed_max()
126 mddev->sync_speed_max : sysctl_speed_limit_max; in speed_max()
138 static void rdevs_uninit_serial(struct mddev *mddev) in rdevs_uninit_serial() argument
142 rdev_for_each(rdev, mddev) in rdevs_uninit_serial()
174 static int rdevs_init_serial(struct mddev *mddev) in rdevs_init_serial() argument
179 rdev_for_each(rdev, mddev) { in rdevs_init_serial()
186 if (ret && !mddev->serial_info_pool) in rdevs_init_serial()
187 rdevs_uninit_serial(mddev); in rdevs_init_serial()
199 return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 && in rdev_need_serial()
209 void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, in mddev_create_serial_pool() argument
219 mddev_suspend(mddev); in mddev_create_serial_pool()
222 ret = rdevs_init_serial(mddev); in mddev_create_serial_pool()
228 if (mddev->serial_info_pool == NULL) { in mddev_create_serial_pool()
233 mddev->serial_info_pool = in mddev_create_serial_pool()
236 if (!mddev->serial_info_pool) { in mddev_create_serial_pool()
237 rdevs_uninit_serial(mddev); in mddev_create_serial_pool()
244 mddev_resume(mddev); in mddev_create_serial_pool()
253 void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, in mddev_destroy_serial_pool() argument
259 if (mddev->serial_info_pool) { in mddev_destroy_serial_pool()
264 mddev_suspend(mddev); in mddev_destroy_serial_pool()
265 rdev_for_each(temp, mddev) { in mddev_destroy_serial_pool()
267 if (!mddev->serialize_policy || in mddev_destroy_serial_pool()
283 mempool_destroy(mddev->serial_info_pool); in mddev_destroy_serial_pool()
284 mddev->serial_info_pool = NULL; in mddev_destroy_serial_pool()
287 mddev_resume(mddev); in mddev_destroy_serial_pool()
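
[Note] Both mddev_create_serial_pool() and mddev_destroy_serial_pool() bracket the pool change with mddev_suspend()/mddev_resume(), so no write can be in flight while serial_info_pool is installed or torn down. A reduced sketch of that quiesce-swap-resume shape; dev_suspend()/dev_resume() are hypothetical stand-ins for the real suspend machinery:

#include <stdlib.h>

struct dev {
        int   suspended;        /* see the mddev_suspend() sketch further down */
        void *serial_pool;      /* stand-in for mddev->serial_info_pool */
};

static void dev_suspend(struct dev *d) { d->suspended++; /* + drain I/O */ }
static void dev_resume(struct dev *d)  { d->suspended--; /* + restart I/O */ }

/* Install the pool only while the device is quiesced. */
static int create_pool(struct dev *d)
{
        dev_suspend(d);
        if (!d->serial_pool)
                d->serial_pool = malloc(4096);  /* kernel: a mempool */
        dev_resume(d);
        return d->serial_pool ? 0 : -1;
}

static void destroy_pool(struct dev *d)
{
        dev_suspend(d);
        free(d->serial_pool);                   /* kernel: mempool_destroy() */
        d->serial_pool = NULL;
        dev_resume(d);
}
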
356 static bool is_suspended(struct mddev *mddev, struct bio *bio) in is_suspended() argument
358 if (is_md_suspended(mddev)) in is_suspended()
362 if (mddev->suspend_lo >= mddev->suspend_hi) in is_suspended()
364 if (bio->bi_iter.bi_sector >= mddev->suspend_hi) in is_suspended()
366 if (bio_end_sector(bio) < mddev->suspend_lo) in is_suspended()
371 void md_handle_request(struct mddev *mddev, struct bio *bio) in md_handle_request() argument
374 if (is_suspended(mddev, bio)) { in md_handle_request()
382 prepare_to_wait(&mddev->sb_wait, &__wait, in md_handle_request()
384 if (!is_suspended(mddev, bio)) in md_handle_request()
388 finish_wait(&mddev->sb_wait, &__wait); in md_handle_request()
390 if (!percpu_ref_tryget_live(&mddev->active_io)) in md_handle_request()
393 if (!mddev->pers->make_request(mddev, bio)) { in md_handle_request()
394 percpu_ref_put(&mddev->active_io); in md_handle_request()
398 percpu_ref_put(&mddev->active_io); in md_handle_request()
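
[Note] md_handle_request() loops with prepare_to_wait()/finish_wait() until the bio no longer targets a suspended range, then pins the array with percpu_ref_tryget_live(&mddev->active_io) for the life of the request. The same check-sleep-recheck shape with a portable condition variable; this is a sketch, not the kernel's lock-free fast path:

#include <pthread.h>
#include <stdbool.h>

struct dev {
        pthread_mutex_t lock;
        pthread_cond_t  wake;           /* stand-in for mddev->sb_wait */
        bool            suspended;      /* stand-in for is_suspended() */
        int             active_io;      /* stand-in for the active_io percpu_ref */
};

static void handle_request(struct dev *d)
{
        pthread_mutex_lock(&d->lock);
        while (d->suspended)                    /* prepare_to_wait() loop */
                pthread_cond_wait(&d->wake, &d->lock);
        d->active_io++;                         /* percpu_ref_tryget_live() */
        pthread_mutex_unlock(&d->lock);

        /* ... hand the bio to the personality's make_request() here ... */

        pthread_mutex_lock(&d->lock);
        if (--d->active_io == 0)
                pthread_cond_broadcast(&d->wake); /* lets a pending suspend finish */
        pthread_mutex_unlock(&d->lock);
}
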
405 struct mddev *mddev = bio->bi_bdev->bd_disk->private_data; in md_submit_bio() local
407 if (mddev == NULL || mddev->pers == NULL) { in md_submit_bio()
412 if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) { in md_submit_bio()
421 if (mddev->ro == MD_RDONLY && unlikely(rw == WRITE)) { in md_submit_bio()
431 md_handle_request(mddev, bio); in md_submit_bio()
440 void mddev_suspend(struct mddev *mddev) in mddev_suspend() argument
442 struct md_thread *thread = rcu_dereference_protected(mddev->thread, in mddev_suspend()
443 lockdep_is_held(&mddev->reconfig_mutex)); in mddev_suspend()
446 if (mddev->suspended++) in mddev_suspend()
448 wake_up(&mddev->sb_wait); in mddev_suspend()
449 set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags); in mddev_suspend()
450 percpu_ref_kill(&mddev->active_io); in mddev_suspend()
452 if (mddev->pers->prepare_suspend) in mddev_suspend()
453 mddev->pers->prepare_suspend(mddev); in mddev_suspend()
455 wait_event(mddev->sb_wait, percpu_ref_is_zero(&mddev->active_io)); in mddev_suspend()
456 clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags); in mddev_suspend()
457 wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags)); in mddev_suspend()
459 del_timer_sync(&mddev->safemode_timer); in mddev_suspend()
461 mddev->noio_flag = memalloc_noio_save(); in mddev_suspend()
465 void mddev_resume(struct mddev *mddev) in mddev_resume() argument
467 lockdep_assert_held(&mddev->reconfig_mutex); in mddev_resume()
468 if (--mddev->suspended) in mddev_resume()
472 memalloc_noio_restore(mddev->noio_flag); in mddev_resume()
474 percpu_ref_resurrect(&mddev->active_io); in mddev_resume()
475 wake_up(&mddev->sb_wait); in mddev_resume()
477 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in mddev_resume()
478 md_wakeup_thread(mddev->thread); in mddev_resume()
479 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ in mddev_resume()
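
[Note] mddev_suspend()/mddev_resume() nest via mddev->suspended: only the 0 -> 1 transition kills active_io and waits for it to drain, and only the final resume resurrects the ref and wakes the worker threads. A counting sketch, with quiesce_io()/restart_io() as hypothetical stand-ins:

struct dev { int suspended; };

static void quiesce_io(struct dev *d) { (void)d; /* kernel: percpu_ref_kill() + wait_event() */ }
static void restart_io(struct dev *d) { (void)d; /* kernel: percpu_ref_resurrect() + wake_up() */ }

static void dev_suspend(struct dev *d)
{
        if (d->suspended++)
                return;         /* already suspended: just deepen the nesting */
        quiesce_io(d);
}

static void dev_resume(struct dev *d)
{
        if (--d->suspended)
                return;         /* an outer suspend is still active */
        restart_io(d);
}
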
490 struct mddev *mddev = rdev->mddev; in md_end_flush() local
494 rdev_dec_pending(rdev, mddev); in md_end_flush()
496 if (atomic_dec_and_test(&mddev->flush_pending)) { in md_end_flush()
498 queue_work(md_wq, &mddev->flush_work); in md_end_flush()
506 struct mddev *mddev = container_of(ws, struct mddev, flush_work); in submit_flushes() local
509 mddev->start_flush = ktime_get_boottime(); in submit_flushes()
510 INIT_WORK(&mddev->flush_work, md_submit_flush_data); in submit_flushes()
511 atomic_set(&mddev->flush_pending, 1); in submit_flushes()
513 rdev_for_each_rcu(rdev, mddev) in submit_flushes()
526 GFP_NOIO, &mddev->bio_set); in submit_flushes()
529 atomic_inc(&mddev->flush_pending); in submit_flushes()
532 rdev_dec_pending(rdev, mddev); in submit_flushes()
535 if (atomic_dec_and_test(&mddev->flush_pending)) in submit_flushes()
536 queue_work(md_wq, &mddev->flush_work); in submit_flushes()
541 struct mddev *mddev = container_of(ws, struct mddev, flush_work); in md_submit_flush_data() local
542 struct bio *bio = mddev->flush_bio; in md_submit_flush_data()
550 spin_lock_irq(&mddev->lock); in md_submit_flush_data()
551 mddev->prev_flush_start = mddev->start_flush; in md_submit_flush_data()
552 mddev->flush_bio = NULL; in md_submit_flush_data()
553 spin_unlock_irq(&mddev->lock); in md_submit_flush_data()
554 wake_up(&mddev->sb_wait); in md_submit_flush_data()
561 md_handle_request(mddev, bio); in md_submit_flush_data()
571 bool md_flush_request(struct mddev *mddev, struct bio *bio) in md_flush_request() argument
574 spin_lock_irq(&mddev->lock); in md_flush_request()
578 wait_event_lock_irq(mddev->sb_wait, in md_flush_request()
579 !mddev->flush_bio || in md_flush_request()
580 ktime_before(req_start, mddev->prev_flush_start), in md_flush_request()
581 mddev->lock); in md_flush_request()
583 if (ktime_after(req_start, mddev->prev_flush_start)) { in md_flush_request()
584 WARN_ON(mddev->flush_bio); in md_flush_request()
585 mddev->flush_bio = bio; in md_flush_request()
588 spin_unlock_irq(&mddev->lock); in md_flush_request()
591 INIT_WORK(&mddev->flush_work, submit_flushes); in md_flush_request()
592 queue_work(md_wq, &mddev->flush_work); in md_flush_request()
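
[Note] md_flush_request() coalesces concurrent REQ_PREFLUSH requests: each caller records a start time, then either becomes the active flush (installing mddev->flush_bio) or sleeps on sb_wait. A request may complete without flushing at all when a flush that started after it arrived has already finished, i.e. ktime_before(req_start, prev_flush_start). The decisive comparison as a sketch:

#include <stdbool.h>
#include <stdint.h>

/*
 * req_start:        when this flush request arrived
 * prev_flush_start: when the most recently *completed* flush started
 * (both monotonic, like ktime_get_boottime() in the kernel)
 */
static bool flush_already_covered(int64_t req_start, int64_t prev_flush_start)
{
        /*
         * A whole flush started after this request arrived and has since
         * completed, so everything written before the request is already
         * stable; issuing another flush would be redundant.
         */
        return req_start < prev_flush_start;
}
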
607 static inline struct mddev *mddev_get(struct mddev *mddev) in mddev_get() argument
611 if (test_bit(MD_DELETED, &mddev->flags)) in mddev_get()
613 atomic_inc(&mddev->active); in mddev_get()
614 return mddev; in mddev_get()
619 void mddev_put(struct mddev *mddev) in mddev_put() argument
621 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) in mddev_put()
623 if (!mddev->raid_disks && list_empty(&mddev->disks) && in mddev_put()
624 mddev->ctime == 0 && !mddev->hold_active) { in mddev_put()
627 set_bit(MD_DELETED, &mddev->flags); in mddev_put()
634 INIT_WORK(&mddev->del_work, mddev_delayed_delete); in mddev_put()
635 queue_work(md_misc_wq, &mddev->del_work); in mddev_put()
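
[Note] mddev_get() runs with all_mddevs_lock held and refuses a reference once MD_DELETED is set; mddev_put() pairs the final decrement with that lock via atomic_dec_and_lock(), so the last put, the MD_DELETED flag, and the queued mddev_delayed_delete() work cannot race with a new lookup. A lock-protected sketch of the shape; the pthread mutex stands in for all_mddevs_lock:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

struct obj {
        int  refs;
        bool deleted;
};

/* Caller holds registry_lock, as mddev_get() holds all_mddevs_lock. */
static struct obj *obj_get(struct obj *o)
{
        if (o->deleted)
                return NULL;    /* test_bit(MD_DELETED, &mddev->flags) */
        o->refs++;
        return o;
}

static void obj_put(struct obj *o)
{
        pthread_mutex_lock(&registry_lock);     /* atomic_dec_and_lock() */
        if (--o->refs == 0) {
                /* kernel also checks the array is unused (no disks etc.) */
                o->deleted = true;
                /* then queues mddev_delayed_delete() on md_misc_wq */
        }
        pthread_mutex_unlock(&registry_lock);
}
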
642 void mddev_init(struct mddev *mddev) in mddev_init() argument
644 mutex_init(&mddev->open_mutex); in mddev_init()
645 mutex_init(&mddev->reconfig_mutex); in mddev_init()
646 mutex_init(&mddev->sync_mutex); in mddev_init()
647 mutex_init(&mddev->bitmap_info.mutex); in mddev_init()
648 INIT_LIST_HEAD(&mddev->disks); in mddev_init()
649 INIT_LIST_HEAD(&mddev->all_mddevs); in mddev_init()
650 INIT_LIST_HEAD(&mddev->deleting); in mddev_init()
651 timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0); in mddev_init()
652 atomic_set(&mddev->active, 1); in mddev_init()
653 atomic_set(&mddev->openers, 0); in mddev_init()
654 atomic_set(&mddev->sync_seq, 0); in mddev_init()
655 spin_lock_init(&mddev->lock); in mddev_init()
656 atomic_set(&mddev->flush_pending, 0); in mddev_init()
657 init_waitqueue_head(&mddev->sb_wait); in mddev_init()
658 init_waitqueue_head(&mddev->recovery_wait); in mddev_init()
659 mddev->reshape_position = MaxSector; in mddev_init()
660 mddev->reshape_backwards = 0; in mddev_init()
661 mddev->last_sync_action = "none"; in mddev_init()
662 mddev->resync_min = 0; in mddev_init()
663 mddev->resync_max = MaxSector; in mddev_init()
664 mddev->level = LEVEL_NONE; in mddev_init()
668 static struct mddev *mddev_find_locked(dev_t unit) in mddev_find_locked()
670 struct mddev *mddev; in mddev_find_locked() local
672 list_for_each_entry(mddev, &all_mddevs, all_mddevs) in mddev_find_locked()
673 if (mddev->unit == unit) in mddev_find_locked()
674 return mddev; in mddev_find_locked()
700 static struct mddev *mddev_alloc(dev_t unit) in mddev_alloc()
702 struct mddev *new; in mddev_alloc()
742 static void mddev_free(struct mddev *mddev) in mddev_free() argument
745 list_del(&mddev->all_mddevs); in mddev_free()
748 kfree(mddev); in mddev_free()
753 void mddev_unlock(struct mddev *mddev) in mddev_unlock() argument
759 if (!list_empty(&mddev->deleting)) in mddev_unlock()
760 list_splice_init(&mddev->deleting, &delete); in mddev_unlock()
762 if (mddev->to_remove) { in mddev_unlock()
775 const struct attribute_group *to_remove = mddev->to_remove; in mddev_unlock()
776 mddev->to_remove = NULL; in mddev_unlock()
777 mddev->sysfs_active = 1; in mddev_unlock()
778 mutex_unlock(&mddev->reconfig_mutex); in mddev_unlock()
780 if (mddev->kobj.sd) { in mddev_unlock()
782 sysfs_remove_group(&mddev->kobj, to_remove); in mddev_unlock()
783 if (mddev->pers == NULL || in mddev_unlock()
784 mddev->pers->sync_request == NULL) { in mddev_unlock()
785 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); in mddev_unlock()
786 if (mddev->sysfs_action) in mddev_unlock()
787 sysfs_put(mddev->sysfs_action); in mddev_unlock()
788 if (mddev->sysfs_completed) in mddev_unlock()
789 sysfs_put(mddev->sysfs_completed); in mddev_unlock()
790 if (mddev->sysfs_degraded) in mddev_unlock()
791 sysfs_put(mddev->sysfs_degraded); in mddev_unlock()
792 mddev->sysfs_action = NULL; in mddev_unlock()
793 mddev->sysfs_completed = NULL; in mddev_unlock()
794 mddev->sysfs_degraded = NULL; in mddev_unlock()
797 mddev->sysfs_active = 0; in mddev_unlock()
799 mutex_unlock(&mddev->reconfig_mutex); in mddev_unlock()
801 md_wakeup_thread(mddev->thread); in mddev_unlock()
802 wake_up(&mddev->sb_wait); in mddev_unlock()
807 export_rdev(rdev, mddev); in mddev_unlock()
812 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr) in md_find_rdev_nr_rcu() argument
816 rdev_for_each_rcu(rdev, mddev) in md_find_rdev_nr_rcu()
824 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev) in find_rdev() argument
828 rdev_for_each(rdev, mddev) in find_rdev()
835 struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev) in md_find_rdev_rcu() argument
839 rdev_for_each_rcu(rdev, mddev) in md_find_rdev_rcu()
893 struct mddev *mddev = rdev->mddev; in super_written() local
898 md_error(mddev, rdev); in super_written()
901 set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags); in super_written()
909 rdev_dec_pending(rdev, mddev); in super_written()
911 if (atomic_dec_and_test(&mddev->pending_writes)) in super_written()
912 wake_up(&mddev->sb_wait); in super_written()
915 void md_super_write(struct mddev *mddev, struct md_rdev *rdev, in md_super_write() argument
935 GFP_NOIO, &mddev->sync_set); in md_super_write()
944 if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) && in md_super_write()
949 atomic_inc(&mddev->pending_writes); in md_super_write()
953 int md_super_wait(struct mddev *mddev) in md_super_wait() argument
956 wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); in md_super_wait()
957 if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags)) in md_super_wait()
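
[Note] md_super_write() counts each in-flight superblock write in mddev->pending_writes; super_written() drops the count and flags MD_SB_NEED_REWRITE on a non-failfast failure; md_super_wait() sleeps on sb_wait until the count reaches zero and reports whether a rewrite is needed (the kernel returns -EAGAIN). A condition-variable sketch of that barrier:

#include <pthread.h>
#include <stdbool.h>

struct dev {
        pthread_mutex_t lock;
        pthread_cond_t  wake;           /* mddev->sb_wait */
        int             pending_writes;
        bool            need_rewrite;   /* MD_SB_NEED_REWRITE */
};

/* Completion callback, like super_written(). */
static void write_done(struct dev *d, bool failed)
{
        pthread_mutex_lock(&d->lock);
        if (failed)
                d->need_rewrite = true;
        if (--d->pending_writes == 0)
                pthread_cond_broadcast(&d->wake);
        pthread_mutex_unlock(&d->lock);
}

/* md_super_wait() analogue. */
static int super_wait(struct dev *d)
{
        int ret = 0;

        pthread_mutex_lock(&d->lock);
        while (d->pending_writes > 0)
                pthread_cond_wait(&d->wake, &d->lock);
        if (d->need_rewrite) {
                d->need_rewrite = false;
                ret = -1;               /* kernel: -EAGAIN, caller rewrites */
        }
        pthread_mutex_unlock(&d->lock);
        return ret;
}
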
975 else if (rdev->mddev->reshape_position != MaxSector && in sync_page_io()
976 (rdev->mddev->reshape_backwards == in sync_page_io()
977 (sector >= rdev->mddev->reshape_position))) in sync_page_io()
1114 int (*validate_super)(struct mddev *mddev,
1116 void (*sync_super)(struct mddev *mddev,
1132 int md_check_no_bitmap(struct mddev *mddev) in md_check_no_bitmap() argument
1134 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) in md_check_no_bitmap()
1137 mdname(mddev), mddev->pers->name); in md_check_no_bitmap()
1253 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) in super_90_validate() argument
1265 if (mddev->raid_disks == 0) { in super_90_validate()
1266 mddev->major_version = 0; in super_90_validate()
1267 mddev->minor_version = sb->minor_version; in super_90_validate()
1268 mddev->patch_version = sb->patch_version; in super_90_validate()
1269 mddev->external = 0; in super_90_validate()
1270 mddev->chunk_sectors = sb->chunk_size >> 9; in super_90_validate()
1271 mddev->ctime = sb->ctime; in super_90_validate()
1272 mddev->utime = sb->utime; in super_90_validate()
1273 mddev->level = sb->level; in super_90_validate()
1274 mddev->clevel[0] = 0; in super_90_validate()
1275 mddev->layout = sb->layout; in super_90_validate()
1276 mddev->raid_disks = sb->raid_disks; in super_90_validate()
1277 mddev->dev_sectors = ((sector_t)sb->size) * 2; in super_90_validate()
1278 mddev->events = ev1; in super_90_validate()
1279 mddev->bitmap_info.offset = 0; in super_90_validate()
1280 mddev->bitmap_info.space = 0; in super_90_validate()
1282 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; in super_90_validate()
1283 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); in super_90_validate()
1284 mddev->reshape_backwards = 0; in super_90_validate()
1286 if (mddev->minor_version >= 91) { in super_90_validate()
1287 mddev->reshape_position = sb->reshape_position; in super_90_validate()
1288 mddev->delta_disks = sb->delta_disks; in super_90_validate()
1289 mddev->new_level = sb->new_level; in super_90_validate()
1290 mddev->new_layout = sb->new_layout; in super_90_validate()
1291 mddev->new_chunk_sectors = sb->new_chunk >> 9; in super_90_validate()
1292 if (mddev->delta_disks < 0) in super_90_validate()
1293 mddev->reshape_backwards = 1; in super_90_validate()
1295 mddev->reshape_position = MaxSector; in super_90_validate()
1296 mddev->delta_disks = 0; in super_90_validate()
1297 mddev->new_level = mddev->level; in super_90_validate()
1298 mddev->new_layout = mddev->layout; in super_90_validate()
1299 mddev->new_chunk_sectors = mddev->chunk_sectors; in super_90_validate()
1301 if (mddev->level == 0) in super_90_validate()
1302 mddev->layout = -1; in super_90_validate()
1305 mddev->recovery_cp = MaxSector; in super_90_validate()
1309 mddev->recovery_cp = sb->recovery_cp; in super_90_validate()
1311 mddev->recovery_cp = 0; in super_90_validate()
1314 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); in super_90_validate()
1315 memcpy(mddev->uuid+4, &sb->set_uuid1, 4); in super_90_validate()
1316 memcpy(mddev->uuid+8, &sb->set_uuid2, 4); in super_90_validate()
1317 memcpy(mddev->uuid+12,&sb->set_uuid3, 4); in super_90_validate()
1319 mddev->max_disks = MD_SB_DISKS; in super_90_validate()
1322 mddev->bitmap_info.file == NULL) { in super_90_validate()
1323 mddev->bitmap_info.offset = in super_90_validate()
1324 mddev->bitmap_info.default_offset; in super_90_validate()
1325 mddev->bitmap_info.space = in super_90_validate()
1326 mddev->bitmap_info.default_space; in super_90_validate()
1329 } else if (mddev->pers == NULL) { in super_90_validate()
1335 if (ev1 < mddev->events) in super_90_validate()
1337 } else if (mddev->bitmap) { in super_90_validate()
1341 if (ev1 < mddev->bitmap->events_cleared) in super_90_validate()
1343 if (ev1 < mddev->events) in super_90_validate()
1346 if (ev1 < mddev->events) in super_90_validate()
1351 if (mddev->level != LEVEL_MULTIPATH) { in super_90_validate()
1365 if (mddev->minor_version >= 91) { in super_90_validate()
1382 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) in super_90_sync() argument
1386 int next_spare = mddev->raid_disks; in super_90_sync()
1408 sb->major_version = mddev->major_version; in super_90_sync()
1409 sb->patch_version = mddev->patch_version; in super_90_sync()
1411 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); in super_90_sync()
1412 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); in super_90_sync()
1413 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); in super_90_sync()
1414 memcpy(&sb->set_uuid3, mddev->uuid+12,4); in super_90_sync()
1416 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); in super_90_sync()
1417 sb->level = mddev->level; in super_90_sync()
1418 sb->size = mddev->dev_sectors / 2; in super_90_sync()
1419 sb->raid_disks = mddev->raid_disks; in super_90_sync()
1420 sb->md_minor = mddev->md_minor; in super_90_sync()
1422 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); in super_90_sync()
1424 sb->events_hi = (mddev->events>>32); in super_90_sync()
1425 sb->events_lo = (u32)mddev->events; in super_90_sync()
1427 if (mddev->reshape_position == MaxSector) in super_90_sync()
1431 sb->reshape_position = mddev->reshape_position; in super_90_sync()
1432 sb->new_level = mddev->new_level; in super_90_sync()
1433 sb->delta_disks = mddev->delta_disks; in super_90_sync()
1434 sb->new_layout = mddev->new_layout; in super_90_sync()
1435 sb->new_chunk = mddev->new_chunk_sectors << 9; in super_90_sync()
1437 mddev->minor_version = sb->minor_version; in super_90_sync()
1438 if (mddev->in_sync) in super_90_sync()
1440 sb->recovery_cp = mddev->recovery_cp; in super_90_sync()
1441 sb->cp_events_hi = (mddev->events>>32); in super_90_sync()
1442 sb->cp_events_lo = (u32)mddev->events; in super_90_sync()
1443 if (mddev->recovery_cp == MaxSector) in super_90_sync()
1448 sb->layout = mddev->layout; in super_90_sync()
1449 sb->chunk_size = mddev->chunk_sectors << 9; in super_90_sync()
1451 if (mddev->bitmap && mddev->bitmap_info.file == NULL) in super_90_sync()
1455 rdev_for_each(rdev2, mddev) { in super_90_sync()
1503 for (i=0 ; i < mddev->raid_disks ; i++) { in super_90_sync()
1529 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_90_rdev_size_change()
1531 if (rdev->mddev->bitmap_info.offset) in super_90_rdev_size_change()
1539 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) in super_90_rdev_size_change()
1542 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, in super_90_rdev_size_change()
1544 } while (md_super_wait(rdev->mddev) < 0); in super_90_rdev_size_change()
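
[Note] The v0.90 format stores the per-device size as a 32-bit count of 1 KiB blocks and splits the 64-bit event counter into two 32-bit halves, which is why super_90_validate() computes dev_sectors as sb->size * 2 and super_90_sync() fills events_hi/events_lo. super_90_rdev_size_change() also clamps components to just under 4 TiB ((2ULL << 32) sectors) for RAID level 1 and above, since the 32-bit KiB field cannot describe more. The conversions in plain C:

#include <stdint.h>

typedef uint64_t sector_t;                      /* 512-byte sectors */

/* sb->size is in 1 KiB blocks; the kernel works in sectors. */
static sector_t sb90_size_to_sectors(uint32_t size_kib)
{
        return (sector_t)size_kib * 2;
}

/* The 64-bit event counter is stored as two 32-bit halves. */
static void sb90_put_events(uint32_t *hi, uint32_t *lo, uint64_t events)
{
        *hi = (uint32_t)(events >> 32);
        *lo = (uint32_t)events;
}

/* 4 TiB == 2^32 KiB == 2 * 2^32 sectors: the most v0.90 can record. */
static sector_t sb90_clamp_sectors(sector_t num_sectors, int level)
{
        if (num_sectors >= (2ULL << 32) && level >= 1)
                num_sectors = (2ULL << 32) - 2;
        return num_sectors;
}
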
1765 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) in super_1_validate() argument
1776 if (mddev->raid_disks == 0) { in super_1_validate()
1777 mddev->major_version = 1; in super_1_validate()
1778 mddev->patch_version = 0; in super_1_validate()
1779 mddev->external = 0; in super_1_validate()
1780 mddev->chunk_sectors = le32_to_cpu(sb->chunksize); in super_1_validate()
1781 mddev->ctime = le64_to_cpu(sb->ctime); in super_1_validate()
1782 mddev->utime = le64_to_cpu(sb->utime); in super_1_validate()
1783 mddev->level = le32_to_cpu(sb->level); in super_1_validate()
1784 mddev->clevel[0] = 0; in super_1_validate()
1785 mddev->layout = le32_to_cpu(sb->layout); in super_1_validate()
1786 mddev->raid_disks = le32_to_cpu(sb->raid_disks); in super_1_validate()
1787 mddev->dev_sectors = le64_to_cpu(sb->size); in super_1_validate()
1788 mddev->events = ev1; in super_1_validate()
1789 mddev->bitmap_info.offset = 0; in super_1_validate()
1790 mddev->bitmap_info.space = 0; in super_1_validate()
1794 mddev->bitmap_info.default_offset = 1024 >> 9; in super_1_validate()
1795 mddev->bitmap_info.default_space = (4096-1024) >> 9; in super_1_validate()
1796 mddev->reshape_backwards = 0; in super_1_validate()
1798 mddev->recovery_cp = le64_to_cpu(sb->resync_offset); in super_1_validate()
1799 memcpy(mddev->uuid, sb->set_uuid, 16); in super_1_validate()
1801 mddev->max_disks = (4096-256)/2; in super_1_validate()
1804 mddev->bitmap_info.file == NULL) { in super_1_validate()
1805 mddev->bitmap_info.offset = in super_1_validate()
1812 if (mddev->minor_version > 0) in super_1_validate()
1813 mddev->bitmap_info.space = 0; in super_1_validate()
1814 else if (mddev->bitmap_info.offset > 0) in super_1_validate()
1815 mddev->bitmap_info.space = in super_1_validate()
1816 8 - mddev->bitmap_info.offset; in super_1_validate()
1818 mddev->bitmap_info.space = in super_1_validate()
1819 -mddev->bitmap_info.offset; in super_1_validate()
1823 mddev->reshape_position = le64_to_cpu(sb->reshape_position); in super_1_validate()
1824 mddev->delta_disks = le32_to_cpu(sb->delta_disks); in super_1_validate()
1825 mddev->new_level = le32_to_cpu(sb->new_level); in super_1_validate()
1826 mddev->new_layout = le32_to_cpu(sb->new_layout); in super_1_validate()
1827 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk); in super_1_validate()
1828 if (mddev->delta_disks < 0 || in super_1_validate()
1829 (mddev->delta_disks == 0 && in super_1_validate()
1832 mddev->reshape_backwards = 1; in super_1_validate()
1834 mddev->reshape_position = MaxSector; in super_1_validate()
1835 mddev->delta_disks = 0; in super_1_validate()
1836 mddev->new_level = mddev->level; in super_1_validate()
1837 mddev->new_layout = mddev->layout; in super_1_validate()
1838 mddev->new_chunk_sectors = mddev->chunk_sectors; in super_1_validate()
1841 if (mddev->level == 0 && in super_1_validate()
1843 mddev->layout = -1; in super_1_validate()
1846 set_bit(MD_HAS_JOURNAL, &mddev->flags); in super_1_validate()
1857 set_bit(MD_HAS_PPL, &mddev->flags); in super_1_validate()
1859 } else if (mddev->pers == NULL) { in super_1_validate()
1867 if (ev1 < mddev->events) in super_1_validate()
1869 } else if (mddev->bitmap) { in super_1_validate()
1873 if (ev1 < mddev->bitmap->events_cleared) in super_1_validate()
1875 if (ev1 < mddev->events) in super_1_validate()
1878 if (ev1 < mddev->events) in super_1_validate()
1882 if (mddev->level != LEVEL_MULTIPATH) { in super_1_validate()
1920 &mddev->recovery)) in super_1_validate()
1938 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) in super_1_sync() argument
1952 sb->utime = cpu_to_le64((__u64)mddev->utime); in super_1_sync()
1953 sb->events = cpu_to_le64(mddev->events); in super_1_sync()
1954 if (mddev->in_sync) in super_1_sync()
1955 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); in super_1_sync()
1956 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags)) in super_1_sync()
1963 sb->raid_disks = cpu_to_le32(mddev->raid_disks); in super_1_sync()
1964 sb->size = cpu_to_le64(mddev->dev_sectors); in super_1_sync()
1965 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); in super_1_sync()
1966 sb->level = cpu_to_le32(mddev->level); in super_1_sync()
1967 sb->layout = cpu_to_le32(mddev->layout); in super_1_sync()
1980 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { in super_1_sync()
1981 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); in super_1_sync()
1991 if (rdev->saved_raid_disk >= 0 && mddev->bitmap) in super_1_sync()
2002 if (mddev->reshape_position != MaxSector) { in super_1_sync()
2004 sb->reshape_position = cpu_to_le64(mddev->reshape_position); in super_1_sync()
2005 sb->new_layout = cpu_to_le32(mddev->new_layout); in super_1_sync()
2006 sb->delta_disks = cpu_to_le32(mddev->delta_disks); in super_1_sync()
2007 sb->new_level = cpu_to_le32(mddev->new_level); in super_1_sync()
2008 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); in super_1_sync()
2009 if (mddev->delta_disks == 0 && in super_1_sync()
2010 mddev->reshape_backwards) in super_1_sync()
2021 if (mddev_is_clustered(mddev)) in super_1_sync()
2028 md_error(mddev, rdev); in super_1_sync()
2059 rdev_for_each(rdev2, mddev) in super_1_sync()
2076 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) in super_1_sync()
2079 if (test_bit(MD_HAS_PPL, &mddev->flags)) { in super_1_sync()
2080 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags)) in super_1_sync()
2089 rdev_for_each(rdev2, mddev) { in super_1_sync()
2129 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_1_rdev_size_change()
2138 } else if (rdev->mddev->bitmap_info.offset) { in super_1_rdev_size_change()
2166 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, in super_1_rdev_size_change()
2168 } while (md_super_wait(rdev->mddev) < 0); in super_1_rdev_size_change()
2184 if (rdev->mddev->minor_version == 0) in super_1_allow_new_offset()
2195 bitmap = rdev->mddev->bitmap; in super_1_allow_new_offset()
2196 if (bitmap && !rdev->mddev->bitmap_info.file && in super_1_allow_new_offset()
2197 rdev->sb_start + rdev->mddev->bitmap_info.offset + in super_1_allow_new_offset()
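
[Note] Every field of a v1.x superblock is little-endian on disk, so super_1_validate() and super_1_sync() wrap each access in le32_to_cpu()/cpu_to_le64() and friends (which compile to no-ops on little-endian hosts). A portable sketch of what those helpers do on a raw byte buffer:

#include <stdint.h>

/* Decode a little-endian 32-bit field regardless of host endianness. */
static uint32_t get_le32(const uint8_t b[4])
{
        return (uint32_t)b[0]       | (uint32_t)b[1] << 8 |
               (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

/* Encode the other way, as cpu_to_le32() does. */
static void put_le32(uint8_t b[4], uint32_t v)
{
        b[0] = (uint8_t)v;
        b[1] = (uint8_t)(v >> 8);
        b[2] = (uint8_t)(v >> 16);
        b[3] = (uint8_t)(v >> 24);
}
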
2227 static void sync_super(struct mddev *mddev, struct md_rdev *rdev) in sync_super() argument
2229 if (mddev->sync_super) { in sync_super()
2230 mddev->sync_super(mddev, rdev); in sync_super()
2234 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); in sync_super()
2236 super_types[mddev->major_version].sync_super(mddev, rdev); in sync_super()
2239 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) in match_mddev_units()
2273 int md_integrity_register(struct mddev *mddev) in md_integrity_register() argument
2277 if (list_empty(&mddev->disks)) in md_integrity_register()
2279 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) in md_integrity_register()
2281 rdev_for_each(rdev, mddev) { in md_integrity_register()
2303 blk_integrity_register(mddev->gendisk, in md_integrity_register()
2306 pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); in md_integrity_register()
2307 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) || in md_integrity_register()
2308 (mddev->level != 1 && mddev->level != 10 && in md_integrity_register()
2309 bioset_integrity_create(&mddev->io_clone_set, BIO_POOL_SIZE))) { in md_integrity_register()
2317 mdname(mddev)); in md_integrity_register()
2328 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) in md_integrity_add_rdev() argument
2332 if (!mddev->gendisk) in md_integrity_add_rdev()
2335 bi_mddev = blk_get_integrity(mddev->gendisk); in md_integrity_add_rdev()
2340 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) { in md_integrity_add_rdev()
2342 mdname(mddev), rdev->bdev); in md_integrity_add_rdev()
2356 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) in bind_rdev_to_array() argument
2362 if (find_rdev(mddev, rdev->bdev->bd_dev)) in bind_rdev_to_array()
2365 if (rdev_read_only(rdev) && mddev->pers) in bind_rdev_to_array()
2371 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) { in bind_rdev_to_array()
2372 if (mddev->pers) { in bind_rdev_to_array()
2377 if (mddev->level > 0) in bind_rdev_to_array()
2380 mddev->dev_sectors = rdev->sectors; in bind_rdev_to_array()
2390 if (mddev->pers) in bind_rdev_to_array()
2391 choice = mddev->raid_disks; in bind_rdev_to_array()
2392 while (md_find_rdev_nr_rcu(mddev, choice)) in bind_rdev_to_array()
2396 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { in bind_rdev_to_array()
2403 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { in bind_rdev_to_array()
2405 mdname(mddev), mddev->max_disks); in bind_rdev_to_array()
2411 rdev->mddev = mddev; in bind_rdev_to_array()
2414 if (mddev->raid_disks) in bind_rdev_to_array()
2415 mddev_create_serial_pool(mddev, rdev, false); in bind_rdev_to_array()
2417 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) in bind_rdev_to_array()
2428 list_add_rcu(&rdev->same_set, &mddev->disks); in bind_rdev_to_array()
2429 bd_link_disk_holder(rdev->bdev, mddev->gendisk); in bind_rdev_to_array()
2432 mddev->recovery_disabled++; in bind_rdev_to_array()
2438 b, mdname(mddev)); in bind_rdev_to_array()
2447 static void export_rdev(struct md_rdev *rdev, struct mddev *mddev) in export_rdev() argument
2463 struct mddev *mddev = rdev->mddev; in md_kick_rdev_from_array() local
2465 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); in md_kick_rdev_from_array()
2468 mddev_destroy_serial_pool(rdev->mddev, rdev, false); in md_kick_rdev_from_array()
2469 rdev->mddev = NULL; in md_kick_rdev_from_array()
2486 list_add(&rdev->same_set, &mddev->deleting); in md_kick_rdev_from_array()
2489 static void export_array(struct mddev *mddev) in export_array() argument
2493 while (!list_empty(&mddev->disks)) { in export_array()
2494 rdev = list_first_entry(&mddev->disks, struct md_rdev, in export_array()
2498 mddev->raid_disks = 0; in export_array()
2499 mddev->major_version = 0; in export_array()
2502 static bool set_in_sync(struct mddev *mddev) in set_in_sync() argument
2504 lockdep_assert_held(&mddev->lock); in set_in_sync()
2505 if (!mddev->in_sync) { in set_in_sync()
2506 mddev->sync_checkers++; in set_in_sync()
2507 spin_unlock(&mddev->lock); in set_in_sync()
2508 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending); in set_in_sync()
2509 spin_lock(&mddev->lock); in set_in_sync()
2510 if (!mddev->in_sync && in set_in_sync()
2511 percpu_ref_is_zero(&mddev->writes_pending)) { in set_in_sync()
2512 mddev->in_sync = 1; in set_in_sync()
2518 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in set_in_sync()
2519 sysfs_notify_dirent_safe(mddev->sysfs_state); in set_in_sync()
2521 if (--mddev->sync_checkers == 0) in set_in_sync()
2522 percpu_ref_switch_to_percpu(&mddev->writes_pending); in set_in_sync()
2524 if (mddev->safemode == 1) in set_in_sync()
2525 mddev->safemode = 0; in set_in_sync()
2526 return mddev->in_sync; in set_in_sync()
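
[Note] set_in_sync() needs an exact answer from writes_pending, which normally runs in per-CPU mode where the global sum is not cheaply observable. It counts itself into sync_checkers, switches the ref into atomic mode with percpu_ref_switch_to_atomic_sync(), tests it with percpu_ref_is_zero(), and the last checker switches it back. A sketch of that flip-check-restore pattern; the counter helpers are stand-ins:

#include <stdbool.h>

struct dev {
        int  sync_checkers;     /* how many callers need the exact mode */
        bool in_sync;
};

static void counter_make_exact(struct dev *d) { (void)d; /* percpu_ref_switch_to_atomic_sync() */ }
static void counter_make_fast(struct dev *d)  { (void)d; /* percpu_ref_switch_to_percpu() */ }
static bool counter_is_zero(struct dev *d)    { (void)d; return true; /* percpu_ref_is_zero() */ }

static bool set_in_sync(struct dev *d)
{
        if (!d->in_sync) {
                d->sync_checkers++;
                counter_make_exact(d);
                if (!d->in_sync && counter_is_zero(d))
                        d->in_sync = true;      /* and mark the superblock dirty */
                if (--d->sync_checkers == 0)
                        counter_make_fast(d);
        }
        return d->in_sync;
}
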
2529 static void sync_sbs(struct mddev *mddev, int nospares) in sync_sbs() argument
2538 rdev_for_each(rdev, mddev) { in sync_sbs()
2539 if (rdev->sb_events == mddev->events || in sync_sbs()
2542 rdev->sb_events+1 == mddev->events)) { in sync_sbs()
2546 sync_super(mddev, rdev); in sync_sbs()
2552 static bool does_sb_need_changing(struct mddev *mddev) in does_sb_need_changing() argument
2559 rdev_for_each(iter, mddev) in does_sb_need_changing()
2571 rdev_for_each(rdev, mddev) { in does_sb_need_changing()
2583 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || in does_sb_need_changing()
2584 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || in does_sb_need_changing()
2585 (mddev->layout != le32_to_cpu(sb->layout)) || in does_sb_need_changing()
2586 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || in does_sb_need_changing()
2587 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) in does_sb_need_changing()
2593 void md_update_sb(struct mddev *mddev, int force_change) in md_update_sb() argument
2601 if (!md_is_rdwr(mddev)) { in md_update_sb()
2603 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_update_sb()
2608 if (mddev_is_clustered(mddev)) { in md_update_sb()
2609 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) in md_update_sb()
2611 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) in md_update_sb()
2613 ret = md_cluster_ops->metadata_update_start(mddev); in md_update_sb()
2615 if (!does_sb_need_changing(mddev)) { in md_update_sb()
2617 md_cluster_ops->metadata_update_cancel(mddev); in md_update_sb()
2618 bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), in md_update_sb()
2631 rdev_for_each(rdev, mddev) { in md_update_sb()
2633 mddev->delta_disks >= 0 && in md_update_sb()
2634 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && in md_update_sb()
2635 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) && in md_update_sb()
2636 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_update_sb()
2639 mddev->curr_resync_completed > rdev->recovery_offset) in md_update_sb()
2640 rdev->recovery_offset = mddev->curr_resync_completed; in md_update_sb()
2643 if (!mddev->persistent) { in md_update_sb()
2644 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_update_sb()
2645 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_update_sb()
2646 if (!mddev->external) { in md_update_sb()
2647 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_update_sb()
2648 rdev_for_each(rdev, mddev) { in md_update_sb()
2652 md_error(mddev, rdev); in md_update_sb()
2659 wake_up(&mddev->sb_wait); in md_update_sb()
2663 spin_lock(&mddev->lock); in md_update_sb()
2665 mddev->utime = ktime_get_real_seconds(); in md_update_sb()
2667 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) in md_update_sb()
2669 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) in md_update_sb()
2677 if (mddev->degraded) in md_update_sb()
2689 sync_req = mddev->in_sync; in md_update_sb()
2694 && (mddev->in_sync && mddev->recovery_cp == MaxSector) in md_update_sb()
2695 && mddev->can_decrease_events in md_update_sb()
2696 && mddev->events != 1) { in md_update_sb()
2697 mddev->events--; in md_update_sb()
2698 mddev->can_decrease_events = 0; in md_update_sb()
2701 mddev->events ++; in md_update_sb()
2702 mddev->can_decrease_events = nospares; in md_update_sb()
2710 WARN_ON(mddev->events == 0); in md_update_sb()
2712 rdev_for_each(rdev, mddev) { in md_update_sb()
2719 sync_sbs(mddev, nospares); in md_update_sb()
2720 spin_unlock(&mddev->lock); in md_update_sb()
2723 mdname(mddev), mddev->in_sync); in md_update_sb()
2725 if (mddev->queue) in md_update_sb()
2726 blk_add_trace_msg(mddev->queue, "md md_update_sb"); in md_update_sb()
2728 md_bitmap_update_sb(mddev->bitmap); in md_update_sb()
2729 rdev_for_each(rdev, mddev) { in md_update_sb()
2734 md_super_write(mddev,rdev, in md_update_sb()
2740 rdev->sb_events = mddev->events; in md_update_sb()
2742 md_super_write(mddev, rdev, in md_update_sb()
2753 if (mddev->level == LEVEL_MULTIPATH) in md_update_sb()
2757 if (md_super_wait(mddev) < 0) in md_update_sb()
2761 if (mddev_is_clustered(mddev) && ret == 0) in md_update_sb()
2762 md_cluster_ops->metadata_update_finish(mddev); in md_update_sb()
2764 if (mddev->in_sync != sync_req || in md_update_sb()
2765 !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), in md_update_sb()
2769 wake_up(&mddev->sb_wait); in md_update_sb()
2770 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in md_update_sb()
2771 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_update_sb()
2773 rdev_for_each(rdev, mddev) { in md_update_sb()
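
[Note] md_update_sb() usually increments mddev->events, but when nothing that spares care about changed (nospares), the array is clean, and the previous update was itself a plain increment (can_decrease_events), it steps the counter back by one instead: superblocks on spare devices, which were deliberately not rewritten, then still match the array. The rule in isolation, with field names mirroring the kernel's:

#include <stdbool.h>
#include <stdint.h>

struct dev {
        uint64_t events;
        bool     in_sync;
        bool     resync_complete;       /* recovery_cp == MaxSector */
        bool     can_decrease_events;
};

static void advance_events(struct dev *d, bool nospares)
{
        if (nospares && d->in_sync && d->resync_complete &&
            d->can_decrease_events && d->events != 1) {
                d->events--;
                d->can_decrease_events = false;
        } else {
                d->events++;
                d->can_decrease_events = nospares;
        }
}
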
2787 struct mddev *mddev = rdev->mddev; in add_bound_rdev() local
2791 if (!mddev->pers->hot_remove_disk || add_journal) { in add_bound_rdev()
2796 super_types[mddev->major_version]. in add_bound_rdev()
2797 validate_super(mddev, rdev); in add_bound_rdev()
2799 mddev_suspend(mddev); in add_bound_rdev()
2800 err = mddev->pers->hot_add_disk(mddev, rdev); in add_bound_rdev()
2802 mddev_resume(mddev); in add_bound_rdev()
2810 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in add_bound_rdev()
2811 if (mddev->degraded) in add_bound_rdev()
2812 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in add_bound_rdev()
2813 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in add_bound_rdev()
2815 md_wakeup_thread(mddev->thread); in add_bound_rdev()
2905 struct mddev *mddev = rdev->mddev; in state_store() local
2909 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { in state_store()
2910 md_error(rdev->mddev, rdev); in state_store()
2912 if (test_bit(MD_BROKEN, &rdev->mddev->flags)) in state_store()
2917 if (rdev->mddev->pers) { in state_store()
2919 remove_and_add_spares(rdev->mddev, rdev); in state_store()
2925 if (mddev_is_clustered(mddev)) in state_store()
2926 err = md_cluster_ops->remove_disk(mddev, rdev); in state_store()
2930 if (mddev->pers) { in state_store()
2931 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in state_store()
2932 md_wakeup_thread(mddev->thread); in state_store()
2939 mddev_create_serial_pool(rdev->mddev, rdev, false); in state_store()
2943 mddev_destroy_serial_pool(rdev->mddev, rdev, false); in state_store()
2957 md_error(rdev->mddev, rdev); in state_store()
2962 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in state_store()
2963 md_wakeup_thread(rdev->mddev->thread); in state_store()
2979 if (rdev->mddev->pers == NULL) { in state_store()
3000 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in state_store()
3001 md_wakeup_thread(rdev->mddev->thread); in state_store()
3014 if (rdev->mddev->pers) in state_store()
3022 if (rdev->mddev->pers) in state_store()
3029 if (!rdev->mddev->pers) in state_store()
3039 if (!mddev_is_clustered(rdev->mddev) || in state_store()
3046 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) { in state_store()
3050 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) { in state_store()
3055 md_update_sb(mddev, 1); in state_store()
3113 if (rdev->mddev->pers && slot == -1) { in slot_store()
3124 if (rdev->mddev->pers->hot_remove_disk == NULL) in slot_store()
3127 remove_and_add_spares(rdev->mddev, rdev); in slot_store()
3130 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in slot_store()
3131 md_wakeup_thread(rdev->mddev->thread); in slot_store()
3132 } else if (rdev->mddev->pers) { in slot_store()
3141 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) in slot_store()
3144 if (rdev->mddev->pers->hot_add_disk == NULL) in slot_store()
3147 if (slot >= rdev->mddev->raid_disks && in slot_store()
3148 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) in slot_store()
3158 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev); in slot_store()
3165 sysfs_link_rdev(rdev->mddev, rdev); in slot_store()
3168 if (slot >= rdev->mddev->raid_disks && in slot_store()
3169 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) in slot_store()
3196 if (rdev->mddev->pers && rdev->raid_disk >= 0) in offset_store()
3198 if (rdev->sectors && rdev->mddev->external) in offset_store()
3220 struct mddev *mddev = rdev->mddev; in new_offset_store() local
3225 if (mddev->sync_thread || in new_offset_store()
3226 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) in new_offset_store()
3234 + mddev->dev_sectors > rdev->sectors) in new_offset_store()
3243 mddev->reshape_backwards) in new_offset_store()
3250 !mddev->reshape_backwards) in new_offset_store()
3253 if (mddev->pers && mddev->persistent && in new_offset_store()
3254 !super_types[mddev->major_version] in new_offset_store()
3259 mddev->reshape_backwards = 1; in new_offset_store()
3261 mddev->reshape_backwards = 0; in new_offset_store()
3286 struct mddev *mddev; in md_rdev_overlaps() local
3290 list_for_each_entry(mddev, &all_mddevs, all_mddevs) { in md_rdev_overlaps()
3291 if (test_bit(MD_DELETED, &mddev->flags)) in md_rdev_overlaps()
3293 rdev_for_each(rdev2, mddev) { in md_rdev_overlaps()
3327 struct mddev *my_mddev = rdev->mddev; in rdev_size_store()
3396 if (rdev->mddev->pers && in recovery_start_store()
3464 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && in ppl_sector_store()
3468 if (rdev->mddev->persistent) { in ppl_sector_store()
3469 if (rdev->mddev->major_version == 0) in ppl_sector_store()
3477 } else if (!rdev->mddev->external) { in ppl_sector_store()
3501 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && in ppl_size_store()
3505 if (rdev->mddev->persistent) { in ppl_size_store()
3506 if (rdev->mddev->major_version == 0) in ppl_size_store()
3510 } else if (!rdev->mddev->external) { in ppl_size_store()
3543 if (!rdev->mddev) in rdev_attr_show()
3556 struct mddev *mddev = rdev->mddev; in rdev_attr_store() local
3566 rv = mddev ? mddev_lock(mddev) : -ENODEV; in rdev_attr_store()
3568 if (rdev->mddev == NULL) in rdev_attr_store()
3572 mddev_unlock(mddev); in rdev_attr_store()
3708 static int analyze_sbs(struct mddev *mddev) in analyze_sbs() argument
3714 rdev_for_each_safe(rdev, tmp, mddev) in analyze_sbs()
3715 switch (super_types[mddev->major_version]. in analyze_sbs()
3716 load_super(rdev, freshest, mddev->minor_version)) { in analyze_sbs()
3734 super_types[mddev->major_version]. in analyze_sbs()
3735 validate_super(mddev, freshest); in analyze_sbs()
3738 rdev_for_each_safe(rdev, tmp, mddev) { in analyze_sbs()
3739 if (mddev->max_disks && in analyze_sbs()
3740 (rdev->desc_nr >= mddev->max_disks || in analyze_sbs()
3741 i > mddev->max_disks)) { in analyze_sbs()
3743 mdname(mddev), rdev->bdev, in analyze_sbs()
3744 mddev->max_disks); in analyze_sbs()
3749 if (super_types[mddev->major_version]. in analyze_sbs()
3750 validate_super(mddev, rdev)) { in analyze_sbs()
3757 if (mddev->level == LEVEL_MULTIPATH) { in analyze_sbs()
3762 (mddev->raid_disks - min(0, mddev->delta_disks)) && in analyze_sbs()
3809 safe_delay_show(struct mddev *mddev, char *page) in safe_delay_show() argument
3811 unsigned int msec = ((unsigned long)mddev->safemode_delay*1000)/HZ; in safe_delay_show()
3816 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) in safe_delay_store() argument
3820 if (mddev_is_clustered(mddev)) { in safe_delay_store()
3828 mddev->safemode_delay = 0; in safe_delay_store()
3830 unsigned long old_delay = mddev->safemode_delay; in safe_delay_store()
3835 mddev->safemode_delay = new_delay; in safe_delay_store()
3837 mod_timer(&mddev->safemode_timer, jiffies+1); in safe_delay_store()
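
[Note] safemode_delay is stored in jiffies but exposed through sysfs in seconds with millisecond precision: safe_delay_show() converts with delay * 1000 / HZ and safe_delay_store() converts back, never letting a nonzero delay round down to zero, then re-arms the timer if the delay shrank. The two conversions as a sketch (HZ value invented for the example; the exact rounding is illustrative):

#include <stdio.h>

#define HZ 250                          /* example tick rate */

/* Stored in jiffies, shown as seconds.milliseconds. */
static void show_safe_delay(unsigned long safemode_delay)
{
        unsigned int msec = (unsigned int)(safemode_delay * 1000 / HZ);

        printf("%u.%03u\n", msec / 1000, msec % 1000);
}

static unsigned long msec_to_safe_delay(unsigned int msec)
{
        unsigned long j = (unsigned long)msec * HZ / 1000;

        return j ? j : 1;       /* never round a nonzero delay to 0 */
}
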
3845 level_show(struct mddev *mddev, char *page) in level_show() argument
3849 spin_lock(&mddev->lock); in level_show()
3850 p = mddev->pers; in level_show()
3853 else if (mddev->clevel[0]) in level_show()
3854 ret = sprintf(page, "%s\n", mddev->clevel); in level_show()
3855 else if (mddev->level != LEVEL_NONE) in level_show()
3856 ret = sprintf(page, "%d\n", mddev->level); in level_show()
3859 spin_unlock(&mddev->lock); in level_show()
3864 level_store(struct mddev *mddev, const char *buf, size_t len) in level_store() argument
3877 rv = mddev_lock(mddev); in level_store()
3881 if (mddev->pers == NULL) { in level_store()
3882 strncpy(mddev->clevel, buf, slen); in level_store()
3883 if (mddev->clevel[slen-1] == '\n') in level_store()
3885 mddev->clevel[slen] = 0; in level_store()
3886 mddev->level = LEVEL_NONE; in level_store()
3891 if (!md_is_rdwr(mddev)) in level_store()
3901 if (mddev->sync_thread || in level_store()
3902 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in level_store()
3903 mddev->reshape_position != MaxSector || in level_store()
3904 mddev->sysfs_active) in level_store()
3908 if (!mddev->pers->quiesce) { in level_store()
3910 mdname(mddev), mddev->pers->name); in level_store()
3934 if (pers == mddev->pers) { in level_store()
3943 mdname(mddev), clevel); in level_store()
3948 rdev_for_each(rdev, mddev) in level_store()
3954 priv = pers->takeover(mddev); in level_store()
3956 mddev->new_level = mddev->level; in level_store()
3957 mddev->new_layout = mddev->layout; in level_store()
3958 mddev->new_chunk_sectors = mddev->chunk_sectors; in level_store()
3959 mddev->raid_disks -= mddev->delta_disks; in level_store()
3960 mddev->delta_disks = 0; in level_store()
3961 mddev->reshape_backwards = 0; in level_store()
3964 mdname(mddev), clevel); in level_store()
3970 mddev_suspend(mddev); in level_store()
3971 mddev_detach(mddev); in level_store()
3973 spin_lock(&mddev->lock); in level_store()
3974 oldpers = mddev->pers; in level_store()
3975 oldpriv = mddev->private; in level_store()
3976 mddev->pers = pers; in level_store()
3977 mddev->private = priv; in level_store()
3978 strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); in level_store()
3979 mddev->level = mddev->new_level; in level_store()
3980 mddev->layout = mddev->new_layout; in level_store()
3981 mddev->chunk_sectors = mddev->new_chunk_sectors; in level_store()
3982 mddev->delta_disks = 0; in level_store()
3983 mddev->reshape_backwards = 0; in level_store()
3984 mddev->degraded = 0; in level_store()
3985 spin_unlock(&mddev->lock); in level_store()
3988 mddev->external) { in level_store()
3996 mddev->in_sync = 0; in level_store()
3997 mddev->safemode_delay = 0; in level_store()
3998 mddev->safemode = 0; in level_store()
4001 oldpers->free(mddev, oldpriv); in level_store()
4006 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) in level_store()
4008 mdname(mddev)); in level_store()
4009 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); in level_store()
4010 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); in level_store()
4011 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); in level_store()
4016 if (mddev->to_remove == NULL) in level_store()
4017 mddev->to_remove = &md_redundancy_group; in level_store()
4022 rdev_for_each(rdev, mddev) { in level_store()
4025 if (rdev->new_raid_disk >= mddev->raid_disks) in level_store()
4029 sysfs_unlink_rdev(mddev, rdev); in level_store()
4031 rdev_for_each(rdev, mddev) { in level_store()
4040 if (sysfs_link_rdev(mddev, rdev)) in level_store()
4042 rdev->raid_disk, mdname(mddev)); in level_store()
4050 mddev->in_sync = 1; in level_store()
4051 del_timer_sync(&mddev->safemode_timer); in level_store()
4053 blk_set_stacking_limits(&mddev->queue->limits); in level_store()
4054 pers->run(mddev); in level_store()
4055 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in level_store()
4056 mddev_resume(mddev); in level_store()
4057 if (!mddev->thread) in level_store()
4058 md_update_sb(mddev, 1); in level_store()
4059 sysfs_notify_dirent_safe(mddev->sysfs_level); in level_store()
4063 mddev_unlock(mddev); in level_store()
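
[Note] level_store() changes an array's personality in a strict order: quiesce with mddev_suspend(), mddev_detach() the old personality's threads, publish the new pers/private pair under mddev->lock, free the old private data only after that, then run the new personality and resume. A sketch of just that ordering; struct personality and the helpers are stand-ins for struct md_personality and the real suspend machinery:

struct dev;

struct personality {
        int  (*run)(struct dev *d);
        void (*free)(struct dev *d, void *priv);
};

struct dev {
        struct personality *pers;
        void               *private;
};

static void dev_suspend(struct dev *d) { (void)d; /* mddev_suspend() */ }
static void dev_resume(struct dev *d)  { (void)d; /* mddev_resume() */ }
static void dev_detach(struct dev *d)  { (void)d; /* mddev_detach(): stop old threads */ }

static int change_level(struct dev *d, struct personality *pers, void *priv)
{
        struct personality *oldpers;
        void *oldpriv;

        dev_suspend(d);
        dev_detach(d);

        /* kernel: spin_lock(&mddev->lock) around this publication */
        oldpers = d->pers;      oldpriv = d->private;
        d->pers = pers;         d->private = priv;

        oldpers->free(d, oldpriv);      /* safe: no I/O can still reach it */
        pers->run(d);
        dev_resume(d);
        return 0;
}
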
4071 layout_show(struct mddev *mddev, char *page) in layout_show() argument
4074 if (mddev->reshape_position != MaxSector && in layout_show()
4075 mddev->layout != mddev->new_layout) in layout_show()
4077 mddev->new_layout, mddev->layout); in layout_show()
4078 return sprintf(page, "%d\n", mddev->layout); in layout_show()
4082 layout_store(struct mddev *mddev, const char *buf, size_t len) in layout_store() argument
4090 err = mddev_lock(mddev); in layout_store()
4094 if (mddev->pers) { in layout_store()
4095 if (mddev->pers->check_reshape == NULL) in layout_store()
4097 else if (!md_is_rdwr(mddev)) in layout_store()
4100 mddev->new_layout = n; in layout_store()
4101 err = mddev->pers->check_reshape(mddev); in layout_store()
4103 mddev->new_layout = mddev->layout; in layout_store()
4106 mddev->new_layout = n; in layout_store()
4107 if (mddev->reshape_position == MaxSector) in layout_store()
4108 mddev->layout = n; in layout_store()
4110 mddev_unlock(mddev); in layout_store()
4117 raid_disks_show(struct mddev *mddev, char *page) in raid_disks_show() argument
4119 if (mddev->raid_disks == 0) in raid_disks_show()
4121 if (mddev->reshape_position != MaxSector && in raid_disks_show()
4122 mddev->delta_disks != 0) in raid_disks_show()
4123 return sprintf(page, "%d (%d)\n", mddev->raid_disks, in raid_disks_show()
4124 mddev->raid_disks - mddev->delta_disks); in raid_disks_show()
4125 return sprintf(page, "%d\n", mddev->raid_disks); in raid_disks_show()
4128 static int update_raid_disks(struct mddev *mddev, int raid_disks);
4131 raid_disks_store(struct mddev *mddev, const char *buf, size_t len) in raid_disks_store() argument
4140 err = mddev_lock(mddev); in raid_disks_store()
4143 if (mddev->pers) in raid_disks_store()
4144 err = update_raid_disks(mddev, n); in raid_disks_store()
4145 else if (mddev->reshape_position != MaxSector) { in raid_disks_store()
4147 int olddisks = mddev->raid_disks - mddev->delta_disks; in raid_disks_store()
4150 rdev_for_each(rdev, mddev) { in raid_disks_store()
4159 mddev->delta_disks = n - olddisks; in raid_disks_store()
4160 mddev->raid_disks = n; in raid_disks_store()
4161 mddev->reshape_backwards = (mddev->delta_disks < 0); in raid_disks_store()
4163 mddev->raid_disks = n; in raid_disks_store()
4165 mddev_unlock(mddev); in raid_disks_store()
4172 uuid_show(struct mddev *mddev, char *page) in uuid_show() argument
4174 return sprintf(page, "%pU\n", mddev->uuid); in uuid_show()
4180 chunk_size_show(struct mddev *mddev, char *page) in chunk_size_show() argument
4182 if (mddev->reshape_position != MaxSector && in chunk_size_show()
4183 mddev->chunk_sectors != mddev->new_chunk_sectors) in chunk_size_show()
4185 mddev->new_chunk_sectors << 9, in chunk_size_show()
4186 mddev->chunk_sectors << 9); in chunk_size_show()
4187 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); in chunk_size_show()
4191 chunk_size_store(struct mddev *mddev, const char *buf, size_t len) in chunk_size_store() argument
4200 err = mddev_lock(mddev); in chunk_size_store()
4203 if (mddev->pers) { in chunk_size_store()
4204 if (mddev->pers->check_reshape == NULL) in chunk_size_store()
4206 else if (!md_is_rdwr(mddev)) in chunk_size_store()
4209 mddev->new_chunk_sectors = n >> 9; in chunk_size_store()
4210 err = mddev->pers->check_reshape(mddev); in chunk_size_store()
4212 mddev->new_chunk_sectors = mddev->chunk_sectors; in chunk_size_store()
4215 mddev->new_chunk_sectors = n >> 9; in chunk_size_store()
4216 if (mddev->reshape_position == MaxSector) in chunk_size_store()
4217 mddev->chunk_sectors = n >> 9; in chunk_size_store()
4219 mddev_unlock(mddev); in chunk_size_store()
4226 resync_start_show(struct mddev *mddev, char *page) in resync_start_show() argument
4228 if (mddev->recovery_cp == MaxSector) in resync_start_show()
4230 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); in resync_start_show()
4234 resync_start_store(struct mddev *mddev, const char *buf, size_t len) in resync_start_store() argument
4249 err = mddev_lock(mddev); in resync_start_store()
4252 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) in resync_start_store()
4256 mddev->recovery_cp = n; in resync_start_store()
4257 if (mddev->pers) in resync_start_store()
4258 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in resync_start_store()
4260 mddev_unlock(mddev); in resync_start_store()
4323 array_state_show(struct mddev *mddev, char *page) in array_state_show() argument
4327 if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) { in array_state_show()
4328 switch(mddev->ro) { in array_state_show()
4336 spin_lock(&mddev->lock); in array_state_show()
4337 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) in array_state_show()
4339 else if (mddev->in_sync) in array_state_show()
4341 else if (mddev->safemode) in array_state_show()
4345 spin_unlock(&mddev->lock); in array_state_show()
4348 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean) in array_state_show()
4351 if (list_empty(&mddev->disks) && in array_state_show()
4352 mddev->raid_disks == 0 && in array_state_show()
4353 mddev->dev_sectors == 0) in array_state_show()
4361 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4362 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
4363 static int restart_array(struct mddev *mddev);
4366 array_state_store(struct mddev *mddev, const char *buf, size_t len) in array_state_store() argument
4371 if (mddev->pers && (st == active || st == clean) && in array_state_store()
4372 mddev->ro != MD_RDONLY) { in array_state_store()
4376 spin_lock(&mddev->lock); in array_state_store()
4378 restart_array(mddev); in array_state_store()
4379 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in array_state_store()
4380 md_wakeup_thread(mddev->thread); in array_state_store()
4381 wake_up(&mddev->sb_wait); in array_state_store()
4383 restart_array(mddev); in array_state_store()
4384 if (!set_in_sync(mddev)) in array_state_store()
4388 sysfs_notify_dirent_safe(mddev->sysfs_state); in array_state_store()
4389 spin_unlock(&mddev->lock); in array_state_store()
4392 err = mddev_lock(mddev); in array_state_store()
4401 err = do_md_stop(mddev, 0, NULL); in array_state_store()
4405 if (mddev->pers) in array_state_store()
4406 err = do_md_stop(mddev, 2, NULL); in array_state_store()
4413 if (mddev->pers) in array_state_store()
4414 err = md_set_readonly(mddev, NULL); in array_state_store()
4416 mddev->ro = MD_RDONLY; in array_state_store()
4417 set_disk_ro(mddev->gendisk, 1); in array_state_store()
4418 err = do_md_run(mddev); in array_state_store()
4422 if (mddev->pers) { in array_state_store()
4423 if (md_is_rdwr(mddev)) in array_state_store()
4424 err = md_set_readonly(mddev, NULL); in array_state_store()
4425 else if (mddev->ro == MD_RDONLY) in array_state_store()
4426 err = restart_array(mddev); in array_state_store()
4428 mddev->ro = MD_AUTO_READ; in array_state_store()
4429 set_disk_ro(mddev->gendisk, 0); in array_state_store()
4432 mddev->ro = MD_AUTO_READ; in array_state_store()
4433 err = do_md_run(mddev); in array_state_store()
4437 if (mddev->pers) { in array_state_store()
4438 err = restart_array(mddev); in array_state_store()
4441 spin_lock(&mddev->lock); in array_state_store()
4442 if (!set_in_sync(mddev)) in array_state_store()
4444 spin_unlock(&mddev->lock); in array_state_store()
4449 if (mddev->pers) { in array_state_store()
4450 err = restart_array(mddev); in array_state_store()
4453 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in array_state_store()
4454 wake_up(&mddev->sb_wait); in array_state_store()
4457 mddev->ro = MD_RDWR; in array_state_store()
4458 set_disk_ro(mddev->gendisk, 0); in array_state_store()
4459 err = do_md_run(mddev); in array_state_store()
4470 if (mddev->hold_active == UNTIL_IOCTL) in array_state_store()
4471 mddev->hold_active = 0; in array_state_store()
4472 sysfs_notify_dirent_safe(mddev->sysfs_state); in array_state_store()
4474 mddev_unlock(mddev); in array_state_store()
4481 max_corrected_read_errors_show(struct mddev *mddev, char *page) { in max_corrected_read_errors_show() argument
4483 atomic_read(&mddev->max_corr_read_errors)); in max_corrected_read_errors_show()
4487 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) in max_corrected_read_errors_store() argument
4497 atomic_set(&mddev->max_corr_read_errors, n); in max_corrected_read_errors_store()
4506 null_show(struct mddev *mddev, char *page) in null_show() argument
4512 new_dev_store(struct mddev *mddev, const char *buf, size_t len) in new_dev_store() argument
4538 err = mddev_lock(mddev); in new_dev_store()
4541 if (mddev->persistent) { in new_dev_store()
4542 rdev = md_import_device(dev, mddev->major_version, in new_dev_store()
4543 mddev->minor_version); in new_dev_store()
4544 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { in new_dev_store()
4546 = list_entry(mddev->disks.next, in new_dev_store()
4548 err = super_types[mddev->major_version] in new_dev_store()
4549 .load_super(rdev, rdev0, mddev->minor_version); in new_dev_store()
4553 } else if (mddev->external) in new_dev_store()
4559 mddev_unlock(mddev); in new_dev_store()
4562 err = bind_rdev_to_array(rdev, mddev); in new_dev_store()
4565 export_rdev(rdev, mddev); in new_dev_store()
4566 mddev_unlock(mddev); in new_dev_store()
4576 bitmap_store(struct mddev *mddev, const char *buf, size_t len) in bitmap_store() argument
4582 err = mddev_lock(mddev); in bitmap_store()
4585 if (!mddev->bitmap) in bitmap_store()
4597 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); in bitmap_store()
4600 md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ in bitmap_store()
4602 mddev_unlock(mddev); in bitmap_store()
4610 size_show(struct mddev *mddev, char *page) in size_show() argument
4613 (unsigned long long)mddev->dev_sectors / 2); in size_show()
4616 static int update_size(struct mddev *mddev, sector_t num_sectors);
4619 size_store(struct mddev *mddev, const char *buf, size_t len) in size_store() argument
4630 err = mddev_lock(mddev); in size_store()
4633 if (mddev->pers) { in size_store()
4634 err = update_size(mddev, sectors); in size_store()
4636 md_update_sb(mddev, 1); in size_store()
4638 if (mddev->dev_sectors == 0 || in size_store()
4639 mddev->dev_sectors > sectors) in size_store()
4640 mddev->dev_sectors = sectors; in size_store()
4644 mddev_unlock(mddev); in size_store()
4658 metadata_show(struct mddev *mddev, char *page) in metadata_show() argument
4660 if (mddev->persistent) in metadata_show()
4662 mddev->major_version, mddev->minor_version); in metadata_show()
4663 else if (mddev->external) in metadata_show()
4664 return sprintf(page, "external:%s\n", mddev->metadata_type); in metadata_show()
4670 metadata_store(struct mddev *mddev, const char *buf, size_t len) in metadata_store() argument
4680 err = mddev_lock(mddev); in metadata_store()
4684 if (mddev->external && strncmp(buf, "external:", 9) == 0) in metadata_store()
4686 else if (!list_empty(&mddev->disks)) in metadata_store()
4691 mddev->persistent = 0; in metadata_store()
4692 mddev->external = 0; in metadata_store()
4693 mddev->major_version = 0; in metadata_store()
4694 mddev->minor_version = 90; in metadata_store()
4699 if (namelen >= sizeof(mddev->metadata_type)) in metadata_store()
4700 namelen = sizeof(mddev->metadata_type)-1; in metadata_store()
4701 strncpy(mddev->metadata_type, buf+9, namelen); in metadata_store()
4702 mddev->metadata_type[namelen] = 0; in metadata_store()
4703 if (namelen && mddev->metadata_type[namelen-1] == '\n') in metadata_store()
4704 mddev->metadata_type[--namelen] = 0; in metadata_store()
4705 mddev->persistent = 0; in metadata_store()
4706 mddev->external = 1; in metadata_store()
4707 mddev->major_version = 0; in metadata_store()
4708 mddev->minor_version = 90; in metadata_store()
4722 mddev->major_version = major; in metadata_store()
4723 mddev->minor_version = minor; in metadata_store()
4724 mddev->persistent = 1; in metadata_store()
4725 mddev->external = 0; in metadata_store()
4728 mddev_unlock(mddev); in metadata_store()
4736 action_show(struct mddev *mddev, char *page) in action_show() argument
4739 unsigned long recovery = mddev->recovery; in action_show()
4743 (md_is_rdwr(mddev) && test_bit(MD_RECOVERY_NEEDED, &recovery))) { in action_show()
4755 else if (mddev->reshape_position != MaxSector) in action_show()
4761 static void stop_sync_thread(struct mddev *mddev) in stop_sync_thread() argument
4763 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in stop_sync_thread()
4766 if (mddev_lock(mddev)) in stop_sync_thread()
4773 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { in stop_sync_thread()
4774 mddev_unlock(mddev); in stop_sync_thread()
4778 if (work_pending(&mddev->del_work)) in stop_sync_thread()
4781 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in stop_sync_thread()
4786 md_wakeup_thread_directly(mddev->sync_thread); in stop_sync_thread()
4788 mddev_unlock(mddev); in stop_sync_thread()
4791 static void idle_sync_thread(struct mddev *mddev) in idle_sync_thread() argument
4793 int sync_seq = atomic_read(&mddev->sync_seq); in idle_sync_thread()
4795 mutex_lock(&mddev->sync_mutex); in idle_sync_thread()
4796 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in idle_sync_thread()
4797 stop_sync_thread(mddev); in idle_sync_thread()
4799 wait_event(resync_wait, sync_seq != atomic_read(&mddev->sync_seq) || in idle_sync_thread()
4800 !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)); in idle_sync_thread()
4802 mutex_unlock(&mddev->sync_mutex); in idle_sync_thread()
4805 static void frozen_sync_thread(struct mddev *mddev) in frozen_sync_thread() argument
4807 mutex_lock(&mddev->sync_mutex); in frozen_sync_thread()
4808 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in frozen_sync_thread()
4809 stop_sync_thread(mddev); in frozen_sync_thread()
4811 wait_event(resync_wait, mddev->sync_thread == NULL && in frozen_sync_thread()
4812 !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)); in frozen_sync_thread()
4814 mutex_unlock(&mddev->sync_mutex); in frozen_sync_thread()
4818 action_store(struct mddev *mddev, const char *page, size_t len) in action_store() argument
4820 if (!mddev->pers || !mddev->pers->sync_request) in action_store()
4825 idle_sync_thread(mddev); in action_store()
4827 frozen_sync_thread(mddev); in action_store()
4828 else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in action_store()
4831 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4833 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4834 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in action_store()
4837 if (mddev->pers->start_reshape == NULL) in action_store()
4839 err = mddev_lock(mddev); in action_store()
4841 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { in action_store()
4843 } else if (mddev->reshape_position == MaxSector || in action_store()
4844 mddev->pers->check_reshape == NULL || in action_store()
4845 mddev->pers->check_reshape(mddev)) { in action_store()
4846 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4847 err = mddev->pers->start_reshape(mddev); in action_store()
4855 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4857 mddev_unlock(mddev); in action_store()
4861 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in action_store()
4864 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); in action_store()
4867 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4868 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in action_store()
4869 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); in action_store()
4871 if (mddev->ro == MD_AUTO_READ) { in action_store()
4875 mddev->ro = MD_RDWR; in action_store()
4876 md_wakeup_thread(mddev->sync_thread); in action_store()
4878 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in action_store()
4879 md_wakeup_thread(mddev->thread); in action_store()
4880 sysfs_notify_dirent_safe(mddev->sysfs_action); in action_store()
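
action_store() is reached by writing one of "idle", "frozen", "resync", "recover", "check", "repair" or "reshape" to the sync_action attribute. A short sketch that kicks off a scrub; the device name is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/block/md0/md/sync_action";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror(path);
                return 1;
        }
        /* "check" sets MD_RECOVERY_CHECK/REQUESTED/SYNC and wakes the
         * md thread, as in the action_store() lines above; progress
         * can then be polled from the sync_completed attribute. */
        if (write(fd, "check", strlen("check")) < 0)
                perror("write");
        close(fd);
        return 0;
}
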
4888 last_sync_action_show(struct mddev *mddev, char *page) in last_sync_action_show() argument
4890 return sprintf(page, "%s\n", mddev->last_sync_action); in last_sync_action_show()
4896 mismatch_cnt_show(struct mddev *mddev, char *page) in mismatch_cnt_show() argument
4900 atomic64_read(&mddev->resync_mismatches)); in mismatch_cnt_show()
4906 sync_min_show(struct mddev *mddev, char *page) in sync_min_show() argument
4908 return sprintf(page, "%d (%s)\n", speed_min(mddev), in sync_min_show()
4909 mddev->sync_speed_min ? "local" : "system"); in sync_min_show()
4913 sync_min_store(struct mddev *mddev, const char *buf, size_t len) in sync_min_store() argument
4927 mddev->sync_speed_min = min; in sync_min_store()
4935 sync_max_show(struct mddev *mddev, char *page) in sync_max_show() argument
4937 return sprintf(page, "%d (%s)\n", speed_max(mddev), in sync_max_show()
4938 mddev->sync_speed_max ? "local" : "system"); in sync_max_show()
4942 sync_max_store(struct mddev *mddev, const char *buf, size_t len) in sync_max_store() argument
4956 mddev->sync_speed_max = max; in sync_max_store()
4964 degraded_show(struct mddev *mddev, char *page) in degraded_show() argument
4966 return sprintf(page, "%d\n", mddev->degraded); in degraded_show()
4971 sync_force_parallel_show(struct mddev *mddev, char *page) in sync_force_parallel_show() argument
4973 return sprintf(page, "%d\n", mddev->parallel_resync); in sync_force_parallel_show()
4977 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) in sync_force_parallel_store() argument
4987 mddev->parallel_resync = n; in sync_force_parallel_store()
4989 if (mddev->sync_thread) in sync_force_parallel_store()
5001 sync_speed_show(struct mddev *mddev, char *page) in sync_speed_show() argument
5004 if (mddev->curr_resync == MD_RESYNC_NONE) in sync_speed_show()
5006 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); in sync_speed_show()
5007 dt = (jiffies - mddev->resync_mark) / HZ; in sync_speed_show()
5009 db = resync - mddev->resync_mark_cnt; in sync_speed_show()
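
The lines that finish sync_speed_show() do not mention mddev and so are not listed; the result printed is db/dt/2 K/sec. A user-space model of the same arithmetic, with the mddev fields passed in explicitly:

#include <stdio.h>

/* Sectors completed since the last mark (minus I/O still in flight),
 * per elapsed second, halved to express 512-byte sectors as KiB/s. */
static unsigned long sync_speed(unsigned long curr_mark_cnt,
                                unsigned long recovery_active,
                                unsigned long resync_mark_cnt,
                                unsigned long dt)
{
        unsigned long resync = curr_mark_cnt - recovery_active;
        unsigned long db = resync - resync_mark_cnt;

        if (!dt)
                dt = 1;         /* the kernel likewise avoids a zero dt */
        return db / dt / 2;
}

int main(void)
{
        /* 409600 sectors in 10 s, nothing in flight: 20480 K/sec */
        printf("%lu K/sec\n", sync_speed(409600, 0, 0, 10));
        return 0;
}
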
5016 sync_completed_show(struct mddev *mddev, char *page) in sync_completed_show() argument
5020 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in sync_completed_show()
5023 if (mddev->curr_resync == MD_RESYNC_YIELDED || in sync_completed_show()
5024 mddev->curr_resync == MD_RESYNC_DELAYED) in sync_completed_show()
5027 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in sync_completed_show()
5028 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in sync_completed_show()
5029 max_sectors = mddev->resync_max_sectors; in sync_completed_show()
5031 max_sectors = mddev->dev_sectors; in sync_completed_show()
5033 resync = mddev->curr_resync_completed; in sync_completed_show()
5041 min_sync_show(struct mddev *mddev, char *page) in min_sync_show() argument
5044 (unsigned long long)mddev->resync_min); in min_sync_show()
5047 min_sync_store(struct mddev *mddev, const char *buf, size_t len) in min_sync_store() argument
5055 spin_lock(&mddev->lock); in min_sync_store()
5057 if (min > mddev->resync_max) in min_sync_store()
5061 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in min_sync_store()
5065 mddev->resync_min = round_down(min, 8); in min_sync_store()
5069 spin_unlock(&mddev->lock); in min_sync_store()
5077 max_sync_show(struct mddev *mddev, char *page) in max_sync_show() argument
5079 if (mddev->resync_max == MaxSector) in max_sync_show()
5083 (unsigned long long)mddev->resync_max); in max_sync_show()
5086 max_sync_store(struct mddev *mddev, const char *buf, size_t len) in max_sync_store() argument
5089 spin_lock(&mddev->lock); in max_sync_store()
5091 mddev->resync_max = MaxSector; in max_sync_store()
5099 if (max < mddev->resync_min) in max_sync_store()
5103 if (max < mddev->resync_max && md_is_rdwr(mddev) && in max_sync_store()
5104 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in max_sync_store()
5108 chunk = mddev->chunk_sectors; in max_sync_store()
5116 mddev->resync_max = max; in max_sync_store()
5118 wake_up(&mddev->recovery_wait); in max_sync_store()
5121 spin_unlock(&mddev->lock); in max_sync_store()
5129 suspend_lo_show(struct mddev *mddev, char *page) in suspend_lo_show() argument
5131 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); in suspend_lo_show()
5135 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) in suspend_lo_store() argument
5146 err = mddev_lock(mddev); in suspend_lo_store()
5150 if (mddev->pers == NULL || in suspend_lo_store()
5151 mddev->pers->quiesce == NULL) in suspend_lo_store()
5153 mddev_suspend(mddev); in suspend_lo_store()
5154 mddev->suspend_lo = new; in suspend_lo_store()
5155 mddev_resume(mddev); in suspend_lo_store()
5159 mddev_unlock(mddev); in suspend_lo_store()
5166 suspend_hi_show(struct mddev *mddev, char *page) in suspend_hi_show() argument
5168 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); in suspend_hi_show()
5172 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) in suspend_hi_store() argument
5183 err = mddev_lock(mddev); in suspend_hi_store()
5187 if (mddev->pers == NULL) in suspend_hi_store()
5190 mddev_suspend(mddev); in suspend_hi_store()
5191 mddev->suspend_hi = new; in suspend_hi_store()
5192 mddev_resume(mddev); in suspend_hi_store()
5196 mddev_unlock(mddev); in suspend_hi_store()
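
suspend_lo_store() and suspend_hi_store() above quiesce the array while moving one end of the suspend window; both values are sector offsets. A sketch that holds I/O to the first 1 MiB (2048 sectors) of an array; the device name is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int md_write_attr(const char *attr, const char *val)
{
        char path[128];
        ssize_t n;
        int fd;

        snprintf(path, sizeof(path), "/sys/block/md0/md/%s", attr);
        fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;
        n = write(fd, val, strlen(val));
        close(fd);
        return n < 0 ? -1 : 0;
}

int main(void)
{
        /* I/O overlapping [suspend_lo, suspend_hi) is held while the
         * window is non-empty; writing equal values re-opens it. */
        if (md_write_attr("suspend_lo", "0") ||
            md_write_attr("suspend_hi", "2048"))
                return 1;
        return 0;
}
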
5203 reshape_position_show(struct mddev *mddev, char *page) in reshape_position_show() argument
5205 if (mddev->reshape_position != MaxSector) in reshape_position_show()
5207 (unsigned long long)mddev->reshape_position); in reshape_position_show()
5213 reshape_position_store(struct mddev *mddev, const char *buf, size_t len) in reshape_position_store() argument
5224 err = mddev_lock(mddev); in reshape_position_store()
5228 if (mddev->pers) in reshape_position_store()
5230 mddev->reshape_position = new; in reshape_position_store()
5231 mddev->delta_disks = 0; in reshape_position_store()
5232 mddev->reshape_backwards = 0; in reshape_position_store()
5233 mddev->new_level = mddev->level; in reshape_position_store()
5234 mddev->new_layout = mddev->layout; in reshape_position_store()
5235 mddev->new_chunk_sectors = mddev->chunk_sectors; in reshape_position_store()
5236 rdev_for_each(rdev, mddev) in reshape_position_store()
5240 mddev_unlock(mddev); in reshape_position_store()
5249 reshape_direction_show(struct mddev *mddev, char *page) in reshape_direction_show() argument
5252 mddev->reshape_backwards ? "backwards" : "forwards"); in reshape_direction_show()
5256 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len) in reshape_direction_store() argument
5267 if (mddev->reshape_backwards == backwards) in reshape_direction_store()
5270 err = mddev_lock(mddev); in reshape_direction_store()
5274 if (mddev->delta_disks) in reshape_direction_store()
5276 else if (mddev->persistent && in reshape_direction_store()
5277 mddev->major_version == 0) in reshape_direction_store()
5280 mddev->reshape_backwards = backwards; in reshape_direction_store()
5281 mddev_unlock(mddev); in reshape_direction_store()
5290 array_size_show(struct mddev *mddev, char *page) in array_size_show() argument
5292 if (mddev->external_size) in array_size_show()
5294 (unsigned long long)mddev->array_sectors/2); in array_size_show()
5300 array_size_store(struct mddev *mddev, const char *buf, size_t len) in array_size_store() argument
5305 err = mddev_lock(mddev); in array_size_store()
5310 if (mddev_is_clustered(mddev)) { in array_size_store()
5311 mddev_unlock(mddev); in array_size_store()
5316 if (mddev->pers) in array_size_store()
5317 sectors = mddev->pers->size(mddev, 0, 0); in array_size_store()
5319 sectors = mddev->array_sectors; in array_size_store()
5321 mddev->external_size = 0; in array_size_store()
5325 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) in array_size_store()
5328 mddev->external_size = 1; in array_size_store()
5332 mddev->array_sectors = sectors; in array_size_store()
5333 if (mddev->pers) in array_size_store()
5334 set_capacity_and_notify(mddev->gendisk, in array_size_store()
5335 mddev->array_sectors); in array_size_store()
5337 mddev_unlock(mddev); in array_size_store()
5346 consistency_policy_show(struct mddev *mddev, char *page) in consistency_policy_show() argument
5350 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { in consistency_policy_show()
5352 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) { in consistency_policy_show()
5354 } else if (mddev->bitmap) { in consistency_policy_show()
5356 } else if (mddev->pers) { in consistency_policy_show()
5357 if (mddev->pers->sync_request) in consistency_policy_show()
5369 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len) in consistency_policy_store() argument
5373 if (mddev->pers) { in consistency_policy_store()
5374 if (mddev->pers->change_consistency_policy) in consistency_policy_store()
5375 err = mddev->pers->change_consistency_policy(mddev, buf); in consistency_policy_store()
5378 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) { in consistency_policy_store()
5379 set_bit(MD_HAS_PPL, &mddev->flags); in consistency_policy_store()
5391 static ssize_t fail_last_dev_show(struct mddev *mddev, char *page) in fail_last_dev_show() argument
5393 return sprintf(page, "%d\n", mddev->fail_last_dev); in fail_last_dev_show()
5401 fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len) in fail_last_dev_store() argument
5410 if (value != mddev->fail_last_dev) in fail_last_dev_store()
5411 mddev->fail_last_dev = value; in fail_last_dev_store()
5419 static ssize_t serialize_policy_show(struct mddev *mddev, char *page) in serialize_policy_show() argument
5421 if (mddev->pers == NULL || (mddev->pers->level != 1)) in serialize_policy_show()
5424 return sprintf(page, "%d\n", mddev->serialize_policy); in serialize_policy_show()
5432 serialize_policy_store(struct mddev *mddev, const char *buf, size_t len) in serialize_policy_store() argument
5441 if (value == mddev->serialize_policy) in serialize_policy_store()
5444 err = mddev_lock(mddev); in serialize_policy_store()
5447 if (mddev->pers == NULL || (mddev->pers->level != 1)) { in serialize_policy_store()
5453 mddev_suspend(mddev); in serialize_policy_store()
5455 mddev_create_serial_pool(mddev, NULL, true); in serialize_policy_store()
5457 mddev_destroy_serial_pool(mddev, NULL, true); in serialize_policy_store()
5458 mddev->serialize_policy = value; in serialize_policy_store()
5459 mddev_resume(mddev); in serialize_policy_store()
5461 mddev_unlock(mddev); in serialize_policy_store()
5528 struct mddev *mddev = container_of(kobj, struct mddev, kobj); in md_attr_show() local
5534 if (!mddev_get(mddev)) { in md_attr_show()
5540 rv = entry->show(mddev, page); in md_attr_show()
5541 mddev_put(mddev); in md_attr_show()
5550 struct mddev *mddev = container_of(kobj, struct mddev, kobj); in md_attr_store() local
5558 if (!mddev_get(mddev)) { in md_attr_store()
5563 rv = entry->store(mddev, page, length); in md_attr_store()
5564 mddev_put(mddev); in md_attr_store()
5570 struct mddev *mddev = container_of(ko, struct mddev, kobj); in md_kobj_release() local
5572 if (mddev->sysfs_state) in md_kobj_release()
5573 sysfs_put(mddev->sysfs_state); in md_kobj_release()
5574 if (mddev->sysfs_level) in md_kobj_release()
5575 sysfs_put(mddev->sysfs_level); in md_kobj_release()
5577 del_gendisk(mddev->gendisk); in md_kobj_release()
5578 put_disk(mddev->gendisk); in md_kobj_release()
5595 struct mddev *mddev = container_of(ws, struct mddev, del_work); in mddev_delayed_delete() local
5597 kobject_put(&mddev->kobj); in mddev_delayed_delete()
5602 int mddev_init_writes_pending(struct mddev *mddev) in mddev_init_writes_pending() argument
5604 if (mddev->writes_pending.percpu_count_ptr) in mddev_init_writes_pending()
5606 if (percpu_ref_init(&mddev->writes_pending, no_op, in mddev_init_writes_pending()
5610 percpu_ref_put(&mddev->writes_pending); in mddev_init_writes_pending()
5615 struct mddev *md_alloc(dev_t dev, char *name) in md_alloc()
5627 struct mddev *mddev; in md_alloc() local
5641 mddev = mddev_alloc(dev); in md_alloc()
5642 if (IS_ERR(mddev)) { in md_alloc()
5643 error = PTR_ERR(mddev); in md_alloc()
5647 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); in md_alloc()
5649 unit = MINOR(mddev->unit) >> shift; in md_alloc()
5654 struct mddev *mddev2; in md_alloc()
5670 mddev->hold_active = UNTIL_STOP; in md_alloc()
5677 disk->major = MAJOR(mddev->unit); in md_alloc()
5687 disk->private_data = mddev; in md_alloc()
5689 mddev->queue = disk->queue; in md_alloc()
5690 blk_set_stacking_limits(&mddev->queue->limits); in md_alloc()
5691 blk_queue_write_cache(mddev->queue, true, true); in md_alloc()
5693 mddev->gendisk = disk; in md_alloc()
5698 kobject_init(&mddev->kobj, &md_ktype); in md_alloc()
5699 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md"); in md_alloc()
5706 mddev->hold_active = 0; in md_alloc()
5708 mddev_put(mddev); in md_alloc()
5712 kobject_uevent(&mddev->kobj, KOBJ_ADD); in md_alloc()
5713 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); in md_alloc()
5714 mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level"); in md_alloc()
5716 return mddev; in md_alloc()
5721 mddev_free(mddev); in md_alloc()
5729 struct mddev *mddev = md_alloc(dev, name); in md_alloc_and_put() local
5731 if (IS_ERR(mddev)) in md_alloc_and_put()
5732 return PTR_ERR(mddev); in md_alloc_and_put()
5733 mddev_put(mddev); in md_alloc_and_put()
5776 struct mddev *mddev = from_timer(mddev, t, safemode_timer); in md_safemode_timeout() local
5778 mddev->safemode = 1; in md_safemode_timeout()
5779 if (mddev->external) in md_safemode_timeout()
5780 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_safemode_timeout()
5782 md_wakeup_thread(mddev->thread); in md_safemode_timeout()
5788 struct mddev *mddev = container_of(ref, struct mddev, active_io); in active_io_release() local
5790 wake_up(&mddev->sb_wait); in active_io_release()
5793 int md_run(struct mddev *mddev) in md_run() argument
5800 if (list_empty(&mddev->disks)) in md_run()
5804 if (mddev->pers) in md_run()
5807 if (mddev->sysfs_active) in md_run()
5813 if (!mddev->raid_disks) { in md_run()
5814 if (!mddev->persistent) in md_run()
5816 err = analyze_sbs(mddev); in md_run()
5821 if (mddev->level != LEVEL_NONE) in md_run()
5822 request_module("md-level-%d", mddev->level); in md_run()
5823 else if (mddev->clevel[0]) in md_run()
5824 request_module("md-%s", mddev->clevel); in md_run()
5831 mddev->has_superblocks = false; in md_run()
5832 rdev_for_each(rdev, mddev) { in md_run()
5837 if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) { in md_run()
5838 mddev->ro = MD_RDONLY; in md_run()
5839 if (mddev->gendisk) in md_run()
5840 set_disk_ro(mddev->gendisk, 1); in md_run()
5844 mddev->has_superblocks = true; in md_run()
5853 if (mddev->dev_sectors && in md_run()
5854 rdev->data_offset + mddev->dev_sectors in md_run()
5857 mdname(mddev)); in md_run()
5864 mdname(mddev)); in md_run()
5872 err = percpu_ref_init(&mddev->active_io, active_io_release, in md_run()
5877 if (!bioset_initialized(&mddev->bio_set)) { in md_run()
5878 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); in md_run()
5882 if (!bioset_initialized(&mddev->sync_set)) { in md_run()
5883 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); in md_run()
5888 if (!bioset_initialized(&mddev->io_clone_set)) { in md_run()
5889 err = bioset_init(&mddev->io_clone_set, BIO_POOL_SIZE, in md_run()
5896 pers = find_pers(mddev->level, mddev->clevel); in md_run()
5899 if (mddev->level != LEVEL_NONE) in md_run()
5901 mddev->level); in md_run()
5904 mddev->clevel); in md_run()
5909 if (mddev->level != pers->level) { in md_run()
5910 mddev->level = pers->level; in md_run()
5911 mddev->new_level = pers->level; in md_run()
5913 strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); in md_run()
5915 if (mddev->reshape_position != MaxSector && in md_run()
5930 rdev_for_each(rdev, mddev) in md_run()
5931 rdev_for_each(rdev2, mddev) { in md_run()
5936 mdname(mddev), in md_run()
5947 mddev->recovery = 0; in md_run()
5949 mddev->resync_max_sectors = mddev->dev_sectors; in md_run()
5951 mddev->ok_start_degraded = start_dirty_degraded; in md_run()
5953 if (start_readonly && md_is_rdwr(mddev)) in md_run()
5954 mddev->ro = MD_AUTO_READ; /* read-only, but switch on first write */ in md_run()
5956 err = pers->run(mddev); in md_run()
5959 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) { in md_run()
5960 WARN_ONCE(!mddev->external_size, in md_run()
5964 (unsigned long long)mddev->array_sectors / 2, in md_run()
5965 (unsigned long long)pers->size(mddev, 0, 0) / 2); in md_run()
5969 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { in md_run()
5972 bitmap = md_bitmap_create(mddev, -1); in md_run()
5976 mdname(mddev), err); in md_run()
5978 mddev->bitmap = bitmap; in md_run()
5984 if (mddev->bitmap_info.max_write_behind > 0) { in md_run()
5987 rdev_for_each(rdev, mddev) { in md_run()
5992 if (create_pool && mddev->serial_info_pool == NULL) { in md_run()
5993 mddev->serial_info_pool = in md_run()
5996 if (!mddev->serial_info_pool) { in md_run()
6003 if (mddev->queue) { in md_run()
6006 rdev_for_each(rdev, mddev) { in md_run()
6012 if (mddev->degraded) in md_run()
6015 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue); in md_run()
6017 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue); in md_run()
6018 blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue); in md_run()
6022 blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue); in md_run()
6025 if (mddev->kobj.sd && in md_run()
6026 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) in md_run()
6028 mdname(mddev)); in md_run()
6029 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); in md_run()
6030 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); in md_run()
6031 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); in md_run()
6032 } else if (mddev->ro == MD_AUTO_READ) in md_run()
6033 mddev->ro = MD_RDWR; in md_run()
6035 atomic_set(&mddev->max_corr_read_errors, in md_run()
6037 mddev->safemode = 0; in md_run()
6038 if (mddev_is_clustered(mddev)) in md_run()
6039 mddev->safemode_delay = 0; in md_run()
6041 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; in md_run()
6042 mddev->in_sync = 1; in md_run()
6044 spin_lock(&mddev->lock); in md_run()
6045 mddev->pers = pers; in md_run()
6046 spin_unlock(&mddev->lock); in md_run()
6047 rdev_for_each(rdev, mddev) in md_run()
6049 sysfs_link_rdev(mddev, rdev); /* failure here is OK */ in md_run()
6051 if (mddev->degraded && md_is_rdwr(mddev)) in md_run()
6055 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_run()
6056 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_run()
6058 if (mddev->sb_flags) in md_run()
6059 md_update_sb(mddev, 0); in md_run()
6065 mddev_detach(mddev); in md_run()
6066 if (mddev->private) in md_run()
6067 pers->free(mddev, mddev->private); in md_run()
6068 mddev->private = NULL; in md_run()
6070 md_bitmap_destroy(mddev); in md_run()
6072 bioset_exit(&mddev->io_clone_set); in md_run()
6074 bioset_exit(&mddev->sync_set); in md_run()
6076 bioset_exit(&mddev->bio_set); in md_run()
6078 percpu_ref_exit(&mddev->active_io); in md_run()
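
The tail of md_run() above is its unwind ladder: resources initialised first are released last (io_clone_set, then sync_set, then bio_set, then active_io). The idiom reduced to a user-space sketch, with malloc() standing in for bioset_init()/percpu_ref_init():

#include <stdlib.h>

struct res {
        char *ref;      /* stands in for mddev->active_io */
        char *bio;      /* stands in for mddev->bio_set   */
        char *sync;     /* stands in for mddev->sync_set  */
};

static int setup(struct res *r)
{
        r->ref = malloc(16);
        if (!r->ref)
                goto fail;
        r->bio = malloc(16);
        if (!r->bio)
                goto exit_ref;
        r->sync = malloc(16);
        if (!r->sync)
                goto exit_bio;
        return 0;

exit_bio:               /* unwind in reverse order of setup */
        free(r->bio);
exit_ref:
        free(r->ref);
fail:
        return -1;
}

int main(void)
{
        struct res r;

        if (setup(&r))
                return 1;
        free(r.sync);   /* normal teardown: also reverse order */
        free(r.bio);
        free(r.ref);
        return 0;
}
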
6083 int do_md_run(struct mddev *mddev) in do_md_run() argument
6087 set_bit(MD_NOT_READY, &mddev->flags); in do_md_run()
6088 err = md_run(mddev); in do_md_run()
6091 err = md_bitmap_load(mddev); in do_md_run()
6093 md_bitmap_destroy(mddev); in do_md_run()
6097 if (mddev_is_clustered(mddev)) in do_md_run()
6098 md_allow_write(mddev); in do_md_run()
6101 md_start(mddev); in do_md_run()
6103 md_wakeup_thread(mddev->thread); in do_md_run()
6104 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ in do_md_run()
6106 set_capacity_and_notify(mddev->gendisk, mddev->array_sectors); in do_md_run()
6107 clear_bit(MD_NOT_READY, &mddev->flags); in do_md_run()
6108 mddev->changed = 1; in do_md_run()
6109 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); in do_md_run()
6110 sysfs_notify_dirent_safe(mddev->sysfs_state); in do_md_run()
6111 sysfs_notify_dirent_safe(mddev->sysfs_action); in do_md_run()
6112 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in do_md_run()
6114 clear_bit(MD_NOT_READY, &mddev->flags); in do_md_run()
6118 int md_start(struct mddev *mddev) in md_start() argument
6122 if (mddev->pers->start) { in md_start()
6123 set_bit(MD_RECOVERY_WAIT, &mddev->recovery); in md_start()
6124 md_wakeup_thread(mddev->thread); in md_start()
6125 ret = mddev->pers->start(mddev); in md_start()
6126 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery); in md_start()
6127 md_wakeup_thread(mddev->sync_thread); in md_start()
6133 static int restart_array(struct mddev *mddev) in restart_array() argument
6135 struct gendisk *disk = mddev->gendisk; in restart_array()
6141 if (list_empty(&mddev->disks)) in restart_array()
6143 if (!mddev->pers) in restart_array()
6145 if (md_is_rdwr(mddev)) in restart_array()
6149 rdev_for_each_rcu(rdev, mddev) { in restart_array()
6157 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal) in restart_array()
6163 mddev->safemode = 0; in restart_array()
6164 mddev->ro = MD_RDWR; in restart_array()
6166 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev)); in restart_array()
6168 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in restart_array()
6169 md_wakeup_thread(mddev->thread); in restart_array()
6170 md_wakeup_thread(mddev->sync_thread); in restart_array()
6171 sysfs_notify_dirent_safe(mddev->sysfs_state); in restart_array()
6175 static void md_clean(struct mddev *mddev) in md_clean() argument
6177 mddev->array_sectors = 0; in md_clean()
6178 mddev->external_size = 0; in md_clean()
6179 mddev->dev_sectors = 0; in md_clean()
6180 mddev->raid_disks = 0; in md_clean()
6181 mddev->recovery_cp = 0; in md_clean()
6182 mddev->resync_min = 0; in md_clean()
6183 mddev->resync_max = MaxSector; in md_clean()
6184 mddev->reshape_position = MaxSector; in md_clean()
6186 mddev->persistent = 0; in md_clean()
6187 mddev->level = LEVEL_NONE; in md_clean()
6188 mddev->clevel[0] = 0; in md_clean()
6189 mddev->flags = 0; in md_clean()
6190 mddev->sb_flags = 0; in md_clean()
6191 mddev->ro = MD_RDWR; in md_clean()
6192 mddev->metadata_type[0] = 0; in md_clean()
6193 mddev->chunk_sectors = 0; in md_clean()
6194 mddev->ctime = mddev->utime = 0; in md_clean()
6195 mddev->layout = 0; in md_clean()
6196 mddev->max_disks = 0; in md_clean()
6197 mddev->events = 0; in md_clean()
6198 mddev->can_decrease_events = 0; in md_clean()
6199 mddev->delta_disks = 0; in md_clean()
6200 mddev->reshape_backwards = 0; in md_clean()
6201 mddev->new_level = LEVEL_NONE; in md_clean()
6202 mddev->new_layout = 0; in md_clean()
6203 mddev->new_chunk_sectors = 0; in md_clean()
6204 mddev->curr_resync = MD_RESYNC_NONE; in md_clean()
6205 atomic64_set(&mddev->resync_mismatches, 0); in md_clean()
6206 mddev->suspend_lo = mddev->suspend_hi = 0; in md_clean()
6207 mddev->sync_speed_min = mddev->sync_speed_max = 0; in md_clean()
6208 mddev->recovery = 0; in md_clean()
6209 mddev->in_sync = 0; in md_clean()
6210 mddev->changed = 0; in md_clean()
6211 mddev->degraded = 0; in md_clean()
6212 mddev->safemode = 0; in md_clean()
6213 mddev->private = NULL; in md_clean()
6214 mddev->cluster_info = NULL; in md_clean()
6215 mddev->bitmap_info.offset = 0; in md_clean()
6216 mddev->bitmap_info.default_offset = 0; in md_clean()
6217 mddev->bitmap_info.default_space = 0; in md_clean()
6218 mddev->bitmap_info.chunksize = 0; in md_clean()
6219 mddev->bitmap_info.daemon_sleep = 0; in md_clean()
6220 mddev->bitmap_info.max_write_behind = 0; in md_clean()
6221 mddev->bitmap_info.nodes = 0; in md_clean()
6224 static void __md_stop_writes(struct mddev *mddev) in __md_stop_writes() argument
6226 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in __md_stop_writes()
6227 if (work_pending(&mddev->del_work)) in __md_stop_writes()
6229 if (mddev->sync_thread) { in __md_stop_writes()
6230 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in __md_stop_writes()
6231 md_reap_sync_thread(mddev); in __md_stop_writes()
6234 del_timer_sync(&mddev->safemode_timer); in __md_stop_writes()
6236 if (mddev->pers && mddev->pers->quiesce) { in __md_stop_writes()
6237 mddev->pers->quiesce(mddev, 1); in __md_stop_writes()
6238 mddev->pers->quiesce(mddev, 0); in __md_stop_writes()
6240 md_bitmap_flush(mddev); in __md_stop_writes()
6242 if (md_is_rdwr(mddev) && in __md_stop_writes()
6243 ((!mddev->in_sync && !mddev_is_clustered(mddev)) || in __md_stop_writes()
6244 mddev->sb_flags)) { in __md_stop_writes()
6246 if (!mddev_is_clustered(mddev)) in __md_stop_writes()
6247 mddev->in_sync = 1; in __md_stop_writes()
6248 md_update_sb(mddev, 1); in __md_stop_writes()
6251 mddev->serialize_policy = 0; in __md_stop_writes()
6252 mddev_destroy_serial_pool(mddev, NULL, true); in __md_stop_writes()
6255 void md_stop_writes(struct mddev *mddev) in md_stop_writes() argument
6257 mddev_lock_nointr(mddev); in md_stop_writes()
6258 __md_stop_writes(mddev); in md_stop_writes()
6259 mddev_unlock(mddev); in md_stop_writes()
6263 static void mddev_detach(struct mddev *mddev) in mddev_detach() argument
6265 md_bitmap_wait_behind_writes(mddev); in mddev_detach()
6266 if (mddev->pers && mddev->pers->quiesce && !is_md_suspended(mddev)) { in mddev_detach()
6267 mddev->pers->quiesce(mddev, 1); in mddev_detach()
6268 mddev->pers->quiesce(mddev, 0); in mddev_detach()
6270 md_unregister_thread(mddev, &mddev->thread); in mddev_detach()
6271 if (mddev->queue) in mddev_detach()
6272 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ in mddev_detach()
6275 static void __md_stop(struct mddev *mddev) in __md_stop() argument
6277 struct md_personality *pers = mddev->pers; in __md_stop()
6278 md_bitmap_destroy(mddev); in __md_stop()
6279 mddev_detach(mddev); in __md_stop()
6281 if (mddev->event_work.func) in __md_stop()
6283 spin_lock(&mddev->lock); in __md_stop()
6284 mddev->pers = NULL; in __md_stop()
6285 spin_unlock(&mddev->lock); in __md_stop()
6286 if (mddev->private) in __md_stop()
6287 pers->free(mddev, mddev->private); in __md_stop()
6288 mddev->private = NULL; in __md_stop()
6289 if (pers->sync_request && mddev->to_remove == NULL) in __md_stop()
6290 mddev->to_remove = &md_redundancy_group; in __md_stop()
6292 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in __md_stop()
6294 percpu_ref_exit(&mddev->active_io); in __md_stop()
6295 bioset_exit(&mddev->bio_set); in __md_stop()
6296 bioset_exit(&mddev->sync_set); in __md_stop()
6297 bioset_exit(&mddev->io_clone_set); in __md_stop()
6300 void md_stop(struct mddev *mddev) in md_stop() argument
6302 lockdep_assert_held(&mddev->reconfig_mutex); in md_stop()
6307 __md_stop_writes(mddev); in md_stop()
6308 __md_stop(mddev); in md_stop()
6309 percpu_ref_exit(&mddev->writes_pending); in md_stop()
6314 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) in md_set_readonly() argument
6319 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { in md_set_readonly()
6321 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
6322 md_wakeup_thread(mddev->thread); in md_set_readonly()
6324 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in md_set_readonly()
6325 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_set_readonly()
6331 md_wakeup_thread_directly(mddev->sync_thread); in md_set_readonly()
6333 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) in md_set_readonly()
6335 mddev_unlock(mddev); in md_set_readonly()
6337 &mddev->recovery)); in md_set_readonly()
6338 wait_event(mddev->sb_wait, in md_set_readonly()
6339 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); in md_set_readonly()
6340 mddev_lock_nointr(mddev); in md_set_readonly()
6342 mutex_lock(&mddev->open_mutex); in md_set_readonly()
6343 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || in md_set_readonly()
6344 mddev->sync_thread || in md_set_readonly()
6345 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { in md_set_readonly()
6346 pr_warn("md: %s still in use.\n", mdname(mddev)); in md_set_readonly()
6348 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
6349 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_set_readonly()
6350 md_wakeup_thread(mddev->thread); in md_set_readonly()
6355 if (mddev->pers) { in md_set_readonly()
6356 __md_stop_writes(mddev); in md_set_readonly()
6359 if (mddev->ro == MD_RDONLY) in md_set_readonly()
6361 mddev->ro = MD_RDONLY; in md_set_readonly()
6362 set_disk_ro(mddev->gendisk, 1); in md_set_readonly()
6363 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
6364 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_set_readonly()
6365 md_wakeup_thread(mddev->thread); in md_set_readonly()
6366 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_set_readonly()
6370 mutex_unlock(&mddev->open_mutex); in md_set_readonly()
6378 static int do_md_stop(struct mddev *mddev, int mode, in do_md_stop() argument
6381 struct gendisk *disk = mddev->gendisk; in do_md_stop()
6385 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { in do_md_stop()
6387 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in do_md_stop()
6388 md_wakeup_thread(mddev->thread); in do_md_stop()
6390 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in do_md_stop()
6391 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in do_md_stop()
6397 md_wakeup_thread_directly(mddev->sync_thread); in do_md_stop()
6399 mddev_unlock(mddev); in do_md_stop()
6400 wait_event(resync_wait, (mddev->sync_thread == NULL && in do_md_stop()
6402 &mddev->recovery))); in do_md_stop()
6403 mddev_lock_nointr(mddev); in do_md_stop()
6405 mutex_lock(&mddev->open_mutex); in do_md_stop()
6406 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || in do_md_stop()
6407 mddev->sysfs_active || in do_md_stop()
6408 mddev->sync_thread || in do_md_stop()
6409 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { in do_md_stop()
6410 pr_warn("md: %s still in use.\n", mdname(mddev)); in do_md_stop()
6411 mutex_unlock(&mddev->open_mutex); in do_md_stop()
6413 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in do_md_stop()
6414 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in do_md_stop()
6415 md_wakeup_thread(mddev->thread); in do_md_stop()
6419 if (mddev->pers) { in do_md_stop()
6420 if (!md_is_rdwr(mddev)) in do_md_stop()
6423 __md_stop_writes(mddev); in do_md_stop()
6424 __md_stop(mddev); in do_md_stop()
6427 sysfs_notify_dirent_safe(mddev->sysfs_state); in do_md_stop()
6429 rdev_for_each(rdev, mddev) in do_md_stop()
6431 sysfs_unlink_rdev(mddev, rdev); in do_md_stop()
6434 mutex_unlock(&mddev->open_mutex); in do_md_stop()
6435 mddev->changed = 1; in do_md_stop()
6437 if (!md_is_rdwr(mddev)) in do_md_stop()
6438 mddev->ro = MD_RDWR; in do_md_stop()
6440 mutex_unlock(&mddev->open_mutex); in do_md_stop()
6445 pr_info("md: %s stopped.\n", mdname(mddev)); in do_md_stop()
6447 if (mddev->bitmap_info.file) { in do_md_stop()
6448 struct file *f = mddev->bitmap_info.file; in do_md_stop()
6449 spin_lock(&mddev->lock); in do_md_stop()
6450 mddev->bitmap_info.file = NULL; in do_md_stop()
6451 spin_unlock(&mddev->lock); in do_md_stop()
6454 mddev->bitmap_info.offset = 0; in do_md_stop()
6456 export_array(mddev); in do_md_stop()
6458 md_clean(mddev); in do_md_stop()
6459 if (mddev->hold_active == UNTIL_STOP) in do_md_stop()
6460 mddev->hold_active = 0; in do_md_stop()
6463 sysfs_notify_dirent_safe(mddev->sysfs_state); in do_md_stop()
6468 static void autorun_array(struct mddev *mddev) in autorun_array() argument
6473 if (list_empty(&mddev->disks)) in autorun_array()
6478 rdev_for_each(rdev, mddev) { in autorun_array()
6483 err = do_md_run(mddev); in autorun_array()
6486 do_md_stop(mddev, 0, NULL); in autorun_array()
6505 struct mddev *mddev; in autorun_devices() local
6542 mddev = md_alloc(dev, NULL); in autorun_devices()
6543 if (IS_ERR(mddev)) in autorun_devices()
6546 if (mddev_lock(mddev)) in autorun_devices()
6547 pr_warn("md: %s locked, cannot run\n", mdname(mddev)); in autorun_devices()
6548 else if (mddev->raid_disks || mddev->major_version in autorun_devices()
6549 || !list_empty(&mddev->disks)) { in autorun_devices()
6551 mdname(mddev), rdev0->bdev); in autorun_devices()
6552 mddev_unlock(mddev); in autorun_devices()
6554 pr_debug("md: created %s\n", mdname(mddev)); in autorun_devices()
6555 mddev->persistent = 1; in autorun_devices()
6558 if (bind_rdev_to_array(rdev, mddev)) in autorun_devices()
6559 export_rdev(rdev, mddev); in autorun_devices()
6561 autorun_array(mddev); in autorun_devices()
6562 mddev_unlock(mddev); in autorun_devices()
6569 export_rdev(rdev, mddev); in autorun_devices()
6571 mddev_put(mddev); in autorun_devices()
6591 static int get_array_info(struct mddev *mddev, void __user *arg) in get_array_info() argument
6599 rdev_for_each_rcu(rdev, mddev) { in get_array_info()
6616 info.major_version = mddev->major_version; in get_array_info()
6617 info.minor_version = mddev->minor_version; in get_array_info()
6619 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); in get_array_info()
6620 info.level = mddev->level; in get_array_info()
6621 info.size = mddev->dev_sectors / 2; in get_array_info()
6622 if (info.size != mddev->dev_sectors / 2) /* overflow */ in get_array_info()
6625 info.raid_disks = mddev->raid_disks; in get_array_info()
6626 info.md_minor = mddev->md_minor; in get_array_info()
6627 info.not_persistent = !mddev->persistent; in get_array_info()
6629 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); in get_array_info()
6631 if (mddev->in_sync) in get_array_info()
6633 if (mddev->bitmap && mddev->bitmap_info.offset) in get_array_info()
6635 if (mddev_is_clustered(mddev)) in get_array_info()
6642 info.layout = mddev->layout; in get_array_info()
6643 info.chunk_size = mddev->chunk_sectors << 9; in get_array_info()
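
get_array_info() above serves the GET_ARRAY_INFO ioctl. A sketch of the user-space side; GET_ARRAY_INFO and mdu_array_info_t come from <linux/raid/md_u.h>, while the device node /dev/md0 is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/raid/md_u.h>

int main(void)
{
        mdu_array_info_t info;
        int fd = open("/dev/md0", O_RDONLY);

        if (fd < 0) {
                perror("/dev/md0");
                return 1;
        }
        if (ioctl(fd, GET_ARRAY_INFO, &info) < 0) {
                perror("GET_ARRAY_INFO");
                close(fd);
                return 1;
        }
        /* Fields filled from the mddev assignments listed above. */
        printf("level %d, %d raid disks, %d active, state 0x%x\n",
               info.level, info.raid_disks, info.active_disks, info.state);
        close(fd);
        return 0;
}
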
6651 static int get_bitmap_file(struct mddev *mddev, void __user * arg) in get_bitmap_file() argument
6662 spin_lock(&mddev->lock); in get_bitmap_file()
6664 if (mddev->bitmap_info.file) { in get_bitmap_file()
6665 ptr = file_path(mddev->bitmap_info.file, file->pathname, in get_bitmap_file()
6673 spin_unlock(&mddev->lock); in get_bitmap_file()
6683 static int get_disk_info(struct mddev *mddev, void __user * arg) in get_disk_info() argument
6692 rdev = md_find_rdev_nr_rcu(mddev, info.number); in get_disk_info()
6723 int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) in md_add_new_disk() argument
6728 if (mddev_is_clustered(mddev) && in md_add_new_disk()
6731 mdname(mddev)); in md_add_new_disk()
6738 if (!mddev->raid_disks) { in md_add_new_disk()
6741 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); in md_add_new_disk()
6747 if (!list_empty(&mddev->disks)) { in md_add_new_disk()
6749 = list_entry(mddev->disks.next, in md_add_new_disk()
6751 err = super_types[mddev->major_version] in md_add_new_disk()
6752 .load_super(rdev, rdev0, mddev->minor_version); in md_add_new_disk()
6757 export_rdev(rdev, mddev); in md_add_new_disk()
6761 err = bind_rdev_to_array(rdev, mddev); in md_add_new_disk()
6763 export_rdev(rdev, mddev); in md_add_new_disk()
6772 if (mddev->pers) { in md_add_new_disk()
6774 if (!mddev->pers->hot_add_disk) { in md_add_new_disk()
6776 mdname(mddev)); in md_add_new_disk()
6779 if (mddev->persistent) in md_add_new_disk()
6780 rdev = md_import_device(dev, mddev->major_version, in md_add_new_disk()
6781 mddev->minor_version); in md_add_new_disk()
6790 if (!mddev->persistent) { in md_add_new_disk()
6792 info->raid_disk < mddev->raid_disks) { in md_add_new_disk()
6799 super_types[mddev->major_version]. in md_add_new_disk()
6800 validate_super(mddev, rdev); in md_add_new_disk()
6806 export_rdev(rdev, mddev); in md_add_new_disk()
6825 rdev_for_each(rdev2, mddev) { in md_add_new_disk()
6831 if (has_journal || mddev->bitmap) { in md_add_new_disk()
6832 export_rdev(rdev, mddev); in md_add_new_disk()
6840 if (mddev_is_clustered(mddev)) { in md_add_new_disk()
6845 err = md_cluster_ops->add_new_disk(mddev, rdev); in md_add_new_disk()
6847 export_rdev(rdev, mddev); in md_add_new_disk()
6854 err = bind_rdev_to_array(rdev, mddev); in md_add_new_disk()
6857 export_rdev(rdev, mddev); in md_add_new_disk()
6859 if (mddev_is_clustered(mddev)) { in md_add_new_disk()
6862 err = md_cluster_ops->new_disk_ack(mddev, in md_add_new_disk()
6869 md_cluster_ops->add_new_disk_cancel(mddev); in md_add_new_disk()
6883 if (mddev->major_version != 0) { in md_add_new_disk()
6884 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev)); in md_add_new_disk()
6897 if (info->raid_disk < mddev->raid_disks) in md_add_new_disk()
6902 if (rdev->raid_disk < mddev->raid_disks) in md_add_new_disk()
6911 if (!mddev->persistent) { in md_add_new_disk()
6918 err = bind_rdev_to_array(rdev, mddev); in md_add_new_disk()
6920 export_rdev(rdev, mddev); in md_add_new_disk()
6928 static int hot_remove_disk(struct mddev *mddev, dev_t dev) in hot_remove_disk() argument
6932 if (!mddev->pers) in hot_remove_disk()
6935 rdev = find_rdev(mddev, dev); in hot_remove_disk()
6943 remove_and_add_spares(mddev, rdev); in hot_remove_disk()
6949 if (mddev_is_clustered(mddev)) { in hot_remove_disk()
6950 if (md_cluster_ops->remove_disk(mddev, rdev)) in hot_remove_disk()
6955 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in hot_remove_disk()
6956 if (mddev->thread) in hot_remove_disk()
6957 md_wakeup_thread(mddev->thread); in hot_remove_disk()
6959 md_update_sb(mddev, 1); in hot_remove_disk()
6965 rdev->bdev, mdname(mddev)); in hot_remove_disk()
6969 static int hot_add_disk(struct mddev *mddev, dev_t dev) in hot_add_disk() argument
6974 if (!mddev->pers) in hot_add_disk()
6977 if (mddev->major_version != 0) { in hot_add_disk()
6979 mdname(mddev)); in hot_add_disk()
6982 if (!mddev->pers->hot_add_disk) { in hot_add_disk()
6984 mdname(mddev)); in hot_add_disk()
6995 if (mddev->persistent) in hot_add_disk()
7004 rdev->bdev, mdname(mddev)); in hot_add_disk()
7012 err = bind_rdev_to_array(rdev, mddev); in hot_add_disk()
7023 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in hot_add_disk()
7024 if (!mddev->thread) in hot_add_disk()
7025 md_update_sb(mddev, 1); in hot_add_disk()
7032 mdname(mddev), rdev->bdev); in hot_add_disk()
7033 blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue); in hot_add_disk()
7039 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in hot_add_disk()
7040 md_wakeup_thread(mddev->thread); in hot_add_disk()
7045 export_rdev(rdev, mddev); in hot_add_disk()
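
hot_remove_disk() and hot_add_disk() above sit behind ioctls of the same names; the argument is a device number decoded by new_decode_dev(), so for majors and minors below 256 the traditional (major << 8) | minor encoding suffices. A sketch; both device paths are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <unistd.h>
#include <linux/raid/md_u.h>

int main(void)
{
        struct stat st;
        int fd = open("/dev/md0", O_RDONLY);

        if (fd < 0 || stat("/dev/sdb1", &st) < 0) {
                perror("open/stat");
                return 1;
        }
        /* Old-style dev encoding; fine while major/minor < 256. */
        unsigned long dev = (major(st.st_rdev) << 8) | minor(st.st_rdev);

        if (ioctl(fd, HOT_REMOVE_DISK, dev) < 0)
                perror("HOT_REMOVE_DISK");  /* disk must be failed/spare */
        close(fd);
        return 0;
}
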
7049 static int set_bitmap_file(struct mddev *mddev, int fd) in set_bitmap_file() argument
7053 if (mddev->pers) { in set_bitmap_file()
7054 if (!mddev->pers->quiesce || !mddev->thread) in set_bitmap_file()
7056 if (mddev->recovery || mddev->sync_thread) in set_bitmap_file()
7065 if (mddev->bitmap || mddev->bitmap_info.file) in set_bitmap_file()
7070 mdname(mddev)); in set_bitmap_file()
7074 mdname(mddev)); in set_bitmap_file()
7080 mdname(mddev)); in set_bitmap_file()
7087 mdname(mddev)); in set_bitmap_file()
7091 mdname(mddev)); in set_bitmap_file()
7095 mdname(mddev)); in set_bitmap_file()
7102 mddev->bitmap_info.file = f; in set_bitmap_file()
7103 mddev->bitmap_info.offset = 0; /* file overrides offset */ in set_bitmap_file()
7104 } else if (mddev->bitmap == NULL) in set_bitmap_file()
7107 if (mddev->pers) { in set_bitmap_file()
7111 bitmap = md_bitmap_create(mddev, -1); in set_bitmap_file()
7112 mddev_suspend(mddev); in set_bitmap_file()
7114 mddev->bitmap = bitmap; in set_bitmap_file()
7115 err = md_bitmap_load(mddev); in set_bitmap_file()
7119 md_bitmap_destroy(mddev); in set_bitmap_file()
7122 mddev_resume(mddev); in set_bitmap_file()
7124 mddev_suspend(mddev); in set_bitmap_file()
7125 md_bitmap_destroy(mddev); in set_bitmap_file()
7126 mddev_resume(mddev); in set_bitmap_file()
7130 struct file *f = mddev->bitmap_info.file; in set_bitmap_file()
7132 spin_lock(&mddev->lock); in set_bitmap_file()
7133 mddev->bitmap_info.file = NULL; in set_bitmap_file()
7134 spin_unlock(&mddev->lock); in set_bitmap_file()
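
set_bitmap_file() above is driven by the SET_BITMAP_FILE ioctl: the argument is an open descriptor for the bitmap file, or a negative value to drop a file-backed bitmap. A sketch; the paths are assumptions, and per the checks above the array must not already have a bitmap:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/raid/md_u.h>

int main(void)
{
        int md = open("/dev/md0", O_RDONLY);
        int bm = open("/var/lib/md0-bitmap", O_RDWR);

        if (md < 0 || bm < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(md, SET_BITMAP_FILE, bm) < 0)
                perror("SET_BITMAP_FILE");
        close(bm);      /* the kernel holds its own file reference */
        close(md);
        return 0;
}
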
7155 int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info) in md_set_array_info() argument
7167 mddev->major_version = info->major_version; in md_set_array_info()
7168 mddev->minor_version = info->minor_version; in md_set_array_info()
7169 mddev->patch_version = info->patch_version; in md_set_array_info()
7170 mddev->persistent = !info->not_persistent; in md_set_array_info()
7174 mddev->ctime = ktime_get_real_seconds(); in md_set_array_info()
7177 mddev->major_version = MD_MAJOR_VERSION; in md_set_array_info()
7178 mddev->minor_version = MD_MINOR_VERSION; in md_set_array_info()
7179 mddev->patch_version = MD_PATCHLEVEL_VERSION; in md_set_array_info()
7180 mddev->ctime = ktime_get_real_seconds(); in md_set_array_info()
7182 mddev->level = info->level; in md_set_array_info()
7183 mddev->clevel[0] = 0; in md_set_array_info()
7184 mddev->dev_sectors = 2 * (sector_t)info->size; in md_set_array_info()
7185 mddev->raid_disks = info->raid_disks; in md_set_array_info()
7190 mddev->recovery_cp = MaxSector; in md_set_array_info()
7192 mddev->recovery_cp = 0; in md_set_array_info()
7193 mddev->persistent = !info->not_persistent; in md_set_array_info()
7194 mddev->external = 0; in md_set_array_info()
7196 mddev->layout = info->layout; in md_set_array_info()
7197 if (mddev->level == 0) in md_set_array_info()
7199 mddev->layout = -1; in md_set_array_info()
7200 mddev->chunk_sectors = info->chunk_size >> 9; in md_set_array_info()
7202 if (mddev->persistent) { in md_set_array_info()
7203 mddev->max_disks = MD_SB_DISKS; in md_set_array_info()
7204 mddev->flags = 0; in md_set_array_info()
7205 mddev->sb_flags = 0; in md_set_array_info()
7207 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_set_array_info()
7209 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; in md_set_array_info()
7210 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); in md_set_array_info()
7211 mddev->bitmap_info.offset = 0; in md_set_array_info()
7213 mddev->reshape_position = MaxSector; in md_set_array_info()
7218 get_random_bytes(mddev->uuid, 16); in md_set_array_info()
7220 mddev->new_level = mddev->level; in md_set_array_info()
7221 mddev->new_chunk_sectors = mddev->chunk_sectors; in md_set_array_info()
7222 mddev->new_layout = mddev->layout; in md_set_array_info()
7223 mddev->delta_disks = 0; in md_set_array_info()
7224 mddev->reshape_backwards = 0; in md_set_array_info()
7229 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) in md_set_array_sectors() argument
7231 lockdep_assert_held(&mddev->reconfig_mutex); in md_set_array_sectors()
7233 if (mddev->external_size) in md_set_array_sectors()
7236 mddev->array_sectors = array_sectors; in md_set_array_sectors()
7240 static int update_size(struct mddev *mddev, sector_t num_sectors) in update_size() argument
7245 sector_t old_dev_sectors = mddev->dev_sectors; in update_size()
7247 if (mddev->pers->resize == NULL) in update_size()
7258 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in update_size()
7259 mddev->sync_thread) in update_size()
7261 if (!md_is_rdwr(mddev)) in update_size()
7264 rdev_for_each(rdev, mddev) { in update_size()
7272 rv = mddev->pers->resize(mddev, num_sectors); in update_size()
7274 if (mddev_is_clustered(mddev)) in update_size()
7275 md_cluster_ops->update_size(mddev, old_dev_sectors); in update_size()
7276 else if (mddev->queue) { in update_size()
7277 set_capacity_and_notify(mddev->gendisk, in update_size()
7278 mddev->array_sectors); in update_size()
7284 static int update_raid_disks(struct mddev *mddev, int raid_disks) in update_raid_disks() argument
7289 if (mddev->pers->check_reshape == NULL) in update_raid_disks()
7291 if (!md_is_rdwr(mddev)) in update_raid_disks()
7294 (mddev->max_disks && raid_disks >= mddev->max_disks)) in update_raid_disks()
7296 if (mddev->sync_thread || in update_raid_disks()
7297 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in update_raid_disks()
7298 test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) || in update_raid_disks()
7299 mddev->reshape_position != MaxSector) in update_raid_disks()
7302 rdev_for_each(rdev, mddev) { in update_raid_disks()
7303 if (mddev->raid_disks < raid_disks && in update_raid_disks()
7306 if (mddev->raid_disks > raid_disks && in update_raid_disks()
7311 mddev->delta_disks = raid_disks - mddev->raid_disks; in update_raid_disks()
7312 if (mddev->delta_disks < 0) in update_raid_disks()
7313 mddev->reshape_backwards = 1; in update_raid_disks()
7314 else if (mddev->delta_disks > 0) in update_raid_disks()
7315 mddev->reshape_backwards = 0; in update_raid_disks()
7317 rv = mddev->pers->check_reshape(mddev); in update_raid_disks()
7319 mddev->delta_disks = 0; in update_raid_disks()
7320 mddev->reshape_backwards = 0; in update_raid_disks()
7333 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) in update_array_info() argument
7340 if (mddev->bitmap && mddev->bitmap_info.offset) in update_array_info()
7343 if (mddev->major_version != info->major_version || in update_array_info()
7344 mddev->minor_version != info->minor_version || in update_array_info()
7346 mddev->ctime != info->ctime || in update_array_info()
7347 mddev->level != info->level || in update_array_info()
7349 mddev->persistent != !info->not_persistent || in update_array_info()
7350 mddev->chunk_sectors != info->chunk_size >> 9 || in update_array_info()
7356 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) in update_array_info()
7358 if (mddev->raid_disks != info->raid_disks) in update_array_info()
7360 if (mddev->layout != info->layout) in update_array_info()
7369 if (mddev->layout != info->layout) { in update_array_info()
7374 if (mddev->pers->check_reshape == NULL) in update_array_info()
7377 mddev->new_layout = info->layout; in update_array_info()
7378 rv = mddev->pers->check_reshape(mddev); in update_array_info()
7380 mddev->new_layout = mddev->layout; in update_array_info()
7384 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) in update_array_info()
7385 rv = update_size(mddev, (sector_t)info->size * 2); in update_array_info()
7387 if (mddev->raid_disks != info->raid_disks) in update_array_info()
7388 rv = update_raid_disks(mddev, info->raid_disks); in update_array_info()
7391 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) { in update_array_info()
7395 if (mddev->recovery || mddev->sync_thread) { in update_array_info()
7402 if (mddev->bitmap) { in update_array_info()
7406 if (mddev->bitmap_info.default_offset == 0) { in update_array_info()
7410 mddev->bitmap_info.offset = in update_array_info()
7411 mddev->bitmap_info.default_offset; in update_array_info()
7412 mddev->bitmap_info.space = in update_array_info()
7413 mddev->bitmap_info.default_space; in update_array_info()
7414 bitmap = md_bitmap_create(mddev, -1); in update_array_info()
7415 mddev_suspend(mddev); in update_array_info()
7417 mddev->bitmap = bitmap; in update_array_info()
7418 rv = md_bitmap_load(mddev); in update_array_info()
7422 md_bitmap_destroy(mddev); in update_array_info()
7423 mddev_resume(mddev); in update_array_info()
7426 if (!mddev->bitmap) { in update_array_info()
7430 if (mddev->bitmap->storage.file) { in update_array_info()
7434 if (mddev->bitmap_info.nodes) { in update_array_info()
7436 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) { in update_array_info()
7439 md_cluster_ops->unlock_all_bitmaps(mddev); in update_array_info()
7443 mddev->bitmap_info.nodes = 0; in update_array_info()
7444 md_cluster_ops->leave(mddev); in update_array_info()
7446 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; in update_array_info()
7448 mddev_suspend(mddev); in update_array_info()
7449 md_bitmap_destroy(mddev); in update_array_info()
7450 mddev_resume(mddev); in update_array_info()
7451 mddev->bitmap_info.offset = 0; in update_array_info()
7454 md_update_sb(mddev, 1); in update_array_info()
7460 static int set_disk_faulty(struct mddev *mddev, dev_t dev) in set_disk_faulty() argument
7465 if (mddev->pers == NULL) in set_disk_faulty()
7469 rdev = md_find_rdev_rcu(mddev, dev); in set_disk_faulty()
7473 md_error(mddev, rdev); in set_disk_faulty()
7474 if (test_bit(MD_BROKEN, &mddev->flags)) in set_disk_faulty()
7489 struct mddev *mddev = bdev->bd_disk->private_data; in md_getgeo() local
7493 geo->cylinders = mddev->array_sectors / 8; in md_getgeo()
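
md_getgeo() above backs the generic HDIO_GETGEO ioctl; md reports a fixed geometry of 2 heads and 4 sectors per track, so cylinders come out as array_sectors / 8. A sketch; the device node is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/hdreg.h>

int main(void)
{
        struct hd_geometry geo;
        int fd = open("/dev/md0", O_RDONLY);

        if (fd < 0) {
                perror("/dev/md0");
                return 1;
        }
        if (ioctl(fd, HDIO_GETGEO, &geo) < 0) {
                perror("HDIO_GETGEO");
                close(fd);
                return 1;
        }
        printf("%u heads, %u sectors/track, %u cylinders\n",
               geo.heads, geo.sectors, (unsigned)geo.cylinders);
        close(fd);
        return 0;
}
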
7521 static int __md_set_array_info(struct mddev *mddev, void __user *argp) in __md_set_array_info() argument
7531 if (mddev->pers) { in __md_set_array_info()
7532 err = update_array_info(mddev, &info); in __md_set_array_info()
7538 if (!list_empty(&mddev->disks)) { in __md_set_array_info()
7539 pr_warn("md: array %s already has disks!\n", mdname(mddev)); in __md_set_array_info()
7543 if (mddev->raid_disks) { in __md_set_array_info()
7544 pr_warn("md: array %s already initialised!\n", mdname(mddev)); in __md_set_array_info()
7548 err = md_set_array_info(mddev, &info); in __md_set_array_info()
7560 struct mddev *mddev = NULL; in md_ioctl() local
7591 mddev = bdev->bd_disk->private_data; in md_ioctl()
7593 if (!mddev) { in md_ioctl()
7601 if (!mddev->raid_disks && !mddev->external) in md_ioctl()
7604 err = get_array_info(mddev, argp); in md_ioctl()
7608 if (!mddev->raid_disks && !mddev->external) in md_ioctl()
7611 err = get_disk_info(mddev, argp); in md_ioctl()
7615 err = set_disk_faulty(mddev, new_decode_dev(arg)); in md_ioctl()
7619 err = get_bitmap_file(mddev, argp); in md_ioctl()
7626 wait_event_interruptible_timeout(mddev->sb_wait, in md_ioctl()
7628 &mddev->recovery), in md_ioctl()
7634 mutex_lock(&mddev->open_mutex); in md_ioctl()
7635 if (mddev->pers && atomic_read(&mddev->openers) > 1) { in md_ioctl()
7636 mutex_unlock(&mddev->open_mutex); in md_ioctl()
7640 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) { in md_ioctl()
7641 mutex_unlock(&mddev->open_mutex); in md_ioctl()
7646 mutex_unlock(&mddev->open_mutex); in md_ioctl()
7649 err = mddev_lock(mddev); in md_ioctl()
7657 err = __md_set_array_info(mddev, argp); in md_ioctl()
7666 if ((!mddev->raid_disks && !mddev->external) in md_ioctl()
7679 err = restart_array(mddev); in md_ioctl()
7683 err = do_md_stop(mddev, 0, bdev); in md_ioctl()
7687 err = md_set_readonly(mddev, bdev); in md_ioctl()
7691 err = hot_remove_disk(mddev, new_decode_dev(arg)); in md_ioctl()
7699 if (mddev->pers) { in md_ioctl()
7707 err = md_add_new_disk(mddev, &info); in md_ioctl()
7717 if (!md_is_rdwr(mddev) && mddev->pers) { in md_ioctl()
7718 if (mddev->ro != MD_AUTO_READ) { in md_ioctl()
7722 mddev->ro = MD_RDWR; in md_ioctl()
7723 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_ioctl()
7724 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_ioctl()
7729 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) { in md_ioctl()
7730 mddev_unlock(mddev); in md_ioctl()
7731 wait_event(mddev->sb_wait, in md_ioctl()
7732 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) && in md_ioctl()
7733 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); in md_ioctl()
7734 mddev_lock_nointr(mddev); in md_ioctl()
7745 err = md_add_new_disk(mddev, &info); in md_ioctl()
7750 if (mddev_is_clustered(mddev)) in md_ioctl()
7751 md_cluster_ops->new_disk_ack(mddev, false); in md_ioctl()
7757 err = hot_add_disk(mddev, new_decode_dev(arg)); in md_ioctl()
7761 err = do_md_run(mddev); in md_ioctl()
7765 err = set_bitmap_file(mddev, (int)arg); in md_ioctl()
7774 if (mddev->hold_active == UNTIL_IOCTL && in md_ioctl()
7776 mddev->hold_active = 0; in md_ioctl()
7777 mddev_unlock(mddev); in md_ioctl()
7780 clear_bit(MD_CLOSING, &mddev->flags); in md_ioctl()
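
md_ioctl() above dispatches, among others, STOP_ARRAY to do_md_stop() and STOP_ARRAY_RO to md_set_readonly(). A sketch that stops an array; while other openers exist it fails with EBUSY, matching the openers checks above (device node assumed):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/raid/md_u.h>

int main(void)
{
        int fd = open("/dev/md0", O_RDONLY);

        if (fd < 0) {
                perror("/dev/md0");
                return 1;
        }
        if (ioctl(fd, STOP_ARRAY, NULL) < 0)
                fprintf(stderr, "STOP_ARRAY: %s\n",
                        errno == EBUSY ? "array still in use"
                                       : strerror(errno));
        close(fd);
        return 0;
}
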
7805 struct mddev *mddev = bdev->bd_disk->private_data; in md_set_read_only() local
7808 err = mddev_lock(mddev); in md_set_read_only()
7812 if (!mddev->raid_disks && !mddev->external) { in md_set_read_only()
7821 if (!ro && mddev->ro == MD_RDONLY && mddev->pers) { in md_set_read_only()
7822 err = restart_array(mddev); in md_set_read_only()
7825 mddev->ro = MD_AUTO_READ; in md_set_read_only()
7829 mddev_unlock(mddev); in md_set_read_only()
7835 struct mddev *mddev; in md_open() local
7839 mddev = mddev_get(disk->private_data); in md_open()
7841 if (!mddev) in md_open()
7844 err = mutex_lock_interruptible(&mddev->open_mutex); in md_open()
7849 if (test_bit(MD_CLOSING, &mddev->flags)) in md_open()
7852 atomic_inc(&mddev->openers); in md_open()
7853 mutex_unlock(&mddev->open_mutex); in md_open()
7859 mutex_unlock(&mddev->open_mutex); in md_open()
7861 mddev_put(mddev); in md_open()
7867 struct mddev *mddev = disk->private_data; in md_release() local
7869 BUG_ON(!mddev); in md_release()
7870 atomic_dec(&mddev->openers); in md_release()
7871 mddev_put(mddev); in md_release()
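
Sketch: the open gating used by md_open()/md_release(), reduced to userspace primitives (a pthread mutex for open_mutex, an atomic flag for MD_CLOSING). Illustrative names, not the kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>
#include <stdio.h>

struct dev {
	pthread_mutex_t open_mutex;
	atomic_int openers;
	atomic_bool closing;		/* stands in for MD_CLOSING */
};

static int dev_open(struct dev *d)
{
	pthread_mutex_lock(&d->open_mutex);
	if (atomic_load(&d->closing)) {	/* a stop is in progress */
		pthread_mutex_unlock(&d->open_mutex);
		return -1;		/* the kernel returns -EBUSY */
	}
	atomic_fetch_add(&d->openers, 1);
	pthread_mutex_unlock(&d->open_mutex);
	return 0;
}

static void dev_release(struct dev *d)
{
	atomic_fetch_sub(&d->openers, 1);	/* as md_release() does */
}

int main(void)
{
	struct dev d = { PTHREAD_MUTEX_INITIALIZER, 0, false };

	if (dev_open(&d) == 0)
		dev_release(&d);
	printf("openers now %d\n", atomic_load(&d.openers));
	return 0;
}
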
7876 struct mddev *mddev = disk->private_data; in md_check_events() local
7879 if (mddev->changed) in md_check_events()
7881 mddev->changed = 0; in md_check_events()
7887 struct mddev *mddev = disk->private_data; in md_free_disk() local
7889 percpu_ref_exit(&mddev->writes_pending); in md_free_disk()
7890 mddev_free(mddev); in md_free_disk()
7979 struct mddev *mddev, const char *name) in md_register_thread() argument
7990 thread->mddev = mddev; in md_register_thread()
7994 mdname(thread->mddev), in md_register_thread()
8004 void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **threadp) in md_unregister_thread() argument
8007 lockdep_is_held(&mddev->reconfig_mutex)); in md_unregister_thread()
8021 void md_error(struct mddev *mddev, struct md_rdev *rdev) in md_error() argument
8026 if (!mddev->pers || !mddev->pers->error_handler) in md_error()
8028 mddev->pers->error_handler(mddev, rdev); in md_error()
8030 if (mddev->pers->level == 0 || mddev->pers->level == LEVEL_LINEAR) in md_error()
8033 if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags)) in md_error()
8034 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_error()
8036 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_error()
8037 if (!test_bit(MD_BROKEN, &mddev->flags)) { in md_error()
8038 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_error()
8039 md_wakeup_thread(mddev->thread); in md_error()
8041 if (mddev->event_work.func) in md_error()
8042 queue_work(md_misc_wq, &mddev->event_work); in md_error()
8066 static int status_resync(struct seq_file *seq, struct mddev *mddev) in status_resync() argument
8074 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in status_resync()
8075 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in status_resync()
8076 max_sectors = mddev->resync_max_sectors; in status_resync()
8078 max_sectors = mddev->dev_sectors; in status_resync()
8080 resync = mddev->curr_resync; in status_resync()
8082 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) in status_resync()
8088 res = atomic_read(&mddev->recovery_active); in status_resync()
8101 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) { in status_resync()
8104 rdev_for_each(rdev, mddev) in status_resync()
8112 if (mddev->reshape_position != MaxSector) in status_resync()
8118 if (mddev->recovery_cp < MaxSector) { in status_resync()
8155 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? in status_resync()
8157 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? in status_resync()
8159 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? in status_resync()
8182 dt = ((jiffies - mddev->resync_mark) / HZ); in status_resync()
8185 curr_mark_cnt = mddev->curr_mark_cnt; in status_resync()
8186 recovery_active = atomic_read(&mddev->recovery_active); in status_resync()
8187 resync_mark_cnt = mddev->resync_mark_cnt; in status_resync()
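
Sketch: the speed figure status_resync() prints for /proc/mdstat, rebuilt from the marks sampled above. The db computation and the final "speed=%ldK/sec" output are elided from the match; sector counts are 512-byte units, so dividing by 2 yields KiB. Sample values are invented:

#include <stdio.h>

int main(void)
{
	long hz = 100;				/* stand-in for HZ */
	unsigned long jiffies = 123456, resync_mark = 103456;
	unsigned long curr_mark_cnt = 9000000, resync_mark_cnt = 1000000;
	unsigned long recovery_active = 2048;	/* sectors still in flight */

	long dt = (jiffies - resync_mark) / hz;
	if (!dt)
		dt++;
	long db = curr_mark_cnt - (recovery_active + resync_mark_cnt);

	printf("speed=%ldK/sec\n", db / 2 / dt);	/* as status_resync() does */
	return 0;
}
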
8208 struct mddev *mddev; in md_seq_start() local
8223 mddev = list_entry(tmp, struct mddev, all_mddevs); in md_seq_start()
8224 if (!mddev_get(mddev)) in md_seq_start()
8227 return mddev; in md_seq_start()
8238 struct mddev *next_mddev, *mddev = v; in md_seq_next() local
8239 struct mddev *to_put = NULL; in md_seq_next()
8249 to_put = mddev; in md_seq_next()
8250 tmp = mddev->all_mddevs.next; in md_seq_next()
8259 next_mddev = list_entry(tmp, struct mddev, all_mddevs); in md_seq_next()
8262 mddev = next_mddev; in md_seq_next()
8263 tmp = mddev->all_mddevs.next; in md_seq_next()
8275 struct mddev *mddev = v; in md_seq_stop() local
8277 if (mddev && v != (void*)1 && v != (void*)2) in md_seq_stop()
8278 mddev_put(mddev); in md_seq_stop()
8283 struct mddev *mddev = v; in md_seq_show() local
8304 spin_lock(&mddev->lock); in md_seq_show()
8305 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { in md_seq_show()
8306 seq_printf(seq, "%s : %sactive", mdname(mddev), in md_seq_show()
8307 mddev->pers ? "" : "in"); in md_seq_show()
8308 if (mddev->pers) { in md_seq_show()
8309 if (mddev->ro == MD_RDONLY) in md_seq_show()
8311 if (mddev->ro == MD_AUTO_READ) in md_seq_show()
8313 seq_printf(seq, " %s", mddev->pers->name); in md_seq_show()
8318 rdev_for_each_rcu(rdev, mddev) { in md_seq_show()
8337 if (!list_empty(&mddev->disks)) { in md_seq_show()
8338 if (mddev->pers) in md_seq_show()
8341 mddev->array_sectors / 2); in md_seq_show()
8346 if (mddev->persistent) { in md_seq_show()
8347 if (mddev->major_version != 0 || in md_seq_show()
8348 mddev->minor_version != 90) { in md_seq_show()
8350 mddev->major_version, in md_seq_show()
8351 mddev->minor_version); in md_seq_show()
8353 } else if (mddev->external) in md_seq_show()
8355 mddev->metadata_type); in md_seq_show()
8359 if (mddev->pers) { in md_seq_show()
8360 mddev->pers->status(seq, mddev); in md_seq_show()
8362 if (mddev->pers->sync_request) { in md_seq_show()
8363 if (status_resync(seq, mddev)) in md_seq_show()
8369 md_bitmap_status(seq, mddev->bitmap); in md_seq_show()
8373 spin_unlock(&mddev->lock); in md_seq_show()
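
md_seq_show() is the body behind /proc/mdstat, so reading that file from userspace drives the whole md_seq_start()/md_seq_next()/md_seq_stop() walk over all_mddevs (the (void*)1 and (void*)2 sentinels above mark the header and footer positions). Trivial reader:

#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/mdstat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
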
8471 int md_setup_cluster(struct mddev *mddev, int nodes) in md_setup_cluster() argument
8485 ret = md_cluster_ops->join(mddev, nodes); in md_setup_cluster()
8487 mddev->safemode_delay = 0; in md_setup_cluster()
8491 void md_cluster_stop(struct mddev *mddev) in md_cluster_stop() argument
8495 md_cluster_ops->leave(mddev); in md_cluster_stop()
8499 static int is_mddev_idle(struct mddev *mddev, int init) in is_mddev_idle() argument
8507 rdev_for_each_rcu(rdev, mddev) { in is_mddev_idle()
8542 void md_done_sync(struct mddev *mddev, int blocks, int ok) in md_done_sync() argument
8545 atomic_sub(blocks, &mddev->recovery_active); in md_done_sync()
8546 wake_up(&mddev->recovery_wait); in md_done_sync()
8548 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_done_sync()
8549 set_bit(MD_RECOVERY_ERROR, &mddev->recovery); in md_done_sync()
8550 md_wakeup_thread(mddev->thread); in md_done_sync()
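
Sketch: the in-flight accounting pair around sync I/O. md_do_sync() adds to recovery_active as it issues each chunk, and md_done_sync() subtracts on completion and wakes recovery_wait so waiters can see the counter drain. Userspace reduction with an atomic counter:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long recovery_active;

static void issue_sync_io(long blocks)		/* md_do_sync() side */
{
	atomic_fetch_add(&recovery_active, blocks);
}

static void done_sync(long blocks, int ok)	/* md_done_sync() side */
{
	atomic_fetch_sub(&recovery_active, blocks);
	/* kernel: wake_up(&mddev->recovery_wait); on !ok it also sets
	 * MD_RECOVERY_INTR and MD_RECOVERY_ERROR and wakes the thread */
	(void)ok;
}

int main(void)
{
	issue_sync_io(128);
	done_sync(128, 1);
	printf("in flight: %ld\n", atomic_load(&recovery_active));
	return 0;
}
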
8563 bool md_write_start(struct mddev *mddev, struct bio *bi) in md_write_start() argument
8570 BUG_ON(mddev->ro == MD_RDONLY); in md_write_start()
8571 if (mddev->ro == MD_AUTO_READ) { in md_write_start()
8573 mddev->ro = MD_RDWR; in md_write_start()
8574 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_write_start()
8575 md_wakeup_thread(mddev->thread); in md_write_start()
8576 md_wakeup_thread(mddev->sync_thread); in md_write_start()
8580 percpu_ref_get(&mddev->writes_pending); in md_write_start()
8582 if (mddev->safemode == 1) in md_write_start()
8583 mddev->safemode = 0; in md_write_start()
8585 if (mddev->in_sync || mddev->sync_checkers) { in md_write_start()
8586 spin_lock(&mddev->lock); in md_write_start()
8587 if (mddev->in_sync) { in md_write_start()
8588 mddev->in_sync = 0; in md_write_start()
8589 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_write_start()
8590 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_write_start()
8591 md_wakeup_thread(mddev->thread); in md_write_start()
8594 spin_unlock(&mddev->lock); in md_write_start()
8598 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_write_start()
8599 if (!mddev->has_superblocks) in md_write_start()
8601 wait_event(mddev->sb_wait, in md_write_start()
8602 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || in md_write_start()
8603 is_md_suspended(mddev)); in md_write_start()
8604 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { in md_write_start()
8605 percpu_ref_put(&mddev->writes_pending); in md_write_start()
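
Sketch: the clean-to-dirty transition in md_write_start(), with a plain mutex standing in for mddev->lock; the sb_wait handshake is represented only by a comment. Illustrative, not the kernel API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct array {
	pthread_mutex_t lock;
	bool in_sync;		/* "clean" bit mirrored in the superblock */
	bool sb_change_pending;
};

static void write_start(struct array *a)
{
	pthread_mutex_lock(&a->lock);
	if (a->in_sync) {
		a->in_sync = false;		/* array is now dirty */
		a->sb_change_pending = true;	/* superblock must say so */
		/* kernel: wake mddev->thread, then wait on sb_wait until
		 * MD_SB_CHANGE_PENDING clears before letting the write go */
	}
	pthread_mutex_unlock(&a->lock);
}

int main(void)
{
	struct array a = { PTHREAD_MUTEX_INITIALIZER, true, false };

	write_start(&a);
	printf("in_sync=%d pending=%d\n", a.in_sync, a.sb_change_pending);
	return 0;
}
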
8620 void md_write_inc(struct mddev *mddev, struct bio *bi) in md_write_inc() argument
8624 WARN_ON_ONCE(mddev->in_sync || !md_is_rdwr(mddev)); in md_write_inc()
8625 percpu_ref_get(&mddev->writes_pending); in md_write_inc()
8629 void md_write_end(struct mddev *mddev) in md_write_end() argument
8631 percpu_ref_put(&mddev->writes_pending); in md_write_end()
8633 if (mddev->safemode == 2) in md_write_end()
8634 md_wakeup_thread(mddev->thread); in md_write_end()
8635 else if (mddev->safemode_delay) in md_write_end()
8639 mod_timer(&mddev->safemode_timer, in md_write_end()
8640 roundup(jiffies, mddev->safemode_delay) + in md_write_end()
8641 mddev->safemode_delay); in md_write_end()
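
Sketch: the timer arithmetic in md_write_end(). Rounding jiffies up to a multiple of safemode_delay before adding the delay makes writes that land in the same window compute the same expiry, so mod_timer() is not effectively re-arming the safemode timer per write; roundup() is reimplemented here under that assumption:

#include <stdio.h>

static unsigned long roundup_to(unsigned long x, unsigned long step)
{
	return ((x + step - 1) / step) * step;	/* kernel roundup() for this use */
}

int main(void)
{
	unsigned long safemode_delay = 200;		/* jiffies, ~2s at HZ=100 */
	unsigned long writes[] = { 1001, 1090, 1180 };	/* one delay window */

	/* all three compute the same expiry (1400), so the timer target
	 * is unchanged for every write inside the window */
	for (int i = 0; i < 3; i++)
		printf("jiffies=%lu -> expiry=%lu\n", writes[i],
		       roundup_to(writes[i], safemode_delay) + safemode_delay);
	return 0;
}
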
8647 void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, in md_submit_discard_bio() argument
8658 if (mddev->gendisk) in md_submit_discard_bio()
8660 disk_devt(mddev->gendisk), in md_submit_discard_bio()
8670 struct mddev *mddev = md_io_clone->mddev; in md_end_clone_io() local
8679 percpu_ref_put(&mddev->active_io); in md_end_clone_io()
8682 static void md_clone_bio(struct mddev *mddev, struct bio **bio) in md_clone_bio() argument
8687 bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_clone_set); in md_clone_bio()
8691 md_io_clone->mddev = mddev; in md_clone_bio()
8700 void md_account_bio(struct mddev *mddev, struct bio **bio) in md_account_bio() argument
8702 percpu_ref_get(&mddev->active_io); in md_account_bio()
8703 md_clone_bio(mddev, bio); in md_account_bio()
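
Sketch: the accounting wrapper pattern of md_account_bio()/md_clone_bio(): take a reference on the array's active I/O, swap the caller's bio for a clone, and have the clone's completion drop the reference before finishing the original. Reduced to plain C with an atomic counter in place of the percpu_ref:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct bio { void (*end_io)(struct bio *); struct bio *orig; };

static atomic_int active_io;	/* percpu_ref stand-in */

static void clone_end_io(struct bio *clone)
{
	struct bio *orig = clone->orig;

	free(clone);
	atomic_fetch_sub(&active_io, 1);	/* percpu_ref_put() */
	if (orig->end_io)
		orig->end_io(orig);		/* finish the original bio */
}

static void account_bio(struct bio **bio)	/* md_account_bio() shape */
{
	struct bio *clone = malloc(sizeof(*clone));

	if (!clone)
		return;				/* keep the sketch simple */
	atomic_fetch_add(&active_io, 1);	/* percpu_ref_get() */
	clone->end_io = clone_end_io;
	clone->orig = *bio;
	*bio = clone;				/* caller now holds the clone */
}

static void orig_done(struct bio *b) { (void)b; puts("original completed"); }

int main(void)
{
	struct bio orig = { orig_done, NULL };
	struct bio *b = &orig;

	account_bio(&b);
	b->end_io(b);	/* the "device" completes the clone */
	printf("active_io now %d\n", atomic_load(&active_io));
	return 0;
}
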
8713 void md_allow_write(struct mddev *mddev) in md_allow_write() argument
8715 if (!mddev->pers) in md_allow_write()
8717 if (!md_is_rdwr(mddev)) in md_allow_write()
8719 if (!mddev->pers->sync_request) in md_allow_write()
8722 spin_lock(&mddev->lock); in md_allow_write()
8723 if (mddev->in_sync) { in md_allow_write()
8724 mddev->in_sync = 0; in md_allow_write()
8725 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_allow_write()
8726 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_allow_write()
8727 if (mddev->safemode_delay && in md_allow_write()
8728 mddev->safemode == 0) in md_allow_write()
8729 mddev->safemode = 1; in md_allow_write()
8730 spin_unlock(&mddev->lock); in md_allow_write()
8731 md_update_sb(mddev, 0); in md_allow_write()
8732 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_allow_write()
8734 wait_event(mddev->sb_wait, in md_allow_write()
8735 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); in md_allow_write()
8737 spin_unlock(&mddev->lock); in md_allow_write()
8746 struct mddev *mddev = thread->mddev; in md_do_sync() local
8747 struct mddev *mddev2; in md_do_sync()
8762 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) || in md_do_sync()
8763 test_bit(MD_RECOVERY_WAIT, &mddev->recovery)) in md_do_sync()
8765 if (!md_is_rdwr(mddev)) {/* never try to sync a read-only array */ in md_do_sync()
8766 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_do_sync()
8770 if (mddev_is_clustered(mddev)) { in md_do_sync()
8771 ret = md_cluster_ops->resync_start(mddev); in md_do_sync()
8775 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags); in md_do_sync()
8776 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in md_do_sync()
8777 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) || in md_do_sync()
8778 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) in md_do_sync()
8779 && ((unsigned long long)mddev->curr_resync_completed in md_do_sync()
8780 < (unsigned long long)mddev->resync_max_sectors)) in md_do_sync()
8784 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
8785 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { in md_do_sync()
8788 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in md_do_sync()
8793 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in md_do_sync()
8798 mddev->last_sync_action = action ?: desc; in md_do_sync()
8812 mddev->curr_resync = MD_RESYNC_DELAYED; in md_do_sync()
8815 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
8821 if (mddev2 == mddev) in md_do_sync()
8823 if (!mddev->parallel_resync in md_do_sync()
8825 && match_mddev_units(mddev, mddev2)) { in md_do_sync()
8827 if (mddev < mddev2 && in md_do_sync()
8828 mddev->curr_resync == MD_RESYNC_DELAYED) { in md_do_sync()
8830 mddev->curr_resync = MD_RESYNC_YIELDED; in md_do_sync()
8833 if (mddev > mddev2 && in md_do_sync()
8834 mddev->curr_resync == MD_RESYNC_YIELDED) in md_do_sync()
8844 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_do_sync()
8845 mddev2->curr_resync >= mddev->curr_resync) { in md_do_sync()
8849 desc, mdname(mddev), in md_do_sync()
8864 } while (mddev->curr_resync < MD_RESYNC_DELAYED); in md_do_sync()
8867 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
8871 max_sectors = mddev->resync_max_sectors; in md_do_sync()
8872 atomic64_set(&mddev->resync_mismatches, 0); in md_do_sync()
8874 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
8875 j = mddev->resync_min; in md_do_sync()
8876 else if (!mddev->bitmap) in md_do_sync()
8877 j = mddev->recovery_cp; in md_do_sync()
8879 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { in md_do_sync()
8880 max_sectors = mddev->resync_max_sectors; in md_do_sync()
8886 if (mddev_is_clustered(mddev) && in md_do_sync()
8887 mddev->reshape_position != MaxSector) in md_do_sync()
8888 j = mddev->reshape_position; in md_do_sync()
8891 max_sectors = mddev->dev_sectors; in md_do_sync()
8894 rdev_for_each_rcu(rdev, mddev) in md_do_sync()
8911 if (mddev->bitmap) { in md_do_sync()
8912 mddev->pers->quiesce(mddev, 1); in md_do_sync()
8913 mddev->pers->quiesce(mddev, 0); in md_do_sync()
8917 pr_info("md: %s of RAID array %s\n", desc, mdname(mddev)); in md_do_sync()
8918 pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev)); in md_do_sync()
8920 speed_max(mddev), desc); in md_do_sync()
8922 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ in md_do_sync()
8930 mddev->resync_mark = mark[last_mark]; in md_do_sync()
8931 mddev->resync_mark_cnt = mark_cnt[last_mark]; in md_do_sync()
8940 atomic_set(&mddev->recovery_active, 0); in md_do_sync()
8945 desc, mdname(mddev)); in md_do_sync()
8946 mddev->curr_resync = j; in md_do_sync()
8948 mddev->curr_resync = MD_RESYNC_ACTIVE; /* no longer delayed */ in md_do_sync()
8949 mddev->curr_resync_completed = j; in md_do_sync()
8950 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_do_sync()
8960 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
8961 ((mddev->curr_resync > mddev->curr_resync_completed && in md_do_sync()
8962 (mddev->curr_resync - mddev->curr_resync_completed) in md_do_sync()
8965 (j - mddev->curr_resync_completed)*2 in md_do_sync()
8966 >= mddev->resync_max - mddev->curr_resync_completed || in md_do_sync()
8967 mddev->curr_resync_completed > mddev->resync_max in md_do_sync()
8970 wait_event(mddev->recovery_wait, in md_do_sync()
8971 atomic_read(&mddev->recovery_active) == 0); in md_do_sync()
8972 mddev->curr_resync_completed = j; in md_do_sync()
8973 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && in md_do_sync()
8974 j > mddev->recovery_cp) in md_do_sync()
8975 mddev->recovery_cp = j; in md_do_sync()
8977 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_do_sync()
8978 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_do_sync()
8981 while (j >= mddev->resync_max && in md_do_sync()
8982 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
8988 wait_event_interruptible(mddev->recovery_wait, in md_do_sync()
8989 mddev->resync_max > j in md_do_sync()
8991 &mddev->recovery)); in md_do_sync()
8994 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
8997 sectors = mddev->pers->sync_request(mddev, j, &skipped); in md_do_sync()
8999 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_do_sync()
9005 atomic_add(sectors, &mddev->recovery_active); in md_do_sync()
9008 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
9016 mddev->curr_resync = j; in md_do_sync()
9017 mddev->curr_mark_cnt = io_sectors; in md_do_sync()
9033 mddev->resync_mark = mark[next]; in md_do_sync()
9034 mddev->resync_mark_cnt = mark_cnt[next]; in md_do_sync()
9036 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); in md_do_sync()
9040 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
9053 recovery_done = io_sectors - atomic_read(&mddev->recovery_active); in md_do_sync()
9054 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt)) / 2 in md_do_sync()
9055 / ((jiffies - mddev->resync_mark) / HZ + 1) + 1; in md_do_sync()
9057 if (currspeed > speed_min(mddev)) { in md_do_sync()
9058 if (currspeed > speed_max(mddev)) { in md_do_sync()
9062 if (!is_mddev_idle(mddev, 0)) { in md_do_sync()
9067 wait_event(mddev->recovery_wait, in md_do_sync()
9068 !atomic_read(&mddev->recovery_active)); in md_do_sync()
9072 pr_info("md: %s: %s %s.\n", mdname(mddev), desc, in md_do_sync()
9073 test_bit(MD_RECOVERY_INTR, &mddev->recovery) in md_do_sync()
9079 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); in md_do_sync()
9081 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
9082 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_do_sync()
9083 mddev->curr_resync >= MD_RESYNC_ACTIVE) { in md_do_sync()
9084 mddev->curr_resync_completed = mddev->curr_resync; in md_do_sync()
9085 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_do_sync()
9087 mddev->pers->sync_request(mddev, max_sectors, &skipped); in md_do_sync()
9089 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && in md_do_sync()
9090 mddev->curr_resync > MD_RESYNC_ACTIVE) { in md_do_sync()
9091 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
9092 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
9093 if (mddev->curr_resync >= mddev->recovery_cp) { in md_do_sync()
9095 desc, mdname(mddev)); in md_do_sync()
9097 &mddev->recovery)) in md_do_sync()
9098 mddev->recovery_cp = in md_do_sync()
9099 mddev->curr_resync_completed; in md_do_sync()
9101 mddev->recovery_cp = in md_do_sync()
9102 mddev->curr_resync; in md_do_sync()
9105 mddev->recovery_cp = MaxSector; in md_do_sync()
9107 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
9108 mddev->curr_resync = MaxSector; in md_do_sync()
9109 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
9110 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) { in md_do_sync()
9112 rdev_for_each_rcu(rdev, mddev) in md_do_sync()
9114 mddev->delta_disks >= 0 && in md_do_sync()
9118 rdev->recovery_offset < mddev->curr_resync) in md_do_sync()
9119 rdev->recovery_offset = mddev->curr_resync; in md_do_sync()
9128 set_mask_bits(&mddev->sb_flags, 0, in md_do_sync()
9131 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
9132 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_do_sync()
9133 mddev->delta_disks > 0 && in md_do_sync()
9134 mddev->pers->finish_reshape && in md_do_sync()
9135 mddev->pers->size && in md_do_sync()
9136 mddev->queue) { in md_do_sync()
9137 mddev_lock_nointr(mddev); in md_do_sync()
9138 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); in md_do_sync()
9139 mddev_unlock(mddev); in md_do_sync()
9140 if (!mddev_is_clustered(mddev)) in md_do_sync()
9141 set_capacity_and_notify(mddev->gendisk, in md_do_sync()
9142 mddev->array_sectors); in md_do_sync()
9145 spin_lock(&mddev->lock); in md_do_sync()
9146 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
9148 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
9149 mddev->resync_min = 0; in md_do_sync()
9150 mddev->resync_max = MaxSector; in md_do_sync()
9151 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
9152 mddev->resync_min = mddev->curr_resync_completed; in md_do_sync()
9153 set_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_do_sync()
9154 mddev->curr_resync = MD_RESYNC_NONE; in md_do_sync()
9155 spin_unlock(&mddev->lock); in md_do_sync()
9158 wake_up(&mddev->sb_wait); in md_do_sync()
9159 md_wakeup_thread(mddev->thread); in md_do_sync()
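
Sketch: md_do_sync()'s throttle decision in isolation. currspeed is KiB/s over the window since the last mark; over speed_max the loop backs off outright, between the limits it yields only if the member disks look busy (is_mddev_idle()), below speed_min it keeps going. The 1000/200000 defaults mirror the sysctl speed limits; sample values are invented:

#include <stdio.h>

int main(void)
{
	long hz = 100;				/* stand-in for HZ */
	unsigned long jiffies = 50000, resync_mark = 49000;
	unsigned long io_sectors = 4000000, recovery_active = 1024;
	unsigned long resync_mark_cnt = 3000000;
	unsigned long spd_min = 1000, spd_max = 200000;	/* sysctl defaults */

	unsigned long recovery_done = io_sectors - recovery_active;
	unsigned long currspeed =
		(recovery_done - resync_mark_cnt) / 2 /
		((jiffies - resync_mark) / hz + 1) + 1;

	if (currspeed > spd_max)
		printf("%luK/s: over speed_max, sleep and retry\n", currspeed);
	else if (currspeed > spd_min)
		printf("%luK/s: above speed_min, wait if disks busy\n", currspeed);
	else
		printf("%luK/s: below speed_min, keep going\n", currspeed);
	return 0;
}
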
9164 static int remove_and_add_spares(struct mddev *mddev, in remove_and_add_spares() argument
9172 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in remove_and_add_spares()
9176 rdev_for_each(rdev, mddev) { in remove_and_add_spares()
9194 rdev_for_each(rdev, mddev) { in remove_and_add_spares()
9202 if (mddev->pers->hot_remove_disk( in remove_and_add_spares()
9203 mddev, rdev) == 0) { in remove_and_add_spares()
9204 sysfs_unlink_rdev(mddev, rdev); in remove_and_add_spares()
9214 if (removed && mddev->kobj.sd) in remove_and_add_spares()
9215 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in remove_and_add_spares()
9220 rdev_for_each(rdev, mddev) { in remove_and_add_spares()
9235 if (!md_is_rdwr(mddev) && in remove_and_add_spares()
9242 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) { in remove_and_add_spares()
9244 sysfs_link_rdev(mddev, rdev); in remove_and_add_spares()
9248 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in remove_and_add_spares()
9253 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in remove_and_add_spares()
9259 struct mddev *mddev = container_of(ws, struct mddev, del_work); in md_start_sync() local
9261 rcu_assign_pointer(mddev->sync_thread, in md_start_sync()
9262 md_register_thread(md_do_sync, mddev, "resync")); in md_start_sync()
9263 if (!mddev->sync_thread) { in md_start_sync()
9265 mdname(mddev)); in md_start_sync()
9267 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_start_sync()
9268 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_start_sync()
9269 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_start_sync()
9270 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_start_sync()
9271 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_start_sync()
9274 &mddev->recovery)) in md_start_sync()
9275 if (mddev->sysfs_action) in md_start_sync()
9276 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_start_sync()
9278 md_wakeup_thread(mddev->sync_thread); in md_start_sync()
9279 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_start_sync()
9305 void md_check_recovery(struct mddev *mddev) in md_check_recovery() argument
9307 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) { in md_check_recovery()
9311 set_bit(MD_UPDATING_SB, &mddev->flags); in md_check_recovery()
9313 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags)) in md_check_recovery()
9314 md_update_sb(mddev, 0); in md_check_recovery()
9315 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags); in md_check_recovery()
9316 wake_up(&mddev->sb_wait); in md_check_recovery()
9319 if (is_md_suspended(mddev)) in md_check_recovery()
9322 if (mddev->bitmap) in md_check_recovery()
9323 md_bitmap_daemon_work(mddev); in md_check_recovery()
9326 if (mddev->pers->sync_request && !mddev->external) { in md_check_recovery()
9328 mdname(mddev)); in md_check_recovery()
9329 mddev->safemode = 2; in md_check_recovery()
9334 if (!md_is_rdwr(mddev) && in md_check_recovery()
9335 !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) in md_check_recovery()
9338 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) || in md_check_recovery()
9339 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || in md_check_recovery()
9340 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || in md_check_recovery()
9341 (mddev->external == 0 && mddev->safemode == 1) || in md_check_recovery()
9342 (mddev->safemode == 2 in md_check_recovery()
9343 && !mddev->in_sync && mddev->recovery_cp == MaxSector) in md_check_recovery()
9347 if (mddev_trylock(mddev)) { in md_check_recovery()
9349 bool try_set_sync = mddev->safemode != 0; in md_check_recovery()
9351 if (!mddev->external && mddev->safemode == 1) in md_check_recovery()
9352 mddev->safemode = 0; in md_check_recovery()
9354 if (!md_is_rdwr(mddev)) { in md_check_recovery()
9356 if (!mddev->external && mddev->in_sync) in md_check_recovery()
9362 rdev_for_each(rdev, mddev) in md_check_recovery()
9371 remove_and_add_spares(mddev, NULL); in md_check_recovery()
9375 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_check_recovery()
9376 md_reap_sync_thread(mddev); in md_check_recovery()
9377 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9378 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_check_recovery()
9379 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_check_recovery()
9383 if (mddev_is_clustered(mddev)) { in md_check_recovery()
9388 rdev_for_each_safe(rdev, tmp, mddev) { in md_check_recovery()
9395 if (try_set_sync && !mddev->external && !mddev->in_sync) { in md_check_recovery()
9396 spin_lock(&mddev->lock); in md_check_recovery()
9397 set_in_sync(mddev); in md_check_recovery()
9398 spin_unlock(&mddev->lock); in md_check_recovery()
9401 if (mddev->sb_flags) in md_check_recovery()
9402 md_update_sb(mddev, 0); in md_check_recovery()
9408 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { in md_check_recovery()
9409 if (!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { in md_check_recovery()
9411 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_check_recovery()
9415 if (WARN_ON_ONCE(!mddev->sync_thread)) in md_check_recovery()
9418 md_reap_sync_thread(mddev); in md_check_recovery()
9425 mddev->curr_resync_completed = 0; in md_check_recovery()
9426 spin_lock(&mddev->lock); in md_check_recovery()
9427 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_check_recovery()
9428 spin_unlock(&mddev->lock); in md_check_recovery()
9432 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_check_recovery()
9433 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_check_recovery()
9435 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || in md_check_recovery()
9436 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) in md_check_recovery()
9445 if (mddev->reshape_position != MaxSector) { in md_check_recovery()
9446 if (mddev->pers->check_reshape == NULL || in md_check_recovery()
9447 mddev->pers->check_reshape(mddev) != 0) in md_check_recovery()
9450 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_check_recovery()
9451 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9452 } else if ((spares = remove_and_add_spares(mddev, NULL))) { in md_check_recovery()
9453 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_check_recovery()
9454 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_check_recovery()
9455 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_check_recovery()
9456 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9457 } else if (mddev->recovery_cp < MaxSector) { in md_check_recovery()
9458 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_check_recovery()
9459 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9460 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) in md_check_recovery()
9464 if (mddev->pers->sync_request) { in md_check_recovery()
9470 md_bitmap_write_all(mddev->bitmap); in md_check_recovery()
9472 INIT_WORK(&mddev->del_work, md_start_sync); in md_check_recovery()
9473 queue_work(md_misc_wq, &mddev->del_work); in md_check_recovery()
9477 if (!mddev->sync_thread) { in md_check_recovery()
9478 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_check_recovery()
9481 &mddev->recovery)) in md_check_recovery()
9482 if (mddev->sysfs_action) in md_check_recovery()
9483 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_check_recovery()
9486 wake_up(&mddev->sb_wait); in md_check_recovery()
9487 mddev_unlock(mddev); in md_check_recovery()
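
Sketch: the priority ladder md_check_recovery() walks when picking work: a pending reshape wins, then spare recovery, then a resync if the array is dirty (recovery_cp < MaxSector). Pure-function reduction with illustrative names:

#include <stdio.h>
#include <stdbool.h>

enum action { RESHAPE, RECOVER, RESYNC, NOTHING };

static enum action pick(bool reshape_pending, int spares, bool dirty)
{
	if (reshape_pending)
		return RESHAPE;	/* mddev->reshape_position != MaxSector */
	if (spares)
		return RECOVER;	/* remove_and_add_spares() found work */
	if (dirty)
		return RESYNC;	/* mddev->recovery_cp < MaxSector */
	return NOTHING;
}

int main(void)
{
	/* spare recovery outranks a resync, as in the ladder above */
	printf("%s\n", pick(false, 2, true) == RECOVER ? "recover" : "other");
	return 0;
}
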
9492 void md_reap_sync_thread(struct mddev *mddev) in md_reap_sync_thread() argument
9495 sector_t old_dev_sectors = mddev->dev_sectors; in md_reap_sync_thread()
9499 md_unregister_thread(mddev, &mddev->sync_thread); in md_reap_sync_thread()
9500 atomic_inc(&mddev->sync_seq); in md_reap_sync_thread()
9502 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_reap_sync_thread()
9503 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in md_reap_sync_thread()
9504 mddev->degraded != mddev->raid_disks) { in md_reap_sync_thread()
9507 if (mddev->pers->spare_active(mddev)) { in md_reap_sync_thread()
9508 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in md_reap_sync_thread()
9509 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_reap_sync_thread()
9512 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_reap_sync_thread()
9513 mddev->pers->finish_reshape) { in md_reap_sync_thread()
9514 mddev->pers->finish_reshape(mddev); in md_reap_sync_thread()
9515 if (mddev_is_clustered(mddev)) in md_reap_sync_thread()
9522 if (!mddev->degraded) in md_reap_sync_thread()
9523 rdev_for_each(rdev, mddev) in md_reap_sync_thread()
9526 md_update_sb(mddev, 1); in md_reap_sync_thread()
9530 if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags)) in md_reap_sync_thread()
9531 md_cluster_ops->resync_finish(mddev); in md_reap_sync_thread()
9532 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_reap_sync_thread()
9533 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_reap_sync_thread()
9534 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_reap_sync_thread()
9535 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_reap_sync_thread()
9536 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_reap_sync_thread()
9537 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_reap_sync_thread()
9543 if (mddev_is_clustered(mddev) && is_reshaped in md_reap_sync_thread()
9544 && !test_bit(MD_CLOSING, &mddev->flags)) in md_reap_sync_thread()
9545 md_cluster_ops->update_size(mddev, old_dev_sectors); in md_reap_sync_thread()
9547 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_reap_sync_thread()
9548 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_reap_sync_thread()
9549 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_reap_sync_thread()
9551 if (mddev->event_work.func) in md_reap_sync_thread()
9552 queue_work(md_misc_wq, &mddev->event_work); in md_reap_sync_thread()
9557 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) in md_wait_for_blocked_rdev() argument
9564 rdev_dec_pending(rdev, mddev); in md_wait_for_blocked_rdev()
9568 void md_finish_reshape(struct mddev *mddev) in md_finish_reshape() argument
9573 rdev_for_each(rdev, mddev) { in md_finish_reshape()
9589 struct mddev *mddev = rdev->mddev; in rdev_set_badblocks() local
9601 set_mask_bits(&mddev->sb_flags, 0, in rdev_set_badblocks()
9603 md_wakeup_thread(rdev->mddev->thread); in rdev_set_badblocks()
9628 struct mddev *mddev, *n; in md_notify_reboot() local
9632 list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) { in md_notify_reboot()
9633 if (!mddev_get(mddev)) in md_notify_reboot()
9636 if (mddev_trylock(mddev)) { in md_notify_reboot()
9637 if (mddev->pers) in md_notify_reboot()
9638 __md_stop_writes(mddev); in md_notify_reboot()
9639 if (mddev->persistent) in md_notify_reboot()
9640 mddev->safemode = 2; in md_notify_reboot()
9641 mddev_unlock(mddev); in md_notify_reboot()
9644 mddev_put(mddev); in md_notify_reboot()
9718 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) in check_sb_changes() argument
9728 if (mddev->dev_sectors != le64_to_cpu(sb->size)) { in check_sb_changes()
9729 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size)); in check_sb_changes()
9733 md_bitmap_update_sb(mddev->bitmap); in check_sb_changes()
9737 rdev_for_each_safe(rdev2, tmp, mddev) { in check_sb_changes()
9763 ret = remove_and_add_spares(mddev, rdev2); in check_sb_changes()
9768 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in check_sb_changes()
9769 md_wakeup_thread(mddev->thread); in check_sb_changes()
9778 md_error(mddev, rdev2); in check_sb_changes()
9784 if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) { in check_sb_changes()
9785 ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks)); in check_sb_changes()
9794 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && in check_sb_changes()
9800 mddev->reshape_position = le64_to_cpu(sb->reshape_position); in check_sb_changes()
9801 if (mddev->pers->update_reshape_pos) in check_sb_changes()
9802 mddev->pers->update_reshape_pos(mddev); in check_sb_changes()
9803 if (mddev->pers->start_reshape) in check_sb_changes()
9804 mddev->pers->start_reshape(mddev); in check_sb_changes()
9805 } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && in check_sb_changes()
9806 mddev->reshape_position != MaxSector && in check_sb_changes()
9809 mddev->reshape_position = MaxSector; in check_sb_changes()
9810 if (mddev->pers->update_reshape_pos) in check_sb_changes()
9811 mddev->pers->update_reshape_pos(mddev); in check_sb_changes()
9815 mddev->events = le64_to_cpu(sb->events); in check_sb_changes()
9818 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev) in read_rdev() argument
9832 err = super_types[mddev->major_version]. in read_rdev()
9833 load_super(rdev, NULL, mddev->minor_version); in read_rdev()
9858 mddev->pers->spare_active(mddev)) in read_rdev()
9859 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in read_rdev()
9865 void md_reload_sb(struct mddev *mddev, int nr) in md_reload_sb() argument
9871 rdev_for_each_rcu(iter, mddev) { in md_reload_sb()
9883 err = read_rdev(mddev, rdev); in md_reload_sb()
9887 check_sb_changes(mddev, rdev); in md_reload_sb()
9890 rdev_for_each_rcu(rdev, mddev) { in md_reload_sb()
9892 read_rdev(mddev, rdev); in md_reload_sb()
9968 struct mddev *mddev, *n; in md_exit() local
9989 list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) { in md_exit()
9990 if (!mddev_get(mddev)) in md_exit()
9993 export_array(mddev); in md_exit()
9994 mddev->ctime = 0; in md_exit()
9995 mddev->hold_active = 0; in md_exit()
10001 mddev_put(mddev); in md_exit()