Lines matching refs:mddev

92 static int remove_and_add_spares(struct mddev *mddev,
94 static void mddev_detach(struct mddev *mddev);
119 static inline int speed_min(struct mddev *mddev) in speed_min() argument
121 return mddev->sync_speed_min ? in speed_min()
122 mddev->sync_speed_min : sysctl_speed_limit_min; in speed_min()
125 static inline int speed_max(struct mddev *mddev) in speed_max() argument
127 return mddev->sync_speed_max ? in speed_max()
128 mddev->sync_speed_max : sysctl_speed_limit_max; in speed_max()
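
The speed_min()/speed_max() helpers listed above return the per-array sync speed limit when one is set and otherwise fall back to the global sysctl defaults. A minimal standalone sketch of that fallback pattern, with made-up default values standing in for sysctl_speed_limit_min/max:

#include <stdio.h>

static int sysctl_speed_limit_min = 1000;     /* assumed global defaults */
static int sysctl_speed_limit_max = 200000;

struct mddev_model { int sync_speed_min; int sync_speed_max; };

/* Per-array value wins when non-zero, otherwise use the global default. */
static int speed_min_model(const struct mddev_model *m)
{
        return m->sync_speed_min ? m->sync_speed_min : sysctl_speed_limit_min;
}

static int speed_max_model(const struct mddev_model *m)
{
        return m->sync_speed_max ? m->sync_speed_max : sysctl_speed_limit_max;
}

int main(void)
{
        struct mddev_model m = { .sync_speed_min = 0, .sync_speed_max = 50000 };
        printf("min=%d max=%d\n", speed_min_model(&m), speed_max_model(&m));
        return 0;
}
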
140 static void rdevs_uninit_serial(struct mddev *mddev) in rdevs_uninit_serial() argument
144 rdev_for_each(rdev, mddev) in rdevs_uninit_serial()
176 static int rdevs_init_serial(struct mddev *mddev) in rdevs_init_serial() argument
181 rdev_for_each(rdev, mddev) { in rdevs_init_serial()
188 if (ret && !mddev->serial_info_pool) in rdevs_init_serial()
189 rdevs_uninit_serial(mddev); in rdevs_init_serial()
201 return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 && in rdev_need_serial()
211 void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, in mddev_create_serial_pool() argument
221 mddev_suspend(mddev); in mddev_create_serial_pool()
224 ret = rdevs_init_serial(mddev); in mddev_create_serial_pool()
230 if (mddev->serial_info_pool == NULL) { in mddev_create_serial_pool()
235 mddev->serial_info_pool = in mddev_create_serial_pool()
238 if (!mddev->serial_info_pool) { in mddev_create_serial_pool()
239 rdevs_uninit_serial(mddev); in mddev_create_serial_pool()
246 mddev_resume(mddev); in mddev_create_serial_pool()
255 void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, in mddev_destroy_serial_pool() argument
261 if (mddev->serial_info_pool) { in mddev_destroy_serial_pool()
266 mddev_suspend(mddev); in mddev_destroy_serial_pool()
267 rdev_for_each(temp, mddev) { in mddev_destroy_serial_pool()
269 if (!mddev->serialize_policy || in mddev_destroy_serial_pool()
285 mempool_destroy(mddev->serial_info_pool); in mddev_destroy_serial_pool()
286 mddev->serial_info_pool = NULL; in mddev_destroy_serial_pool()
289 mddev_resume(mddev); in mddev_destroy_serial_pool()
378 static bool is_suspended(struct mddev *mddev, struct bio *bio) in is_suspended() argument
380 if (mddev->suspended) in is_suspended()
384 if (mddev->suspend_lo >= mddev->suspend_hi) in is_suspended()
386 if (bio->bi_iter.bi_sector >= mddev->suspend_hi) in is_suspended()
388 if (bio_end_sector(bio) < mddev->suspend_lo) in is_suspended()
393 void md_handle_request(struct mddev *mddev, struct bio *bio) in md_handle_request() argument
397 if (is_suspended(mddev, bio)) { in md_handle_request()
406 prepare_to_wait(&mddev->sb_wait, &__wait, in md_handle_request()
408 if (!is_suspended(mddev, bio)) in md_handle_request()
414 finish_wait(&mddev->sb_wait, &__wait); in md_handle_request()
416 atomic_inc(&mddev->active_io); in md_handle_request()
419 if (!mddev->pers->make_request(mddev, bio)) { in md_handle_request()
420 atomic_dec(&mddev->active_io); in md_handle_request()
421 wake_up(&mddev->sb_wait); in md_handle_request()
425 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) in md_handle_request()
426 wake_up(&mddev->sb_wait); in md_handle_request()
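
is_suspended() and md_handle_request() above block a bio while the array is suspended or while the bio touches the suspended sector window [suspend_lo, suspend_hi), and account in-flight requests through active_io. A standalone model of just the window-overlap test (names are illustrative, not the kernel structures):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

struct suspend_window { sector_t lo, hi; };   /* [lo, hi) is suspended */

/* Mirrors the checks quoted above: an empty window suspends nothing,
 * and a bio is only blocked when it intersects the window. */
static bool bio_hits_window(const struct suspend_window *w,
                            sector_t bio_start, sector_t bio_end)
{
        if (w->lo >= w->hi)           /* empty window */
                return false;
        if (bio_start >= w->hi)       /* bio entirely above the window */
                return false;
        if (bio_end < w->lo)          /* bio entirely below the window */
                return false;
        return true;
}

int main(void)
{
        struct suspend_window w = { .lo = 100, .hi = 200 };
        printf("%d %d\n", bio_hits_window(&w, 150, 160),   /* 1: overlaps */
                          bio_hits_window(&w, 300, 310));  /* 0: misses   */
        return 0;
}
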
433 struct mddev *mddev = bio->bi_bdev->bd_disk->private_data; in md_submit_bio() local
435 if (mddev == NULL || mddev->pers == NULL) { in md_submit_bio()
440 if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) { in md_submit_bio()
447 if (mddev->ro == 1 && unlikely(rw == WRITE)) { in md_submit_bio()
457 md_handle_request(mddev, bio); in md_submit_bio()
466 void mddev_suspend(struct mddev *mddev) in mddev_suspend() argument
468 WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk); in mddev_suspend()
469 lockdep_assert_held(&mddev->reconfig_mutex); in mddev_suspend()
470 if (mddev->suspended++) in mddev_suspend()
473 wake_up(&mddev->sb_wait); in mddev_suspend()
474 set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags); in mddev_suspend()
476 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); in mddev_suspend()
477 mddev->pers->quiesce(mddev, 1); in mddev_suspend()
478 clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags); in mddev_suspend()
479 wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags)); in mddev_suspend()
481 del_timer_sync(&mddev->safemode_timer); in mddev_suspend()
483 mddev->noio_flag = memalloc_noio_save(); in mddev_suspend()
487 void mddev_resume(struct mddev *mddev) in mddev_resume() argument
490 memalloc_noio_restore(mddev->noio_flag); in mddev_resume()
491 lockdep_assert_held(&mddev->reconfig_mutex); in mddev_resume()
492 if (--mddev->suspended) in mddev_resume()
494 wake_up(&mddev->sb_wait); in mddev_resume()
495 mddev->pers->quiesce(mddev, 0); in mddev_resume()
497 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in mddev_resume()
498 md_wakeup_thread(mddev->thread); in mddev_resume()
499 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ in mddev_resume()
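
mddev_suspend() and mddev_resume() above nest: only the first suspend waits for active_io to drain and quiesces the personality, and only the matching final resume restarts it. A toy counter model of that pairing:

#include <stdio.h>

/* Single-threaded model of the nesting in mddev_suspend()/mddev_resume():
 * inner suspend/resume pairs only adjust the counter. */
struct array_model { int suspended; };

static void model_suspend(struct array_model *a)
{
        if (a->suspended++)
                return;                  /* already suspended: just nest */
        printf("quiesce: wait for in-flight I/O to drain\n");
}

static void model_resume(struct array_model *a)
{
        if (--a->suspended)
                return;                  /* still nested: stay suspended */
        printf("unquiesce: restart I/O and kick recovery\n");
}

int main(void)
{
        struct array_model a = { 0 };
        model_suspend(&a);   /* quiesces            */
        model_suspend(&a);   /* nested, no-op       */
        model_resume(&a);    /* still suspended     */
        model_resume(&a);    /* final resume        */
        return 0;
}
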
510 struct mddev *mddev = rdev->mddev; in md_end_flush() local
512 rdev_dec_pending(rdev, mddev); in md_end_flush()
514 if (atomic_dec_and_test(&mddev->flush_pending)) { in md_end_flush()
516 queue_work(md_wq, &mddev->flush_work); in md_end_flush()
525 struct mddev *mddev = container_of(ws, struct mddev, flush_work); in submit_flushes() local
528 mddev->start_flush = ktime_get_boottime(); in submit_flushes()
529 INIT_WORK(&mddev->flush_work, md_submit_flush_data); in submit_flushes()
530 atomic_set(&mddev->flush_pending, 1); in submit_flushes()
532 rdev_for_each_rcu(rdev, mddev) in submit_flushes()
545 GFP_NOIO, &mddev->bio_set); in submit_flushes()
548 atomic_inc(&mddev->flush_pending); in submit_flushes()
551 rdev_dec_pending(rdev, mddev); in submit_flushes()
554 if (atomic_dec_and_test(&mddev->flush_pending)) in submit_flushes()
555 queue_work(md_wq, &mddev->flush_work); in submit_flushes()
560 struct mddev *mddev = container_of(ws, struct mddev, flush_work); in md_submit_flush_data() local
561 struct bio *bio = mddev->flush_bio; in md_submit_flush_data()
569 spin_lock_irq(&mddev->lock); in md_submit_flush_data()
570 mddev->prev_flush_start = mddev->start_flush; in md_submit_flush_data()
571 mddev->flush_bio = NULL; in md_submit_flush_data()
572 spin_unlock_irq(&mddev->lock); in md_submit_flush_data()
573 wake_up(&mddev->sb_wait); in md_submit_flush_data()
580 md_handle_request(mddev, bio); in md_submit_flush_data()
590 bool md_flush_request(struct mddev *mddev, struct bio *bio) in md_flush_request() argument
593 spin_lock_irq(&mddev->lock); in md_flush_request()
597 wait_event_lock_irq(mddev->sb_wait, in md_flush_request()
598 !mddev->flush_bio || in md_flush_request()
599 ktime_before(req_start, mddev->prev_flush_start), in md_flush_request()
600 mddev->lock); in md_flush_request()
602 if (ktime_after(req_start, mddev->prev_flush_start)) { in md_flush_request()
603 WARN_ON(mddev->flush_bio); in md_flush_request()
604 mddev->flush_bio = bio; in md_flush_request()
607 spin_unlock_irq(&mddev->lock); in md_flush_request()
610 INIT_WORK(&mddev->flush_work, submit_flushes); in md_flush_request()
611 queue_work(md_wq, &mddev->flush_work); in md_flush_request()
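
md_flush_request() above coalesces flushes: if another flush started after this request arrived and has since been processed, the request can skip issuing its own flush. A toy model of that timestamp comparison, using plain integers in place of ktime_t:

#include <stdbool.h>
#include <stdio.h>

typedef long long ktime_model_t;   /* stand-in for ktime_t */

/* Mirrors the decision quoted above: issue a new flush unless a flush
 * that started after this request has already been processed. */
static bool flush_needed(ktime_model_t req_start, ktime_model_t prev_flush_start)
{
        return req_start > prev_flush_start;
}

int main(void)
{
        printf("%d\n", flush_needed(10, 5));   /* 1: submit a real flush */
        printf("%d\n", flush_needed(3, 5));    /* 0: already covered     */
        return 0;
}
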
626 static inline struct mddev *mddev_get(struct mddev *mddev) in mddev_get() argument
630 if (test_bit(MD_DELETED, &mddev->flags)) in mddev_get()
632 atomic_inc(&mddev->active); in mddev_get()
633 return mddev; in mddev_get()
638 void mddev_put(struct mddev *mddev) in mddev_put() argument
640 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) in mddev_put()
642 if (!mddev->raid_disks && list_empty(&mddev->disks) && in mddev_put()
643 mddev->ctime == 0 && !mddev->hold_active) { in mddev_put()
646 set_bit(MD_DELETED, &mddev->flags); in mddev_put()
653 INIT_WORK(&mddev->del_work, mddev_delayed_delete); in mddev_put()
654 queue_work(md_misc_wq, &mddev->del_work); in mddev_put()
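
mddev_get()/mddev_put() above keep the array alive by reference count: get fails once MD_DELETED is set, and the final put of an array with no disks or configuration left queues deferred deletion rather than freeing inline. A simplified, single-threaded model of that flow (the real code drops the last reference under all_mddevs_lock via atomic_dec_and_lock):

#include <stdio.h>

struct ref_model { int active; int deleted; int in_use; };

static struct ref_model *model_get(struct ref_model *r)
{
        if (r->deleted)
                return NULL;       /* already being torn down */
        r->active++;
        return r;
}

static void model_put(struct ref_model *r)
{
        if (--r->active)
                return;            /* other holders remain */
        if (!r->in_use) {
                r->deleted = 1;    /* nothing left to keep: schedule delete */
                printf("queue deferred delete\n");
        }
}

int main(void)
{
        struct ref_model r = { .active = 1, .deleted = 0, .in_use = 0 };
        model_put(&r);                                    /* last ref, unused */
        printf("get after delete: %p\n", (void *)model_get(&r));
        return 0;
}
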
661 void mddev_init(struct mddev *mddev) in mddev_init() argument
663 mutex_init(&mddev->open_mutex); in mddev_init()
664 mutex_init(&mddev->reconfig_mutex); in mddev_init()
665 mutex_init(&mddev->bitmap_info.mutex); in mddev_init()
666 INIT_LIST_HEAD(&mddev->disks); in mddev_init()
667 INIT_LIST_HEAD(&mddev->all_mddevs); in mddev_init()
668 timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0); in mddev_init()
669 atomic_set(&mddev->active, 1); in mddev_init()
670 atomic_set(&mddev->openers, 0); in mddev_init()
671 atomic_set(&mddev->active_io, 0); in mddev_init()
672 spin_lock_init(&mddev->lock); in mddev_init()
673 atomic_set(&mddev->flush_pending, 0); in mddev_init()
674 init_waitqueue_head(&mddev->sb_wait); in mddev_init()
675 init_waitqueue_head(&mddev->recovery_wait); in mddev_init()
676 mddev->reshape_position = MaxSector; in mddev_init()
677 mddev->reshape_backwards = 0; in mddev_init()
678 mddev->last_sync_action = "none"; in mddev_init()
679 mddev->resync_min = 0; in mddev_init()
680 mddev->resync_max = MaxSector; in mddev_init()
681 mddev->level = LEVEL_NONE; in mddev_init()
685 static struct mddev *mddev_find_locked(dev_t unit) in mddev_find_locked()
687 struct mddev *mddev; in mddev_find_locked() local
689 list_for_each_entry(mddev, &all_mddevs, all_mddevs) in mddev_find_locked()
690 if (mddev->unit == unit) in mddev_find_locked()
691 return mddev; in mddev_find_locked()
717 static struct mddev *mddev_alloc(dev_t unit) in mddev_alloc()
719 struct mddev *new; in mddev_alloc()
759 static void mddev_free(struct mddev *mddev) in mddev_free() argument
762 list_del(&mddev->all_mddevs); in mddev_free()
765 kfree(mddev); in mddev_free()
770 void mddev_unlock(struct mddev *mddev) in mddev_unlock() argument
772 if (mddev->to_remove) { in mddev_unlock()
785 const struct attribute_group *to_remove = mddev->to_remove; in mddev_unlock()
786 mddev->to_remove = NULL; in mddev_unlock()
787 mddev->sysfs_active = 1; in mddev_unlock()
788 mutex_unlock(&mddev->reconfig_mutex); in mddev_unlock()
790 if (mddev->kobj.sd) { in mddev_unlock()
792 sysfs_remove_group(&mddev->kobj, to_remove); in mddev_unlock()
793 if (mddev->pers == NULL || in mddev_unlock()
794 mddev->pers->sync_request == NULL) { in mddev_unlock()
795 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); in mddev_unlock()
796 if (mddev->sysfs_action) in mddev_unlock()
797 sysfs_put(mddev->sysfs_action); in mddev_unlock()
798 if (mddev->sysfs_completed) in mddev_unlock()
799 sysfs_put(mddev->sysfs_completed); in mddev_unlock()
800 if (mddev->sysfs_degraded) in mddev_unlock()
801 sysfs_put(mddev->sysfs_degraded); in mddev_unlock()
802 mddev->sysfs_action = NULL; in mddev_unlock()
803 mddev->sysfs_completed = NULL; in mddev_unlock()
804 mddev->sysfs_degraded = NULL; in mddev_unlock()
807 mddev->sysfs_active = 0; in mddev_unlock()
809 mutex_unlock(&mddev->reconfig_mutex); in mddev_unlock()
815 md_wakeup_thread(mddev->thread); in mddev_unlock()
816 wake_up(&mddev->sb_wait); in mddev_unlock()
821 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr) in md_find_rdev_nr_rcu() argument
825 rdev_for_each_rcu(rdev, mddev) in md_find_rdev_nr_rcu()
833 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev) in find_rdev() argument
837 rdev_for_each(rdev, mddev) in find_rdev()
844 struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev) in md_find_rdev_rcu() argument
848 rdev_for_each_rcu(rdev, mddev) in md_find_rdev_rcu()
902 struct mddev *mddev = rdev->mddev; in super_written() local
907 md_error(mddev, rdev); in super_written()
910 set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags); in super_written()
916 if (atomic_dec_and_test(&mddev->pending_writes)) in super_written()
917 wake_up(&mddev->sb_wait); in super_written()
918 rdev_dec_pending(rdev, mddev); in super_written()
922 void md_super_write(struct mddev *mddev, struct md_rdev *rdev, in md_super_write() argument
942 GFP_NOIO, &mddev->sync_set); in md_super_write()
951 if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) && in md_super_write()
956 atomic_inc(&mddev->pending_writes); in md_super_write()
960 int md_super_wait(struct mddev *mddev) in md_super_wait() argument
963 wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); in md_super_wait()
964 if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags)) in md_super_wait()
982 else if (rdev->mddev->reshape_position != MaxSector && in sync_page_io()
983 (rdev->mddev->reshape_backwards == in sync_page_io()
984 (sector >= rdev->mddev->reshape_position))) in sync_page_io()
1121 int (*validate_super)(struct mddev *mddev,
1123 void (*sync_super)(struct mddev *mddev,
1139 int md_check_no_bitmap(struct mddev *mddev) in md_check_no_bitmap() argument
1141 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) in md_check_no_bitmap()
1144 mdname(mddev), mddev->pers->name); in md_check_no_bitmap()
1260 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) in super_90_validate() argument
1272 if (mddev->raid_disks == 0) { in super_90_validate()
1273 mddev->major_version = 0; in super_90_validate()
1274 mddev->minor_version = sb->minor_version; in super_90_validate()
1275 mddev->patch_version = sb->patch_version; in super_90_validate()
1276 mddev->external = 0; in super_90_validate()
1277 mddev->chunk_sectors = sb->chunk_size >> 9; in super_90_validate()
1278 mddev->ctime = sb->ctime; in super_90_validate()
1279 mddev->utime = sb->utime; in super_90_validate()
1280 mddev->level = sb->level; in super_90_validate()
1281 mddev->clevel[0] = 0; in super_90_validate()
1282 mddev->layout = sb->layout; in super_90_validate()
1283 mddev->raid_disks = sb->raid_disks; in super_90_validate()
1284 mddev->dev_sectors = ((sector_t)sb->size) * 2; in super_90_validate()
1285 mddev->events = ev1; in super_90_validate()
1286 mddev->bitmap_info.offset = 0; in super_90_validate()
1287 mddev->bitmap_info.space = 0; in super_90_validate()
1289 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; in super_90_validate()
1290 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); in super_90_validate()
1291 mddev->reshape_backwards = 0; in super_90_validate()
1293 if (mddev->minor_version >= 91) { in super_90_validate()
1294 mddev->reshape_position = sb->reshape_position; in super_90_validate()
1295 mddev->delta_disks = sb->delta_disks; in super_90_validate()
1296 mddev->new_level = sb->new_level; in super_90_validate()
1297 mddev->new_layout = sb->new_layout; in super_90_validate()
1298 mddev->new_chunk_sectors = sb->new_chunk >> 9; in super_90_validate()
1299 if (mddev->delta_disks < 0) in super_90_validate()
1300 mddev->reshape_backwards = 1; in super_90_validate()
1302 mddev->reshape_position = MaxSector; in super_90_validate()
1303 mddev->delta_disks = 0; in super_90_validate()
1304 mddev->new_level = mddev->level; in super_90_validate()
1305 mddev->new_layout = mddev->layout; in super_90_validate()
1306 mddev->new_chunk_sectors = mddev->chunk_sectors; in super_90_validate()
1308 if (mddev->level == 0) in super_90_validate()
1309 mddev->layout = -1; in super_90_validate()
1312 mddev->recovery_cp = MaxSector; in super_90_validate()
1316 mddev->recovery_cp = sb->recovery_cp; in super_90_validate()
1318 mddev->recovery_cp = 0; in super_90_validate()
1321 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); in super_90_validate()
1322 memcpy(mddev->uuid+4, &sb->set_uuid1, 4); in super_90_validate()
1323 memcpy(mddev->uuid+8, &sb->set_uuid2, 4); in super_90_validate()
1324 memcpy(mddev->uuid+12,&sb->set_uuid3, 4); in super_90_validate()
1326 mddev->max_disks = MD_SB_DISKS; in super_90_validate()
1329 mddev->bitmap_info.file == NULL) { in super_90_validate()
1330 mddev->bitmap_info.offset = in super_90_validate()
1331 mddev->bitmap_info.default_offset; in super_90_validate()
1332 mddev->bitmap_info.space = in super_90_validate()
1333 mddev->bitmap_info.default_space; in super_90_validate()
1336 } else if (mddev->pers == NULL) { in super_90_validate()
1342 if (ev1 < mddev->events) in super_90_validate()
1344 } else if (mddev->bitmap) { in super_90_validate()
1348 if (ev1 < mddev->bitmap->events_cleared) in super_90_validate()
1350 if (ev1 < mddev->events) in super_90_validate()
1353 if (ev1 < mddev->events) in super_90_validate()
1358 if (mddev->level != LEVEL_MULTIPATH) { in super_90_validate()
1372 if (mddev->minor_version >= 91) { in super_90_validate()
1389 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) in super_90_sync() argument
1393 int next_spare = mddev->raid_disks; in super_90_sync()
1415 sb->major_version = mddev->major_version; in super_90_sync()
1416 sb->patch_version = mddev->patch_version; in super_90_sync()
1418 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); in super_90_sync()
1419 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); in super_90_sync()
1420 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); in super_90_sync()
1421 memcpy(&sb->set_uuid3, mddev->uuid+12,4); in super_90_sync()
1423 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); in super_90_sync()
1424 sb->level = mddev->level; in super_90_sync()
1425 sb->size = mddev->dev_sectors / 2; in super_90_sync()
1426 sb->raid_disks = mddev->raid_disks; in super_90_sync()
1427 sb->md_minor = mddev->md_minor; in super_90_sync()
1429 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); in super_90_sync()
1431 sb->events_hi = (mddev->events>>32); in super_90_sync()
1432 sb->events_lo = (u32)mddev->events; in super_90_sync()
1434 if (mddev->reshape_position == MaxSector) in super_90_sync()
1438 sb->reshape_position = mddev->reshape_position; in super_90_sync()
1439 sb->new_level = mddev->new_level; in super_90_sync()
1440 sb->delta_disks = mddev->delta_disks; in super_90_sync()
1441 sb->new_layout = mddev->new_layout; in super_90_sync()
1442 sb->new_chunk = mddev->new_chunk_sectors << 9; in super_90_sync()
1444 mddev->minor_version = sb->minor_version; in super_90_sync()
1445 if (mddev->in_sync) in super_90_sync()
1447 sb->recovery_cp = mddev->recovery_cp; in super_90_sync()
1448 sb->cp_events_hi = (mddev->events>>32); in super_90_sync()
1449 sb->cp_events_lo = (u32)mddev->events; in super_90_sync()
1450 if (mddev->recovery_cp == MaxSector) in super_90_sync()
1455 sb->layout = mddev->layout; in super_90_sync()
1456 sb->chunk_size = mddev->chunk_sectors << 9; in super_90_sync()
1458 if (mddev->bitmap && mddev->bitmap_info.file == NULL) in super_90_sync()
1462 rdev_for_each(rdev2, mddev) { in super_90_sync()
1510 for (i=0 ; i < mddev->raid_disks ; i++) { in super_90_sync()
1536 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_90_rdev_size_change()
1538 if (rdev->mddev->bitmap_info.offset) in super_90_rdev_size_change()
1546 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) in super_90_rdev_size_change()
1549 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, in super_90_rdev_size_change()
1551 } while (md_super_wait(rdev->mddev) < 0); in super_90_rdev_size_change()
1772 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) in super_1_validate() argument
1783 if (mddev->raid_disks == 0) { in super_1_validate()
1784 mddev->major_version = 1; in super_1_validate()
1785 mddev->patch_version = 0; in super_1_validate()
1786 mddev->external = 0; in super_1_validate()
1787 mddev->chunk_sectors = le32_to_cpu(sb->chunksize); in super_1_validate()
1788 mddev->ctime = le64_to_cpu(sb->ctime); in super_1_validate()
1789 mddev->utime = le64_to_cpu(sb->utime); in super_1_validate()
1790 mddev->level = le32_to_cpu(sb->level); in super_1_validate()
1791 mddev->clevel[0] = 0; in super_1_validate()
1792 mddev->layout = le32_to_cpu(sb->layout); in super_1_validate()
1793 mddev->raid_disks = le32_to_cpu(sb->raid_disks); in super_1_validate()
1794 mddev->dev_sectors = le64_to_cpu(sb->size); in super_1_validate()
1795 mddev->events = ev1; in super_1_validate()
1796 mddev->bitmap_info.offset = 0; in super_1_validate()
1797 mddev->bitmap_info.space = 0; in super_1_validate()
1801 mddev->bitmap_info.default_offset = 1024 >> 9; in super_1_validate()
1802 mddev->bitmap_info.default_space = (4096-1024) >> 9; in super_1_validate()
1803 mddev->reshape_backwards = 0; in super_1_validate()
1805 mddev->recovery_cp = le64_to_cpu(sb->resync_offset); in super_1_validate()
1806 memcpy(mddev->uuid, sb->set_uuid, 16); in super_1_validate()
1808 mddev->max_disks = (4096-256)/2; in super_1_validate()
1811 mddev->bitmap_info.file == NULL) { in super_1_validate()
1812 mddev->bitmap_info.offset = in super_1_validate()
1819 if (mddev->minor_version > 0) in super_1_validate()
1820 mddev->bitmap_info.space = 0; in super_1_validate()
1821 else if (mddev->bitmap_info.offset > 0) in super_1_validate()
1822 mddev->bitmap_info.space = in super_1_validate()
1823 8 - mddev->bitmap_info.offset; in super_1_validate()
1825 mddev->bitmap_info.space = in super_1_validate()
1826 -mddev->bitmap_info.offset; in super_1_validate()
1830 mddev->reshape_position = le64_to_cpu(sb->reshape_position); in super_1_validate()
1831 mddev->delta_disks = le32_to_cpu(sb->delta_disks); in super_1_validate()
1832 mddev->new_level = le32_to_cpu(sb->new_level); in super_1_validate()
1833 mddev->new_layout = le32_to_cpu(sb->new_layout); in super_1_validate()
1834 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk); in super_1_validate()
1835 if (mddev->delta_disks < 0 || in super_1_validate()
1836 (mddev->delta_disks == 0 && in super_1_validate()
1839 mddev->reshape_backwards = 1; in super_1_validate()
1841 mddev->reshape_position = MaxSector; in super_1_validate()
1842 mddev->delta_disks = 0; in super_1_validate()
1843 mddev->new_level = mddev->level; in super_1_validate()
1844 mddev->new_layout = mddev->layout; in super_1_validate()
1845 mddev->new_chunk_sectors = mddev->chunk_sectors; in super_1_validate()
1848 if (mddev->level == 0 && in super_1_validate()
1850 mddev->layout = -1; in super_1_validate()
1853 set_bit(MD_HAS_JOURNAL, &mddev->flags); in super_1_validate()
1864 set_bit(MD_HAS_PPL, &mddev->flags); in super_1_validate()
1866 } else if (mddev->pers == NULL) { in super_1_validate()
1874 if (ev1 < mddev->events) in super_1_validate()
1876 } else if (mddev->bitmap) { in super_1_validate()
1880 if (ev1 < mddev->bitmap->events_cleared) in super_1_validate()
1882 if (ev1 < mddev->events) in super_1_validate()
1885 if (ev1 < mddev->events) in super_1_validate()
1889 if (mddev->level != LEVEL_MULTIPATH) { in super_1_validate()
1927 &mddev->recovery)) in super_1_validate()
1945 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) in super_1_sync() argument
1959 sb->utime = cpu_to_le64((__u64)mddev->utime); in super_1_sync()
1960 sb->events = cpu_to_le64(mddev->events); in super_1_sync()
1961 if (mddev->in_sync) in super_1_sync()
1962 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); in super_1_sync()
1963 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags)) in super_1_sync()
1970 sb->raid_disks = cpu_to_le32(mddev->raid_disks); in super_1_sync()
1971 sb->size = cpu_to_le64(mddev->dev_sectors); in super_1_sync()
1972 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); in super_1_sync()
1973 sb->level = cpu_to_le32(mddev->level); in super_1_sync()
1974 sb->layout = cpu_to_le32(mddev->layout); in super_1_sync()
1987 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { in super_1_sync()
1988 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); in super_1_sync()
1998 if (rdev->saved_raid_disk >= 0 && mddev->bitmap) in super_1_sync()
2009 if (mddev->reshape_position != MaxSector) { in super_1_sync()
2011 sb->reshape_position = cpu_to_le64(mddev->reshape_position); in super_1_sync()
2012 sb->new_layout = cpu_to_le32(mddev->new_layout); in super_1_sync()
2013 sb->delta_disks = cpu_to_le32(mddev->delta_disks); in super_1_sync()
2014 sb->new_level = cpu_to_le32(mddev->new_level); in super_1_sync()
2015 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); in super_1_sync()
2016 if (mddev->delta_disks == 0 && in super_1_sync()
2017 mddev->reshape_backwards) in super_1_sync()
2028 if (mddev_is_clustered(mddev)) in super_1_sync()
2035 md_error(mddev, rdev); in super_1_sync()
2066 rdev_for_each(rdev2, mddev) in super_1_sync()
2083 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) in super_1_sync()
2086 if (test_bit(MD_HAS_PPL, &mddev->flags)) { in super_1_sync()
2087 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags)) in super_1_sync()
2096 rdev_for_each(rdev2, mddev) { in super_1_sync()
2136 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_1_rdev_size_change()
2145 } else if (rdev->mddev->bitmap_info.offset) { in super_1_rdev_size_change()
2173 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, in super_1_rdev_size_change()
2175 } while (md_super_wait(rdev->mddev) < 0); in super_1_rdev_size_change()
2191 if (rdev->mddev->minor_version == 0) in super_1_allow_new_offset()
2202 bitmap = rdev->mddev->bitmap; in super_1_allow_new_offset()
2203 if (bitmap && !rdev->mddev->bitmap_info.file && in super_1_allow_new_offset()
2204 rdev->sb_start + rdev->mddev->bitmap_info.offset + in super_1_allow_new_offset()
2234 static void sync_super(struct mddev *mddev, struct md_rdev *rdev) in sync_super() argument
2236 if (mddev->sync_super) { in sync_super()
2237 mddev->sync_super(mddev, rdev); in sync_super()
2241 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); in sync_super()
2243 super_types[mddev->major_version].sync_super(mddev, rdev); in sync_super()
2246 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) in match_mddev_units()
2280 int md_integrity_register(struct mddev *mddev) in md_integrity_register() argument
2284 if (list_empty(&mddev->disks)) in md_integrity_register()
2286 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) in md_integrity_register()
2288 rdev_for_each(rdev, mddev) { in md_integrity_register()
2310 blk_integrity_register(mddev->gendisk, in md_integrity_register()
2313 pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); in md_integrity_register()
2314 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) || in md_integrity_register()
2315 (mddev->level != 1 && mddev->level != 10 && in md_integrity_register()
2316 bioset_integrity_create(&mddev->io_acct_set, BIO_POOL_SIZE))) { in md_integrity_register()
2324 mdname(mddev)); in md_integrity_register()
2335 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) in md_integrity_add_rdev() argument
2339 if (!mddev->gendisk) in md_integrity_add_rdev()
2342 bi_mddev = blk_get_integrity(mddev->gendisk); in md_integrity_add_rdev()
2347 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) { in md_integrity_add_rdev()
2349 mdname(mddev), rdev->bdev); in md_integrity_add_rdev()
2363 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) in bind_rdev_to_array() argument
2369 if (find_rdev(mddev, rdev->bdev->bd_dev)) in bind_rdev_to_array()
2372 if (rdev_read_only(rdev) && mddev->pers) in bind_rdev_to_array()
2378 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) { in bind_rdev_to_array()
2379 if (mddev->pers) { in bind_rdev_to_array()
2384 if (mddev->level > 0) in bind_rdev_to_array()
2387 mddev->dev_sectors = rdev->sectors; in bind_rdev_to_array()
2397 if (mddev->pers) in bind_rdev_to_array()
2398 choice = mddev->raid_disks; in bind_rdev_to_array()
2399 while (md_find_rdev_nr_rcu(mddev, choice)) in bind_rdev_to_array()
2403 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { in bind_rdev_to_array()
2410 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { in bind_rdev_to_array()
2412 mdname(mddev), mddev->max_disks); in bind_rdev_to_array()
2418 rdev->mddev = mddev; in bind_rdev_to_array()
2421 if (mddev->raid_disks) in bind_rdev_to_array()
2422 mddev_create_serial_pool(mddev, rdev, false); in bind_rdev_to_array()
2424 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) in bind_rdev_to_array()
2435 list_add_rcu(&rdev->same_set, &mddev->disks); in bind_rdev_to_array()
2436 bd_link_disk_holder(rdev->bdev, mddev->gendisk); in bind_rdev_to_array()
2439 mddev->recovery_disabled++; in bind_rdev_to_array()
2445 b, mdname(mddev)); in bind_rdev_to_array()
2458 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); in unbind_rdev_from_array()
2461 mddev_destroy_serial_pool(rdev->mddev, rdev, false); in unbind_rdev_from_array()
2462 rdev->mddev = NULL; in unbind_rdev_from_array()
2530 static void export_array(struct mddev *mddev) in export_array() argument
2534 while (!list_empty(&mddev->disks)) { in export_array()
2535 rdev = list_first_entry(&mddev->disks, struct md_rdev, in export_array()
2539 mddev->raid_disks = 0; in export_array()
2540 mddev->major_version = 0; in export_array()
2543 static bool set_in_sync(struct mddev *mddev) in set_in_sync() argument
2545 lockdep_assert_held(&mddev->lock); in set_in_sync()
2546 if (!mddev->in_sync) { in set_in_sync()
2547 mddev->sync_checkers++; in set_in_sync()
2548 spin_unlock(&mddev->lock); in set_in_sync()
2549 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending); in set_in_sync()
2550 spin_lock(&mddev->lock); in set_in_sync()
2551 if (!mddev->in_sync && in set_in_sync()
2552 percpu_ref_is_zero(&mddev->writes_pending)) { in set_in_sync()
2553 mddev->in_sync = 1; in set_in_sync()
2559 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in set_in_sync()
2560 sysfs_notify_dirent_safe(mddev->sysfs_state); in set_in_sync()
2562 if (--mddev->sync_checkers == 0) in set_in_sync()
2563 percpu_ref_switch_to_percpu(&mddev->writes_pending); in set_in_sync()
2565 if (mddev->safemode == 1) in set_in_sync()
2566 mddev->safemode = 0; in set_in_sync()
2567 return mddev->in_sync; in set_in_sync()
2570 static void sync_sbs(struct mddev *mddev, int nospares) in sync_sbs() argument
2579 rdev_for_each(rdev, mddev) { in sync_sbs()
2580 if (rdev->sb_events == mddev->events || in sync_sbs()
2583 rdev->sb_events+1 == mddev->events)) { in sync_sbs()
2587 sync_super(mddev, rdev); in sync_sbs()
2593 static bool does_sb_need_changing(struct mddev *mddev) in does_sb_need_changing() argument
2600 rdev_for_each(iter, mddev) in does_sb_need_changing()
2612 rdev_for_each(rdev, mddev) { in does_sb_need_changing()
2624 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || in does_sb_need_changing()
2625 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || in does_sb_need_changing()
2626 (mddev->layout != le32_to_cpu(sb->layout)) || in does_sb_need_changing()
2627 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || in does_sb_need_changing()
2628 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) in does_sb_need_changing()
2634 void md_update_sb(struct mddev *mddev, int force_change) in md_update_sb() argument
2642 if (mddev->ro) { in md_update_sb()
2644 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_update_sb()
2649 if (mddev_is_clustered(mddev)) { in md_update_sb()
2650 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) in md_update_sb()
2652 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) in md_update_sb()
2654 ret = md_cluster_ops->metadata_update_start(mddev); in md_update_sb()
2656 if (!does_sb_need_changing(mddev)) { in md_update_sb()
2658 md_cluster_ops->metadata_update_cancel(mddev); in md_update_sb()
2659 bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), in md_update_sb()
2672 rdev_for_each(rdev, mddev) { in md_update_sb()
2674 mddev->delta_disks >= 0 && in md_update_sb()
2675 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && in md_update_sb()
2676 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) && in md_update_sb()
2677 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_update_sb()
2680 mddev->curr_resync_completed > rdev->recovery_offset) in md_update_sb()
2681 rdev->recovery_offset = mddev->curr_resync_completed; in md_update_sb()
2684 if (!mddev->persistent) { in md_update_sb()
2685 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_update_sb()
2686 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_update_sb()
2687 if (!mddev->external) { in md_update_sb()
2688 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_update_sb()
2689 rdev_for_each(rdev, mddev) { in md_update_sb()
2693 md_error(mddev, rdev); in md_update_sb()
2700 wake_up(&mddev->sb_wait); in md_update_sb()
2704 spin_lock(&mddev->lock); in md_update_sb()
2706 mddev->utime = ktime_get_real_seconds(); in md_update_sb()
2708 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) in md_update_sb()
2710 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) in md_update_sb()
2718 if (mddev->degraded) in md_update_sb()
2730 sync_req = mddev->in_sync; in md_update_sb()
2735 && (mddev->in_sync && mddev->recovery_cp == MaxSector) in md_update_sb()
2736 && mddev->can_decrease_events in md_update_sb()
2737 && mddev->events != 1) { in md_update_sb()
2738 mddev->events--; in md_update_sb()
2739 mddev->can_decrease_events = 0; in md_update_sb()
2742 mddev->events ++; in md_update_sb()
2743 mddev->can_decrease_events = nospares; in md_update_sb()
2751 WARN_ON(mddev->events == 0); in md_update_sb()
2753 rdev_for_each(rdev, mddev) { in md_update_sb()
2760 sync_sbs(mddev, nospares); in md_update_sb()
2761 spin_unlock(&mddev->lock); in md_update_sb()
2764 mdname(mddev), mddev->in_sync); in md_update_sb()
2766 if (mddev->queue) in md_update_sb()
2767 blk_add_trace_msg(mddev->queue, "md md_update_sb"); in md_update_sb()
2769 md_bitmap_update_sb(mddev->bitmap); in md_update_sb()
2770 rdev_for_each(rdev, mddev) { in md_update_sb()
2775 md_super_write(mddev,rdev, in md_update_sb()
2781 rdev->sb_events = mddev->events; in md_update_sb()
2783 md_super_write(mddev, rdev, in md_update_sb()
2794 if (mddev->level == LEVEL_MULTIPATH) in md_update_sb()
2798 if (md_super_wait(mddev) < 0) in md_update_sb()
2802 if (mddev_is_clustered(mddev) && ret == 0) in md_update_sb()
2803 md_cluster_ops->metadata_update_finish(mddev); in md_update_sb()
2805 if (mddev->in_sync != sync_req || in md_update_sb()
2806 !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), in md_update_sb()
2810 wake_up(&mddev->sb_wait); in md_update_sb()
2811 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in md_update_sb()
2812 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_update_sb()
2814 rdev_for_each(rdev, mddev) { in md_update_sb()
2828 struct mddev *mddev = rdev->mddev; in add_bound_rdev() local
2832 if (!mddev->pers->hot_remove_disk || add_journal) { in add_bound_rdev()
2837 super_types[mddev->major_version]. in add_bound_rdev()
2838 validate_super(mddev, rdev); in add_bound_rdev()
2840 mddev_suspend(mddev); in add_bound_rdev()
2841 err = mddev->pers->hot_add_disk(mddev, rdev); in add_bound_rdev()
2843 mddev_resume(mddev); in add_bound_rdev()
2851 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in add_bound_rdev()
2852 if (mddev->degraded) in add_bound_rdev()
2853 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in add_bound_rdev()
2854 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in add_bound_rdev()
2856 md_wakeup_thread(mddev->thread); in add_bound_rdev()
2946 struct mddev *mddev = rdev->mddev; in state_store() local
2950 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { in state_store()
2951 md_error(rdev->mddev, rdev); in state_store()
2953 if (test_bit(MD_BROKEN, &rdev->mddev->flags)) in state_store()
2958 if (rdev->mddev->pers) { in state_store()
2960 remove_and_add_spares(rdev->mddev, rdev); in state_store()
2966 if (mddev_is_clustered(mddev)) in state_store()
2967 err = md_cluster_ops->remove_disk(mddev, rdev); in state_store()
2971 if (mddev->pers) { in state_store()
2972 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in state_store()
2973 md_wakeup_thread(mddev->thread); in state_store()
2980 mddev_create_serial_pool(rdev->mddev, rdev, false); in state_store()
2984 mddev_destroy_serial_pool(rdev->mddev, rdev, false); in state_store()
2998 md_error(rdev->mddev, rdev); in state_store()
3003 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in state_store()
3004 md_wakeup_thread(rdev->mddev->thread); in state_store()
3020 if (rdev->mddev->pers == NULL) { in state_store()
3041 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in state_store()
3042 md_wakeup_thread(rdev->mddev->thread); in state_store()
3055 if (rdev->mddev->pers) in state_store()
3063 if (rdev->mddev->pers) in state_store()
3070 if (!rdev->mddev->pers) in state_store()
3080 if (!mddev_is_clustered(rdev->mddev) || in state_store()
3087 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) { in state_store()
3091 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) { in state_store()
3096 md_update_sb(mddev, 1); in state_store()
3151 if (rdev->mddev->pers && slot == -1) { in slot_store()
3162 if (rdev->mddev->pers->hot_remove_disk == NULL) in slot_store()
3165 remove_and_add_spares(rdev->mddev, rdev); in slot_store()
3168 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in slot_store()
3169 md_wakeup_thread(rdev->mddev->thread); in slot_store()
3170 } else if (rdev->mddev->pers) { in slot_store()
3179 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) in slot_store()
3182 if (rdev->mddev->pers->hot_add_disk == NULL) in slot_store()
3185 if (slot >= rdev->mddev->raid_disks && in slot_store()
3186 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) in slot_store()
3196 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev); in slot_store()
3203 sysfs_link_rdev(rdev->mddev, rdev); in slot_store()
3206 if (slot >= rdev->mddev->raid_disks && in slot_store()
3207 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) in slot_store()
3234 if (rdev->mddev->pers && rdev->raid_disk >= 0) in offset_store()
3236 if (rdev->sectors && rdev->mddev->external) in offset_store()
3258 struct mddev *mddev = rdev->mddev; in new_offset_store() local
3263 if (mddev->sync_thread || in new_offset_store()
3264 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) in new_offset_store()
3272 + mddev->dev_sectors > rdev->sectors) in new_offset_store()
3281 mddev->reshape_backwards) in new_offset_store()
3288 !mddev->reshape_backwards) in new_offset_store()
3291 if (mddev->pers && mddev->persistent && in new_offset_store()
3292 !super_types[mddev->major_version] in new_offset_store()
3297 mddev->reshape_backwards = 1; in new_offset_store()
3299 mddev->reshape_backwards = 0; in new_offset_store()
3324 struct mddev *mddev; in md_rdev_overlaps() local
3328 list_for_each_entry(mddev, &all_mddevs, all_mddevs) { in md_rdev_overlaps()
3329 if (test_bit(MD_DELETED, &mddev->flags)) in md_rdev_overlaps()
3331 rdev_for_each(rdev2, mddev) { in md_rdev_overlaps()
3365 struct mddev *my_mddev = rdev->mddev; in rdev_size_store()
3434 if (rdev->mddev->pers && in recovery_start_store()
3502 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && in ppl_sector_store()
3506 if (rdev->mddev->persistent) { in ppl_sector_store()
3507 if (rdev->mddev->major_version == 0) in ppl_sector_store()
3515 } else if (!rdev->mddev->external) { in ppl_sector_store()
3539 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && in ppl_size_store()
3543 if (rdev->mddev->persistent) { in ppl_size_store()
3544 if (rdev->mddev->major_version == 0) in ppl_size_store()
3548 } else if (!rdev->mddev->external) { in ppl_size_store()
3581 if (!rdev->mddev) in rdev_attr_show()
3593 struct mddev *mddev = rdev->mddev; in rdev_attr_store() local
3599 rv = mddev ? mddev_lock(mddev) : -ENODEV; in rdev_attr_store()
3601 if (rdev->mddev == NULL) in rdev_attr_store()
3605 mddev_unlock(mddev); in rdev_attr_store()
3722 static int analyze_sbs(struct mddev *mddev) in analyze_sbs() argument
3728 rdev_for_each_safe(rdev, tmp, mddev) in analyze_sbs()
3729 switch (super_types[mddev->major_version]. in analyze_sbs()
3730 load_super(rdev, freshest, mddev->minor_version)) { in analyze_sbs()
3748 super_types[mddev->major_version]. in analyze_sbs()
3749 validate_super(mddev, freshest); in analyze_sbs()
3752 rdev_for_each_safe(rdev, tmp, mddev) { in analyze_sbs()
3753 if (mddev->max_disks && in analyze_sbs()
3754 (rdev->desc_nr >= mddev->max_disks || in analyze_sbs()
3755 i > mddev->max_disks)) { in analyze_sbs()
3757 mdname(mddev), rdev->bdev, in analyze_sbs()
3758 mddev->max_disks); in analyze_sbs()
3763 if (super_types[mddev->major_version]. in analyze_sbs()
3764 validate_super(mddev, rdev)) { in analyze_sbs()
3771 if (mddev->level == LEVEL_MULTIPATH) { in analyze_sbs()
3776 (mddev->raid_disks - min(0, mddev->delta_disks)) && in analyze_sbs()
3823 safe_delay_show(struct mddev *mddev, char *page) in safe_delay_show() argument
3825 int msec = (mddev->safemode_delay*1000)/HZ; in safe_delay_show()
3829 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) in safe_delay_store() argument
3833 if (mddev_is_clustered(mddev)) { in safe_delay_store()
3841 mddev->safemode_delay = 0; in safe_delay_store()
3843 unsigned long old_delay = mddev->safemode_delay; in safe_delay_store()
3848 mddev->safemode_delay = new_delay; in safe_delay_store()
3850 mod_timer(&mddev->safemode_timer, jiffies+1); in safe_delay_store()
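
safe_delay_show()/safe_delay_store() above expose the safemode delay in milliseconds while storing it in jiffies. A small round-trip sketch of that conversion, with an assumed tick rate (HZ_MODEL is made up for illustration):

#include <stdio.h>

#define HZ_MODEL 250   /* assumed tick rate */

static unsigned long msec_to_jiffies_model(unsigned long msec)
{
        unsigned long j = (msec * HZ_MODEL) / 1000;
        return j ? j : 1;     /* never let a non-zero delay round down to 0 */
}

static unsigned long jiffies_to_msec_model(unsigned long j)
{
        return (j * 1000) / HZ_MODEL;
}

int main(void)
{
        unsigned long j = msec_to_jiffies_model(200);
        printf("200 ms -> %lu jiffies -> %lu ms\n", j, jiffies_to_msec_model(j));
        return 0;
}
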
3858 level_show(struct mddev *mddev, char *page) in level_show() argument
3862 spin_lock(&mddev->lock); in level_show()
3863 p = mddev->pers; in level_show()
3866 else if (mddev->clevel[0]) in level_show()
3867 ret = sprintf(page, "%s\n", mddev->clevel); in level_show()
3868 else if (mddev->level != LEVEL_NONE) in level_show()
3869 ret = sprintf(page, "%d\n", mddev->level); in level_show()
3872 spin_unlock(&mddev->lock); in level_show()
3877 level_store(struct mddev *mddev, const char *buf, size_t len) in level_store() argument
3890 rv = mddev_lock(mddev); in level_store()
3894 if (mddev->pers == NULL) { in level_store()
3895 strncpy(mddev->clevel, buf, slen); in level_store()
3896 if (mddev->clevel[slen-1] == '\n') in level_store()
3898 mddev->clevel[slen] = 0; in level_store()
3899 mddev->level = LEVEL_NONE; in level_store()
3904 if (mddev->ro) in level_store()
3914 if (mddev->sync_thread || in level_store()
3915 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in level_store()
3916 mddev->reshape_position != MaxSector || in level_store()
3917 mddev->sysfs_active) in level_store()
3921 if (!mddev->pers->quiesce) { in level_store()
3923 mdname(mddev), mddev->pers->name); in level_store()
3947 if (pers == mddev->pers) { in level_store()
3956 mdname(mddev), clevel); in level_store()
3961 rdev_for_each(rdev, mddev) in level_store()
3967 priv = pers->takeover(mddev); in level_store()
3969 mddev->new_level = mddev->level; in level_store()
3970 mddev->new_layout = mddev->layout; in level_store()
3971 mddev->new_chunk_sectors = mddev->chunk_sectors; in level_store()
3972 mddev->raid_disks -= mddev->delta_disks; in level_store()
3973 mddev->delta_disks = 0; in level_store()
3974 mddev->reshape_backwards = 0; in level_store()
3977 mdname(mddev), clevel); in level_store()
3983 mddev_suspend(mddev); in level_store()
3984 mddev_detach(mddev); in level_store()
3986 spin_lock(&mddev->lock); in level_store()
3987 oldpers = mddev->pers; in level_store()
3988 oldpriv = mddev->private; in level_store()
3989 mddev->pers = pers; in level_store()
3990 mddev->private = priv; in level_store()
3991 strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); in level_store()
3992 mddev->level = mddev->new_level; in level_store()
3993 mddev->layout = mddev->new_layout; in level_store()
3994 mddev->chunk_sectors = mddev->new_chunk_sectors; in level_store()
3995 mddev->delta_disks = 0; in level_store()
3996 mddev->reshape_backwards = 0; in level_store()
3997 mddev->degraded = 0; in level_store()
3998 spin_unlock(&mddev->lock); in level_store()
4001 mddev->external) { in level_store()
4009 mddev->in_sync = 0; in level_store()
4010 mddev->safemode_delay = 0; in level_store()
4011 mddev->safemode = 0; in level_store()
4014 oldpers->free(mddev, oldpriv); in level_store()
4019 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) in level_store()
4021 mdname(mddev)); in level_store()
4022 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); in level_store()
4023 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); in level_store()
4024 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); in level_store()
4029 if (mddev->to_remove == NULL) in level_store()
4030 mddev->to_remove = &md_redundancy_group; in level_store()
4035 rdev_for_each(rdev, mddev) { in level_store()
4038 if (rdev->new_raid_disk >= mddev->raid_disks) in level_store()
4042 sysfs_unlink_rdev(mddev, rdev); in level_store()
4044 rdev_for_each(rdev, mddev) { in level_store()
4053 if (sysfs_link_rdev(mddev, rdev)) in level_store()
4055 rdev->raid_disk, mdname(mddev)); in level_store()
4063 mddev->in_sync = 1; in level_store()
4064 del_timer_sync(&mddev->safemode_timer); in level_store()
4066 blk_set_stacking_limits(&mddev->queue->limits); in level_store()
4067 pers->run(mddev); in level_store()
4068 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in level_store()
4069 mddev_resume(mddev); in level_store()
4070 if (!mddev->thread) in level_store()
4071 md_update_sb(mddev, 1); in level_store()
4072 sysfs_notify_dirent_safe(mddev->sysfs_level); in level_store()
4076 mddev_unlock(mddev); in level_store()
4084 layout_show(struct mddev *mddev, char *page) in layout_show() argument
4087 if (mddev->reshape_position != MaxSector && in layout_show()
4088 mddev->layout != mddev->new_layout) in layout_show()
4090 mddev->new_layout, mddev->layout); in layout_show()
4091 return sprintf(page, "%d\n", mddev->layout); in layout_show()
4095 layout_store(struct mddev *mddev, const char *buf, size_t len) in layout_store() argument
4103 err = mddev_lock(mddev); in layout_store()
4107 if (mddev->pers) { in layout_store()
4108 if (mddev->pers->check_reshape == NULL) in layout_store()
4110 else if (mddev->ro) in layout_store()
4113 mddev->new_layout = n; in layout_store()
4114 err = mddev->pers->check_reshape(mddev); in layout_store()
4116 mddev->new_layout = mddev->layout; in layout_store()
4119 mddev->new_layout = n; in layout_store()
4120 if (mddev->reshape_position == MaxSector) in layout_store()
4121 mddev->layout = n; in layout_store()
4123 mddev_unlock(mddev); in layout_store()
4130 raid_disks_show(struct mddev *mddev, char *page) in raid_disks_show() argument
4132 if (mddev->raid_disks == 0) in raid_disks_show()
4134 if (mddev->reshape_position != MaxSector && in raid_disks_show()
4135 mddev->delta_disks != 0) in raid_disks_show()
4136 return sprintf(page, "%d (%d)\n", mddev->raid_disks, in raid_disks_show()
4137 mddev->raid_disks - mddev->delta_disks); in raid_disks_show()
4138 return sprintf(page, "%d\n", mddev->raid_disks); in raid_disks_show()
4141 static int update_raid_disks(struct mddev *mddev, int raid_disks);
4144 raid_disks_store(struct mddev *mddev, const char *buf, size_t len) in raid_disks_store() argument
4153 err = mddev_lock(mddev); in raid_disks_store()
4156 if (mddev->pers) in raid_disks_store()
4157 err = update_raid_disks(mddev, n); in raid_disks_store()
4158 else if (mddev->reshape_position != MaxSector) { in raid_disks_store()
4160 int olddisks = mddev->raid_disks - mddev->delta_disks; in raid_disks_store()
4163 rdev_for_each(rdev, mddev) { in raid_disks_store()
4172 mddev->delta_disks = n - olddisks; in raid_disks_store()
4173 mddev->raid_disks = n; in raid_disks_store()
4174 mddev->reshape_backwards = (mddev->delta_disks < 0); in raid_disks_store()
4176 mddev->raid_disks = n; in raid_disks_store()
4178 mddev_unlock(mddev); in raid_disks_store()
4185 uuid_show(struct mddev *mddev, char *page) in uuid_show() argument
4187 return sprintf(page, "%pU\n", mddev->uuid); in uuid_show()
4193 chunk_size_show(struct mddev *mddev, char *page) in chunk_size_show() argument
4195 if (mddev->reshape_position != MaxSector && in chunk_size_show()
4196 mddev->chunk_sectors != mddev->new_chunk_sectors) in chunk_size_show()
4198 mddev->new_chunk_sectors << 9, in chunk_size_show()
4199 mddev->chunk_sectors << 9); in chunk_size_show()
4200 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); in chunk_size_show()
4204 chunk_size_store(struct mddev *mddev, const char *buf, size_t len) in chunk_size_store() argument
4213 err = mddev_lock(mddev); in chunk_size_store()
4216 if (mddev->pers) { in chunk_size_store()
4217 if (mddev->pers->check_reshape == NULL) in chunk_size_store()
4219 else if (mddev->ro) in chunk_size_store()
4222 mddev->new_chunk_sectors = n >> 9; in chunk_size_store()
4223 err = mddev->pers->check_reshape(mddev); in chunk_size_store()
4225 mddev->new_chunk_sectors = mddev->chunk_sectors; in chunk_size_store()
4228 mddev->new_chunk_sectors = n >> 9; in chunk_size_store()
4229 if (mddev->reshape_position == MaxSector) in chunk_size_store()
4230 mddev->chunk_sectors = n >> 9; in chunk_size_store()
4232 mddev_unlock(mddev); in chunk_size_store()
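
layout_store() and chunk_size_store() above stage the requested value in the new_* field, ask the personality's check_reshape() to validate it, and restore the old value when validation fails. A compact model of that stage/validate/revert pattern, with a made-up validator in place of check_reshape():

#include <stdio.h>

struct geom_model { int chunk, new_chunk; };

/* Hypothetical validator standing in for pers->check_reshape():
 * here it only accepts power-of-two chunk sizes. */
static int check_reshape_model(const struct geom_model *g)
{
        return (g->new_chunk & (g->new_chunk - 1)) ? -1 : 0;
}

/* Stage the new chunk size, validate it, roll back on failure. */
static int set_chunk_model(struct geom_model *g, int n)
{
        g->new_chunk = n;
        if (check_reshape_model(g)) {
                g->new_chunk = g->chunk;    /* revert the staged value */
                return -1;
        }
        return 0;
}

int main(void)
{
        struct geom_model g = { .chunk = 128, .new_chunk = 128 };
        printf("%d\n", set_chunk_model(&g, 96));    /* -1: rejected, reverted */
        printf("%d\n", set_chunk_model(&g, 256));   /*  0: accepted           */
        return 0;
}
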
4239 resync_start_show(struct mddev *mddev, char *page) in resync_start_show() argument
4241 if (mddev->recovery_cp == MaxSector) in resync_start_show()
4243 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); in resync_start_show()
4247 resync_start_store(struct mddev *mddev, const char *buf, size_t len) in resync_start_store() argument
4262 err = mddev_lock(mddev); in resync_start_store()
4265 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) in resync_start_store()
4269 mddev->recovery_cp = n; in resync_start_store()
4270 if (mddev->pers) in resync_start_store()
4271 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in resync_start_store()
4273 mddev_unlock(mddev); in resync_start_store()
4336 array_state_show(struct mddev *mddev, char *page) in array_state_show() argument
4340 if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) { in array_state_show()
4341 switch(mddev->ro) { in array_state_show()
4349 spin_lock(&mddev->lock); in array_state_show()
4350 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) in array_state_show()
4352 else if (mddev->in_sync) in array_state_show()
4354 else if (mddev->safemode) in array_state_show()
4358 spin_unlock(&mddev->lock); in array_state_show()
4361 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean) in array_state_show()
4364 if (list_empty(&mddev->disks) && in array_state_show()
4365 mddev->raid_disks == 0 && in array_state_show()
4366 mddev->dev_sectors == 0) in array_state_show()
4374 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4375 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
4376 static int restart_array(struct mddev *mddev);
4379 array_state_store(struct mddev *mddev, const char *buf, size_t len) in array_state_store() argument
4384 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) { in array_state_store()
4388 spin_lock(&mddev->lock); in array_state_store()
4390 restart_array(mddev); in array_state_store()
4391 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in array_state_store()
4392 md_wakeup_thread(mddev->thread); in array_state_store()
4393 wake_up(&mddev->sb_wait); in array_state_store()
4395 restart_array(mddev); in array_state_store()
4396 if (!set_in_sync(mddev)) in array_state_store()
4400 sysfs_notify_dirent_safe(mddev->sysfs_state); in array_state_store()
4401 spin_unlock(&mddev->lock); in array_state_store()
4404 err = mddev_lock(mddev); in array_state_store()
4413 err = do_md_stop(mddev, 0, NULL); in array_state_store()
4417 if (mddev->pers) in array_state_store()
4418 err = do_md_stop(mddev, 2, NULL); in array_state_store()
4425 if (mddev->pers) in array_state_store()
4426 err = md_set_readonly(mddev, NULL); in array_state_store()
4428 mddev->ro = 1; in array_state_store()
4429 set_disk_ro(mddev->gendisk, 1); in array_state_store()
4430 err = do_md_run(mddev); in array_state_store()
4434 if (mddev->pers) { in array_state_store()
4435 if (mddev->ro == 0) in array_state_store()
4436 err = md_set_readonly(mddev, NULL); in array_state_store()
4437 else if (mddev->ro == 1) in array_state_store()
4438 err = restart_array(mddev); in array_state_store()
4440 mddev->ro = 2; in array_state_store()
4441 set_disk_ro(mddev->gendisk, 0); in array_state_store()
4444 mddev->ro = 2; in array_state_store()
4445 err = do_md_run(mddev); in array_state_store()
4449 if (mddev->pers) { in array_state_store()
4450 err = restart_array(mddev); in array_state_store()
4453 spin_lock(&mddev->lock); in array_state_store()
4454 if (!set_in_sync(mddev)) in array_state_store()
4456 spin_unlock(&mddev->lock); in array_state_store()
4461 if (mddev->pers) { in array_state_store()
4462 err = restart_array(mddev); in array_state_store()
4465 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in array_state_store()
4466 wake_up(&mddev->sb_wait); in array_state_store()
4469 mddev->ro = 0; in array_state_store()
4470 set_disk_ro(mddev->gendisk, 0); in array_state_store()
4471 err = do_md_run(mddev); in array_state_store()
4482 if (mddev->hold_active == UNTIL_IOCTL) in array_state_store()
4483 mddev->hold_active = 0; in array_state_store()
4484 sysfs_notify_dirent_safe(mddev->sysfs_state); in array_state_store()
4486 mddev_unlock(mddev); in array_state_store()
4493 max_corrected_read_errors_show(struct mddev *mddev, char *page) { in max_corrected_read_errors_show() argument
4495 atomic_read(&mddev->max_corr_read_errors)); in max_corrected_read_errors_show()
4499 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) in max_corrected_read_errors_store() argument
4507 atomic_set(&mddev->max_corr_read_errors, n); in max_corrected_read_errors_store()
4516 null_show(struct mddev *mddev, char *page) in null_show() argument
4522 static void flush_rdev_wq(struct mddev *mddev) in flush_rdev_wq() argument
4527 rdev_for_each_rcu(rdev, mddev) in flush_rdev_wq()
4536 new_dev_store(struct mddev *mddev, const char *buf, size_t len) in new_dev_store() argument
4562 flush_rdev_wq(mddev); in new_dev_store()
4563 err = mddev_lock(mddev); in new_dev_store()
4566 if (mddev->persistent) { in new_dev_store()
4567 rdev = md_import_device(dev, mddev->major_version, in new_dev_store()
4568 mddev->minor_version); in new_dev_store()
4569 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { in new_dev_store()
4571 = list_entry(mddev->disks.next, in new_dev_store()
4573 err = super_types[mddev->major_version] in new_dev_store()
4574 .load_super(rdev, rdev0, mddev->minor_version); in new_dev_store()
4578 } else if (mddev->external) in new_dev_store()
4584 mddev_unlock(mddev); in new_dev_store()
4587 err = bind_rdev_to_array(rdev, mddev); in new_dev_store()
4591 mddev_unlock(mddev); in new_dev_store()
4601 bitmap_store(struct mddev *mddev, const char *buf, size_t len) in bitmap_store() argument
4607 err = mddev_lock(mddev); in bitmap_store()
4610 if (!mddev->bitmap) in bitmap_store()
4622 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); in bitmap_store()
4625 md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ in bitmap_store()
4627 mddev_unlock(mddev); in bitmap_store()
4635 size_show(struct mddev *mddev, char *page) in size_show() argument
4638 (unsigned long long)mddev->dev_sectors / 2); in size_show()
4641 static int update_size(struct mddev *mddev, sector_t num_sectors);
4644 size_store(struct mddev *mddev, const char *buf, size_t len) in size_store() argument
4655 err = mddev_lock(mddev); in size_store()
4658 if (mddev->pers) { in size_store()
4659 err = update_size(mddev, sectors); in size_store()
4661 md_update_sb(mddev, 1); in size_store()
4663 if (mddev->dev_sectors == 0 || in size_store()
4664 mddev->dev_sectors > sectors) in size_store()
4665 mddev->dev_sectors = sectors; in size_store()
4669 mddev_unlock(mddev); in size_store()
4683 metadata_show(struct mddev *mddev, char *page) in metadata_show() argument
4685 if (mddev->persistent) in metadata_show()
4687 mddev->major_version, mddev->minor_version); in metadata_show()
4688 else if (mddev->external) in metadata_show()
4689 return sprintf(page, "external:%s\n", mddev->metadata_type); in metadata_show()
4695 metadata_store(struct mddev *mddev, const char *buf, size_t len) in metadata_store() argument
4705 err = mddev_lock(mddev); in metadata_store()
4709 if (mddev->external && strncmp(buf, "external:", 9) == 0) in metadata_store()
4711 else if (!list_empty(&mddev->disks)) in metadata_store()
4716 mddev->persistent = 0; in metadata_store()
4717 mddev->external = 0; in metadata_store()
4718 mddev->major_version = 0; in metadata_store()
4719 mddev->minor_version = 90; in metadata_store()
4724 if (namelen >= sizeof(mddev->metadata_type)) in metadata_store()
4725 namelen = sizeof(mddev->metadata_type)-1; in metadata_store()
4726 strncpy(mddev->metadata_type, buf+9, namelen); in metadata_store()
4727 mddev->metadata_type[namelen] = 0; in metadata_store()
4728 if (namelen && mddev->metadata_type[namelen-1] == '\n') in metadata_store()
4729 mddev->metadata_type[--namelen] = 0; in metadata_store()
4730 mddev->persistent = 0; in metadata_store()
4731 mddev->external = 1; in metadata_store()
4732 mddev->major_version = 0; in metadata_store()
4733 mddev->minor_version = 90; in metadata_store()
4747 mddev->major_version = major; in metadata_store()
4748 mddev->minor_version = minor; in metadata_store()
4749 mddev->persistent = 1; in metadata_store()
4750 mddev->external = 0; in metadata_store()
4753 mddev_unlock(mddev); in metadata_store()
4761 action_show(struct mddev *mddev, char *page) in action_show() argument
4764 unsigned long recovery = mddev->recovery; in action_show()
4768 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) { in action_show()
4780 else if (mddev->reshape_position != MaxSector) in action_show()
4787 action_store(struct mddev *mddev, const char *page, size_t len) in action_store() argument
4789 if (!mddev->pers || !mddev->pers->sync_request) in action_store()
4795 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4797 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4798 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && in action_store()
4799 mddev_lock(mddev) == 0) { in action_store()
4800 if (work_pending(&mddev->del_work)) in action_store()
4802 if (mddev->sync_thread) { in action_store()
4803 sector_t save_rp = mddev->reshape_position; in action_store()
4805 mddev_unlock(mddev); in action_store()
4806 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in action_store()
4807 md_unregister_thread(&mddev->sync_thread); in action_store()
4808 mddev_lock_nointr(mddev); in action_store()
4815 mddev->reshape_position = save_rp; in action_store()
4816 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in action_store()
4817 md_reap_sync_thread(mddev); in action_store()
4819 mddev_unlock(mddev); in action_store()
4821 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in action_store()
4824 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4826 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4827 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in action_store()
4830 if (mddev->pers->start_reshape == NULL) in action_store()
4832 err = mddev_lock(mddev); in action_store()
4834 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in action_store()
4837 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4838 err = mddev->pers->start_reshape(mddev); in action_store()
4840 mddev_unlock(mddev); in action_store()
4844 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in action_store()
4847 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); in action_store()
4850 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4851 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in action_store()
4852 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); in action_store()
4854 if (mddev->ro == 2) { in action_store()
4858 mddev->ro = 0; in action_store()
4859 md_wakeup_thread(mddev->sync_thread); in action_store()
4861 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in action_store()
4862 md_wakeup_thread(mddev->thread); in action_store()
4863 sysfs_notify_dirent_safe(mddev->sysfs_action); in action_store()
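action_show()/action_store() above implement the per-array sync_action attribute, normally exposed as /sys/block/mdX/md/sync_action. A minimal user-space sketch that requests a scrub might look like the following; the device path is illustrative, and the keywords correspond to the branches visible in action_store() (frozen, idle, reshape, check, repair, and the plain recovery/resync cases):

#include <stdio.h>

int main(void)
{
	/* Illustrative path; arrays appear under /sys/block/md<N>/md/. */
	FILE *f = fopen("/sys/block/md0/md/sync_action", "w");

	if (!f) {
		perror("sync_action");
		return 1;
	}
	/* "check" requests a read-only scrub; "repair", "idle" and "frozen"
	 * are among the other keywords action_store() accepts. */
	fputs("check\n", f);
	return fclose(f) ? 1 : 0;
}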
4871 last_sync_action_show(struct mddev *mddev, char *page) in last_sync_action_show() argument
4873 return sprintf(page, "%s\n", mddev->last_sync_action); in last_sync_action_show()
4879 mismatch_cnt_show(struct mddev *mddev, char *page) in mismatch_cnt_show() argument
4883 atomic64_read(&mddev->resync_mismatches)); in mismatch_cnt_show()
4889 sync_min_show(struct mddev *mddev, char *page) in sync_min_show() argument
4891 return sprintf(page, "%d (%s)\n", speed_min(mddev), in sync_min_show()
4892 mddev->sync_speed_min ? "local": "system"); in sync_min_show()
4896 sync_min_store(struct mddev *mddev, const char *buf, size_t len) in sync_min_store() argument
4910 mddev->sync_speed_min = min; in sync_min_store()
4918 sync_max_show(struct mddev *mddev, char *page) in sync_max_show() argument
4920 return sprintf(page, "%d (%s)\n", speed_max(mddev), in sync_max_show()
4921 mddev->sync_speed_max ? "local": "system"); in sync_max_show()
4925 sync_max_store(struct mddev *mddev, const char *buf, size_t len) in sync_max_store() argument
4939 mddev->sync_speed_max = max; in sync_max_store()
4947 degraded_show(struct mddev *mddev, char *page) in degraded_show() argument
4949 return sprintf(page, "%d\n", mddev->degraded); in degraded_show()
4954 sync_force_parallel_show(struct mddev *mddev, char *page) in sync_force_parallel_show() argument
4956 return sprintf(page, "%d\n", mddev->parallel_resync); in sync_force_parallel_show()
4960 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) in sync_force_parallel_store() argument
4970 mddev->parallel_resync = n; in sync_force_parallel_store()
4972 if (mddev->sync_thread) in sync_force_parallel_store()
4984 sync_speed_show(struct mddev *mddev, char *page) in sync_speed_show() argument
4987 if (mddev->curr_resync == MD_RESYNC_NONE) in sync_speed_show()
4989 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); in sync_speed_show()
4990 dt = (jiffies - mddev->resync_mark) / HZ; in sync_speed_show()
4992 db = resync - mddev->resync_mark_cnt; in sync_speed_show()
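The rate reported by sync_speed_show() follows from the deltas computed above: db is the number of 512-byte sectors completed since resync_mark and dt the elapsed seconds, so the attribute prints roughly db / 2 / dt, i.e. KiB per second (the final sprintf is not in this excerpt). For example, 1,000,000 sectors completed over 5 seconds reads back as 100000.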
4999 sync_completed_show(struct mddev *mddev, char *page) in sync_completed_show() argument
5003 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in sync_completed_show()
5006 if (mddev->curr_resync == MD_RESYNC_YIELDED || in sync_completed_show()
5007 mddev->curr_resync == MD_RESYNC_DELAYED) in sync_completed_show()
5010 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in sync_completed_show()
5011 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in sync_completed_show()
5012 max_sectors = mddev->resync_max_sectors; in sync_completed_show()
5014 max_sectors = mddev->dev_sectors; in sync_completed_show()
5016 resync = mddev->curr_resync_completed; in sync_completed_show()
5024 min_sync_show(struct mddev *mddev, char *page) in min_sync_show() argument
5027 (unsigned long long)mddev->resync_min); in min_sync_show()
5030 min_sync_store(struct mddev *mddev, const char *buf, size_t len) in min_sync_store() argument
5038 spin_lock(&mddev->lock); in min_sync_store()
5040 if (min > mddev->resync_max) in min_sync_store()
5044 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in min_sync_store()
5048 mddev->resync_min = round_down(min, 8); in min_sync_store()
5052 spin_unlock(&mddev->lock); in min_sync_store()
5060 max_sync_show(struct mddev *mddev, char *page) in max_sync_show() argument
5062 if (mddev->resync_max == MaxSector) in max_sync_show()
5066 (unsigned long long)mddev->resync_max); in max_sync_show()
5069 max_sync_store(struct mddev *mddev, const char *buf, size_t len) in max_sync_store() argument
5072 spin_lock(&mddev->lock); in max_sync_store()
5074 mddev->resync_max = MaxSector; in max_sync_store()
5082 if (max < mddev->resync_min) in max_sync_store()
5086 if (max < mddev->resync_max && in max_sync_store()
5087 mddev->ro == 0 && in max_sync_store()
5088 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in max_sync_store()
5092 chunk = mddev->chunk_sectors; in max_sync_store()
5100 mddev->resync_max = max; in max_sync_store()
5102 wake_up(&mddev->recovery_wait); in max_sync_store()
5105 spin_unlock(&mddev->lock); in max_sync_store()
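Two alignment rules are visible in the min/max handlers above: min_sync_store() stores round_down(min, 8), so resync_min always lands on an 8-sector (4 KiB) boundary — writing 1001, for instance, stores 1000 — while max_sync_store() loads chunk_sectors and, in lines omitted from this excerpt, is expected to reject a resync_max that is not a multiple of the chunk size while the array is active.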
5113 suspend_lo_show(struct mddev *mddev, char *page) in suspend_lo_show() argument
5115 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); in suspend_lo_show()
5119 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) in suspend_lo_store() argument
5130 err = mddev_lock(mddev); in suspend_lo_store()
5134 if (mddev->pers == NULL || in suspend_lo_store()
5135 mddev->pers->quiesce == NULL) in suspend_lo_store()
5137 mddev_suspend(mddev); in suspend_lo_store()
5138 mddev->suspend_lo = new; in suspend_lo_store()
5139 mddev_resume(mddev); in suspend_lo_store()
5143 mddev_unlock(mddev); in suspend_lo_store()
5150 suspend_hi_show(struct mddev *mddev, char *page) in suspend_hi_show() argument
5152 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); in suspend_hi_show()
5156 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) in suspend_hi_store() argument
5167 err = mddev_lock(mddev); in suspend_hi_store()
5171 if (mddev->pers == NULL) in suspend_hi_store()
5174 mddev_suspend(mddev); in suspend_hi_store()
5175 mddev->suspend_hi = new; in suspend_hi_store()
5176 mddev_resume(mddev); in suspend_hi_store()
5180 mddev_unlock(mddev); in suspend_hi_store()
5187 reshape_position_show(struct mddev *mddev, char *page) in reshape_position_show() argument
5189 if (mddev->reshape_position != MaxSector) in reshape_position_show()
5191 (unsigned long long)mddev->reshape_position); in reshape_position_show()
5197 reshape_position_store(struct mddev *mddev, const char *buf, size_t len) in reshape_position_store() argument
5208 err = mddev_lock(mddev); in reshape_position_store()
5212 if (mddev->pers) in reshape_position_store()
5214 mddev->reshape_position = new; in reshape_position_store()
5215 mddev->delta_disks = 0; in reshape_position_store()
5216 mddev->reshape_backwards = 0; in reshape_position_store()
5217 mddev->new_level = mddev->level; in reshape_position_store()
5218 mddev->new_layout = mddev->layout; in reshape_position_store()
5219 mddev->new_chunk_sectors = mddev->chunk_sectors; in reshape_position_store()
5220 rdev_for_each(rdev, mddev) in reshape_position_store()
5224 mddev_unlock(mddev); in reshape_position_store()
5233 reshape_direction_show(struct mddev *mddev, char *page) in reshape_direction_show() argument
5236 mddev->reshape_backwards ? "backwards" : "forwards"); in reshape_direction_show()
5240 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len) in reshape_direction_store() argument
5251 if (mddev->reshape_backwards == backwards) in reshape_direction_store()
5254 err = mddev_lock(mddev); in reshape_direction_store()
5258 if (mddev->delta_disks) in reshape_direction_store()
5260 else if (mddev->persistent && in reshape_direction_store()
5261 mddev->major_version == 0) in reshape_direction_store()
5264 mddev->reshape_backwards = backwards; in reshape_direction_store()
5265 mddev_unlock(mddev); in reshape_direction_store()
5274 array_size_show(struct mddev *mddev, char *page) in array_size_show() argument
5276 if (mddev->external_size) in array_size_show()
5278 (unsigned long long)mddev->array_sectors/2); in array_size_show()
5284 array_size_store(struct mddev *mddev, const char *buf, size_t len) in array_size_store() argument
5289 err = mddev_lock(mddev); in array_size_store()
5294 if (mddev_is_clustered(mddev)) { in array_size_store()
5295 mddev_unlock(mddev); in array_size_store()
5300 if (mddev->pers) in array_size_store()
5301 sectors = mddev->pers->size(mddev, 0, 0); in array_size_store()
5303 sectors = mddev->array_sectors; in array_size_store()
5305 mddev->external_size = 0; in array_size_store()
5309 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) in array_size_store()
5312 mddev->external_size = 1; in array_size_store()
5316 mddev->array_sectors = sectors; in array_size_store()
5317 if (mddev->pers) in array_size_store()
5318 set_capacity_and_notify(mddev->gendisk, in array_size_store()
5319 mddev->array_sectors); in array_size_store()
5321 mddev_unlock(mddev); in array_size_store()
5330 consistency_policy_show(struct mddev *mddev, char *page) in consistency_policy_show() argument
5334 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { in consistency_policy_show()
5336 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) { in consistency_policy_show()
5338 } else if (mddev->bitmap) { in consistency_policy_show()
5340 } else if (mddev->pers) { in consistency_policy_show()
5341 if (mddev->pers->sync_request) in consistency_policy_show()
5353 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len) in consistency_policy_store() argument
5357 if (mddev->pers) { in consistency_policy_store()
5358 if (mddev->pers->change_consistency_policy) in consistency_policy_store()
5359 err = mddev->pers->change_consistency_policy(mddev, buf); in consistency_policy_store()
5362 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) { in consistency_policy_store()
5363 set_bit(MD_HAS_PPL, &mddev->flags); in consistency_policy_store()
5375 static ssize_t fail_last_dev_show(struct mddev *mddev, char *page) in fail_last_dev_show() argument
5377 return sprintf(page, "%d\n", mddev->fail_last_dev); in fail_last_dev_show()
5385 fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len) in fail_last_dev_store() argument
5394 if (value != mddev->fail_last_dev) in fail_last_dev_store()
5395 mddev->fail_last_dev = value; in fail_last_dev_store()
5403 static ssize_t serialize_policy_show(struct mddev *mddev, char *page) in serialize_policy_show() argument
5405 if (mddev->pers == NULL || (mddev->pers->level != 1)) in serialize_policy_show()
5408 return sprintf(page, "%d\n", mddev->serialize_policy); in serialize_policy_show()
5416 serialize_policy_store(struct mddev *mddev, const char *buf, size_t len) in serialize_policy_store() argument
5425 if (value == mddev->serialize_policy) in serialize_policy_store()
5428 err = mddev_lock(mddev); in serialize_policy_store()
5431 if (mddev->pers == NULL || (mddev->pers->level != 1)) { in serialize_policy_store()
5437 mddev_suspend(mddev); in serialize_policy_store()
5439 mddev_create_serial_pool(mddev, NULL, true); in serialize_policy_store()
5441 mddev_destroy_serial_pool(mddev, NULL, true); in serialize_policy_store()
5442 mddev->serialize_policy = value; in serialize_policy_store()
5443 mddev_resume(mddev); in serialize_policy_store()
5445 mddev_unlock(mddev); in serialize_policy_store()
5512 struct mddev *mddev = container_of(kobj, struct mddev, kobj); in md_attr_show() local
5518 if (!mddev_get(mddev)) { in md_attr_show()
5524 rv = entry->show(mddev, page); in md_attr_show()
5525 mddev_put(mddev); in md_attr_show()
5534 struct mddev *mddev = container_of(kobj, struct mddev, kobj); in md_attr_store() local
5542 if (!mddev_get(mddev)) { in md_attr_store()
5547 rv = entry->store(mddev, page, length); in md_attr_store()
5548 mddev_put(mddev); in md_attr_store()
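md_attr_show() and md_attr_store() above are the generic kobject callbacks that dispatch to the per-attribute show/store pairs listed earlier. A sketch of the glue they rely on, following the usual md.c pattern (the exact definitions are not part of this excerpt), looks like:

/* Per-attribute descriptor pairing a sysfs file with its md handlers. */
struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct mddev *, char *);
	ssize_t (*store)(struct mddev *, const char *, size_t);
};

/* Example wiring for the sync_action file handled by action_show/store. */
static struct md_sysfs_entry md_scan_mode =
__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);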
5554 struct mddev *mddev = container_of(ko, struct mddev, kobj); in md_kobj_release() local
5556 if (mddev->sysfs_state) in md_kobj_release()
5557 sysfs_put(mddev->sysfs_state); in md_kobj_release()
5558 if (mddev->sysfs_level) in md_kobj_release()
5559 sysfs_put(mddev->sysfs_level); in md_kobj_release()
5561 del_gendisk(mddev->gendisk); in md_kobj_release()
5562 put_disk(mddev->gendisk); in md_kobj_release()
5579 struct mddev *mddev = container_of(ws, struct mddev, del_work); in mddev_delayed_delete() local
5581 kobject_put(&mddev->kobj); in mddev_delayed_delete()
5586 int mddev_init_writes_pending(struct mddev *mddev) in mddev_init_writes_pending() argument
5588 if (mddev->writes_pending.percpu_count_ptr) in mddev_init_writes_pending()
5590 if (percpu_ref_init(&mddev->writes_pending, no_op, in mddev_init_writes_pending()
5594 percpu_ref_put(&mddev->writes_pending); in mddev_init_writes_pending()
5599 struct mddev *md_alloc(dev_t dev, char *name) in md_alloc()
5611 struct mddev *mddev; in md_alloc() local
5626 mddev = mddev_alloc(dev); in md_alloc()
5627 if (IS_ERR(mddev)) { in md_alloc()
5628 error = PTR_ERR(mddev); in md_alloc()
5632 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); in md_alloc()
5634 unit = MINOR(mddev->unit) >> shift; in md_alloc()
5639 struct mddev *mddev2; in md_alloc()
5655 mddev->hold_active = UNTIL_STOP; in md_alloc()
5662 disk->major = MAJOR(mddev->unit); in md_alloc()
5672 disk->private_data = mddev; in md_alloc()
5674 mddev->queue = disk->queue; in md_alloc()
5675 blk_set_stacking_limits(&mddev->queue->limits); in md_alloc()
5676 blk_queue_write_cache(mddev->queue, true, true); in md_alloc()
5678 mddev->gendisk = disk; in md_alloc()
5683 kobject_init(&mddev->kobj, &md_ktype); in md_alloc()
5684 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md"); in md_alloc()
5691 mddev->hold_active = 0; in md_alloc()
5693 mddev_put(mddev); in md_alloc()
5697 kobject_uevent(&mddev->kobj, KOBJ_ADD); in md_alloc()
5698 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); in md_alloc()
5699 mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level"); in md_alloc()
5701 return mddev; in md_alloc()
5706 mddev_free(mddev); in md_alloc()
5714 struct mddev *mddev = md_alloc(dev, name); in md_alloc_and_put() local
5716 if (IS_ERR(mddev)) in md_alloc_and_put()
5717 return PTR_ERR(mddev); in md_alloc_and_put()
5718 mddev_put(mddev); in md_alloc_and_put()
5761 struct mddev *mddev = from_timer(mddev, t, safemode_timer); in md_safemode_timeout() local
5763 mddev->safemode = 1; in md_safemode_timeout()
5764 if (mddev->external) in md_safemode_timeout()
5765 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_safemode_timeout()
5767 md_wakeup_thread(mddev->thread); in md_safemode_timeout()
5772 int md_run(struct mddev *mddev) in md_run() argument
5779 if (list_empty(&mddev->disks)) in md_run()
5783 if (mddev->pers) in md_run()
5786 if (mddev->sysfs_active) in md_run()
5792 if (!mddev->raid_disks) { in md_run()
5793 if (!mddev->persistent) in md_run()
5795 err = analyze_sbs(mddev); in md_run()
5800 if (mddev->level != LEVEL_NONE) in md_run()
5801 request_module("md-level-%d", mddev->level); in md_run()
5802 else if (mddev->clevel[0]) in md_run()
5803 request_module("md-%s", mddev->clevel); in md_run()
5810 mddev->has_superblocks = false; in md_run()
5811 rdev_for_each(rdev, mddev) { in md_run()
5816 if (mddev->ro != 1 && rdev_read_only(rdev)) { in md_run()
5817 mddev->ro = 1; in md_run()
5818 if (mddev->gendisk) in md_run()
5819 set_disk_ro(mddev->gendisk, 1); in md_run()
5823 mddev->has_superblocks = true; in md_run()
5832 if (mddev->dev_sectors && in md_run()
5833 rdev->data_offset + mddev->dev_sectors in md_run()
5836 mdname(mddev)); in md_run()
5843 mdname(mddev)); in md_run()
5851 if (!bioset_initialized(&mddev->bio_set)) { in md_run()
5852 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); in md_run()
5856 if (!bioset_initialized(&mddev->sync_set)) { in md_run()
5857 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); in md_run()
5863 pers = find_pers(mddev->level, mddev->clevel); in md_run()
5866 if (mddev->level != LEVEL_NONE) in md_run()
5868 mddev->level); in md_run()
5871 mddev->clevel); in md_run()
5876 if (mddev->level != pers->level) { in md_run()
5877 mddev->level = pers->level; in md_run()
5878 mddev->new_level = pers->level; in md_run()
5880 strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); in md_run()
5882 if (mddev->reshape_position != MaxSector && in md_run()
5897 rdev_for_each(rdev, mddev) in md_run()
5898 rdev_for_each(rdev2, mddev) { in md_run()
5903 mdname(mddev), in md_run()
5914 mddev->recovery = 0; in md_run()
5916 mddev->resync_max_sectors = mddev->dev_sectors; in md_run()
5918 mddev->ok_start_degraded = start_dirty_degraded; in md_run()
5920 if (start_readonly && mddev->ro == 0) in md_run()
5921 mddev->ro = 2; /* read-only, but switch on first write */ in md_run()
5923 err = pers->run(mddev); in md_run()
5926 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) { in md_run()
5927 WARN_ONCE(!mddev->external_size, in md_run()
5931 (unsigned long long)mddev->array_sectors / 2, in md_run()
5932 (unsigned long long)pers->size(mddev, 0, 0) / 2); in md_run()
5936 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { in md_run()
5939 bitmap = md_bitmap_create(mddev, -1); in md_run()
5943 mdname(mddev), err); in md_run()
5945 mddev->bitmap = bitmap; in md_run()
5951 if (mddev->bitmap_info.max_write_behind > 0) { in md_run()
5954 rdev_for_each(rdev, mddev) { in md_run()
5959 if (create_pool && mddev->serial_info_pool == NULL) { in md_run()
5960 mddev->serial_info_pool = in md_run()
5963 if (!mddev->serial_info_pool) { in md_run()
5970 if (mddev->queue) { in md_run()
5973 rdev_for_each(rdev, mddev) { in md_run()
5979 if (mddev->degraded) in md_run()
5982 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue); in md_run()
5984 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue); in md_run()
5985 blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue); in md_run()
5989 blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue); in md_run()
5992 if (mddev->kobj.sd && in md_run()
5993 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) in md_run()
5995 mdname(mddev)); in md_run()
5996 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); in md_run()
5997 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); in md_run()
5998 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); in md_run()
5999 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ in md_run()
6000 mddev->ro = 0; in md_run()
6002 atomic_set(&mddev->max_corr_read_errors, in md_run()
6004 mddev->safemode = 0; in md_run()
6005 if (mddev_is_clustered(mddev)) in md_run()
6006 mddev->safemode_delay = 0; in md_run()
6008 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; in md_run()
6009 mddev->in_sync = 1; in md_run()
6011 spin_lock(&mddev->lock); in md_run()
6012 mddev->pers = pers; in md_run()
6013 spin_unlock(&mddev->lock); in md_run()
6014 rdev_for_each(rdev, mddev) in md_run()
6016 sysfs_link_rdev(mddev, rdev); /* failure here is OK */ in md_run()
6018 if (mddev->degraded && !mddev->ro) in md_run()
6022 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_run()
6023 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_run()
6025 if (mddev->sb_flags) in md_run()
6026 md_update_sb(mddev, 0); in md_run()
6032 mddev_detach(mddev); in md_run()
6033 if (mddev->private) in md_run()
6034 pers->free(mddev, mddev->private); in md_run()
6035 mddev->private = NULL; in md_run()
6037 md_bitmap_destroy(mddev); in md_run()
6039 bioset_exit(&mddev->sync_set); in md_run()
6041 bioset_exit(&mddev->bio_set); in md_run()
6046 int do_md_run(struct mddev *mddev) in do_md_run() argument
6050 set_bit(MD_NOT_READY, &mddev->flags); in do_md_run()
6051 err = md_run(mddev); in do_md_run()
6054 err = md_bitmap_load(mddev); in do_md_run()
6056 md_bitmap_destroy(mddev); in do_md_run()
6060 if (mddev_is_clustered(mddev)) in do_md_run()
6061 md_allow_write(mddev); in do_md_run()
6064 md_start(mddev); in do_md_run()
6066 md_wakeup_thread(mddev->thread); in do_md_run()
6067 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ in do_md_run()
6069 set_capacity_and_notify(mddev->gendisk, mddev->array_sectors); in do_md_run()
6070 clear_bit(MD_NOT_READY, &mddev->flags); in do_md_run()
6071 mddev->changed = 1; in do_md_run()
6072 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); in do_md_run()
6073 sysfs_notify_dirent_safe(mddev->sysfs_state); in do_md_run()
6074 sysfs_notify_dirent_safe(mddev->sysfs_action); in do_md_run()
6075 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in do_md_run()
6077 clear_bit(MD_NOT_READY, &mddev->flags); in do_md_run()
6081 int md_start(struct mddev *mddev) in md_start() argument
6085 if (mddev->pers->start) { in md_start()
6086 set_bit(MD_RECOVERY_WAIT, &mddev->recovery); in md_start()
6087 md_wakeup_thread(mddev->thread); in md_start()
6088 ret = mddev->pers->start(mddev); in md_start()
6089 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery); in md_start()
6090 md_wakeup_thread(mddev->sync_thread); in md_start()
6096 static int restart_array(struct mddev *mddev) in restart_array() argument
6098 struct gendisk *disk = mddev->gendisk; in restart_array()
6104 if (list_empty(&mddev->disks)) in restart_array()
6106 if (!mddev->pers) in restart_array()
6108 if (!mddev->ro) in restart_array()
6112 rdev_for_each_rcu(rdev, mddev) { in restart_array()
6120 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal) in restart_array()
6126 mddev->safemode = 0; in restart_array()
6127 mddev->ro = 0; in restart_array()
6129 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev)); in restart_array()
6131 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in restart_array()
6132 md_wakeup_thread(mddev->thread); in restart_array()
6133 md_wakeup_thread(mddev->sync_thread); in restart_array()
6134 sysfs_notify_dirent_safe(mddev->sysfs_state); in restart_array()
6138 static void md_clean(struct mddev *mddev) in md_clean() argument
6140 mddev->array_sectors = 0; in md_clean()
6141 mddev->external_size = 0; in md_clean()
6142 mddev->dev_sectors = 0; in md_clean()
6143 mddev->raid_disks = 0; in md_clean()
6144 mddev->recovery_cp = 0; in md_clean()
6145 mddev->resync_min = 0; in md_clean()
6146 mddev->resync_max = MaxSector; in md_clean()
6147 mddev->reshape_position = MaxSector; in md_clean()
6148 mddev->external = 0; in md_clean()
6149 mddev->persistent = 0; in md_clean()
6150 mddev->level = LEVEL_NONE; in md_clean()
6151 mddev->clevel[0] = 0; in md_clean()
6152 mddev->flags = 0; in md_clean()
6153 mddev->sb_flags = 0; in md_clean()
6154 mddev->ro = 0; in md_clean()
6155 mddev->metadata_type[0] = 0; in md_clean()
6156 mddev->chunk_sectors = 0; in md_clean()
6157 mddev->ctime = mddev->utime = 0; in md_clean()
6158 mddev->layout = 0; in md_clean()
6159 mddev->max_disks = 0; in md_clean()
6160 mddev->events = 0; in md_clean()
6161 mddev->can_decrease_events = 0; in md_clean()
6162 mddev->delta_disks = 0; in md_clean()
6163 mddev->reshape_backwards = 0; in md_clean()
6164 mddev->new_level = LEVEL_NONE; in md_clean()
6165 mddev->new_layout = 0; in md_clean()
6166 mddev->new_chunk_sectors = 0; in md_clean()
6167 mddev->curr_resync = 0; in md_clean()
6168 atomic64_set(&mddev->resync_mismatches, 0); in md_clean()
6169 mddev->suspend_lo = mddev->suspend_hi = 0; in md_clean()
6170 mddev->sync_speed_min = mddev->sync_speed_max = 0; in md_clean()
6171 mddev->recovery = 0; in md_clean()
6172 mddev->in_sync = 0; in md_clean()
6173 mddev->changed = 0; in md_clean()
6174 mddev->degraded = 0; in md_clean()
6175 mddev->safemode = 0; in md_clean()
6176 mddev->private = NULL; in md_clean()
6177 mddev->cluster_info = NULL; in md_clean()
6178 mddev->bitmap_info.offset = 0; in md_clean()
6179 mddev->bitmap_info.default_offset = 0; in md_clean()
6180 mddev->bitmap_info.default_space = 0; in md_clean()
6181 mddev->bitmap_info.chunksize = 0; in md_clean()
6182 mddev->bitmap_info.daemon_sleep = 0; in md_clean()
6183 mddev->bitmap_info.max_write_behind = 0; in md_clean()
6184 mddev->bitmap_info.nodes = 0; in md_clean()
6187 static void __md_stop_writes(struct mddev *mddev) in __md_stop_writes() argument
6189 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in __md_stop_writes()
6190 if (work_pending(&mddev->del_work)) in __md_stop_writes()
6192 if (mddev->sync_thread) { in __md_stop_writes()
6193 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in __md_stop_writes()
6194 md_unregister_thread(&mddev->sync_thread); in __md_stop_writes()
6195 md_reap_sync_thread(mddev); in __md_stop_writes()
6198 del_timer_sync(&mddev->safemode_timer); in __md_stop_writes()
6200 if (mddev->pers && mddev->pers->quiesce) { in __md_stop_writes()
6201 mddev->pers->quiesce(mddev, 1); in __md_stop_writes()
6202 mddev->pers->quiesce(mddev, 0); in __md_stop_writes()
6204 md_bitmap_flush(mddev); in __md_stop_writes()
6206 if (mddev->ro == 0 && in __md_stop_writes()
6207 ((!mddev->in_sync && !mddev_is_clustered(mddev)) || in __md_stop_writes()
6208 mddev->sb_flags)) { in __md_stop_writes()
6210 if (!mddev_is_clustered(mddev)) in __md_stop_writes()
6211 mddev->in_sync = 1; in __md_stop_writes()
6212 md_update_sb(mddev, 1); in __md_stop_writes()
6215 mddev->serialize_policy = 0; in __md_stop_writes()
6216 mddev_destroy_serial_pool(mddev, NULL, true); in __md_stop_writes()
6219 void md_stop_writes(struct mddev *mddev) in md_stop_writes() argument
6221 mddev_lock_nointr(mddev); in md_stop_writes()
6222 __md_stop_writes(mddev); in md_stop_writes()
6223 mddev_unlock(mddev); in md_stop_writes()
6227 static void mddev_detach(struct mddev *mddev) in mddev_detach() argument
6229 md_bitmap_wait_behind_writes(mddev); in mddev_detach()
6230 if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) { in mddev_detach()
6231 mddev->pers->quiesce(mddev, 1); in mddev_detach()
6232 mddev->pers->quiesce(mddev, 0); in mddev_detach()
6234 md_unregister_thread(&mddev->thread); in mddev_detach()
6235 if (mddev->queue) in mddev_detach()
6236 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ in mddev_detach()
6239 static void __md_stop(struct mddev *mddev) in __md_stop() argument
6241 struct md_personality *pers = mddev->pers; in __md_stop()
6242 md_bitmap_destroy(mddev); in __md_stop()
6243 mddev_detach(mddev); in __md_stop()
6245 if (mddev->event_work.func) in __md_stop()
6247 spin_lock(&mddev->lock); in __md_stop()
6248 mddev->pers = NULL; in __md_stop()
6249 spin_unlock(&mddev->lock); in __md_stop()
6250 if (mddev->private) in __md_stop()
6251 pers->free(mddev, mddev->private); in __md_stop()
6252 mddev->private = NULL; in __md_stop()
6253 if (pers->sync_request && mddev->to_remove == NULL) in __md_stop()
6254 mddev->to_remove = &md_redundancy_group; in __md_stop()
6256 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in __md_stop()
6259 void md_stop(struct mddev *mddev) in md_stop() argument
6264 __md_stop_writes(mddev); in md_stop()
6265 __md_stop(mddev); in md_stop()
6266 bioset_exit(&mddev->bio_set); in md_stop()
6267 bioset_exit(&mddev->sync_set); in md_stop()
6272 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) in md_set_readonly() argument
6277 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { in md_set_readonly()
6279 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
6280 md_wakeup_thread(mddev->thread); in md_set_readonly()
6282 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in md_set_readonly()
6283 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_set_readonly()
6284 if (mddev->sync_thread) in md_set_readonly()
6287 wake_up_process(mddev->sync_thread->tsk); in md_set_readonly()
6289 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) in md_set_readonly()
6291 mddev_unlock(mddev); in md_set_readonly()
6293 &mddev->recovery)); in md_set_readonly()
6294 wait_event(mddev->sb_wait, in md_set_readonly()
6295 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); in md_set_readonly()
6296 mddev_lock_nointr(mddev); in md_set_readonly()
6298 mutex_lock(&mddev->open_mutex); in md_set_readonly()
6299 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || in md_set_readonly()
6300 mddev->sync_thread || in md_set_readonly()
6301 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { in md_set_readonly()
6302 pr_warn("md: %s still in use.\n",mdname(mddev)); in md_set_readonly()
6304 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
6305 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_set_readonly()
6306 md_wakeup_thread(mddev->thread); in md_set_readonly()
6311 if (mddev->pers) { in md_set_readonly()
6312 __md_stop_writes(mddev); in md_set_readonly()
6315 if (mddev->ro==1) in md_set_readonly()
6317 mddev->ro = 1; in md_set_readonly()
6318 set_disk_ro(mddev->gendisk, 1); in md_set_readonly()
6319 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
6320 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_set_readonly()
6321 md_wakeup_thread(mddev->thread); in md_set_readonly()
6322 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_set_readonly()
6326 mutex_unlock(&mddev->open_mutex); in md_set_readonly()
6334 static int do_md_stop(struct mddev *mddev, int mode, in do_md_stop() argument
6337 struct gendisk *disk = mddev->gendisk; in do_md_stop()
6341 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { in do_md_stop()
6343 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in do_md_stop()
6344 md_wakeup_thread(mddev->thread); in do_md_stop()
6346 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in do_md_stop()
6347 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in do_md_stop()
6348 if (mddev->sync_thread) in do_md_stop()
6351 wake_up_process(mddev->sync_thread->tsk); in do_md_stop()
6353 mddev_unlock(mddev); in do_md_stop()
6354 wait_event(resync_wait, (mddev->sync_thread == NULL && in do_md_stop()
6356 &mddev->recovery))); in do_md_stop()
6357 mddev_lock_nointr(mddev); in do_md_stop()
6359 mutex_lock(&mddev->open_mutex); in do_md_stop()
6360 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || in do_md_stop()
6361 mddev->sysfs_active || in do_md_stop()
6362 mddev->sync_thread || in do_md_stop()
6363 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { in do_md_stop()
6364 pr_warn("md: %s still in use.\n",mdname(mddev)); in do_md_stop()
6365 mutex_unlock(&mddev->open_mutex); in do_md_stop()
6367 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in do_md_stop()
6368 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in do_md_stop()
6369 md_wakeup_thread(mddev->thread); in do_md_stop()
6373 if (mddev->pers) { in do_md_stop()
6374 if (mddev->ro) in do_md_stop()
6377 __md_stop_writes(mddev); in do_md_stop()
6378 __md_stop(mddev); in do_md_stop()
6381 sysfs_notify_dirent_safe(mddev->sysfs_state); in do_md_stop()
6383 rdev_for_each(rdev, mddev) in do_md_stop()
6385 sysfs_unlink_rdev(mddev, rdev); in do_md_stop()
6388 mutex_unlock(&mddev->open_mutex); in do_md_stop()
6389 mddev->changed = 1; in do_md_stop()
6391 if (mddev->ro) in do_md_stop()
6392 mddev->ro = 0; in do_md_stop()
6394 mutex_unlock(&mddev->open_mutex); in do_md_stop()
6399 pr_info("md: %s stopped.\n", mdname(mddev)); in do_md_stop()
6401 if (mddev->bitmap_info.file) { in do_md_stop()
6402 struct file *f = mddev->bitmap_info.file; in do_md_stop()
6403 spin_lock(&mddev->lock); in do_md_stop()
6404 mddev->bitmap_info.file = NULL; in do_md_stop()
6405 spin_unlock(&mddev->lock); in do_md_stop()
6408 mddev->bitmap_info.offset = 0; in do_md_stop()
6410 export_array(mddev); in do_md_stop()
6412 md_clean(mddev); in do_md_stop()
6413 if (mddev->hold_active == UNTIL_STOP) in do_md_stop()
6414 mddev->hold_active = 0; in do_md_stop()
6417 sysfs_notify_dirent_safe(mddev->sysfs_state); in do_md_stop()
6422 static void autorun_array(struct mddev *mddev) in autorun_array() argument
6427 if (list_empty(&mddev->disks)) in autorun_array()
6432 rdev_for_each(rdev, mddev) { in autorun_array()
6437 err = do_md_run(mddev); in autorun_array()
6440 do_md_stop(mddev, 0, NULL); in autorun_array()
6459 struct mddev *mddev; in autorun_devices() local
6496 mddev = md_alloc(dev, NULL); in autorun_devices()
6497 if (IS_ERR(mddev)) in autorun_devices()
6500 if (mddev_lock(mddev)) in autorun_devices()
6501 pr_warn("md: %s locked, cannot run\n", mdname(mddev)); in autorun_devices()
6502 else if (mddev->raid_disks || mddev->major_version in autorun_devices()
6503 || !list_empty(&mddev->disks)) { in autorun_devices()
6505 mdname(mddev), rdev0->bdev); in autorun_devices()
6506 mddev_unlock(mddev); in autorun_devices()
6508 pr_debug("md: created %s\n", mdname(mddev)); in autorun_devices()
6509 mddev->persistent = 1; in autorun_devices()
6512 if (bind_rdev_to_array(rdev, mddev)) in autorun_devices()
6515 autorun_array(mddev); in autorun_devices()
6516 mddev_unlock(mddev); in autorun_devices()
6525 mddev_put(mddev); in autorun_devices()
6545 static int get_array_info(struct mddev *mddev, void __user *arg) in get_array_info() argument
6553 rdev_for_each_rcu(rdev, mddev) { in get_array_info()
6570 info.major_version = mddev->major_version; in get_array_info()
6571 info.minor_version = mddev->minor_version; in get_array_info()
6573 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); in get_array_info()
6574 info.level = mddev->level; in get_array_info()
6575 info.size = mddev->dev_sectors / 2; in get_array_info()
6576 if (info.size != mddev->dev_sectors / 2) /* overflow */ in get_array_info()
6579 info.raid_disks = mddev->raid_disks; in get_array_info()
6580 info.md_minor = mddev->md_minor; in get_array_info()
6581 info.not_persistent= !mddev->persistent; in get_array_info()
6583 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); in get_array_info()
6585 if (mddev->in_sync) in get_array_info()
6587 if (mddev->bitmap && mddev->bitmap_info.offset) in get_array_info()
6589 if (mddev_is_clustered(mddev)) in get_array_info()
6596 info.layout = mddev->layout; in get_array_info()
6597 info.chunk_size = mddev->chunk_sectors << 9; in get_array_info()
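get_array_info() above services the GET_ARRAY_INFO ioctl, filling an mdu_array_info_t for user space (note chunk_size is converted from sectors to bytes with the << 9). A minimal caller might look like this; /dev/md0 is illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/raid/md_u.h>

int main(void)
{
	mdu_array_info_t info;
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0 || ioctl(fd, GET_ARRAY_INFO, &info) < 0) {
		perror("GET_ARRAY_INFO");
		return 1;
	}
	printf("level=%d raid_disks=%d chunk=%d bytes\n",
	       info.level, info.raid_disks, info.chunk_size);
	close(fd);
	return 0;
}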
6605 static int get_bitmap_file(struct mddev *mddev, void __user * arg) in get_bitmap_file() argument
6616 spin_lock(&mddev->lock); in get_bitmap_file()
6618 if (mddev->bitmap_info.file) { in get_bitmap_file()
6619 ptr = file_path(mddev->bitmap_info.file, file->pathname, in get_bitmap_file()
6627 spin_unlock(&mddev->lock); in get_bitmap_file()
6637 static int get_disk_info(struct mddev *mddev, void __user * arg) in get_disk_info() argument
6646 rdev = md_find_rdev_nr_rcu(mddev, info.number); in get_disk_info()
6677 int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) in md_add_new_disk() argument
6682 if (mddev_is_clustered(mddev) && in md_add_new_disk()
6685 mdname(mddev)); in md_add_new_disk()
6692 if (!mddev->raid_disks) { in md_add_new_disk()
6695 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); in md_add_new_disk()
6701 if (!list_empty(&mddev->disks)) { in md_add_new_disk()
6703 = list_entry(mddev->disks.next, in md_add_new_disk()
6705 err = super_types[mddev->major_version] in md_add_new_disk()
6706 .load_super(rdev, rdev0, mddev->minor_version); in md_add_new_disk()
6715 err = bind_rdev_to_array(rdev, mddev); in md_add_new_disk()
6726 if (mddev->pers) { in md_add_new_disk()
6728 if (!mddev->pers->hot_add_disk) { in md_add_new_disk()
6730 mdname(mddev)); in md_add_new_disk()
6733 if (mddev->persistent) in md_add_new_disk()
6734 rdev = md_import_device(dev, mddev->major_version, in md_add_new_disk()
6735 mddev->minor_version); in md_add_new_disk()
6744 if (!mddev->persistent) { in md_add_new_disk()
6746 info->raid_disk < mddev->raid_disks) { in md_add_new_disk()
6754 super_types[mddev->major_version]. in md_add_new_disk()
6755 validate_super(mddev, rdev); in md_add_new_disk()
6780 rdev_for_each(rdev2, mddev) { in md_add_new_disk()
6786 if (has_journal || mddev->bitmap) { in md_add_new_disk()
6795 if (mddev_is_clustered(mddev)) { in md_add_new_disk()
6800 err = md_cluster_ops->add_new_disk(mddev, rdev); in md_add_new_disk()
6809 err = bind_rdev_to_array(rdev, mddev); in md_add_new_disk()
6814 if (mddev_is_clustered(mddev)) { in md_add_new_disk()
6817 err = md_cluster_ops->new_disk_ack(mddev, in md_add_new_disk()
6824 md_cluster_ops->add_new_disk_cancel(mddev); in md_add_new_disk()
6838 if (mddev->major_version != 0) { in md_add_new_disk()
6839 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev)); in md_add_new_disk()
6852 if (info->raid_disk < mddev->raid_disks) in md_add_new_disk()
6857 if (rdev->raid_disk < mddev->raid_disks) in md_add_new_disk()
6866 if (!mddev->persistent) { in md_add_new_disk()
6873 err = bind_rdev_to_array(rdev, mddev); in md_add_new_disk()
6883 static int hot_remove_disk(struct mddev *mddev, dev_t dev) in hot_remove_disk() argument
6887 if (!mddev->pers) in hot_remove_disk()
6890 rdev = find_rdev(mddev, dev); in hot_remove_disk()
6898 remove_and_add_spares(mddev, rdev); in hot_remove_disk()
6904 if (mddev_is_clustered(mddev)) { in hot_remove_disk()
6905 if (md_cluster_ops->remove_disk(mddev, rdev)) in hot_remove_disk()
6910 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in hot_remove_disk()
6911 if (mddev->thread) in hot_remove_disk()
6912 md_wakeup_thread(mddev->thread); in hot_remove_disk()
6914 md_update_sb(mddev, 1); in hot_remove_disk()
6920 rdev->bdev, mdname(mddev)); in hot_remove_disk()
6924 static int hot_add_disk(struct mddev *mddev, dev_t dev) in hot_add_disk() argument
6929 if (!mddev->pers) in hot_add_disk()
6932 if (mddev->major_version != 0) { in hot_add_disk()
6934 mdname(mddev)); in hot_add_disk()
6937 if (!mddev->pers->hot_add_disk) { in hot_add_disk()
6939 mdname(mddev)); in hot_add_disk()
6950 if (mddev->persistent) in hot_add_disk()
6959 rdev->bdev, mdname(mddev)); in hot_add_disk()
6967 err = bind_rdev_to_array(rdev, mddev); in hot_add_disk()
6978 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in hot_add_disk()
6979 if (!mddev->thread) in hot_add_disk()
6980 md_update_sb(mddev, 1); in hot_add_disk()
6987 mdname(mddev), rdev->bdev); in hot_add_disk()
6988 blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue); in hot_add_disk()
6994 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in hot_add_disk()
6995 md_wakeup_thread(mddev->thread); in hot_add_disk()
7004 static int set_bitmap_file(struct mddev *mddev, int fd) in set_bitmap_file() argument
7008 if (mddev->pers) { in set_bitmap_file()
7009 if (!mddev->pers->quiesce || !mddev->thread) in set_bitmap_file()
7011 if (mddev->recovery || mddev->sync_thread) in set_bitmap_file()
7020 if (mddev->bitmap || mddev->bitmap_info.file) in set_bitmap_file()
7026 mdname(mddev)); in set_bitmap_file()
7033 mdname(mddev)); in set_bitmap_file()
7037 mdname(mddev)); in set_bitmap_file()
7041 mdname(mddev)); in set_bitmap_file()
7048 mddev->bitmap_info.file = f; in set_bitmap_file()
7049 mddev->bitmap_info.offset = 0; /* file overrides offset */ in set_bitmap_file()
7050 } else if (mddev->bitmap == NULL) in set_bitmap_file()
7053 if (mddev->pers) { in set_bitmap_file()
7057 bitmap = md_bitmap_create(mddev, -1); in set_bitmap_file()
7058 mddev_suspend(mddev); in set_bitmap_file()
7060 mddev->bitmap = bitmap; in set_bitmap_file()
7061 err = md_bitmap_load(mddev); in set_bitmap_file()
7065 md_bitmap_destroy(mddev); in set_bitmap_file()
7068 mddev_resume(mddev); in set_bitmap_file()
7070 mddev_suspend(mddev); in set_bitmap_file()
7071 md_bitmap_destroy(mddev); in set_bitmap_file()
7072 mddev_resume(mddev); in set_bitmap_file()
7076 struct file *f = mddev->bitmap_info.file; in set_bitmap_file()
7078 spin_lock(&mddev->lock); in set_bitmap_file()
7079 mddev->bitmap_info.file = NULL; in set_bitmap_file()
7080 spin_unlock(&mddev->lock); in set_bitmap_file()
7101 int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info) in md_set_array_info() argument
7113 mddev->major_version = info->major_version; in md_set_array_info()
7114 mddev->minor_version = info->minor_version; in md_set_array_info()
7115 mddev->patch_version = info->patch_version; in md_set_array_info()
7116 mddev->persistent = !info->not_persistent; in md_set_array_info()
7120 mddev->ctime = ktime_get_real_seconds(); in md_set_array_info()
7123 mddev->major_version = MD_MAJOR_VERSION; in md_set_array_info()
7124 mddev->minor_version = MD_MINOR_VERSION; in md_set_array_info()
7125 mddev->patch_version = MD_PATCHLEVEL_VERSION; in md_set_array_info()
7126 mddev->ctime = ktime_get_real_seconds(); in md_set_array_info()
7128 mddev->level = info->level; in md_set_array_info()
7129 mddev->clevel[0] = 0; in md_set_array_info()
7130 mddev->dev_sectors = 2 * (sector_t)info->size; in md_set_array_info()
7131 mddev->raid_disks = info->raid_disks; in md_set_array_info()
7136 mddev->recovery_cp = MaxSector; in md_set_array_info()
7138 mddev->recovery_cp = 0; in md_set_array_info()
7139 mddev->persistent = ! info->not_persistent; in md_set_array_info()
7140 mddev->external = 0; in md_set_array_info()
7142 mddev->layout = info->layout; in md_set_array_info()
7143 if (mddev->level == 0) in md_set_array_info()
7145 mddev->layout = -1; in md_set_array_info()
7146 mddev->chunk_sectors = info->chunk_size >> 9; in md_set_array_info()
7148 if (mddev->persistent) { in md_set_array_info()
7149 mddev->max_disks = MD_SB_DISKS; in md_set_array_info()
7150 mddev->flags = 0; in md_set_array_info()
7151 mddev->sb_flags = 0; in md_set_array_info()
7153 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_set_array_info()
7155 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; in md_set_array_info()
7156 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); in md_set_array_info()
7157 mddev->bitmap_info.offset = 0; in md_set_array_info()
7159 mddev->reshape_position = MaxSector; in md_set_array_info()
7164 get_random_bytes(mddev->uuid, 16); in md_set_array_info()
7166 mddev->new_level = mddev->level; in md_set_array_info()
7167 mddev->new_chunk_sectors = mddev->chunk_sectors; in md_set_array_info()
7168 mddev->new_layout = mddev->layout; in md_set_array_info()
7169 mddev->delta_disks = 0; in md_set_array_info()
7170 mddev->reshape_backwards = 0; in md_set_array_info()
7175 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) in md_set_array_sectors() argument
7177 lockdep_assert_held(&mddev->reconfig_mutex); in md_set_array_sectors()
7179 if (mddev->external_size) in md_set_array_sectors()
7182 mddev->array_sectors = array_sectors; in md_set_array_sectors()
7186 static int update_size(struct mddev *mddev, sector_t num_sectors) in update_size() argument
7191 sector_t old_dev_sectors = mddev->dev_sectors; in update_size()
7193 if (mddev->pers->resize == NULL) in update_size()
7204 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in update_size()
7205 mddev->sync_thread) in update_size()
7207 if (mddev->ro) in update_size()
7210 rdev_for_each(rdev, mddev) { in update_size()
7218 rv = mddev->pers->resize(mddev, num_sectors); in update_size()
7220 if (mddev_is_clustered(mddev)) in update_size()
7221 md_cluster_ops->update_size(mddev, old_dev_sectors); in update_size()
7222 else if (mddev->queue) { in update_size()
7223 set_capacity_and_notify(mddev->gendisk, in update_size()
7224 mddev->array_sectors); in update_size()
7230 static int update_raid_disks(struct mddev *mddev, int raid_disks) in update_raid_disks() argument
7235 if (mddev->pers->check_reshape == NULL) in update_raid_disks()
7237 if (mddev->ro) in update_raid_disks()
7240 (mddev->max_disks && raid_disks >= mddev->max_disks)) in update_raid_disks()
7242 if (mddev->sync_thread || in update_raid_disks()
7243 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in update_raid_disks()
7244 test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) || in update_raid_disks()
7245 mddev->reshape_position != MaxSector) in update_raid_disks()
7248 rdev_for_each(rdev, mddev) { in update_raid_disks()
7249 if (mddev->raid_disks < raid_disks && in update_raid_disks()
7252 if (mddev->raid_disks > raid_disks && in update_raid_disks()
7257 mddev->delta_disks = raid_disks - mddev->raid_disks; in update_raid_disks()
7258 if (mddev->delta_disks < 0) in update_raid_disks()
7259 mddev->reshape_backwards = 1; in update_raid_disks()
7260 else if (mddev->delta_disks > 0) in update_raid_disks()
7261 mddev->reshape_backwards = 0; in update_raid_disks()
7263 rv = mddev->pers->check_reshape(mddev); in update_raid_disks()
7265 mddev->delta_disks = 0; in update_raid_disks()
7266 mddev->reshape_backwards = 0; in update_raid_disks()
7279 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) in update_array_info() argument
7286 if (mddev->bitmap && mddev->bitmap_info.offset) in update_array_info()
7289 if (mddev->major_version != info->major_version || in update_array_info()
7290 mddev->minor_version != info->minor_version || in update_array_info()
7292 mddev->ctime != info->ctime || in update_array_info()
7293 mddev->level != info->level || in update_array_info()
7295 mddev->persistent != !info->not_persistent || in update_array_info()
7296 mddev->chunk_sectors != info->chunk_size >> 9 || in update_array_info()
7302 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) in update_array_info()
7304 if (mddev->raid_disks != info->raid_disks) in update_array_info()
7306 if (mddev->layout != info->layout) in update_array_info()
7315 if (mddev->layout != info->layout) { in update_array_info()
7320 if (mddev->pers->check_reshape == NULL) in update_array_info()
7323 mddev->new_layout = info->layout; in update_array_info()
7324 rv = mddev->pers->check_reshape(mddev); in update_array_info()
7326 mddev->new_layout = mddev->layout; in update_array_info()
7330 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) in update_array_info()
7331 rv = update_size(mddev, (sector_t)info->size * 2); in update_array_info()
7333 if (mddev->raid_disks != info->raid_disks) in update_array_info()
7334 rv = update_raid_disks(mddev, info->raid_disks); in update_array_info()
7337 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) { in update_array_info()
7341 if (mddev->recovery || mddev->sync_thread) { in update_array_info()
7348 if (mddev->bitmap) { in update_array_info()
7352 if (mddev->bitmap_info.default_offset == 0) { in update_array_info()
7356 mddev->bitmap_info.offset = in update_array_info()
7357 mddev->bitmap_info.default_offset; in update_array_info()
7358 mddev->bitmap_info.space = in update_array_info()
7359 mddev->bitmap_info.default_space; in update_array_info()
7360 bitmap = md_bitmap_create(mddev, -1); in update_array_info()
7361 mddev_suspend(mddev); in update_array_info()
7363 mddev->bitmap = bitmap; in update_array_info()
7364 rv = md_bitmap_load(mddev); in update_array_info()
7368 md_bitmap_destroy(mddev); in update_array_info()
7369 mddev_resume(mddev); in update_array_info()
7372 if (!mddev->bitmap) { in update_array_info()
7376 if (mddev->bitmap->storage.file) { in update_array_info()
7380 if (mddev->bitmap_info.nodes) { in update_array_info()
7382 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) { in update_array_info()
7385 md_cluster_ops->unlock_all_bitmaps(mddev); in update_array_info()
7389 mddev->bitmap_info.nodes = 0; in update_array_info()
7390 md_cluster_ops->leave(mddev); in update_array_info()
7392 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; in update_array_info()
7394 mddev_suspend(mddev); in update_array_info()
7395 md_bitmap_destroy(mddev); in update_array_info()
7396 mddev_resume(mddev); in update_array_info()
7397 mddev->bitmap_info.offset = 0; in update_array_info()
7400 md_update_sb(mddev, 1); in update_array_info()
7406 static int set_disk_faulty(struct mddev *mddev, dev_t dev) in set_disk_faulty() argument
7411 if (mddev->pers == NULL) in set_disk_faulty()
7415 rdev = md_find_rdev_rcu(mddev, dev); in set_disk_faulty()
7419 md_error(mddev, rdev); in set_disk_faulty()
7420 if (test_bit(MD_BROKEN, &mddev->flags)) in set_disk_faulty()
7435 struct mddev *mddev = bdev->bd_disk->private_data; in md_getgeo() local
7439 geo->cylinders = mddev->array_sectors / 8; in md_getgeo()
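md_getgeo() fabricates a legacy HDIO_GETGEO geometry: cylinders is array_sectors / 8, which corresponds to the conventional 2-head, 4-sectors-per-track fake layout (the lines setting heads and sectors are not in this excerpt). A 1 TiB array of 2,147,483,648 sectors would therefore report 268,435,456 cylinders.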
7472 struct mddev *mddev = NULL; in md_ioctl() local
7503 mddev = bdev->bd_disk->private_data; in md_ioctl()
7505 if (!mddev) { in md_ioctl()
7513 if (!mddev->raid_disks && !mddev->external) in md_ioctl()
7516 err = get_array_info(mddev, argp); in md_ioctl()
7520 if (!mddev->raid_disks && !mddev->external) in md_ioctl()
7523 err = get_disk_info(mddev, argp); in md_ioctl()
7527 err = set_disk_faulty(mddev, new_decode_dev(arg)); in md_ioctl()
7531 err = get_bitmap_file(mddev, argp); in md_ioctl()
7537 flush_rdev_wq(mddev); in md_ioctl()
7541 wait_event_interruptible_timeout(mddev->sb_wait, in md_ioctl()
7543 &mddev->recovery), in md_ioctl()
7549 mutex_lock(&mddev->open_mutex); in md_ioctl()
7550 if (mddev->pers && atomic_read(&mddev->openers) > 1) { in md_ioctl()
7551 mutex_unlock(&mddev->open_mutex); in md_ioctl()
7555 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) { in md_ioctl()
7556 mutex_unlock(&mddev->open_mutex); in md_ioctl()
7561 mutex_unlock(&mddev->open_mutex); in md_ioctl()
7564 err = mddev_lock(mddev); in md_ioctl()
7579 if (mddev->pers) { in md_ioctl()
7580 err = update_array_info(mddev, &info); in md_ioctl()
7587 if (!list_empty(&mddev->disks)) { in md_ioctl()
7588 pr_warn("md: array %s already has disks!\n", mdname(mddev)); in md_ioctl()
7592 if (mddev->raid_disks) { in md_ioctl()
7593 pr_warn("md: array %s already initialised!\n", mdname(mddev)); in md_ioctl()
7597 err = md_set_array_info(mddev, &info); in md_ioctl()
7610 if ((!mddev->raid_disks && !mddev->external) in md_ioctl()
7623 err = restart_array(mddev); in md_ioctl()
7627 err = do_md_stop(mddev, 0, bdev); in md_ioctl()
7631 err = md_set_readonly(mddev, bdev); in md_ioctl()
7635 err = hot_remove_disk(mddev, new_decode_dev(arg)); in md_ioctl()
7643 if (mddev->pers) { in md_ioctl()
7651 err = md_add_new_disk(mddev, &info); in md_ioctl()
7661 if (mddev->ro && mddev->pers) { in md_ioctl()
7662 if (mddev->ro == 2) { in md_ioctl()
7663 mddev->ro = 0; in md_ioctl()
7664 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_ioctl()
7665 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_ioctl()
7670 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) { in md_ioctl()
7671 mddev_unlock(mddev); in md_ioctl()
7672 wait_event(mddev->sb_wait, in md_ioctl()
7673 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) && in md_ioctl()
7674 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); in md_ioctl()
7675 mddev_lock_nointr(mddev); in md_ioctl()
7690 err = md_add_new_disk(mddev, &info); in md_ioctl()
7695 if (mddev_is_clustered(mddev)) in md_ioctl()
7696 md_cluster_ops->new_disk_ack(mddev, false); in md_ioctl()
7702 err = hot_add_disk(mddev, new_decode_dev(arg)); in md_ioctl()
7706 err = do_md_run(mddev); in md_ioctl()
7710 err = set_bitmap_file(mddev, (int)arg); in md_ioctl()
7719 if (mddev->hold_active == UNTIL_IOCTL && in md_ioctl()
7721 mddev->hold_active = 0; in md_ioctl()
7722 mddev_unlock(mddev); in md_ioctl()
7725 clear_bit(MD_CLOSING, &mddev->flags); in md_ioctl()
7750 struct mddev *mddev = bdev->bd_disk->private_data; in md_set_read_only() local
7753 err = mddev_lock(mddev); in md_set_read_only()
7757 if (!mddev->raid_disks && !mddev->external) { in md_set_read_only()
7766 if (!ro && mddev->ro == 1 && mddev->pers) { in md_set_read_only()
7767 err = restart_array(mddev); in md_set_read_only()
7770 mddev->ro = 2; in md_set_read_only()
7774 mddev_unlock(mddev); in md_set_read_only()
7780 struct mddev *mddev; in md_open() local
7784 mddev = mddev_get(bdev->bd_disk->private_data); in md_open()
7786 if (!mddev) in md_open()
7789 err = mutex_lock_interruptible(&mddev->open_mutex); in md_open()
7794 if (test_bit(MD_CLOSING, &mddev->flags)) in md_open()
7797 atomic_inc(&mddev->openers); in md_open()
7798 mutex_unlock(&mddev->open_mutex); in md_open()
7804 mutex_unlock(&mddev->open_mutex); in md_open()
7806 mddev_put(mddev); in md_open()
7812 struct mddev *mddev = disk->private_data; in md_release() local
7814 BUG_ON(!mddev); in md_release()
7815 atomic_dec(&mddev->openers); in md_release()
7816 mddev_put(mddev); in md_release()
7821 struct mddev *mddev = disk->private_data; in md_check_events() local
7824 if (mddev->changed) in md_check_events()
7826 mddev->changed = 0; in md_check_events()
7832 struct mddev *mddev = disk->private_data; in md_free_disk() local
7834 percpu_ref_exit(&mddev->writes_pending); in md_free_disk()
7835 bioset_exit(&mddev->bio_set); in md_free_disk()
7836 bioset_exit(&mddev->sync_set); in md_free_disk()
7838 mddev_free(mddev); in md_free_disk()
7911 struct mddev *mddev, const char *name) in md_register_thread() argument
7922 thread->mddev = mddev; in md_register_thread()
7926 mdname(thread->mddev), in md_register_thread()
7959 void md_error(struct mddev *mddev, struct md_rdev *rdev) in md_error() argument
7964 if (!mddev->pers || !mddev->pers->error_handler) in md_error()
7966 mddev->pers->error_handler(mddev, rdev); in md_error()
7968 if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags)) in md_error()
7969 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_error()
7971 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_error()
7972 if (!test_bit(MD_BROKEN, &mddev->flags)) { in md_error()
7973 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_error()
7974 md_wakeup_thread(mddev->thread); in md_error()
7976 if (mddev->event_work.func) in md_error()
7977 queue_work(md_misc_wq, &mddev->event_work); in md_error()
8001 static int status_resync(struct seq_file *seq, struct mddev *mddev) in status_resync() argument
8009 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in status_resync()
8010 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in status_resync()
8011 max_sectors = mddev->resync_max_sectors; in status_resync()
8013 max_sectors = mddev->dev_sectors; in status_resync()
8015 resync = mddev->curr_resync; in status_resync()
8017 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) in status_resync()
8023 resync -= atomic_read(&mddev->recovery_active); in status_resync()
8036 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) { in status_resync()
8039 rdev_for_each(rdev, mddev) in status_resync()
8047 if (mddev->reshape_position != MaxSector) in status_resync()
8053 if (mddev->recovery_cp < MaxSector) { in status_resync()
8090 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? in status_resync()
8092 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? in status_resync()
8094 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? in status_resync()
8117 dt = ((jiffies - mddev->resync_mark) / HZ); in status_resync()
8120 curr_mark_cnt = mddev->curr_mark_cnt; in status_resync()
8121 recovery_active = atomic_read(&mddev->recovery_active); in status_resync()
8122 resync_mark_cnt = mddev->resync_mark_cnt; in status_resync()
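
(The resync_mark / curr_mark_cnt / recovery_active counters read above feed the rolling speed estimate that status_resync() prints. A minimal user-space sketch of that arithmetic follows, assuming an HZ-style tick count and 512-byte sectors; the function and variable names here are illustrative, not the kernel's.)

#include <stdio.h>

#define HZ 1000  /* assumed tick rate for this sketch */

/*
 * Model of the speed estimate in status_resync(): sectors issued since the
 * last mark, minus sectors still in flight, converted to KiB per second.
 * In the kernel these inputs live in struct mddev; here they are plain
 * parameters.
 */
static unsigned long resync_speed(unsigned long jiffies_now,
                                  unsigned long resync_mark,          /* jiffies at last mark */
                                  unsigned long long curr_mark_cnt,   /* sectors issued so far */
                                  unsigned long long resync_mark_cnt, /* sectors issued at the mark */
                                  unsigned long long recovery_active) /* sectors still in flight */
{
    unsigned long dt = (jiffies_now - resync_mark) / HZ;
    unsigned long long db = 0;

    if (!dt)
        dt = 1;                               /* avoid dividing by zero */
    if (curr_mark_cnt >= recovery_active + resync_mark_cnt)
        db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
    return (unsigned long)(db / 2 / dt);      /* 512-byte sectors -> KiB/s */
}

int main(void)
{
    /* 409600 sectors (200 MiB) completed over 2 seconds -> 102400 K/sec */
    printf("speed=%luK/sec\n", resync_speed(2 * HZ, 0, 409600, 0, 0));
    return 0;
}
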
8143 struct mddev *mddev; in md_seq_start() local
8158 mddev = list_entry(tmp, struct mddev, all_mddevs); in md_seq_start()
8159 if (!mddev_get(mddev)) in md_seq_start()
8162 return mddev; in md_seq_start()
8173 struct mddev *next_mddev, *mddev = v; in md_seq_next() local
8174 struct mddev *to_put = NULL; in md_seq_next()
8184 to_put = mddev; in md_seq_next()
8185 tmp = mddev->all_mddevs.next; in md_seq_next()
8194 next_mddev = list_entry(tmp, struct mddev, all_mddevs); in md_seq_next()
8197 mddev = next_mddev; in md_seq_next()
8198 tmp = mddev->all_mddevs.next; in md_seq_next()
8203 mddev_put(mddev); in md_seq_next()
8210 struct mddev *mddev = v; in md_seq_stop() local
8212 if (mddev && v != (void*)1 && v != (void*)2) in md_seq_stop()
8213 mddev_put(mddev); in md_seq_stop()
8218 struct mddev *mddev = v; in md_seq_show() local
8239 spin_lock(&mddev->lock); in md_seq_show()
8240 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { in md_seq_show()
8241 seq_printf(seq, "%s : %sactive", mdname(mddev), in md_seq_show()
8242 mddev->pers ? "" : "in"); in md_seq_show()
8243 if (mddev->pers) { in md_seq_show()
8244 if (mddev->ro==1) in md_seq_show()
8246 if (mddev->ro==2) in md_seq_show()
8248 seq_printf(seq, " %s", mddev->pers->name); in md_seq_show()
8253 rdev_for_each_rcu(rdev, mddev) { in md_seq_show()
8272 if (!list_empty(&mddev->disks)) { in md_seq_show()
8273 if (mddev->pers) in md_seq_show()
8276 mddev->array_sectors / 2); in md_seq_show()
8281 if (mddev->persistent) { in md_seq_show()
8282 if (mddev->major_version != 0 || in md_seq_show()
8283 mddev->minor_version != 90) { in md_seq_show()
8285 mddev->major_version, in md_seq_show()
8286 mddev->minor_version); in md_seq_show()
8288 } else if (mddev->external) in md_seq_show()
8290 mddev->metadata_type); in md_seq_show()
8294 if (mddev->pers) { in md_seq_show()
8295 mddev->pers->status(seq, mddev); in md_seq_show()
8297 if (mddev->pers->sync_request) { in md_seq_show()
8298 if (status_resync(seq, mddev)) in md_seq_show()
8304 md_bitmap_status(seq, mddev->bitmap); in md_seq_show()
8308 spin_unlock(&mddev->lock); in md_seq_show()
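
(For reference, a rough stand-alone model of the per-array status prefix that md_seq_show() emits, "name : active|inactive [(read-only)|(auto-read-only)] personality", based only on the mddev->pers and mddev->ro checks shown above; the helper below is illustrative, not kernel code.)

#include <stdio.h>

/*
 * Rough model of the status prefix printed per array:
 * "<name> : active|inactive [(read-only)|(auto-read-only)] <personality>".
 * The ro encoding (0 = rw, 1 = read-only, 2 = auto-read-only) mirrors the
 * mddev->ro checks above; everything else here is illustrative.
 */
static void show_array_status(const char *name, int has_pers, int ro,
                              const char *pers)
{
    printf("%s : %sactive", name, has_pers ? "" : "in");
    if (has_pers) {
        if (ro == 1)
            printf(" (read-only)");
        if (ro == 2)
            printf(" (auto-read-only)");
        printf(" %s", pers);
    }
    printf("\n");
}

int main(void)
{
    show_array_status("md0", 1, 0, "raid1");  /* md0 : active raid1 */
    show_array_status("md1", 1, 2, "raid5");  /* md1 : active (auto-read-only) raid5 */
    show_array_status("md2", 0, 0, "");       /* md2 : inactive */
    return 0;
}
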
8406 int md_setup_cluster(struct mddev *mddev, int nodes) in md_setup_cluster() argument
8420 ret = md_cluster_ops->join(mddev, nodes); in md_setup_cluster()
8422 mddev->safemode_delay = 0; in md_setup_cluster()
8426 void md_cluster_stop(struct mddev *mddev) in md_cluster_stop() argument
8430 md_cluster_ops->leave(mddev); in md_cluster_stop()
8434 static int is_mddev_idle(struct mddev *mddev, int init) in is_mddev_idle() argument
8442 rdev_for_each_rcu(rdev, mddev) { in is_mddev_idle()
8477 void md_done_sync(struct mddev *mddev, int blocks, int ok) in md_done_sync() argument
8480 atomic_sub(blocks, &mddev->recovery_active); in md_done_sync()
8481 wake_up(&mddev->recovery_wait); in md_done_sync()
8483 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_done_sync()
8484 set_bit(MD_RECOVERY_ERROR, &mddev->recovery); in md_done_sync()
8485 md_wakeup_thread(mddev->thread); in md_done_sync()
8498 bool md_write_start(struct mddev *mddev, struct bio *bi) in md_write_start() argument
8505 BUG_ON(mddev->ro == 1); in md_write_start()
8506 if (mddev->ro == 2) { in md_write_start()
8508 mddev->ro = 0; in md_write_start()
8509 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_write_start()
8510 md_wakeup_thread(mddev->thread); in md_write_start()
8511 md_wakeup_thread(mddev->sync_thread); in md_write_start()
8515 percpu_ref_get(&mddev->writes_pending); in md_write_start()
8517 if (mddev->safemode == 1) in md_write_start()
8518 mddev->safemode = 0; in md_write_start()
8520 if (mddev->in_sync || mddev->sync_checkers) { in md_write_start()
8521 spin_lock(&mddev->lock); in md_write_start()
8522 if (mddev->in_sync) { in md_write_start()
8523 mddev->in_sync = 0; in md_write_start()
8524 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_write_start()
8525 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_write_start()
8526 md_wakeup_thread(mddev->thread); in md_write_start()
8529 spin_unlock(&mddev->lock); in md_write_start()
8533 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_write_start()
8534 if (!mddev->has_superblocks) in md_write_start()
8536 wait_event(mddev->sb_wait, in md_write_start()
8537 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || in md_write_start()
8538 mddev->suspended); in md_write_start()
8539 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { in md_write_start()
8540 percpu_ref_put(&mddev->writes_pending); in md_write_start()
8555 void md_write_inc(struct mddev *mddev, struct bio *bi) in md_write_inc() argument
8559 WARN_ON_ONCE(mddev->in_sync || mddev->ro); in md_write_inc()
8560 percpu_ref_get(&mddev->writes_pending); in md_write_inc()
8564 void md_write_end(struct mddev *mddev) in md_write_end() argument
8566 percpu_ref_put(&mddev->writes_pending); in md_write_end()
8568 if (mddev->safemode == 2) in md_write_end()
8569 md_wakeup_thread(mddev->thread); in md_write_end()
8570 else if (mddev->safemode_delay) in md_write_end()
8574 mod_timer(&mddev->safemode_timer, in md_write_end()
8575 roundup(jiffies, mddev->safemode_delay) + in md_write_end()
8576 mddev->safemode_delay); in md_write_end()
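
(The mod_timer() arming above rounds the expiry up to a multiple of safemode_delay and adds one more delay, so a burst of writes keeps computing the same expiry instead of re-arming the safemode timer once per write. A small stand-alone sketch of that expiry calculation, with an illustrative roundup helper:)

#include <stdio.h>

/* Plain-C stand-in for the kernel's roundup(x, y): round x up to a multiple of y. */
static unsigned long roundup_ul(unsigned long x, unsigned long y)
{
    return ((x + y - 1) / y) * y;
}

/*
 * Model of the expiry passed to mod_timer() in md_write_end(): rounding
 * jiffies up to a safemode_delay boundary and adding one more delay means a
 * burst of writes keeps producing the same expiry value, so the timer is
 * effectively armed once per delay interval rather than once per write.
 */
static unsigned long safemode_expiry(unsigned long jiffies_now,
                                     unsigned long safemode_delay)
{
    return roundup_ul(jiffies_now, safemode_delay) + safemode_delay;
}

int main(void)
{
    /* With a 200-jiffy delay, writes at t = 1001, 1100 and 1199 all expire at 1400. */
    printf("%lu %lu %lu\n",
           safemode_expiry(1001, 200),
           safemode_expiry(1100, 200),
           safemode_expiry(1199, 200));
    return 0;
}
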
8582 void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, in md_submit_discard_bio() argument
8593 if (mddev->gendisk) in md_submit_discard_bio()
8595 disk_devt(mddev->gendisk), in md_submit_discard_bio()
8601 int acct_bioset_init(struct mddev *mddev) in acct_bioset_init() argument
8605 if (!bioset_initialized(&mddev->io_acct_set)) in acct_bioset_init()
8606 err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE, in acct_bioset_init()
8612 void acct_bioset_exit(struct mddev *mddev) in acct_bioset_exit() argument
8614 bioset_exit(&mddev->io_acct_set); in acct_bioset_exit()
8634 void md_account_bio(struct mddev *mddev, struct bio **bio) in md_account_bio() argument
8643 clone = bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_acct_set); in md_account_bio()
8660 void md_allow_write(struct mddev *mddev) in md_allow_write() argument
8662 if (!mddev->pers) in md_allow_write()
8664 if (mddev->ro) in md_allow_write()
8666 if (!mddev->pers->sync_request) in md_allow_write()
8669 spin_lock(&mddev->lock); in md_allow_write()
8670 if (mddev->in_sync) { in md_allow_write()
8671 mddev->in_sync = 0; in md_allow_write()
8672 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_allow_write()
8673 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_allow_write()
8674 if (mddev->safemode_delay && in md_allow_write()
8675 mddev->safemode == 0) in md_allow_write()
8676 mddev->safemode = 1; in md_allow_write()
8677 spin_unlock(&mddev->lock); in md_allow_write()
8678 md_update_sb(mddev, 0); in md_allow_write()
8679 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_allow_write()
8681 wait_event(mddev->sb_wait, in md_allow_write()
8682 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); in md_allow_write()
8684 spin_unlock(&mddev->lock); in md_allow_write()
8693 struct mddev *mddev = thread->mddev; in md_do_sync() local
8694 struct mddev *mddev2; in md_do_sync()
8709 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) || in md_do_sync()
8710 test_bit(MD_RECOVERY_WAIT, &mddev->recovery)) in md_do_sync()
8712 if (mddev->ro) {/* never try to sync a read-only array */ in md_do_sync()
8713 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_do_sync()
8717 if (mddev_is_clustered(mddev)) { in md_do_sync()
8718 ret = md_cluster_ops->resync_start(mddev); in md_do_sync()
8722 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags); in md_do_sync()
8723 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in md_do_sync()
8724 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) || in md_do_sync()
8725 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) in md_do_sync()
8726 && ((unsigned long long)mddev->curr_resync_completed in md_do_sync()
8727 < (unsigned long long)mddev->resync_max_sectors)) in md_do_sync()
8731 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
8732 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { in md_do_sync()
8735 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in md_do_sync()
8740 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in md_do_sync()
8745 mddev->last_sync_action = action ?: desc; in md_do_sync()
8759 mddev->curr_resync = MD_RESYNC_DELAYED; in md_do_sync()
8762 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
8768 if (mddev2 == mddev) in md_do_sync()
8770 if (!mddev->parallel_resync in md_do_sync()
8772 && match_mddev_units(mddev, mddev2)) { in md_do_sync()
8774 if (mddev < mddev2 && in md_do_sync()
8775 mddev->curr_resync == MD_RESYNC_DELAYED) { in md_do_sync()
8777 mddev->curr_resync = MD_RESYNC_YIELDED; in md_do_sync()
8780 if (mddev > mddev2 && in md_do_sync()
8781 mddev->curr_resync == MD_RESYNC_YIELDED) in md_do_sync()
8791 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_do_sync()
8792 mddev2->curr_resync >= mddev->curr_resync) { in md_do_sync()
8796 desc, mdname(mddev), in md_do_sync()
8811 } while (mddev->curr_resync < MD_RESYNC_DELAYED); in md_do_sync()
8814 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
8818 max_sectors = mddev->resync_max_sectors; in md_do_sync()
8819 atomic64_set(&mddev->resync_mismatches, 0); in md_do_sync()
8821 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
8822 j = mddev->resync_min; in md_do_sync()
8823 else if (!mddev->bitmap) in md_do_sync()
8824 j = mddev->recovery_cp; in md_do_sync()
8826 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { in md_do_sync()
8827 max_sectors = mddev->resync_max_sectors; in md_do_sync()
8833 if (mddev_is_clustered(mddev) && in md_do_sync()
8834 mddev->reshape_position != MaxSector) in md_do_sync()
8835 j = mddev->reshape_position; in md_do_sync()
8838 max_sectors = mddev->dev_sectors; in md_do_sync()
8841 rdev_for_each_rcu(rdev, mddev) in md_do_sync()
8858 if (mddev->bitmap) { in md_do_sync()
8859 mddev->pers->quiesce(mddev, 1); in md_do_sync()
8860 mddev->pers->quiesce(mddev, 0); in md_do_sync()
8864 pr_info("md: %s of RAID array %s\n", desc, mdname(mddev)); in md_do_sync()
8865 pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev)); in md_do_sync()
8867 speed_max(mddev), desc); in md_do_sync()
8869 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ in md_do_sync()
8877 mddev->resync_mark = mark[last_mark]; in md_do_sync()
8878 mddev->resync_mark_cnt = mark_cnt[last_mark]; in md_do_sync()
8887 atomic_set(&mddev->recovery_active, 0); in md_do_sync()
8892 desc, mdname(mddev)); in md_do_sync()
8893 mddev->curr_resync = j; in md_do_sync()
8895 mddev->curr_resync = MD_RESYNC_ACTIVE; /* no longer delayed */ in md_do_sync()
8896 mddev->curr_resync_completed = j; in md_do_sync()
8897 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_do_sync()
8907 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
8908 ((mddev->curr_resync > mddev->curr_resync_completed && in md_do_sync()
8909 (mddev->curr_resync - mddev->curr_resync_completed) in md_do_sync()
8912 (j - mddev->curr_resync_completed)*2 in md_do_sync()
8913 >= mddev->resync_max - mddev->curr_resync_completed || in md_do_sync()
8914 mddev->curr_resync_completed > mddev->resync_max in md_do_sync()
8917 wait_event(mddev->recovery_wait, in md_do_sync()
8918 atomic_read(&mddev->recovery_active) == 0); in md_do_sync()
8919 mddev->curr_resync_completed = j; in md_do_sync()
8920 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && in md_do_sync()
8921 j > mddev->recovery_cp) in md_do_sync()
8922 mddev->recovery_cp = j; in md_do_sync()
8924 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_do_sync()
8925 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_do_sync()
8928 while (j >= mddev->resync_max && in md_do_sync()
8929 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
8935 wait_event_interruptible(mddev->recovery_wait, in md_do_sync()
8936 mddev->resync_max > j in md_do_sync()
8938 &mddev->recovery)); in md_do_sync()
8941 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
8944 sectors = mddev->pers->sync_request(mddev, j, &skipped); in md_do_sync()
8946 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_do_sync()
8952 atomic_add(sectors, &mddev->recovery_active); in md_do_sync()
8955 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
8963 mddev->curr_resync = j; in md_do_sync()
8964 mddev->curr_mark_cnt = io_sectors; in md_do_sync()
8980 mddev->resync_mark = mark[next]; in md_do_sync()
8981 mddev->resync_mark_cnt = mark_cnt[next]; in md_do_sync()
8983 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); in md_do_sync()
8987 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
9000 recovery_done = io_sectors - atomic_read(&mddev->recovery_active); in md_do_sync()
9001 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2 in md_do_sync()
9002 /((jiffies-mddev->resync_mark)/HZ +1) +1; in md_do_sync()
9004 if (currspeed > speed_min(mddev)) { in md_do_sync()
9005 if (currspeed > speed_max(mddev)) { in md_do_sync()
9009 if (!is_mddev_idle(mddev, 0)) { in md_do_sync()
9014 wait_event(mddev->recovery_wait, in md_do_sync()
9015 !atomic_read(&mddev->recovery_active)); in md_do_sync()
9019 pr_info("md: %s: %s %s.\n",mdname(mddev), desc, in md_do_sync()
9020 test_bit(MD_RECOVERY_INTR, &mddev->recovery) in md_do_sync()
9026 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); in md_do_sync()
9028 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
9029 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_do_sync()
9030 mddev->curr_resync >= MD_RESYNC_ACTIVE) { in md_do_sync()
9031 mddev->curr_resync_completed = mddev->curr_resync; in md_do_sync()
9032 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_do_sync()
9034 mddev->pers->sync_request(mddev, max_sectors, &skipped); in md_do_sync()
9036 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && in md_do_sync()
9037 mddev->curr_resync >= MD_RESYNC_ACTIVE) { in md_do_sync()
9038 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
9039 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
9040 if (mddev->curr_resync >= mddev->recovery_cp) { in md_do_sync()
9042 desc, mdname(mddev)); in md_do_sync()
9044 &mddev->recovery)) in md_do_sync()
9045 mddev->recovery_cp = in md_do_sync()
9046 mddev->curr_resync_completed; in md_do_sync()
9048 mddev->recovery_cp = in md_do_sync()
9049 mddev->curr_resync; in md_do_sync()
9052 mddev->recovery_cp = MaxSector; in md_do_sync()
9054 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
9055 mddev->curr_resync = MaxSector; in md_do_sync()
9056 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
9057 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) { in md_do_sync()
9059 rdev_for_each_rcu(rdev, mddev) in md_do_sync()
9061 mddev->delta_disks >= 0 && in md_do_sync()
9065 rdev->recovery_offset < mddev->curr_resync) in md_do_sync()
9066 rdev->recovery_offset = mddev->curr_resync; in md_do_sync()
9075 set_mask_bits(&mddev->sb_flags, 0, in md_do_sync()
9078 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
9079 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_do_sync()
9080 mddev->delta_disks > 0 && in md_do_sync()
9081 mddev->pers->finish_reshape && in md_do_sync()
9082 mddev->pers->size && in md_do_sync()
9083 mddev->queue) { in md_do_sync()
9084 mddev_lock_nointr(mddev); in md_do_sync()
9085 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); in md_do_sync()
9086 mddev_unlock(mddev); in md_do_sync()
9087 if (!mddev_is_clustered(mddev)) in md_do_sync()
9088 set_capacity_and_notify(mddev->gendisk, in md_do_sync()
9089 mddev->array_sectors); in md_do_sync()
9092 spin_lock(&mddev->lock); in md_do_sync()
9093 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
9095 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
9096 mddev->resync_min = 0; in md_do_sync()
9097 mddev->resync_max = MaxSector; in md_do_sync()
9098 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
9099 mddev->resync_min = mddev->curr_resync_completed; in md_do_sync()
9100 set_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_do_sync()
9101 mddev->curr_resync = MD_RESYNC_NONE; in md_do_sync()
9102 spin_unlock(&mddev->lock); in md_do_sync()
9105 md_wakeup_thread(mddev->thread); in md_do_sync()
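
(The currspeed expression inside the md_do_sync() loop above converts sectors completed since the last mark into KiB/s and compares the result with speed_min()/speed_max() to decide whether to back off. A stand-alone model of that check, with illustrative names and an assumed HZ value:)

#include <stdio.h>

#define HZ 1000  /* assumed tick rate for this sketch */

/*
 * Model of the throttle decision near the end of the md_do_sync() main loop:
 * currspeed is the KiB/s completed since the last mark, keeping the same
 * "+1" terms as the kernel expression so neither the divisor nor the rate
 * can be zero.  Returns 1 when the rate exceeds the configured maximum.
 */
static int above_speed_max(unsigned long long io_sectors,      /* sectors issued so far */
                           unsigned long long recovery_active, /* sectors still in flight */
                           unsigned long long resync_mark_cnt, /* sectors at the last mark */
                           unsigned long jiffies_now,
                           unsigned long resync_mark,          /* jiffies at the last mark */
                           unsigned long speed_max_kib)
{
    unsigned long long recovery_done = io_sectors - recovery_active;
    unsigned long currspeed =
        (unsigned long)(recovery_done - resync_mark_cnt) / 2 /
        ((jiffies_now - resync_mark) / HZ + 1) + 1;

    printf("currspeed=%luK/sec\n", currspeed);
    return currspeed > speed_max_kib;
}

int main(void)
{
    /* ~400 MiB issued over ~2s: well above a 100 MiB/s (102400 KiB/s) cap. */
    printf("throttle=%d\n",
           above_speed_max(819200, 0, 0, 2 * HZ, 0, 100 * 1024));
    return 0;
}
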
9110 static int remove_and_add_spares(struct mddev *mddev, in remove_and_add_spares() argument
9118 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in remove_and_add_spares()
9122 rdev_for_each(rdev, mddev) { in remove_and_add_spares()
9140 rdev_for_each(rdev, mddev) { in remove_and_add_spares()
9148 if (mddev->pers->hot_remove_disk( in remove_and_add_spares()
9149 mddev, rdev) == 0) { in remove_and_add_spares()
9150 sysfs_unlink_rdev(mddev, rdev); in remove_and_add_spares()
9160 if (removed && mddev->kobj.sd) in remove_and_add_spares()
9161 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in remove_and_add_spares()
9166 rdev_for_each(rdev, mddev) { in remove_and_add_spares()
9181 if (mddev->ro && in remove_and_add_spares()
9188 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) { in remove_and_add_spares()
9190 sysfs_link_rdev(mddev, rdev); in remove_and_add_spares()
9194 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in remove_and_add_spares()
9199 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in remove_and_add_spares()
9205 struct mddev *mddev = container_of(ws, struct mddev, del_work); in md_start_sync() local
9207 mddev->sync_thread = md_register_thread(md_do_sync, in md_start_sync()
9208 mddev, in md_start_sync()
9210 if (!mddev->sync_thread) { in md_start_sync()
9212 mdname(mddev)); in md_start_sync()
9214 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_start_sync()
9215 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_start_sync()
9216 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_start_sync()
9217 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_start_sync()
9218 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_start_sync()
9221 &mddev->recovery)) in md_start_sync()
9222 if (mddev->sysfs_action) in md_start_sync()
9223 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_start_sync()
9225 md_wakeup_thread(mddev->sync_thread); in md_start_sync()
9226 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_start_sync()
9252 void md_check_recovery(struct mddev *mddev) in md_check_recovery() argument
9254 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) { in md_check_recovery()
9258 set_bit(MD_UPDATING_SB, &mddev->flags); in md_check_recovery()
9260 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags)) in md_check_recovery()
9261 md_update_sb(mddev, 0); in md_check_recovery()
9262 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags); in md_check_recovery()
9263 wake_up(&mddev->sb_wait); in md_check_recovery()
9266 if (mddev->suspended) in md_check_recovery()
9269 if (mddev->bitmap) in md_check_recovery()
9270 md_bitmap_daemon_work(mddev); in md_check_recovery()
9273 if (mddev->pers->sync_request && !mddev->external) { in md_check_recovery()
9275 mdname(mddev)); in md_check_recovery()
9276 mddev->safemode = 2; in md_check_recovery()
9281 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) in md_check_recovery()
9284 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) || in md_check_recovery()
9285 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || in md_check_recovery()
9286 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || in md_check_recovery()
9287 (mddev->external == 0 && mddev->safemode == 1) || in md_check_recovery()
9288 (mddev->safemode == 2 in md_check_recovery()
9289 && !mddev->in_sync && mddev->recovery_cp == MaxSector) in md_check_recovery()
9293 if (mddev_trylock(mddev)) { in md_check_recovery()
9295 bool try_set_sync = mddev->safemode != 0; in md_check_recovery()
9297 if (!mddev->external && mddev->safemode == 1) in md_check_recovery()
9298 mddev->safemode = 0; in md_check_recovery()
9300 if (mddev->ro) { in md_check_recovery()
9302 if (!mddev->external && mddev->in_sync) in md_check_recovery()
9308 rdev_for_each(rdev, mddev) in md_check_recovery()
9317 remove_and_add_spares(mddev, NULL); in md_check_recovery()
9321 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_check_recovery()
9322 md_unregister_thread(&mddev->sync_thread); in md_check_recovery()
9323 md_reap_sync_thread(mddev); in md_check_recovery()
9324 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9325 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_check_recovery()
9326 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_check_recovery()
9330 if (mddev_is_clustered(mddev)) { in md_check_recovery()
9335 rdev_for_each_safe(rdev, tmp, mddev) { in md_check_recovery()
9342 if (try_set_sync && !mddev->external && !mddev->in_sync) { in md_check_recovery()
9343 spin_lock(&mddev->lock); in md_check_recovery()
9344 set_in_sync(mddev); in md_check_recovery()
9345 spin_unlock(&mddev->lock); in md_check_recovery()
9348 if (mddev->sb_flags) in md_check_recovery()
9349 md_update_sb(mddev, 0); in md_check_recovery()
9351 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && in md_check_recovery()
9352 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { in md_check_recovery()
9354 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_check_recovery()
9357 if (mddev->sync_thread) { in md_check_recovery()
9358 md_unregister_thread(&mddev->sync_thread); in md_check_recovery()
9359 md_reap_sync_thread(mddev); in md_check_recovery()
9365 mddev->curr_resync_completed = 0; in md_check_recovery()
9366 spin_lock(&mddev->lock); in md_check_recovery()
9367 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_check_recovery()
9368 spin_unlock(&mddev->lock); in md_check_recovery()
9372 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_check_recovery()
9373 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_check_recovery()
9375 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || in md_check_recovery()
9376 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) in md_check_recovery()
9385 if (mddev->reshape_position != MaxSector) { in md_check_recovery()
9386 if (mddev->pers->check_reshape == NULL || in md_check_recovery()
9387 mddev->pers->check_reshape(mddev) != 0) in md_check_recovery()
9390 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_check_recovery()
9391 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9392 } else if ((spares = remove_and_add_spares(mddev, NULL))) { in md_check_recovery()
9393 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_check_recovery()
9394 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_check_recovery()
9395 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_check_recovery()
9396 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9397 } else if (mddev->recovery_cp < MaxSector) { in md_check_recovery()
9398 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_check_recovery()
9399 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9400 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) in md_check_recovery()
9404 if (mddev->pers->sync_request) { in md_check_recovery()
9410 md_bitmap_write_all(mddev->bitmap); in md_check_recovery()
9412 INIT_WORK(&mddev->del_work, md_start_sync); in md_check_recovery()
9413 queue_work(md_misc_wq, &mddev->del_work); in md_check_recovery()
9417 if (!mddev->sync_thread) { in md_check_recovery()
9418 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_check_recovery()
9421 &mddev->recovery)) in md_check_recovery()
9422 if (mddev->sysfs_action) in md_check_recovery()
9423 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_check_recovery()
9426 wake_up(&mddev->sb_wait); in md_check_recovery()
9427 mddev_unlock(mddev); in md_check_recovery()
9432 void md_reap_sync_thread(struct mddev *mddev) in md_reap_sync_thread() argument
9435 sector_t old_dev_sectors = mddev->dev_sectors; in md_reap_sync_thread()
9439 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_reap_sync_thread()
9440 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in md_reap_sync_thread()
9441 mddev->degraded != mddev->raid_disks) { in md_reap_sync_thread()
9444 if (mddev->pers->spare_active(mddev)) { in md_reap_sync_thread()
9445 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in md_reap_sync_thread()
9446 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_reap_sync_thread()
9449 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_reap_sync_thread()
9450 mddev->pers->finish_reshape) { in md_reap_sync_thread()
9451 mddev->pers->finish_reshape(mddev); in md_reap_sync_thread()
9452 if (mddev_is_clustered(mddev)) in md_reap_sync_thread()
9459 if (!mddev->degraded) in md_reap_sync_thread()
9460 rdev_for_each(rdev, mddev) in md_reap_sync_thread()
9463 md_update_sb(mddev, 1); in md_reap_sync_thread()
9467 if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags)) in md_reap_sync_thread()
9468 md_cluster_ops->resync_finish(mddev); in md_reap_sync_thread()
9469 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_reap_sync_thread()
9470 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_reap_sync_thread()
9471 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_reap_sync_thread()
9472 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_reap_sync_thread()
9473 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_reap_sync_thread()
9474 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_reap_sync_thread()
9480 if (mddev_is_clustered(mddev) && is_reshaped in md_reap_sync_thread()
9481 && !test_bit(MD_CLOSING, &mddev->flags)) in md_reap_sync_thread()
9482 md_cluster_ops->update_size(mddev, old_dev_sectors); in md_reap_sync_thread()
9485 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_reap_sync_thread()
9486 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_reap_sync_thread()
9487 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_reap_sync_thread()
9489 if (mddev->event_work.func) in md_reap_sync_thread()
9490 queue_work(md_misc_wq, &mddev->event_work); in md_reap_sync_thread()
9494 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) in md_wait_for_blocked_rdev() argument
9501 rdev_dec_pending(rdev, mddev); in md_wait_for_blocked_rdev()
9505 void md_finish_reshape(struct mddev *mddev) in md_finish_reshape() argument
9510 rdev_for_each(rdev, mddev) { in md_finish_reshape()
9526 struct mddev *mddev = rdev->mddev; in rdev_set_badblocks() local
9538 set_mask_bits(&mddev->sb_flags, 0, in rdev_set_badblocks()
9540 md_wakeup_thread(rdev->mddev->thread); in rdev_set_badblocks()
9565 struct mddev *mddev, *n; in md_notify_reboot() local
9569 list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) { in md_notify_reboot()
9570 if (!mddev_get(mddev)) in md_notify_reboot()
9573 if (mddev_trylock(mddev)) { in md_notify_reboot()
9574 if (mddev->pers) in md_notify_reboot()
9575 __md_stop_writes(mddev); in md_notify_reboot()
9576 if (mddev->persistent) in md_notify_reboot()
9577 mddev->safemode = 2; in md_notify_reboot()
9578 mddev_unlock(mddev); in md_notify_reboot()
9581 mddev_put(mddev); in md_notify_reboot()
9654 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) in check_sb_changes() argument
9664 if (mddev->dev_sectors != le64_to_cpu(sb->size)) { in check_sb_changes()
9665 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size)); in check_sb_changes()
9669 md_bitmap_update_sb(mddev->bitmap); in check_sb_changes()
9673 rdev_for_each_safe(rdev2, tmp, mddev) { in check_sb_changes()
9699 ret = remove_and_add_spares(mddev, rdev2); in check_sb_changes()
9704 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in check_sb_changes()
9705 md_wakeup_thread(mddev->thread); in check_sb_changes()
9714 md_error(mddev, rdev2); in check_sb_changes()
9720 if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) { in check_sb_changes()
9721 ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks)); in check_sb_changes()
9730 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && in check_sb_changes()
9736 mddev->reshape_position = le64_to_cpu(sb->reshape_position); in check_sb_changes()
9737 if (mddev->pers->update_reshape_pos) in check_sb_changes()
9738 mddev->pers->update_reshape_pos(mddev); in check_sb_changes()
9739 if (mddev->pers->start_reshape) in check_sb_changes()
9740 mddev->pers->start_reshape(mddev); in check_sb_changes()
9741 } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && in check_sb_changes()
9742 mddev->reshape_position != MaxSector && in check_sb_changes()
9745 mddev->reshape_position = MaxSector; in check_sb_changes()
9746 if (mddev->pers->update_reshape_pos) in check_sb_changes()
9747 mddev->pers->update_reshape_pos(mddev); in check_sb_changes()
9751 mddev->events = le64_to_cpu(sb->events); in check_sb_changes()
9754 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev) in read_rdev() argument
9768 err = super_types[mddev->major_version]. in read_rdev()
9769 load_super(rdev, NULL, mddev->minor_version); in read_rdev()
9794 mddev->pers->spare_active(mddev)) in read_rdev()
9795 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in read_rdev()
9801 void md_reload_sb(struct mddev *mddev, int nr) in md_reload_sb() argument
9807 rdev_for_each_rcu(iter, mddev) { in md_reload_sb()
9819 err = read_rdev(mddev, rdev); in md_reload_sb()
9823 check_sb_changes(mddev, rdev); in md_reload_sb()
9826 rdev_for_each_rcu(rdev, mddev) { in md_reload_sb()
9828 read_rdev(mddev, rdev); in md_reload_sb()
9904 struct mddev *mddev, *n; in md_exit() local
9925 list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) { in md_exit()
9926 if (!mddev_get(mddev)) in md_exit()
9929 export_array(mddev); in md_exit()
9930 mddev->ctime = 0; in md_exit()
9931 mddev->hold_active = 0; in md_exit()
9937 mddev_put(mddev); in md_exit()