Lines matching refs: conf (uses of struct r5conf in drivers/md/raid5.c, the Linux md RAID4/5/6 driver). Each entry shows the kernel source line number, the matching line, and the enclosing function.

79 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)  in stripe_hash()  argument
82 return &conf->stripe_hashtbl[hash]; in stripe_hash()
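
stripe_hash() above picks a bucket of conf->stripe_hashtbl from a stripe's starting sector. A minimal userspace sketch of that bucket selection, assuming illustrative constants (the driver derives its shift and mask from the page size and the hash-table size, not from these values):

    #include <stdint.h>

    #define DEMO_STRIPE_SHIFT 3          /* assumed: 8 sectors (4 KiB) per stripe unit */
    #define DEMO_HASH_MASK    (256 - 1)  /* assumed: 256-bucket, power-of-two table */

    /* Drop the in-stripe bits, then mask down to the table size. */
    static inline unsigned stripe_hash_bucket(uint64_t sector)
    {
            return (unsigned)((sector >> DEMO_STRIPE_SHIFT) & DEMO_HASH_MASK);
    }
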
90 static inline void lock_device_hash_lock(struct r5conf *conf, int hash) in lock_device_hash_lock() argument
92 spin_lock_irq(conf->hash_locks + hash); in lock_device_hash_lock()
93 spin_lock(&conf->device_lock); in lock_device_hash_lock()
96 static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) in unlock_device_hash_lock() argument
98 spin_unlock(&conf->device_lock); in unlock_device_hash_lock()
99 spin_unlock_irq(conf->hash_locks + hash); in unlock_device_hash_lock()
102 static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) in lock_all_device_hash_locks_irq() argument
105 spin_lock_irq(conf->hash_locks); in lock_all_device_hash_locks_irq()
107 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); in lock_all_device_hash_locks_irq()
108 spin_lock(&conf->device_lock); in lock_all_device_hash_locks_irq()
111 static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) in unlock_all_device_hash_locks_irq() argument
114 spin_unlock(&conf->device_lock); in unlock_all_device_hash_locks_irq()
116 spin_unlock(conf->hash_locks + i); in unlock_all_device_hash_locks_irq()
117 spin_unlock_irq(conf->hash_locks); in unlock_all_device_hash_locks_irq()
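
lock_device_hash_lock() and lock_all_device_hash_locks_irq() above encode a fixed ordering: a hash lock (or, on the all-buckets path, every hash lock in ascending index order) is always taken before conf->device_lock, and released in reverse. A minimal pthread sketch of the same discipline, assuming NR_LOCKS stands in for NR_STRIPE_HASH_LOCKS (plain mutexes replace the kernel's irq-disabling, lockdep-nested spinlocks):

    #include <pthread.h>

    #define NR_LOCKS 8   /* assumed stand-in for NR_STRIPE_HASH_LOCKS */

    static pthread_mutex_t hash_locks[NR_LOCKS];
    static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;

    static void locks_init(void)   /* call once, before any thread runs */
    {
            for (int i = 0; i < NR_LOCKS; i++)
                    pthread_mutex_init(&hash_locks[i], NULL);
    }

    static void lock_one(int hash)
    {
            pthread_mutex_lock(&hash_locks[hash]);  /* bucket lock first... */
            pthread_mutex_lock(&device_lock);       /* ...then the global lock */
    }

    static void unlock_one(int hash)
    {
            pthread_mutex_unlock(&device_lock);
            pthread_mutex_unlock(&hash_locks[hash]);
    }

    static void lock_all(void)
    {
            /* Ascending bucket order, so lock_all() can never deadlock
             * against lock_one() or another lock_all(). */
            for (int i = 0; i < NR_LOCKS; i++)
                    pthread_mutex_lock(&hash_locks[i]);
            pthread_mutex_lock(&device_lock);
    }

    static void unlock_all(void)
    {
            pthread_mutex_unlock(&device_lock);
            for (int i = NR_LOCKS - 1; i >= 0; i--)
                    pthread_mutex_unlock(&hash_locks[i]);
    }
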
159 static void print_raid5_conf (struct r5conf *conf);
177 struct r5conf *conf = sh->raid_conf; in raid5_wakeup_stripe_thread() local
189 group = conf->worker_groups + cpu_to_group(cpu); in raid5_wakeup_stripe_thread()
198 if (conf->worker_cnt_per_group == 0) { in raid5_wakeup_stripe_thread()
199 md_wakeup_thread(conf->mddev->thread); in raid5_wakeup_stripe_thread()
203 group = conf->worker_groups + cpu_to_group(sh->cpu); in raid5_wakeup_stripe_thread()
211 for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) { in raid5_wakeup_stripe_thread()
221 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, in do_release_stripe() argument
228 BUG_ON(atomic_read(&conf->active_stripes)==0); in do_release_stripe()
230 if (r5c_is_writeback(conf->log)) in do_release_stripe()
242 (conf->quiesce && r5c_is_writeback(conf->log) && in do_release_stripe()
252 list_add_tail(&sh->lru, &conf->delayed_list); in do_release_stripe()
254 sh->bm_seq - conf->seq_write > 0) in do_release_stripe()
255 list_add_tail(&sh->lru, &conf->bitmap_list); in do_release_stripe()
259 if (conf->worker_cnt_per_group == 0) { in do_release_stripe()
262 &conf->loprio_list); in do_release_stripe()
265 &conf->handle_list); in do_release_stripe()
271 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
275 if (atomic_dec_return(&conf->preread_active_stripes) in do_release_stripe()
277 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
278 atomic_dec(&conf->active_stripes); in do_release_stripe()
280 if (!r5c_is_writeback(conf->log)) in do_release_stripe()
286 else if (injournal == conf->raid_disks - conf->max_degraded) { in do_release_stripe()
289 atomic_inc(&conf->r5c_cached_full_stripes); in do_release_stripe()
291 atomic_dec(&conf->r5c_cached_partial_stripes); in do_release_stripe()
292 list_add_tail(&sh->lru, &conf->r5c_full_stripe_list); in do_release_stripe()
293 r5c_check_cached_full_stripe(conf); in do_release_stripe()
300 list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list); in do_release_stripe()
306 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, in __release_stripe() argument
310 do_release_stripe(conf, sh, temp_inactive_list); in __release_stripe()
320 static void release_inactive_stripe_list(struct r5conf *conf, in release_inactive_stripe_list() argument
341 spin_lock_irqsave(conf->hash_locks + hash, flags); in release_inactive_stripe_list()
342 if (list_empty(conf->inactive_list + hash) && in release_inactive_stripe_list()
344 atomic_dec(&conf->empty_inactive_list_nr); in release_inactive_stripe_list()
345 list_splice_tail_init(list, conf->inactive_list + hash); in release_inactive_stripe_list()
347 spin_unlock_irqrestore(conf->hash_locks + hash, flags); in release_inactive_stripe_list()
354 wake_up(&conf->wait_for_stripe); in release_inactive_stripe_list()
355 if (atomic_read(&conf->active_stripes) == 0) in release_inactive_stripe_list()
356 wake_up(&conf->wait_for_quiescent); in release_inactive_stripe_list()
357 if (conf->retry_read_aligned) in release_inactive_stripe_list()
358 md_wakeup_thread(conf->mddev->thread); in release_inactive_stripe_list()
363 static int release_stripe_list(struct r5conf *conf, in release_stripe_list() argument
370 head = llist_del_all(&conf->released_stripes); in release_stripe_list()
384 __release_stripe(conf, sh, &temp_inactive_list[hash]); in release_stripe_list()
393 struct r5conf *conf = sh->raid_conf; in raid5_release_stripe() local
404 if (unlikely(!conf->mddev->thread) || in raid5_release_stripe()
407 wakeup = llist_add(&sh->release_list, &conf->released_stripes); in raid5_release_stripe()
409 md_wakeup_thread(conf->mddev->thread); in raid5_release_stripe()
413 if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) { in raid5_release_stripe()
416 do_release_stripe(conf, sh, &list); in raid5_release_stripe()
417 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_release_stripe()
418 release_inactive_stripe_list(conf, &list, hash); in raid5_release_stripe()
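
raid5_release_stripe() above pushes a finished stripe onto conf->released_stripes with llist_add() so the hot release path can skip conf->device_lock entirely; release_stripe_list() later drains the whole list in one shot with llist_del_all(). A minimal C11 sketch of that push/drain-all pattern, assuming struct demo_stripe as a hypothetical stand-in for stripe_head:

    #include <stdatomic.h>
    #include <stddef.h>

    struct demo_stripe {                 /* hypothetical stand-in for stripe_head */
            struct demo_stripe *next;
            /* ... payload ... */
    };

    static _Atomic(struct demo_stripe *) released_head;

    /* Lock-free push, like llist_add(): returns nonzero when the list was
     * previously empty -- the caller's cue to wake the daemon thread. */
    static int release_push(struct demo_stripe *sh)
    {
            struct demo_stripe *old = atomic_load(&released_head);

            do {
                    sh->next = old;
            } while (!atomic_compare_exchange_weak(&released_head, &old, sh));
            return old == NULL;
    }

    /* Drain everything with one atomic exchange, like llist_del_all();
     * the consumer then walks its private chain without any lock. */
    static struct demo_stripe *release_del_all(void)
    {
            return atomic_exchange(&released_head, NULL);
    }
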
430 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) in insert_hash() argument
432 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
441 static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) in get_free_stripe() argument
446 if (list_empty(conf->inactive_list + hash)) in get_free_stripe()
448 first = (conf->inactive_list + hash)->next; in get_free_stripe()
452 atomic_inc(&conf->active_stripes); in get_free_stripe()
454 if (list_empty(conf->inactive_list + hash)) in get_free_stripe()
455 atomic_inc(&conf->empty_inactive_list_nr); in get_free_stripe()
494 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
499 struct r5conf *conf = sh->raid_conf; in init_stripe() local
510 seq = read_seqcount_begin(&conf->gen_lock); in init_stripe()
511 sh->generation = conf->generation - previous; in init_stripe()
512 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; in init_stripe()
514 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
531 if (read_seqcount_retry(&conf->gen_lock, seq)) in init_stripe()
534 insert_hash(conf, sh); in init_stripe()
539 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, in __find_stripe() argument
545 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) in __find_stripe()
565 int raid5_calc_degraded(struct r5conf *conf) in raid5_calc_degraded() argument
572 for (i = 0; i < conf->previous_raid_disks; i++) { in raid5_calc_degraded()
573 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in raid5_calc_degraded()
575 rdev = rcu_dereference(conf->disks[i].replacement); in raid5_calc_degraded()
590 if (conf->raid_disks >= conf->previous_raid_disks) in raid5_calc_degraded()
594 if (conf->raid_disks == conf->previous_raid_disks) in raid5_calc_degraded()
598 for (i = 0; i < conf->raid_disks; i++) { in raid5_calc_degraded()
599 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in raid5_calc_degraded()
601 rdev = rcu_dereference(conf->disks[i].replacement); in raid5_calc_degraded()
612 if (conf->raid_disks <= conf->previous_raid_disks) in raid5_calc_degraded()
621 static int has_failed(struct r5conf *conf) in has_failed() argument
625 if (conf->mddev->reshape_position == MaxSector) in has_failed()
626 return conf->mddev->degraded > conf->max_degraded; in has_failed()
628 degraded = raid5_calc_degraded(conf); in has_failed()
629 if (degraded > conf->max_degraded) in has_failed()
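
raid5_calc_degraded() above counts devices that are missing or not in sync and, during a reshape, evaluates both the old and the new geometry; has_failed() then compares the count against conf->max_degraded (1 for RAID4/5, 2 for RAID6). A simplified sketch of that decision, assuming one boolean per slot in place of the rdev/replacement pointers and In_sync bits the driver actually inspects:

    #include <stdbool.h>

    static int calc_degraded(const bool *working, int raid_disks)
    {
            int degraded = 0;

            for (int i = 0; i < raid_disks; i++)
                    if (!working[i])
                            degraded++;
            return degraded;
    }

    /* During a reshape both geometries must survive; the driver is more
     * careful than this about slots that exist in only one geometry. */
    static bool array_has_failed(const bool *old_ok, int old_disks,
                                 const bool *new_ok, int new_disks,
                                 int max_degraded)  /* 1 = RAID4/5, 2 = RAID6 */
    {
            int d_old = calc_degraded(old_ok, old_disks);
            int d_new = calc_degraded(new_ok, new_disks);
            int worst = d_old > d_new ? d_old : d_new;

            return worst > max_degraded;
    }
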
635 raid5_get_active_stripe(struct r5conf *conf, sector_t sector, in raid5_get_active_stripe() argument
644 spin_lock_irq(conf->hash_locks + hash); in raid5_get_active_stripe()
647 wait_event_lock_irq(conf->wait_for_quiescent, in raid5_get_active_stripe()
648 conf->quiesce == 0 || noquiesce, in raid5_get_active_stripe()
649 *(conf->hash_locks + hash)); in raid5_get_active_stripe()
650 sh = __find_stripe(conf, sector, conf->generation - previous); in raid5_get_active_stripe()
652 if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) { in raid5_get_active_stripe()
653 sh = get_free_stripe(conf, hash); in raid5_get_active_stripe()
655 &conf->cache_state)) in raid5_get_active_stripe()
657 &conf->cache_state); in raid5_get_active_stripe()
662 r5c_check_stripe_cache_usage(conf); in raid5_get_active_stripe()
665 &conf->cache_state); in raid5_get_active_stripe()
666 r5l_wake_reclaim(conf->log, 0); in raid5_get_active_stripe()
668 conf->wait_for_stripe, in raid5_get_active_stripe()
669 !list_empty(conf->inactive_list + hash) && in raid5_get_active_stripe()
670 (atomic_read(&conf->active_stripes) in raid5_get_active_stripe()
671 < (conf->max_nr_stripes * 3 / 4) in raid5_get_active_stripe()
673 &conf->cache_state)), in raid5_get_active_stripe()
674 *(conf->hash_locks + hash)); in raid5_get_active_stripe()
676 &conf->cache_state); in raid5_get_active_stripe()
682 spin_lock(&conf->device_lock); in raid5_get_active_stripe()
685 atomic_inc(&conf->active_stripes); in raid5_get_active_stripe()
689 if (!list_empty(conf->inactive_list + hash)) in raid5_get_active_stripe()
692 if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag) in raid5_get_active_stripe()
693 atomic_inc(&conf->empty_inactive_list_nr); in raid5_get_active_stripe()
700 spin_unlock(&conf->device_lock); in raid5_get_active_stripe()
704 spin_unlock_irq(conf->hash_locks + hash); in raid5_get_active_stripe()
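
raid5_get_active_stripe() above is a find-or-allocate loop run under the bucket's hash lock: look the sector up in the hash table (__find_stripe), fall back to the per-bucket inactive list (get_free_stripe), and if both fail sleep on conf->wait_for_stripe until stripes are released, then retry from the top. A condensed pthread sketch of that control flow, assuming a single bucket and omitting the quiesce and cache-pressure handling (all names are illustrative):

    #include <pthread.h>
    #include <stddef.h>

    struct demo_stripe {
            struct demo_stripe *next;
            unsigned long long sector;
            int refcount;
    };

    static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wait_for_stripe = PTHREAD_COND_INITIALIZER;
    static struct demo_stripe *hash_list;      /* the bucket's hash chain */
    static struct demo_stripe *inactive_list;  /* the bucket's free list */

    static struct demo_stripe *find_stripe(unsigned long long sector)
    {
            for (struct demo_stripe *sh = hash_list; sh; sh = sh->next)
                    if (sh->sector == sector)
                            return sh;
            return NULL;
    }

    static struct demo_stripe *get_active_stripe(unsigned long long sector)
    {
            struct demo_stripe *sh;

            pthread_mutex_lock(&bucket_lock);
            for (;;) {
                    sh = find_stripe(sector);       /* already cached? */
                    if (sh)
                            break;
                    sh = inactive_list;             /* allocate from free list */
                    if (sh) {
                            inactive_list = sh->next;
                            sh->sector = sector;    /* init_stripe() stand-in */
                            sh->next = hash_list;   /* insert_hash() stand-in */
                            hash_list = sh;
                            break;
                    }
                    /* Nothing free: sleep until the release path calls
                     * pthread_cond_broadcast(&wait_for_stripe), then retry. */
                    pthread_cond_wait(&wait_for_stripe, &bucket_lock);
            }
            sh->refcount++;
            pthread_mutex_unlock(&bucket_lock);
            return sh;
    }
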
734 struct r5conf *conf = sh->raid_conf; in stripe_can_batch() local
736 if (raid5_has_log(conf) || raid5_has_ppl(conf)) in stripe_can_batch()
744 static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh) in stripe_add_to_batch_list() argument
754 if (!sector_div(tmp_sec, conf->chunk_sectors)) in stripe_add_to_batch_list()
759 spin_lock_irq(conf->hash_locks + hash); in stripe_add_to_batch_list()
760 head = __find_stripe(conf, head_sector, conf->generation); in stripe_add_to_batch_list()
762 spin_lock(&conf->device_lock); in stripe_add_to_batch_list()
765 atomic_inc(&conf->active_stripes); in stripe_add_to_batch_list()
769 if (!list_empty(conf->inactive_list + hash)) in stripe_add_to_batch_list()
772 if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag) in stripe_add_to_batch_list()
773 atomic_inc(&conf->empty_inactive_list_nr); in stripe_add_to_batch_list()
780 spin_unlock(&conf->device_lock); in stripe_add_to_batch_list()
782 spin_unlock_irq(conf->hash_locks + hash); in stripe_add_to_batch_list()
835 if (atomic_dec_return(&conf->preread_active_stripes) in stripe_add_to_batch_list()
837 md_wakeup_thread(conf->mddev->thread); in stripe_add_to_batch_list()
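
stripe_add_to_batch_list() above first decides whether the stripe can have a batch head at all: a stripe sitting at the very start of a chunk has no in-chunk predecessor (the sector_div() test), otherwise the candidate head is exactly one stripe earlier and is looked up under the head's hash lock. A small sketch of that predecessor computation (names hypothetical):

    #include <stdint.h>

    /* Returns 1 and fills *head_sector when the stripe at 'sector' has an
     * in-chunk predecessor to batch with; 0 for a chunk's first stripe. */
    static int batch_head_sector(uint64_t sector, uint32_t chunk_sectors,
                                 uint32_t stripe_sectors, uint64_t *head_sector)
    {
            if (sector % chunk_sectors == 0)  /* chunk-aligned: no head */
                    return 0;
            *head_sector = sector - stripe_sectors;
            return 1;
    }
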
858 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) in use_new_offset() argument
860 sector_t progress = conf->reshape_progress; in use_new_offset()
868 if (sh->generation == conf->generation - 1) in use_new_offset()
897 static void dispatch_defer_bios(struct r5conf *conf, int target, in dispatch_defer_bios() argument
904 if (conf->pending_data_cnt == 0) in dispatch_defer_bios()
907 list_sort(NULL, &conf->pending_list, cmp_stripe); in dispatch_defer_bios()
909 first = conf->pending_list.next; in dispatch_defer_bios()
912 if (conf->next_pending_data) in dispatch_defer_bios()
913 list_move_tail(&conf->pending_list, in dispatch_defer_bios()
914 &conf->next_pending_data->sibling); in dispatch_defer_bios()
916 while (!list_empty(&conf->pending_list)) { in dispatch_defer_bios()
917 data = list_first_entry(&conf->pending_list, in dispatch_defer_bios()
924 list_move(&data->sibling, &conf->free_list); in dispatch_defer_bios()
929 conf->pending_data_cnt -= cnt; in dispatch_defer_bios()
930 BUG_ON(conf->pending_data_cnt < 0 || cnt < target); in dispatch_defer_bios()
932 if (next != &conf->pending_list) in dispatch_defer_bios()
933 conf->next_pending_data = list_entry(next, in dispatch_defer_bios()
936 conf->next_pending_data = NULL; in dispatch_defer_bios()
938 if (first != &conf->pending_list) in dispatch_defer_bios()
939 list_move_tail(&conf->pending_list, first); in dispatch_defer_bios()
942 static void flush_deferred_bios(struct r5conf *conf) in flush_deferred_bios() argument
946 if (conf->pending_data_cnt == 0) in flush_deferred_bios()
949 spin_lock(&conf->pending_bios_lock); in flush_deferred_bios()
950 dispatch_defer_bios(conf, conf->pending_data_cnt, &tmp); in flush_deferred_bios()
951 BUG_ON(conf->pending_data_cnt != 0); in flush_deferred_bios()
952 spin_unlock(&conf->pending_bios_lock); in flush_deferred_bios()
957 static void defer_issue_bios(struct r5conf *conf, sector_t sector, in defer_issue_bios() argument
963 spin_lock(&conf->pending_bios_lock); in defer_issue_bios()
964 ent = list_first_entry(&conf->free_list, struct r5pending_data, in defer_issue_bios()
966 list_move_tail(&ent->sibling, &conf->pending_list); in defer_issue_bios()
970 conf->pending_data_cnt++; in defer_issue_bios()
971 if (conf->pending_data_cnt >= PENDING_IO_MAX) in defer_issue_bios()
972 dispatch_defer_bios(conf, PENDING_IO_ONE_FLUSH, &tmp); in defer_issue_bios()
974 spin_unlock(&conf->pending_bios_lock); in defer_issue_bios()
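
defer_issue_bios() above parks each stripe's bios on conf->pending_list and only dispatches once PENDING_IO_MAX entries have accumulated, so dispatch_defer_bios() can sort them by sector (list_sort/cmp_stripe) and submit a burst; flush_deferred_bios() forces everything out. A small sketch of the accumulate-then-flush pattern, assuming an int stands in for a stripe's bio list and that a full flush (rather than the driver's partial PENDING_IO_ONE_FLUSH) happens at the cap:

    #include <pthread.h>

    #define DEMO_PENDING_MAX 16   /* assumed stand-in for PENDING_IO_MAX */

    static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
    static int pending[DEMO_PENDING_MAX];  /* stand-ins for r5pending_data */
    static int pending_cnt;

    static void dispatch(int *batch, int cnt)
    {
            /* The driver sorts by sector here and issues the bios; the
             * sketch just consumes the batch. */
            (void)batch;
            (void)cnt;
    }

    static void defer_issue(int item)
    {
            pthread_mutex_lock(&pending_lock);
            pending[pending_cnt++] = item;
            if (pending_cnt >= DEMO_PENDING_MAX) {  /* cap reached: flush */
                    dispatch(pending, pending_cnt);
                    pending_cnt = 0;
            }
            pthread_mutex_unlock(&pending_lock);
    }

    static void flush_deferred(void)  /* forced flush, e.g. before idling */
    {
            pthread_mutex_lock(&pending_lock);
            dispatch(pending, pending_cnt);
            pending_cnt = 0;
            pthread_mutex_unlock(&pending_lock);
    }
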
986 struct r5conf *conf = sh->raid_conf; in ops_run_io() local
997 should_defer = conf->batch_bio_dispatch && conf->group_cnt; in ops_run_io()
1028 rrdev = rcu_dereference(conf->disks[i].replacement); in ops_run_io()
1030 rdev = rcu_dereference(conf->disks[i].rdev); in ops_run_io()
1072 if (!conf->mddev->external && in ops_run_io()
1073 conf->mddev->sb_flags) { in ops_run_io()
1078 md_check_recovery(conf->mddev); in ops_run_io()
1086 md_wait_for_blocked_rdev(rdev, conf->mddev); in ops_run_io()
1089 rdev_dec_pending(rdev, conf->mddev); in ops_run_io()
1114 if (use_new_offset(conf, sh)) in ops_run_io()
1152 if (conf->mddev->gendisk) in ops_run_io()
1154 bi, disk_devt(conf->mddev->gendisk), in ops_run_io()
1181 if (use_new_offset(conf, sh)) in ops_run_io()
1202 if (conf->mddev->gendisk) in ops_run_io()
1204 rbi, disk_devt(conf->mddev->gendisk), in ops_run_io()
1229 defer_issue_bios(conf, head_sh->sector, &pending_bios); in ops_run_io()
1733 struct r5conf *conf = sh->raid_conf; in ops_run_biodrain() local
1776 r5c_is_writeback(conf->log)); in ops_run_biodrain()
1778 !r5c_is_writeback(conf->log)) { in ops_run_biodrain()
2067 struct r5conf *conf = sh->raid_conf; in raid_run_ops() local
2068 int level = conf->level; in raid_run_ops()
2073 percpu = per_cpu_ptr(conf->percpu, cpu); in raid_run_ops()
2143 int disks, struct r5conf *conf) in alloc_stripe() argument
2157 sh->raid_conf = conf; in alloc_stripe()
2166 if (raid5_has_ppl(conf)) { in alloc_stripe()
2176 static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) in grow_one_stripe() argument
2180 sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf); in grow_one_stripe()
2186 free_stripe(conf->slab_cache, sh); in grow_one_stripe()
2190 conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; in grow_one_stripe()
2192 atomic_inc(&conf->active_stripes); in grow_one_stripe()
2195 conf->max_nr_stripes++; in grow_one_stripe()
2199 static int grow_stripes(struct r5conf *conf, int num) in grow_stripes() argument
2202 size_t namelen = sizeof(conf->cache_name[0]); in grow_stripes()
2203 int devs = max(conf->raid_disks, conf->previous_raid_disks); in grow_stripes()
2205 if (conf->mddev->gendisk) in grow_stripes()
2206 snprintf(conf->cache_name[0], namelen, in grow_stripes()
2207 "raid%d-%s", conf->level, mdname(conf->mddev)); in grow_stripes()
2209 snprintf(conf->cache_name[0], namelen, in grow_stripes()
2210 "raid%d-%p", conf->level, conf->mddev); in grow_stripes()
2211 snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]); in grow_stripes()
2213 conf->active_name = 0; in grow_stripes()
2214 sc = kmem_cache_create(conf->cache_name[conf->active_name], in grow_stripes()
2219 conf->slab_cache = sc; in grow_stripes()
2220 conf->pool_size = devs; in grow_stripes()
2222 if (!grow_one_stripe(conf, GFP_KERNEL)) in grow_stripes()
2258 static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) in resize_chunks() argument
2268 if (conf->scribble_disks >= new_disks && in resize_chunks()
2269 conf->scribble_sectors >= new_sectors) in resize_chunks()
2271 mddev_suspend(conf->mddev); in resize_chunks()
2277 percpu = per_cpu_ptr(conf->percpu, cpu); in resize_chunks()
2291 mddev_resume(conf->mddev); in resize_chunks()
2293 conf->scribble_disks = new_disks; in resize_chunks()
2294 conf->scribble_sectors = new_sectors; in resize_chunks()
2299 static int resize_stripes(struct r5conf *conf, int newsize) in resize_stripes() argument
2332 md_allow_write(conf->mddev); in resize_stripes()
2335 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], in resize_stripes()
2342 mutex_lock(&conf->cache_size_mutex); in resize_stripes()
2344 for (i = conf->max_nr_stripes; i; i--) { in resize_stripes()
2345 nsh = alloc_stripe(sc, GFP_KERNEL, newsize, conf); in resize_stripes()
2359 mutex_unlock(&conf->cache_size_mutex); in resize_stripes()
2369 lock_device_hash_lock(conf, hash); in resize_stripes()
2370 wait_event_cmd(conf->wait_for_stripe, in resize_stripes()
2371 !list_empty(conf->inactive_list + hash), in resize_stripes()
2372 unlock_device_hash_lock(conf, hash), in resize_stripes()
2373 lock_device_hash_lock(conf, hash)); in resize_stripes()
2374 osh = get_free_stripe(conf, hash); in resize_stripes()
2375 unlock_device_hash_lock(conf, hash); in resize_stripes()
2377 for(i=0; i<conf->pool_size; i++) { in resize_stripes()
2382 free_stripe(conf->slab_cache, osh); in resize_stripes()
2384 if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + in resize_stripes()
2385 !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) { in resize_stripes()
2390 kmem_cache_destroy(conf->slab_cache); in resize_stripes()
2399 for (i = 0; i < conf->pool_size; i++) in resize_stripes()
2400 ndisks[i] = conf->disks[i]; in resize_stripes()
2402 for (i = conf->pool_size; i < newsize; i++) { in resize_stripes()
2409 for (i = conf->pool_size; i < newsize; i++) in resize_stripes()
2414 kfree(conf->disks); in resize_stripes()
2415 conf->disks = ndisks; in resize_stripes()
2420 mutex_unlock(&conf->cache_size_mutex); in resize_stripes()
2422 conf->slab_cache = sc; in resize_stripes()
2423 conf->active_name = 1-conf->active_name; in resize_stripes()
2430 for (i=conf->raid_disks; i < newsize; i++) in resize_stripes()
2443 conf->pool_size = newsize; in resize_stripes()
2447 static int drop_one_stripe(struct r5conf *conf) in drop_one_stripe() argument
2450 int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK; in drop_one_stripe()
2452 spin_lock_irq(conf->hash_locks + hash); in drop_one_stripe()
2453 sh = get_free_stripe(conf, hash); in drop_one_stripe()
2454 spin_unlock_irq(conf->hash_locks + hash); in drop_one_stripe()
2459 free_stripe(conf->slab_cache, sh); in drop_one_stripe()
2460 atomic_dec(&conf->active_stripes); in drop_one_stripe()
2461 conf->max_nr_stripes--; in drop_one_stripe()
2465 static void shrink_stripes(struct r5conf *conf) in shrink_stripes() argument
2467 while (conf->max_nr_stripes && in shrink_stripes()
2468 drop_one_stripe(conf)) in shrink_stripes()
2471 kmem_cache_destroy(conf->slab_cache); in shrink_stripes()
2472 conf->slab_cache = NULL; in shrink_stripes()
2478 struct r5conf *conf = sh->raid_conf; in raid5_end_read_request() local
2502 rdev = conf->disks[i].replacement; in raid5_end_read_request()
2504 rdev = conf->disks[i].rdev; in raid5_end_read_request()
2506 if (use_new_offset(conf, sh)) in raid5_end_read_request()
2519 mdname(conf->mddev), STRIPE_SECTORS, in raid5_end_read_request()
2547 mdname(conf->mddev), in raid5_end_read_request()
2550 else if (conf->mddev->degraded >= conf->max_degraded) { in raid5_end_read_request()
2554 mdname(conf->mddev), in raid5_end_read_request()
2562 mdname(conf->mddev), in raid5_end_read_request()
2566 > conf->max_nr_stripes) in raid5_end_read_request()
2568 mdname(conf->mddev), bdn); in raid5_end_read_request()
2587 md_error(conf->mddev, rdev); in raid5_end_read_request()
2590 rdev_dec_pending(rdev, conf->mddev); in raid5_end_read_request()
2600 struct r5conf *conf = sh->raid_conf; in raid5_end_write_request() local
2609 rdev = conf->disks[i].rdev; in raid5_end_write_request()
2613 rdev = conf->disks[i].replacement; in raid5_end_write_request()
2621 rdev = conf->disks[i].rdev; in raid5_end_write_request()
2636 md_error(conf->mddev, rdev); in raid5_end_write_request()
2661 rdev_dec_pending(rdev, conf->mddev); in raid5_end_write_request()
2679 struct r5conf *conf = mddev->private; in raid5_error() local
2683 spin_lock_irqsave(&conf->device_lock, flags); in raid5_error()
2686 mddev->degraded = raid5_calc_degraded(conf); in raid5_error()
2687 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_error()
2698 conf->raid_disks - mddev->degraded); in raid5_error()
2706 sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, in raid5_compute_sector() argument
2716 int algorithm = previous ? conf->prev_algo in raid5_compute_sector()
2717 : conf->algorithm; in raid5_compute_sector()
2718 int sectors_per_chunk = previous ? conf->prev_chunk_sectors in raid5_compute_sector()
2719 : conf->chunk_sectors; in raid5_compute_sector()
2720 int raid_disks = previous ? conf->previous_raid_disks in raid5_compute_sector()
2721 : conf->raid_disks; in raid5_compute_sector()
2722 int data_disks = raid_disks - conf->max_degraded; in raid5_compute_sector()
2742 switch(conf->level) { in raid5_compute_sector()
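
raid5_compute_sector() above maps an array-relative logical sector to a (stripe, data-device index, parity-device index) triple, selecting the previous chunk size, layout, and disk count while a reshape is in flight. For the common left-symmetric RAID5 layout the arithmetic reduces to the sketch below (the driver handles many more layouts plus RAID6's second parity device); raid5_compute_blocknr(), listed next, is the inverse mapping and cross-checks itself by re-running this forward computation:

    #include <stdint.h>

    struct stripe_map {
            uint64_t new_sector;  /* sector within each member device */
            int dd_idx;           /* which device holds the data */
            int pd_idx;           /* which device holds the parity */
    };

    /* Left-symmetric RAID5: parity rotates back one disk per stripe and
     * data devices are numbered starting just after the parity device. */
    static struct stripe_map compute_sector_ls(uint64_t r_sector,
                                               int raid_disks,
                                               uint32_t sectors_per_chunk)
    {
            int data_disks = raid_disks - 1;
            uint64_t chunk_number = r_sector / sectors_per_chunk;
            uint32_t chunk_offset = (uint32_t)(r_sector % sectors_per_chunk);
            uint64_t stripe = chunk_number / data_disks;
            int dd = (int)(chunk_number % data_disks);
            struct stripe_map m;

            m.pd_idx = data_disks - (int)(stripe % raid_disks);
            m.dd_idx = (m.pd_idx + 1 + dd) % raid_disks;
            m.new_sector = stripe * sectors_per_chunk + chunk_offset;
            return m;
    }
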
2910 struct r5conf *conf = sh->raid_conf; in raid5_compute_blocknr() local
2912 int data_disks = raid_disks - conf->max_degraded; in raid5_compute_blocknr()
2914 int sectors_per_chunk = previous ? conf->prev_chunk_sectors in raid5_compute_blocknr()
2915 : conf->chunk_sectors; in raid5_compute_blocknr()
2916 int algorithm = previous ? conf->prev_algo in raid5_compute_blocknr()
2917 : conf->algorithm; in raid5_compute_blocknr()
2930 switch(conf->level) { in raid5_compute_blocknr()
3017 check = raid5_compute_sector(conf, r_sector, in raid5_compute_blocknr()
3022 mdname(conf->mddev)); in raid5_compute_blocknr()
3066 static inline bool delay_towrite(struct r5conf *conf, in delay_towrite() argument
3075 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && in delay_towrite()
3089 struct r5conf *conf = sh->raid_conf; in schedule_reconstruction() local
3090 int level = conf->level; in schedule_reconstruction()
3104 if (dev->towrite && !delay_towrite(conf, dev, s)) { in schedule_reconstruction()
3130 if (s->locked + conf->max_degraded == disks) in schedule_reconstruction()
3132 atomic_inc(&conf->pending_full_writes); in schedule_reconstruction()
3202 struct r5conf *conf = sh->raid_conf; in add_stripe_bio() local
3228 if (forwrite && raid5_has_ppl(conf)) { in add_stripe_bio()
3254 if (first + conf->chunk_sectors * (count - 1) != last) in add_stripe_bio()
3266 md_write_inc(conf->mddev, bi); in add_stripe_bio()
3287 if (conf->mddev->bitmap && firstwrite) { in add_stripe_bio()
3302 md_bitmap_startwrite(conf->mddev->bitmap, sh->sector, in add_stripe_bio()
3307 sh->bm_seq = conf->seq_flush+1; in add_stripe_bio()
3314 stripe_add_to_batch_list(conf, sh); in add_stripe_bio()
3323 static void end_reshape(struct r5conf *conf);
3325 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, in stripe_set_idx() argument
3329 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; in stripe_set_idx()
3332 int disks = previous ? conf->previous_raid_disks : conf->raid_disks; in stripe_set_idx()
3334 raid5_compute_sector(conf, in stripe_set_idx()
3335 stripe * (disks - conf->max_degraded) in stripe_set_idx()
3342 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, in handle_failed_stripe() argument
3354 rdev = rcu_dereference(conf->disks[i].rdev); in handle_failed_stripe()
3366 md_error(conf->mddev, rdev); in handle_failed_stripe()
3367 rdev_dec_pending(rdev, conf->mddev); in handle_failed_stripe()
3382 wake_up(&conf->wait_for_overlap); in handle_failed_stripe()
3388 md_write_end(conf->mddev); in handle_failed_stripe()
3393 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3409 md_write_end(conf->mddev); in handle_failed_stripe()
3418 s->failed > conf->max_degraded && in handle_failed_stripe()
3426 wake_up(&conf->wait_for_overlap); in handle_failed_stripe()
3439 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3450 if (atomic_dec_and_test(&conf->pending_full_writes)) in handle_failed_stripe()
3451 md_wakeup_thread(conf->mddev->thread); in handle_failed_stripe()
3455 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, in handle_failed_sync() argument
3464 wake_up(&conf->wait_for_overlap); in handle_failed_sync()
3474 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { in handle_failed_sync()
3479 for (i = 0; i < conf->raid_disks; i++) { in handle_failed_sync()
3480 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in handle_failed_sync()
3487 rdev = rcu_dereference(conf->disks[i].replacement); in handle_failed_sync()
3497 conf->recovery_disabled = in handle_failed_sync()
3498 conf->mddev->recovery_disabled; in handle_failed_sync()
3500 md_done_sync(conf->mddev, STRIPE_SECTORS, !abort); in handle_failed_sync()
3743 static void handle_stripe_clean_event(struct r5conf *conf, in handle_stripe_clean_event() argument
3776 md_write_end(conf->mddev); in handle_stripe_clean_event()
3780 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_stripe_clean_event()
3819 spin_lock_irq(conf->hash_locks + hash); in handle_stripe_clean_event()
3821 spin_unlock_irq(conf->hash_locks + hash); in handle_stripe_clean_event()
3836 if (atomic_dec_and_test(&conf->pending_full_writes)) in handle_stripe_clean_event()
3837 md_wakeup_thread(conf->mddev->thread); in handle_stripe_clean_event()
3858 static int handle_stripe_dirtying(struct r5conf *conf, in handle_stripe_dirtying() argument
3864 sector_t recovery_cp = conf->mddev->recovery_cp; in handle_stripe_dirtying()
3873 if (conf->rmw_level == PARITY_DISABLE_RMW || in handle_stripe_dirtying()
3881 conf->rmw_level, (unsigned long long)recovery_cp, in handle_stripe_dirtying()
3886 if (((dev->towrite && !delay_towrite(conf, dev, s)) || in handle_stripe_dirtying()
3913 if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) { in handle_stripe_dirtying()
3915 if (conf->mddev->queue) in handle_stripe_dirtying()
3916 blk_add_trace_msg(conf->mddev->queue, in handle_stripe_dirtying()
3937 &conf->cache_state)) { in handle_stripe_dirtying()
3951 if (((dev->towrite && !delay_towrite(conf, dev, s)) || in handle_stripe_dirtying()
3972 if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_PREFER_RMW)) && rcw > 0) { in handle_stripe_dirtying()
3999 if (rcw && conf->mddev->queue) in handle_stripe_dirtying()
4000 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", in handle_stripe_dirtying()
4026 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks5() argument
4088 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); in handle_parity_checks5()
4089 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { in handle_parity_checks5()
4093 "%llu-%llu\n", mdname(conf->mddev), in handle_parity_checks5()
4119 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks6() argument
4245 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); in handle_parity_checks6()
4246 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { in handle_parity_checks6()
4250 "%llu-%llu\n", mdname(conf->mddev), in handle_parity_checks6()
4288 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) in handle_stripe_expansion() argument
4305 sector_t s = raid5_compute_sector(conf, bn, 0, in handle_stripe_expansion()
4307 sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1); in handle_stripe_expansion()
4329 for (j = 0; j < conf->raid_disks; j++) in handle_stripe_expansion()
4334 if (j == conf->raid_disks) { in handle_stripe_expansion()
4361 struct r5conf *conf = sh->raid_conf; in analyse_stripe() local
4373 s->log_failed = r5l_log_disk_error(conf); in analyse_stripe()
4421 rdev = rcu_dereference(conf->disks[i].replacement); in analyse_stripe()
4432 rdev = rcu_dereference(conf->disks[i].rdev); in analyse_stripe()
4480 conf->disks[i].rdev); in analyse_stripe()
4493 conf->disks[i].rdev); in analyse_stripe()
4502 conf->disks[i].replacement); in analyse_stripe()
4524 conf->disks[i].replacement); in analyse_stripe()
4545 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
4546 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) in analyse_stripe()
4657 struct r5conf *conf = sh->raid_conf; in handle_stripe() local
4709 test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) { in handle_stripe()
4721 rdev_dec_pending(s.blocked_rdev, conf->mddev); in handle_stripe()
4741 if (s.failed > conf->max_degraded || in handle_stripe()
4747 handle_failed_stripe(conf, sh, &s, disks); in handle_stripe()
4749 handle_failed_sync(conf, sh, &s); in handle_stripe()
4802 || conf->level < 6; in handle_stripe()
4813 handle_stripe_clean_event(conf, sh, disks); in handle_stripe()
4816 r5c_handle_cached_data_endio(conf, sh, disks); in handle_stripe()
4824 || (conf->level == 6 && s.to_write && s.failed) in handle_stripe()
4835 r5c_finish_stripe_write_out(conf, sh, &s); in handle_stripe()
4847 if (!r5c_is_writeback(conf->log)) { in handle_stripe()
4849 handle_stripe_dirtying(conf, sh, &s, disks); in handle_stripe()
4855 ret = r5c_try_caching_write(conf, sh, &s, in handle_stripe()
4868 ret = handle_stripe_dirtying(conf, sh, &s, in handle_stripe()
4885 if (conf->level == 6) in handle_stripe()
4886 handle_parity_checks6(conf, sh, &s, disks); in handle_stripe()
4888 handle_parity_checks5(conf, sh, &s, disks); in handle_stripe()
4895 for (i = 0; i < conf->raid_disks; i++) in handle_stripe()
4909 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); in handle_stripe()
4912 wake_up(&conf->wait_for_overlap); in handle_stripe()
4918 if (s.failed <= conf->max_degraded && !conf->mddev->ro) in handle_stripe()
4942 = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); in handle_stripe()
4951 atomic_inc(&conf->preread_active_stripes); in handle_stripe()
4960 for (i = conf->raid_disks; i--; ) { in handle_stripe()
4970 sh->disks = conf->raid_disks; in handle_stripe()
4971 stripe_set_idx(sh->sector, conf, 0, sh); in handle_stripe()
4975 atomic_dec(&conf->reshape_stripes); in handle_stripe()
4976 wake_up(&conf->wait_for_overlap); in handle_stripe()
4977 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); in handle_stripe()
4982 handle_stripe_expansion(conf, sh); in handle_stripe()
4987 if (conf->mddev->external) in handle_stripe()
4989 conf->mddev); in handle_stripe()
4996 conf->mddev); in handle_stripe()
5005 rdev = conf->disks[i].rdev; in handle_stripe()
5008 md_error(conf->mddev, rdev); in handle_stripe()
5009 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5012 rdev = conf->disks[i].rdev; in handle_stripe()
5015 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5018 rdev = conf->disks[i].replacement; in handle_stripe()
5021 rdev = conf->disks[i].rdev; in handle_stripe()
5024 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5038 atomic_dec(&conf->preread_active_stripes); in handle_stripe()
5039 if (atomic_read(&conf->preread_active_stripes) < in handle_stripe()
5041 md_wakeup_thread(conf->mddev->thread); in handle_stripe()
5047 static void raid5_activate_delayed(struct r5conf *conf) in raid5_activate_delayed() argument
5049 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { in raid5_activate_delayed()
5050 while (!list_empty(&conf->delayed_list)) { in raid5_activate_delayed()
5051 struct list_head *l = conf->delayed_list.next; in raid5_activate_delayed()
5057 atomic_inc(&conf->preread_active_stripes); in raid5_activate_delayed()
5058 list_add_tail(&sh->lru, &conf->hold_list); in raid5_activate_delayed()
5064 static void activate_bit_delay(struct r5conf *conf, in activate_bit_delay() argument
5069 list_add(&head, &conf->bitmap_list); in activate_bit_delay()
5070 list_del_init(&conf->bitmap_list); in activate_bit_delay()
5077 __release_stripe(conf, sh, &temp_inactive_list[hash]); in activate_bit_delay()
5083 struct r5conf *conf = mddev->private; in raid5_congested() local
5089 if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) in raid5_congested()
5093 if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) in raid5_congested()
5095 if (conf->quiesce) in raid5_congested()
5097 if (atomic_read(&conf->empty_inactive_list_nr)) in raid5_congested()
5105 struct r5conf *conf = mddev->private; in in_chunk_boundary() local
5112 chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); in in_chunk_boundary()
5121 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) in add_bio_to_retry() argument
5125 spin_lock_irqsave(&conf->device_lock, flags); in add_bio_to_retry()
5127 bi->bi_next = conf->retry_read_aligned_list; in add_bio_to_retry()
5128 conf->retry_read_aligned_list = bi; in add_bio_to_retry()
5130 spin_unlock_irqrestore(&conf->device_lock, flags); in add_bio_to_retry()
5131 md_wakeup_thread(conf->mddev->thread); in add_bio_to_retry()
5134 static struct bio *remove_bio_from_retry(struct r5conf *conf, in remove_bio_from_retry() argument
5139 bi = conf->retry_read_aligned; in remove_bio_from_retry()
5141 *offset = conf->retry_read_offset; in remove_bio_from_retry()
5142 conf->retry_read_aligned = NULL; in remove_bio_from_retry()
5145 bi = conf->retry_read_aligned_list; in remove_bio_from_retry()
5147 conf->retry_read_aligned_list = bi->bi_next; in remove_bio_from_retry()
5165 struct r5conf *conf; in raid5_align_endio() local
5174 conf = mddev->private; in raid5_align_endio()
5176 rdev_dec_pending(rdev, conf->mddev); in raid5_align_endio()
5180 if (atomic_dec_and_test(&conf->active_aligned_reads)) in raid5_align_endio()
5181 wake_up(&conf->wait_for_quiescent); in raid5_align_endio()
5187 add_bio_to_retry(raid_bi, conf); in raid5_align_endio()
5192 struct r5conf *conf = mddev->private; in raid5_read_one_chunk() local
5218 raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, in raid5_read_one_chunk()
5223 rdev = rcu_dereference(conf->disks[dd_idx].replacement); in raid5_read_one_chunk()
5226 rdev = rcu_dereference(conf->disks[dd_idx].rdev); in raid5_read_one_chunk()
5234 if (r5c_big_stripe_cached(conf, align_bi->bi_iter.bi_sector)) { in raid5_read_one_chunk()
5261 spin_lock_irq(&conf->device_lock); in raid5_read_one_chunk()
5262 wait_event_lock_irq(conf->wait_for_quiescent, in raid5_read_one_chunk()
5263 conf->quiesce == 0, in raid5_read_one_chunk()
5264 conf->device_lock); in raid5_read_one_chunk()
5265 atomic_inc(&conf->active_aligned_reads); in raid5_read_one_chunk()
5266 spin_unlock_irq(&conf->device_lock); in raid5_read_one_chunk()
5289 struct r5conf *conf = mddev->private; in chunk_aligned_read() local
5290 split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split); in chunk_aligned_read()
5312 static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) in __get_priority_stripe() argument
5317 bool second_try = !r5c_is_writeback(conf->log) && in __get_priority_stripe()
5318 !r5l_log_disk_error(conf); in __get_priority_stripe()
5319 bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state) || in __get_priority_stripe()
5320 r5l_log_disk_error(conf); in __get_priority_stripe()
5325 if (conf->worker_cnt_per_group == 0) { in __get_priority_stripe()
5326 handle_list = try_loprio ? &conf->loprio_list : in __get_priority_stripe()
5327 &conf->handle_list; in __get_priority_stripe()
5329 handle_list = try_loprio ? &conf->worker_groups[group].loprio_list : in __get_priority_stripe()
5330 &conf->worker_groups[group].handle_list; in __get_priority_stripe()
5331 wg = &conf->worker_groups[group]; in __get_priority_stripe()
5334 for (i = 0; i < conf->group_cnt; i++) { in __get_priority_stripe()
5335 handle_list = try_loprio ? &conf->worker_groups[i].loprio_list : in __get_priority_stripe()
5336 &conf->worker_groups[i].handle_list; in __get_priority_stripe()
5337 wg = &conf->worker_groups[i]; in __get_priority_stripe()
5346 list_empty(&conf->hold_list) ? "empty" : "busy", in __get_priority_stripe()
5347 atomic_read(&conf->pending_full_writes), conf->bypass_count); in __get_priority_stripe()
5352 if (list_empty(&conf->hold_list)) in __get_priority_stripe()
5353 conf->bypass_count = 0; in __get_priority_stripe()
5355 if (conf->hold_list.next == conf->last_hold) in __get_priority_stripe()
5356 conf->bypass_count++; in __get_priority_stripe()
5358 conf->last_hold = conf->hold_list.next; in __get_priority_stripe()
5359 conf->bypass_count -= conf->bypass_threshold; in __get_priority_stripe()
5360 if (conf->bypass_count < 0) in __get_priority_stripe()
5361 conf->bypass_count = 0; in __get_priority_stripe()
5364 } else if (!list_empty(&conf->hold_list) && in __get_priority_stripe()
5365 ((conf->bypass_threshold && in __get_priority_stripe()
5366 conf->bypass_count > conf->bypass_threshold) || in __get_priority_stripe()
5367 atomic_read(&conf->pending_full_writes) == 0)) { in __get_priority_stripe()
5369 list_for_each_entry(tmp, &conf->hold_list, lru) { in __get_priority_stripe()
5370 if (conf->worker_cnt_per_group == 0 || in __get_priority_stripe()
5380 conf->bypass_count -= conf->bypass_threshold; in __get_priority_stripe()
5381 if (conf->bypass_count < 0) in __get_priority_stripe()
5382 conf->bypass_count = 0; in __get_priority_stripe()
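
__get_priority_stripe() above normally serves handle_list but keeps a fairness counter so stripes parked on hold_list (preread) are not starved: bypassing the hold list grows conf->bypass_count, and once it exceeds conf->bypass_threshold (or no full-stripe writes are pending) a held stripe is serviced and the counter pays down. The toy below captures only the counter's shape; the driver additionally tracks last_hold so the count grows only while the same head stripe keeps being bypassed, and picks from per-worker-group lists:

    /* All names illustrative. */
    struct sched_state {
            int hot_ready;          /* !list_empty(handle_list) stand-in */
            int cold_ready;         /* !list_empty(hold_list) stand-in */
            int bypass_count;
            int bypass_threshold;
    };

    enum pick { PICK_NONE, PICK_HOT, PICK_COLD };

    static enum pick pick_next(struct sched_state *s)
    {
            if (s->hot_ready) {
                    if (!s->cold_ready)
                            s->bypass_count = 0;    /* nobody to starve */
                    else
                            s->bypass_count++;      /* cold list bypassed */
                    return PICK_HOT;
            }
            /* The driver also lets cold through when no full-stripe
             * writes are pending, regardless of the counter. */
            if (s->cold_ready && s->bypass_count > s->bypass_threshold) {
                    s->bypass_count -= s->bypass_threshold;
                    if (s->bypass_count < 0)
                            s->bypass_count = 0;
                    return PICK_COLD;
            }
            return PICK_NONE;
    }
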
5416 struct r5conf *conf = mddev->private; in raid5_unplug() local
5421 spin_lock_irq(&conf->device_lock); in raid5_unplug()
5437 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); in raid5_unplug()
5440 spin_unlock_irq(&conf->device_lock); in raid5_unplug()
5442 release_inactive_stripe_list(conf, cb->temp_inactive_list, in raid5_unplug()
5479 struct r5conf *conf = mddev->private; in make_discard_request() local
5493 stripe_sectors = conf->chunk_sectors * in make_discard_request()
5494 (conf->raid_disks - conf->max_degraded); in make_discard_request()
5499 logical_sector *= conf->chunk_sectors; in make_discard_request()
5500 last_sector *= conf->chunk_sectors; in make_discard_request()
5507 sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0); in make_discard_request()
5508 prepare_to_wait(&conf->wait_for_overlap, &w, in make_discard_request()
5518 for (d = 0; d < conf->raid_disks; d++) { in make_discard_request()
5530 finish_wait(&conf->wait_for_overlap, &w); in make_discard_request()
5532 for (d = 0; d < conf->raid_disks; d++) { in make_discard_request()
5542 if (conf->mddev->bitmap) { in make_discard_request()
5544 d < conf->raid_disks - conf->max_degraded; in make_discard_request()
5550 sh->bm_seq = conf->seq_flush + 1; in make_discard_request()
5557 atomic_inc(&conf->preread_active_stripes); in make_discard_request()
5566 struct r5conf *conf = mddev->private; in raid5_make_request() local
5577 int ret = log_handle_flush_request(conf, bi); in raid5_make_request()
5617 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); in raid5_make_request()
5624 seq = read_seqcount_begin(&conf->gen_lock); in raid5_make_request()
5627 prepare_to_wait(&conf->wait_for_overlap, &w, in raid5_make_request()
5629 if (unlikely(conf->reshape_progress != MaxSector)) { in raid5_make_request()
5638 spin_lock_irq(&conf->device_lock); in raid5_make_request()
5640 ? logical_sector < conf->reshape_progress in raid5_make_request()
5641 : logical_sector >= conf->reshape_progress) { in raid5_make_request()
5645 ? logical_sector < conf->reshape_safe in raid5_make_request()
5646 : logical_sector >= conf->reshape_safe) { in raid5_make_request()
5647 spin_unlock_irq(&conf->device_lock); in raid5_make_request()
5653 spin_unlock_irq(&conf->device_lock); in raid5_make_request()
5656 new_sector = raid5_compute_sector(conf, logical_sector, in raid5_make_request()
5663 sh = raid5_get_active_stripe(conf, new_sector, previous, in raid5_make_request()
5676 spin_lock_irq(&conf->device_lock); in raid5_make_request()
5678 ? logical_sector >= conf->reshape_progress in raid5_make_request()
5679 : logical_sector < conf->reshape_progress) in raid5_make_request()
5682 spin_unlock_irq(&conf->device_lock); in raid5_make_request()
5690 if (read_seqcount_retry(&conf->gen_lock, seq)) { in raid5_make_request()
5721 atomic_inc(&conf->preread_active_stripes); in raid5_make_request()
5729 finish_wait(&conf->wait_for_overlap, &w); in raid5_make_request()
5750 struct r5conf *conf = mddev->private; in reshape_request() local
5754 int raid_disks = conf->previous_raid_disks; in reshape_request()
5755 int data_disks = raid_disks - conf->max_degraded; in reshape_request()
5756 int new_data_disks = conf->raid_disks - conf->max_degraded; in reshape_request()
5768 conf->reshape_progress < raid5_size(mddev, 0, 0)) { in reshape_request()
5770 - conf->reshape_progress; in reshape_request()
5772 conf->reshape_progress == MaxSector) { in reshape_request()
5776 conf->reshape_progress > 0) in reshape_request()
5777 sector_nr = conf->reshape_progress; in reshape_request()
5793 reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors); in reshape_request()
5801 writepos = conf->reshape_progress; in reshape_request()
5803 readpos = conf->reshape_progress; in reshape_request()
5805 safepos = conf->reshape_safe; in reshape_request()
5826 BUG_ON(conf->reshape_progress == 0); in reshape_request()
5857 if (conf->min_offset_diff < 0) { in reshape_request()
5858 safepos += -conf->min_offset_diff; in reshape_request()
5859 readpos += -conf->min_offset_diff; in reshape_request()
5861 writepos += conf->min_offset_diff; in reshape_request()
5866 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { in reshape_request()
5868 wait_event(conf->wait_for_overlap, in reshape_request()
5869 atomic_read(&conf->reshape_stripes)==0 in reshape_request()
5871 if (atomic_read(&conf->reshape_stripes) != 0) in reshape_request()
5873 mddev->reshape_position = conf->reshape_progress; in reshape_request()
5884 conf->reshape_checkpoint = jiffies; in reshape_request()
5891 spin_lock_irq(&conf->device_lock); in reshape_request()
5892 conf->reshape_safe = mddev->reshape_position; in reshape_request()
5893 spin_unlock_irq(&conf->device_lock); in reshape_request()
5894 wake_up(&conf->wait_for_overlap); in reshape_request()
5902 sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1); in reshape_request()
5904 atomic_inc(&conf->reshape_stripes); in reshape_request()
5912 if (conf->level == 6 && in reshape_request()
5930 spin_lock_irq(&conf->device_lock); in reshape_request()
5932 conf->reshape_progress -= reshape_sectors * new_data_disks; in reshape_request()
5934 conf->reshape_progress += reshape_sectors * new_data_disks; in reshape_request()
5935 spin_unlock_irq(&conf->device_lock); in reshape_request()
5942 raid5_compute_sector(conf, stripe_addr*(new_data_disks), in reshape_request()
5945 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) in reshape_request()
5951 sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1); in reshape_request()
5975 wait_event(conf->wait_for_overlap, in reshape_request()
5976 atomic_read(&conf->reshape_stripes) == 0 in reshape_request()
5978 if (atomic_read(&conf->reshape_stripes) != 0) in reshape_request()
5980 mddev->reshape_position = conf->reshape_progress; in reshape_request()
5990 conf->reshape_checkpoint = jiffies; in reshape_request()
5998 spin_lock_irq(&conf->device_lock); in reshape_request()
5999 conf->reshape_safe = mddev->reshape_position; in reshape_request()
6000 spin_unlock_irq(&conf->device_lock); in reshape_request()
6001 wake_up(&conf->wait_for_overlap); in reshape_request()
6011 struct r5conf *conf = mddev->private; in raid5_sync_request() local
6022 end_reshape(conf); in raid5_sync_request()
6030 conf->fullsync = 0; in raid5_sync_request()
6037 wait_event(conf->wait_for_overlap, conf->quiesce != 2); in raid5_sync_request()
6052 if (mddev->degraded >= conf->max_degraded && in raid5_sync_request()
6059 !conf->fullsync && in raid5_sync_request()
6070 sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0); in raid5_sync_request()
6072 sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0); in raid5_sync_request()
6083 for (i = 0; i < conf->raid_disks; i++) { in raid5_sync_request()
6084 struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev); in raid5_sync_request()
6101 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio, in retry_aligned_read() argument
6122 sector = raid5_compute_sector(conf, logical_sector, in retry_aligned_read()
6135 sh = raid5_get_active_stripe(conf, sector, 0, 1, 1); in retry_aligned_read()
6139 conf->retry_read_aligned = raid_bio; in retry_aligned_read()
6140 conf->retry_read_offset = scnt; in retry_aligned_read()
6146 conf->retry_read_aligned = raid_bio; in retry_aligned_read()
6147 conf->retry_read_offset = scnt; in retry_aligned_read()
6159 if (atomic_dec_and_test(&conf->active_aligned_reads)) in retry_aligned_read()
6160 wake_up(&conf->wait_for_quiescent); in retry_aligned_read()
6164 static int handle_active_stripes(struct r5conf *conf, int group, in handle_active_stripes() argument
6173 (sh = __get_priority_stripe(conf, group)) != NULL) in handle_active_stripes()
6181 spin_unlock_irq(&conf->device_lock); in handle_active_stripes()
6182 log_flush_stripe_to_raid(conf); in handle_active_stripes()
6183 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
6188 spin_unlock_irq(&conf->device_lock); in handle_active_stripes()
6190 release_inactive_stripe_list(conf, temp_inactive_list, in handle_active_stripes()
6193 r5l_flush_stripe_to_raid(conf->log); in handle_active_stripes()
6195 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
6201 log_write_stripe_run(conf); in handle_active_stripes()
6205 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
6208 __release_stripe(conf, batch[i], &temp_inactive_list[hash]); in handle_active_stripes()
6217 struct r5conf *conf = group->conf; in raid5_do_work() local
6218 struct mddev *mddev = conf->mddev; in raid5_do_work()
6219 int group_id = group - conf->worker_groups; in raid5_do_work()
6227 spin_lock_irq(&conf->device_lock); in raid5_do_work()
6231 released = release_stripe_list(conf, worker->temp_inactive_list); in raid5_do_work()
6233 batch_size = handle_active_stripes(conf, group_id, worker, in raid5_do_work()
6241 conf->device_lock); in raid5_do_work()
6245 spin_unlock_irq(&conf->device_lock); in raid5_do_work()
6247 flush_deferred_bios(conf); in raid5_do_work()
6249 r5l_flush_stripe_to_raid(conf->log); in raid5_do_work()
6267 struct r5conf *conf = mddev->private; in raid5d() local
6277 spin_lock_irq(&conf->device_lock); in raid5d()
6283 released = release_stripe_list(conf, conf->temp_inactive_list); in raid5d()
6285 clear_bit(R5_DID_ALLOC, &conf->cache_state); in raid5d()
6288 !list_empty(&conf->bitmap_list)) { in raid5d()
6290 conf->seq_flush++; in raid5d()
6291 spin_unlock_irq(&conf->device_lock); in raid5d()
6293 spin_lock_irq(&conf->device_lock); in raid5d()
6294 conf->seq_write = conf->seq_flush; in raid5d()
6295 activate_bit_delay(conf, conf->temp_inactive_list); in raid5d()
6297 raid5_activate_delayed(conf); in raid5d()
6299 while ((bio = remove_bio_from_retry(conf, &offset))) { in raid5d()
6301 spin_unlock_irq(&conf->device_lock); in raid5d()
6302 ok = retry_aligned_read(conf, bio, offset); in raid5d()
6303 spin_lock_irq(&conf->device_lock); in raid5d()
6309 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, in raid5d()
6310 conf->temp_inactive_list); in raid5d()
6316 spin_unlock_irq(&conf->device_lock); in raid5d()
6318 spin_lock_irq(&conf->device_lock); in raid5d()
6323 spin_unlock_irq(&conf->device_lock); in raid5d()
6324 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) && in raid5d()
6325 mutex_trylock(&conf->cache_size_mutex)) { in raid5d()
6326 grow_one_stripe(conf, __GFP_NOWARN); in raid5d()
6330 set_bit(R5_DID_ALLOC, &conf->cache_state); in raid5d()
6331 mutex_unlock(&conf->cache_size_mutex); in raid5d()
6334 flush_deferred_bios(conf); in raid5d()
6336 r5l_flush_stripe_to_raid(conf->log); in raid5d()
6347 struct r5conf *conf; in raid5_show_stripe_cache_size() local
6350 conf = mddev->private; in raid5_show_stripe_cache_size()
6351 if (conf) in raid5_show_stripe_cache_size()
6352 ret = sprintf(page, "%d\n", conf->min_nr_stripes); in raid5_show_stripe_cache_size()
6360 struct r5conf *conf = mddev->private; in raid5_set_cache_size() local
6365 conf->min_nr_stripes = size; in raid5_set_cache_size()
6366 mutex_lock(&conf->cache_size_mutex); in raid5_set_cache_size()
6367 while (size < conf->max_nr_stripes && in raid5_set_cache_size()
6368 drop_one_stripe(conf)) in raid5_set_cache_size()
6370 mutex_unlock(&conf->cache_size_mutex); in raid5_set_cache_size()
6374 mutex_lock(&conf->cache_size_mutex); in raid5_set_cache_size()
6375 while (size > conf->max_nr_stripes) in raid5_set_cache_size()
6376 if (!grow_one_stripe(conf, GFP_KERNEL)) in raid5_set_cache_size()
6378 mutex_unlock(&conf->cache_size_mutex); in raid5_set_cache_size()
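
raid5_set_cache_size() above converges conf->max_nr_stripes on the requested value under cache_size_mutex: drop one stripe at a time while above the target, then grow one at a time, where growing can fail under memory pressure. A compact sketch of that loop, assuming trivial drop_one()/grow_one() bodies in place of the driver's real stripe alloc/free:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t cache_size_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int max_nr_stripes;

    static bool drop_one(void)  /* frees one cached stripe in the driver */
    {
            if (max_nr_stripes == 0)
                    return false;
            max_nr_stripes--;
            return true;
    }

    static bool grow_one(void)  /* allocates one; can fail under pressure */
    {
            max_nr_stripes++;
            return true;
    }

    static int set_cache_size(int size)
    {
            int err = 0;

            pthread_mutex_lock(&cache_size_mutex);
            while (size < max_nr_stripes && drop_one())
                    ;                       /* shrink toward the target */
            while (size > max_nr_stripes)
                    if (!grow_one()) {      /* grow, tolerating failure */
                            err = -1;       /* -ENOMEM in the driver */
                            break;
                    }
            pthread_mutex_unlock(&cache_size_mutex);
            return err;
    }
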
6387 struct r5conf *conf; in raid5_store_stripe_cache_size() local
6398 conf = mddev->private; in raid5_store_stripe_cache_size()
6399 if (!conf) in raid5_store_stripe_cache_size()
6416 struct r5conf *conf = mddev->private; in raid5_show_rmw_level() local
6417 if (conf) in raid5_show_rmw_level()
6418 return sprintf(page, "%d\n", conf->rmw_level); in raid5_show_rmw_level()
6426 struct r5conf *conf = mddev->private; in raid5_store_rmw_level() local
6429 if (!conf) in raid5_store_rmw_level()
6446 conf->rmw_level = new; in raid5_store_rmw_level()
6459 struct r5conf *conf; in raid5_show_preread_threshold() local
6462 conf = mddev->private; in raid5_show_preread_threshold()
6463 if (conf) in raid5_show_preread_threshold()
6464 ret = sprintf(page, "%d\n", conf->bypass_threshold); in raid5_show_preread_threshold()
6472 struct r5conf *conf; in raid5_store_preread_threshold() local
6484 conf = mddev->private; in raid5_store_preread_threshold()
6485 if (!conf) in raid5_store_preread_threshold()
6487 else if (new > conf->min_nr_stripes) in raid5_store_preread_threshold()
6490 conf->bypass_threshold = new; in raid5_store_preread_threshold()
6504 struct r5conf *conf; in raid5_show_skip_copy() local
6507 conf = mddev->private; in raid5_show_skip_copy()
6508 if (conf) in raid5_show_skip_copy()
6509 ret = sprintf(page, "%d\n", conf->skip_copy); in raid5_show_skip_copy()
6517 struct r5conf *conf; in raid5_store_skip_copy() local
6530 conf = mddev->private; in raid5_store_skip_copy()
6531 if (!conf) in raid5_store_skip_copy()
6533 else if (new != conf->skip_copy) { in raid5_store_skip_copy()
6535 conf->skip_copy = new; in raid5_store_skip_copy()
6556 struct r5conf *conf = mddev->private; in stripe_cache_active_show() local
6557 if (conf) in stripe_cache_active_show()
6558 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); in stripe_cache_active_show()
6569 struct r5conf *conf; in raid5_show_group_thread_cnt() local
6572 conf = mddev->private; in raid5_show_group_thread_cnt()
6573 if (conf) in raid5_show_group_thread_cnt()
6574 ret = sprintf(page, "%d\n", conf->worker_cnt_per_group); in raid5_show_group_thread_cnt()
6579 static int alloc_thread_groups(struct r5conf *conf, int cnt,
6586 struct r5conf *conf; in raid5_store_group_thread_cnt() local
6603 conf = mddev->private; in raid5_store_group_thread_cnt()
6604 if (!conf) in raid5_store_group_thread_cnt()
6606 else if (new != conf->worker_cnt_per_group) { in raid5_store_group_thread_cnt()
6609 old_groups = conf->worker_groups; in raid5_store_group_thread_cnt()
6613 err = alloc_thread_groups(conf, new, in raid5_store_group_thread_cnt()
6617 spin_lock_irq(&conf->device_lock); in raid5_store_group_thread_cnt()
6618 conf->group_cnt = group_cnt; in raid5_store_group_thread_cnt()
6619 conf->worker_cnt_per_group = worker_cnt_per_group; in raid5_store_group_thread_cnt()
6620 conf->worker_groups = new_groups; in raid5_store_group_thread_cnt()
6621 spin_unlock_irq(&conf->device_lock); in raid5_store_group_thread_cnt()
6654 static int alloc_thread_groups(struct r5conf *conf, int cnt, in alloc_thread_groups() argument
6686 group->conf = conf; in alloc_thread_groups()
6702 static void free_thread_groups(struct r5conf *conf) in free_thread_groups() argument
6704 if (conf->worker_groups) in free_thread_groups()
6705 kfree(conf->worker_groups[0].workers); in free_thread_groups()
6706 kfree(conf->worker_groups); in free_thread_groups()
6707 conf->worker_groups = NULL; in free_thread_groups()
6713 struct r5conf *conf = mddev->private; in raid5_size() local
6719 raid_disks = min(conf->raid_disks, conf->previous_raid_disks); in raid5_size()
6721 sectors &= ~((sector_t)conf->chunk_sectors - 1); in raid5_size()
6722 sectors &= ~((sector_t)conf->prev_chunk_sectors - 1); in raid5_size()
6723 return sectors * (raid_disks - conf->max_degraded); in raid5_size()
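
raid5_size() above computes the exported capacity: each member's sector count is rounded down to a multiple of both the current and the previous chunk size (both are powers of two, so a bitmask suffices), then multiplied by the number of data devices, raid_disks - max_degraded. A worked sketch:

    #include <stdint.h>

    /* Example: 4 disks of 1000000 sectors, 1024-sector chunks, RAID5:
     * 999424 usable sectors per disk * 3 data disks = 2998272 sectors. */
    static uint64_t raid5_capacity(uint64_t dev_sectors,
                                   uint32_t chunk_sectors,       /* power of two */
                                   uint32_t prev_chunk_sectors,  /* power of two */
                                   int raid_disks, int max_degraded)
    {
            dev_sectors &= ~((uint64_t)chunk_sectors - 1);       /* round down */
            dev_sectors &= ~((uint64_t)prev_chunk_sectors - 1);  /* old geometry too */
            return dev_sectors * (uint64_t)(raid_disks - max_degraded);
    }
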
6726 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in free_scratch_buffer() argument
6735 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in alloc_scratch_buffer() argument
6737 if (conf->level == 6 && !percpu->spare_page) in alloc_scratch_buffer()
6740 percpu->scribble = scribble_alloc(max(conf->raid_disks, in alloc_scratch_buffer()
6741 conf->previous_raid_disks), in alloc_scratch_buffer()
6742 max(conf->chunk_sectors, in alloc_scratch_buffer()
6743 conf->prev_chunk_sectors) in alloc_scratch_buffer()
6747 if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { in alloc_scratch_buffer()
6748 free_scratch_buffer(conf, percpu); in alloc_scratch_buffer()
6757 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); in raid456_cpu_dead() local
6759 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid456_cpu_dead()
6763 static void raid5_free_percpu(struct r5conf *conf) in raid5_free_percpu() argument
6765 if (!conf->percpu) in raid5_free_percpu()
6768 cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); in raid5_free_percpu()
6769 free_percpu(conf->percpu); in raid5_free_percpu()
6772 static void free_conf(struct r5conf *conf) in free_conf() argument
6776 log_exit(conf); in free_conf()
6778 unregister_shrinker(&conf->shrinker); in free_conf()
6779 free_thread_groups(conf); in free_conf()
6780 shrink_stripes(conf); in free_conf()
6781 raid5_free_percpu(conf); in free_conf()
6782 for (i = 0; i < conf->pool_size; i++) in free_conf()
6783 if (conf->disks[i].extra_page) in free_conf()
6784 put_page(conf->disks[i].extra_page); in free_conf()
6785 kfree(conf->disks); in free_conf()
6786 bioset_exit(&conf->bio_split); in free_conf()
6787 kfree(conf->stripe_hashtbl); in free_conf()
6788 kfree(conf->pending_data); in free_conf()
6789 kfree(conf); in free_conf()
6794 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); in raid456_cpu_up_prepare() local
6795 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); in raid456_cpu_up_prepare()
6797 if (alloc_scratch_buffer(conf, percpu)) { in raid456_cpu_up_prepare()
6805 static int raid5_alloc_percpu(struct r5conf *conf) in raid5_alloc_percpu() argument
6809 conf->percpu = alloc_percpu(struct raid5_percpu); in raid5_alloc_percpu()
6810 if (!conf->percpu) in raid5_alloc_percpu()
6813 err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); in raid5_alloc_percpu()
6815 conf->scribble_disks = max(conf->raid_disks, in raid5_alloc_percpu()
6816 conf->previous_raid_disks); in raid5_alloc_percpu()
6817 conf->scribble_sectors = max(conf->chunk_sectors, in raid5_alloc_percpu()
6818 conf->prev_chunk_sectors); in raid5_alloc_percpu()
6826 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); in raid5_cache_scan() local
6829 if (mutex_trylock(&conf->cache_size_mutex)) { in raid5_cache_scan()
6832 conf->max_nr_stripes > conf->min_nr_stripes) { in raid5_cache_scan()
6833 if (drop_one_stripe(conf) == 0) { in raid5_cache_scan()
6839 mutex_unlock(&conf->cache_size_mutex); in raid5_cache_scan()
6847 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); in raid5_cache_count() local
6849 if (conf->max_nr_stripes < conf->min_nr_stripes) in raid5_cache_count()
6852 return conf->max_nr_stripes - conf->min_nr_stripes; in raid5_cache_count()
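
The shrinker pair above lets VM pressure trim the stripe cache: raid5_cache_count() reports how far max_nr_stripes sits above the configured floor min_nr_stripes, and raid5_cache_scan() drops stripes toward that floor using mutex_trylock so reclaim never blocks behind a cache resize. A userspace sketch of the two callbacks (names illustrative):

    #include <pthread.h>

    static pthread_mutex_t shrink_lock = PTHREAD_MUTEX_INITIALIZER;
    static int max_nr, min_nr;

    static int drop_one_demo(void)   /* frees one cached stripe in the driver */
    {
            if (max_nr == 0)
                    return 0;
            max_nr--;
            return 1;
    }

    static long cache_count(void)    /* "how much could you free?" */
    {
            return max_nr < min_nr ? 0 : max_nr - min_nr;
    }

    static long cache_scan(long nr_to_scan)  /* "free up to this many" */
    {
            long freed = 0;

            if (pthread_mutex_trylock(&shrink_lock) == 0) {
                    while (nr_to_scan-- > 0 && max_nr > min_nr) {
                            if (!drop_one_demo())
                                    break;
                            freed++;
                    }
                    pthread_mutex_unlock(&shrink_lock);
            }
            return freed;
    }
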
6857 struct r5conf *conf; in setup_conf() local
6896 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); in setup_conf()
6897 if (conf == NULL) in setup_conf()
6899 INIT_LIST_HEAD(&conf->free_list); in setup_conf()
6900 INIT_LIST_HEAD(&conf->pending_list); in setup_conf()
6901 conf->pending_data = kcalloc(PENDING_IO_MAX, in setup_conf()
6904 if (!conf->pending_data) in setup_conf()
6907 list_add(&conf->pending_data[i].sibling, &conf->free_list); in setup_conf()
6909 if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group, in setup_conf()
6911 conf->group_cnt = group_cnt; in setup_conf()
6912 conf->worker_cnt_per_group = worker_cnt_per_group; in setup_conf()
6913 conf->worker_groups = new_group; in setup_conf()
6916 spin_lock_init(&conf->device_lock); in setup_conf()
6917 seqcount_init(&conf->gen_lock); in setup_conf()
6918 mutex_init(&conf->cache_size_mutex); in setup_conf()
6919 init_waitqueue_head(&conf->wait_for_quiescent); in setup_conf()
6920 init_waitqueue_head(&conf->wait_for_stripe); in setup_conf()
6921 init_waitqueue_head(&conf->wait_for_overlap); in setup_conf()
6922 INIT_LIST_HEAD(&conf->handle_list); in setup_conf()
6923 INIT_LIST_HEAD(&conf->loprio_list); in setup_conf()
6924 INIT_LIST_HEAD(&conf->hold_list); in setup_conf()
6925 INIT_LIST_HEAD(&conf->delayed_list); in setup_conf()
6926 INIT_LIST_HEAD(&conf->bitmap_list); in setup_conf()
6927 init_llist_head(&conf->released_stripes); in setup_conf()
6928 atomic_set(&conf->active_stripes, 0); in setup_conf()
6929 atomic_set(&conf->preread_active_stripes, 0); in setup_conf()
6930 atomic_set(&conf->active_aligned_reads, 0); in setup_conf()
6931 spin_lock_init(&conf->pending_bios_lock); in setup_conf()
6932 conf->batch_bio_dispatch = true; in setup_conf()
6937 conf->batch_bio_dispatch = false; in setup_conf()
6942 conf->bypass_threshold = BYPASS_THRESHOLD; in setup_conf()
6943 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
6945 conf->raid_disks = mddev->raid_disks; in setup_conf()
6947 conf->previous_raid_disks = mddev->raid_disks; in setup_conf()
6949 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; in setup_conf()
6950 max_disks = max(conf->raid_disks, conf->previous_raid_disks); in setup_conf()
6952 conf->disks = kcalloc(max_disks, sizeof(struct disk_info), in setup_conf()
6955 if (!conf->disks) in setup_conf()
6959 conf->disks[i].extra_page = alloc_page(GFP_KERNEL); in setup_conf()
6960 if (!conf->disks[i].extra_page) in setup_conf()
6964 ret = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); in setup_conf()
6967 conf->mddev = mddev; in setup_conf()
6969 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) in setup_conf()
6977 spin_lock_init(conf->hash_locks); in setup_conf()
6979 spin_lock_init(conf->hash_locks + i); in setup_conf()
6982 INIT_LIST_HEAD(conf->inactive_list + i); in setup_conf()
6985 INIT_LIST_HEAD(conf->temp_inactive_list + i); in setup_conf()
6987 atomic_set(&conf->r5c_cached_full_stripes, 0); in setup_conf()
6988 INIT_LIST_HEAD(&conf->r5c_full_stripe_list); in setup_conf()
6989 atomic_set(&conf->r5c_cached_partial_stripes, 0); in setup_conf()
6990 INIT_LIST_HEAD(&conf->r5c_partial_stripe_list); in setup_conf()
6991 atomic_set(&conf->r5c_flushing_full_stripes, 0); in setup_conf()
6992 atomic_set(&conf->r5c_flushing_partial_stripes, 0); in setup_conf()
6994 conf->level = mddev->new_level; in setup_conf()
6995 conf->chunk_sectors = mddev->new_chunk_sectors; in setup_conf()
6996 if (raid5_alloc_percpu(conf) != 0) in setup_conf()
7006 disk = conf->disks + raid_disk; in setup_conf()
7024 conf->fullsync = 1; in setup_conf()
7027 conf->level = mddev->new_level; in setup_conf()
7028 if (conf->level == 6) { in setup_conf()
7029 conf->max_degraded = 2; in setup_conf()
7031 conf->rmw_level = PARITY_ENABLE_RMW; in setup_conf()
7033 conf->rmw_level = PARITY_DISABLE_RMW; in setup_conf()
7035 conf->max_degraded = 1; in setup_conf()
7036 conf->rmw_level = PARITY_ENABLE_RMW; in setup_conf()
7038 conf->algorithm = mddev->new_layout; in setup_conf()
7039 conf->reshape_progress = mddev->reshape_position; in setup_conf()
7040 if (conf->reshape_progress != MaxSector) { in setup_conf()
7041 conf->prev_chunk_sectors = mddev->chunk_sectors; in setup_conf()
7042 conf->prev_algo = mddev->layout; in setup_conf()
7044 conf->prev_chunk_sectors = conf->chunk_sectors; in setup_conf()
7045 conf->prev_algo = conf->algorithm; in setup_conf()
7048 conf->min_nr_stripes = NR_STRIPES; in setup_conf()
7053 conf->min_nr_stripes = max(NR_STRIPES, stripes); in setup_conf()
7054 if (conf->min_nr_stripes != NR_STRIPES) in setup_conf()
7056 mdname(mddev), conf->min_nr_stripes); in setup_conf()
7058 memory = conf->min_nr_stripes * (sizeof(struct stripe_head) + in setup_conf()
7060 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); in setup_conf()
7061 if (grow_stripes(conf, conf->min_nr_stripes)) { in setup_conf()
7072 conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4; in setup_conf()
7073 conf->shrinker.scan_objects = raid5_cache_scan; in setup_conf()
7074 conf->shrinker.count_objects = raid5_cache_count; in setup_conf()
7075 conf->shrinker.batch = 128; in setup_conf()
7076 conf->shrinker.flags = 0; in setup_conf()
7077 if (register_shrinker(&conf->shrinker)) { in setup_conf()
7084 conf->thread = md_register_thread(raid5d, mddev, pers_name); in setup_conf()
7085 if (!conf->thread) { in setup_conf()
7091 return conf; in setup_conf()
7094 if (conf) { in setup_conf()
7095 free_conf(conf); in setup_conf()
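setup_conf() reports failure through the pointer itself: on success it returns the new conf, on failure an errno encoded with ERR_PTR(), which raid5_run() below unpacks with IS_ERR()/PTR_ERR(). A kernel-style sketch of that convention (conf_setup/conf_run are hypothetical helper names):

static struct r5conf *conf_setup(void)
{
	struct r5conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);

	if (!conf)
		return ERR_PTR(-ENOMEM);	/* errno hidden in the pointer */
	return conf;
}

static int conf_run(struct mddev *mddev)
{
	struct r5conf *conf = conf_setup();

	if (IS_ERR(conf))
		return PTR_ERR(conf);		/* recover the negative errno */
	mddev->private = conf;
	return 0;
}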
7129 struct r5conf *conf; in raid5_run() local
7270 conf = setup_conf(mddev); in raid5_run()
7272 conf = mddev->private; in raid5_run()
7274 if (IS_ERR(conf)) in raid5_run()
7275 return PTR_ERR(conf); in raid5_run()
7287 conf->min_offset_diff = min_offset_diff; in raid5_run()
7288 mddev->thread = conf->thread; in raid5_run()
7289 conf->thread = NULL; in raid5_run()
7290 mddev->private = conf; in raid5_run()
7292 for (i = 0; i < conf->raid_disks && conf->previous_raid_disks; in raid5_run()
7294 rdev = conf->disks[i].rdev; in raid5_run()
7295 if (!rdev && conf->disks[i].replacement) { in raid5_run()
7297 rdev = conf->disks[i].replacement; in raid5_run()
7298 conf->disks[i].replacement = NULL; in raid5_run()
7300 conf->disks[i].rdev = rdev; in raid5_run()
7304 if (conf->disks[i].replacement && in raid5_run()
7305 conf->reshape_progress != MaxSector) { in raid5_run()
7330 conf->algorithm, in raid5_run()
7331 conf->raid_disks, in raid5_run()
7332 conf->max_degraded)) in raid5_run()
7336 conf->prev_algo, in raid5_run()
7337 conf->previous_raid_disks, in raid5_run()
7338 conf->max_degraded)) in raid5_run()
7346 mddev->degraded = raid5_calc_degraded(conf); in raid5_run()
7348 if (has_failed(conf)) { in raid5_run()
7350 mdname(mddev), mddev->degraded, conf->raid_disks); in raid5_run()
7374 mdname(mddev), conf->level, in raid5_run()
7378 print_raid5_conf(conf); in raid5_run()
7380 if (conf->reshape_progress != MaxSector) { in raid5_run()
7381 conf->reshape_safe = conf->reshape_progress; in raid5_run()
7382 atomic_set(&conf->reshape_stripes, 0); in raid5_run()
7406 int data_disks = conf->previous_raid_disks - conf->max_degraded; in raid5_run()
7415 (conf->raid_disks - conf->max_degraded)); in raid5_run()
7466 if (log_init(conf, journal_dev, raid5_has_ppl(conf))) in raid5_run()
7472 print_raid5_conf(conf); in raid5_run()
7473 free_conf(conf); in raid5_run()
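One step worth calling out in raid5_run() above is the replacement-promotion loop: a slot that has lost its primary rdev but still holds a fully recovered replacement promotes that replacement into the rdev position before the degraded count is computed. A kernel-style sketch of that step (the Replacement flag handling is an assumption based on md's flag names):

for (i = 0; i < conf->raid_disks; i++) {
	struct md_rdev *rdev = conf->disks[i].rdev;

	if (!rdev && conf->disks[i].replacement) {
		rdev = conf->disks[i].replacement;	/* promote */
		conf->disks[i].replacement = NULL;
		clear_bit(Replacement, &rdev->flags);	/* assumed flag */
		conf->disks[i].rdev = rdev;
	}
}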
7481 struct r5conf *conf = priv; in raid5_free() local
7483 free_conf(conf); in raid5_free()
7489 struct r5conf *conf = mddev->private; in raid5_status() local
7493 conf->chunk_sectors / 2, mddev->layout); in raid5_status()
7494 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); in raid5_status()
7496 for (i = 0; i < conf->raid_disks; i++) { in raid5_status()
7497 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in raid5_status()
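raid5_status() walks the rdev table under RCU: the per-slot rdev pointers may be swapped out concurrently, so each is loaded with rcu_dereference() inside a read-side critical section. A kernel-style sketch of the read pattern (the U/_ output follows md's status convention):

rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
	struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);

	seq_printf(seq, "%s",
		   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
}
rcu_read_unlock();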
7504 static void print_raid5_conf (struct r5conf *conf) in print_raid5_conf() argument
7510 if (!conf) { in print_raid5_conf()
7514 pr_debug(" --- level:%d rd:%d wd:%d\n", conf->level, in print_raid5_conf()
7515 conf->raid_disks, in print_raid5_conf()
7516 conf->raid_disks - conf->mddev->degraded); in print_raid5_conf()
7518 for (i = 0; i < conf->raid_disks; i++) { in print_raid5_conf()
7520 tmp = conf->disks + i; in print_raid5_conf()
7531 struct r5conf *conf = mddev->private; in raid5_spare_active() local
7536 for (i = 0; i < conf->raid_disks; i++) { in raid5_spare_active()
7537 tmp = conf->disks + i; in raid5_spare_active()
7564 spin_lock_irqsave(&conf->device_lock, flags); in raid5_spare_active()
7565 mddev->degraded = raid5_calc_degraded(conf); in raid5_spare_active()
7566 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_spare_active()
7567 print_raid5_conf(conf); in raid5_spare_active()
7573 struct r5conf *conf = mddev->private; in raid5_remove_disk() local
7577 struct disk_info *p = conf->disks + number; in raid5_remove_disk()
7579 print_raid5_conf(conf); in raid5_remove_disk()
7580 if (test_bit(Journal, &rdev->flags) && conf->log) { in raid5_remove_disk()
7587 if (atomic_read(&conf->active_stripes) || in raid5_remove_disk()
7588 atomic_read(&conf->r5c_cached_full_stripes) || in raid5_remove_disk()
7589 atomic_read(&conf->r5c_cached_partial_stripes)) { in raid5_remove_disk()
7592 log_exit(conf); in raid5_remove_disk()
7602 if (number >= conf->raid_disks && in raid5_remove_disk()
7603 conf->reshape_progress == MaxSector) in raid5_remove_disk()
7615 mddev->recovery_disabled != conf->recovery_disabled && in raid5_remove_disk()
7616 !has_failed(conf) && in raid5_remove_disk()
7618 number < conf->raid_disks) { in raid5_remove_disk()
7632 err = log_modify(conf, rdev, false); in raid5_remove_disk()
7646 err = log_modify(conf, p->rdev, true); in raid5_remove_disk()
7652 print_raid5_conf(conf); in raid5_remove_disk()
7658 struct r5conf *conf = mddev->private; in raid5_add_disk() local
7663 int last = conf->raid_disks - 1; in raid5_add_disk()
7666 if (conf->log) in raid5_add_disk()
7674 log_init(conf, rdev, false); in raid5_add_disk()
7677 if (mddev->recovery_disabled == conf->recovery_disabled) in raid5_add_disk()
7680 if (rdev->saved_raid_disk < 0 && has_failed(conf)) in raid5_add_disk()
7693 conf->disks[rdev->saved_raid_disk].rdev == NULL) in raid5_add_disk()
7697 p = conf->disks + disk; in raid5_add_disk()
7702 conf->fullsync = 1; in raid5_add_disk()
7705 err = log_modify(conf, rdev, true); in raid5_add_disk()
7711 p = conf->disks + disk; in raid5_add_disk()
7718 conf->fullsync = 1; in raid5_add_disk()
7724 print_raid5_conf(conf); in raid5_add_disk()
7738 struct r5conf *conf = mddev->private; in raid5_resize() local
7740 if (raid5_has_log(conf) || raid5_has_ppl(conf)) in raid5_resize()
7742 sectors &= ~((sector_t)conf->chunk_sectors - 1); in raid5_resize()
7773 struct r5conf *conf = mddev->private; in check_stripe_cache() local
7775 > conf->min_nr_stripes || in check_stripe_cache()
7777 > conf->min_nr_stripes) { in check_stripe_cache()
7789 struct r5conf *conf = mddev->private; in check_reshape() local
7791 if (raid5_has_log(conf) || raid5_has_ppl(conf)) in check_reshape()
7797 if (has_failed(conf)) in check_reshape()
7817 if (resize_chunks(conf, in check_reshape()
7818 conf->previous_raid_disks in check_reshape()
7825 if (conf->previous_raid_disks + mddev->delta_disks <= conf->pool_size) in check_reshape()
7827 return resize_stripes(conf, (conf->previous_raid_disks in check_reshape()
7833 struct r5conf *conf = mddev->private; in raid5_start_reshape() local
7844 if (has_failed(conf)) in raid5_start_reshape()
7853 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) in raid5_start_reshape()
7863 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) in raid5_start_reshape()
7870 atomic_set(&conf->reshape_stripes, 0); in raid5_start_reshape()
7871 spin_lock_irq(&conf->device_lock); in raid5_start_reshape()
7872 write_seqcount_begin(&conf->gen_lock); in raid5_start_reshape()
7873 conf->previous_raid_disks = conf->raid_disks; in raid5_start_reshape()
7874 conf->raid_disks += mddev->delta_disks; in raid5_start_reshape()
7875 conf->prev_chunk_sectors = conf->chunk_sectors; in raid5_start_reshape()
7876 conf->chunk_sectors = mddev->new_chunk_sectors; in raid5_start_reshape()
7877 conf->prev_algo = conf->algorithm; in raid5_start_reshape()
7878 conf->algorithm = mddev->new_layout; in raid5_start_reshape()
7879 conf->generation++; in raid5_start_reshape()
7885 conf->reshape_progress = raid5_size(mddev, 0, 0); in raid5_start_reshape()
7887 conf->reshape_progress = 0; in raid5_start_reshape()
7888 conf->reshape_safe = conf->reshape_progress; in raid5_start_reshape()
7889 write_seqcount_end(&conf->gen_lock); in raid5_start_reshape()
7890 spin_unlock_irq(&conf->device_lock); in raid5_start_reshape()
7912 >= conf->previous_raid_disks) in raid5_start_reshape()
7920 } else if (rdev->raid_disk >= conf->previous_raid_disks in raid5_start_reshape()
7930 spin_lock_irqsave(&conf->device_lock, flags); in raid5_start_reshape()
7931 mddev->degraded = raid5_calc_degraded(conf); in raid5_start_reshape()
7932 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_start_reshape()
7934 mddev->raid_disks = conf->raid_disks; in raid5_start_reshape()
7935 mddev->reshape_position = conf->reshape_progress; in raid5_start_reshape()
7947 spin_lock_irq(&conf->device_lock); in raid5_start_reshape()
7948 write_seqcount_begin(&conf->gen_lock); in raid5_start_reshape()
7949 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; in raid5_start_reshape()
7951 conf->chunk_sectors = conf->prev_chunk_sectors; in raid5_start_reshape()
7952 mddev->new_layout = conf->algorithm = conf->prev_algo; in raid5_start_reshape()
7956 conf->generation--; in raid5_start_reshape()
7957 conf->reshape_progress = MaxSector; in raid5_start_reshape()
7959 write_seqcount_end(&conf->gen_lock); in raid5_start_reshape()
7960 spin_unlock_irq(&conf->device_lock); in raid5_start_reshape()
7963 conf->reshape_checkpoint = jiffies; in raid5_start_reshape()
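raid5_start_reshape() changes the array geometry under two guards at once: device_lock serializes writers, while gen_lock's write_seqcount_begin()/write_seqcount_end() lets lockless readers detect a torn snapshot and retry. A condensed sketch of the write side (new_chunk_sectors is a hypothetical local):

spin_lock_irq(&conf->device_lock);
write_seqcount_begin(&conf->gen_lock);
conf->prev_chunk_sectors = conf->chunk_sectors;
conf->chunk_sectors = new_chunk_sectors;
conf->generation++;			/* readers key retries off this */
write_seqcount_end(&conf->gen_lock);
spin_unlock_irq(&conf->device_lock);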
7972 static void end_reshape(struct r5conf *conf) in end_reshape() argument
7975 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in end_reshape()
7978 spin_lock_irq(&conf->device_lock); in end_reshape()
7979 conf->previous_raid_disks = conf->raid_disks; in end_reshape()
7980 md_finish_reshape(conf->mddev); in end_reshape()
7982 conf->reshape_progress = MaxSector; in end_reshape()
7983 conf->mddev->reshape_position = MaxSector; in end_reshape()
7984 rdev_for_each(rdev, conf->mddev) in end_reshape()
7989 spin_unlock_irq(&conf->device_lock); in end_reshape()
7990 wake_up(&conf->wait_for_overlap); in end_reshape()
7995 if (conf->mddev->queue) { in end_reshape()
7996 int data_disks = conf->raid_disks - conf->max_degraded; in end_reshape()
7997 int stripe = data_disks * ((conf->chunk_sectors << 9) in end_reshape()
7999 if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe) in end_reshape()
8000 conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe; in end_reshape()
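The read-ahead sizing above keeps two full stripes in flight. For example, with 4 data disks and 1024-sector (512 KiB) chunks on 4 KiB pages, one stripe is 4 * (1024 << 9) / 4096 = 512 pages, so ra_pages is raised to 1024 pages, i.e. 4 MiB.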
8010 struct r5conf *conf = mddev->private; in raid5_finish_reshape() local
8016 spin_lock_irq(&conf->device_lock); in raid5_finish_reshape()
8017 mddev->degraded = raid5_calc_degraded(conf); in raid5_finish_reshape()
8018 spin_unlock_irq(&conf->device_lock); in raid5_finish_reshape()
8019 for (d = conf->raid_disks ; in raid5_finish_reshape()
8020 d < conf->raid_disks - mddev->delta_disks; in raid5_finish_reshape()
8022 struct md_rdev *rdev = conf->disks[d].rdev; in raid5_finish_reshape()
8025 rdev = conf->disks[d].replacement; in raid5_finish_reshape()
8030 mddev->layout = conf->algorithm; in raid5_finish_reshape()
8031 mddev->chunk_sectors = conf->chunk_sectors; in raid5_finish_reshape()
8040 struct r5conf *conf = mddev->private; in raid5_quiesce() local
8044 lock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8048 r5c_flush_cache(conf, INT_MAX); in raid5_quiesce()
8049 conf->quiesce = 2; in raid5_quiesce()
8050 wait_event_cmd(conf->wait_for_quiescent, in raid5_quiesce()
8051 atomic_read(&conf->active_stripes) == 0 && in raid5_quiesce()
8052 atomic_read(&conf->active_aligned_reads) == 0, in raid5_quiesce()
8053 unlock_all_device_hash_locks_irq(conf), in raid5_quiesce()
8054 lock_all_device_hash_locks_irq(conf)); in raid5_quiesce()
8055 conf->quiesce = 1; in raid5_quiesce()
8056 unlock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8058 wake_up(&conf->wait_for_overlap); in raid5_quiesce()
8061 lock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8062 conf->quiesce = 0; in raid5_quiesce()
8063 wake_up(&conf->wait_for_quiescent); in raid5_quiesce()
8064 wake_up(&conf->wait_for_overlap); in raid5_quiesce()
8065 unlock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8067 log_quiesce(conf, quiesce); in raid5_quiesce()
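The quiesce path above shows wait_event_cmd() in action: the first command runs before the task sleeps and the second after it wakes, so the full set of hash locks is dropped across the schedule and retaken before the condition is rechecked. Condensed from the listing:

conf->quiesce = 2;			/* stop new stripe activations */
wait_event_cmd(conf->wait_for_quiescent,
	       atomic_read(&conf->active_stripes) == 0 &&
	       atomic_read(&conf->active_aligned_reads) == 0,
	       unlock_all_device_hash_locks_irq(conf),	/* before sleep */
	       lock_all_device_hash_locks_irq(conf));	/* after wake */
conf->quiesce = 1;			/* fully quiesced */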
8168 struct r5conf *conf = mddev->private; in raid5_check_reshape() local
8188 conf->algorithm = mddev->new_layout; in raid5_check_reshape()
8192 conf->chunk_sectors = new_chunk; in raid5_check_reshape()
8311 struct r5conf *conf; in raid5_change_consistency_policy() local
8317 conf = mddev->private; in raid5_change_consistency_policy()
8318 if (!conf) { in raid5_change_consistency_policy()
8325 if (!raid5_has_ppl(conf) && conf->level == 5) { in raid5_change_consistency_policy()
8326 err = log_init(conf, NULL, true); in raid5_change_consistency_policy()
8328 err = resize_stripes(conf, conf->pool_size); in raid5_change_consistency_policy()
8330 log_exit(conf); in raid5_change_consistency_policy()
8335 if (raid5_has_ppl(conf)) { in raid5_change_consistency_policy()
8337 log_exit(conf); in raid5_change_consistency_policy()
8339 err = resize_stripes(conf, conf->pool_size); in raid5_change_consistency_policy()
8340 } else if (test_bit(MD_HAS_JOURNAL, &conf->mddev->flags) && in raid5_change_consistency_policy()
8341 r5l_log_disk_error(conf)) { in raid5_change_consistency_policy()
8373 struct r5conf *conf = mddev->private; in raid5_start() local
8375 return r5l_start(conf->log); in raid5_start()