Lines matching refs:sh
(Identifier cross-reference for sh, the struct stripe_head pointer used throughout drivers/md/raid5.c. Each entry shows the source line number, the matching code fragment, and the enclosing function; a trailing "argument" or "local" notes how sh is bound in that function.)

112 static inline int raid6_d0(struct stripe_head *sh)  in raid6_d0()  argument
114 if (sh->ddf_layout) in raid6_d0()
118 if (sh->qd_idx == sh->disks - 1) in raid6_d0()
121 return sh->qd_idx + 1; in raid6_d0()
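
    [The hits above elide raid6_d0()'s return paths. Reconstructed, assuming this index was built from upstream drivers/md/raid5.c and with comments paraphrased, the helper reads roughly:

    static inline int raid6_d0(struct stripe_head *sh)
    {
            if (sh->ddf_layout)
                    /* ddf layouts always start from the first device */
                    return 0;
            /* md starts just after the Q block */
            if (sh->qd_idx == sh->disks - 1)
                    return 0;
            else
                    return sh->qd_idx + 1;
    }

    It yields the index of the first data disk of a RAID-6 stripe; set_syndrome_sources() further down walks the disks starting from this slot.]
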
134 static int raid6_idx_to_slot(int idx, struct stripe_head *sh, in raid6_idx_to_slot() argument
139 if (sh->ddf_layout) in raid6_idx_to_slot()
141 if (idx == sh->pd_idx) in raid6_idx_to_slot()
143 if (idx == sh->qd_idx) in raid6_idx_to_slot()
145 if (!sh->ddf_layout) in raid6_idx_to_slot()
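
    [Similarly, the slot bookkeeping between the visible tests of raid6_idx_to_slot() is elided. A sketch of the full function, under the same upstream assumption:

    /* map a disk index to a slot in the syndrome source array */
    static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
                                 int *count, int syndrome_disks)
    {
            int slot = *count;

            if (sh->ddf_layout)
                    (*count)++;
            if (idx == sh->pd_idx)
                    return syndrome_disks;          /* P: second-to-last slot */
            if (idx == sh->qd_idx)
                    return syndrome_disks + 1;      /* Q: last slot */
            if (!sh->ddf_layout)
                    (*count)++;
            return slot;
    }]
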
152 static int stripe_operations_active(struct stripe_head *sh) in stripe_operations_active() argument
154 return sh->check_state || sh->reconstruct_state || in stripe_operations_active()
155 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || in stripe_operations_active()
156 test_bit(STRIPE_COMPUTE_RUN, &sh->state); in stripe_operations_active()
159 static bool stripe_is_lowprio(struct stripe_head *sh) in stripe_is_lowprio() argument
161 return (test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) || in stripe_is_lowprio()
162 test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) && in stripe_is_lowprio()
163 !test_bit(STRIPE_R5C_CACHING, &sh->state); in stripe_is_lowprio()
166 static void raid5_wakeup_stripe_thread(struct stripe_head *sh) in raid5_wakeup_stripe_thread() argument
168 struct r5conf *conf = sh->raid_conf; in raid5_wakeup_stripe_thread()
171 int i, cpu = sh->cpu; in raid5_wakeup_stripe_thread()
175 sh->cpu = cpu; in raid5_wakeup_stripe_thread()
178 if (list_empty(&sh->lru)) { in raid5_wakeup_stripe_thread()
181 if (stripe_is_lowprio(sh)) in raid5_wakeup_stripe_thread()
182 list_add_tail(&sh->lru, &group->loprio_list); in raid5_wakeup_stripe_thread()
184 list_add_tail(&sh->lru, &group->handle_list); in raid5_wakeup_stripe_thread()
186 sh->group = group; in raid5_wakeup_stripe_thread()
194 group = conf->worker_groups + cpu_to_group(sh->cpu); in raid5_wakeup_stripe_thread()
198 queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); in raid5_wakeup_stripe_thread()
205 queue_work_on(sh->cpu, raid5_wq, in raid5_wakeup_stripe_thread()
212 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, in do_release_stripe() argument
218 BUG_ON(!list_empty(&sh->lru)); in do_release_stripe()
222 for (i = sh->disks; i--; ) in do_release_stripe()
223 if (test_bit(R5_InJournal, &sh->dev[i].flags)) in do_release_stripe()
232 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) || in do_release_stripe()
234 !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0)) { in do_release_stripe()
235 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) in do_release_stripe()
236 r5c_make_stripe_write_out(sh); in do_release_stripe()
237 set_bit(STRIPE_HANDLE, &sh->state); in do_release_stripe()
240 if (test_bit(STRIPE_HANDLE, &sh->state)) { in do_release_stripe()
241 if (test_bit(STRIPE_DELAYED, &sh->state) && in do_release_stripe()
242 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in do_release_stripe()
243 list_add_tail(&sh->lru, &conf->delayed_list); in do_release_stripe()
244 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && in do_release_stripe()
245 sh->bm_seq - conf->seq_write > 0) in do_release_stripe()
246 list_add_tail(&sh->lru, &conf->bitmap_list); in do_release_stripe()
248 clear_bit(STRIPE_DELAYED, &sh->state); in do_release_stripe()
249 clear_bit(STRIPE_BIT_DELAY, &sh->state); in do_release_stripe()
251 if (stripe_is_lowprio(sh)) in do_release_stripe()
252 list_add_tail(&sh->lru, in do_release_stripe()
255 list_add_tail(&sh->lru, in do_release_stripe()
258 raid5_wakeup_stripe_thread(sh); in do_release_stripe()
264 BUG_ON(stripe_operations_active(sh)); in do_release_stripe()
265 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in do_release_stripe()
270 if (!test_bit(STRIPE_EXPANDING, &sh->state)) { in do_release_stripe()
272 list_add_tail(&sh->lru, temp_inactive_list); in do_release_stripe()
274 WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags)); in do_release_stripe()
276 list_add_tail(&sh->lru, temp_inactive_list); in do_release_stripe()
279 if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) in do_release_stripe()
281 if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) in do_release_stripe()
283 list_add_tail(&sh->lru, &conf->r5c_full_stripe_list); in do_release_stripe()
291 list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list); in do_release_stripe()
297 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, in __release_stripe() argument
300 if (atomic_dec_and_test(&sh->count)) in __release_stripe()
301 do_release_stripe(conf, sh, temp_inactive_list); in __release_stripe()
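
    [For context, 297-301 is the whole of __release_stripe(), a thin refcount wrapper; only the second declaration line is reconstructed here:

    static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
                                 struct list_head *temp_inactive_list)
    {
            if (atomic_dec_and_test(&sh->count))
                    do_release_stripe(conf, sh, temp_inactive_list);
    }

    Once the last reference drops, do_release_stripe() re-queues the stripe onto the delayed, bitmap, handle, or inactive lists as shown above; callers are expected to hold conf->device_lock.]
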
357 struct stripe_head *sh, *t; in release_stripe_list() local
363 llist_for_each_entry_safe(sh, t, head, release_list) { in release_stripe_list()
368 clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state); in release_stripe_list()
374 hash = sh->hash_lock_index; in release_stripe_list()
375 __release_stripe(conf, sh, &temp_inactive_list[hash]); in release_stripe_list()
382 void raid5_release_stripe(struct stripe_head *sh) in raid5_release_stripe() argument
384 struct r5conf *conf = sh->raid_conf; in raid5_release_stripe()
392 if (atomic_add_unless(&sh->count, -1, 1)) in raid5_release_stripe()
396 test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) in raid5_release_stripe()
398 wakeup = llist_add(&sh->release_list, &conf->released_stripes); in raid5_release_stripe()
404 if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) { in raid5_release_stripe()
406 hash = sh->hash_lock_index; in raid5_release_stripe()
407 do_release_stripe(conf, sh, &list); in raid5_release_stripe()
413 static inline void remove_hash(struct stripe_head *sh) in remove_hash() argument
416 (unsigned long long)sh->sector); in remove_hash()
418 hlist_del_init(&sh->hash); in remove_hash()
421 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) in insert_hash() argument
423 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
426 (unsigned long long)sh->sector); in insert_hash()
428 hlist_add_head(&sh->hash, hp); in insert_hash()
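
    [Only the pr_debug format strings of remove_hash()/insert_hash() are missing above; restored, the pair is approximately:

    static inline void remove_hash(struct stripe_head *sh)
    {
            pr_debug("remove_hash(), stripe %llu\n",
                    (unsigned long long)sh->sector);

            hlist_del_init(&sh->hash);
    }

    static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
    {
            struct hlist_head *hp = stripe_hash(conf, sh->sector);

            pr_debug("insert_hash(), stripe %llu\n",
                    (unsigned long long)sh->sector);

            hlist_add_head(&sh->hash, hp);
    }]
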
434 struct stripe_head *sh = NULL; in get_free_stripe() local
440 sh = list_entry(first, struct stripe_head, lru); in get_free_stripe()
442 remove_hash(sh); in get_free_stripe()
444 BUG_ON(hash != sh->hash_lock_index); in get_free_stripe()
448 return sh; in get_free_stripe()
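
    [Roughly half of get_free_stripe() is elided above. A reconstruction of the missing list handling and counters, hedged against the exact source revision:

    static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
    {
            struct stripe_head *sh = NULL;
            struct list_head *first;

            if (list_empty(conf->inactive_list + hash))
                    goto out;
            first = (conf->inactive_list + hash)->next;
            sh = list_entry(first, struct stripe_head, lru);
            list_del_init(first);
            remove_hash(sh);
            atomic_inc(&conf->active_stripes);
            BUG_ON(hash != sh->hash_lock_index);
            if (list_empty(conf->inactive_list + hash))
                    atomic_inc(&conf->empty_inactive_list_nr);
    out:
            return sh;
    }]
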
451 static void shrink_buffers(struct stripe_head *sh) in shrink_buffers() argument
455 int num = sh->raid_conf->pool_size; in shrink_buffers()
458 WARN_ON(sh->dev[i].page != sh->dev[i].orig_page); in shrink_buffers()
459 p = sh->dev[i].page; in shrink_buffers()
462 sh->dev[i].page = NULL; in shrink_buffers()
467 static int grow_buffers(struct stripe_head *sh, gfp_t gfp) in grow_buffers() argument
470 int num = sh->raid_conf->pool_size; in grow_buffers()
478 sh->dev[i].page = page; in grow_buffers()
479 sh->dev[i].orig_page = page; in grow_buffers()
486 struct stripe_head *sh);
488 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) in init_stripe() argument
490 struct r5conf *conf = sh->raid_conf; in init_stripe()
493 BUG_ON(atomic_read(&sh->count) != 0); in init_stripe()
494 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); in init_stripe()
495 BUG_ON(stripe_operations_active(sh)); in init_stripe()
496 BUG_ON(sh->batch_head); in init_stripe()
502 sh->generation = conf->generation - previous; in init_stripe()
503 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; in init_stripe()
504 sh->sector = sector; in init_stripe()
505 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
506 sh->state = 0; in init_stripe()
508 for (i = sh->disks; i--; ) { in init_stripe()
509 struct r5dev *dev = &sh->dev[i]; in init_stripe()
514 (unsigned long long)sh->sector, i, dev->toread, in init_stripe()
520 dev->sector = raid5_compute_blocknr(sh, i, previous); in init_stripe()
524 sh->overwrite_disks = 0; in init_stripe()
525 insert_hash(conf, sh); in init_stripe()
526 sh->cpu = smp_processor_id(); in init_stripe()
527 set_bit(STRIPE_BATCH_READY, &sh->state); in init_stripe()
533 struct stripe_head *sh; in __find_stripe() local
536 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) in __find_stripe()
537 if (sh->sector == sector && sh->generation == generation) in __find_stripe()
538 return sh; in __find_stripe()
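
    [A sketch of __find_stripe() with the elided declaration and pr_debug lines filled in, again assuming upstream raid5.c:

    static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
                                             short generation)
    {
            struct stripe_head *sh;

            pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
            hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
                    if (sh->sector == sector && sh->generation == generation)
                            return sh;
            pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
            return NULL;
    }]
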
629 struct stripe_head *sh; in raid5_get_active_stripe() local
641 sh = __find_stripe(conf, sector, conf->generation - previous); in raid5_get_active_stripe()
642 if (!sh) { in raid5_get_active_stripe()
644 sh = get_free_stripe(conf, hash); in raid5_get_active_stripe()
645 if (!sh && !test_bit(R5_DID_ALLOC, in raid5_get_active_stripe()
650 if (noblock && sh == NULL) in raid5_get_active_stripe()
654 if (!sh) { in raid5_get_active_stripe()
669 init_stripe(sh, sector, previous); in raid5_get_active_stripe()
670 atomic_inc(&sh->count); in raid5_get_active_stripe()
672 } else if (!atomic_inc_not_zero(&sh->count)) { in raid5_get_active_stripe()
674 if (!atomic_read(&sh->count)) { in raid5_get_active_stripe()
675 if (!test_bit(STRIPE_HANDLE, &sh->state)) in raid5_get_active_stripe()
677 BUG_ON(list_empty(&sh->lru) && in raid5_get_active_stripe()
678 !test_bit(STRIPE_EXPANDING, &sh->state)); in raid5_get_active_stripe()
682 list_del_init(&sh->lru); in raid5_get_active_stripe()
685 if (sh->group) { in raid5_get_active_stripe()
686 sh->group->stripes_cnt--; in raid5_get_active_stripe()
687 sh->group = NULL; in raid5_get_active_stripe()
690 atomic_inc(&sh->count); in raid5_get_active_stripe()
693 } while (sh == NULL); in raid5_get_active_stripe()
696 return sh; in raid5_get_active_stripe()
699 static bool is_full_stripe_write(struct stripe_head *sh) in is_full_stripe_write() argument
701 BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded)); in is_full_stripe_write()
702 return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded); in is_full_stripe_write()
727 static bool stripe_can_batch(struct stripe_head *sh) in stripe_can_batch() argument
729 struct r5conf *conf = sh->raid_conf; in stripe_can_batch()
733 return test_bit(STRIPE_BATCH_READY, &sh->state) && in stripe_can_batch()
734 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && in stripe_can_batch()
735 is_full_stripe_write(sh); in stripe_can_batch()
739 static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh) in stripe_add_to_batch_list() argument
748 tmp_sec = sh->sector; in stripe_add_to_batch_list()
751 head_sector = sh->sector - STRIPE_SECTORS; in stripe_add_to_batch_list()
784 lock_two_stripes(head, sh); in stripe_add_to_batch_list()
786 if (!stripe_can_batch(head) || !stripe_can_batch(sh)) in stripe_add_to_batch_list()
789 if (sh->batch_head) in stripe_add_to_batch_list()
793 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in stripe_add_to_batch_list()
795 if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf || in stripe_add_to_batch_list()
796 bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite)) in stripe_add_to_batch_list()
813 sh->batch_head = head->batch_head; in stripe_add_to_batch_list()
819 list_add(&sh->batch_list, &head->batch_list); in stripe_add_to_batch_list()
823 sh->batch_head = head->batch_head; in stripe_add_to_batch_list()
825 list_add_tail(&sh->batch_list, &head->batch_list); in stripe_add_to_batch_list()
829 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in stripe_add_to_batch_list()
834 if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { in stripe_add_to_batch_list()
835 int seq = sh->bm_seq; in stripe_add_to_batch_list()
836 if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && in stripe_add_to_batch_list()
837 sh->batch_head->bm_seq > seq) in stripe_add_to_batch_list()
838 seq = sh->batch_head->bm_seq; in stripe_add_to_batch_list()
839 set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); in stripe_add_to_batch_list()
840 sh->batch_head->bm_seq = seq; in stripe_add_to_batch_list()
843 atomic_inc(&sh->count); in stripe_add_to_batch_list()
845 unlock_two_stripes(head, sh); in stripe_add_to_batch_list()
853 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) in use_new_offset() argument
863 if (sh->generation == conf->generation - 1) in use_new_offset()
979 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) in ops_run_io() argument
981 struct r5conf *conf = sh->raid_conf; in ops_run_io()
982 int i, disks = sh->disks; in ops_run_io()
983 struct stripe_head *head_sh = sh; in ops_run_io()
989 if (log_stripe(sh, s) == 0) in ops_run_io()
1000 sh = head_sh; in ops_run_io()
1001 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { in ops_run_io()
1003 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) in ops_run_io()
1005 if (test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_io()
1007 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) in ops_run_io()
1010 &sh->dev[i].flags)) { in ops_run_io()
1015 if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) in ops_run_io()
1019 bi = &sh->dev[i].req; in ops_run_io()
1020 rbi = &sh->dev[i].rreq; /* For writing to replacement */ in ops_run_io()
1060 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, in ops_run_io()
1094 set_bit(STRIPE_IO_STARTED, &sh->state); in ops_run_io()
1101 bi->bi_private = sh; in ops_run_io()
1104 __func__, (unsigned long long)sh->sector, in ops_run_io()
1106 atomic_inc(&sh->count); in ops_run_io()
1107 if (sh != head_sh) in ops_run_io()
1109 if (use_new_offset(conf, sh)) in ops_run_io()
1110 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1113 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1118 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) in ops_run_io()
1119 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in ops_run_io()
1122 test_bit(R5_InJournal, &sh->dev[i].flags)) in ops_run_io()
1128 sh->dev[i].vec.bv_page = sh->dev[i].orig_page; in ops_run_io()
1130 sh->dev[i].vec.bv_page = sh->dev[i].page; in ops_run_io()
1135 bi->bi_write_hint = sh->dev[i].write_hint; in ops_run_io()
1137 sh->dev[i].write_hint = RWF_WRITE_LIFE_NOT_SET; in ops_run_io()
1145 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); in ops_run_io()
1150 sh->dev[i].sector); in ops_run_io()
1161 set_bit(STRIPE_IO_STARTED, &sh->state); in ops_run_io()
1167 rbi->bi_private = sh; in ops_run_io()
1171 __func__, (unsigned long long)sh->sector, in ops_run_io()
1173 atomic_inc(&sh->count); in ops_run_io()
1174 if (sh != head_sh) in ops_run_io()
1176 if (use_new_offset(conf, sh)) in ops_run_io()
1177 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1180 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1182 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) in ops_run_io()
1183 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in ops_run_io()
1184 sh->dev[i].rvec.bv_page = sh->dev[i].page; in ops_run_io()
1189 rbi->bi_write_hint = sh->dev[i].write_hint; in ops_run_io()
1190 sh->dev[i].write_hint = RWF_WRITE_LIFE_NOT_SET; in ops_run_io()
1200 sh->dev[i].sector); in ops_run_io()
1208 set_bit(STRIPE_DEGRADED, &sh->state); in ops_run_io()
1210 bi->bi_opf, i, (unsigned long long)sh->sector); in ops_run_io()
1211 clear_bit(R5_LOCKED, &sh->dev[i].flags); in ops_run_io()
1212 set_bit(STRIPE_HANDLE, &sh->state); in ops_run_io()
1217 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_io()
1219 if (sh != head_sh) in ops_run_io()
1230 struct stripe_head *sh, int no_skipcopy) in async_copy_data() argument
1268 if (sh->raid_conf->skip_copy && in async_copy_data()
1293 struct stripe_head *sh = stripe_head_ref; in ops_complete_biofill() local
1297 (unsigned long long)sh->sector); in ops_complete_biofill()
1300 for (i = sh->disks; i--; ) { in ops_complete_biofill()
1301 struct r5dev *dev = &sh->dev[i]; in ops_complete_biofill()
1322 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); in ops_complete_biofill()
1324 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_biofill()
1325 raid5_release_stripe(sh); in ops_complete_biofill()
1328 static void ops_run_biofill(struct stripe_head *sh) in ops_run_biofill() argument
1334 BUG_ON(sh->batch_head); in ops_run_biofill()
1336 (unsigned long long)sh->sector); in ops_run_biofill()
1338 for (i = sh->disks; i--; ) { in ops_run_biofill()
1339 struct r5dev *dev = &sh->dev[i]; in ops_run_biofill()
1342 spin_lock_irq(&sh->stripe_lock); in ops_run_biofill()
1345 spin_unlock_irq(&sh->stripe_lock); in ops_run_biofill()
1349 dev->sector, tx, sh, 0); in ops_run_biofill()
1355 atomic_inc(&sh->count); in ops_run_biofill()
1356 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); in ops_run_biofill()
1360 static void mark_target_uptodate(struct stripe_head *sh, int target) in mark_target_uptodate() argument
1367 tgt = &sh->dev[target]; in mark_target_uptodate()
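
    [mark_target_uptodate() shows only its declaration and the tgt assignment above; the elided early return and flag handling plausibly read:

    static void mark_target_uptodate(struct stripe_head *sh, int target)
    {
            struct r5dev *tgt;

            if (target < 0)
                    return;

            tgt = &sh->dev[target];
            set_bit(R5_UPTODATE, &tgt->flags);
            BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
            clear_bit(R5_Wantcompute, &tgt->flags);
    }]
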
1375 struct stripe_head *sh = stripe_head_ref; in ops_complete_compute() local
1378 (unsigned long long)sh->sector); in ops_complete_compute()
1381 mark_target_uptodate(sh, sh->ops.target); in ops_complete_compute()
1382 mark_target_uptodate(sh, sh->ops.target2); in ops_complete_compute()
1384 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); in ops_complete_compute()
1385 if (sh->check_state == check_state_compute_run) in ops_complete_compute()
1386 sh->check_state = check_state_compute_result; in ops_complete_compute()
1387 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_compute()
1388 raid5_release_stripe(sh); in ops_complete_compute()
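
    [Stitched together, the complete ops_complete_compute() callback, with the elided pr_debug and comment reconstructed, is roughly:

    static void ops_complete_compute(void *stripe_head_ref)
    {
            struct stripe_head *sh = stripe_head_ref;

            pr_debug("%s: stripe %llu\n", __func__,
                    (unsigned long long)sh->sector);

            /* mark the computed target(s) as uptodate */
            mark_target_uptodate(sh, sh->ops.target);
            mark_target_uptodate(sh, sh->ops.target2);

            clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
            if (sh->check_state == check_state_compute_run)
                    sh->check_state = check_state_compute_result;
            set_bit(STRIPE_HANDLE, &sh->state);
            raid5_release_stripe(sh);
    }]
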
1398 static addr_conv_t *to_addr_conv(struct stripe_head *sh, in to_addr_conv() argument
1401 return (void *) (to_addr_page(percpu, i) + sh->disks + 2); in to_addr_conv()
1405 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute5() argument
1407 int disks = sh->disks; in ops_run_compute5()
1409 int target = sh->ops.target; in ops_run_compute5()
1410 struct r5dev *tgt = &sh->dev[target]; in ops_run_compute5()
1417 BUG_ON(sh->batch_head); in ops_run_compute5()
1420 __func__, (unsigned long long)sh->sector, target); in ops_run_compute5()
1425 xor_srcs[count++] = sh->dev[i].page; in ops_run_compute5()
1427 atomic_inc(&sh->count); in ops_run_compute5()
1430 ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); in ops_run_compute5()
1449 struct stripe_head *sh, in set_syndrome_sources() argument
1452 int disks = sh->disks; in set_syndrome_sources()
1453 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); in set_syndrome_sources()
1454 int d0_idx = raid6_d0(sh); in set_syndrome_sources()
1464 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); in set_syndrome_sources()
1465 struct r5dev *dev = &sh->dev[i]; in set_syndrome_sources()
1467 if (i == sh->qd_idx || i == sh->pd_idx || in set_syndrome_sources()
1476 srcs[slot] = sh->dev[i].orig_page; in set_syndrome_sources()
1478 srcs[slot] = sh->dev[i].page; in set_syndrome_sources()
1487 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_1() argument
1489 int disks = sh->disks; in ops_run_compute6_1()
1492 int qd_idx = sh->qd_idx; in ops_run_compute6_1()
1500 BUG_ON(sh->batch_head); in ops_run_compute6_1()
1501 if (sh->ops.target < 0) in ops_run_compute6_1()
1502 target = sh->ops.target2; in ops_run_compute6_1()
1503 else if (sh->ops.target2 < 0) in ops_run_compute6_1()
1504 target = sh->ops.target; in ops_run_compute6_1()
1510 __func__, (unsigned long long)sh->sector, target); in ops_run_compute6_1()
1512 tgt = &sh->dev[target]; in ops_run_compute6_1()
1516 atomic_inc(&sh->count); in ops_run_compute6_1()
1519 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); in ops_run_compute6_1()
1523 ops_complete_compute, sh, in ops_run_compute6_1()
1524 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1532 blocks[count++] = sh->dev[i].page; in ops_run_compute6_1()
1536 NULL, ops_complete_compute, sh, in ops_run_compute6_1()
1537 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1545 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_2() argument
1547 int i, count, disks = sh->disks; in ops_run_compute6_2()
1548 int syndrome_disks = sh->ddf_layout ? disks : disks-2; in ops_run_compute6_2()
1549 int d0_idx = raid6_d0(sh); in ops_run_compute6_2()
1551 int target = sh->ops.target; in ops_run_compute6_2()
1552 int target2 = sh->ops.target2; in ops_run_compute6_2()
1553 struct r5dev *tgt = &sh->dev[target]; in ops_run_compute6_2()
1554 struct r5dev *tgt2 = &sh->dev[target2]; in ops_run_compute6_2()
1559 BUG_ON(sh->batch_head); in ops_run_compute6_2()
1561 __func__, (unsigned long long)sh->sector, target, target2); in ops_run_compute6_2()
1574 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); in ops_run_compute6_2()
1576 blocks[slot] = sh->dev[i].page; in ops_run_compute6_2()
1589 __func__, (unsigned long long)sh->sector, faila, failb); in ops_run_compute6_2()
1591 atomic_inc(&sh->count); in ops_run_compute6_2()
1598 ops_complete_compute, sh, in ops_run_compute6_2()
1599 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1605 int qd_idx = sh->qd_idx; in ops_run_compute6_2()
1617 blocks[count++] = sh->dev[i].page; in ops_run_compute6_2()
1619 dest = sh->dev[data_target].page; in ops_run_compute6_2()
1623 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1627 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); in ops_run_compute6_2()
1629 ops_complete_compute, sh, in ops_run_compute6_2()
1630 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1636 ops_complete_compute, sh, in ops_run_compute6_2()
1637 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1654 struct stripe_head *sh = stripe_head_ref; in ops_complete_prexor() local
1657 (unsigned long long)sh->sector); in ops_complete_prexor()
1659 if (r5c_is_writeback(sh->raid_conf->log)) in ops_complete_prexor()
1664 r5c_release_extra_page(sh); in ops_complete_prexor()
1668 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor5() argument
1671 int disks = sh->disks; in ops_run_prexor5()
1673 int count = 0, pd_idx = sh->pd_idx, i; in ops_run_prexor5()
1677 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_prexor5()
1679 BUG_ON(sh->batch_head); in ops_run_prexor5()
1681 (unsigned long long)sh->sector); in ops_run_prexor5()
1684 struct r5dev *dev = &sh->dev[i]; in ops_run_prexor5()
1693 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor5()
1700 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor6() argument
1708 (unsigned long long)sh->sector); in ops_run_prexor6()
1710 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN); in ops_run_prexor6()
1713 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor6()
1720 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) in ops_run_biodrain() argument
1722 struct r5conf *conf = sh->raid_conf; in ops_run_biodrain()
1723 int disks = sh->disks; in ops_run_biodrain()
1725 struct stripe_head *head_sh = sh; in ops_run_biodrain()
1728 (unsigned long long)sh->sector); in ops_run_biodrain()
1734 sh = head_sh; in ops_run_biodrain()
1739 dev = &sh->dev[i]; in ops_run_biodrain()
1745 spin_lock_irq(&sh->stripe_lock); in ops_run_biodrain()
1748 sh->overwrite_disks = 0; in ops_run_biodrain()
1751 spin_unlock_irq(&sh->stripe_lock); in ops_run_biodrain()
1764 dev->sector, tx, sh, in ops_run_biodrain()
1777 sh = list_first_entry(&sh->batch_list, in ops_run_biodrain()
1780 if (sh == head_sh) in ops_run_biodrain()
1792 struct stripe_head *sh = stripe_head_ref; in ops_complete_reconstruct() local
1793 int disks = sh->disks; in ops_complete_reconstruct()
1794 int pd_idx = sh->pd_idx; in ops_complete_reconstruct()
1795 int qd_idx = sh->qd_idx; in ops_complete_reconstruct()
1800 (unsigned long long)sh->sector); in ops_complete_reconstruct()
1803 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); in ops_complete_reconstruct()
1804 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); in ops_complete_reconstruct()
1805 discard |= test_bit(R5_Discard, &sh->dev[i].flags); in ops_complete_reconstruct()
1809 struct r5dev *dev = &sh->dev[i]; in ops_complete_reconstruct()
1814 if (test_bit(STRIPE_EXPAND_READY, &sh->state)) in ops_complete_reconstruct()
1824 if (sh->reconstruct_state == reconstruct_state_drain_run) in ops_complete_reconstruct()
1825 sh->reconstruct_state = reconstruct_state_drain_result; in ops_complete_reconstruct()
1826 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) in ops_complete_reconstruct()
1827 sh->reconstruct_state = reconstruct_state_prexor_drain_result; in ops_complete_reconstruct()
1829 BUG_ON(sh->reconstruct_state != reconstruct_state_run); in ops_complete_reconstruct()
1830 sh->reconstruct_state = reconstruct_state_result; in ops_complete_reconstruct()
1833 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_reconstruct()
1834 raid5_release_stripe(sh); in ops_complete_reconstruct()
1838 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct5() argument
1841 int disks = sh->disks; in ops_run_reconstruct5()
1844 int count, pd_idx = sh->pd_idx, i; in ops_run_reconstruct5()
1849 struct stripe_head *head_sh = sh; in ops_run_reconstruct5()
1853 (unsigned long long)sh->sector); in ops_run_reconstruct5()
1855 for (i = 0; i < sh->disks; i++) { in ops_run_reconstruct5()
1858 if (!test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_reconstruct5()
1861 if (i >= sh->disks) { in ops_run_reconstruct5()
1862 atomic_inc(&sh->count); in ops_run_reconstruct5()
1863 set_bit(R5_Discard, &sh->dev[pd_idx].flags); in ops_run_reconstruct5()
1864 ops_complete_reconstruct(sh); in ops_run_reconstruct5()
1875 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_reconstruct5()
1877 struct r5dev *dev = &sh->dev[i]; in ops_run_reconstruct5()
1883 xor_dest = sh->dev[pd_idx].page; in ops_run_reconstruct5()
1885 struct r5dev *dev = &sh->dev[i]; in ops_run_reconstruct5()
1897 list_first_entry(&sh->batch_list, in ops_run_reconstruct5()
1905 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
1909 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
1918 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_reconstruct5()
1925 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct6() argument
1931 struct stripe_head *head_sh = sh; in ops_run_reconstruct6()
1936 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); in ops_run_reconstruct6()
1938 for (i = 0; i < sh->disks; i++) { in ops_run_reconstruct6()
1939 if (sh->pd_idx == i || sh->qd_idx == i) in ops_run_reconstruct6()
1941 if (!test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_reconstruct6()
1944 if (i >= sh->disks) { in ops_run_reconstruct6()
1945 atomic_inc(&sh->count); in ops_run_reconstruct6()
1946 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in ops_run_reconstruct6()
1947 set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); in ops_run_reconstruct6()
1948 ops_complete_reconstruct(sh); in ops_run_reconstruct6()
1955 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { in ops_run_reconstruct6()
1963 count = set_syndrome_sources(blocks, sh, synflags); in ops_run_reconstruct6()
1965 list_first_entry(&sh->batch_list, in ops_run_reconstruct6()
1971 head_sh, to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
1974 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
1978 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_reconstruct6()
1986 struct stripe_head *sh = stripe_head_ref; in ops_complete_check() local
1989 (unsigned long long)sh->sector); in ops_complete_check()
1991 sh->check_state = check_state_check_result; in ops_complete_check()
1992 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_check()
1993 raid5_release_stripe(sh); in ops_complete_check()
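
    [ops_complete_check() is short enough to restore in full; the pr_debug format line is assumed:

    static void ops_complete_check(void *stripe_head_ref)
    {
            struct stripe_head *sh = stripe_head_ref;

            pr_debug("%s: stripe %llu\n", __func__,
                    (unsigned long long)sh->sector);

            sh->check_state = check_state_check_result;
            set_bit(STRIPE_HANDLE, &sh->state);
            raid5_release_stripe(sh);
    }]
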
1996 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_check_p() argument
1998 int disks = sh->disks; in ops_run_check_p()
1999 int pd_idx = sh->pd_idx; in ops_run_check_p()
2000 int qd_idx = sh->qd_idx; in ops_run_check_p()
2009 (unsigned long long)sh->sector); in ops_run_check_p()
2011 BUG_ON(sh->batch_head); in ops_run_check_p()
2013 xor_dest = sh->dev[pd_idx].page; in ops_run_check_p()
2018 xor_srcs[count++] = sh->dev[i].page; in ops_run_check_p()
2022 to_addr_conv(sh, percpu, 0)); in ops_run_check_p()
2024 &sh->ops.zero_sum_result, &submit); in ops_run_check_p()
2026 atomic_inc(&sh->count); in ops_run_check_p()
2027 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); in ops_run_check_p()
2031 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) in ops_run_check_pq() argument
2038 (unsigned long long)sh->sector, checkp); in ops_run_check_pq()
2040 BUG_ON(sh->batch_head); in ops_run_check_pq()
2041 count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL); in ops_run_check_pq()
2045 atomic_inc(&sh->count); in ops_run_check_pq()
2047 sh, to_addr_conv(sh, percpu, 0)); in ops_run_check_pq()
2049 &sh->ops.zero_sum_result, percpu->spare_page, &submit); in ops_run_check_pq()
2052 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) in raid_run_ops() argument
2054 int overlap_clear = 0, i, disks = sh->disks; in raid_run_ops()
2056 struct r5conf *conf = sh->raid_conf; in raid_run_ops()
2064 ops_run_biofill(sh); in raid_run_ops()
2070 tx = ops_run_compute5(sh, percpu); in raid_run_ops()
2072 if (sh->ops.target2 < 0 || sh->ops.target < 0) in raid_run_ops()
2073 tx = ops_run_compute6_1(sh, percpu); in raid_run_ops()
2075 tx = ops_run_compute6_2(sh, percpu); in raid_run_ops()
2084 tx = ops_run_prexor5(sh, percpu, tx); in raid_run_ops()
2086 tx = ops_run_prexor6(sh, percpu, tx); in raid_run_ops()
2090 tx = ops_run_partial_parity(sh, percpu, tx); in raid_run_ops()
2093 tx = ops_run_biodrain(sh, tx); in raid_run_ops()
2099 ops_run_reconstruct5(sh, percpu, tx); in raid_run_ops()
2101 ops_run_reconstruct6(sh, percpu, tx); in raid_run_ops()
2105 if (sh->check_state == check_state_run) in raid_run_ops()
2106 ops_run_check_p(sh, percpu); in raid_run_ops()
2107 else if (sh->check_state == check_state_run_q) in raid_run_ops()
2108 ops_run_check_pq(sh, percpu, 0); in raid_run_ops()
2109 else if (sh->check_state == check_state_run_pq) in raid_run_ops()
2110 ops_run_check_pq(sh, percpu, 1); in raid_run_ops()
2115 if (overlap_clear && !sh->batch_head) in raid_run_ops()
2117 struct r5dev *dev = &sh->dev[i]; in raid_run_ops()
2119 wake_up(&sh->raid_conf->wait_for_overlap); in raid_run_ops()
2124 static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh) in free_stripe() argument
2126 if (sh->ppl_page) in free_stripe()
2127 __free_page(sh->ppl_page); in free_stripe()
2128 kmem_cache_free(sc, sh); in free_stripe()
2134 struct stripe_head *sh; in alloc_stripe() local
2137 sh = kmem_cache_zalloc(sc, gfp); in alloc_stripe()
2138 if (sh) { in alloc_stripe()
2139 spin_lock_init(&sh->stripe_lock); in alloc_stripe()
2140 spin_lock_init(&sh->batch_lock); in alloc_stripe()
2141 INIT_LIST_HEAD(&sh->batch_list); in alloc_stripe()
2142 INIT_LIST_HEAD(&sh->lru); in alloc_stripe()
2143 INIT_LIST_HEAD(&sh->r5c); in alloc_stripe()
2144 INIT_LIST_HEAD(&sh->log_list); in alloc_stripe()
2145 atomic_set(&sh->count, 1); in alloc_stripe()
2146 sh->raid_conf = conf; in alloc_stripe()
2147 sh->log_start = MaxSector; in alloc_stripe()
2149 struct r5dev *dev = &sh->dev[i]; in alloc_stripe()
2156 sh->ppl_page = alloc_page(gfp); in alloc_stripe()
2157 if (!sh->ppl_page) { in alloc_stripe()
2158 free_stripe(sc, sh); in alloc_stripe()
2159 sh = NULL; in alloc_stripe()
2163 return sh; in alloc_stripe()
2167 struct stripe_head *sh; in grow_one_stripe() local
2169 sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf); in grow_one_stripe()
2170 if (!sh) in grow_one_stripe()
2173 if (grow_buffers(sh, gfp)) { in grow_one_stripe()
2174 shrink_buffers(sh); in grow_one_stripe()
2175 free_stripe(conf->slab_cache, sh); in grow_one_stripe()
2178 sh->hash_lock_index = in grow_one_stripe()
2183 raid5_release_stripe(sh); in grow_one_stripe()
2435 struct stripe_head *sh; in drop_one_stripe() local
2439 sh = get_free_stripe(conf, hash); in drop_one_stripe()
2441 if (!sh) in drop_one_stripe()
2443 BUG_ON(atomic_read(&sh->count)); in drop_one_stripe()
2444 shrink_buffers(sh); in drop_one_stripe()
2445 free_stripe(conf->slab_cache, sh); in drop_one_stripe()
2463 struct stripe_head *sh = bi->bi_private; in raid5_end_read_request() local
2464 struct r5conf *conf = sh->raid_conf; in raid5_end_read_request()
2465 int disks = sh->disks, i; in raid5_end_read_request()
2471 if (bi == &sh->dev[i].req) in raid5_end_read_request()
2475 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_read_request()
2482 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) in raid5_end_read_request()
2492 if (use_new_offset(conf, sh)) in raid5_end_read_request()
2493 s = sh->sector + rdev->new_data_offset; in raid5_end_read_request()
2495 s = sh->sector + rdev->data_offset; in raid5_end_read_request()
2497 set_bit(R5_UPTODATE, &sh->dev[i].flags); in raid5_end_read_request()
2498 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { in raid5_end_read_request()
2509 clear_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2510 clear_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_read_request()
2511 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) in raid5_end_read_request()
2512 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2514 if (test_bit(R5_InJournal, &sh->dev[i].flags)) in raid5_end_read_request()
2519 set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); in raid5_end_read_request()
2528 clear_bit(R5_UPTODATE, &sh->dev[i].flags); in raid5_end_read_request()
2531 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) in raid5_end_read_request()
2544 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { in raid5_end_read_request()
2565 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) in raid5_end_read_request()
2568 if (sh->qd_idx >= 0 && sh->pd_idx == i) in raid5_end_read_request()
2569 set_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2570 else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { in raid5_end_read_request()
2571 set_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2572 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2574 set_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2576 clear_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2577 clear_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_read_request()
2581 rdev, sh->sector, STRIPE_SECTORS, 0))) in raid5_end_read_request()
2587 clear_bit(R5_LOCKED, &sh->dev[i].flags); in raid5_end_read_request()
2588 set_bit(STRIPE_HANDLE, &sh->state); in raid5_end_read_request()
2589 raid5_release_stripe(sh); in raid5_end_read_request()
2594 struct stripe_head *sh = bi->bi_private; in raid5_end_write_request() local
2595 struct r5conf *conf = sh->raid_conf; in raid5_end_write_request()
2596 int disks = sh->disks, i; in raid5_end_write_request()
2603 if (bi == &sh->dev[i].req) { in raid5_end_write_request()
2607 if (bi == &sh->dev[i].rreq) { in raid5_end_write_request()
2621 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_write_request()
2632 else if (is_badblock(rdev, sh->sector, in raid5_end_write_request()
2635 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); in raid5_end_write_request()
2638 set_bit(STRIPE_DEGRADED, &sh->state); in raid5_end_write_request()
2640 set_bit(R5_WriteError, &sh->dev[i].flags); in raid5_end_write_request()
2644 } else if (is_badblock(rdev, sh->sector, in raid5_end_write_request()
2647 set_bit(R5_MadeGood, &sh->dev[i].flags); in raid5_end_write_request()
2648 if (test_bit(R5_ReadError, &sh->dev[i].flags)) in raid5_end_write_request()
2653 set_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_write_request()
2658 if (sh->batch_head && bi->bi_status && !replacement) in raid5_end_write_request()
2659 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); in raid5_end_write_request()
2662 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) in raid5_end_write_request()
2663 clear_bit(R5_LOCKED, &sh->dev[i].flags); in raid5_end_write_request()
2664 set_bit(STRIPE_HANDLE, &sh->state); in raid5_end_write_request()
2665 raid5_release_stripe(sh); in raid5_end_write_request()
2667 if (sh->batch_head && sh != sh->batch_head) in raid5_end_write_request()
2668 raid5_release_stripe(sh->batch_head); in raid5_end_write_request()
2715 struct stripe_head *sh) in raid5_compute_sector() argument
2903 if (sh) { in raid5_compute_sector()
2904 sh->pd_idx = pd_idx; in raid5_compute_sector()
2905 sh->qd_idx = qd_idx; in raid5_compute_sector()
2906 sh->ddf_layout = ddf_layout; in raid5_compute_sector()
2915 sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) in raid5_compute_blocknr() argument
2917 struct r5conf *conf = sh->raid_conf; in raid5_compute_blocknr()
2918 int raid_disks = sh->disks; in raid5_compute_blocknr()
2920 sector_t new_sector = sh->sector, check; in raid5_compute_blocknr()
2935 if (i == sh->pd_idx) in raid5_compute_blocknr()
2943 if (i > sh->pd_idx) in raid5_compute_blocknr()
2948 if (i < sh->pd_idx) in raid5_compute_blocknr()
2950 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
2962 if (i == sh->qd_idx) in raid5_compute_blocknr()
2969 if (sh->pd_idx == raid_disks-1) in raid5_compute_blocknr()
2971 else if (i > sh->pd_idx) in raid5_compute_blocknr()
2976 if (sh->pd_idx == raid_disks-1) in raid5_compute_blocknr()
2980 if (i < sh->pd_idx) in raid5_compute_blocknr()
2982 i -= (sh->pd_idx + 2); in raid5_compute_blocknr()
2992 if (sh->pd_idx == 0) in raid5_compute_blocknr()
2996 if (i < sh->pd_idx) in raid5_compute_blocknr()
2998 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3003 if (i > sh->pd_idx) in raid5_compute_blocknr()
3008 if (i < sh->pd_idx) in raid5_compute_blocknr()
3010 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3026 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx in raid5_compute_blocknr()
3027 || sh2.qd_idx != sh->qd_idx) { in raid5_compute_blocknr()
3092 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, in schedule_reconstruction() argument
3095 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; in schedule_reconstruction()
3096 struct r5conf *conf = sh->raid_conf; in schedule_reconstruction()
3106 r5c_release_extra_page(sh); in schedule_reconstruction()
3109 struct r5dev *dev = &sh->dev[i]; in schedule_reconstruction()
3130 sh->reconstruct_state = reconstruct_state_drain_run; in schedule_reconstruction()
3133 sh->reconstruct_state = reconstruct_state_run; in schedule_reconstruction()
3138 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) in schedule_reconstruction()
3141 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || in schedule_reconstruction()
3142 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); in schedule_reconstruction()
3144 (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) || in schedule_reconstruction()
3145 test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags)))); in schedule_reconstruction()
3148 struct r5dev *dev = &sh->dev[i]; in schedule_reconstruction()
3167 sh->reconstruct_state = reconstruct_state_prexor_drain_run; in schedule_reconstruction()
3176 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); in schedule_reconstruction()
3177 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in schedule_reconstruction()
3181 int qd_idx = sh->qd_idx; in schedule_reconstruction()
3182 struct r5dev *dev = &sh->dev[qd_idx]; in schedule_reconstruction()
3189 if (raid5_has_ppl(sh->raid_conf) && sh->ppl_page && in schedule_reconstruction()
3191 !test_bit(STRIPE_FULL_WRITE, &sh->state) && in schedule_reconstruction()
3192 test_bit(R5_Insync, &sh->dev[pd_idx].flags)) in schedule_reconstruction()
3196 __func__, (unsigned long long)sh->sector, in schedule_reconstruction()
3205 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in add_stripe_bio() argument
3209 struct r5conf *conf = sh->raid_conf; in add_stripe_bio()
3214 (unsigned long long)sh->sector); in add_stripe_bio()
3216 spin_lock_irq(&sh->stripe_lock); in add_stripe_bio()
3217 sh->dev[dd_idx].write_hint = bi->bi_write_hint; in add_stripe_bio()
3219 if (sh->batch_head) in add_stripe_bio()
3222 bip = &sh->dev[dd_idx].towrite; in add_stripe_bio()
3226 bip = &sh->dev[dd_idx].toread; in add_stripe_bio()
3249 for (i = 0; i < sh->disks; i++) { in add_stripe_bio()
3250 if (i != sh->pd_idx && in add_stripe_bio()
3251 (i == dd_idx || sh->dev[i].towrite)) { in add_stripe_bio()
3252 sector = sh->dev[i].sector; in add_stripe_bio()
3266 clear_bit(STRIPE_BATCH_READY, &sh->state); in add_stripe_bio()
3277 sector_t sector = sh->dev[dd_idx].sector; in add_stripe_bio()
3278 for (bi=sh->dev[dd_idx].towrite; in add_stripe_bio()
3279 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && in add_stripe_bio()
3281 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { in add_stripe_bio()
3285 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) in add_stripe_bio()
3286 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) in add_stripe_bio()
3287 sh->overwrite_disks++; in add_stripe_bio()
3292 (unsigned long long)sh->sector, dd_idx); in add_stripe_bio()
3307 set_bit(STRIPE_BITMAP_PENDING, &sh->state); in add_stripe_bio()
3308 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3309 md_bitmap_startwrite(conf->mddev->bitmap, sh->sector, in add_stripe_bio()
3311 spin_lock_irq(&sh->stripe_lock); in add_stripe_bio()
3312 clear_bit(STRIPE_BITMAP_PENDING, &sh->state); in add_stripe_bio()
3313 if (!sh->batch_head) { in add_stripe_bio()
3314 sh->bm_seq = conf->seq_flush+1; in add_stripe_bio()
3315 set_bit(STRIPE_BIT_DELAY, &sh->state); in add_stripe_bio()
3318 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3320 if (stripe_can_batch(sh)) in add_stripe_bio()
3321 stripe_add_to_batch_list(conf, sh); in add_stripe_bio()
3325 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); in add_stripe_bio()
3326 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3333 struct stripe_head *sh) in stripe_set_idx() argument
3345 &dd_idx, sh); in stripe_set_idx()
3349 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, in handle_failed_stripe() argument
3353 BUG_ON(sh->batch_head); in handle_failed_stripe()
3358 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { in handle_failed_stripe()
3371 sh->sector, in handle_failed_stripe()
3377 spin_lock_irq(&sh->stripe_lock); in handle_failed_stripe()
3379 bi = sh->dev[i].towrite; in handle_failed_stripe()
3380 sh->dev[i].towrite = NULL; in handle_failed_stripe()
3381 sh->overwrite_disks = 0; in handle_failed_stripe()
3382 spin_unlock_irq(&sh->stripe_lock); in handle_failed_stripe()
3386 log_stripe_write_finished(sh); in handle_failed_stripe()
3388 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in handle_failed_stripe()
3392 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
3393 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
3400 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3404 bi = sh->dev[i].written; in handle_failed_stripe()
3405 sh->dev[i].written = NULL; in handle_failed_stripe()
3406 if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { in handle_failed_stripe()
3407 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in handle_failed_stripe()
3408 sh->dev[i].page = sh->dev[i].orig_page; in handle_failed_stripe()
3413 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
3414 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
3424 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && in handle_failed_stripe()
3426 (!test_bit(R5_Insync, &sh->dev[i].flags) || in handle_failed_stripe()
3427 test_bit(R5_ReadError, &sh->dev[i].flags))) { in handle_failed_stripe()
3428 spin_lock_irq(&sh->stripe_lock); in handle_failed_stripe()
3429 bi = sh->dev[i].toread; in handle_failed_stripe()
3430 sh->dev[i].toread = NULL; in handle_failed_stripe()
3431 spin_unlock_irq(&sh->stripe_lock); in handle_failed_stripe()
3432 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in handle_failed_stripe()
3437 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
3439 r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
3446 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3451 clear_bit(R5_LOCKED, &sh->dev[i].flags); in handle_failed_stripe()
3456 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) in handle_failed_stripe()
3462 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, in handle_failed_sync() argument
3468 BUG_ON(sh->batch_head); in handle_failed_sync()
3469 clear_bit(STRIPE_SYNCING, &sh->state); in handle_failed_sync()
3470 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_failed_sync()
3491 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3498 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3510 static int want_replace(struct stripe_head *sh, int disk_idx) in want_replace() argument
3516 rdev = rcu_dereference(sh->raid_conf->disks[disk_idx].replacement); in want_replace()
3520 && (rdev->recovery_offset <= sh->sector in want_replace()
3521 || rdev->mddev->recovery_cp <= sh->sector)) in want_replace()
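
    [want_replace() above elides its RCU bracketing and the rdev flag checks; a sketch, hedged on the exact tests matching this revision:

    static int want_replace(struct stripe_head *sh, int disk_idx)
    {
            struct md_rdev *rdev;
            int rv = 0;

            rcu_read_lock();
            rdev = rcu_dereference(sh->raid_conf->disks[disk_idx].replacement);
            if (rdev
                && !test_bit(Faulty, &rdev->flags)
                && !test_bit(In_sync, &rdev->flags)
                && (rdev->recovery_offset <= sh->sector
                    || rdev->mddev->recovery_cp <= sh->sector))
                    rv = 1;
            rcu_read_unlock();
            return rv;
    }]
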
3527 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, in need_this_block() argument
3530 struct r5dev *dev = &sh->dev[disk_idx]; in need_this_block()
3531 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], in need_this_block()
3532 &sh->dev[s->failed_num[1]] }; in need_this_block()
3549 (s->replacing && want_replace(sh, disk_idx))) in need_this_block()
3574 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in need_this_block()
3602 if (sh->raid_conf->level != 6 && in need_this_block()
3603 sh->sector < sh->raid_conf->mddev->recovery_cp) in need_this_block()
3607 if (s->failed_num[i] != sh->pd_idx && in need_this_block()
3608 s->failed_num[i] != sh->qd_idx && in need_this_block()
3623 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, in fetch_block() argument
3626 struct r5dev *dev = &sh->dev[disk_idx]; in fetch_block()
3629 if (need_this_block(sh, s, disk_idx, disks)) { in fetch_block()
3635 BUG_ON(sh->batch_head); in fetch_block()
3647 ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) || in fetch_block()
3654 (unsigned long long)sh->sector, disk_idx); in fetch_block()
3655 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in fetch_block()
3658 sh->ops.target = disk_idx; in fetch_block()
3659 sh->ops.target2 = -1; /* no 2nd target */ in fetch_block()
3678 &sh->dev[other].flags)) in fetch_block()
3683 (unsigned long long)sh->sector, in fetch_block()
3685 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in fetch_block()
3687 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); in fetch_block()
3688 set_bit(R5_Wantcompute, &sh->dev[other].flags); in fetch_block()
3689 sh->ops.target = disk_idx; in fetch_block()
3690 sh->ops.target2 = other; in fetch_block()
3709 static void handle_stripe_fill(struct stripe_head *sh, in handle_stripe_fill() argument
3719 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && in handle_stripe_fill()
3720 !sh->reconstruct_state) { in handle_stripe_fill()
3730 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) in handle_stripe_fill()
3731 r5c_make_stripe_write_out(sh); in handle_stripe_fill()
3736 if (fetch_block(sh, s, i, disks)) in handle_stripe_fill()
3740 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_fill()
3751 struct stripe_head *sh, int disks) in handle_stripe_clean_event() argument
3756 struct stripe_head *head_sh = sh; in handle_stripe_clean_event()
3760 if (sh->dev[i].written) { in handle_stripe_clean_event()
3761 dev = &sh->dev[i]; in handle_stripe_clean_event()
3787 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_stripe_clean_event()
3789 !test_bit(STRIPE_DEGRADED, &sh->state), in handle_stripe_clean_event()
3792 sh = list_first_entry(&sh->batch_list, in handle_stripe_clean_event()
3795 if (sh != head_sh) { in handle_stripe_clean_event()
3796 dev = &sh->dev[i]; in handle_stripe_clean_event()
3800 sh = head_sh; in handle_stripe_clean_event()
3801 dev = &sh->dev[i]; in handle_stripe_clean_event()
3806 log_stripe_write_finished(sh); in handle_stripe_clean_event()
3809 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { in handle_stripe_clean_event()
3811 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
3812 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
3813 if (sh->qd_idx >= 0) { in handle_stripe_clean_event()
3814 clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); in handle_stripe_clean_event()
3815 clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); in handle_stripe_clean_event()
3818 clear_bit(STRIPE_DISCARD, &sh->state); in handle_stripe_clean_event()
3825 hash = sh->hash_lock_index; in handle_stripe_clean_event()
3827 remove_hash(sh); in handle_stripe_clean_event()
3830 sh = list_first_entry(&sh->batch_list, in handle_stripe_clean_event()
3832 if (sh != head_sh) in handle_stripe_clean_event()
3835 sh = head_sh; in handle_stripe_clean_event()
3837 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) in handle_stripe_clean_event()
3838 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_clean_event()
3842 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) in handle_stripe_clean_event()
3866 struct stripe_head *sh, in handle_stripe_dirtying() argument
3881 (recovery_cp < MaxSector && sh->sector >= recovery_cp && in handle_stripe_dirtying()
3889 (unsigned long long)sh->sector); in handle_stripe_dirtying()
3892 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
3894 i == sh->pd_idx || i == sh->qd_idx || in handle_stripe_dirtying()
3906 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
3918 (unsigned long long)sh->sector, sh->state, rmw, rcw); in handle_stripe_dirtying()
3919 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_dirtying()
3925 (unsigned long long)sh->sector, rmw); in handle_stripe_dirtying()
3927 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
3930 !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) { in handle_stripe_dirtying()
3945 r5c_use_extra_page(sh); in handle_stripe_dirtying()
3950 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
3957 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
3959 i == sh->pd_idx || i == sh->qd_idx || in handle_stripe_dirtying()
3966 &sh->state)) { in handle_stripe_dirtying()
3973 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
3974 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_dirtying()
3984 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
3986 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
3993 &sh->state)) { in handle_stripe_dirtying()
4001 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
4002 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_dirtying()
4008 (unsigned long long)sh->sector, in handle_stripe_dirtying()
4009 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); in handle_stripe_dirtying()
4013 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in handle_stripe_dirtying()
4014 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
4026 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && in handle_stripe_dirtying()
4028 !test_bit(STRIPE_BIT_DELAY, &sh->state))) in handle_stripe_dirtying()
4029 schedule_reconstruction(sh, s, rcw == 0, 0); in handle_stripe_dirtying()
4033 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks5() argument
4038 BUG_ON(sh->batch_head); in handle_parity_checks5()
4039 set_bit(STRIPE_HANDLE, &sh->state); in handle_parity_checks5()
4041 switch (sh->check_state) { in handle_parity_checks5()
4046 sh->check_state = check_state_run; in handle_parity_checks5()
4048 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
4052 dev = &sh->dev[s->failed_num[0]]; in handle_parity_checks5()
4055 sh->check_state = check_state_idle; in handle_parity_checks5()
4057 dev = &sh->dev[sh->pd_idx]; in handle_parity_checks5()
4060 if (test_bit(STRIPE_INSYNC, &sh->state)) in handle_parity_checks5()
4071 clear_bit(STRIPE_DEGRADED, &sh->state); in handle_parity_checks5()
4072 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
4077 sh->check_state = check_state_idle; in handle_parity_checks5()
4089 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) in handle_parity_checks5()
4093 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
4098 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
4101 (unsigned long long) sh->sector, in handle_parity_checks5()
4102 (unsigned long long) sh->sector + in handle_parity_checks5()
4105 sh->check_state = check_state_compute_run; in handle_parity_checks5()
4106 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in handle_parity_checks5()
4109 &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
4110 sh->ops.target = sh->pd_idx; in handle_parity_checks5()
4111 sh->ops.target2 = -1; in handle_parity_checks5()
4120 __func__, sh->check_state, in handle_parity_checks5()
4121 (unsigned long long) sh->sector); in handle_parity_checks5()
4126 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks6() argument
4130 int pd_idx = sh->pd_idx; in handle_parity_checks6()
4131 int qd_idx = sh->qd_idx; in handle_parity_checks6()
4134 BUG_ON(sh->batch_head); in handle_parity_checks6()
4135 set_bit(STRIPE_HANDLE, &sh->state); in handle_parity_checks6()
4145 switch (sh->check_state) { in handle_parity_checks6()
4153 sh->check_state = check_state_run; in handle_parity_checks6()
4159 if (sh->check_state == check_state_run) in handle_parity_checks6()
4160 sh->check_state = check_state_run_pq; in handle_parity_checks6()
4162 sh->check_state = check_state_run_q; in handle_parity_checks6()
4166 sh->ops.zero_sum_result = 0; in handle_parity_checks6()
4168 if (sh->check_state == check_state_run) { in handle_parity_checks6()
4170 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in handle_parity_checks6()
4173 if (sh->check_state >= check_state_run && in handle_parity_checks6()
4174 sh->check_state <= check_state_run_pq) { in handle_parity_checks6()
4186 sh->check_state = check_state_idle; in handle_parity_checks6()
4189 if (test_bit(STRIPE_INSYNC, &sh->state)) in handle_parity_checks6()
4197 dev = &sh->dev[s->failed_num[1]]; in handle_parity_checks6()
4203 dev = &sh->dev[s->failed_num[0]]; in handle_parity_checks6()
4208 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { in handle_parity_checks6()
4209 dev = &sh->dev[pd_idx]; in handle_parity_checks6()
4214 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { in handle_parity_checks6()
4215 dev = &sh->dev[qd_idx]; in handle_parity_checks6()
4223 dev - (struct r5dev *) &sh->dev)) { in handle_parity_checks6()
4228 clear_bit(STRIPE_DEGRADED, &sh->state); in handle_parity_checks6()
4230 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
4237 sh->check_state = check_state_idle; in handle_parity_checks6()
4243 if (sh->ops.zero_sum_result == 0) { in handle_parity_checks6()
4246 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
4252 sh->check_state = check_state_compute_result; in handle_parity_checks6()
4263 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
4266 (unsigned long long) sh->sector, in handle_parity_checks6()
4267 (unsigned long long) sh->sector + in handle_parity_checks6()
4270 int *target = &sh->ops.target; in handle_parity_checks6()
4272 sh->ops.target = -1; in handle_parity_checks6()
4273 sh->ops.target2 = -1; in handle_parity_checks6()
4274 sh->check_state = check_state_compute_run; in handle_parity_checks6()
4275 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in handle_parity_checks6()
4277 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { in handle_parity_checks6()
4279 &sh->dev[pd_idx].flags); in handle_parity_checks6()
4281 target = &sh->ops.target2; in handle_parity_checks6()
4284 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { in handle_parity_checks6()
4286 &sh->dev[qd_idx].flags); in handle_parity_checks6()
4297 __func__, sh->check_state, in handle_parity_checks6()
4298 (unsigned long long) sh->sector); in handle_parity_checks6()
4303 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) in handle_stripe_expansion() argument
4311 BUG_ON(sh->batch_head); in handle_stripe_expansion()
4312 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); in handle_stripe_expansion()
4313 for (i = 0; i < sh->disks; i++) in handle_stripe_expansion()
4314 if (i != sh->pd_idx && i != sh->qd_idx) { in handle_stripe_expansion()
4319 sector_t bn = raid5_compute_blocknr(sh, i, 1); in handle_stripe_expansion()
4339 sh->dev[i].page, 0, 0, STRIPE_SIZE, in handle_stripe_expansion()
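
The expansion references above copy only data slots: parity slots are skipped, and each remaining block is translated back to an array-wide block number. A reduced model, with compute_blocknr() as a hypothetical stand-in for raid5_compute_blocknr() (the real mapping depends on the layout algorithm):

#include <stdio.h>

struct stripe { int disks, pd_idx, qd_idx; };

/* Hypothetical stand-in for raid5_compute_blocknr(): translate
 * (stripe number, slot) into an array-wide data block number. */
static long compute_blocknr(const struct stripe *sh, long stripe_nr, int slot)
{
    int data_slot = slot;
    if (slot > sh->pd_idx)
        data_slot--;                       /* parity slots hold no data */
    if (sh->qd_idx >= 0 && slot > sh->qd_idx)
        data_slot--;
    return stripe_nr * (sh->disks - 2) + data_slot;
}

int main(void)
{
    struct stripe sh = { .disks = 6, .pd_idx = 4, .qd_idx = 5 };
    for (int i = 0; i < sh.disks; i++) {
        if (i == sh.pd_idx || i == sh.qd_idx)
            continue;                      /* expansion copies data only */
        printf("slot %d -> block %ld\n", i, compute_blocknr(&sh, 7, i));
    }
    return 0;
}
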
4374 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) in analyse_stripe() argument
4376 struct r5conf *conf = sh->raid_conf; in analyse_stripe()
4377 int disks = sh->disks; in analyse_stripe()
4384 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head; in analyse_stripe()
4385 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; in analyse_stripe()
4398 dev = &sh->dev[i]; in analyse_stripe()
4409 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) in analyse_stripe()
4438 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS && in analyse_stripe()
4439 !is_badblock(rdev, sh->sector, STRIPE_SECTORS, in analyse_stripe()
4453 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, in analyse_stripe()
4480 else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) in analyse_stripe()
4550 if (test_bit(STRIPE_SYNCING, &sh->state)) { in analyse_stripe()
4560 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
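
analyse_stripe() condenses per-device flags into a stripe_head_state summary that drives every later decision in handle_stripe(). A trimmed model with only a few flags and counters (field names mirror the kernel's, types are reduced):

#include <stdio.h>

#define R5_UPTODATE (1 << 0)
#define R5_LOCKED   (1 << 1)
#define R5_Wantfill (1 << 2)

struct dev   { unsigned long flags; int towrite; int faulty; };
struct state { int locked, uptodate, to_write, to_fill, failed; };

static void analyse(const struct dev *devs, int disks, struct state *s)
{
    *s = (struct state){0};
    for (int i = 0; i < disks; i++) {
        const struct dev *d = &devs[i];
        if (d->flags & R5_LOCKED)   s->locked++;
        if (d->flags & R5_UPTODATE) s->uptodate++;
        if (d->flags & R5_Wantfill) s->to_fill++;
        if (d->towrite)             s->to_write++;
        if (d->faulty)              s->failed++;
    }
}

int main(void)
{
    struct dev devs[4] = { { R5_UPTODATE, 1, 0 }, { R5_LOCKED, 0, 0 },
                           { R5_Wantfill, 0, 0 }, { 0, 0, 1 } };
    struct state s;

    analyse(devs, 4, &s);
    printf("locked=%d uptodate=%d to_write=%d to_fill=%d failed=%d\n",
           s.locked, s.uptodate, s.to_write, s.to_fill, s.failed);
    return 0;
}
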
4569 static int clear_batch_ready(struct stripe_head *sh) in clear_batch_ready() argument
4576 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) in clear_batch_ready()
4577 return (sh->batch_head && sh->batch_head != sh); in clear_batch_ready()
4578 spin_lock(&sh->stripe_lock); in clear_batch_ready()
4579 if (!sh->batch_head) { in clear_batch_ready()
4580 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4588 if (sh->batch_head != sh) { in clear_batch_ready()
4589 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4592 spin_lock(&sh->batch_lock); in clear_batch_ready()
4593 list_for_each_entry(tmp, &sh->batch_list, batch_list) in clear_batch_ready()
4595 spin_unlock(&sh->batch_lock); in clear_batch_ready()
4596 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
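
clear_batch_ready() answers one question: may the caller handle this stripe, or does it belong to another stripe's batch? A model of that return contract, with plain fields standing in for the atomic bitop and the stripe_lock/batch_lock pair:

#include <stdbool.h>
#include <stdio.h>

struct sh { struct sh *batch_head; bool batch_ready; };

/* Returns 0 when the stripe may be handled, nonzero when it is a
 * member of another stripe's batch and must be left to that head. */
static int model_clear_batch_ready(struct sh *sh)
{
    if (!sh->batch_ready)                 /* bit was already clear */
        return sh->batch_head && sh->batch_head != sh;
    sh->batch_ready = false;
    if (!sh->batch_head)                  /* not batched at all */
        return 0;
    return sh->batch_head != sh;          /* member: skip; head: handle */
}

int main(void)
{
    struct sh head   = { .batch_head = NULL,  .batch_ready = true };
    struct sh member = { .batch_head = &head, .batch_ready = true };

    head.batch_head = &head;              /* a batch head points at itself */
    printf("head skip=%d member skip=%d\n",
           model_clear_batch_ready(&head),
           model_clear_batch_ready(&member));
    return 0;
}
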
4608 struct stripe_head *sh, *next; in break_stripe_batch_list() local
4612 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { in break_stripe_batch_list()
4614 list_del_init(&sh->batch_list); in break_stripe_batch_list()
4616 WARN_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | in break_stripe_batch_list()
4628 "stripe state: %lx\n", sh->state); in break_stripe_batch_list()
4633 set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | in break_stripe_batch_list()
4639 sh->check_state = head_sh->check_state; in break_stripe_batch_list()
4640 sh->reconstruct_state = head_sh->reconstruct_state; in break_stripe_batch_list()
4641 spin_lock_irq(&sh->stripe_lock); in break_stripe_batch_list()
4642 sh->batch_head = NULL; in break_stripe_batch_list()
4643 spin_unlock_irq(&sh->stripe_lock); in break_stripe_batch_list()
4644 for (i = 0; i < sh->disks; i++) { in break_stripe_batch_list()
4645 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in break_stripe_batch_list()
4647 sh->dev[i].flags = head_sh->dev[i].flags & in break_stripe_batch_list()
4651 sh->state & handle_flags) in break_stripe_batch_list()
4652 set_bit(STRIPE_HANDLE, &sh->state); in break_stripe_batch_list()
4653 raid5_release_stripe(sh); in break_stripe_batch_list()
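
break_stripe_batch_list() unlinks every member, copies selected result state from the batch head, clears batch_head, and releases the member. The same shape with a singly linked list instead of the kernel's list_head, and free() standing in for raid5_release_stripe():

#include <stdio.h>
#include <stdlib.h>

struct sh {
    int check_state, reconstruct_state;
    struct sh *batch_head, *next;
};

static void break_batch(struct sh *head)
{
    struct sh *m, *next;

    for (m = head->next; m; m = next) {
        next = m->next;
        m->next = NULL;                         /* list_del_init() */
        m->check_state = head->check_state;     /* inherit head results */
        m->reconstruct_state = head->reconstruct_state;
        m->batch_head = NULL;                   /* stands alone from now on */
        free(m);                                /* raid5_release_stripe() */
    }
    head->next = NULL;
}

int main(void)
{
    struct sh *m1 = calloc(1, sizeof *m1), *m2 = calloc(1, sizeof *m2);
    struct sh head = { .check_state = 1, .next = m1 };

    m1->next = m2;
    m1->batch_head = m2->batch_head = &head;
    break_batch(&head);
    printf("batch emptied: %s\n", head.next ? "no" : "yes");
    return 0;
}
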
4668 static void handle_stripe(struct stripe_head *sh) in handle_stripe() argument
4671 struct r5conf *conf = sh->raid_conf; in handle_stripe()
4674 int disks = sh->disks; in handle_stripe()
4677 clear_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4678 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { in handle_stripe()
4681 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4685 if (clear_batch_ready(sh)) { in handle_stripe()
4686 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); in handle_stripe()
4690 if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) in handle_stripe()
4691 break_stripe_batch_list(sh, 0); in handle_stripe()
4693 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { in handle_stripe()
4694 spin_lock(&sh->stripe_lock); in handle_stripe()
4699 if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) && in handle_stripe()
4700 !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) && in handle_stripe()
4701 !test_bit(STRIPE_DISCARD, &sh->state) && in handle_stripe()
4702 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { in handle_stripe()
4703 set_bit(STRIPE_SYNCING, &sh->state); in handle_stripe()
4704 clear_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
4705 clear_bit(STRIPE_REPLACED, &sh->state); in handle_stripe()
4707 spin_unlock(&sh->stripe_lock); in handle_stripe()
4709 clear_bit(STRIPE_DELAYED, &sh->state); in handle_stripe()
4713 (unsigned long long)sh->sector, sh->state, in handle_stripe()
4714 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, in handle_stripe()
4715 sh->check_state, sh->reconstruct_state); in handle_stripe()
4717 analyse_stripe(sh, &s); in handle_stripe()
4719 if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) in handle_stripe()
4724 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4731 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4739 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { in handle_stripe()
4741 set_bit(STRIPE_BIOFILL_RUN, &sh->state); in handle_stripe()
4757 sh->check_state = 0; in handle_stripe()
4758 sh->reconstruct_state = 0; in handle_stripe()
4759 break_stripe_batch_list(sh, 0); in handle_stripe()
4761 handle_failed_stripe(conf, sh, &s, disks); in handle_stripe()
4763 handle_failed_sync(conf, sh, &s); in handle_stripe()
4770 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) in handle_stripe()
4772 if (sh->reconstruct_state == reconstruct_state_drain_result || in handle_stripe()
4773 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { in handle_stripe()
4774 sh->reconstruct_state = reconstruct_state_idle; in handle_stripe()
4779 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && in handle_stripe()
4780 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); in handle_stripe()
4781 BUG_ON(sh->qd_idx >= 0 && in handle_stripe()
4782 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && in handle_stripe()
4783 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); in handle_stripe()
4785 struct r5dev *dev = &sh->dev[i]; in handle_stripe()
4787 (i == sh->pd_idx || i == sh->qd_idx || in handle_stripe()
4797 ((i == sh->pd_idx || i == sh->qd_idx) && in handle_stripe()
4799 set_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
4802 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in handle_stripe()
4810 pdev = &sh->dev[sh->pd_idx]; in handle_stripe()
4811 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) in handle_stripe()
4812 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); in handle_stripe()
4813 qdev = &sh->dev[sh->qd_idx]; in handle_stripe()
4814 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) in handle_stripe()
4815 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) in handle_stripe()
4827 handle_stripe_clean_event(conf, sh, disks); in handle_stripe()
4830 r5c_handle_cached_data_endio(conf, sh, disks); in handle_stripe()
4831 log_stripe_write_finished(sh); in handle_stripe()
4842 handle_stripe_fill(sh, &s, disks); in handle_stripe()
4849 r5c_finish_stripe_write_out(conf, sh, &s); in handle_stripe()
4860 if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) { in handle_stripe()
4863 handle_stripe_dirtying(conf, sh, &s, disks); in handle_stripe()
4869 ret = r5c_try_caching_write(conf, sh, &s, in handle_stripe()
4880 (!test_bit(STRIPE_R5C_CACHING, &sh->state) && in handle_stripe()
4882 ret = handle_stripe_dirtying(conf, sh, &s, in handle_stripe()
4895 if (sh->check_state || in handle_stripe()
4897 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && in handle_stripe()
4898 !test_bit(STRIPE_INSYNC, &sh->state))) { in handle_stripe()
4900 handle_parity_checks6(conf, sh, &s, disks); in handle_stripe()
4902 handle_parity_checks5(conf, sh, &s, disks); in handle_stripe()
4906 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) in handle_stripe()
4907 && !test_bit(STRIPE_REPLACED, &sh->state)) { in handle_stripe()
4910 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { in handle_stripe()
4911 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); in handle_stripe()
4912 set_bit(R5_WantReplace, &sh->dev[i].flags); in handle_stripe()
4913 set_bit(R5_LOCKED, &sh->dev[i].flags); in handle_stripe()
4917 set_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
4918 set_bit(STRIPE_REPLACED, &sh->state); in handle_stripe()
4921 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && in handle_stripe()
4922 test_bit(STRIPE_INSYNC, &sh->state)) { in handle_stripe()
4924 clear_bit(STRIPE_SYNCING, &sh->state); in handle_stripe()
4925 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_stripe()
4934 struct r5dev *dev = &sh->dev[s.failed_num[i]]; in handle_stripe()
4954 if (sh->reconstruct_state == reconstruct_state_result) { in handle_stripe()
4956 = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); in handle_stripe()
4961 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe()
4962 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4972 sh->reconstruct_state = reconstruct_state_idle; in handle_stripe()
4973 clear_bit(STRIPE_EXPANDING, &sh->state); in handle_stripe()
4975 set_bit(R5_Wantwrite, &sh->dev[i].flags); in handle_stripe()
4976 set_bit(R5_LOCKED, &sh->dev[i].flags); in handle_stripe()
4981 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && in handle_stripe()
4982 !sh->reconstruct_state) { in handle_stripe()
4984 sh->disks = conf->raid_disks; in handle_stripe()
4985 stripe_set_idx(sh->sector, conf, 0, sh); in handle_stripe()
4986 schedule_reconstruction(sh, &s, 1, 1); in handle_stripe()
4987 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { in handle_stripe()
4988 clear_bit(STRIPE_EXPAND_READY, &sh->state); in handle_stripe()
4995 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) in handle_stripe()
4996 handle_stripe_expansion(conf, sh); in handle_stripe()
5016 struct r5dev *dev = &sh->dev[i]; in handle_stripe()
5020 if (!rdev_set_badblocks(rdev, sh->sector, in handle_stripe()
5027 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
5036 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
5043 raid_run_ops(sh, s.ops_request); in handle_stripe()
5045 ops_run_io(sh, &s); in handle_stripe()
5058 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); in handle_stripe()
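
handle_stripe() runs its phases in a fixed order, which the reference lines above trace: batch checks, device analysis, failure handling, write completion, fills, dirtying, parity checks, replacement, expansion, and finally raid_run_ops()/ops_run_io(). A compressed outline with every body elided (hypothetical reduced summary struct):

#include <stdbool.h>

struct summary { bool failed, written, to_write, syncing, expanding; };

static void model_handle_stripe(struct summary *s)
{
    /* 0. take the STRIPE_ACTIVE lock; batch members defer to their head */
    /* 1. analyse devices into the summary (see the analyse sketch above) */
    if (s->failed)    { /* fail queued bios, abort any sync in progress */ }
    if (s->written)   { /* return finished writes to their submitters */ }
    if (s->to_write)  { /* choose read-modify-write vs reconstruct-write */ }
    if (s->syncing)   { /* run the handle_parity_checks5()/6() machine */ }
    if (s->expanding) { /* copy blocks toward the new layout */ }
    /* 2. raid_run_ops() for compute/xor work, then ops_run_io() for disk I/O */
}

int main(void)
{
    struct summary s = { .syncing = true };
    model_handle_stripe(&s);
    return 0;
}
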
5066 struct stripe_head *sh; in raid5_activate_delayed() local
5067 sh = list_entry(l, struct stripe_head, lru); in raid5_activate_delayed()
5069 clear_bit(STRIPE_DELAYED, &sh->state); in raid5_activate_delayed()
5070 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in raid5_activate_delayed()
5072 list_add_tail(&sh->lru, &conf->hold_list); in raid5_activate_delayed()
5073 raid5_wakeup_stripe_thread(sh); in raid5_activate_delayed()
5086 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); in activate_bit_delay() local
5088 list_del_init(&sh->lru); in activate_bit_delay()
5089 atomic_inc(&sh->count); in activate_bit_delay()
5090 hash = sh->hash_lock_index; in activate_bit_delay()
5091 __release_stripe(conf, sh, &temp_inactive_list[hash]); in activate_bit_delay()
5327 struct stripe_head *sh, *tmp; in __get_priority_stripe() local
5337 sh = NULL; in __get_priority_stripe()
5363 sh = list_entry(handle_list->next, typeof(*sh), lru); in __get_priority_stripe()
5367 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { in __get_priority_stripe()
5387 sh = tmp; in __get_priority_stripe()
5392 if (sh) { in __get_priority_stripe()
5400 if (!sh) { in __get_priority_stripe()
5410 sh->group = NULL; in __get_priority_stripe()
5412 list_del_init(&sh->lru); in __get_priority_stripe()
5413 BUG_ON(atomic_inc_return(&sh->count) != 1); in __get_priority_stripe()
5414 return sh; in __get_priority_stripe()
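
__get_priority_stripe() prefers handle_list and only falls back to a delayed stripe on hold_list when bypass accounting permits, so preread stripes cannot starve normal work. The policy in miniature (arrays stand in for the kernel lists; the real bypass test also weighs bypass_count against bypass_threshold):

#include <stdio.h>

struct q { int n; int id[8]; };

/* Prefer handle_list; promote from hold_list only when allowed. */
static int pick(struct q *handle, struct q *hold, int allow_bypass)
{
    if (handle->n)                  /* normal path */
        return handle->id[--handle->n];
    if (hold->n && allow_bypass)    /* promote a delayed preread stripe */
        return hold->id[--hold->n];
    return -1;                      /* nothing to do */
}

int main(void)
{
    struct q handle = { 1, { 42 } }, hold = { 1, { 7 } };

    for (int i = 0; i < 3; i++)
        printf("picked %d\n", pick(&handle, &hold, 1));
    return 0;
}
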
5427 struct stripe_head *sh; in raid5_unplug() local
5436 sh = list_first_entry(&cb->list, struct stripe_head, lru); in raid5_unplug()
5437 list_del_init(&sh->lru); in raid5_unplug()
5444 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); in raid5_unplug()
5449 hash = sh->hash_lock_index; in raid5_unplug()
5450 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); in raid5_unplug()
5463 struct stripe_head *sh) in release_stripe_plug() argument
5471 raid5_release_stripe(sh); in release_stripe_plug()
5484 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) in release_stripe_plug()
5485 list_add_tail(&sh->lru, &cb->list); in release_stripe_plug()
5487 raid5_release_stripe(sh); in release_stripe_plug()
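
release_stripe_plug() and raid5_unplug() together implement block-layer plugging: releases are parked on a per-task list and flushed as one batch at unplug time, amortizing the wakeup cost. A minimal model under those assumptions (no locking, fixed-size list):

#include <stdio.h>

#define MAX_PLUGGED 16

struct plug { int n; int stripe[MAX_PLUGGED]; };

static void release_stripe_plug(struct plug *p, int stripe)
{
    if (p && p->n < MAX_PLUGGED)
        p->stripe[p->n++] = stripe;   /* park it; released at unplug */
    else
        printf("release %d immediately\n", stripe);
}

static void unplug(struct plug *p)
{
    for (int i = 0; i < p->n; i++)    /* one pass releases the batch */
        printf("release %d from plug\n", p->stripe[i]);
    p->n = 0;
}

int main(void)
{
    struct plug p = { 0 };

    for (int s = 1; s <= 3; s++)
        release_stripe_plug(&p, s);
    unplug(&p);
    return 0;
}
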
5494 struct stripe_head *sh; in make_discard_request() local
5520 sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0); in make_discard_request()
5523 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5524 if (test_bit(STRIPE_SYNCING, &sh->state)) { in make_discard_request()
5525 raid5_release_stripe(sh); in make_discard_request()
5529 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5530 spin_lock_irq(&sh->stripe_lock); in make_discard_request()
5532 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
5534 if (sh->dev[d].towrite || sh->dev[d].toread) { in make_discard_request()
5535 set_bit(R5_Overlap, &sh->dev[d].flags); in make_discard_request()
5536 spin_unlock_irq(&sh->stripe_lock); in make_discard_request()
5537 raid5_release_stripe(sh); in make_discard_request()
5542 set_bit(STRIPE_DISCARD, &sh->state); in make_discard_request()
5544 sh->overwrite_disks = 0; in make_discard_request()
5546 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
5548 sh->dev[d].towrite = bi; in make_discard_request()
5549 set_bit(R5_OVERWRITE, &sh->dev[d].flags); in make_discard_request()
5552 sh->overwrite_disks++; in make_discard_request()
5554 spin_unlock_irq(&sh->stripe_lock); in make_discard_request()
5560 sh->sector, in make_discard_request()
5563 sh->bm_seq = conf->seq_flush + 1; in make_discard_request()
5564 set_bit(STRIPE_BIT_DELAY, &sh->state); in make_discard_request()
5567 set_bit(STRIPE_HANDLE, &sh->state); in make_discard_request()
5568 clear_bit(STRIPE_DELAYED, &sh->state); in make_discard_request()
5569 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in make_discard_request()
5571 release_stripe_plug(mddev, sh); in make_discard_request()
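
make_discard_request() may discard a stripe only when no data device carries a competing bio; it checks for overlap first and only then claims every data slot, skipping pd_idx/qd_idx throughout. The two-pass core, reduced (no stripe_lock, bios become booleans):

#include <stdbool.h>
#include <stdio.h>

struct dev { bool towrite, toread; };

static bool claim_for_discard(struct dev *devs, int disks, int pd, int qd)
{
    for (int d = 0; d < disks; d++) {         /* pass 1: check for overlap */
        if (d == pd || d == qd)
            continue;
        if (devs[d].towrite || devs[d].toread)
            return false;                     /* caller must wait and retry */
    }
    for (int d = 0; d < disks; d++)           /* pass 2: claim data slots */
        if (d != pd && d != qd)
            devs[d].towrite = true;
    return true;
}

int main(void)
{
    struct dev devs[4] = {{ false, false }};

    printf("claimed=%d\n", claim_for_discard(devs, 4, 2, 3));
    return 0;
}
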
5583 struct stripe_head *sh; in raid5_make_request() local
5676 sh = raid5_get_active_stripe(conf, new_sector, previous, in raid5_make_request()
5678 if (sh) { in raid5_make_request()
5697 raid5_release_stripe(sh); in raid5_make_request()
5707 raid5_release_stripe(sh); in raid5_make_request()
5711 if (test_bit(STRIPE_EXPANDING, &sh->state) || in raid5_make_request()
5712 !add_stripe_bio(sh, bi, dd_idx, rw, previous)) { in raid5_make_request()
5718 raid5_release_stripe(sh); in raid5_make_request()
5724 set_bit(STRIPE_R5C_PREFLUSH, &sh->state); in raid5_make_request()
5729 if (!sh->batch_head) in raid5_make_request()
5730 set_bit(STRIPE_HANDLE, &sh->state); in raid5_make_request()
5731 clear_bit(STRIPE_DELAYED, &sh->state); in raid5_make_request()
5732 if ((!sh->batch_head || sh == sh->batch_head) && in raid5_make_request()
5734 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in raid5_make_request()
5736 release_stripe_plug(mddev, sh); in raid5_make_request()
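
Before raid5_get_active_stripe() can run, the logical sector must be mapped to a stripe number, data-device index and parity index. The arithmetic for the left-symmetric layout, following the same steps the kernel uses for ALGORITHM_LEFT_SYMMETRIC (other layouts rotate differently):

#include <stdio.h>

/* Left-symmetric RAID-5 only; the kernel supports several algorithms. */
static void map_sector(unsigned long long lsec, int raid_disks,
                       int chunk_sectors, unsigned long long *stripe,
                       int *dd_idx, int *pd_idx)
{
    int data_disks = raid_disks - 1;               /* one parity device */
    unsigned long long chunk = lsec / chunk_sectors;
    int slot = (int)(chunk % data_disks);          /* slot before rotation */

    *stripe = chunk / data_disks;
    *pd_idx = data_disks - (int)(*stripe % raid_disks);
    *dd_idx = (*pd_idx + 1 + slot) % raid_disks;
}

int main(void)
{
    unsigned long long stripe;
    int dd, pd;

    /* 4 disks, 512 KiB chunks (1024 sectors): sector 4096 lands on
     * stripe 1, data on disk 0, parity on disk 2. */
    map_sector(4096, 4, 1024, &stripe, &dd, &pd);
    printf("stripe=%llu dd_idx=%d pd_idx=%d\n", stripe, dd, pd);
    return 0;
}
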
5765 struct stripe_head *sh; in reshape_request() local
5916 sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1); in reshape_request()
5917 set_bit(STRIPE_EXPANDING, &sh->state); in reshape_request()
5922 for (j=sh->disks; j--;) { in reshape_request()
5924 if (j == sh->pd_idx) in reshape_request()
5927 j == sh->qd_idx) in reshape_request()
5929 s = raid5_compute_blocknr(sh, j, 0); in reshape_request()
5934 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); in reshape_request()
5935 set_bit(R5_Expanded, &sh->dev[j].flags); in reshape_request()
5936 set_bit(R5_UPTODATE, &sh->dev[j].flags); in reshape_request()
5939 set_bit(STRIPE_EXPAND_READY, &sh->state); in reshape_request()
5940 set_bit(STRIPE_HANDLE, &sh->state); in reshape_request()
5942 list_add(&sh->lru, &stripes); in reshape_request()
5965 sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1); in reshape_request()
5966 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); in reshape_request()
5967 set_bit(STRIPE_HANDLE, &sh->state); in reshape_request()
5968 raid5_release_stripe(sh); in reshape_request()
5975 sh = list_entry(stripes.next, struct stripe_head, lru); in reshape_request()
5976 list_del_init(&sh->lru); in reshape_request()
5977 raid5_release_stripe(sh); in reshape_request()
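
In the destination loop of reshape_request(), a slot whose block number lies beyond the end of the old array has no source to copy from, so it is zero-filled and marked up to date at once (lines 5934-5936 above). Reduced to its essentials, with R5_Expanded/R5_UPTODATE as plain ints:

#include <stdio.h>
#include <string.h>

#define PAGE_BYTES 4096

struct slot { unsigned char page[PAGE_BYTES]; int expanded, uptodate; };

static void init_new_slots(struct slot *s, const long *blocknr, int nslots,
                           long old_array_blocks)
{
    for (int j = 0; j < nslots; j++) {
        if (blocknr[j] < old_array_blocks)
            continue;                       /* copied from a source stripe */
        memset(s[j].page, 0, PAGE_BYTES);   /* no old data exists: zero-fill */
        s[j].expanded = s[j].uptodate = 1;  /* R5_Expanded + R5_UPTODATE */
    }
}

int main(void)
{
    struct slot s[2];
    long blocknr[2] = { 10, 1000 };

    memset(s, 0, sizeof(s));
    init_new_slots(s, blocknr, 2, 100);
    printf("slot0 uptodate=%d, slot1 uptodate=%d\n",
           s[0].uptodate, s[1].uptodate);
    return 0;
}
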
6026 struct stripe_head *sh; in raid5_sync_request() local
6084 sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0); in raid5_sync_request()
6085 if (sh == NULL) { in raid5_sync_request()
6086 sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0); in raid5_sync_request()
6107 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); in raid5_sync_request()
6108 set_bit(STRIPE_HANDLE, &sh->state); in raid5_sync_request()
6110 raid5_release_stripe(sh); in raid5_sync_request()
6128 struct stripe_head *sh; in retry_aligned_read() local
6149 sh = raid5_get_active_stripe(conf, sector, 0, 1, 1); in retry_aligned_read()
6151 if (!sh) { in retry_aligned_read()
6158 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { in retry_aligned_read()
6159 raid5_release_stripe(sh); in retry_aligned_read()
6165 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); in retry_aligned_read()
6166 handle_stripe(sh); in retry_aligned_read()
6167 raid5_release_stripe(sh); in retry_aligned_read()
6184 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; in handle_active_stripes() local
6189 (sh = __get_priority_stripe(conf, group)) != NULL) in handle_active_stripes()
6190 batch[batch_size++] = sh; in handle_active_stripes()
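
handle_active_stripes() drains stripes in batches of MAX_STRIPE_BATCH rather than one at a time, so the expensive conf->device_lock is taken once per batch. The loop at 6189-6190 in miniature (next_stripe() is a hypothetical stand-in for __get_priority_stripe()):

#include <stdio.h>

#define MAX_STRIPE_BATCH 8

static int next_stripe(void)          /* stand-in for __get_priority_stripe() */
{
    static int remaining = 3;
    return remaining-- > 0 ? remaining + 1 : -1;
}

int main(void)
{
    int batch[MAX_STRIPE_BATCH], batch_size = 0, shn;

    while (batch_size < MAX_STRIPE_BATCH && (shn = next_stripe()) != -1)
        batch[batch_size++] = shn;
    for (int i = 0; i < batch_size; i++)
        printf("handle stripe %d\n", batch[i]);   /* handle_stripe() */
    return 0;
}
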