Lines Matching full:device

52 	struct drbd_device *device;  in drbd_md_endio()  local
54 device = bio->bi_private; in drbd_md_endio()
55 device->md_io.error = blk_status_to_errno(bio->bi_status); in drbd_md_endio()
58 if (device->ldev) in drbd_md_endio()
59 put_ldev(device); in drbd_md_endio()
63 * to timeout on the lower level device, and eventually detach from it. in drbd_md_endio()
71 * ASSERT(atomic_read(&device->md_io_in_use) == 1) there. in drbd_md_endio()
73 drbd_md_put_buffer(device); in drbd_md_endio()
74 device->md_io.done = 1; in drbd_md_endio()
75 wake_up(&device->misc_wait); in drbd_md_endio()
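
The ordering visible above is the point of the comment fragments: drbd_md_endio() drops its reference on the metadata IO buffer (drbd_md_put_buffer()) before it sets md_io.done and wakes the waiter, so a submitter that already timed out on the lower-level device can recycle the buffer as soon as the refcount drains. A minimal userspace sketch of that handshake, with hypothetical names standing in for the kernel primitives:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Models device->md_io: one metadata IO buffer guarded by a refcount. */
    struct md_io {
        atomic_int in_use;  /* like device->md_io_in_use */
        atomic_int done;    /* like device->md_io.done */
        int error;
    };

    /* Completion side: release the buffer reference BEFORE publishing done,
     * mirroring drbd_md_put_buffer() -> md_io.done = 1 -> wake_up(). */
    static void md_endio(struct md_io *io, int error)
    {
        io->error = error;
        atomic_fetch_sub(&io->in_use, 1);  /* drbd_md_put_buffer() */
        atomic_store(&io->done, 1);        /* device->md_io.done = 1 */
        /* a real implementation would wake_up(&device->misc_wait) here */
    }

    int main(void)
    {
        struct md_io io = { .in_use = 1, .done = 0, .error = 0 };
        md_endio(&io, 0);
        printf("done=%d error=%d in_use=%d\n",
               atomic_load(&io.done), io.error, atomic_load(&io.in_use));
        return 0;
    }
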
85 struct drbd_device *device = peer_device->device; in drbd_endio_read_sec_final() local
87 spin_lock_irqsave(&device->resource->req_lock, flags); in drbd_endio_read_sec_final()
88 device->read_cnt += peer_req->i.size >> 9; in drbd_endio_read_sec_final()
90 if (list_empty(&device->read_ee)) in drbd_endio_read_sec_final()
91 wake_up(&device->ee_wait); in drbd_endio_read_sec_final()
93 __drbd_chk_io_error(device, DRBD_READ_ERROR); in drbd_endio_read_sec_final()
94 spin_unlock_irqrestore(&device->resource->req_lock, flags); in drbd_endio_read_sec_final()
97 put_ldev(device); in drbd_endio_read_sec_final()
106 struct drbd_device *device = peer_device->device; in drbd_endio_write_sec_final() local
126 inc_unacked(device); in drbd_endio_write_sec_final()
127 drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size); in drbd_endio_write_sec_final()
130 spin_lock_irqsave(&device->resource->req_lock, flags); in drbd_endio_write_sec_final()
131 device->writ_cnt += peer_req->i.size >> 9; in drbd_endio_write_sec_final()
132 list_move_tail(&peer_req->w.list, &device->done_ee); in drbd_endio_write_sec_final()
142 do_wake = list_empty(block_id == ID_SYNCER ? &device->sync_ee : &device->active_ee); in drbd_endio_write_sec_final()
147 __drbd_chk_io_error(device, DRBD_WRITE_ERROR); in drbd_endio_write_sec_final()
150 kref_get(&device->kref); /* put is in drbd_send_acks_wf() */ in drbd_endio_write_sec_final()
152 kref_put(&device->kref, drbd_destroy_device); in drbd_endio_write_sec_final()
154 spin_unlock_irqrestore(&device->resource->req_lock, flags); in drbd_endio_write_sec_final()
157 drbd_rs_complete_io(device, i.sector); in drbd_endio_write_sec_final()
160 wake_up(&device->ee_wait); in drbd_endio_write_sec_final()
163 drbd_al_complete_io(device, &i); in drbd_endio_write_sec_final()
165 put_ldev(device); in drbd_endio_write_sec_final()
174 struct drbd_device *device = peer_req->peer_device->device; in drbd_peer_request_endio() local
180 drbd_warn(device, "%s: error=%d s=%llus\n", in drbd_peer_request_endio()
198 drbd_panic_after_delayed_completion_of_aborted_request(struct drbd_device *device) in drbd_panic_after_delayed_completion_of_aborted_request() argument
201 device->minor, device->resource->name, device->vnr); in drbd_panic_after_delayed_completion_of_aborted_request()
210 struct drbd_device *device = req->device; in drbd_request_endio() local
230 * If later the local backing device "recovers", and now DMAs some data in drbd_request_endio()
244 	drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n"); in drbd_request_endio()
247 drbd_panic_after_delayed_completion_of_aborted_request(device); in drbd_request_endio()
278 spin_lock_irqsave(&device->resource->req_lock, flags); in drbd_request_endio()
280 spin_unlock_irqrestore(&device->resource->req_lock, flags); in drbd_request_endio()
281 put_ldev(device); in drbd_request_endio()
284 complete_master_bio(device, &m); in drbd_request_endio()
342 struct drbd_device *device = peer_device->device; in w_e_send_csum() local
364 drbd_free_peer_req(device, peer_req); in w_e_send_csum()
366 inc_rs_pending(device); in w_e_send_csum()
372 drbd_err(device, "kmalloc() of digest failed.\n"); in w_e_send_csum()
378 drbd_free_peer_req(device, peer_req); in w_e_send_csum()
381 drbd_err(device, "drbd_send_drequest(..., csum) failed\n"); in w_e_send_csum()
389 struct drbd_device *device = peer_device->device; in read_for_csum() local
392 if (!get_ldev(device)) in read_for_csum()
403 spin_lock_irq(&device->resource->req_lock); in read_for_csum()
404 list_add_tail(&peer_req->w.list, &device->read_ee); in read_for_csum()
405 spin_unlock_irq(&device->resource->req_lock); in read_for_csum()
407 atomic_add(size >> 9, &device->rs_sect_ev); in read_for_csum()
408 if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, in read_for_csum()
416 spin_lock_irq(&device->resource->req_lock); in read_for_csum()
418 spin_unlock_irq(&device->resource->req_lock); in read_for_csum()
420 drbd_free_peer_req(device, peer_req); in read_for_csum()
422 put_ldev(device); in read_for_csum()
428 struct drbd_device *device = in w_resync_timer() local
431 switch (device->state.conn) { in w_resync_timer()
433 make_ov_request(device, cancel); in w_resync_timer()
436 make_resync_request(device, cancel); in w_resync_timer()
445 struct drbd_device *device = from_timer(device, t, resync_timer); in resync_timer_fn() local
448 &first_peer_device(device)->connection->sender_work, in resync_timer_fn()
449 &device->resync_work); in resync_timer_fn()
496 static int drbd_rs_controller(struct drbd_device *device, unsigned int sect_in) in drbd_rs_controller() argument
508 dc = rcu_dereference(device->ldev->disk_conf); in drbd_rs_controller()
509 plan = rcu_dereference(device->rs_plan_s); in drbd_rs_controller()
513 if (device->rs_in_flight + sect_in == 0) { /* At start of resync */ in drbd_rs_controller()
520 correction = want - device->rs_in_flight - plan->total; in drbd_rs_controller()
540 drbd_warn(device, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n", in drbd_rs_controller()
541 sect_in, device->rs_in_flight, want, correction, in drbd_rs_controller()
542 steps, cps, device->rs_planed, curr_corr, req_sect); in drbd_rs_controller()
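
Taken together, the matches above show that drbd_rs_controller() is a fill-target controller: each tick it compares the number of resync sectors it wants in flight against what is actually in flight plus what its plan FIFO already promises, spreads the difference over the plan's steps, and requests this tick's share. A standalone model of that arithmetic, pieced together from the lines shown; STEPS, the plan layout, and the demo numbers are assumptions, and the kernel additionally clamps the result against c_max_rate:

    #include <stdio.h>

    #define STEPS 10  /* plan-ahead steps; the kernel derives this from the config */

    struct plan {
        int slot[STEPS];  /* per-step planned correction, a ring FIFO */
        int head;
        int total;        /* sum of all queued corrections */
    };

    /* Push 0 into the FIFO and pop the oldest planned correction. */
    static int plan_step(struct plan *p)
    {
        int out = p->slot[p->head];
        p->slot[p->head] = 0;
        p->head = (p->head + 1) % STEPS;
        return out;
    }

    /* Add val to every queued step, like fifo_add_val() in the kernel. */
    static void plan_add(struct plan *p, int val)
    {
        for (int i = 0; i < STEPS; i++)
            p->slot[i] += val;
        p->total += val * STEPS;
    }

    /* sect_in: sectors completed since last tick; want: fill target. */
    static int controller(struct plan *p, int sect_in, int in_flight, int want)
    {
        int correction = want - in_flight - p->total;
        int cps = correction / STEPS;   /* spread over the plan */
        plan_add(p, cps);
        int curr_corr = plan_step(p);   /* this tick's share */
        p->total -= curr_corr;
        int req_sect = sect_in + curr_corr;
        return req_sect < 0 ? 0 : req_sect;
    }

    int main(void)
    {
        struct plan p = { 0 };
        int req = 0;
        for (int tick = 0; tick < 5; tick++) {
            /* pretend every sector requested last tick completed */
            req = controller(&p, req, 0, 1000);
            printf("tick %d: request %d sectors\n", tick, req);
        }
        return 0;
    }
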
548 static int drbd_rs_number_requests(struct drbd_device *device) in drbd_rs_number_requests() argument
553 sect_in = atomic_xchg(&device->rs_sect_in, 0); in drbd_rs_number_requests()
554 device->rs_in_flight -= sect_in; in drbd_rs_number_requests()
557 mxb = drbd_get_max_buffers(device) / 2; in drbd_rs_number_requests()
558 if (rcu_dereference(device->rs_plan_s)->size) { in drbd_rs_number_requests()
559 number = drbd_rs_controller(device, sect_in) >> (BM_BLOCK_SHIFT - 9); in drbd_rs_number_requests()
560 device->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME; in drbd_rs_number_requests()
562 device->c_sync_rate = rcu_dereference(device->ldev->disk_conf)->resync_rate; in drbd_rs_number_requests()
563 number = SLEEP_TIME * device->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ); in drbd_rs_number_requests()
577 if (mxb - device->rs_in_flight/8 < number) in drbd_rs_number_requests()
578 number = mxb - device->rs_in_flight/8; in drbd_rs_number_requests()
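
For the fixed-rate branch a quick unit check helps: assuming DRBD's usual constants (SLEEP_TIME = HZ/10, i.e. a 100 ms tick, and BM_BLOCK_SIZE = 4096, one 4 KiB request per bitmap bit), a c_sync_rate of 4000 KiB/s works out to 100 requests per tick, which the mxb check then throttles against in-flight sectors. A hedged worked example; the concrete values are made up:

    #include <stdio.h>

    /* Constants as assumed from DRBD: 4 KiB per bitmap bit, 100 ms tick,
     * resync rate in KiB/s. */
    #define HZ            1000
    #define SLEEP_TIME    (HZ / 10)
    #define BM_BLOCK_SIZE 4096

    int main(void)
    {
        int c_sync_rate = 4000;  /* KiB/s */
        int rs_in_flight = 160;  /* sectors currently on the wire */
        int mxb = 64;            /* half of max-buffers, per the listing */

        /* fixed-rate branch, as in the matches above */
        int number = SLEEP_TIME * c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
        printf("fixed-rate: %d requests per tick\n", number);  /* 100 */

        /* throttle against available buffers, as in the mxb check above */
        if (mxb - rs_in_flight / 8 < number)
            number = mxb - rs_in_flight / 8;
        printf("after throttle: %d\n", number);                /* 44 */
        return 0;
    }
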
583 static int make_resync_request(struct drbd_device *const device, int cancel) in make_resync_request() argument
585 struct drbd_peer_device *const peer_device = first_peer_device(device); in make_resync_request()
589 const sector_t capacity = get_capacity(device->vdisk); in make_resync_request()
599 if (device->rs_total == 0) { in make_resync_request()
601 drbd_resync_finished(device); in make_resync_request()
605 if (!get_ldev(device)) { in make_resync_request()
606 /* Since we only need to access device->rsync a in make_resync_request()
607 get_ldev_if_state(device,D_FAILED) would be sufficient, but in make_resync_request()
610 drbd_err(device, "Disk broke down during resync!\n"); in make_resync_request()
616 discard_granularity = rcu_dereference(device->ldev->disk_conf)->rs_discard_granularity; in make_resync_request()
620 max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9; in make_resync_request()
621 number = drbd_rs_number_requests(device); in make_resync_request()
646 bit = drbd_bm_find_next(device, device->bm_resync_fo); in make_resync_request()
649 device->bm_resync_fo = drbd_bm_bits(device); in make_resync_request()
650 put_ldev(device); in make_resync_request()
656 if (drbd_try_rs_begin_io(device, sector)) { in make_resync_request()
657 device->bm_resync_fo = bit; in make_resync_request()
660 device->bm_resync_fo = bit + 1; in make_resync_request()
662 if (unlikely(drbd_bm_test_bit(device, bit) == 0)) { in make_resync_request()
663 drbd_rs_complete_io(device, sector); in make_resync_request()
695 if (drbd_bm_test_bit(device, bit+1) != 1) in make_resync_request()
706 device->bm_resync_fo = bit + 1; in make_resync_request()
713 if (device->use_csums) { in make_resync_request()
716 put_ldev(device); in make_resync_request()
719 drbd_rs_complete_io(device, sector); in make_resync_request()
720 device->bm_resync_fo = BM_SECT_TO_BIT(sector); in make_resync_request()
732 inc_rs_pending(device); in make_resync_request()
737 drbd_err(device, "drbd_send_drequest() failed, aborting...\n"); in make_resync_request()
738 dec_rs_pending(device); in make_resync_request()
739 put_ldev(device); in make_resync_request()
745 if (device->bm_resync_fo >= drbd_bm_bits(device)) { in make_resync_request()
752 put_ldev(device); in make_resync_request()
757 device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9)); in make_resync_request()
758 mod_timer(&device->resync_timer, jiffies + SLEEP_TIME); in make_resync_request()
759 put_ldev(device); in make_resync_request()
763 static int make_ov_request(struct drbd_device *device, int cancel) in make_ov_request() argument
767 const sector_t capacity = get_capacity(device->vdisk); in make_ov_request()
773 number = drbd_rs_number_requests(device); in make_ov_request()
775 sector = device->ov_position; in make_ov_request()
784 && verify_can_do_stop_sector(device) in make_ov_request()
785 && sector >= device->ov_stop_sector; in make_ov_request()
791 if (drbd_try_rs_begin_io(device, sector)) { in make_ov_request()
792 device->ov_position = sector; in make_ov_request()
799 inc_rs_pending(device); in make_ov_request()
800 if (drbd_send_ov_request(first_peer_device(device), sector, size)) { in make_ov_request()
801 dec_rs_pending(device); in make_ov_request()
806 device->ov_position = sector; in make_ov_request()
809 device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9)); in make_ov_request()
811 mod_timer(&device->resync_timer, jiffies + SLEEP_TIME); in make_ov_request()
819 struct drbd_device *device = dw->device; in w_ov_finished() local
821 ov_out_of_sync_print(device); in w_ov_finished()
822 drbd_resync_finished(device); in w_ov_finished()
831 struct drbd_device *device = dw->device; in w_resync_finished() local
834 drbd_resync_finished(device); in w_resync_finished()
839 static void ping_peer(struct drbd_device *device) in ping_peer() argument
841 struct drbd_connection *connection = first_peer_device(device)->connection; in ping_peer()
846 test_bit(GOT_PING_ACK, &connection->flags) || device->state.conn < C_CONNECTED); in ping_peer()
849 int drbd_resync_finished(struct drbd_device *device) in drbd_resync_finished() argument
851 struct drbd_connection *connection = first_peer_device(device)->connection; in drbd_resync_finished()
862 if (drbd_rs_del_all(device)) { in drbd_resync_finished()
872 dw->device = device; in drbd_resync_finished()
876 drbd_err(device, "Warn failed to drbd_rs_del_all() and to kmalloc(dw).\n"); in drbd_resync_finished()
879 dt = (jiffies - device->rs_start - device->rs_paused) / HZ; in drbd_resync_finished()
883 db = device->rs_total; in drbd_resync_finished()
885 if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T) in drbd_resync_finished()
886 db -= device->ov_left; in drbd_resync_finished()
889 device->rs_paused /= HZ; in drbd_resync_finished()
891 if (!get_ldev(device)) in drbd_resync_finished()
894 ping_peer(device); in drbd_resync_finished()
896 spin_lock_irq(&device->resource->req_lock); in drbd_resync_finished()
897 os = drbd_read_state(device); in drbd_resync_finished()
909 drbd_info(device, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n", in drbd_resync_finished()
911 dt + device->rs_paused, device->rs_paused, dbdt); in drbd_resync_finished()
913 n_oos = drbd_bm_total_weight(device); in drbd_resync_finished()
917 drbd_alert(device, "Online verify found %lu %dk block out of sync!\n", in drbd_resync_finished()
922 D_ASSERT(device, (n_oos - device->rs_failed) == 0); in drbd_resync_finished()
927 if (device->use_csums && device->rs_total) { in drbd_resync_finished()
928 const unsigned long s = device->rs_same_csum; in drbd_resync_finished()
929 const unsigned long t = device->rs_total; in drbd_resync_finished()
933 drbd_info(device, "%u %% had equal checksums, eliminated: %luK; " in drbd_resync_finished()
936 Bit2KB(device->rs_same_csum), in drbd_resync_finished()
937 Bit2KB(device->rs_total - device->rs_same_csum), in drbd_resync_finished()
938 Bit2KB(device->rs_total)); in drbd_resync_finished()
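
The ratio printed by the message above is computed on lines this listing omits (they do not mention device). Computing s*100/t naively overflows an unsigned long for large bitmaps on 32-bit builds; below is a small sketch of the overflow-safe integer-percentage idiom that, as far as I can tell, this statistics block uses (the 100000 threshold is an assumption):

    #include <stdio.h>

    /* Percentage s/t in integer math without overflowing s * 100:
     * only multiply when t is small enough, otherwise divide t first. */
    static unsigned long percent(unsigned long s, unsigned long t)
    {
        if (t == 0)
            return 0;
        return (t < 100000) ? (s * 100) / t : s / (t / 100);
    }

    int main(void)
    {
        printf("%lu%%\n", percent(3, 4));                        /* 75 */
        printf("%lu%%\n", percent(3000000000UL, 4000000000UL));  /* 75, even on 32-bit */
        return 0;
    }
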
942 if (device->rs_failed) { in drbd_resync_finished()
943 drbd_info(device, " %lu failed blocks\n", device->rs_failed); in drbd_resync_finished()
957 if (device->p_uuid) { in drbd_resync_finished()
960 _drbd_uuid_set(device, i, device->p_uuid[i]); in drbd_resync_finished()
961 drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_CURRENT]); in drbd_resync_finished()
962 _drbd_uuid_set(device, UI_CURRENT, device->p_uuid[UI_CURRENT]); in drbd_resync_finished()
964 drbd_err(device, "device->p_uuid is NULL! BUG\n"); in drbd_resync_finished()
971 drbd_uuid_set_bm(device, 0UL); in drbd_resync_finished()
972 drbd_print_uuids(device, "updated UUIDs"); in drbd_resync_finished()
973 if (device->p_uuid) { in drbd_resync_finished()
978 device->p_uuid[i] = device->ldev->md.uuid[i]; in drbd_resync_finished()
983 _drbd_set_state(device, ns, CS_VERBOSE, NULL); in drbd_resync_finished()
985 spin_unlock_irq(&device->resource->req_lock); in drbd_resync_finished()
995 fp = rcu_dereference(device->ldev->disk_conf)->fencing; in drbd_resync_finished()
1000 struct drbd_device *device = peer_device->device; in drbd_resync_finished() local
1001 disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk); in drbd_resync_finished()
1002 pdsk_state = min_t(enum drbd_disk_state, pdsk_state, device->state.pdsk); in drbd_resync_finished()
1010 put_ldev(device); in drbd_resync_finished()
1012 device->rs_total = 0; in drbd_resync_finished()
1013 device->rs_failed = 0; in drbd_resync_finished()
1014 device->rs_paused = 0; in drbd_resync_finished()
1016 /* reset start sector, if we reached end of device */ in drbd_resync_finished()
1017 if (verify_done && device->ov_left == 0) in drbd_resync_finished()
1018 device->ov_start_sector = 0; in drbd_resync_finished()
1020 drbd_md_sync(device); in drbd_resync_finished()
1023 drbd_khelper(device, khelper_cmd); in drbd_resync_finished()
1029 static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req) in move_to_net_ee_or_free() argument
1034 atomic_add(i, &device->pp_in_use_by_net); in move_to_net_ee_or_free()
1035 atomic_sub(i, &device->pp_in_use); in move_to_net_ee_or_free()
1036 spin_lock_irq(&device->resource->req_lock); in move_to_net_ee_or_free()
1037 list_add_tail(&peer_req->w.list, &device->net_ee); in move_to_net_ee_or_free()
1038 spin_unlock_irq(&device->resource->req_lock); in move_to_net_ee_or_free()
1041 drbd_free_peer_req(device, peer_req); in move_to_net_ee_or_free()
1053 struct drbd_device *device = peer_device->device; in w_e_end_data_req() local
1057 drbd_free_peer_req(device, peer_req); in w_e_end_data_req()
1058 dec_unacked(device); in w_e_end_data_req()
1066 drbd_err(device, "Sending NegDReply. sector=%llus.\n", in w_e_end_data_req()
1072 dec_unacked(device); in w_e_end_data_req()
1074 move_to_net_ee_or_free(device, peer_req); in w_e_end_data_req()
1077 drbd_err(device, "drbd_send_block() failed\n"); in w_e_end_data_req()
1114 struct drbd_device *device = peer_device->device; in w_e_end_rsdata_req() local
1118 drbd_free_peer_req(device, peer_req); in w_e_end_rsdata_req()
1119 dec_unacked(device); in w_e_end_rsdata_req()
1123 if (get_ldev_if_state(device, D_FAILED)) { in w_e_end_rsdata_req()
1124 drbd_rs_complete_io(device, peer_req->i.sector); in w_e_end_rsdata_req()
1125 put_ldev(device); in w_e_end_rsdata_req()
1128 if (device->state.conn == C_AHEAD) { in w_e_end_rsdata_req()
1131 if (likely(device->state.pdsk >= D_INCONSISTENT)) { in w_e_end_rsdata_req()
1132 inc_rs_pending(device); in w_e_end_rsdata_req()
1139 drbd_err(device, "Not sending RSDataReply, " in w_e_end_rsdata_req()
1145 drbd_err(device, "Sending NegRSDReply. sector %llus.\n", in w_e_end_rsdata_req()
1151 drbd_rs_failed_io(device, peer_req->i.sector, peer_req->i.size); in w_e_end_rsdata_req()
1154 dec_unacked(device); in w_e_end_rsdata_req()
1156 move_to_net_ee_or_free(device, peer_req); in w_e_end_rsdata_req()
1159 drbd_err(device, "drbd_send_block() failed\n"); in w_e_end_rsdata_req()
1167 struct drbd_device *device = peer_device->device; in w_e_end_csum_rs_req() local
1174 drbd_free_peer_req(device, peer_req); in w_e_end_csum_rs_req()
1175 dec_unacked(device); in w_e_end_csum_rs_req()
1179 if (get_ldev(device)) { in w_e_end_csum_rs_req()
1180 drbd_rs_complete_io(device, peer_req->i.sector); in w_e_end_csum_rs_req()
1181 put_ldev(device); in w_e_end_csum_rs_req()
1192 D_ASSERT(device, digest_size == di->digest_size); in w_e_end_csum_rs_req()
1202 drbd_set_in_sync(device, peer_req->i.sector, peer_req->i.size); in w_e_end_csum_rs_req()
1204 device->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT; in w_e_end_csum_rs_req()
1207 inc_rs_pending(device); in w_e_end_csum_rs_req()
1216 drbd_err(device, "Sending NegDReply. I guess it gets messy.\n"); in w_e_end_csum_rs_req()
1219 dec_unacked(device); in w_e_end_csum_rs_req()
1220 move_to_net_ee_or_free(device, peer_req); in w_e_end_csum_rs_req()
1223 drbd_err(device, "drbd_send_block/ack() failed\n"); in w_e_end_csum_rs_req()
1231 struct drbd_device *device = peer_device->device; in w_e_end_ov_req() local
1258 drbd_free_peer_req(device, peer_req); in w_e_end_ov_req()
1260 inc_rs_pending(device); in w_e_end_ov_req()
1263 dec_rs_pending(device); in w_e_end_ov_req()
1268 drbd_free_peer_req(device, peer_req); in w_e_end_ov_req()
1269 dec_unacked(device); in w_e_end_ov_req()
1273 void drbd_ov_out_of_sync_found(struct drbd_device *device, sector_t sector, int size) in drbd_ov_out_of_sync_found() argument
1275 if (device->ov_last_oos_start + device->ov_last_oos_size == sector) { in drbd_ov_out_of_sync_found()
1276 device->ov_last_oos_size += size>>9; in drbd_ov_out_of_sync_found()
1278 device->ov_last_oos_start = sector; in drbd_ov_out_of_sync_found()
1279 device->ov_last_oos_size = size>>9; in drbd_ov_out_of_sync_found()
1281 drbd_set_out_of_sync(device, sector, size); in drbd_ov_out_of_sync_found()
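
drbd_ov_out_of_sync_found() coalesces adjacent out-of-sync extents into one run, so the verify report prints one line per contiguous range rather than one per block: if a new extent starts exactly where the last run ends, the run grows, otherwise a new run begins. The same pattern as a standalone sketch (userspace types; struct run is an invention standing in for the ov_last_oos_* fields):

    #include <stdio.h>

    typedef unsigned long long sector_t;

    struct run {
        sector_t start;    /* like device->ov_last_oos_start */
        sector_t sectors;  /* like device->ov_last_oos_size */
    };

    /* size is in bytes; >>9 converts to 512-byte sectors, as in the listing. */
    static void oos_found(struct run *r, sector_t sector, int size)
    {
        if (r->start + r->sectors == sector) {
            r->sectors += size >> 9;  /* extends the current run */
        } else {
            r->start = sector;        /* starts a new run */
            r->sectors = size >> 9;
        }
    }

    int main(void)
    {
        struct run r = { 0, 0 };
        oos_found(&r, 2048, 4096);  /* run: start 2048, 8 sectors */
        oos_found(&r, 2056, 4096);  /* adjacent: grows to 16 sectors */
        oos_found(&r, 9000, 4096);  /* gap: new run at 9000 */
        printf("last run: start=%llu sectors=%llu\n", r.start, r.sectors);
        return 0;
    }
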
1288 struct drbd_device *device = peer_device->device; in w_e_end_ov_reply() local
1298 drbd_free_peer_req(device, peer_req); in w_e_end_ov_reply()
1299 dec_unacked(device); in w_e_end_ov_reply()
1305 if (get_ldev(device)) { in w_e_end_ov_reply()
1306 drbd_rs_complete_io(device, peer_req->i.sector); in w_e_end_ov_reply()
1307 put_ldev(device); in w_e_end_ov_reply()
1318 D_ASSERT(device, digest_size == di->digest_size); in w_e_end_ov_reply()
1329 drbd_free_peer_req(device, peer_req); in w_e_end_ov_reply()
1331 drbd_ov_out_of_sync_found(device, sector, size); in w_e_end_ov_reply()
1333 ov_out_of_sync_print(device); in w_e_end_ov_reply()
1338 dec_unacked(device); in w_e_end_ov_reply()
1340 --device->ov_left; in w_e_end_ov_reply()
1343 if ((device->ov_left & 0x200) == 0x200) in w_e_end_ov_reply()
1344 drbd_advance_rs_marks(device, device->ov_left); in w_e_end_ov_reply()
1346 stop_sector_reached = verify_can_do_stop_sector(device) && in w_e_end_ov_reply()
1347 (sector + (size>>9)) >= device->ov_stop_sector; in w_e_end_ov_reply()
1349 if (device->ov_left == 0 || stop_sector_reached) { in w_e_end_ov_reply()
1350 ov_out_of_sync_print(device); in w_e_end_ov_reply()
1351 drbd_resync_finished(device); in w_e_end_ov_reply()
1389 struct drbd_device *device = in w_send_write_hint() local
1394 return pd_send_unplug_remote(first_peer_device(device)); in w_send_write_hint()
1422 struct drbd_device *device = req->device; in w_send_out_of_sync() local
1423 struct drbd_peer_device *const peer_device = first_peer_device(device); in w_send_out_of_sync()
1453 struct drbd_device *device = req->device; in w_send_dblock() local
1454 struct drbd_peer_device *const peer_device = first_peer_device(device); in w_send_dblock()
1486 struct drbd_device *device = req->device; in w_send_read_req() local
1487 struct drbd_peer_device *const peer_device = first_peer_device(device); in w_send_read_req()
1516 struct drbd_device *device = req->device; in w_restart_disk_io() local
1519 drbd_al_begin_io(device, &req->i); in w_restart_disk_io()
1521 req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, in w_restart_disk_io()
1531 static int _drbd_may_sync_now(struct drbd_device *device) in _drbd_may_sync_now() argument
1533 struct drbd_device *odev = device; in _drbd_may_sync_now()
1557 * @device: DRBD device.
1561 static bool drbd_pause_after(struct drbd_device *device) in drbd_pause_after() argument
1583 * @device: DRBD device.
1587 static bool drbd_resume_next(struct drbd_device *device) in drbd_resume_next() argument
1608 void resume_next_sg(struct drbd_device *device) in resume_next_sg() argument
1611 drbd_resume_next(device); in resume_next_sg()
1615 void suspend_other_sg(struct drbd_device *device) in suspend_other_sg() argument
1618 drbd_pause_after(device); in suspend_other_sg()
1623 enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor) in drbd_resync_after_valid() argument
1636 if (odev == device) in drbd_resync_after_valid()
1661 void drbd_resync_after_changed(struct drbd_device *device) in drbd_resync_after_changed() argument
1666 changed = drbd_pause_after(device); in drbd_resync_after_changed()
1667 changed |= drbd_resume_next(device); in drbd_resync_after_changed()
1671 void drbd_rs_controller_reset(struct drbd_device *device) in drbd_rs_controller_reset() argument
1673 struct gendisk *disk = device->ldev->backing_bdev->bd_disk; in drbd_rs_controller_reset()
1676 atomic_set(&device->rs_sect_in, 0); in drbd_rs_controller_reset()
1677 atomic_set(&device->rs_sect_ev, 0); in drbd_rs_controller_reset()
1678 device->rs_in_flight = 0; in drbd_rs_controller_reset()
1679 device->rs_last_events = in drbd_rs_controller_reset()
1687 plan = rcu_dereference(device->rs_plan_s); in drbd_rs_controller_reset()
1695 struct drbd_device *device = from_timer(device, t, start_resync_timer); in start_resync_timer_fn() local
1696 drbd_device_post_work(device, RS_START); in start_resync_timer_fn()
1699 static void do_start_resync(struct drbd_device *device) in do_start_resync() argument
1701 if (atomic_read(&device->unacked_cnt) || atomic_read(&device->rs_pending_cnt)) { in do_start_resync()
1702 drbd_warn(device, "postponing start_resync ...\n"); in do_start_resync()
1703 device->start_resync_timer.expires = jiffies + HZ/10; in do_start_resync()
1704 add_timer(&device->start_resync_timer); in do_start_resync()
1708 drbd_start_resync(device, C_SYNC_SOURCE); in do_start_resync()
1709 clear_bit(AHEAD_TO_SYNC_SOURCE, &device->flags); in do_start_resync()
1712 static bool use_checksum_based_resync(struct drbd_connection *connection, struct drbd_device *device) in use_checksum_based_resync() argument
1721 || test_bit(CRASHED_PRIMARY, &device->flags)); /* or only after Primary crash? */ in use_checksum_based_resync()
1726 * @device: DRBD device.
1732 void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) in drbd_start_resync() argument
1734 struct drbd_peer_device *peer_device = first_peer_device(device); in drbd_start_resync()
1739 if (device->state.conn >= C_SYNC_SOURCE && device->state.conn < C_AHEAD) { in drbd_start_resync()
1740 drbd_err(device, "Resync already running!\n"); in drbd_start_resync()
1745 drbd_err(device, "No connection to peer, aborting!\n"); in drbd_start_resync()
1749 if (!test_bit(B_RS_H_DONE, &device->flags)) { in drbd_start_resync()
1754 r = drbd_khelper(device, "before-resync-target"); in drbd_start_resync()
1757 drbd_info(device, "before-resync-target handler returned %d, " in drbd_start_resync()
1763 r = drbd_khelper(device, "before-resync-source"); in drbd_start_resync()
1767 drbd_info(device, "before-resync-source handler returned %d, " in drbd_start_resync()
1770 drbd_info(device, "before-resync-source handler returned %d, " in drbd_start_resync()
1783 if (!mutex_trylock(device->state_mutex)) { in drbd_start_resync()
1784 set_bit(B_RS_H_DONE, &device->flags); in drbd_start_resync()
1785 device->start_resync_timer.expires = jiffies + HZ/5; in drbd_start_resync()
1786 add_timer(&device->start_resync_timer); in drbd_start_resync()
1790 mutex_lock(device->state_mutex); in drbd_start_resync()
1794 clear_bit(B_RS_H_DONE, &device->flags); in drbd_start_resync()
1796 if (device->state.conn < C_CONNECTED in drbd_start_resync()
1797 || !get_ldev_if_state(device, D_NEGOTIATING)) { in drbd_start_resync()
1802 ns = drbd_read_state(device); in drbd_start_resync()
1804 ns.aftr_isp = !_drbd_may_sync_now(device); in drbd_start_resync()
1813 r = _drbd_set_state(device, ns, CS_VERBOSE, NULL); in drbd_start_resync()
1814 ns = drbd_read_state(device); in drbd_start_resync()
1820 unsigned long tw = drbd_bm_total_weight(device); in drbd_start_resync()
1824 device->rs_failed = 0; in drbd_start_resync()
1825 device->rs_paused = 0; in drbd_start_resync()
1826 device->rs_same_csum = 0; in drbd_start_resync()
1827 device->rs_last_sect_ev = 0; in drbd_start_resync()
1828 device->rs_total = tw; in drbd_start_resync()
1829 device->rs_start = now; in drbd_start_resync()
1831 device->rs_mark_left[i] = tw; in drbd_start_resync()
1832 device->rs_mark_time[i] = now; in drbd_start_resync()
1834 drbd_pause_after(device); in drbd_start_resync()
1836 * Open coded drbd_rs_cancel_all(device), we already have IRQs in drbd_start_resync()
1838 spin_lock(&device->al_lock); in drbd_start_resync()
1839 lc_reset(device->resync); in drbd_start_resync()
1840 device->resync_locked = 0; in drbd_start_resync()
1841 device->resync_wenr = LC_FREE; in drbd_start_resync()
1842 spin_unlock(&device->al_lock); in drbd_start_resync()
1847 wake_up(&device->al_wait); /* for lc_reset() above */ in drbd_start_resync()
1850 device->rs_last_bcast = jiffies - HZ; in drbd_start_resync()
1852 drbd_info(device, "Began resync as %s (will sync %lu KB [%lu bits set]).\n", in drbd_start_resync()
1854 (unsigned long) device->rs_total << (BM_BLOCK_SHIFT-10), in drbd_start_resync()
1855 (unsigned long) device->rs_total); in drbd_start_resync()
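
As a worked example of the message above: rs_total is in bitmap bits, and the KB figure is rs_total << (BM_BLOCK_SHIFT - 10). Assuming BM_BLOCK_SHIFT = 12 (each bit covers a 4 KiB block), that is simply bits * 4, so 1,000,000 set bits announce "will sync 4000000 KB [1000000 bits set]".
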
1857 device->bm_resync_fo = 0; in drbd_start_resync()
1858 device->use_csums = use_checksum_based_resync(connection, device); in drbd_start_resync()
1860 device->use_csums = false; in drbd_start_resync()
1873 if (connection->agreed_pro_version < 95 && device->rs_total == 0) { in drbd_start_resync()
1894 drbd_resync_finished(device); in drbd_start_resync()
1897 drbd_rs_controller_reset(device); in drbd_start_resync()
1898 /* ns.conn may already be != device->state.conn, in drbd_start_resync()
1903 mod_timer(&device->resync_timer, jiffies); in drbd_start_resync()
1905 drbd_md_sync(device); in drbd_start_resync()
1907 put_ldev(device); in drbd_start_resync()
1909 mutex_unlock(device->state_mutex); in drbd_start_resync()
1912 static void update_on_disk_bitmap(struct drbd_device *device, bool resync_done) in update_on_disk_bitmap() argument
1915 device->rs_last_bcast = jiffies; in update_on_disk_bitmap()
1917 if (!get_ldev(device)) in update_on_disk_bitmap()
1920 drbd_bm_write_lazy(device, 0); in update_on_disk_bitmap()
1921 if (resync_done && is_sync_state(device->state.conn)) in update_on_disk_bitmap()
1922 drbd_resync_finished(device); in update_on_disk_bitmap()
1924 drbd_bcast_event(device, &sib); in update_on_disk_bitmap()
1926 device->rs_last_bcast = jiffies; in update_on_disk_bitmap()
1927 put_ldev(device); in update_on_disk_bitmap()
1930 static void drbd_ldev_destroy(struct drbd_device *device) in drbd_ldev_destroy() argument
1932 lc_destroy(device->resync); in drbd_ldev_destroy()
1933 device->resync = NULL; in drbd_ldev_destroy()
1934 lc_destroy(device->act_log); in drbd_ldev_destroy()
1935 device->act_log = NULL; in drbd_ldev_destroy()
1938 drbd_backing_dev_free(device, device->ldev); in drbd_ldev_destroy()
1939 device->ldev = NULL; in drbd_ldev_destroy()
1942 clear_bit(GOING_DISKLESS, &device->flags); in drbd_ldev_destroy()
1943 wake_up(&device->misc_wait); in drbd_ldev_destroy()
1946 static void go_diskless(struct drbd_device *device) in go_diskless() argument
1948 D_ASSERT(device, device->state.disk == D_FAILED); in go_diskless()
1967 if (device->bitmap && device->ldev) { in go_diskless()
1972 if (drbd_bitmap_io_from_worker(device, drbd_bm_write, in go_diskless()
1974 if (test_bit(WAS_READ_ERROR, &device->flags)) { in go_diskless()
1975 drbd_md_set_flag(device, MDF_FULL_SYNC); in go_diskless()
1976 drbd_md_sync(device); in go_diskless()
1981 drbd_force_state(device, NS(disk, D_DISKLESS)); in go_diskless()
1984 static int do_md_sync(struct drbd_device *device) in do_md_sync() argument
1986 drbd_warn(device, "md_sync_timer expired! Worker calls drbd_md_sync().\n"); in do_md_sync()
1987 drbd_md_sync(device); in do_md_sync()
2014 static void do_device_work(struct drbd_device *device, const unsigned long todo) in do_device_work() argument
2017 do_md_sync(device); in do_device_work()
2020 update_on_disk_bitmap(device, test_bit(RS_DONE, &todo)); in do_device_work()
2022 go_diskless(device); in do_device_work()
2024 drbd_ldev_destroy(device); in do_device_work()
2026 do_start_resync(device); in do_device_work()
2055 struct drbd_device *device = peer_device->device; in do_unqueued_work() local
2056 unsigned long todo = get_work_bits(&device->flags); in do_unqueued_work()
2060 kref_get(&device->kref); in do_unqueued_work()
2062 do_device_work(device, todo); in do_unqueued_work()
2063 kref_put(&device->kref, drbd_destroy_device); in do_unqueued_work()
2222 struct drbd_device *device = peer_device->device; in drbd_worker() local
2223 D_ASSERT(device, device->state.disk == D_DISKLESS && device->state.conn == C_STANDALONE); in drbd_worker()
2224 kref_get(&device->kref); in drbd_worker()
2226 drbd_device_cleanup(device); in drbd_worker()
2227 kref_put(&device->kref, drbd_destroy_device); in drbd_worker()