Lines matching refs: peer_req
80 static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local) in drbd_endio_read_sec_final() argument
83 struct drbd_peer_device *peer_device = peer_req->peer_device; in drbd_endio_read_sec_final()
87 device->read_cnt += peer_req->i.size >> 9; in drbd_endio_read_sec_final()
88 list_del(&peer_req->w.list); in drbd_endio_read_sec_final()
91 if (test_bit(__EE_WAS_ERROR, &peer_req->flags)) in drbd_endio_read_sec_final()
95 drbd_queue_work(&peer_device->connection->sender_work, &peer_req->w); in drbd_endio_read_sec_final()
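
Sketch of drbd_endio_read_sec_final() reconstructed from the fragments above; the req_lock handling, the __drbd_chk_io_error() call and the trailing put_ldev() are assumptions about the elided lines, not part of the listing:

    static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
    {
        unsigned long flags;
        struct drbd_peer_device *peer_device = peer_req->peer_device;
        struct drbd_device *device = peer_device->device;

        spin_lock_irqsave(&device->resource->req_lock, flags);      /* assumed */
        device->read_cnt += peer_req->i.size >> 9;   /* bytes -> 512-byte sectors */
        list_del(&peer_req->w.list);                 /* off device->read_ee */
        if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
            __drbd_chk_io_error(device, DRBD_READ_ERROR);            /* assumed */
        spin_unlock_irqrestore(&device->resource->req_lock, flags);

        /* hand the completed request to the sender worker */
        drbd_queue_work(&peer_device->connection->sender_work, &peer_req->w);
        put_ldev(device);                                            /* assumed */
    }
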
101 void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local) in drbd_endio_write_sec_final() argument
104 struct drbd_peer_device *peer_device = peer_req->peer_device; in drbd_endio_write_sec_final()
116 i = peer_req->i; in drbd_endio_write_sec_final()
117 do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO; in drbd_endio_write_sec_final()
118 block_id = peer_req->block_id; in drbd_endio_write_sec_final()
119 peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO; in drbd_endio_write_sec_final()
121 if (peer_req->flags & EE_WAS_ERROR) { in drbd_endio_write_sec_final()
124 if (!__test_and_set_bit(__EE_SEND_WRITE_ACK, &peer_req->flags)) in drbd_endio_write_sec_final()
126 drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size); in drbd_endio_write_sec_final()
130 device->writ_cnt += peer_req->i.size >> 9; in drbd_endio_write_sec_final()
131 list_move_tail(&peer_req->w.list, &device->done_ee); in drbd_endio_write_sec_final()
145 if (peer_req->flags & EE_WAS_ERROR) in drbd_endio_write_sec_final()
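
The fragments above only cover part of drbd_endio_write_sec_final(); the sketch below shows the pattern they imply: everything still needed after the hand-off is copied to locals first, because once the entry sits on device->done_ee the worker may process and free it. The locking, inc_unacked(), __drbd_chk_io_error(), drbd_rs_complete_io() and drbd_al_complete_io() calls are assumptions about the elided lines:

    static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
    {
        struct drbd_peer_device *peer_device = peer_req->peer_device;
        struct drbd_device *device = peer_device->device;
        struct drbd_interval i;
        int do_al_complete_io;
        u64 block_id;
        unsigned long flags;

        /* copy before the hand-off below invalidates peer_req for us */
        i = peer_req->i;
        do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
        block_id = peer_req->block_id;               /* ID_SYNCER for resync writes */
        peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;

        if (peer_req->flags & EE_WAS_ERROR) {
            /* still send a (negative) write ACK, and remember the range as out of sync */
            if (!__test_and_set_bit(__EE_SEND_WRITE_ACK, &peer_req->flags))
                inc_unacked(device);                                 /* assumed */
            drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
        }

        spin_lock_irqsave(&device->resource->req_lock, flags);      /* assumed */
        device->writ_cnt += peer_req->i.size >> 9;
        list_move_tail(&peer_req->w.list, &device->done_ee);
        if (peer_req->flags & EE_WAS_ERROR)
            __drbd_chk_io_error(device, DRBD_WRITE_ERROR);           /* assumed */
        spin_unlock_irqrestore(&device->resource->req_lock, flags);

        if (block_id == ID_SYNCER)
            drbd_rs_complete_io(device, i.sector);                   /* assumed */
        if (do_al_complete_io)
            drbd_al_complete_io(device, &i);                         /* assumed */
        put_ldev(device);                                            /* assumed */
    }
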
172 struct drbd_peer_request *peer_req = bio->bi_private; in drbd_peer_request_endio() local
173 struct drbd_device *device = peer_req->peer_device->device; in drbd_peer_request_endio()
182 (unsigned long long)peer_req->i.sector); in drbd_peer_request_endio()
185 set_bit(__EE_WAS_ERROR, &peer_req->flags); in drbd_peer_request_endio()
188 if (atomic_dec_and_test(&peer_req->pending_bios)) { in drbd_peer_request_endio()
190 drbd_endio_write_sec_final(peer_req); in drbd_peer_request_endio()
192 drbd_endio_read_sec_final(peer_req); in drbd_peer_request_endio()
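
drbd_peer_request_endio() is the bio completion handler; most of the dispatch is visible in the fragments above. A sketch, where the is_write test via bio_data_dir(), the bi_status check and bio_put() are assumptions about the elided lines:

    static void drbd_peer_request_endio(struct bio *bio)
    {
        struct drbd_peer_request *peer_req = bio->bi_private;
        struct drbd_device *device = peer_req->peer_device->device;
        bool is_write = bio_data_dir(bio) == WRITE;                  /* assumed */

        if (bio->bi_status) {                                        /* assumed */
            drbd_warn(device, "%s: error=%d s=%llus\n",
                      is_write ? "write" : "read", bio->bi_status,
                      (unsigned long long)peer_req->i.sector);
            set_bit(__EE_WAS_ERROR, &peer_req->flags);
        }

        bio_put(bio);        /* this bio is done; peer_req keeps its own state */
        if (atomic_dec_and_test(&peer_req->pending_bios)) {
            /* last bio of this peer request: run the secondary completion */
            if (is_write)
                drbd_endio_write_sec_final(peer_req);
            else
                drbd_endio_read_sec_final(peer_req);
        }
    }
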
286 void drbd_csum_ee(struct crypto_shash *tfm, struct drbd_peer_request *peer_req, void *digest) in drbd_csum_ee() argument
289 struct page *page = peer_req->pages; in drbd_csum_ee()
307 len = peer_req->i.size & (PAGE_SIZE - 1); in drbd_csum_ee()
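
The two fragments of drbd_csum_ee() above imply a digest over a chain of pages, where only the last page may be partially used (i.size & (PAGE_SIZE - 1) bytes). A rough sketch; SHASH_DESC_ON_STACK, page_chain_next() and the kmap_atomic() pairs are assumptions about the elided lines:

    void drbd_csum_ee(struct crypto_shash *tfm, struct drbd_peer_request *peer_req, void *digest)
    {
        SHASH_DESC_ON_STACK(desc, tfm);
        struct page *page = peer_req->pages;
        struct page *tmp;
        unsigned int len;
        void *src;

        desc->tfm = tfm;
        crypto_shash_init(desc);

        src = kmap_atomic(page);
        while ((tmp = page_chain_next(page))) {      /* assumed DRBD helper */
            /* every page but the last one is used in full */
            crypto_shash_update(desc, src, PAGE_SIZE);
            kunmap_atomic(src);
            page = tmp;
            src = kmap_atomic(page);
        }
        /* the last page may only be partially used */
        len = peer_req->i.size & (PAGE_SIZE - 1);
        crypto_shash_update(desc, src, len ?: PAGE_SIZE);
        kunmap_atomic(src);

        crypto_shash_final(desc, digest);
        shash_desc_zero(desc);
    }
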
344 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); in w_e_send_csum() local
345 struct drbd_peer_device *peer_device = peer_req->peer_device; in w_e_send_csum()
354 if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0)) in w_e_send_csum()
360 sector_t sector = peer_req->i.sector; in w_e_send_csum()
361 unsigned int size = peer_req->i.size; in w_e_send_csum()
362 drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest); in w_e_send_csum()
368 drbd_free_peer_req(device, peer_req); in w_e_send_csum()
369 peer_req = NULL; in w_e_send_csum()
381 if (peer_req) in w_e_send_csum()
382 drbd_free_peer_req(device, peer_req); in w_e_send_csum()
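
w_e_send_csum() is the worker callback installed by read_for_csum() below. The notable ordering visible around lines 368/369: the peer request (and its pages) is freed before the checksum is sent, so a sender blocked on congestion cannot pin receive buffers on both sides. The digest allocation, inc_rs_pending() and drbd_send_drequest_csum() are assumptions about the elided lines:

    static int w_e_send_csum(struct drbd_work *w, int cancel)
    {
        struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
        struct drbd_peer_device *peer_device = peer_req->peer_device;
        struct drbd_device *device = peer_device->device;
        void *digest;
        int digest_size, err = 0;

        if (unlikely(cancel) || unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
            goto out;

        digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);  /* assumed */
        digest = kmalloc(digest_size, GFP_NOIO);                                     /* assumed */
        if (digest) {
            sector_t sector = peer_req->i.sector;
            unsigned int size = peer_req->i.size;
            drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
            /* free pages before sending, to avoid a distributed deadlock
             * when both peers block on congestion */
            drbd_free_peer_req(device, peer_req);
            peer_req = NULL;
            inc_rs_pending(device);                                                  /* assumed */
            err = drbd_send_drequest_csum(peer_device, sector, size, digest,
                                          digest_size, P_CSUM_RS_REQUEST);           /* assumed */
            kfree(digest);
        } else {
            err = -ENOMEM;
        }
    out:
        if (peer_req)
            drbd_free_peer_req(device, peer_req);
        return err;
    }
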
394 struct drbd_peer_request *peer_req; in read_for_csum() local
401 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector, in read_for_csum()
403 if (!peer_req) in read_for_csum()
406 peer_req->w.cb = w_e_send_csum; in read_for_csum()
408 list_add_tail(&peer_req->w.list, &device->read_ee); in read_for_csum()
412 if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0, in read_for_csum()
421 list_del(&peer_req->w.list); in read_for_csum()
424 drbd_free_peer_req(device, peer_req); in read_for_csum()
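
read_for_csum() shows the usual submit-or-back-out shape: allocate the peer request, set the worker callback, queue it on device->read_ee, then submit; if submission fails, unlink and free it again. The get_ldev()/put_ldev() pairing, the locking and the trailing drbd_alloc_peer_req()/drbd_submit_peer_request() arguments are assumptions about the elided lines:

    static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, int size)
    {
        struct drbd_device *device = peer_device->device;
        struct drbd_peer_request *peer_req;

        if (!get_ldev(device))                                       /* assumed */
            return -EIO;

        peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector,
                                       size, size, GFP_TRY);         /* trailing args assumed */
        if (!peer_req)
            goto defer;

        peer_req->w.cb = w_e_send_csum;
        spin_lock_irq(&device->resource->req_lock);                  /* assumed */
        list_add_tail(&peer_req->w.list, &device->read_ee);
        spin_unlock_irq(&device->resource->req_lock);

        if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
                                     DRBD_FAULT_RS_RD) == 0)         /* fault type assumed */
            return 0;

        /* back out: unlink and free what we just queued */
        spin_lock_irq(&device->resource->req_lock);
        list_del(&peer_req->w.list);
        spin_unlock_irq(&device->resource->req_lock);

        drbd_free_peer_req(device, peer_req);
    defer:
        put_ldev(device);
        return -EAGAIN;
    }
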
1033 static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req) in move_to_net_ee_or_free() argument
1035 if (drbd_peer_req_has_active_page(peer_req)) { in move_to_net_ee_or_free()
1037 int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT; in move_to_net_ee_or_free()
1041 list_add_tail(&peer_req->w.list, &device->net_ee); in move_to_net_ee_or_free()
1045 drbd_free_peer_req(device, peer_req); in move_to_net_ee_or_free()
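
move_to_net_ee_or_free() either parks a finished request on device->net_ee, because its pages may still be referenced by an in-flight network send, or frees it right away. The pp_in_use accounting, the locking and the wake_up() are assumptions about the elided lines:

    static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)
    {
        if (drbd_peer_req_has_active_page(peer_req)) {
            /* pages still in flight on the socket: account and park them */
            int i = (peer_req->i.size + PAGE_SIZE - 1) >> PAGE_SHIFT;
            atomic_add(i, &device->pp_in_use_by_net);                /* assumed */
            atomic_sub(i, &device->pp_in_use);                       /* assumed */
            spin_lock_irq(&device->resource->req_lock);              /* assumed */
            list_add_tail(&peer_req->w.list, &device->net_ee);
            spin_unlock_irq(&device->resource->req_lock);
            wake_up(&drbd_pp_wait);                                  /* assumed */
        } else {
            drbd_free_peer_req(device, peer_req);
        }
    }
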
1055 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); in w_e_end_data_req() local
1056 struct drbd_peer_device *peer_device = peer_req->peer_device; in w_e_end_data_req()
1061 drbd_free_peer_req(device, peer_req); in w_e_end_data_req()
1066 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { in w_e_end_data_req()
1067 err = drbd_send_block(peer_device, P_DATA_REPLY, peer_req); in w_e_end_data_req()
1071 (unsigned long long)peer_req->i.sector); in w_e_end_data_req()
1073 err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req); in w_e_end_data_req()
1078 move_to_net_ee_or_free(device, peer_req); in w_e_end_data_req()
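
w_e_end_data_req() answers a P_DATA_REQUEST: on success the block itself goes out as P_DATA_REPLY, otherwise a negative ack (P_NEG_DREPLY); either way the request ends in move_to_net_ee_or_free(). The cancel check and the dec_unacked() bookkeeping are assumptions about the elided lines:

    static int w_e_end_data_req(struct drbd_work *w, int cancel)
    {
        struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
        struct drbd_peer_device *peer_device = peer_req->peer_device;
        struct drbd_device *device = peer_device->device;
        int err;

        if (unlikely(cancel)) {                                      /* assumed */
            drbd_free_peer_req(device, peer_req);
            dec_unacked(device);                                     /* assumed */
            return 0;
        }

        if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
            err = drbd_send_block(peer_device, P_DATA_REPLY, peer_req);
        } else {
            drbd_err(device, "Sending NegDReply. sector=%llus.\n",
                     (unsigned long long)peer_req->i.sector);
            err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req);
        }

        dec_unacked(device);                                         /* assumed */
        move_to_net_ee_or_free(device, peer_req);
        return err;
    }
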
1085 static bool all_zero(struct drbd_peer_request *peer_req) in all_zero() argument
1087 struct page *page = peer_req->pages; in all_zero()
1088 unsigned int len = peer_req->i.size; in all_zero()
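
all_zero() decides whether a resync block can be reported as deallocated instead of shipping its payload (see the EE_RS_THIN_REQ branch in w_e_end_rsdata_req() below). A sketch of the page-chain scan implied by the two lines above; page_chain_for_each(), the kmap_atomic() pairs and the word-wise compare are assumptions:

    static bool all_zero(struct drbd_peer_request *peer_req)
    {
        struct page *page = peer_req->pages;
        unsigned int len = peer_req->i.size;

        page_chain_for_each(page) {                                  /* assumed DRBD helper */
            unsigned int l = min_t(unsigned int, len, PAGE_SIZE);
            unsigned int i, words = l / sizeof(long);
            unsigned long *d;

            d = kmap_atomic(page);
            for (i = 0; i < words; i++) {
                if (d[i]) {
                    kunmap_atomic(d);
                    return false;
                }
            }
            kunmap_atomic(d);
            len -= l;
        }
        return true;
    }
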
1116 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); in w_e_end_rsdata_req() local
1117 struct drbd_peer_device *peer_device = peer_req->peer_device; in w_e_end_rsdata_req()
1122 drbd_free_peer_req(device, peer_req); in w_e_end_rsdata_req()
1128 drbd_rs_complete_io(device, peer_req->i.sector); in w_e_end_rsdata_req()
1133 err = drbd_send_ack(peer_device, P_RS_CANCEL, peer_req); in w_e_end_rsdata_req()
1134 } else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { in w_e_end_rsdata_req()
1137 if (peer_req->flags & EE_RS_THIN_REQ && all_zero(peer_req)) in w_e_end_rsdata_req()
1138 err = drbd_send_rs_deallocated(peer_device, peer_req); in w_e_end_rsdata_req()
1140 err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req); in w_e_end_rsdata_req()
1150 (unsigned long long)peer_req->i.sector); in w_e_end_rsdata_req()
1152 err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req); in w_e_end_rsdata_req()
1155 drbd_rs_failed_io(device, peer_req->i.sector, peer_req->i.size); in w_e_end_rsdata_req()
1160 move_to_net_ee_or_free(device, peer_req); in w_e_end_rsdata_req()
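
w_e_end_rsdata_req() replies to a resync read request. A condensed sketch of the decision tree implied by the fragments above; the get_ldev_if_state() guard, the device state checks and the dec_unacked() bookkeeping are assumptions about the elided lines:

    /* inside w_e_end_rsdata_req(), after the cancel/free early exit */
    if (get_ldev_if_state(device, D_FAILED)) {                       /* assumed guard */
        drbd_rs_complete_io(device, peer_req->i.sector);
        put_ldev(device);
    }

    if (device->state.conn == C_AHEAD) {                             /* assumed condition */
        err = drbd_send_ack(peer_device, P_RS_CANCEL, peer_req);
    } else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
        if (peer_req->flags & EE_RS_THIN_REQ && all_zero(peer_req))
            err = drbd_send_rs_deallocated(peer_device, peer_req);
        else
            err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
    } else {
        /* local read failed: negative ack, and record the failed resync range */
        err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
        drbd_rs_failed_io(device, peer_req->i.sector, peer_req->i.size);
    }

    dec_unacked(device);                                             /* assumed */
    move_to_net_ee_or_free(device, peer_req);
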
1169 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); in w_e_end_csum_rs_req() local
1170 struct drbd_peer_device *peer_device = peer_req->peer_device; in w_e_end_csum_rs_req()
1178 drbd_free_peer_req(device, peer_req); in w_e_end_csum_rs_req()
1184 drbd_rs_complete_io(device, peer_req->i.sector); in w_e_end_csum_rs_req()
1188 di = peer_req->digest; in w_e_end_csum_rs_req()
1190 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { in w_e_end_csum_rs_req()
1200 drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest); in w_e_end_csum_rs_req()
1206 drbd_set_in_sync(device, peer_req->i.sector, peer_req->i.size); in w_e_end_csum_rs_req()
1208 device->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT; in w_e_end_csum_rs_req()
1209 err = drbd_send_ack(peer_device, P_RS_IS_IN_SYNC, peer_req); in w_e_end_csum_rs_req()
1212 peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */ in w_e_end_csum_rs_req()
1213 peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */ in w_e_end_csum_rs_req()
1215 err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req); in w_e_end_csum_rs_req()
1218 err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req); in w_e_end_csum_rs_req()
1224 move_to_net_ee_or_free(device, peer_req); in w_e_end_csum_rs_req()
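
w_e_end_csum_rs_req() is the core of checksum-based resync: the locally computed digest is compared with the one the peer sent in peer_req->digest; on a match only P_RS_IS_IN_SYNC goes back, otherwise the full block is sent as P_RS_DATA_REPLY. A condensed sketch of that comparison; the digest allocation, the memcmp() against struct digest_info and inc_rs_pending() are assumptions about the elided lines (locals assumed: struct digest_info *di; void *digest; int digest_size, eq = 0, err):

    /* inside w_e_end_csum_rs_req(), after drbd_rs_complete_io() */
    di = peer_req->digest;                       /* digest received from the peer */
    if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
        digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);  /* assumed */
        digest = kmalloc(digest_size, GFP_NOIO);                                    /* assumed */
        if (digest) {
            drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
            eq = !memcmp(digest, di->digest, digest_size);                          /* assumed layout */
            kfree(digest);
        }

        if (eq) {
            /* identical content: no payload transfer needed */
            drbd_set_in_sync(device, peer_req->i.sector, peer_req->i.size);
            device->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
            err = drbd_send_ack(peer_device, P_RS_IS_IN_SYNC, peer_req);
        } else {
            inc_rs_pending(device);                                                 /* assumed */
            peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
            peer_req->flags &= ~EE_HAS_DIGEST;
            err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
        }
    } else {
        err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
    }

    move_to_net_ee_or_free(device, peer_req);
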
1233 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); in w_e_end_ov_req() local
1234 struct drbd_peer_device *peer_device = peer_req->peer_device; in w_e_end_ov_req()
1236 sector_t sector = peer_req->i.sector; in w_e_end_ov_req()
1237 unsigned int size = peer_req->i.size; in w_e_end_ov_req()
1252 if (likely(!(peer_req->flags & EE_WAS_ERROR))) in w_e_end_ov_req()
1253 drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest); in w_e_end_ov_req()
1262 drbd_free_peer_req(device, peer_req); in w_e_end_ov_req()
1263 peer_req = NULL; in w_e_end_ov_req()
1271 if (peer_req) in w_e_end_ov_req()
1272 drbd_free_peer_req(device, peer_req); in w_e_end_ov_req()
1290 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); in w_e_end_ov_reply() local
1291 struct drbd_peer_device *peer_device = peer_req->peer_device; in w_e_end_ov_reply()
1295 sector_t sector = peer_req->i.sector; in w_e_end_ov_reply()
1296 unsigned int size = peer_req->i.size; in w_e_end_ov_reply()
1302 drbd_free_peer_req(device, peer_req); in w_e_end_ov_reply()
1310 drbd_rs_complete_io(device, peer_req->i.sector); in w_e_end_ov_reply()
1314 di = peer_req->digest; in w_e_end_ov_reply()
1316 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { in w_e_end_ov_reply()
1320 drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest); in w_e_end_ov_reply()
1333 drbd_free_peer_req(device, peer_req); in w_e_end_ov_reply()