Lines Matching refs:peer_req (all matches below are in drivers/block/drbd/drbd_worker.c)

81 static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)  in drbd_endio_read_sec_final()  argument
84 struct drbd_peer_device *peer_device = peer_req->peer_device; in drbd_endio_read_sec_final()
88 device->read_cnt += peer_req->i.size >> 9; in drbd_endio_read_sec_final()
89 list_del(&peer_req->w.list); in drbd_endio_read_sec_final()
92 if (test_bit(__EE_WAS_ERROR, &peer_req->flags)) in drbd_endio_read_sec_final()
96 drbd_queue_work(&peer_device->connection->sender_work, &peer_req->w); in drbd_endio_read_sec_final()
102 void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local) in drbd_endio_write_sec_final() argument
105 struct drbd_peer_device *peer_device = peer_req->peer_device; in drbd_endio_write_sec_final()
117 i = peer_req->i; in drbd_endio_write_sec_final()
118 do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO; in drbd_endio_write_sec_final()
119 block_id = peer_req->block_id; in drbd_endio_write_sec_final()
120 peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO; in drbd_endio_write_sec_final()
122 if (peer_req->flags & EE_WAS_ERROR) { in drbd_endio_write_sec_final()
125 if (!__test_and_set_bit(__EE_SEND_WRITE_ACK, &peer_req->flags)) in drbd_endio_write_sec_final()
127 drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size); in drbd_endio_write_sec_final()
131 device->writ_cnt += peer_req->i.size >> 9; in drbd_endio_write_sec_final()
132 list_move_tail(&peer_req->w.list, &device->done_ee); in drbd_endio_write_sec_final()
146 if (peer_req->flags & EE_WAS_ERROR) in drbd_endio_write_sec_final()
173 struct drbd_peer_request *peer_req = bio->bi_private; in drbd_peer_request_endio() local
174 struct drbd_device *device = peer_req->peer_device->device; in drbd_peer_request_endio()
183 (unsigned long long)peer_req->i.sector); in drbd_peer_request_endio()
186 set_bit(__EE_WAS_ERROR, &peer_req->flags); in drbd_peer_request_endio()
189 if (atomic_dec_and_test(&peer_req->pending_bios)) { in drbd_peer_request_endio()
191 drbd_endio_write_sec_final(peer_req); in drbd_peer_request_endio()
193 drbd_endio_read_sec_final(peer_req); in drbd_peer_request_endio()
287 void drbd_csum_ee(struct crypto_shash *tfm, struct drbd_peer_request *peer_req, void *digest) in drbd_csum_ee() argument
290 struct page *page = peer_req->pages; in drbd_csum_ee()
308 len = peer_req->i.size & (PAGE_SIZE - 1); in drbd_csum_ee()
345 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); in w_e_send_csum() local
346 struct drbd_peer_device *peer_device = peer_req->peer_device; in w_e_send_csum()
355 if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0)) in w_e_send_csum()
361 sector_t sector = peer_req->i.sector; in w_e_send_csum()
362 unsigned int size = peer_req->i.size; in w_e_send_csum()
363 drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest); in w_e_send_csum()
369 drbd_free_peer_req(device, peer_req); in w_e_send_csum()
370 peer_req = NULL; in w_e_send_csum()
382 if (peer_req) in w_e_send_csum()
383 drbd_free_peer_req(device, peer_req); in w_e_send_csum()
395 struct drbd_peer_request *peer_req; in read_for_csum() local
402 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector, in read_for_csum()
404 if (!peer_req) in read_for_csum()
407 peer_req->w.cb = w_e_send_csum; in read_for_csum()
409 list_add_tail(&peer_req->w.list, &device->read_ee); in read_for_csum()
413 if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0, in read_for_csum()
422 list_del(&peer_req->w.list); in read_for_csum()
425 drbd_free_peer_req(device, peer_req); in read_for_csum()
1034 static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req) in move_to_net_ee_or_free() argument
1036 if (drbd_peer_req_has_active_page(peer_req)) { in move_to_net_ee_or_free()
1038 int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT; in move_to_net_ee_or_free()
1042 list_add_tail(&peer_req->w.list, &device->net_ee); in move_to_net_ee_or_free()
1046 drbd_free_peer_req(device, peer_req); in move_to_net_ee_or_free()
1056 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); in w_e_end_data_req() local
1057 struct drbd_peer_device *peer_device = peer_req->peer_device; in w_e_end_data_req()
1062 drbd_free_peer_req(device, peer_req); in w_e_end_data_req()
1067 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { in w_e_end_data_req()
1068 err = drbd_send_block(peer_device, P_DATA_REPLY, peer_req); in w_e_end_data_req()
1072 (unsigned long long)peer_req->i.sector); in w_e_end_data_req()
1074 err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req); in w_e_end_data_req()
1079 move_to_net_ee_or_free(device, peer_req); in w_e_end_data_req()
1086 static bool all_zero(struct drbd_peer_request *peer_req) in all_zero() argument
1088 struct page *page = peer_req->pages; in all_zero()
1089 unsigned int len = peer_req->i.size; in all_zero()
1117 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); in w_e_end_rsdata_req() local
1118 struct drbd_peer_device *peer_device = peer_req->peer_device; in w_e_end_rsdata_req()
1123 drbd_free_peer_req(device, peer_req); in w_e_end_rsdata_req()
1129 drbd_rs_complete_io(device, peer_req->i.sector); in w_e_end_rsdata_req()
1134 err = drbd_send_ack(peer_device, P_RS_CANCEL, peer_req); in w_e_end_rsdata_req()
1135 } else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { in w_e_end_rsdata_req()
1138 if (peer_req->flags & EE_RS_THIN_REQ && all_zero(peer_req)) in w_e_end_rsdata_req()
1139 err = drbd_send_rs_deallocated(peer_device, peer_req); in w_e_end_rsdata_req()
1141 err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req); in w_e_end_rsdata_req()
1151 (unsigned long long)peer_req->i.sector); in w_e_end_rsdata_req()
1153 err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req); in w_e_end_rsdata_req()
1156 drbd_rs_failed_io(device, peer_req->i.sector, peer_req->i.size); in w_e_end_rsdata_req()
1161 move_to_net_ee_or_free(device, peer_req); in w_e_end_rsdata_req()
1170 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); in w_e_end_csum_rs_req() local
1171 struct drbd_peer_device *peer_device = peer_req->peer_device; in w_e_end_csum_rs_req()
1179 drbd_free_peer_req(device, peer_req); in w_e_end_csum_rs_req()
1185 drbd_rs_complete_io(device, peer_req->i.sector); in w_e_end_csum_rs_req()
1189 di = peer_req->digest; in w_e_end_csum_rs_req()
1191 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { in w_e_end_csum_rs_req()
1201 drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest); in w_e_end_csum_rs_req()
1207 drbd_set_in_sync(device, peer_req->i.sector, peer_req->i.size); in w_e_end_csum_rs_req()
1209 device->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT; in w_e_end_csum_rs_req()
1210 err = drbd_send_ack(peer_device, P_RS_IS_IN_SYNC, peer_req); in w_e_end_csum_rs_req()
1213 peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */ in w_e_end_csum_rs_req()
1214 peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */ in w_e_end_csum_rs_req()
1216 err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req); in w_e_end_csum_rs_req()
1219 err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req); in w_e_end_csum_rs_req()
1225 move_to_net_ee_or_free(device, peer_req); in w_e_end_csum_rs_req()
1234 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); in w_e_end_ov_req() local
1235 struct drbd_peer_device *peer_device = peer_req->peer_device; in w_e_end_ov_req()
1237 sector_t sector = peer_req->i.sector; in w_e_end_ov_req()
1238 unsigned int size = peer_req->i.size; in w_e_end_ov_req()
1253 if (likely(!(peer_req->flags & EE_WAS_ERROR))) in w_e_end_ov_req()
1254 drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest); in w_e_end_ov_req()
1263 drbd_free_peer_req(device, peer_req); in w_e_end_ov_req()
1264 peer_req = NULL; in w_e_end_ov_req()
1272 if (peer_req) in w_e_end_ov_req()
1273 drbd_free_peer_req(device, peer_req); in w_e_end_ov_req()
1291 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); in w_e_end_ov_reply() local
1292 struct drbd_peer_device *peer_device = peer_req->peer_device; in w_e_end_ov_reply()
1296 sector_t sector = peer_req->i.sector; in w_e_end_ov_reply()
1297 unsigned int size = peer_req->i.size; in w_e_end_ov_reply()
1303 drbd_free_peer_req(device, peer_req); in w_e_end_ov_reply()
1311 drbd_rs_complete_io(device, peer_req->i.sector); in w_e_end_ov_reply()
1315 di = peer_req->digest; in w_e_end_ov_reply()
1317 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { in w_e_end_ov_reply()
1321 drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest); in w_e_end_ov_reply()
1334 drbd_free_peer_req(device, peer_req); in w_e_end_ov_reply()
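
Note on the completion pattern these matches keep returning to: in the drbd_peer_request_endio() fragments (source lines 173-193), each per-bio completion may set __EE_WAS_ERROR in peer_req->flags, and only the completion that drops peer_req->pending_bios to zero hands the request on to drbd_endio_write_sec_final() or drbd_endio_read_sec_final(), which in turn adjust the device's ee lists (source lines 89, 132) and queue follow-up work on the connection's sender_work (source line 96). The sketch below is a minimal, self-contained userspace C illustration of that "last completer dispatches" pattern, not DRBD code; every demo_* name is a hypothetical stand-in introduced only for this example.

/*
 * Minimal userspace sketch of the pattern visible in the
 * drbd_peer_request_endio() matches above: every sub-I/O completion
 * may record an error, but only the final completion (pending counter
 * reaches zero) dispatches to the read- or write-specific handler.
 *
 * All demo_* names are hypothetical; this is not DRBD code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_peer_request {
	atomic_int  pending_bios;   /* sub-I/Os still in flight                   */
	atomic_bool was_error;      /* sticky error flag, in the spirit of __EE_WAS_ERROR */
	bool        is_write;       /* selects the final handler                  */
};

static void demo_read_sec_final(struct demo_peer_request *req)
{
	printf("read final, error=%d\n", atomic_load(&req->was_error));
}

static void demo_write_sec_final(struct demo_peer_request *req)
{
	printf("write final, error=%d\n", atomic_load(&req->was_error));
}

/* Called once per completed sub-I/O, possibly from different threads. */
static void demo_endio(struct demo_peer_request *req, int error)
{
	if (error)
		atomic_store(&req->was_error, true);

	/* Only the caller that drops the counter to zero finalizes. */
	if (atomic_fetch_sub(&req->pending_bios, 1) == 1) {
		if (req->is_write)
			demo_write_sec_final(req);
		else
			demo_read_sec_final(req);
	}
}

int main(void)
{
	struct demo_peer_request req = {
		.pending_bios = 2,
		.was_error    = false,
		.is_write     = true,
	};

	demo_endio(&req, 0);    /* first sub-I/O: no error, not last        */
	demo_endio(&req, -5);   /* last sub-I/O: error recorded, final runs */
	return 0;
}

Using atomic_fetch_sub(..., 1) == 1 as the "I was last" test plays the same role as atomic_dec_and_test(&peer_req->pending_bios) in the fragment at source line 189; the real driver then does the list manipulation and work queuing shown in the drbd_endio_*_sec_final() matches rather than printing.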