Lines Matching refs:peer_req

206 struct drbd_peer_request *peer_req, *tmp; in reclaim_finished_net_peer_reqs() local
213 list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) { in reclaim_finished_net_peer_reqs()
214 if (drbd_peer_req_has_active_page(peer_req)) in reclaim_finished_net_peer_reqs()
216 list_move(&peer_req->w.list, to_be_freed); in reclaim_finished_net_peer_reqs()
223 struct drbd_peer_request *peer_req, *t; in drbd_reclaim_net_peer_reqs() local
228 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list) in drbd_reclaim_net_peer_reqs()
229 drbd_free_net_peer_req(device, peer_req); in drbd_reclaim_net_peer_reqs()
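The two functions above use the kernel's move-then-free idiom: finished entries are detached from device->net_ee onto a private list while the req_lock spinlock is held, and only freed after the lock is dropped; list_for_each_entry_safe() makes unlinking during iteration safe. A minimal sketch of the idiom with illustrative names (struct item, busy_list, reclaim_finished are not DRBD's):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head list;
	/* payload ... */
};

static LIST_HEAD(busy_list);
static DEFINE_SPINLOCK(busy_lock);

static void reclaim_finished(void)
{
	struct item *it, *tmp;
	LIST_HEAD(to_be_freed);

	/* detach under the lock ... */
	spin_lock_irq(&busy_lock);
	list_for_each_entry_safe(it, tmp, &busy_list, list)
		list_move(&it->list, &to_be_freed);
	spin_unlock_irq(&busy_lock);

	/* ... free without it */
	list_for_each_entry_safe(it, tmp, &to_be_freed, list)
		kfree(it);
}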
374 struct drbd_peer_request *peer_req; in drbd_alloc_peer_req() local
381 peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM); in drbd_alloc_peer_req()
382 if (!peer_req) { in drbd_alloc_peer_req()
395 memset(peer_req, 0, sizeof(*peer_req)); in drbd_alloc_peer_req()
396 INIT_LIST_HEAD(&peer_req->w.list); in drbd_alloc_peer_req()
397 drbd_clear_interval(&peer_req->i); in drbd_alloc_peer_req()
398 peer_req->i.size = request_size; in drbd_alloc_peer_req()
399 peer_req->i.sector = sector; in drbd_alloc_peer_req()
400 peer_req->submit_jif = jiffies; in drbd_alloc_peer_req()
401 peer_req->peer_device = peer_device; in drbd_alloc_peer_req()
402 peer_req->pages = page; in drbd_alloc_peer_req()
407 peer_req->block_id = id; in drbd_alloc_peer_req()
409 return peer_req; in drbd_alloc_peer_req()
412 mempool_free(peer_req, &drbd_ee_mempool); in drbd_alloc_peer_req()
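drbd_alloc_peer_req() draws the descriptor from drbd_ee_mempool and masks out __GFP_HIGHMEM, because the struct itself is accessed through kernel virtual addresses; only the attached data pages may come from highmem. A hedged sketch of the same mempool pattern (struct req, req_pool, alloc_req are illustrative):

#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/string.h>
#include <linux/types.h>

struct req {
	struct list_head list;
	sector_t sector;
	unsigned int size;
};

extern mempool_t req_pool;	/* assumed: initialized elsewhere */

static struct req *alloc_req(sector_t sector, unsigned int size, gfp_t gfp_mask)
{
	/* the descriptor itself must not live in highmem */
	struct req *r = mempool_alloc(&req_pool, gfp_mask & ~__GFP_HIGHMEM);

	if (!r)
		return NULL;

	memset(r, 0, sizeof(*r));
	INIT_LIST_HEAD(&r->list);
	r->sector = sector;
	r->size = size;
	return r;
}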
416 void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req, in __drbd_free_peer_req() argument
420 if (peer_req->flags & EE_HAS_DIGEST) in __drbd_free_peer_req()
421 kfree(peer_req->digest); in __drbd_free_peer_req()
422 drbd_free_pages(device, peer_req->pages, is_net); in __drbd_free_peer_req()
423 D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0); in __drbd_free_peer_req()
424 D_ASSERT(device, drbd_interval_empty(&peer_req->i)); in __drbd_free_peer_req()
425 if (!expect(!(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) { in __drbd_free_peer_req()
426 peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO; in __drbd_free_peer_req()
427 drbd_al_complete_io(device, &peer_req->i); in __drbd_free_peer_req()
429 mempool_free(peer_req, &drbd_ee_mempool); in __drbd_free_peer_req()
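The is_net flag distinguishes entries whose pages are accounted against the net page pool. The call sites in this file reach __drbd_free_peer_req() through two thin wrappers, defined in drbd_int.h roughly as follows (quoted from memory, so verify against the tree):

#define drbd_free_peer_req(m, e)	__drbd_free_peer_req(m, e, 0)
#define drbd_free_net_peer_req(m, e)	__drbd_free_peer_req(m, e, 1)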
435 struct drbd_peer_request *peer_req, *t; in drbd_free_peer_reqs() local
443 list_for_each_entry_safe(peer_req, t, &work_list, w.list) { in drbd_free_peer_reqs()
444 __drbd_free_peer_req(device, peer_req, is_net); in drbd_free_peer_reqs()
457 struct drbd_peer_request *peer_req, *t; in drbd_finish_peer_reqs() local
465 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list) in drbd_finish_peer_reqs()
466 drbd_free_net_peer_req(device, peer_req); in drbd_finish_peer_reqs()
472 list_for_each_entry_safe(peer_req, t, &work_list, w.list) { in drbd_finish_peer_reqs()
476 err2 = peer_req->w.cb(&peer_req->w, !!err); in drbd_finish_peer_reqs()
479 drbd_free_peer_req(device, peer_req); in drbd_finish_peer_reqs()
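drbd_finish_peer_reqs() splices device->done_ee onto a private work_list under the lock, then runs every request's completion callback, remembering the first error while still invoking the rest (note the !!err passed as the cancel argument at line 476). A hedged sketch of that drain loop with generic types (struct work_cb and drain_done are illustrative):

#include <linux/list.h>
#include <linux/spinlock.h>

struct work_cb;
typedef int (*cb_fn)(struct work_cb *w, int cancel);

struct work_cb {
	struct list_head list;
	cb_fn cb;
};

static int drain_done(struct list_head *done, spinlock_t *lock)
{
	struct work_cb *w, *t;
	LIST_HEAD(work_list);
	int err = 0;

	spin_lock_irq(lock);
	list_splice_init(done, &work_list);
	spin_unlock_irq(lock);

	/* keep the first error, but run every callback */
	list_for_each_entry_safe(w, t, &work_list, list) {
		int err2 = w->cb(w, !!err);

		if (!err)
			err = err2;
		list_del(&w->list);
		/* the containing peer_req is freed here in DRBD */
	}
	return err;
}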
1493 static void drbd_issue_peer_discard(struct drbd_device *device, struct drbd_peer_request *peer_req) in drbd_issue_peer_discard() argument
1497 if (blkdev_issue_zeroout(bdev, peer_req->i.sector, peer_req->i.size >> 9, in drbd_issue_peer_discard()
1499 peer_req->flags |= EE_WAS_ERROR; in drbd_issue_peer_discard()
1501 drbd_endio_write_sec_final(peer_req); in drbd_issue_peer_discard()
1505 struct drbd_peer_request *peer_req) in drbd_issue_peer_wsame() argument
1508 sector_t s = peer_req->i.sector; in drbd_issue_peer_wsame()
1509 sector_t nr = peer_req->i.size >> 9; in drbd_issue_peer_wsame()
1510 if (blkdev_issue_write_same(bdev, s, nr, GFP_NOIO, peer_req->pages)) in drbd_issue_peer_wsame()
1511 peer_req->flags |= EE_WAS_ERROR; in drbd_issue_peer_wsame()
1512 drbd_endio_write_sec_final(peer_req); in drbd_issue_peer_wsame()
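Both helpers hand peer_req->i.size, a byte count, to block-layer APIs that count 512-byte sectors, hence the >> 9 (a 4096-byte request becomes 8 sectors). A hedged sketch of that conversion around blkdev_issue_zeroout(), using the 5-argument form of v4.12+ kernels; zero_byte_range is an illustrative name:

#include <linux/blkdev.h>

/* zero out a byte range on bdev, as the discard fallback above does */
static int zero_byte_range(struct block_device *bdev, sector_t start,
			   unsigned int size_bytes)
{
	return blkdev_issue_zeroout(bdev, start, size_bytes >> 9,
				    GFP_NOIO, 0);
}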
1534 struct drbd_peer_request *peer_req, in drbd_submit_peer_request() argument
1540 struct page *page = peer_req->pages; in drbd_submit_peer_request()
1541 sector_t sector = peer_req->i.sector; in drbd_submit_peer_request()
1542 unsigned data_size = peer_req->i.size; in drbd_submit_peer_request()
1553 if (peer_req->flags & (EE_IS_TRIM|EE_WRITE_SAME)) { in drbd_submit_peer_request()
1556 conn_wait_active_ee_empty(peer_req->peer_device->connection); in drbd_submit_peer_request()
1559 peer_req->submit_jif = jiffies; in drbd_submit_peer_request()
1560 peer_req->flags |= EE_SUBMITTED; in drbd_submit_peer_request()
1564 if (list_empty(&peer_req->w.list)) { in drbd_submit_peer_request()
1566 list_add_tail(&peer_req->w.list, &device->active_ee); in drbd_submit_peer_request()
1570 if (peer_req->flags & EE_IS_TRIM) in drbd_submit_peer_request()
1571 drbd_issue_peer_discard(device, peer_req); in drbd_submit_peer_request()
1573 drbd_issue_peer_wsame(device, peer_req); in drbd_submit_peer_request()
1595 bio->bi_private = peer_req; in drbd_submit_peer_request()
1613 atomic_set(&peer_req->pending_bios, n_bios); in drbd_submit_peer_request()
1615 peer_req->submit_jif = jiffies; in drbd_submit_peer_request()
1616 peer_req->flags |= EE_SUBMITTED; in drbd_submit_peer_request()
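On the normal data path, drbd_submit_peer_request() builds a chain of bios over peer_req->pages (linked through bi_next), publishes the count in pending_bios before any submission, and sets EE_SUBMITTED; the completion handler can then use atomic_dec_and_test() on pending_bios to spot the last bio. A hedged sketch of that fan-out (submit_chain is an illustrative name):

#include <linux/atomic.h>
#include <linux/bio.h>

static void submit_chain(struct bio *bios, atomic_t *pending)
{
	struct bio *bio;
	int n = 0;

	for (bio = bios; bio; bio = bio->bi_next)
		n++;

	/* publish the count before any bio can complete */
	atomic_set(pending, n);

	while ((bio = bios)) {
		bios = bio->bi_next;
		bio->bi_next = NULL;
		submit_bio(bio);
	}
}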
1636 struct drbd_peer_request *peer_req) in drbd_remove_epoch_entry_interval() argument
1638 struct drbd_interval *i = &peer_req->i; in drbd_remove_epoch_entry_interval()
1760 struct drbd_peer_request *peer_req; in read_in_block() local
1824 peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO); in read_in_block()
1825 if (!peer_req) in read_in_block()
1828 peer_req->flags |= EE_WRITE; in read_in_block()
1830 peer_req->flags |= EE_IS_TRIM; in read_in_block()
1831 return peer_req; in read_in_block()
1834 peer_req->flags |= EE_WRITE_SAME; in read_in_block()
1838 page = peer_req->pages; in read_in_block()
1849 drbd_free_peer_req(device, peer_req); in read_in_block()
1856 drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv, data_size); in read_in_block()
1860 drbd_free_peer_req(device, peer_req); in read_in_block()
1865 return peer_req; in read_in_block()
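When a peer_integrity_tfm is configured, read_in_block() recomputes a digest over the received payload and compares it with the digest the peer sent inline; a mismatch frees the request and returns NULL, which tears down the connection upstream. The shape of that check, reconstructed from the fragments above (dig_in/dig_vv are the connection's preallocated digest buffers):

	if (digest_size) {
		drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm,
				  peer_req, dig_vv, data_size);
		if (memcmp(dig_in, dig_vv, digest_size)) {
			/* log the integrity failure, then drop the request */
			drbd_free_peer_req(device, peer_req);
			return NULL;
		}
	}
	return peer_req;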
1950 struct drbd_peer_request *peer_req = in e_end_resync_block() local
1952 struct drbd_peer_device *peer_device = peer_req->peer_device; in e_end_resync_block()
1954 sector_t sector = peer_req->i.sector; in e_end_resync_block()
1957 D_ASSERT(device, drbd_interval_empty(&peer_req->i)); in e_end_resync_block()
1959 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { in e_end_resync_block()
1960 drbd_set_in_sync(device, sector, peer_req->i.size); in e_end_resync_block()
1961 err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req); in e_end_resync_block()
1964 drbd_rs_failed_io(device, sector, peer_req->i.size); in e_end_resync_block()
1966 err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req); in e_end_resync_block()
1977 struct drbd_peer_request *peer_req; in recv_resync_read() local
1979 peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi); in recv_resync_read()
1980 if (!peer_req) in recv_resync_read()
1989 peer_req->w.cb = e_end_resync_block; in recv_resync_read()
1990 peer_req->submit_jif = jiffies; in recv_resync_read()
1993 list_add_tail(&peer_req->w.list, &device->sync_ee); in recv_resync_read()
1997 if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0, in recv_resync_read()
2004 list_del(&peer_req->w.list); in recv_resync_read()
2007 drbd_free_peer_req(device, peer_req); in recv_resync_read()
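If the submit fails, recv_resync_read() must take the request back off device->sync_ee under req_lock before freeing it, so no completion path can still reach it. A hedged sketch of the queue/submit/unwind shape (struct req, submit_req, free_req, queue_and_submit are illustrative):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct req { struct list_head list; };

extern spinlock_t req_lock;
extern struct list_head sync_ee;
extern int submit_req(struct req *r);	/* hypothetical submit hook */
extern void free_req(struct req *r);	/* hypothetical free hook */

static int queue_and_submit(struct req *r)
{
	spin_lock_irq(&req_lock);
	list_add_tail(&r->list, &sync_ee);
	spin_unlock_irq(&req_lock);

	if (submit_req(r) == 0)
		return 0;

	/* unwind: unlink before freeing */
	spin_lock_irq(&req_lock);
	list_del(&r->list);
	spin_unlock_irq(&req_lock);

	free_req(r);
	return -EIO;
}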
2124 struct drbd_peer_request *peer_req = in e_end_block() local
2126 struct drbd_peer_device *peer_device = peer_req->peer_device; in e_end_block()
2128 sector_t sector = peer_req->i.sector; in e_end_block()
2131 if (peer_req->flags & EE_SEND_WRITE_ACK) { in e_end_block()
2132 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { in e_end_block()
2135 peer_req->flags & EE_MAY_SET_IN_SYNC) ? in e_end_block()
2137 err = drbd_send_ack(peer_device, pcmd, peer_req); in e_end_block()
2139 drbd_set_in_sync(device, sector, peer_req->i.size); in e_end_block()
2141 err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req); in e_end_block()
2150 if (peer_req->flags & EE_IN_INTERVAL_TREE) { in e_end_block()
2152 D_ASSERT(device, !drbd_interval_empty(&peer_req->i)); in e_end_block()
2153 drbd_remove_epoch_entry_interval(device, peer_req); in e_end_block()
2154 if (peer_req->flags & EE_RESTART_REQUESTS) in e_end_block()
2155 restart_conflicting_writes(device, sector, peer_req->i.size); in e_end_block()
2158 D_ASSERT(device, drbd_interval_empty(&peer_req->i)); in e_end_block()
2160 drbd_may_finish_epoch(peer_device->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0)); in e_end_block()
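With EE_SEND_WRITE_ACK set (protocol C), e_end_block() answers P_RS_WRITE_ACK when the successful write may also mark the range in sync during resync, P_WRITE_ACK otherwise, and P_NEG_ACK on error. Reconstructed from the fragments above; the real function also does out-of-sync bookkeeping on the error branch:

	if (peer_req->flags & EE_SEND_WRITE_ACK) {
		if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
			enum drbd_packet pcmd =
				(device->state.conn >= C_SYNC_SOURCE &&
				 device->state.conn <= C_PAUSED_SYNC_T &&
				 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;

			err = drbd_send_ack(peer_device, pcmd, peer_req);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(device, sector, peer_req->i.size);
		} else {
			err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
		}
	}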
2167 struct drbd_peer_request *peer_req = in e_send_ack() local
2169 struct drbd_peer_device *peer_device = peer_req->peer_device; in e_send_ack()
2172 err = drbd_send_ack(peer_device, ack, peer_req); in e_send_ack()
2185 struct drbd_peer_request *peer_req = in e_send_retry_write() local
2187 struct drbd_connection *connection = peer_req->peer_device->connection; in e_send_retry_write()
2230 static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req) in overlapping_resync_write() argument
2237 if (overlaps(peer_req->i.sector, peer_req->i.size, in overlapping_resync_write()
2361 struct drbd_peer_request *peer_req) in handle_write_conflicts() argument
2363 struct drbd_connection *connection = peer_req->peer_device->connection; in handle_write_conflicts()
2365 sector_t sector = peer_req->i.sector; in handle_write_conflicts()
2366 const unsigned int size = peer_req->i.size; in handle_write_conflicts()
2375 drbd_insert_interval(&device->write_requests, &peer_req->i); in handle_write_conflicts()
2379 if (i == &peer_req->i) in handle_write_conflicts()
2415 peer_req->w.cb = superseded ? e_send_superseded : in handle_write_conflicts()
2417 list_add_tail(&peer_req->w.list, &device->done_ee); in handle_write_conflicts()
2418 queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work); in handle_write_conflicts()
2457 peer_req->flags |= EE_RESTART_REQUESTS; in handle_write_conflicts()
2464 drbd_remove_epoch_entry_interval(device, peer_req); in handle_write_conflicts()
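handle_write_conflicts() first inserts the new request into device->write_requests, an interval tree keyed by sector, then walks every overlapping interval, skipping its own entry (line 2379 above); conflicting peer writes are superseded or retried per the discard-concurrent-writes policy, and on failure the interval is removed again (line 2464). The scan idiom, hedged, assuming the drbd_for_each_overlap() helper from drbd_interval.h:

	drbd_insert_interval(&device->write_requests, &peer_req->i);

	drbd_for_each_overlap(i, &device->write_requests, sector, size) {
		if (i == &peer_req->i)
			continue;	/* our own interval */
		/* ... resolve the conflict with request *i ... */
	}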
2475 struct drbd_peer_request *peer_req; in receive_Data() local
2506 peer_req = read_in_block(peer_device, p->block_id, sector, pi); in receive_Data()
2507 if (!peer_req) { in receive_Data()
2512 peer_req->w.cb = e_end_block; in receive_Data()
2513 peer_req->submit_jif = jiffies; in receive_Data()
2514 peer_req->flags |= EE_APPLICATION; in receive_Data()
2520 D_ASSERT(peer_device, peer_req->i.size > 0); in receive_Data()
2522 D_ASSERT(peer_device, peer_req->pages == NULL); in receive_Data()
2523 } else if (peer_req->pages == NULL) { in receive_Data()
2524 D_ASSERT(device, peer_req->i.size == 0); in receive_Data()
2529 peer_req->flags |= EE_MAY_SET_IN_SYNC; in receive_Data()
2532 peer_req->epoch = connection->current_epoch; in receive_Data()
2533 atomic_inc(&peer_req->epoch->epoch_size); in receive_Data()
2534 atomic_inc(&peer_req->epoch->active); in receive_Data()
2553 peer_req->flags |= EE_SEND_WRITE_ACK; in receive_Data()
2562 drbd_send_ack(peer_device, P_RECV_ACK, peer_req); in receive_Data()
2568 peer_req->flags |= EE_IN_INTERVAL_TREE; in receive_Data()
2573 err = handle_write_conflicts(device, peer_req); in receive_Data()
2590 if ((peer_req->flags & (EE_IS_TRIM|EE_WRITE_SAME)) == 0) in receive_Data()
2591 list_add_tail(&peer_req->w.list, &device->active_ee); in receive_Data()
2595 wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req)); in receive_Data()
2599 drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size); in receive_Data()
2600 peer_req->flags &= ~EE_MAY_SET_IN_SYNC; in receive_Data()
2601 drbd_al_begin_io(device, &peer_req->i); in receive_Data()
2602 peer_req->flags |= EE_CALL_AL_COMPLETE_IO; in receive_Data()
2605 err = drbd_submit_peer_request(device, peer_req, op, op_flags, in receive_Data()
2613 list_del(&peer_req->w.list); in receive_Data()
2614 drbd_remove_epoch_entry_interval(device, peer_req); in receive_Data()
2616 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) { in receive_Data()
2617 peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO; in receive_Data()
2618 drbd_al_complete_io(device, &peer_req->i); in receive_Data()
2622 drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT | EV_CLEANUP); in receive_Data()
2624 drbd_free_peer_req(device, peer_req); in receive_Data()
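The tail of receive_Data() is the unwind for a failed submit: the request joined connection->current_epoch on entry (lines 2532-2534), was linked into active_ee and possibly the interval tree, and may hold an activity-log reference (EE_CALL_AL_COMPLETE_IO), so the error path reverses each step before freeing. Reconstructed from the fragments above; the exact ordering in drbd_receiver.c may differ slightly:

	spin_lock_irq(&device->resource->req_lock);
	list_del(&peer_req->w.list);
	drbd_remove_epoch_entry_interval(device, peer_req);
	spin_unlock_irq(&device->resource->req_lock);

	if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) {
		peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
		drbd_al_complete_io(device, &peer_req->i);
	}

	drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT | EV_CLEANUP);
	drbd_free_peer_req(device, peer_req);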
2714 struct drbd_peer_request *peer_req; in receive_DataRequest() local
2771 peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size, in receive_DataRequest()
2773 if (!peer_req) { in receive_DataRequest()
2780 peer_req->w.cb = w_e_end_data_req; in receive_DataRequest()
2783 peer_req->flags |= EE_APPLICATION; in receive_DataRequest()
2791 peer_req->flags |= EE_RS_THIN_REQ; in receive_DataRequest()
2794 peer_req->w.cb = w_e_end_rsdata_req; in receive_DataRequest()
2810 peer_req->digest = di; in receive_DataRequest()
2811 peer_req->flags |= EE_HAS_DIGEST; in receive_DataRequest()
2818 peer_req->w.cb = w_e_end_csum_rs_req; in receive_DataRequest()
2826 peer_req->w.cb = w_e_end_ov_reply; in receive_DataRequest()
2850 peer_req->w.cb = w_e_end_ov_req; in receive_DataRequest()
2886 list_add_tail(&peer_req->w.list, &device->read_ee); in receive_DataRequest()
2903 if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0, in receive_DataRequest()
2912 list_del(&peer_req->w.list); in receive_DataRequest()
2917 drbd_free_peer_req(device, peer_req); in receive_DataRequest()
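receive_DataRequest() picks the completion callback by packet type: plain reads get w_e_end_data_req, resync reads w_e_end_rsdata_req, checksum-based resync w_e_end_csum_rs_req, and online verify w_e_end_ov_reply / w_e_end_ov_req. Condensed into a bare switch; the real function interleaves this with digest allocation and resync throttling:

	switch (pi->cmd) {
	case P_DATA_REQUEST:
		peer_req->w.cb = w_e_end_data_req;
		break;
	case P_RS_THIN_REQ:
		/* also sets EE_RS_THIN_REQ, see line 2791 */
	case P_RS_DATA_REQUEST:
		peer_req->w.cb = w_e_end_rsdata_req;
		break;
	case P_CSUM_RS_REQUEST:
		peer_req->w.cb = w_e_end_csum_rs_req;
		break;
	case P_OV_REPLY:
		peer_req->w.cb = w_e_end_ov_reply;
		break;
	case P_OV_REQUEST:
		peer_req->w.cb = w_e_end_ov_req;
		break;
	default:
		BUG();
	}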
4835 struct drbd_peer_request *peer_req; in receive_rs_deallocated() local
4838 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector, in receive_rs_deallocated()
4840 if (!peer_req) { in receive_rs_deallocated()
4845 peer_req->w.cb = e_end_resync_block; in receive_rs_deallocated()
4846 peer_req->submit_jif = jiffies; in receive_rs_deallocated()
4847 peer_req->flags |= EE_IS_TRIM; in receive_rs_deallocated()
4850 list_add_tail(&peer_req->w.list, &device->sync_ee); in receive_rs_deallocated()
4854 err = drbd_submit_peer_request(device, peer_req, op, 0, DRBD_FAULT_RS_WR); in receive_rs_deallocated()
4858 list_del(&peer_req->w.list); in receive_rs_deallocated()
4861 drbd_free_peer_req(device, peer_req); in receive_rs_deallocated()