Lines Matching refs:req in drivers/block/drbd/drbd_req.c

26 struct drbd_request *req; in drbd_req_new() local
28 req = mempool_alloc(&drbd_request_mempool, GFP_NOIO); in drbd_req_new()
29 if (!req) in drbd_req_new()
31 memset(req, 0, sizeof(*req)); in drbd_req_new()
33 req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0) in drbd_req_new()
36 req->device = device; in drbd_req_new()
37 req->master_bio = bio_src; in drbd_req_new()
38 req->epoch = 0; in drbd_req_new()
40 drbd_clear_interval(&req->i); in drbd_req_new()
41 req->i.sector = bio_src->bi_iter.bi_sector; in drbd_req_new()
42 req->i.size = bio_src->bi_iter.bi_size; in drbd_req_new()
43 req->i.local = true; in drbd_req_new()
44 req->i.waiting = false; in drbd_req_new()
46 INIT_LIST_HEAD(&req->tl_requests); in drbd_req_new()
47 INIT_LIST_HEAD(&req->w.list); in drbd_req_new()
48 INIT_LIST_HEAD(&req->req_pending_master_completion); in drbd_req_new()
49 INIT_LIST_HEAD(&req->req_pending_local); in drbd_req_new()
52 atomic_set(&req->completion_ref, 1); in drbd_req_new()
54 kref_init(&req->kref); in drbd_req_new()
55 return req; in drbd_req_new()
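
Note: the drbd_req_new() matches above show the allocation idiom: the request comes from a dedicated mempool with GFP_NOIO (safe to call from the I/O path), is zeroed, and has both of its reference counters initialized to 1 before it is linked anywhere. A minimal userspace sketch of that idiom, with calloc() standing in for mempool_alloc()+memset() and C11 atomics for kref/atomic_t; all toy_* names are hypothetical:

    /* Sketch only: zero the object, then set up both reference counters
     * before anything else can see it, as drbd_req_new() does. */
    #include <stdatomic.h>
    #include <stdlib.h>

    struct toy_request {
        atomic_int kref;            /* object lifetime, cf. req->kref */
        atomic_int completion_ref;  /* gates completion toward the submitter */
        unsigned long long sector;  /* cf. req->i.sector */
        unsigned int size;          /* cf. req->i.size */
    };

    static struct toy_request *toy_req_new(unsigned long long sector, unsigned int size)
    {
        struct toy_request *req = calloc(1, sizeof(*req));

        if (!req)
            return NULL;
        req->sector = sector;
        req->size = size;
        atomic_init(&req->completion_ref, 1); /* held until the submitter is answered */
        atomic_init(&req->kref, 1);           /* held by the creator */
        return req;
    }
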
59 struct drbd_request *req) in drbd_remove_request_interval() argument
61 struct drbd_device *device = req->device; in drbd_remove_request_interval()
62 struct drbd_interval *i = &req->i; in drbd_remove_request_interval()
73 struct drbd_request *req = container_of(kref, struct drbd_request, kref); in drbd_req_destroy() local
74 struct drbd_device *device = req->device; in drbd_req_destroy()
75 const unsigned s = req->rq_state; in drbd_req_destroy()
77 if ((req->master_bio && !(s & RQ_POSTPONED)) || in drbd_req_destroy()
78 atomic_read(&req->completion_ref) || in drbd_req_destroy()
82 s, atomic_read(&req->completion_ref)); in drbd_req_destroy()
94 list_del_init(&req->tl_requests); in drbd_req_destroy()
98 if (!drbd_interval_empty(&req->i)) { in drbd_req_destroy()
105 drbd_remove_request_interval(root, req); in drbd_req_destroy()
106 } else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0) in drbd_req_destroy()
108 s, (unsigned long long)req->i.sector, req->i.size); in drbd_req_destroy()
127 drbd_set_out_of_sync(device, req->i.sector, req->i.size); in drbd_req_destroy()
130 drbd_set_in_sync(device, req->i.sector, req->i.size); in drbd_req_destroy()
145 drbd_al_complete_io(device, &req->i); in drbd_req_destroy()
150 (unsigned long long) req->i.sector, req->i.size); in drbd_req_destroy()
155 mempool_free(req, &drbd_request_mempool); in drbd_req_destroy()
192 void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) in drbd_req_complete() argument
194 const unsigned s = req->rq_state; in drbd_req_complete()
195 struct drbd_device *device = req->device; in drbd_req_complete()
214 if (!req->master_bio) { in drbd_req_complete()
233 error = PTR_ERR(req->private_bio); in drbd_req_complete()
242 if (op_is_write(bio_op(req->master_bio)) && in drbd_req_complete()
243 req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr)) in drbd_req_complete()
247 bio_end_io_acct(req->master_bio, req->start_jif); in drbd_req_complete()
264 bio_op(req->master_bio) == REQ_OP_READ && in drbd_req_complete()
265 !(req->master_bio->bi_opf & REQ_RAHEAD) && in drbd_req_complete()
266 !list_empty(&req->tl_requests)) in drbd_req_complete()
267 req->rq_state |= RQ_POSTPONED; in drbd_req_complete()
269 if (!(req->rq_state & RQ_POSTPONED)) { in drbd_req_complete()
271 m->bio = req->master_bio; in drbd_req_complete()
272 req->master_bio = NULL; in drbd_req_complete()
277 req->i.completed = true; in drbd_req_complete()
280 if (req->i.waiting) in drbd_req_complete()
287 list_del_init(&req->req_pending_master_completion); in drbd_req_complete()
291 static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put) in drbd_req_put_completion_ref() argument
293 struct drbd_device *device = req->device; in drbd_req_put_completion_ref()
294 D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED)); in drbd_req_put_completion_ref()
299 if (!atomic_sub_and_test(put, &req->completion_ref)) in drbd_req_put_completion_ref()
302 drbd_req_complete(req, m); in drbd_req_put_completion_ref()
306 if (req->rq_state & RQ_LOCAL_ABORTED) in drbd_req_put_completion_ref()
309 if (req->rq_state & RQ_POSTPONED) { in drbd_req_put_completion_ref()
312 drbd_restart_request(req); in drbd_req_put_completion_ref()
316 kref_put(&req->kref, drbd_req_destroy); in drbd_req_put_completion_ref()
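
Note: drbd_req_put_completion_ref() and drbd_req_destroy() together implement a two-counter lifetime scheme: completion_ref decides when the request may be completed toward its submitter (drbd_req_complete() runs when it drops to zero), while the separate kref decides when the struct itself may be freed back to the mempool. A sketch of that scheme under the same simplifications as above; toy_* names are hypothetical:

    /* Sketch only: dropping the last completion reference completes the
     * request; the separate kref decides when the object is freed. */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct toy_request { atomic_int kref, completion_ref; };

    static void toy_kref_put(struct toy_request *req)
    {
        if (atomic_fetch_sub(&req->kref, 1) == 1)   /* was the last reference */
            free(req);   /* cf. kref_put(&req->kref, drbd_req_destroy) */
    }

    static void toy_put_completion_ref(struct toy_request *req, int put)
    {
        /* cf. atomic_sub_and_test(put, &req->completion_ref) */
        if (atomic_fetch_sub(&req->completion_ref, put) != put)
            return;   /* other completion references remain */
        printf("completing request toward submitter\n"); /* cf. drbd_req_complete() */
        toy_kref_put(req);   /* drop the reference the counter held */
    }
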
319 static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req) in set_if_null_req_next() argument
325 connection->req_next = req; in set_if_null_req_next()
328 static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req) in advance_conn_req_next() argument
331 struct drbd_request *iter = req; in advance_conn_req_next()
334 if (connection->req_next != req) in advance_conn_req_next()
337 req = NULL; in advance_conn_req_next()
342 req = iter; in advance_conn_req_next()
346 connection->req_next = req; in advance_conn_req_next()
349 static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req) in set_if_null_req_ack_pending() argument
355 connection->req_ack_pending = req; in set_if_null_req_ack_pending()
358 static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req) in advance_conn_req_ack_pending() argument
361 struct drbd_request *iter = req; in advance_conn_req_ack_pending()
364 if (connection->req_ack_pending != req) in advance_conn_req_ack_pending()
367 req = NULL; in advance_conn_req_ack_pending()
372 req = iter; in advance_conn_req_ack_pending()
376 connection->req_ack_pending = req; in advance_conn_req_ack_pending()
379 static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req) in set_if_null_req_not_net_done() argument
385 connection->req_not_net_done = req; in set_if_null_req_not_net_done()
388 static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req) in advance_conn_req_not_net_done() argument
391 struct drbd_request *iter = req; in advance_conn_req_not_net_done()
394 if (connection->req_not_net_done != req) in advance_conn_req_not_net_done()
397 req = NULL; in advance_conn_req_not_net_done()
402 req = iter; in advance_conn_req_not_net_done()
406 connection->req_not_net_done = req; in advance_conn_req_not_net_done()
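
Note: the three set_if_null_*/advance_conn_* pairs maintain per-connection cursors (req_next, req_ack_pending, req_not_net_done) pointing at the oldest request still in the respective state, so timeout checks need not rescan the whole transfer log. set_if_null_* installs a request only while the cursor is empty; advance_conn_* walks the cursor forward once the tracked request leaves the state. A sketch of the cursor idiom over a plain singly linked list; toy_* names are hypothetical:

    /* Sketch only: a cached pointer to the oldest element in some state,
     * advanced along the list instead of rescanned from the head. */
    #include <stdbool.h>

    struct toy_req {
        struct toy_req *next;   /* transfer-log order, oldest first */
        bool still_pending;     /* the state the cursor tracks */
    };

    struct toy_conn { struct toy_req *req_next; };

    static void toy_set_if_null(struct toy_conn *c, struct toy_req *req)
    {
        if (!c->req_next)       /* only install when no older request is tracked */
            c->req_next = req;
    }

    static void toy_advance(struct toy_conn *c, struct toy_req *req)
    {
        if (c->req_next != req) /* an older request is still tracked */
            return;
        /* walk forward to the next request still in the state */
        for (req = req->next; req; req = req->next)
            if (req->still_pending)
                break;
        c->req_next = req;      /* may become NULL: nothing pending anymore */
    }
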
411 static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, in mod_rq_state() argument
414 struct drbd_device *device = req->device; in mod_rq_state()
416 unsigned s = req->rq_state; in mod_rq_state()
424 req->rq_state &= ~clear; in mod_rq_state()
425 req->rq_state |= set; in mod_rq_state()
428 if (req->rq_state == s) in mod_rq_state()
433 kref_get(&req->kref); in mod_rq_state()
436 atomic_inc(&req->completion_ref); in mod_rq_state()
440 atomic_inc(&req->completion_ref); in mod_rq_state()
444 atomic_inc(&req->completion_ref); in mod_rq_state()
445 set_if_null_req_next(peer_device, req); in mod_rq_state()
449 kref_get(&req->kref); /* wait for the DONE */ in mod_rq_state()
454 atomic_add(req->i.size >> 9, &device->ap_in_flight); in mod_rq_state()
455 set_if_null_req_not_net_done(peer_device, req); in mod_rq_state()
457 if (req->rq_state & RQ_NET_PENDING) in mod_rq_state()
458 set_if_null_req_ack_pending(peer_device, req); in mod_rq_state()
462 atomic_inc(&req->completion_ref); in mod_rq_state()
470 D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING); in mod_rq_state()
475 if (req->rq_state & RQ_LOCAL_ABORTED) in mod_rq_state()
476 kref_put(&req->kref, drbd_req_destroy); in mod_rq_state()
479 list_del_init(&req->req_pending_local); in mod_rq_state()
485 req->acked_jif = jiffies; in mod_rq_state()
486 advance_conn_req_ack_pending(peer_device, req); in mod_rq_state()
491 advance_conn_req_next(peer_device, req); in mod_rq_state()
496 atomic_sub(req->i.size >> 9, &device->ap_in_flight); in mod_rq_state()
498 kref_put(&req->kref, drbd_req_destroy); in mod_rq_state()
499 req->net_done_jif = jiffies; in mod_rq_state()
504 advance_conn_req_next(peer_device, req); in mod_rq_state()
505 advance_conn_req_ack_pending(peer_device, req); in mod_rq_state()
506 advance_conn_req_not_net_done(peer_device, req); in mod_rq_state()
512 if (req->i.waiting) in mod_rq_state()
515 drbd_req_put_completion_ref(req, m, c_put); in mod_rq_state()
516 kref_put(&req->kref, drbd_req_destroy); in mod_rq_state()
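
Note: mod_rq_state() applies a clear mask and a set mask to rq_state in one step and derives all reference-count traffic from the bits that actually flipped: bits going 0->1 take completion/kref references, bits going 1->0 accumulate puts that are dropped at the end. A sketch of that diff-driven update; the masks and toy_* names are hypothetical:

    /* Sketch only: update a bitmask state with a clear mask and a set
     * mask, then act only on the bits that actually changed. */
    #include <stdio.h>

    #define T_LOCAL_PENDING (1u << 0)
    #define T_NET_PENDING   (1u << 1)

    static void toy_mod_rq_state(unsigned *state, unsigned clear, unsigned set)
    {
        unsigned old = *state;
        unsigned now = (old & ~clear) | set;

        if (now == old)   /* cf. "if (req->rq_state == s) return" */
            return;
        *state = now;

        /* bits that went 0 -> 1: take references, cf. atomic_inc(&completion_ref) */
        if (!(old & T_LOCAL_PENDING) && (now & T_LOCAL_PENDING))
            printf("get completion ref for local I/O\n");
        /* bits that went 1 -> 0: drop references, cf. ++c_put / kref_put */
        if ((old & T_NET_PENDING) && !(now & T_NET_PENDING))
            printf("put completion ref for network ack\n");
    }
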
519 static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req) in drbd_report_io_error() argument
525 (req->rq_state & RQ_WRITE) ? "WRITE" : "READ", in drbd_report_io_error()
526 (unsigned long long)req->i.sector, in drbd_report_io_error()
527 req->i.size >> 9, in drbd_report_io_error()
537 static inline bool is_pending_write_protocol_A(struct drbd_request *req) in is_pending_write_protocol_A() argument
539 return (req->rq_state & in is_pending_write_protocol_A()
556 int __req_mod(struct drbd_request *req, enum drbd_req_event what, in __req_mod() argument
559 struct drbd_device *const device = req->device; in __req_mod()
582 D_ASSERT(device, !(req->rq_state & RQ_NET_MASK)); in __req_mod()
587 req->rq_state |= in __req_mod()
590 mod_rq_state(req, m, 0, RQ_NET_PENDING); in __req_mod()
595 D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK)); in __req_mod()
596 mod_rq_state(req, m, 0, RQ_LOCAL_PENDING); in __req_mod()
600 if (req->rq_state & RQ_WRITE) in __req_mod()
601 device->writ_cnt += req->i.size >> 9; in __req_mod()
603 device->read_cnt += req->i.size >> 9; in __req_mod()
605 mod_rq_state(req, m, RQ_LOCAL_PENDING, in __req_mod()
610 mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED); in __req_mod()
614 drbd_report_io_error(device, req); in __req_mod()
616 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); in __req_mod()
620 drbd_set_out_of_sync(device, req->i.sector, req->i.size); in __req_mod()
621 drbd_report_io_error(device, req); in __req_mod()
626 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); in __req_mod()
633 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); in __req_mod()
647 D_ASSERT(device, drbd_interval_empty(&req->i)); in __req_mod()
648 drbd_insert_interval(&device->read_requests, &req->i); in __req_mod()
652 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
653 D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0); in __req_mod()
654 mod_rq_state(req, m, 0, RQ_NET_QUEUED); in __req_mod()
655 req->w.cb = w_send_read_req; in __req_mod()
657 &req->w); in __req_mod()
666 D_ASSERT(device, drbd_interval_empty(&req->i)); in __req_mod()
667 drbd_insert_interval(&device->write_requests, &req->i); in __req_mod()
689 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
690 mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK); in __req_mod()
691 req->w.cb = w_send_dblock; in __req_mod()
693 &req->w); in __req_mod()
706 mod_rq_state(req, m, 0, RQ_NET_QUEUED); in __req_mod()
707 req->w.cb = w_send_out_of_sync; in __req_mod()
709 &req->w); in __req_mod()
717 mod_rq_state(req, m, RQ_NET_QUEUED, 0); in __req_mod()
722 if (is_pending_write_protocol_A(req)) in __req_mod()
725 mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING, in __req_mod()
728 mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT); in __req_mod()
737 mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE); in __req_mod()
742 mod_rq_state(req, m, in __req_mod()
755 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
756 D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK); in __req_mod()
757 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK); in __req_mod()
761 req->rq_state |= RQ_NET_SIS; in __req_mod()
773 D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK); in __req_mod()
778 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK); in __req_mod()
782 D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK); in __req_mod()
787 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
788 req->rq_state |= RQ_POSTPONED; in __req_mod()
789 if (req->i.waiting) in __req_mod()
797 mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0); in __req_mod()
801 if (!(req->rq_state & RQ_LOCAL_COMPLETED)) in __req_mod()
803 mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0); in __req_mod()
807 if (!(req->rq_state & RQ_LOCAL_COMPLETED)) in __req_mod()
810 mod_rq_state(req, m, in __req_mod()
815 if (bio_data_dir(req->master_bio) == WRITE) in __req_mod()
819 req->w.cb = w_restart_disk_io; in __req_mod()
821 &req->w); in __req_mod()
826 if (!(req->rq_state & RQ_WRITE) && !req->w.cb) { in __req_mod()
827 mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0); in __req_mod()
836 if (!(req->rq_state & RQ_NET_OK)) { in __req_mod()
840 mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING); in __req_mod()
841 if (req->w.cb) { in __req_mod()
844 &req->w); in __req_mod()
845 rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ; in __req_mod()
853 if (!(req->rq_state & RQ_WRITE)) in __req_mod()
856 if (req->rq_state & RQ_NET_PENDING) { in __req_mod()
866 mod_rq_state(req, m, RQ_COMPLETION_SUSP, in __req_mod()
867 (req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0); in __req_mod()
871 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
872 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE); in __req_mod()
877 mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE); in __req_mod()
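
Note: __req_mod() is the single entry point of the request state machine: every event (local completion, queueing for the network, peer acks, connection loss, resend) passes through one switch that translates the event into clear/set masks for mod_rq_state(), keeping all reference counting in one place. A compressed, self-contained sketch of that shape; the enum and masks below are a hypothetical miniature, not the real drbd_req_event set:

    /* Sketch only: one switch mapping request events onto state-mask updates. */
    #include <assert.h>

    #define T_LOCAL_PENDING   (1u << 0)
    #define T_LOCAL_COMPLETED (1u << 1)
    #define T_NET_PENDING     (1u << 2)
    #define T_NET_OK          (1u << 3)

    enum toy_event { T_TO_BE_SUBMITTED, T_COMPLETED_OK, T_WRITE_ACKED };

    static void toy_req_mod(unsigned *state, enum toy_event what)
    {
        switch (what) {
        case T_TO_BE_SUBMITTED:              /* cf. TO_BE_SUBMITTED */
            *state |= T_LOCAL_PENDING;
            break;
        case T_COMPLETED_OK:                 /* cf. COMPLETED_OK */
            assert(*state & T_LOCAL_PENDING);
            *state = (*state & ~T_LOCAL_PENDING) | T_LOCAL_COMPLETED;
            break;
        case T_WRITE_ACKED:                  /* cf. WRITE_ACKED_BY_PEER */
            assert(*state & T_NET_PENDING);
            *state = (*state & ~T_NET_PENDING) | T_NET_OK;
            break;
        }
    }
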
949 static void complete_conflicting_writes(struct drbd_request *req) in complete_conflicting_writes() argument
952 struct drbd_device *device = req->device; in complete_conflicting_writes()
954 sector_t sector = req->i.sector; in complete_conflicting_writes()
955 int size = req->i.size; in complete_conflicting_writes()
1037 static bool do_remote_read(struct drbd_request *req) in do_remote_read() argument
1039 struct drbd_device *device = req->device; in do_remote_read()
1042 if (req->private_bio) { in do_remote_read()
1044 req->i.sector, req->i.size)) { in do_remote_read()
1045 bio_put(req->private_bio); in do_remote_read()
1046 req->private_bio = NULL; in do_remote_read()
1054 if (req->private_bio == NULL) in do_remote_read()
1064 if (rbm == RB_PREFER_LOCAL && req->private_bio) in do_remote_read()
1067 if (remote_due_to_read_balancing(device, req->i.sector, rbm)) { in do_remote_read()
1068 if (req->private_bio) { in do_remote_read()
1069 bio_put(req->private_bio); in do_remote_read()
1070 req->private_bio = NULL; in do_remote_read()
1100 static int drbd_process_write_request(struct drbd_request *req) in drbd_process_write_request() argument
1102 struct drbd_device *device = req->device; in drbd_process_write_request()
1114 if (unlikely(req->i.size == 0)) { in drbd_process_write_request()
1116 D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH); in drbd_process_write_request()
1118 _req_mod(req, QUEUE_AS_DRBD_BARRIER); in drbd_process_write_request()
1128 _req_mod(req, TO_BE_SENT); in drbd_process_write_request()
1129 _req_mod(req, QUEUE_FOR_NET_WRITE); in drbd_process_write_request()
1130 } else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size)) in drbd_process_write_request()
1131 _req_mod(req, QUEUE_FOR_SEND_OOS); in drbd_process_write_request()
1136 static void drbd_process_discard_or_zeroes_req(struct drbd_request *req, int flags) in drbd_process_discard_or_zeroes_req() argument
1138 int err = drbd_issue_discard_or_zero_out(req->device, in drbd_process_discard_or_zeroes_req()
1139 req->i.sector, req->i.size >> 9, flags); in drbd_process_discard_or_zeroes_req()
1141 req->private_bio->bi_status = BLK_STS_IOERR; in drbd_process_discard_or_zeroes_req()
1142 bio_endio(req->private_bio); in drbd_process_discard_or_zeroes_req()
1146 drbd_submit_req_private_bio(struct drbd_request *req) in drbd_submit_req_private_bio() argument
1148 struct drbd_device *device = req->device; in drbd_submit_req_private_bio()
1149 struct bio *bio = req->private_bio; in drbd_submit_req_private_bio()
1168 drbd_process_discard_or_zeroes_req(req, EE_ZEROOUT | in drbd_submit_req_private_bio()
1171 drbd_process_discard_or_zeroes_req(req, EE_TRIM); in drbd_submit_req_private_bio()
1179 static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req) in drbd_queue_write() argument
1182 list_add_tail(&req->tl_requests, &device->submit.writes); in drbd_queue_write()
1183 list_add_tail(&req->req_pending_master_completion, in drbd_queue_write()
1200 struct drbd_request *req; in drbd_request_prepare() local
1203 req = drbd_req_new(device, bio); in drbd_request_prepare()
1204 if (!req) { in drbd_request_prepare()
1215 req->start_jif = bio_start_io_acct(req->master_bio); in drbd_request_prepare()
1218 req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, in drbd_request_prepare()
1221 req->private_bio->bi_private = req; in drbd_request_prepare()
1222 req->private_bio->bi_end_io = drbd_request_endio; in drbd_request_prepare()
1230 if (rw == WRITE && req->private_bio && req->i.size in drbd_request_prepare()
1232 if (!drbd_al_begin_io_fastpath(device, &req->i)) in drbd_request_prepare()
1234 req->rq_state |= RQ_IN_ACT_LOG; in drbd_request_prepare()
1235 req->in_actlog_jif = jiffies; in drbd_request_prepare()
1237 return req; in drbd_request_prepare()
1241 drbd_queue_write(device, req); in drbd_request_prepare()
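
Note: drbd_request_prepare() clones the incoming bio into private_bio for local submission and then tries the activity-log fast path: if drbd_al_begin_io_fastpath() succeeds, the request is tagged RQ_IN_ACT_LOG and returned for immediate submission; otherwise drbd_queue_write() defers it to the submitter worker, which opens an AL transaction. A sketch of that fast-path/queue split; toy_* names are hypothetical:

    /* Sketch only: try a cheap check first, defer to a worker when it fails. */
    #include <stdbool.h>
    #include <stddef.h>

    struct toy_req { unsigned rq_state; };
    #define TOY_IN_ACT_LOG (1u << 0)

    static void toy_queue_write(struct toy_req *req)
    {
        (void)req;   /* hand off to the submitter worker, cf. drbd_queue_write() */
    }

    static struct toy_req *toy_prepare(struct toy_req *req, bool extent_hot)
    {
        if (extent_hot) {                    /* cf. drbd_al_begin_io_fastpath() */
            req->rq_state |= TOY_IN_ACT_LOG; /* cf. RQ_IN_ACT_LOG, in_actlog_jif */
            return req;                      /* caller submits immediately */
        }
        toy_queue_write(req);                /* slow path: AL transaction needed */
        return NULL;                         /* submitter worker takes over */
    }
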
1271 struct drbd_request *req = plug->most_recent_req; in drbd_unplug() local
1274 if (!req) in drbd_unplug()
1280 req->rq_state |= RQ_UNPLUG; in drbd_unplug()
1282 drbd_queue_unplug(req->device); in drbd_unplug()
1283 kref_put(&req->kref, drbd_req_destroy); in drbd_unplug()
1301 static void drbd_update_plug(struct drbd_plug_cb *plug, struct drbd_request *req) in drbd_update_plug() argument
1306 kref_get(&req->kref); in drbd_update_plug()
1307 plug->most_recent_req = req; in drbd_update_plug()
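
Note: drbd_update_plug()/drbd_unplug() pin the most recently queued request with kref_get() so it is still valid when the block layer unplugs; the unplug path tags it (RQ_UNPLUG), kicks the sender, and drops the reference. A sketch of that pin-the-latest idiom; toy_* names are hypothetical:

    /* Sketch only: the plug holds one reference on the most recent request
     * so it survives until the unplug callback fires. */
    #include <stdatomic.h>
    #include <stdlib.h>

    struct toy_req { atomic_int kref; };
    struct toy_plug { struct toy_req *most_recent; };

    static void toy_ref_put(struct toy_req *req)
    {
        if (atomic_fetch_sub(&req->kref, 1) == 1)
            free(req);   /* cf. kref_put(&req->kref, drbd_req_destroy) */
    }

    static void toy_update_plug(struct toy_plug *plug, struct toy_req *req)
    {
        atomic_fetch_add(&req->kref, 1);     /* cf. kref_get(&req->kref) */
        if (plug->most_recent)
            toy_ref_put(plug->most_recent); /* drop the previously pinned request */
        plug->most_recent = req;
    }

    static void toy_unplug(struct toy_plug *plug)
    {
        struct toy_req *req = plug->most_recent;

        if (!req)
            return;
        plug->most_recent = NULL;
        /* ... mark the request and kick the sender, cf. RQ_UNPLUG ... */
        toy_ref_put(req);
    }
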
1312 static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req) in drbd_send_and_submit() argument
1315 const int rw = bio_data_dir(req->master_bio); in drbd_send_and_submit()
1325 complete_conflicting_writes(req); in drbd_send_and_submit()
1336 req->rq_state |= RQ_POSTPONED; in drbd_send_and_submit()
1337 if (req->private_bio) { in drbd_send_and_submit()
1338 bio_put(req->private_bio); in drbd_send_and_submit()
1339 req->private_bio = NULL; in drbd_send_and_submit()
1349 if (!do_remote_read(req) && !req->private_bio) in drbd_send_and_submit()
1354 req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr); in drbd_send_and_submit()
1358 if (likely(req->i.size!=0)) { in drbd_send_and_submit()
1362 list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log); in drbd_send_and_submit()
1366 if (req->private_bio && !may_do_writes(device)) { in drbd_send_and_submit()
1367 bio_put(req->private_bio); in drbd_send_and_submit()
1368 req->private_bio = NULL; in drbd_send_and_submit()
1372 if (!drbd_process_write_request(req)) in drbd_send_and_submit()
1377 if (req->private_bio == NULL) { in drbd_send_and_submit()
1378 _req_mod(req, TO_BE_SENT); in drbd_send_and_submit()
1379 _req_mod(req, QUEUE_FOR_NET_READ); in drbd_send_and_submit()
1387 drbd_update_plug(plug, req); in drbd_send_and_submit()
1392 if (list_empty(&req->req_pending_master_completion)) in drbd_send_and_submit()
1393 list_add_tail(&req->req_pending_master_completion, in drbd_send_and_submit()
1395 if (req->private_bio) { in drbd_send_and_submit()
1397 req->pre_submit_jif = jiffies; in drbd_send_and_submit()
1398 list_add_tail(&req->req_pending_local, in drbd_send_and_submit()
1400 _req_mod(req, TO_BE_SUBMITTED); in drbd_send_and_submit()
1407 (unsigned long long)req->i.sector, req->i.size >> 9); in drbd_send_and_submit()
1413 drbd_req_put_completion_ref(req, &m, 1); in drbd_send_and_submit()
1423 drbd_submit_req_private_bio(req); in drbd_send_and_submit()
1430 struct drbd_request *req = drbd_request_prepare(device, bio); in __drbd_make_request() local
1431 if (IS_ERR_OR_NULL(req)) in __drbd_make_request()
1433 drbd_send_and_submit(device, req); in __drbd_make_request()
1439 struct drbd_request *req, *tmp; in submit_fast_path() local
1442 list_for_each_entry_safe(req, tmp, incoming, tl_requests) { in submit_fast_path()
1443 const int rw = bio_data_dir(req->master_bio); in submit_fast_path()
1446 && req->private_bio && req->i.size in submit_fast_path()
1448 if (!drbd_al_begin_io_fastpath(device, &req->i)) in submit_fast_path()
1451 req->rq_state |= RQ_IN_ACT_LOG; in submit_fast_path()
1452 req->in_actlog_jif = jiffies; in submit_fast_path()
1456 list_del_init(&req->tl_requests); in submit_fast_path()
1457 drbd_send_and_submit(device, req); in submit_fast_path()
1467 struct drbd_request *req; in prepare_al_transaction_nonblock() local
1472 while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) { in prepare_al_transaction_nonblock()
1473 err = drbd_al_begin_io_nonblock(device, &req->i); in prepare_al_transaction_nonblock()
1479 list_move_tail(&req->tl_requests, later); in prepare_al_transaction_nonblock()
1481 list_move_tail(&req->tl_requests, pending); in prepare_al_transaction_nonblock()
1492 struct drbd_request *req; in send_and_submit_pending() local
1495 while ((req = list_first_entry_or_null(pending, struct drbd_request, tl_requests))) { in send_and_submit_pending()
1496 req->rq_state |= RQ_IN_ACT_LOG; in send_and_submit_pending()
1497 req->in_actlog_jif = jiffies; in send_and_submit_pending()
1499 list_del_init(&req->tl_requests); in send_and_submit_pending()
1500 drbd_send_and_submit(device, req); in send_and_submit_pending()
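
Note: the submitter-side helpers drain device->submit.writes in batches: submit_fast_path() handles requests whose AL extent is already active, prepare_al_transaction_nonblock() splits the rest into a pending list (extent activated without blocking) and a later list (would block, retried with the next AL transaction), and send_and_submit_pending() tags and submits the pending batch. A sketch of the split loop; singly linked lists and toy_* names stand in for the kernel's list_head, and list ordering is simplified:

    /* Sketch only: walk the incoming queue, moving each element either to
     * "pending" (resources acquired) or "later" (would block). */
    #include <stdbool.h>

    struct toy_req { struct toy_req *next; bool would_block; };

    static void toy_push(struct toy_req **list, struct toy_req *req)
    {
        req->next = *list;
        *list = req;
    }

    static void toy_split(struct toy_req **incoming,
                          struct toy_req **pending, struct toy_req **later)
    {
        struct toy_req *req;

        while ((req = *incoming)) {   /* cf. list_first_entry_or_null() */
            *incoming = req->next;
            if (req->would_block)     /* cf. drbd_al_begin_io_nonblock() failing */
                toy_push(later, req); /* retry with the next AL transaction */
            else
                toy_push(pending, req); /* will be sent and submitted now */
        }
    }
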