1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 drbd_req.c
4
5 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6
7 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
8 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
9 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10
11
12 */
13
14 #include <linux/module.h>
15
16 #include <linux/slab.h>
17 #include <linux/drbd.h>
18 #include "drbd_int.h"
19 #include "drbd_req.h"
20
21
22 static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);
23
24 static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
25 {
26 struct drbd_request *req;
27
28 req = mempool_alloc(&drbd_request_mempool, GFP_NOIO);
29 if (!req)
30 return NULL;
31 memset(req, 0, sizeof(*req));
32
33 drbd_req_make_private_bio(req, bio_src);
34 req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
35 | (bio_op(bio_src) == REQ_OP_WRITE_SAME ? RQ_WSAME : 0)
36 | (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_ZEROES : 0)
37 | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);
38 req->device = device;
39 req->master_bio = bio_src;
40 req->epoch = 0;
41
42 drbd_clear_interval(&req->i);
43 req->i.sector = bio_src->bi_iter.bi_sector;
44 req->i.size = bio_src->bi_iter.bi_size;
45 req->i.local = true;
46 req->i.waiting = false;
47
48 INIT_LIST_HEAD(&req->tl_requests);
49 INIT_LIST_HEAD(&req->w.list);
50 INIT_LIST_HEAD(&req->req_pending_master_completion);
51 INIT_LIST_HEAD(&req->req_pending_local);
52
53 /* one reference to be put by __drbd_make_request */
54 atomic_set(&req->completion_ref, 1);
55 /* one kref as long as completion_ref > 0 */
56 kref_init(&req->kref);
57 return req;
58 }
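/*
 * Request lifetime, in short (a simplified sketch; see mod_rq_state() for
 * the full set of transitions): ->completion_ref counts reasons why the
 * master bio may not be completed yet (local submission pending, network
 * ack pending, completion suspended, ...), while ->kref counts reasons why
 * the request object itself must stay around (e.g. an expected barrier ack).
 *
 *   drbd_req_new()                 completion_ref = 1, kref = 1
 *   TO_BE_SUBMITTED / TO_BE_SENT   one extra completion_ref each
 *   local / network completion     drops the corresponding completion_ref
 *   drbd_send_and_submit()         puts the initial completion_ref
 *
 * When completion_ref reaches zero, drbd_req_complete() hands the master
 * bio back to the upper layers; when kref reaches zero, drbd_req_destroy()
 * frees the object.
 */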
59
60 static void drbd_remove_request_interval(struct rb_root *root,
61 struct drbd_request *req)
62 {
63 struct drbd_device *device = req->device;
64 struct drbd_interval *i = &req->i;
65
66 drbd_remove_interval(root, i);
67
68 /* Wake up any processes waiting for this request to complete. */
69 if (i->waiting)
70 wake_up(&device->misc_wait);
71 }
72
73 void drbd_req_destroy(struct kref *kref)
74 {
75 struct drbd_request *req = container_of(kref, struct drbd_request, kref);
76 struct drbd_device *device = req->device;
77 const unsigned s = req->rq_state;
78
79 if ((req->master_bio && !(s & RQ_POSTPONED)) ||
80 atomic_read(&req->completion_ref) ||
81 (s & RQ_LOCAL_PENDING) ||
82 ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
83 drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
84 s, atomic_read(&req->completion_ref));
85 return;
86 }
87
88 /* If called from mod_rq_state (expected normal case) or
89 * drbd_send_and_submit (the less likely normal path), this holds the
90 * req_lock, and req->tl_requests will typically be on ->transfer_log,
91 * though it may be still empty (never added to the transfer log).
92 *
93 * If called from do_retry(), we do NOT hold the req_lock, but we are
94 * still allowed to unconditionally list_del(&req->tl_requests),
95 * because it will be on a local on-stack list only. */
96 list_del_init(&req->tl_requests);
97
98 /* finally remove the request from the conflict detection
99 * respective block_id verification interval tree. */
100 if (!drbd_interval_empty(&req->i)) {
101 struct rb_root *root;
102
103 if (s & RQ_WRITE)
104 root = &device->write_requests;
105 else
106 root = &device->read_requests;
107 drbd_remove_request_interval(root, req);
108 } else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0)
109 drbd_err(device, "drbd_req_destroy: Logic BUG: interval empty, but: rq_state=0x%x, sect=%llu, size=%u\n",
110 s, (unsigned long long)req->i.sector, req->i.size);
111
112 /* if it was a write, we may have to set the corresponding
113 * bit(s) out-of-sync first. If it had a local part, we need to
114 * release the reference to the activity log. */
115 if (s & RQ_WRITE) {
116 /* Set out-of-sync unless both OK flags are set
117 * (local only or remote failed).
118 * Other places where we set out-of-sync:
119 * READ with local io-error */
120
121 /* There is a special case:
122 * we may notice late that IO was suspended,
123 * and postpone, or schedule for retry, a write,
124 * before it even was submitted or sent.
125 * In that case we do not want to touch the bitmap at all.
126 */
127 if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
128 if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
129 drbd_set_out_of_sync(device, req->i.sector, req->i.size);
130
131 if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
132 drbd_set_in_sync(device, req->i.sector, req->i.size);
133 }
134
135 /* one might be tempted to move the drbd_al_complete_io
136 * to the local io completion callback drbd_request_endio.
137 * but, if this was a mirror write, we may only
138 * drbd_al_complete_io after this is RQ_NET_DONE,
139 * otherwise the extent could be dropped from the al
140 * before it has actually been written on the peer.
141 * if we crash before our peer knows about the request,
142 * but after the extent has been dropped from the al,
143 * we would forget to resync the corresponding extent.
144 */
145 if (s & RQ_IN_ACT_LOG) {
146 if (get_ldev_if_state(device, D_FAILED)) {
147 drbd_al_complete_io(device, &req->i);
148 put_ldev(device);
149 } else if (__ratelimit(&drbd_ratelimit_state)) {
150 drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
151 "but my Disk seems to have failed :(\n",
152 (unsigned long long) req->i.sector, req->i.size);
153 }
154 }
155 }
156
157 mempool_free(req, &drbd_request_mempool);
158 }
159
160 static void wake_all_senders(struct drbd_connection *connection)
161 {
162 wake_up(&connection->sender_work.q_wait);
163 }
164
165 /* must hold resource->req_lock */
166 void start_new_tl_epoch(struct drbd_connection *connection)
167 {
168 /* no point closing an epoch, if it is empty, anyways. */
169 if (connection->current_tle_writes == 0)
170 return;
171
172 connection->current_tle_writes = 0;
173 atomic_inc(&connection->current_tle_nr);
174 wake_all_senders(connection);
175 }
176
177 void complete_master_bio(struct drbd_device *device,
178 struct bio_and_error *m)
179 {
180 m->bio->bi_status = errno_to_blk_status(m->error);
181 bio_endio(m->bio);
182 dec_ap_bio(device);
183 }
184
185
186 /* Helper for __req_mod().
187 * Set m->bio to the master bio, if it is fit to be completed,
188 * or leave it alone (it is initialized to NULL in __req_mod),
189 * if it has already been completed, or cannot be completed yet.
190 * If m->bio is set, the error status to be returned is placed in m->error.
191 */
192 static
193 void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
194 {
195 const unsigned s = req->rq_state;
196 struct drbd_device *device = req->device;
197 int error, ok;
198
199 /* we must not complete the master bio, while it is
200 * still being processed by _drbd_send_zc_bio (drbd_send_dblock)
201 * not yet acknowledged by the peer
202 * not yet completed by the local io subsystem
203 * these flags may get cleared in any order by
204 * the worker,
205 * the receiver,
206 * the bio_endio completion callbacks.
207 */
208 if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
209 (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
210 (s & RQ_COMPLETION_SUSP)) {
211 drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
212 return;
213 }
214
215 if (!req->master_bio) {
216 drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
217 return;
218 }
219
220 /*
221 * figure out whether to report success or failure.
222 *
223 * report success when at least one of the operations succeeded.
224 * or, to put the other way,
225 * only report failure, when both operations failed.
226 *
227 * what to do about the failures is handled elsewhere.
228 * what we need to do here is just: complete the master_bio.
229 *
230 * local completion error, if any, has been stored as ERR_PTR
231 * in private_bio within drbd_request_endio.
232 */
233 ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
234 error = PTR_ERR(req->private_bio);
235
236 /* Before we can signal completion to the upper layers,
237 * we may need to close the current transfer log epoch.
238 * We are within the request lock, so we can simply compare
239 * the request epoch number with the current transfer log
240 * epoch number. If they match, increase the current_tle_nr,
241 * and reset the transfer log epoch write_cnt.
242 */
243 if (op_is_write(bio_op(req->master_bio)) &&
244 req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
245 start_new_tl_epoch(first_peer_device(device)->connection);
246
247 /* Update disk stats */
248 bio_end_io_acct(req->master_bio, req->start_jif);
249
250 /* If READ failed,
251 * have it be pushed back to the retry work queue,
252 * so it will re-enter __drbd_make_request(),
253 * and be re-assigned to a suitable local or remote path,
254 * or failed if we do not have access to good data anymore.
255 *
256 * Unless it was failed early by __drbd_make_request(),
257 * because no path was available, in which case
258 * it was not even added to the transfer_log.
259 *
260 * read-ahead may fail, and will not be retried.
261 *
262 * WRITE should have used all available paths already.
263 */
264 if (!ok &&
265 bio_op(req->master_bio) == REQ_OP_READ &&
266 !(req->master_bio->bi_opf & REQ_RAHEAD) &&
267 !list_empty(&req->tl_requests))
268 req->rq_state |= RQ_POSTPONED;
269
270 if (!(req->rq_state & RQ_POSTPONED)) {
271 m->error = ok ? 0 : (error ?: -EIO);
272 m->bio = req->master_bio;
273 req->master_bio = NULL;
274 /* We leave it in the tree, to be able to verify later
275 * write-acks in protocol != C during resync.
276 * But we mark it as "complete", so it won't be counted as
277 * conflict in a multi-primary setup. */
278 req->i.completed = true;
279 }
280
281 if (req->i.waiting)
282 wake_up(&device->misc_wait);
283
284 /* Either we are about to complete to upper layers,
285 * or we will restart this request.
286 * In either case, the request object will be destroyed soon,
287 * so better remove it from all lists. */
288 list_del_init(&req->req_pending_master_completion);
289 }
290
291 /* still holds resource->req_lock */
292 static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
293 {
294 struct drbd_device *device = req->device;
295 D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));
296
297 if (!put)
298 return;
299
300 if (!atomic_sub_and_test(put, &req->completion_ref))
301 return;
302
303 drbd_req_complete(req, m);
304
305 /* local completion may still come in later,
306 * we need to keep the req object around. */
307 if (req->rq_state & RQ_LOCAL_ABORTED)
308 return;
309
310 if (req->rq_state & RQ_POSTPONED) {
311 /* don't destroy the req object just yet,
312 * but queue it for retry */
313 drbd_restart_request(req);
314 return;
315 }
316
317 kref_put(&req->kref, drbd_req_destroy);
318 }
319
320 static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
321 {
322 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
323 if (!connection)
324 return;
325 if (connection->req_next == NULL)
326 connection->req_next = req;
327 }
328
329 static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
330 {
331 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
332 if (!connection)
333 return;
334 if (connection->req_next != req)
335 return;
336 list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
337 const unsigned s = req->rq_state;
338 if (s & RQ_NET_QUEUED)
339 break;
340 }
341 if (&req->tl_requests == &connection->transfer_log)
342 req = NULL;
343 connection->req_next = req;
344 }
345
346 static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
347 {
348 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
349 if (!connection)
350 return;
351 if (connection->req_ack_pending == NULL)
352 connection->req_ack_pending = req;
353 }
354
355 static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
356 {
357 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
358 if (!connection)
359 return;
360 if (connection->req_ack_pending != req)
361 return;
362 list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
363 const unsigned s = req->rq_state;
364 if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
365 break;
366 }
367 if (&req->tl_requests == &connection->transfer_log)
368 req = NULL;
369 connection->req_ack_pending = req;
370 }
371
372 static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
373 {
374 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
375 if (!connection)
376 return;
377 if (connection->req_not_net_done == NULL)
378 connection->req_not_net_done = req;
379 }
380
381 static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
382 {
383 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
384 if (!connection)
385 return;
386 if (connection->req_not_net_done != req)
387 return;
388 list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
389 const unsigned s = req->rq_state;
390 if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
391 break;
392 }
393 if (&req->tl_requests == &connection->transfer_log)
394 req = NULL;
395 connection->req_not_net_done = req;
396 }
397
398 /* I'd like this to be the only place that manipulates
399 * req->completion_ref and req->kref. */
400 static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
401 int clear, int set)
402 {
403 struct drbd_device *device = req->device;
404 struct drbd_peer_device *peer_device = first_peer_device(device);
405 unsigned s = req->rq_state;
406 int c_put = 0;
407
408 if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
409 set |= RQ_COMPLETION_SUSP;
410
411 /* apply */
412
413 req->rq_state &= ~clear;
414 req->rq_state |= set;
415
416 /* no change? */
417 if (req->rq_state == s)
418 return;
419
420 /* intent: get references */
421
422 kref_get(&req->kref);
423
424 if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
425 atomic_inc(&req->completion_ref);
426
427 if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
428 inc_ap_pending(device);
429 atomic_inc(&req->completion_ref);
430 }
431
432 if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) {
433 atomic_inc(&req->completion_ref);
434 set_if_null_req_next(peer_device, req);
435 }
436
437 if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
438 kref_get(&req->kref); /* wait for the DONE */
439
440 if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) {
441 /* potentially already completed in the ack_receiver thread */
442 if (!(s & RQ_NET_DONE)) {
443 atomic_add(req->i.size >> 9, &device->ap_in_flight);
444 set_if_null_req_not_net_done(peer_device, req);
445 }
446 if (req->rq_state & RQ_NET_PENDING)
447 set_if_null_req_ack_pending(peer_device, req);
448 }
449
450 if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
451 atomic_inc(&req->completion_ref);
452
453 /* progress: put references */
454
455 if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
456 ++c_put;
457
458 if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
459 D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
460 ++c_put;
461 }
462
463 if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
464 if (req->rq_state & RQ_LOCAL_ABORTED)
465 kref_put(&req->kref, drbd_req_destroy);
466 else
467 ++c_put;
468 list_del_init(&req->req_pending_local);
469 }
470
471 if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
472 dec_ap_pending(device);
473 ++c_put;
474 req->acked_jif = jiffies;
475 advance_conn_req_ack_pending(peer_device, req);
476 }
477
478 if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED)) {
479 ++c_put;
480 advance_conn_req_next(peer_device, req);
481 }
482
483 if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
484 if (s & RQ_NET_SENT)
485 atomic_sub(req->i.size >> 9, &device->ap_in_flight);
486 if (s & RQ_EXP_BARR_ACK)
487 kref_put(&req->kref, drbd_req_destroy);
488 req->net_done_jif = jiffies;
489
490 /* in ahead/behind mode, or just in case,
491 * before we finally destroy this request,
492 * the caching pointers must not reference it anymore */
493 advance_conn_req_next(peer_device, req);
494 advance_conn_req_ack_pending(peer_device, req);
495 advance_conn_req_not_net_done(peer_device, req);
496 }
497
498 /* potentially complete and destroy */
499
500 /* If we made progress, retry conflicting peer requests, if any. */
501 if (req->i.waiting)
502 wake_up(&device->misc_wait);
503
504 drbd_req_put_completion_ref(req, m, c_put);
505 kref_put(&req->kref, drbd_req_destroy);
506 }
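/*
 * Example of how an event flows through here: a COMPLETED_OK event from the
 * local disk arrives via __req_mod() as
 *   mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
 * clearing RQ_LOCAL_PENDING drops one completion_ref, and if that was the
 * last reference, drbd_req_put_completion_ref() calls drbd_req_complete()
 * to prepare the master bio for completion.
 */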
507
508 static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
509 {
510 char b[BDEVNAME_SIZE];
511
512 if (!__ratelimit(&drbd_ratelimit_state))
513 return;
514
515 drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
516 (req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
517 (unsigned long long)req->i.sector,
518 req->i.size >> 9,
519 bdevname(device->ldev->backing_bdev, b));
520 }
521
522 /* Helper for HANDED_OVER_TO_NETWORK.
523 * Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected)?
524 * Is it also still "PENDING"?
525 * --> If so, clear PENDING and set NET_OK below.
526 * If it is a protocol A write, but not RQ_PENDING anymore, neg-ack was faster
527 * (and we must not set RQ_NET_OK) */
528 static inline bool is_pending_write_protocol_A(struct drbd_request *req)
529 {
530 return (req->rq_state &
531 (RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
532 == (RQ_WRITE|RQ_NET_PENDING);
533 }
534
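/*
 * Quick reference, derived from the TO_BE_SENT handling below: the
 * configured wire protocol decides which data ack we expect for a write:
 *
 *   protocol A:  no per-request data ack, only the epoch's P_BARRIER_ACK
 *   protocol B:  RQ_EXP_RECEIVE_ACK, the peer sends P_RECV_ACK on receipt
 *   protocol C:  RQ_EXP_WRITE_ACK,   the peer sends P_WRITE_ACK once written
 */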
535 /* obviously this could be coded as many single functions
536 * instead of one huge switch,
537 * or by putting the code directly in the respective locations
538 * (as it has been before).
539 *
540 * but having it this way
541 * enforces that it is all in this one place, where it is easier to audit,
542 * it makes it obvious that whatever "event" "happens" to a request should
543 * happen "atomically" within the req_lock,
544 * and it enforces that we have to think in a very structured manner
545 * about the "events" that may happen to a request during its life time ...
546 */
547 int __req_mod(struct drbd_request *req, enum drbd_req_event what,
548 struct bio_and_error *m)
549 {
550 struct drbd_device *const device = req->device;
551 struct drbd_peer_device *const peer_device = first_peer_device(device);
552 struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
553 struct net_conf *nc;
554 int p, rv = 0;
555
556 if (m)
557 m->bio = NULL;
558
559 switch (what) {
560 default:
561 drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
562 break;
563
564 /* does not happen...
565 * initialization done in drbd_req_new
566 case CREATED:
567 break;
568 */
569
570 case TO_BE_SENT: /* via network */
571 /* reached via __drbd_make_request
572 * and from w_read_retry_remote */
573 D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
574 rcu_read_lock();
575 nc = rcu_dereference(connection->net_conf);
576 p = nc->wire_protocol;
577 rcu_read_unlock();
578 req->rq_state |=
579 p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
580 p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
581 mod_rq_state(req, m, 0, RQ_NET_PENDING);
582 break;
583
584 case TO_BE_SUBMITTED: /* locally */
585 /* reached via __drbd_make_request */
586 D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
587 mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
588 break;
589
590 case COMPLETED_OK:
591 if (req->rq_state & RQ_WRITE)
592 device->writ_cnt += req->i.size >> 9;
593 else
594 device->read_cnt += req->i.size >> 9;
595
596 mod_rq_state(req, m, RQ_LOCAL_PENDING,
597 RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
598 break;
599
600 case ABORT_DISK_IO:
601 mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
602 break;
603
604 case WRITE_COMPLETED_WITH_ERROR:
605 drbd_report_io_error(device, req);
606 __drbd_chk_io_error(device, DRBD_WRITE_ERROR);
607 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
608 break;
609
610 case READ_COMPLETED_WITH_ERROR:
611 drbd_set_out_of_sync(device, req->i.sector, req->i.size);
612 drbd_report_io_error(device, req);
613 __drbd_chk_io_error(device, DRBD_READ_ERROR);
614 fallthrough;
615 case READ_AHEAD_COMPLETED_WITH_ERROR:
616 /* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
617 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
618 break;
619
620 case DISCARD_COMPLETED_NOTSUPP:
621 case DISCARD_COMPLETED_WITH_ERROR:
622 /* I'd rather not detach from local disk just because it
623 * failed a REQ_OP_DISCARD. */
624 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
625 break;
626
627 case QUEUE_FOR_NET_READ:
628 /* READ, and
629 * no local disk,
630 * or target area marked as invalid,
631 * or just got an io-error. */
632 /* from __drbd_make_request
633 * or from bio_endio during read io-error recovery */
634
635 /* So we can verify the handle in the answer packet.
636 * Corresponding drbd_remove_request_interval is in
637 * drbd_req_complete() */
638 D_ASSERT(device, drbd_interval_empty(&req->i));
639 drbd_insert_interval(&device->read_requests, &req->i);
640
641 set_bit(UNPLUG_REMOTE, &device->flags);
642
643 D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
644 D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
645 mod_rq_state(req, m, 0, RQ_NET_QUEUED);
646 req->w.cb = w_send_read_req;
647 drbd_queue_work(&connection->sender_work,
648 &req->w);
649 break;
650
651 case QUEUE_FOR_NET_WRITE:
652 /* assert something? */
653 /* from __drbd_make_request only */
654
655 /* Corresponding drbd_remove_request_interval is in
656 * drbd_req_complete() */
657 D_ASSERT(device, drbd_interval_empty(&req->i));
658 drbd_insert_interval(&device->write_requests, &req->i);
659
660 /* NOTE
661 * In case the req ended up on the transfer log before being
662 * queued on the worker, it could lead to this request being
663 * missed during cleanup after connection loss.
664 * So we have to do both operations here,
665 * within the same lock that protects the transfer log.
666 *
667 * _req_add_to_epoch(req); this has to be after the
668 * _maybe_start_new_epoch(req); which happened in
669 * __drbd_make_request, because we now may set the bit
670 * again ourselves to close the current epoch.
671 *
672 * Add req to the (now) current epoch (barrier). */
673
674 /* otherwise we may lose an unplug, which may cause some remote
675 * io-scheduler timeout to expire, increasing maximum latency,
676 * hurting performance. */
677 set_bit(UNPLUG_REMOTE, &device->flags);
678
679 /* queue work item to send data */
680 D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
681 mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
682 req->w.cb = w_send_dblock;
683 drbd_queue_work(&connection->sender_work,
684 &req->w);
685
686 /* close the epoch, in case it outgrew the limit */
687 rcu_read_lock();
688 nc = rcu_dereference(connection->net_conf);
689 p = nc->max_epoch_size;
690 rcu_read_unlock();
691 if (connection->current_tle_writes >= p)
692 start_new_tl_epoch(connection);
693
694 break;
695
696 case QUEUE_FOR_SEND_OOS:
697 mod_rq_state(req, m, 0, RQ_NET_QUEUED);
698 req->w.cb = w_send_out_of_sync;
699 drbd_queue_work(&connection->sender_work,
700 &req->w);
701 break;
702
703 case READ_RETRY_REMOTE_CANCELED:
704 case SEND_CANCELED:
705 case SEND_FAILED:
706 /* real cleanup will be done from tl_clear. just update flags
707 * so it is no longer marked as on the worker queue */
708 mod_rq_state(req, m, RQ_NET_QUEUED, 0);
709 break;
710
711 case HANDED_OVER_TO_NETWORK:
712 /* assert something? */
713 if (is_pending_write_protocol_A(req))
714 /* this is what is dangerous about protocol A:
715 * pretend it was successfully written on the peer. */
716 mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
717 RQ_NET_SENT|RQ_NET_OK);
718 else
719 mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
720 /* It is still not yet RQ_NET_DONE until the
721 * corresponding epoch barrier got acked as well,
722 * so we know what to dirty on connection loss. */
723 break;
724
725 case OOS_HANDED_TO_NETWORK:
726 /* Was not set PENDING, no longer QUEUED, so is now DONE
727 * as far as this connection is concerned. */
728 mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
729 break;
730
731 case CONNECTION_LOST_WHILE_PENDING:
732 /* transfer log cleanup after connection loss */
733 mod_rq_state(req, m,
734 RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
735 RQ_NET_DONE);
736 break;
737
738 case CONFLICT_RESOLVED:
739 /* for superseded conflicting writes of multiple primaries,
740 * there is no need to keep anything in the tl, potential
741 * node crashes are covered by the activity log.
742 *
743 * If this request had been marked as RQ_POSTPONED before,
744 * it will actually not be completed, but "restarted",
745 * resubmitted from the retry worker context. */
746 D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
747 D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
748 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
749 break;
750
751 case WRITE_ACKED_BY_PEER_AND_SIS:
752 req->rq_state |= RQ_NET_SIS;
fallthrough;
753 case WRITE_ACKED_BY_PEER:
754 /* Normal operation protocol C: successfully written on peer.
755 * During resync, even in protocol != C,
756 * we requested an explicit write ack anyways.
757 * Which means we cannot even assert anything here.
758 * Nothing more to do here.
759 * We want to keep the tl in place for all protocols, to cater
760 * for volatile write-back caches on lower level devices. */
761 goto ack_common;
762 case RECV_ACKED_BY_PEER:
763 D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
764 /* protocol B; pretends to be successfully written on peer.
765 * see also notes above in HANDED_OVER_TO_NETWORK about
766 * protocol != C */
767 ack_common:
768 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
769 break;
770
771 case POSTPONE_WRITE:
772 D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
773 /* If this node has already detected the write conflict, the
774 * worker will be waiting on misc_wait. Wake it up once this
775 * request has completed locally.
776 */
777 D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
778 req->rq_state |= RQ_POSTPONED;
779 if (req->i.waiting)
780 wake_up(&device->misc_wait);
781 /* Do not clear RQ_NET_PENDING. This request will make further
782 * progress via restart_conflicting_writes() or
783 * fail_postponed_requests(). Hopefully. */
784 break;
785
786 case NEG_ACKED:
787 mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
788 break;
789
790 case FAIL_FROZEN_DISK_IO:
791 if (!(req->rq_state & RQ_LOCAL_COMPLETED))
792 break;
793 mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
794 break;
795
796 case RESTART_FROZEN_DISK_IO:
797 if (!(req->rq_state & RQ_LOCAL_COMPLETED))
798 break;
799
800 mod_rq_state(req, m,
801 RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
802 RQ_LOCAL_PENDING);
803
804 rv = MR_READ;
805 if (bio_data_dir(req->master_bio) == WRITE)
806 rv = MR_WRITE;
807
808 get_ldev(device); /* always succeeds in this call path */
809 req->w.cb = w_restart_disk_io;
810 drbd_queue_work(&connection->sender_work,
811 &req->w);
812 break;
813
814 case RESEND:
815 /* Simply complete (local only) READs. */
816 if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
817 mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
818 break;
819 }
820
821 /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
822 before the connection loss (B&C only); only P_BARRIER_ACK
823 (or the local completion?) was missing when we suspended.
824 Throwing them out of the TL here by pretending we got a BARRIER_ACK.
825 During connection handshake, we ensure that the peer was not rebooted. */
826 if (!(req->rq_state & RQ_NET_OK)) {
827 /* FIXME could this possibly be a req->w.cb == w_send_out_of_sync?
828 * in that case we must not set RQ_NET_PENDING. */
829
830 mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
831 if (req->w.cb) {
832 /* w.cb expected to be w_send_dblock, or w_send_read_req */
833 drbd_queue_work(&connection->sender_work,
834 &req->w);
835 rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
836 } /* else: FIXME can this happen? */
837 break;
838 }
839 fallthrough; /* to BARRIER_ACKED */
840
841 case BARRIER_ACKED:
842 /* barrier ack for READ requests does not make sense */
843 if (!(req->rq_state & RQ_WRITE))
844 break;
845
846 if (req->rq_state & RQ_NET_PENDING) {
847 /* barrier came in before all requests were acked.
848 * this is bad, because if the connection is lost now,
849 * we won't be able to clean them up... */
850 drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n");
851 }
852 /* Allowed to complete requests, even while suspended.
853 * As this is called for all requests within a matching epoch,
854 * we need to filter, and only set RQ_NET_DONE for those that
855 * have actually been on the wire. */
856 mod_rq_state(req, m, RQ_COMPLETION_SUSP,
857 (req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
858 break;
859
860 case DATA_RECEIVED:
861 D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
862 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
863 break;
864
865 case QUEUE_AS_DRBD_BARRIER:
866 start_new_tl_epoch(connection);
867 mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
868 break;
869 }
870
871 return rv;
872 }
873
874 /* we may do a local read if:
875 * - we are consistent (of course),
876 * - or we are generally inconsistent,
877 * BUT we are still/already IN SYNC for this area.
878 * since size may be bigger than BM_BLOCK_SIZE,
879 * we may need to check several bits.
880 */
881 static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
882 {
883 unsigned long sbnr, ebnr;
884 sector_t esector, nr_sectors;
885
886 if (device->state.disk == D_UP_TO_DATE)
887 return true;
888 if (device->state.disk != D_INCONSISTENT)
889 return false;
890 esector = sector + (size >> 9) - 1;
891 nr_sectors = get_capacity(device->vdisk);
892 D_ASSERT(device, sector < nr_sectors);
893 D_ASSERT(device, esector < nr_sectors);
894
895 sbnr = BM_SECT_TO_BIT(sector);
896 ebnr = BM_SECT_TO_BIT(esector);
897
898 return drbd_bm_count_bits(device, sbnr, ebnr) == 0;
899 }
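/*
 * Worked example, assuming the usual 4 KiB bitmap granularity
 * (BM_BLOCK_SHIFT == 12, so BM_SECT_TO_BIT(s) == s >> 3): a 32 KiB read at
 * sector 1000 covers sectors 1000..1063, i.e. bitmap bits 125..132.
 * drbd_may_do_local_read() allows the local read only if none of those
 * bits is set, i.e. the whole range is already in sync.
 */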
900
901 static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
902 enum drbd_read_balancing rbm)
903 {
904 struct backing_dev_info *bdi;
905 int stripe_shift;
906
907 switch (rbm) {
908 case RB_CONGESTED_REMOTE:
909 bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
910 return bdi_read_congested(bdi);
911 case RB_LEAST_PENDING:
912 return atomic_read(&device->local_cnt) >
913 atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
914 case RB_32K_STRIPING: /* stripe_shift = 15 */
915 case RB_64K_STRIPING:
916 case RB_128K_STRIPING:
917 case RB_256K_STRIPING:
918 case RB_512K_STRIPING:
919 case RB_1M_STRIPING: /* stripe_shift = 20 */
920 stripe_shift = (rbm - RB_32K_STRIPING + 15);
921 return (sector >> (stripe_shift - 9)) & 1;
922 case RB_ROUND_ROBIN:
923 return test_and_change_bit(READ_BALANCE_RR, &device->flags);
924 case RB_PREFER_REMOTE:
925 return true;
926 case RB_PREFER_LOCAL:
927 default:
928 return false;
929 }
930 }
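/*
 * Example for the striping modes above: RB_64K_STRIPING yields
 * stripe_shift == 16, so the decision bit is (sector >> 7) & 1 and reads
 * alternate between local and remote every 128 sectors (64 KiB) of offset.
 */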
931
932 /*
933 * complete_conflicting_writes - wait for any conflicting write requests
934 *
935 * The write_requests tree contains all active write requests which we
936 * currently know about. Wait for any requests to complete which conflict with
937 * the new one.
938 *
939 * Only way out: remove the conflicting intervals from the tree.
940 */
941 static void complete_conflicting_writes(struct drbd_request *req)
942 {
943 DEFINE_WAIT(wait);
944 struct drbd_device *device = req->device;
945 struct drbd_interval *i;
946 sector_t sector = req->i.sector;
947 int size = req->i.size;
948
949 for (;;) {
950 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
951 /* Ignore, if already completed to upper layers. */
952 if (i->completed)
953 continue;
954 /* Handle the first found overlap. After the schedule
955 * we have to restart the tree walk. */
956 break;
957 }
958 if (!i) /* if any */
959 break;
960
961 /* Indicate to wake up device->misc_wait on progress. */
962 prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
963 i->waiting = true;
964 spin_unlock_irq(&device->resource->req_lock);
965 schedule();
966 spin_lock_irq(&device->resource->req_lock);
967 }
968 finish_wait(&device->misc_wait, &wait);
969 }
970
971 /* called within req_lock */
972 static void maybe_pull_ahead(struct drbd_device *device)
973 {
974 struct drbd_connection *connection = first_peer_device(device)->connection;
975 struct net_conf *nc;
976 bool congested = false;
977 enum drbd_on_congestion on_congestion;
978
979 rcu_read_lock();
980 nc = rcu_dereference(connection->net_conf);
981 on_congestion = nc ? nc->on_congestion : OC_BLOCK;
982 rcu_read_unlock();
983 if (on_congestion == OC_BLOCK ||
984 connection->agreed_pro_version < 96)
985 return;
986
987 if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD)
988 return; /* nothing to do ... */
989
990 /* If I don't even have good local storage, we can not reasonably try
991 * to pull ahead of the peer. We also need the local reference to make
992 * sure device->act_log is there.
993 */
994 if (!get_ldev_if_state(device, D_UP_TO_DATE))
995 return;
996
997 if (nc->cong_fill &&
998 atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
999 drbd_info(device, "Congestion-fill threshold reached\n");
1000 congested = true;
1001 }
1002
1003 if (device->act_log->used >= nc->cong_extents) {
1004 drbd_info(device, "Congestion-extents threshold reached\n");
1005 congested = true;
1006 }
1007
1008 if (congested) {
1009 /* start a new epoch for non-mirrored writes */
1010 start_new_tl_epoch(first_peer_device(device)->connection);
1011
1012 if (on_congestion == OC_PULL_AHEAD)
1013 _drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
1014 else /*nc->on_congestion == OC_DISCONNECT */
1015 _drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL);
1016 }
1017 put_ldev(device);
1018 }
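/*
 * This implements the "on-congestion" handling of the net section in the
 * DRBD configuration; an illustrative (not prescriptive) example:
 *
 *   net {
 *       on-congestion      pull-ahead;
 *       congestion-fill    2G;
 *       congestion-extents 2000;
 *   }
 *
 * Once either threshold is exceeded, the device goes Ahead (or disconnects)
 * instead of letting application writes block behind a congested link.
 */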
1019
1020 /* If this returns false, and req->private_bio is still set,
1021 * this should be submitted locally.
1022 *
1023 * If it returns false, but req->private_bio is not set,
1024 * we do not have access to good data :(
1025 *
1026 * Otherwise, this destroys req->private_bio, if any,
1027 * and returns true.
1028 */
1029 static bool do_remote_read(struct drbd_request *req)
1030 {
1031 struct drbd_device *device = req->device;
1032 enum drbd_read_balancing rbm;
1033
1034 if (req->private_bio) {
1035 if (!drbd_may_do_local_read(device,
1036 req->i.sector, req->i.size)) {
1037 bio_put(req->private_bio);
1038 req->private_bio = NULL;
1039 put_ldev(device);
1040 }
1041 }
1042
1043 if (device->state.pdsk != D_UP_TO_DATE)
1044 return false;
1045
1046 if (req->private_bio == NULL)
1047 return true;
1048
1049 /* TODO: improve read balancing decisions, take into account drbd
1050 * protocol, pending requests etc. */
1051
1052 rcu_read_lock();
1053 rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing;
1054 rcu_read_unlock();
1055
1056 if (rbm == RB_PREFER_LOCAL && req->private_bio)
1057 return false; /* submit locally */
1058
1059 if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
1060 if (req->private_bio) {
1061 bio_put(req->private_bio);
1062 req->private_bio = NULL;
1063 put_ldev(device);
1064 }
1065 return true;
1066 }
1067
1068 return false;
1069 }
1070
1071 bool drbd_should_do_remote(union drbd_dev_state s)
1072 {
1073 return s.pdsk == D_UP_TO_DATE ||
1074 (s.pdsk >= D_INCONSISTENT &&
1075 s.conn >= C_WF_BITMAP_T &&
1076 s.conn < C_AHEAD);
1077 /* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
1078 That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
1079 states. */
1080 }
1081
1082 static bool drbd_should_send_out_of_sync(union drbd_dev_state s)
1083 {
1084 return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
1085 /* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
1086 since we enter state C_AHEAD only if proto >= 96 */
1087 }
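/*
 * drbd_process_write_request() below uses these two predicates: if
 * drbd_should_do_remote() holds we replicate the data itself; if only
 * drbd_should_send_out_of_sync() holds (Ahead mode, or still exchanging
 * the bitmap) we merely tell the peer which blocks became out of sync,
 * to be resynced later.
 */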
1088
1089 /* returns number of connections (== 1, for drbd 8.4)
1090 * expected to actually write this data,
1091 * which does NOT include those that we are L_AHEAD for. */
1092 static int drbd_process_write_request(struct drbd_request *req)
1093 {
1094 struct drbd_device *device = req->device;
1095 int remote, send_oos;
1096
1097 remote = drbd_should_do_remote(device->state);
1098 send_oos = drbd_should_send_out_of_sync(device->state);
1099
1100 /* Need to replicate writes. Unless it is an empty flush,
1101 * which is better mapped to a DRBD P_BARRIER packet,
1102 * also for drbd wire protocol compatibility reasons.
1103 * If this was a flush, just start a new epoch.
1104 * Unless the current epoch was empty anyways, or we are not currently
1105 * replicating, in which case there is no point. */
1106 if (unlikely(req->i.size == 0)) {
1107 /* The only size==0 bios we expect are empty flushes. */
1108 D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
1109 if (remote)
1110 _req_mod(req, QUEUE_AS_DRBD_BARRIER);
1111 return remote;
1112 }
1113
1114 if (!remote && !send_oos)
1115 return 0;
1116
1117 D_ASSERT(device, !(remote && send_oos));
1118
1119 if (remote) {
1120 _req_mod(req, TO_BE_SENT);
1121 _req_mod(req, QUEUE_FOR_NET_WRITE);
1122 } else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
1123 _req_mod(req, QUEUE_FOR_SEND_OOS);
1124
1125 return remote;
1126 }
1127
1128 static void drbd_process_discard_or_zeroes_req(struct drbd_request *req, int flags)
1129 {
1130 int err = drbd_issue_discard_or_zero_out(req->device,
1131 req->i.sector, req->i.size >> 9, flags);
1132 if (err)
1133 req->private_bio->bi_status = BLK_STS_IOERR;
1134 bio_endio(req->private_bio);
1135 }
1136
1137 static void
1138 drbd_submit_req_private_bio(struct drbd_request *req)
1139 {
1140 struct drbd_device *device = req->device;
1141 struct bio *bio = req->private_bio;
1142 unsigned int type;
1143
1144 if (bio_op(bio) != REQ_OP_READ)
1145 type = DRBD_FAULT_DT_WR;
1146 else if (bio->bi_opf & REQ_RAHEAD)
1147 type = DRBD_FAULT_DT_RA;
1148 else
1149 type = DRBD_FAULT_DT_RD;
1150
1151 bio_set_dev(bio, device->ldev->backing_bdev);
1152
1153 /* State may have changed since we grabbed our reference on the
1154 * ->ldev member. Double check, and short-circuit to endio.
1155 * In case the last activity log transaction failed to get on
1156 * stable storage, and this is a WRITE, we may not even submit
1157 * this bio. */
1158 if (get_ldev(device)) {
1159 if (drbd_insert_fault(device, type))
1160 bio_io_error(bio);
1161 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
1162 drbd_process_discard_or_zeroes_req(req, EE_ZEROOUT |
1163 ((bio->bi_opf & REQ_NOUNMAP) ? 0 : EE_TRIM));
1164 else if (bio_op(bio) == REQ_OP_DISCARD)
1165 drbd_process_discard_or_zeroes_req(req, EE_TRIM);
1166 else
1167 submit_bio_noacct(bio);
1168 put_ldev(device);
1169 } else
1170 bio_io_error(bio);
1171 }
1172
1173 static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
1174 {
1175 spin_lock_irq(&device->resource->req_lock);
1176 list_add_tail(&req->tl_requests, &device->submit.writes);
1177 list_add_tail(&req->req_pending_master_completion,
1178 &device->pending_master_completion[1 /* WRITE */]);
1179 spin_unlock_irq(&device->resource->req_lock);
1180 queue_work(device->submit.wq, &device->submit.worker);
1181 /* do_submit() may sleep internally on al_wait, too */
1182 wake_up(&device->al_wait);
1183 }
1184
1185 /* returns the new drbd_request pointer, if the caller is expected to
1186 * drbd_send_and_submit() it (to save latency), or NULL if we queued the
1187 * request on the submitter thread.
1188 * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
1189 */
1190 static struct drbd_request *
1191 drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
1192 {
1193 const int rw = bio_data_dir(bio);
1194 struct drbd_request *req;
1195
1196 /* allocate outside of all locks; */
1197 req = drbd_req_new(device, bio);
1198 if (!req) {
1199 dec_ap_bio(device);
1200 /* only pass the error to the upper layers.
1201 * if user cannot handle io errors, that's not our business. */
1202 drbd_err(device, "could not kmalloc() req\n");
1203 bio->bi_status = BLK_STS_RESOURCE;
1204 bio_endio(bio);
1205 return ERR_PTR(-ENOMEM);
1206 }
1207
1208 /* Update disk stats */
1209 req->start_jif = bio_start_io_acct(req->master_bio);
1210
1211 if (!get_ldev(device)) {
1212 bio_put(req->private_bio);
1213 req->private_bio = NULL;
1214 }
1215
1216 /* process discards always from our submitter thread */
1217 if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
1218 bio_op(bio) == REQ_OP_DISCARD)
1219 goto queue_for_submitter_thread;
1220
1221 if (rw == WRITE && req->private_bio && req->i.size
1222 && !test_bit(AL_SUSPENDED, &device->flags)) {
1223 if (!drbd_al_begin_io_fastpath(device, &req->i))
1224 goto queue_for_submitter_thread;
1225 req->rq_state |= RQ_IN_ACT_LOG;
1226 req->in_actlog_jif = jiffies;
1227 }
1228 return req;
1229
1230 queue_for_submitter_thread:
1231 atomic_inc(&device->ap_actlog_cnt);
1232 drbd_queue_write(device, req);
1233 return NULL;
1234 }
1235
1236 /* Require at least one path to current data.
1237 * We don't want to allow writes on C_STANDALONE D_INCONSISTENT:
1238 * We would not allow to read what was written,
1239 * we would not have bumped the data generation uuids,
1240 * we would cause data divergence for all the wrong reasons.
1241 *
1242 * If we don't see at least one D_UP_TO_DATE, we will fail this request,
1243 * which either returns EIO, or, if OND_SUSPEND_IO is set, suspends IO,
1244 * and queues for retry later.
1245 */
1246 static bool may_do_writes(struct drbd_device *device)
1247 {
1248 const union drbd_dev_state s = device->state;
1249 return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE;
1250 }
1251
1252 struct drbd_plug_cb {
1253 struct blk_plug_cb cb;
1254 struct drbd_request *most_recent_req;
1255 /* do we need more? */
1256 };
1257
1258 static void drbd_unplug(struct blk_plug_cb *cb, bool from_schedule)
1259 {
1260 struct drbd_plug_cb *plug = container_of(cb, struct drbd_plug_cb, cb);
1261 struct drbd_resource *resource = plug->cb.data;
1262 struct drbd_request *req = plug->most_recent_req;
1263
1264 kfree(cb);
1265 if (!req)
1266 return;
1267
1268 spin_lock_irq(&resource->req_lock);
1269 /* In case the sender did not process it yet, raise the flag to
1270 * have it followed with P_UNPLUG_REMOTE just after. */
1271 req->rq_state |= RQ_UNPLUG;
1272 /* but also queue a generic unplug */
1273 drbd_queue_unplug(req->device);
1274 kref_put(&req->kref, drbd_req_destroy);
1275 spin_unlock_irq(&resource->req_lock);
1276 }
1277
1278 static struct drbd_plug_cb* drbd_check_plugged(struct drbd_resource *resource)
1279 {
1280 /* A lot of text to say
1281 * return (struct drbd_plug_cb*)blk_check_plugged(); */
1282 struct drbd_plug_cb *plug;
1283 struct blk_plug_cb *cb = blk_check_plugged(drbd_unplug, resource, sizeof(*plug));
1284
1285 if (cb)
1286 plug = container_of(cb, struct drbd_plug_cb, cb);
1287 else
1288 plug = NULL;
1289 return plug;
1290 }
1291
1292 static void drbd_update_plug(struct drbd_plug_cb *plug, struct drbd_request *req)
1293 {
1294 struct drbd_request *tmp = plug->most_recent_req;
1295 /* Will be sent to some peer.
1296 * Remember to tag it with UNPLUG_REMOTE on unplug */
1297 kref_get(&req->kref);
1298 plug->most_recent_req = req;
1299 if (tmp)
1300 kref_put(&tmp->kref, drbd_req_destroy);
1301 }
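/*
 * Plugging, in short: while the submitter holds a blk_plug,
 * drbd_check_plugged() registers drbd_unplug() as the unplug callback and
 * drbd_update_plug() keeps a reference on the most recent request; on
 * unplug, that request is tagged RQ_UNPLUG so the sender follows it with
 * P_UNPLUG_REMOTE, and a generic unplug is queued as well.
 */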
1302
1303 static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
1304 {
1305 struct drbd_resource *resource = device->resource;
1306 const int rw = bio_data_dir(req->master_bio);
1307 struct bio_and_error m = { NULL, };
1308 bool no_remote = false;
1309 bool submit_private_bio = false;
1310
1311 spin_lock_irq(&resource->req_lock);
1312 if (rw == WRITE) {
1313 /* This may temporarily give up the req_lock,
1314 * but will re-acquire it before it returns here.
1315 * Needs to be before the check on drbd_suspended() */
1316 complete_conflicting_writes(req);
1317 /* no more giving up req_lock from now on! */
1318
1319 /* check for congestion, and potentially stop sending
1320 * full data updates, but start sending "dirty bits" only. */
1321 maybe_pull_ahead(device);
1322 }
1323
1324
1325 if (drbd_suspended(device)) {
1326 /* push back and retry: */
1327 req->rq_state |= RQ_POSTPONED;
1328 if (req->private_bio) {
1329 bio_put(req->private_bio);
1330 req->private_bio = NULL;
1331 put_ldev(device);
1332 }
1333 goto out;
1334 }
1335
1336 /* We fail READ early, if we can not serve it.
1337 * We must do this before req is registered on any lists.
1338 * Otherwise, drbd_req_complete() will queue failed READ for retry. */
1339 if (rw != WRITE) {
1340 if (!do_remote_read(req) && !req->private_bio)
1341 goto nodata;
1342 }
1343
1344 /* which transfer log epoch does this belong to? */
1345 req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr);
1346
1347 /* no point in adding empty flushes to the transfer log,
1348 * they are mapped to drbd barriers already. */
1349 if (likely(req->i.size!=0)) {
1350 if (rw == WRITE)
1351 first_peer_device(device)->connection->current_tle_writes++;
1352
1353 list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log);
1354 }
1355
1356 if (rw == WRITE) {
1357 if (req->private_bio && !may_do_writes(device)) {
1358 bio_put(req->private_bio);
1359 req->private_bio = NULL;
1360 put_ldev(device);
1361 goto nodata;
1362 }
1363 if (!drbd_process_write_request(req))
1364 no_remote = true;
1365 } else {
1366 /* We either have a private_bio, or we can read from remote.
1367 * Otherwise we had done the goto nodata above. */
1368 if (req->private_bio == NULL) {
1369 _req_mod(req, TO_BE_SENT);
1370 _req_mod(req, QUEUE_FOR_NET_READ);
1371 } else
1372 no_remote = true;
1373 }
1374
1375 if (no_remote == false) {
1376 struct drbd_plug_cb *plug = drbd_check_plugged(resource);
1377 if (plug)
1378 drbd_update_plug(plug, req);
1379 }
1380
1381 /* If it took the fast path in drbd_request_prepare, add it here.
1382 * The slow path has added it already. */
1383 if (list_empty(&req->req_pending_master_completion))
1384 list_add_tail(&req->req_pending_master_completion,
1385 &device->pending_master_completion[rw == WRITE]);
1386 if (req->private_bio) {
1387 /* needs to be marked within the same spinlock */
1388 req->pre_submit_jif = jiffies;
1389 list_add_tail(&req->req_pending_local,
1390 &device->pending_completion[rw == WRITE]);
1391 _req_mod(req, TO_BE_SUBMITTED);
1392 /* but we need to give up the spinlock to submit */
1393 submit_private_bio = true;
1394 } else if (no_remote) {
1395 nodata:
1396 if (__ratelimit(&drbd_ratelimit_state))
1397 drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
1398 (unsigned long long)req->i.sector, req->i.size >> 9);
1399 /* A write may have been queued for send_oos, however.
1400 * So we can not simply free it, we must go through drbd_req_put_completion_ref() */
1401 }
1402
1403 out:
1404 drbd_req_put_completion_ref(req, &m, 1);
1405 spin_unlock_irq(&resource->req_lock);
1406
1407 /* Even though above is a kref_put(), this is safe.
1408 * As long as we still need to submit our private bio,
1409 * we hold a completion ref, and the request cannot disappear.
1410 * If however this request did not even have a private bio to submit
1411 * (e.g. remote read), req may already be invalid now.
1412 * That's why we cannot check on req->private_bio. */
1413 if (submit_private_bio)
1414 drbd_submit_req_private_bio(req);
1415 if (m.bio)
1416 complete_master_bio(device, &m);
1417 }
1418
1419 void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
1420 {
1421 struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
1422 if (IS_ERR_OR_NULL(req))
1423 return;
1424 drbd_send_and_submit(device, req);
1425 }
1426
1427 static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
1428 {
1429 struct blk_plug plug;
1430 struct drbd_request *req, *tmp;
1431
1432 blk_start_plug(&plug);
1433 list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
1434 const int rw = bio_data_dir(req->master_bio);
1435
1436 if (rw == WRITE /* rw != WRITE should not even end up here! */
1437 && req->private_bio && req->i.size
1438 && !test_bit(AL_SUSPENDED, &device->flags)) {
1439 if (!drbd_al_begin_io_fastpath(device, &req->i))
1440 continue;
1441
1442 req->rq_state |= RQ_IN_ACT_LOG;
1443 req->in_actlog_jif = jiffies;
1444 atomic_dec(&device->ap_actlog_cnt);
1445 }
1446
1447 list_del_init(&req->tl_requests);
1448 drbd_send_and_submit(device, req);
1449 }
1450 blk_finish_plug(&plug);
1451 }
1452
1453 static bool prepare_al_transaction_nonblock(struct drbd_device *device,
1454 struct list_head *incoming,
1455 struct list_head *pending,
1456 struct list_head *later)
1457 {
1458 struct drbd_request *req;
1459 int wake = 0;
1460 int err;
1461
1462 spin_lock_irq(&device->al_lock);
1463 while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) {
1464 err = drbd_al_begin_io_nonblock(device, &req->i);
1465 if (err == -ENOBUFS)
1466 break;
1467 if (err == -EBUSY)
1468 wake = 1;
1469 if (err)
1470 list_move_tail(&req->tl_requests, later);
1471 else
1472 list_move_tail(&req->tl_requests, pending);
1473 }
1474 spin_unlock_irq(&device->al_lock);
1475 if (wake)
1476 wake_up(&device->al_wait);
1477 return !list_empty(pending);
1478 }
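/*
 * Note on the error handling above: -ENOBUFS from
 * drbd_al_begin_io_nonblock() means the current activity log transaction
 * cannot take more updates, so we stop and let the caller commit what we
 * have; any other error (typically -EBUSY, an extent locked by resync)
 * parks the request on the "later" list, and al_wait is woken once we
 * drop al_lock.
 */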
1479
1480 static void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
1481 {
1482 struct blk_plug plug;
1483 struct drbd_request *req;
1484
1485 blk_start_plug(&plug);
1486 while ((req = list_first_entry_or_null(pending, struct drbd_request, tl_requests))) {
1487 req->rq_state |= RQ_IN_ACT_LOG;
1488 req->in_actlog_jif = jiffies;
1489 atomic_dec(&device->ap_actlog_cnt);
1490 list_del_init(&req->tl_requests);
1491 drbd_send_and_submit(device, req);
1492 }
1493 blk_finish_plug(&plug);
1494 }
1495
1496 void do_submit(struct work_struct *ws)
1497 {
1498 struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
1499 LIST_HEAD(incoming); /* from drbd_make_request() */
1500 LIST_HEAD(pending); /* to be submitted after next AL-transaction commit */
1501 LIST_HEAD(busy); /* blocked by resync requests */
1502
1503 /* grab new incoming requests */
1504 spin_lock_irq(&device->resource->req_lock);
1505 list_splice_tail_init(&device->submit.writes, &incoming);
1506 spin_unlock_irq(&device->resource->req_lock);
1507
1508 for (;;) {
1509 DEFINE_WAIT(wait);
1510
1511 /* move used-to-be-busy back to front of incoming */
1512 list_splice_init(&busy, &incoming);
1513 submit_fast_path(device, &incoming);
1514 if (list_empty(&incoming))
1515 break;
1516
1517 for (;;) {
1518 prepare_to_wait(&device->al_wait, &wait, TASK_UNINTERRUPTIBLE);
1519
1520 list_splice_init(&busy, &incoming);
1521 prepare_al_transaction_nonblock(device, &incoming, &pending, &busy);
1522 if (!list_empty(&pending))
1523 break;
1524
1525 schedule();
1526
1527 /* If all currently "hot" activity log extents are kept busy by
1528 * incoming requests, we still must not totally starve new
1529 * requests to "cold" extents.
1530 * Something left on &incoming means there had not been
1531 * enough update slots available, and the activity log
1532 * has been marked as "starving".
1533 *
1534 * Try again now, without looking for new requests,
1535 * effectively blocking all new requests until we made
1536 * at least _some_ progress with what we currently have.
1537 */
1538 if (!list_empty(&incoming))
1539 continue;
1540
1541 /* Nothing moved to pending, but nothing left
1542 * on incoming: all moved to busy!
1543 * Grab new and iterate. */
1544 spin_lock_irq(&device->resource->req_lock);
1545 list_splice_tail_init(&device->submit.writes, &incoming);
1546 spin_unlock_irq(&device->resource->req_lock);
1547 }
1548 finish_wait(&device->al_wait, &wait);
1549
1550 /* If the transaction was full, before all incoming requests
1551 * had been processed, skip ahead to commit, and iterate
1552 * without splicing in more incoming requests from upper layers.
1553 *
1554 * Else, if all incoming have been processed,
1555 * they have become either "pending" (to be submitted after
1556 * next transaction commit) or "busy" (blocked by resync).
1557 *
1558 * Maybe more was queued, while we prepared the transaction?
1559 * Try to stuff those into this transaction as well.
1560 * Be strictly non-blocking here,
1561 * we already have something to commit.
1562 *
1563 * Commit if we don't make any more progress.
1564 */
1565
1566 while (list_empty(&incoming)) {
1567 LIST_HEAD(more_pending);
1568 LIST_HEAD(more_incoming);
1569 bool made_progress;
1570
1571 /* It is ok to look outside the lock,
1572 * it's only an optimization anyways */
1573 if (list_empty(&device->submit.writes))
1574 break;
1575
1576 spin_lock_irq(&device->resource->req_lock);
1577 list_splice_tail_init(&device->submit.writes, &more_incoming);
1578 spin_unlock_irq(&device->resource->req_lock);
1579
1580 if (list_empty(&more_incoming))
1581 break;
1582
1583 made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending, &busy);
1584
1585 list_splice_tail_init(&more_pending, &pending);
1586 list_splice_tail_init(&more_incoming, &incoming);
1587 if (!made_progress)
1588 break;
1589 }
1590
1591 drbd_al_begin_io_commit(device);
1592 send_and_submit_pending(device, &pending);
1593 }
1594 }

blk_qc_t drbd_submit_bio(struct bio *bio)
{
	struct drbd_device *device = bio->bi_disk->private_data;
	unsigned long start_jif;

	blk_queue_split(&bio);

	start_jif = jiffies;

	/*
	 * what we "blindly" assume:
	 */
	D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));

	inc_ap_bio(device);
	__drbd_make_request(device, bio, start_jif);
	return BLK_QC_T_NONE;
}
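
/*
 * drbd_submit_bio() is the block layer entry point for this kernel
 * generation (bio->bi_disk, blk_qc_t): it is registered through the
 * ->submit_bio member of struct block_device_operations rather than an
 * explicit make_request function.  A sketch of the hookup, resembling the
 * drbd_ops definition in drbd_main.c (shortened, for illustration only):
 *
 *	static const struct block_device_operations drbd_ops_sketch = {
 *		.owner		= THIS_MODULE,
 *		.submit_bio	= drbd_submit_bio,
 *		.open		= drbd_open,
 *		.release	= drbd_release,
 *	};
 *
 * The gendisk allocated for each drbd minor points its ->fops at such a
 * structure, so every bio submitted to /dev/drbdX ends up here.
 */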

static bool net_timeout_reached(struct drbd_request *net_req,
		struct drbd_connection *connection,
		unsigned long now, unsigned long ent,
		unsigned int ko_count, unsigned int timeout)
{
	struct drbd_device *device = net_req->device;

	if (!time_after(now, net_req->pre_send_jif + ent))
		return false;

	if (time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent))
		return false;

	if (net_req->rq_state & RQ_NET_PENDING) {
		drbd_warn(device, "Remote failed to finish a request within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
			jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
		return true;
	}

	/* We received an ACK already (or are using protocol A),
	 * but are waiting for the epoch closing barrier ack.
	 * Check if we sent the barrier already.  We should not blame the peer
	 * for being unresponsive, if we did not even ask it yet. */
	if (net_req->epoch == connection->send.current_epoch_nr) {
		drbd_warn(device,
			"We did not send a P_BARRIER for %ums > ko-count (%u) * timeout (%u * 0.1s); drbd kernel thread blocked?\n",
			jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
		return false;
	}

	/* Worst case: we may have been blocked for whatever reason, then
	 * suddenly are able to send a lot of requests (and epoch separating
	 * barriers) in quick succession.
	 * The timestamp of the net_req may be much too old, not corresponding
	 * to the sending time of the relevant unack'ed barrier packet, and so
	 * would trigger a spurious timeout.  The latest barrier packet may
	 * have too recent a timestamp to trigger the timeout, so we could
	 * miss a timeout.  Right now we don't have a place to conveniently
	 * store these timestamps.
	 * But in this particular situation, the application requests are still
	 * completed to upper layers, DRBD should still "feel" responsive.
	 * No need yet to kill this connection, it may still recover.
	 * If not, eventually we will have queued enough into the network for
	 * us to block. From that point of view, the timestamp of the last sent
	 * barrier packet is relevant enough.
	 */
	if (time_after(now, connection->send.last_sent_barrier_jif + ent)) {
		drbd_warn(device, "Remote failed to answer a P_BARRIER (sent at %lu jif; now=%lu jif) within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
			connection->send.last_sent_barrier_jif, now,
			jiffies_to_msecs(now - connection->send.last_sent_barrier_jif), ko_count, timeout);
		return true;
	}
	return false;
}

/* A request is considered timed out, if
 * - we have some effective timeout from the configuration,
 *   with some state restrictions applied,
 * - the oldest request is waiting for a response from the network
 *   resp. the local disk,
 * - the oldest request is in fact older than the effective timeout,
 * - the connection was established (resp. disk was attached)
 *   for longer than the timeout already.
 * Note that for 32bit jiffies and very stable connections/disks,
 * we may have a wrap around, which is caught by
 * !time_in_range(now, last_..._jif, last_..._jif + timeout).
 *
 * Side effect: once per 32bit wrap-around interval, which means every
 * ~198 days with 250 HZ, we have a window where the timeout would need
 * to expire twice (worst case) to become effective.  Good enough.
 */
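
/*
 * The wrap-around guard mentioned above relies on time_in_range(now, a, b)
 * being true iff a <= now <= b in jiffies arithmetic.  A hypothetical,
 * self-contained helper (not used by DRBD, purely to illustrate the pattern
 * of "older than the timeout, and not freshly (re)established"):
 */
static inline bool sketch_peer_timed_out(unsigned long now,
		unsigned long oldest_req_jif, unsigned long reconnect_jif,
		unsigned long timeout)
{
	/* the oldest request is in fact older than the effective timeout ... */
	return time_after(now, oldest_req_jif + timeout) &&
		/* ... and we are not within the first 'timeout' jiffies after a
		 * (re)connect; with 32bit jiffies this window recurs once per
		 * wrap-around, which is the "side effect" described above */
		!time_in_range(now, reconnect_jif, reconnect_jif + timeout);
}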

void request_timer_fn(struct timer_list *t)
{
	struct drbd_device *device = from_timer(device, t, request_timer);
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */
	struct net_conf *nc;
	unsigned long oldest_submit_jif;
	unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
	unsigned long now;
	unsigned int ko_count = 0, timeout = 0;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (nc && device->state.conn >= C_WF_REPORT_PARAMS) {
		ko_count = nc->ko_count;
		timeout = nc->timeout;
	}

	if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */
		dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10;
		put_ldev(device);
	}
	rcu_read_unlock();

	ent = timeout * HZ/10 * ko_count;
	et = min_not_zero(dt, ent);
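	/* For example (assuming the usual defaults of timeout = 60, i.e. 6.0
	 * seconds, and ko-count = 7): ent = 60 * HZ/10 * 7 = 42 * HZ jiffies,
	 * a 42 second effective network timeout; et is the smaller of that
	 * and the disk-timeout, ignoring whichever is unconfigured (0). */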

	if (!et)
		return; /* Recurring timer stopped */

	now = jiffies;
	nt = now + et;

	spin_lock_irq(&device->resource->req_lock);
	req_read = list_first_entry_or_null(&device->pending_completion[0], struct drbd_request, req_pending_local);
	req_write = list_first_entry_or_null(&device->pending_completion[1], struct drbd_request, req_pending_local);

	/* maybe the oldest request waiting for the peer is in fact still
	 * blocking in tcp sendmsg.  That's ok, though, that's handled via the
	 * socket send timeout, requesting a ping, and bumping ko-count in
	 * we_should_drop_the_connection().
	 */

	/* check the oldest request we successfully sent,
	 * but which is still waiting for an ACK. */
	req_peer = connection->req_ack_pending;

	/* if we don't have such a request (e.g. protocol A),
	 * check the oldest request which is still waiting on its epoch
	 * closing barrier ack. */
	if (!req_peer)
		req_peer = connection->req_not_net_done;

	/* evaluate the oldest peer request only in one timer! */
	if (req_peer && req_peer->device != device)
		req_peer = NULL;

	/* do we have something to evaluate? */
	if (req_peer == NULL && req_write == NULL && req_read == NULL)
		goto out;

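	/* oldest_submit_jif is the older pre_submit_jif of the two oldest
	 * still-pending local requests (read resp. write); if neither exists
	 * it falls back to "now", which makes the disk-timeout check below a
	 * no-op (guarded by oldest_submit_jif != now). */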
	oldest_submit_jif =
		(req_write && req_read)
		? ( time_before(req_write->pre_submit_jif, req_read->pre_submit_jif)
		  ? req_write->pre_submit_jif : req_read->pre_submit_jif )
		: req_write ? req_write->pre_submit_jif
		: req_read ? req_read->pre_submit_jif : now;

	if (ent && req_peer && net_timeout_reached(req_peer, connection, now, ent, ko_count, timeout))
		_conn_request_state(connection, NS(conn, C_TIMEOUT), CS_VERBOSE | CS_HARD);

	if (dt && oldest_submit_jif != now &&
	    time_after(now, oldest_submit_jif + dt) &&
	    !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
		drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
		__drbd_chk_io_error(device, DRBD_FORCE_DETACH);
	}

	/* Reschedule timer for the nearest not already expired timeout.
	 * Fallback to now + min(effective network timeout, disk timeout). */
	ent = (ent && req_peer && time_before(now, req_peer->pre_send_jif + ent))
		? req_peer->pre_send_jif + ent : now + et;
	dt = (dt && oldest_submit_jif != now && time_before(now, oldest_submit_jif + dt))
		? oldest_submit_jif + dt : now + et;
	nt = time_before(ent, dt) ? ent : dt;
out:
	spin_unlock_irq(&device->resource->req_lock);
	mod_timer(&device->request_timer, nt);
}
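
/*
 * request_timer_fn() re-arms itself via mod_timer() above, so it only needs
 * to be kicked off whenever there is an effective timeout to watch.  A sketch
 * of the setup side, resembling what drbd does elsewhere (placement and the
 * initial interval are illustrative; the real timer_setup() call lives in
 * drbd_main.c):
 *
 *	timer_setup(&device->request_timer, request_timer_fn, 0);
 *	...
 *	// start watching once a timeout is configured, e.g. after connect/attach
 *	mod_timer(&device->request_timer, jiffies + HZ);
 */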