Lines matching full:incoming (DRBD request submission path; the functions below live in drivers/block/drbd/drbd_req.c):
1427 static void submit_fast_path(struct drbd_device *device, struct list_head *incoming) in submit_fast_path() argument
1433 list_for_each_entry_safe(req, tmp, incoming, tl_requests) { in submit_fast_path()
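The fast path walks the just-spliced incoming list with the _safe iterator, because it unlinks the requests it can submit immediately (those needing no new activity-log transaction) while leaving the rest in place for the slow path. A minimal sketch of that pattern in kernel-style C; may_submit_directly() and submit_request() are hypothetical placeholders, not DRBD functions:

	struct drbd_request *req, *tmp;

	/* _safe variant: the loop body may unlink the current entry */
	list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
		if (!may_submit_directly(device, req))	/* placeholder check */
			continue;	/* stays on @incoming for the AL slow path */
		list_del_init(&req->tl_requests);
		submit_request(device, req);	/* placeholder: submit without an AL transaction */
	}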
1454 struct list_head *incoming, in prepare_al_transaction_nonblock() argument
1463 while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) { in prepare_al_transaction_nonblock()
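prepare_al_transaction_nonblock() drains @incoming from the front and triages without sleeping: requests whose activity-log extent can be staged right away move to @pending, while requests that would have to wait move to @busy. A sketch of that triage under the same assumptions; try_stage_al_extent() is a stand-in for the real nonblocking AL check:

	struct drbd_request *req;

	while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) {
		if (try_stage_al_extent(device, req))	/* placeholder, must not block */
			list_move_tail(&req->tl_requests, pending);
		else
			list_move_tail(&req->tl_requests, busy);
	}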
1499 LIST_HEAD(incoming); /* from drbd_make_request() */ in do_submit()
1503 /* grab new incoming requests */ in do_submit()
1505 list_splice_tail_init(&device->submit.writes, &incoming); in do_submit()
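do_submit() begins by batching: every write that drbd_make_request() queued on device->submit.writes is spliced onto the local @incoming list in one operation under the request lock, so the submitter thread can then work on a private list without further locking. A sketch, assuming the resource req_lock guards submit.writes as it does elsewhere in this file:

	LIST_HEAD(incoming);

	spin_lock_irq(&device->resource->req_lock);
	list_splice_tail_init(&device->submit.writes, &incoming);
	spin_unlock_irq(&device->resource->req_lock);
	/* submit.writes is now empty; @incoming privately owns the requests */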
1511 /* move used-to-be-busy back to front of incoming */ in do_submit()
1512 list_splice_init(&busy, &incoming); in do_submit()
1513 submit_fast_path(device, &incoming); in do_submit()
1514 if (list_empty(&incoming)) in do_submit()
1520 list_splice_init(&busy, &incoming); in do_submit()
1521 prepare_al_transaction_nonblock(device, &incoming, &pending, &busy); in do_submit()
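Note that @busy is re-spliced to the front of @incoming (list_splice_init() adds at the head), so requests that stalled in an earlier round keep their position ahead of newer writes before the fast path and the triage run again. A condensed sketch of one pass of this loop, reusing the helpers above:

	LIST_HEAD(pending);	/* staged for the next AL transaction commit */
	LIST_HEAD(busy);	/* could not get an AL slot this round */

	for (;;) {
		list_splice_init(&busy, &incoming);	/* old work first */
		submit_fast_path(device, &incoming);
		if (list_empty(&incoming))
			break;	/* everything went out on the fast path */

		list_splice_init(&busy, &incoming);
		prepare_al_transaction_nonblock(device, &incoming, &pending, &busy);
		/* ... commit the AL transaction, then submit @pending ... */
	}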
1528 * incoming requests, we still must not totally starve new in do_submit()
1530 * Something left on &incoming means there had not been in do_submit()
1538 if (!list_empty(&incoming)) in do_submit()
1542 * on incoming: all moved to busy! in do_submit()
1545 list_splice_tail_init(&device->submit.writes, &incoming); in do_submit()
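The comments above describe the two retry cases: if requests remain on @incoming, the activity log simply ran out of update slots this round, so the loop retries; if @incoming drained without anything becoming pending, every request moved to @busy, and the thread splices in newly queued writes before iterating so that writes to cold extents are not starved by the hot ones. A sketch of that branch, as it would sit inside the inner retry loop:

	if (!list_empty(&incoming))
		continue;	/* too few AL update slots; retry the triage */

	/* Nothing pending and nothing left on @incoming: all moved to
	 * @busy.  Grab whatever new writes arrived meanwhile and iterate,
	 * so requests to cold extents are not starved by hot ones. */
	spin_lock_irq(&device->resource->req_lock);
	list_splice_tail_init(&device->submit.writes, &incoming);
	spin_unlock_irq(&device->resource->req_lock);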
1550 /* If the transaction was full, before all incoming requests in do_submit()
1552 * without splicing in more incoming requests from upper layers. in do_submit()
1554 * Else, if all incoming have been processed, in do_submit()
1566 while (list_empty(&incoming)) { in do_submit()
1586 list_splice_tail_init(&more_incoming, &incoming); in do_submit()
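Last, while @incoming stays empty and the transaction still has room, do_submit() opportunistically stuffs writes that were queued during preparation into the same transaction: it splices them onto a local @more_incoming, triages them, appends the results, and stops once no progress is made. A sketch of that batching loop; the boolean return of prepare_al_transaction_nonblock() is an assumed convention for "staged at least one request":

	while (list_empty(&incoming)) {
		LIST_HEAD(more_pending);
		LIST_HEAD(more_incoming);
		bool made_progress;

		if (list_empty(&device->submit.writes))
			break;	/* unlocked peek is fine, only an optimization */

		spin_lock_irq(&device->resource->req_lock);
		list_splice_tail_init(&device->submit.writes, &more_incoming);
		spin_unlock_irq(&device->resource->req_lock);

		made_progress = prepare_al_transaction_nonblock(device,
				&more_incoming, &more_pending, &busy);
		list_splice_tail_init(&more_pending, &pending);
		list_splice_tail_init(&more_incoming, &incoming);
		if (!made_progress)
			break;
	}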