Lines Matching refs:rq (all lines in block/elevator.c that reference the identifier rq; cross-reference search output from a kernel of the era when the legacy single-queue ops.sq and multiqueue ops.mq hook tables coexisted)

52 #define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))  argument
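rq_hash_key() hashes a request by the sector just past its end (start position plus size in sectors), so the hash lookup below can find back-merge candidates: a bio starting at sector S belongs behind a request whose key is S. A minimal userspace sketch of the key (toy_request is a hypothetical stand-in for struct request):

```c
#include <stdio.h>

/* Hypothetical stand-in for struct request: start sector plus length. */
struct toy_request { unsigned long long pos, nr_sectors; };

/* Mirrors rq_hash_key(): the first sector *after* the request. */
#define toy_hash_key(rq) ((rq)->pos + (rq)->nr_sectors)

int main(void)
{
    struct toy_request rq = { .pos = 100, .nr_sectors = 8 };

    /* rq covers sectors [100, 108), so a bio starting at sector 108
     * is a back-merge candidate and hashes to the same key. */
    printf("hash key = %llu\n", toy_hash_key(&rq));  /* 108 */
    return 0;
}
```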
58 static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio) in elv_iosched_allow_bio_merge() argument
60 struct request_queue *q = rq->q; in elv_iosched_allow_bio_merge()
64 return e->type->ops.mq.allow_merge(q, rq, bio); in elv_iosched_allow_bio_merge()
66 return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio); in elv_iosched_allow_bio_merge()
74 bool elv_bio_merge_ok(struct request *rq, struct bio *bio) in elv_bio_merge_ok() argument
76 if (!blk_rq_merge_ok(rq, bio)) in elv_bio_merge_ok()
79 if (!elv_iosched_allow_bio_merge(rq, bio)) in elv_bio_merge_ok()
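elv_bio_merge_ok() gates a bio/request merge on two checks: the generic attribute test blk_rq_merge_ok(), then the scheduler's optional allow-merge hook, dispatched through the mq or legacy sq ops table as seen above. A sketch of that two-stage predicate, with hypothetical toy_* names and a trivially-true generic check:

```c
#include <stdbool.h>

struct toy_request;
struct toy_bio;

/* Hypothetical scheduler ops table with an optional veto hook,
 * modeled on e->type->ops.*.allow_merge above. */
struct toy_sched_ops {
    bool (*allow_merge)(struct toy_request *rq, struct toy_bio *bio);
};

/* Stand-in for blk_rq_merge_ok(): generic attribute compatibility
 * (data direction, mergeability flags, ...), elided here. */
static bool toy_generic_merge_ok(struct toy_request *rq, struct toy_bio *bio)
{
    (void)rq; (void)bio;
    return true;
}

/* Mirrors elv_bio_merge_ok(): generic test first, then the optional
 * scheduler veto; a missing hook means the merge is allowed. */
static bool toy_bio_merge_ok(const struct toy_sched_ops *ops,
                             struct toy_request *rq, struct toy_bio *bio)
{
    if (!toy_generic_merge_ok(rq, bio))
        return false;
    if (ops->allow_merge && !ops->allow_merge(rq, bio))
        return false;
    return true;
}
```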
255 static inline void __elv_rqhash_del(struct request *rq) in __elv_rqhash_del() argument
257 hash_del(&rq->hash); in __elv_rqhash_del()
258 rq->rq_flags &= ~RQF_HASHED; in __elv_rqhash_del()
261 void elv_rqhash_del(struct request_queue *q, struct request *rq) in elv_rqhash_del() argument
263 if (ELV_ON_HASH(rq)) in elv_rqhash_del()
264 __elv_rqhash_del(rq); in elv_rqhash_del()
268 void elv_rqhash_add(struct request_queue *q, struct request *rq) in elv_rqhash_add() argument
272 BUG_ON(ELV_ON_HASH(rq)); in elv_rqhash_add()
273 hash_add(e->hash, &rq->hash, rq_hash_key(rq)); in elv_rqhash_add()
274 rq->rq_flags |= RQF_HASHED; in elv_rqhash_add()
278 void elv_rqhash_reposition(struct request_queue *q, struct request *rq) in elv_rqhash_reposition() argument
280 __elv_rqhash_del(rq); in elv_rqhash_reposition()
281 elv_rqhash_add(q, rq); in elv_rqhash_reposition()
288 struct request *rq; in elv_rqhash_find() local
290 hash_for_each_possible_safe(e->hash, rq, next, hash, offset) { in elv_rqhash_find()
291 BUG_ON(!ELV_ON_HASH(rq)); in elv_rqhash_find()
293 if (unlikely(!rq_mergeable(rq))) { in elv_rqhash_find()
294 __elv_rqhash_del(rq); in elv_rqhash_find()
298 if (rq_hash_key(rq) == offset) in elv_rqhash_find()
299 return rq; in elv_rqhash_find()
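The rqhash fragments above add requests under RQF_HASHED, and elv_rqhash_find() walks the bucket with the _safe iterator so it can lazily unlink entries that have stopped being mergeable before returning the exact end-sector match. A compact userspace model (single chained table, hypothetical toy_* names; the kernel uses hash_add()/hash_for_each_possible_safe() over a fixed hlist array):

```c
#include <stdbool.h>
#include <stddef.h>

struct toy_request {
    unsigned long long pos, nr_sectors;
    bool mergeable;
    struct toy_request *hash_next;     /* bucket chain */
};

#define toy_hash_key(rq) ((rq)->pos + (rq)->nr_sectors)
#define NR_BUCKETS 64

static struct toy_request *buckets[NR_BUCKETS];

/* Mirrors elv_rqhash_add(): file the request under its end sector. */
static void toy_rqhash_add(struct toy_request *rq)
{
    unsigned int i = toy_hash_key(rq) % NR_BUCKETS;

    rq->hash_next = buckets[i];
    buckets[i] = rq;
}

/* Mirrors elv_rqhash_find(): scan the bucket, lazily unlinking entries
 * that have stopped being mergeable, and return the exact key match. */
static struct toy_request *toy_rqhash_find(unsigned long long offset)
{
    struct toy_request **pp = &buckets[offset % NR_BUCKETS], *rq;

    while ((rq = *pp)) {
        if (!rq->mergeable) {          /* lazy cleanup during lookup */
            *pp = rq->hash_next;
            continue;
        }
        if (toy_hash_key(rq) == offset)
            return rq;
        pp = &rq->hash_next;
    }
    return NULL;
}
```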
309 void elv_rb_add(struct rb_root *root, struct request *rq) in elv_rb_add() argument
319 if (blk_rq_pos(rq) < blk_rq_pos(__rq)) in elv_rb_add()
321 else if (blk_rq_pos(rq) >= blk_rq_pos(__rq)) in elv_rb_add()
325 rb_link_node(&rq->rb_node, parent, p); in elv_rb_add()
326 rb_insert_color(&rq->rb_node, root); in elv_rb_add()
330 void elv_rb_del(struct rb_root *root, struct request *rq) in elv_rb_del() argument
332 BUG_ON(RB_EMPTY_NODE(&rq->rb_node)); in elv_rb_del()
333 rb_erase(&rq->rb_node, root); in elv_rb_del()
334 RB_CLEAR_NODE(&rq->rb_node); in elv_rb_del()
341 struct request *rq; in elv_rb_find() local
344 rq = rb_entry(n, struct request, rb_node); in elv_rb_find()
346 if (sector < blk_rq_pos(rq)) in elv_rb_find()
348 else if (sector > blk_rq_pos(rq)) in elv_rb_find()
351 return rq; in elv_rb_find()
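elv_rb_add() and elv_rb_find() keep a second index, a tree sorted by start sector (blk_rq_pos()), which schedulers use for front-merge lookups and sector-ordered dispatch; ties go right, and exact-match search uses the same comparisons. A toy model with a plain unbalanced BST (the kernel rebalances via rb_insert_color(), omitted here; toy_* names are hypothetical):

```c
#include <stddef.h>

struct toy_request {
    unsigned long long pos;            /* start sector, the sort key */
    struct toy_request *left, *right;
};

/* Mirrors elv_rb_add(): descend on the start sector; ties go right. */
static void toy_rb_add(struct toy_request **root, struct toy_request *rq)
{
    while (*root) {
        if (rq->pos < (*root)->pos)
            root = &(*root)->left;
        else
            root = &(*root)->right;
    }
    rq->left = rq->right = NULL;
    *root = rq;
}

/* Mirrors elv_rb_find(): exact-match binary search on the sector. */
static struct toy_request *toy_rb_find(struct toy_request *root,
                                       unsigned long long sector)
{
    while (root) {
        if (sector < root->pos)
            root = root->left;
        else if (sector > root->pos)
            root = root->right;
        else
            return root;
    }
    return NULL;
}
```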
363 void elv_dispatch_sort(struct request_queue *q, struct request *rq) in elv_dispatch_sort() argument
368 if (q->last_merge == rq) in elv_dispatch_sort()
371 elv_rqhash_del(q, rq); in elv_dispatch_sort()
379 if (req_op(rq) != req_op(pos)) in elv_dispatch_sort()
381 if (rq_data_dir(rq) != rq_data_dir(pos)) in elv_dispatch_sort()
385 if (blk_rq_pos(rq) >= boundary) { in elv_dispatch_sort()
392 if (blk_rq_pos(rq) >= blk_rq_pos(pos)) in elv_dispatch_sort()
396 list_add(&rq->queuelist, entry); in elv_dispatch_sort()
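elv_dispatch_sort() inserts a request into the dispatch list in one-way elevator order: scanning backwards from the tail, it stops at the first entry the new request may follow, treating q->end_sector as the wrap boundary so sectors at or above the boundary drain before the wrapped-around low sectors. The kernel also breaks early on a different op or data direction and on started/barrier requests (fragments 379/381 above); the sketch below models only the sector ordering, with hypothetical toy_* names:

```c
#include <stdbool.h>

struct toy_request {
    unsigned long long pos;            /* start sector */
    struct toy_request *prev, *next;   /* dispatch-list links */
};

/* One-way elevator order around `boundary` (q->end_sector): sectors at
 * or above the boundary come first, ascending, then the wrapped-around
 * sectors below it, also ascending. Returns true if rq may follow pos. */
static bool toy_goes_after(unsigned long long boundary,
                           const struct toy_request *rq,
                           const struct toy_request *pos)
{
    bool rq_hi  = rq->pos  >= boundary;
    bool pos_hi = pos->pos >= boundary;

    if (rq_hi != pos_hi)
        return pos_hi;                 /* high half precedes low half */
    return rq->pos >= pos->pos;
}

/* Mirrors elv_dispatch_sort(): scan backwards from the tail of a
 * circular list (head is a sentinel, like q->queue_head) and splice
 * rq in after the first entry it may follow. */
static void toy_dispatch_sort(struct toy_request *head,
                              unsigned long long boundary,
                              struct toy_request *rq)
{
    struct toy_request *entry = head->prev;

    while (entry != head && !toy_goes_after(boundary, rq, entry))
        entry = entry->prev;

    rq->next = entry->next;
    rq->prev = entry;
    entry->next->prev = rq;
    entry->next = rq;
}
```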
405 void elv_dispatch_add_tail(struct request_queue *q, struct request *rq) in elv_dispatch_add_tail() argument
407 if (q->last_merge == rq) in elv_dispatch_add_tail()
410 elv_rqhash_del(q, rq); in elv_dispatch_add_tail()
414 q->end_sector = rq_end_sector(rq); in elv_dispatch_add_tail()
415 q->boundary_rq = rq; in elv_dispatch_add_tail()
416 list_add_tail(&rq->queuelist, &q->queue_head); in elv_dispatch_add_tail()
474 bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq) in elv_attempt_insert_merge() argument
485 if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) in elv_attempt_insert_merge()
496 __rq = elv_rqhash_find(q, blk_rq_pos(rq)); in elv_attempt_insert_merge()
497 if (!__rq || !blk_attempt_req_merge(q, __rq, rq)) in elv_attempt_insert_merge()
502 rq = __rq; in elv_attempt_insert_merge()
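elv_attempt_insert_merge() tries cheap back merges without consulting the scheduler: first the cached q->last_merge hint, then repeated end-sector hash lookups, since each successful merge (rq = __rq above) can expose yet another request ending where the merged one begins. A self-contained model with hypothetical toy_* stand-ins for blk_attempt_req_merge() and the rqhash:

```c
#include <stdbool.h>
#include <stddef.h>

struct toy_request { unsigned long long pos, nr_sectors; };

#define NR_REQS 16
static struct toy_request *table[NR_REQS];   /* stand-in for the rqhash */
static struct toy_request *last_merge;       /* models q->last_merge */

/* Stand-in for blk_attempt_req_merge(): fold b into a when a ends
 * exactly where b starts. */
static bool toy_attempt_req_merge(struct toy_request *a, struct toy_request *b)
{
    if (a->pos + a->nr_sectors != b->pos)
        return false;
    a->nr_sectors += b->nr_sectors;
    return true;
}

/* Stand-in for elv_rqhash_find(): any request ending at `sector`. */
static struct toy_request *toy_rqhash_find(unsigned long long sector)
{
    for (int i = 0; i < NR_REQS; i++)
        if (table[i] && table[i]->pos + table[i]->nr_sectors == sector)
            return table[i];
    return NULL;
}

/* Mirrors elv_attempt_insert_merge(): try the cached last_merge hint,
 * then chain hash lookups, because each successful back merge can
 * expose another request ending where the merged one now begins. */
static bool toy_attempt_insert_merge(struct toy_request *rq)
{
    bool ret = false;

    if (last_merge && toy_attempt_req_merge(last_merge, rq))
        return true;

    for (;;) {
        struct toy_request *prev = toy_rqhash_find(rq->pos);

        if (!prev || !toy_attempt_req_merge(prev, rq))
            break;
        ret = true;
        rq = prev;     /* kernel: rq = __rq, then loop again */
    }
    return ret;
}
```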
508 void elv_merged_request(struct request_queue *q, struct request *rq, in elv_merged_request() argument
514 e->type->ops.mq.request_merged(q, rq, type); in elv_merged_request()
516 e->type->ops.sq.elevator_merged_fn(q, rq, type); in elv_merged_request()
519 elv_rqhash_reposition(q, rq); in elv_merged_request()
521 q->last_merge = rq; in elv_merged_request()
524 void elv_merge_requests(struct request_queue *q, struct request *rq, in elv_merge_requests() argument
531 e->type->ops.mq.requests_merged(q, rq, next); in elv_merge_requests()
535 e->type->ops.sq.elevator_merge_req_fn(q, rq, next); in elv_merge_requests()
538 elv_rqhash_reposition(q, rq); in elv_merge_requests()
545 q->last_merge = rq; in elv_merge_requests()
548 void elv_bio_merged(struct request_queue *q, struct request *rq, in elv_bio_merged() argument
557 e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio); in elv_bio_merged()
561 static void blk_pm_requeue_request(struct request *rq) in blk_pm_requeue_request() argument
563 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_requeue_request()
564 rq->q->nr_pending--; in blk_pm_requeue_request()
567 static void blk_pm_add_request(struct request_queue *q, struct request *rq) in blk_pm_add_request() argument
569 if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 && in blk_pm_add_request()
574 static inline void blk_pm_requeue_request(struct request *rq) {} in blk_pm_requeue_request() argument
576 struct request *rq) in blk_pm_add_request() argument
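The blk_pm_* helpers above do runtime-PM bookkeeping: nr_pending counts non-PM requests, the first one to arrive on a suspended (or suspending) queue triggers a device resume, and requeue undoes the count so an idle queue can autosuspend again. A toy model (hypothetical names; the inline state flip is a simplification, since the kernel's pm_request_resume() is asynchronous):

```c
#include <stdbool.h>
#include <stdio.h>

enum toy_rpm_status { TOY_RPM_ACTIVE, TOY_RPM_SUSPENDED };

struct toy_queue {
    int nr_pending;                    /* non-PM requests queued */
    enum toy_rpm_status rpm_status;
};

/* Mirrors blk_pm_add_request(): the first non-PM request to land on a
 * suspended queue requests a resume. */
static void toy_pm_add_request(struct toy_queue *q, bool is_pm)
{
    if (!is_pm && q->nr_pending++ == 0 &&
        q->rpm_status == TOY_RPM_SUSPENDED) {
        printf("resume device\n");     /* models pm_request_resume() */
        q->rpm_status = TOY_RPM_ACTIVE;
    }
}

/* Mirrors blk_pm_requeue_request(): undo the pending count. */
static void toy_pm_requeue_request(struct toy_queue *q, bool is_pm)
{
    if (!is_pm)
        q->nr_pending--;
}
```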
581 void elv_requeue_request(struct request_queue *q, struct request *rq) in elv_requeue_request() argument
587 if (blk_account_rq(rq)) { in elv_requeue_request()
588 q->in_flight[rq_is_sync(rq)]--; in elv_requeue_request()
589 if (rq->rq_flags & RQF_SORTED) in elv_requeue_request()
590 elv_deactivate_rq(q, rq); in elv_requeue_request()
593 rq->rq_flags &= ~RQF_STARTED; in elv_requeue_request()
595 blk_pm_requeue_request(rq); in elv_requeue_request()
597 __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE); in elv_requeue_request()
619 void __elv_add_request(struct request_queue *q, struct request *rq, int where) in __elv_add_request() argument
621 trace_block_rq_insert(q, rq); in __elv_add_request()
623 blk_pm_add_request(q, rq); in __elv_add_request()
625 rq->q = q; in __elv_add_request()
627 if (rq->rq_flags & RQF_SOFTBARRIER) { in __elv_add_request()
629 if (!blk_rq_is_passthrough(rq)) { in __elv_add_request()
630 q->end_sector = rq_end_sector(rq); in __elv_add_request()
631 q->boundary_rq = rq; in __elv_add_request()
633 } else if (!(rq->rq_flags & RQF_ELVPRIV) && in __elv_add_request()
641 rq->rq_flags |= RQF_SOFTBARRIER; in __elv_add_request()
642 list_add(&rq->queuelist, &q->queue_head); in __elv_add_request()
646 rq->rq_flags |= RQF_SOFTBARRIER; in __elv_add_request()
648 list_add_tail(&rq->queuelist, &q->queue_head); in __elv_add_request()
668 if (elv_attempt_insert_merge(q, rq)) in __elv_add_request()
672 BUG_ON(blk_rq_is_passthrough(rq)); in __elv_add_request()
673 rq->rq_flags |= RQF_SORTED; in __elv_add_request()
675 if (rq_mergeable(rq)) { in __elv_add_request()
676 elv_rqhash_add(q, rq); in __elv_add_request()
678 q->last_merge = rq; in __elv_add_request()
686 q->elevator->type->ops.sq.elevator_add_req_fn(q, rq); in __elv_add_request()
690 rq->rq_flags |= RQF_SOFTBARRIER; in __elv_add_request()
691 blk_insert_flush(rq); in __elv_add_request()
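__elv_add_request() switches on the insertion point: REQUEUE/FRONT pin the request at the head of the dispatch list behind a soft barrier, BACK appends it at the tail and runs the queue, SORT first tries elv_attempt_insert_merge() and otherwise hands the request to the scheduler (hash add, last_merge update, elevator_add_req_fn), and FLUSH diverts it to blk_insert_flush(). A sketch of that dispatch shape over a toy circular list (hypothetical names):

```c
#include <stdbool.h>

struct toy_request {
    unsigned long long pos;
    bool softbarrier, sorted;          /* RQF_SOFTBARRIER / RQF_SORTED */
    struct toy_request *prev, *next;   /* dispatch-list links */
};

enum toy_where { TOY_REQUEUE, TOY_FRONT, TOY_BACK, TOY_SORT };

/* Insert rq immediately after entry in a circular sentinel list. */
static void toy_list_add(struct toy_request *entry, struct toy_request *rq)
{
    rq->next = entry->next;
    rq->prev = entry;
    entry->next->prev = rq;
    entry->next = rq;
}

/* Mirrors the shape of __elv_add_request(): front-style inserts bypass
 * the scheduler and pin the request behind a soft barrier; SORT marks
 * it RQF_SORTED and hands it to the elevator (modeled by the flag
 * alone). FLUSH, not modeled, would go to blk_insert_flush(). */
static void toy_elv_add_request(struct toy_request *head, enum toy_where where,
                                struct toy_request *rq)
{
    switch (where) {
    case TOY_REQUEUE:
    case TOY_FRONT:
        rq->softbarrier = true;
        toy_list_add(head, rq);        /* head of the dispatch list */
        break;
    case TOY_BACK:
        rq->softbarrier = true;
        toy_list_add(head->prev, rq);  /* tail, then the queue is run */
        break;
    case TOY_SORT:
        rq->sorted = true;
        /* kernel: try elv_attempt_insert_merge(); else elv_rqhash_add(),
         * q->last_merge = rq, then ops.sq.elevator_add_req_fn(q, rq) */
        break;
    }
}
```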
701 void elv_add_request(struct request_queue *q, struct request *rq, int where) in elv_add_request() argument
706 __elv_add_request(q, rq, where); in elv_add_request()
711 struct request *elv_latter_request(struct request_queue *q, struct request *rq) in elv_latter_request() argument
716 return e->type->ops.mq.next_request(q, rq); in elv_latter_request()
718 return e->type->ops.sq.elevator_latter_req_fn(q, rq); in elv_latter_request()
723 struct request *elv_former_request(struct request_queue *q, struct request *rq) in elv_former_request() argument
728 return e->type->ops.mq.former_request(q, rq); in elv_former_request()
730 return e->type->ops.sq.elevator_former_req_fn(q, rq); in elv_former_request()
734 int elv_set_request(struct request_queue *q, struct request *rq, in elv_set_request() argument
743 return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask); in elv_set_request()
747 void elv_put_request(struct request_queue *q, struct request *rq) in elv_put_request() argument
755 e->type->ops.sq.elevator_put_req_fn(rq); in elv_put_request()
771 void elv_completed_request(struct request_queue *q, struct request *rq) in elv_completed_request() argument
781 if (blk_account_rq(rq)) { in elv_completed_request()
782 q->in_flight[rq_is_sync(rq)]--; in elv_completed_request()
783 if ((rq->rq_flags & RQF_SORTED) && in elv_completed_request()
785 e->type->ops.sq.elevator_completed_req_fn(q, rq); in elv_completed_request()
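elv_requeue_request() and elv_completed_request() both decrement q->in_flight[rq_is_sync(rq)], the count of dispatched-but-unfinished requests split by sync vs. async; completion additionally fires the scheduler's completed hook for sorted requests. The counter discipline in miniature (toy_* names are hypothetical):

```c
#include <stdbool.h>

/* Models q->in_flight[2]: dispatched-but-unfinished requests, indexed
 * by rq_is_sync(). */
struct toy_queue { int in_flight[2]; };

static void toy_dispatch(struct toy_queue *q, bool sync)
{
    q->in_flight[sync]++;
}

/* Both requeue and completion undo the same counter: either way the
 * request is no longer in flight. */
static void toy_unwind(struct toy_queue *q, bool sync)
{
    q->in_flight[sync]--;
}
```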
1173 struct request *rq) in elv_rb_former_request() argument
1175 struct rb_node *rbprev = rb_prev(&rq->rb_node); in elv_rb_former_request()
1185 struct request *rq) in elv_rb_latter_request() argument
1187 struct rb_node *rbnext = rb_next(&rq->rb_node); in elv_rb_latter_request()
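elv_rb_former_request() and elv_rb_latter_request() return the sector-wise neighbours of a request via rb_prev()/rb_next(), i.e. the in-order predecessor and successor in the position tree. The kernel walks parent pointers; the toy tree below has none, so this sketch finds the successor by descending from the root (the predecessor is symmetric):

```c
#include <stddef.h>

struct toy_request {
    unsigned long long pos;
    struct toy_request *left, *right;
};

/* In-order successor by key: the request with the smallest start
 * sector strictly greater than `pos`, or NULL if none exists. */
static struct toy_request *toy_latter(struct toy_request *root,
                                      unsigned long long pos)
{
    struct toy_request *succ = NULL;

    while (root) {
        if (pos < root->pos) {
            succ = root;               /* candidate successor */
            root = root->left;
        } else {
            root = root->right;
        }
    }
    return succ;
}
```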