Home
last modified time | relevance | path

Searched refs: at_head (Results 1 – 13 of 13) sorted by relevance

/Linux-v5.10/block/
blk-exec.c:49 struct request *rq, int at_head, in blk_execute_rq_nowait() argument
64 blk_mq_sched_insert_request(rq, at_head, true, false); in blk_execute_rq_nowait()
80 struct request *rq, int at_head) in blk_execute_rq() argument
86 blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq); in blk_execute_rq()
blk-mq-sched.c:417 void blk_mq_sched_insert_request(struct request *rq, bool at_head, in blk_mq_sched_insert_request() argument
449 at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head; in blk_mq_sched_insert_request()
450 blk_mq_request_bypass_insert(rq, at_head, false); in blk_mq_sched_insert_request()
458 e->type->ops.insert_requests(hctx, &list, at_head); in blk_mq_sched_insert_request()
461 __blk_mq_insert_request(hctx, rq, at_head); in blk_mq_sched_insert_request()
blk-mq.h:45 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
69 bool at_head);
70 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
mq-deadline.c:486 bool at_head) in dd_insert_request() argument
503 if (at_head || blk_rq_is_passthrough(rq)) { in dd_insert_request()
504 if (at_head) in dd_insert_request()
526 struct list_head *list, bool at_head) in dd_insert_requests() argument
537 dd_insert_request(hctx, rq, at_head); in dd_insert_requests()
blk-mq-sched.h:19 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
scsi_ioctl.c:288 int at_head = 0; in sg_io() local
311 at_head = 1; in sg_io()
360 blk_execute_rq(q, bd_disk, rq, at_head); in sg_io()
blk-mq.c:819 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, in blk_mq_add_to_requeue_list() argument
832 if (at_head) { in blk_mq_add_to_requeue_list()
1815 bool at_head) in __blk_mq_insert_req_list() argument
1824 if (at_head) in __blk_mq_insert_req_list()
1831 bool at_head) in __blk_mq_insert_request() argument
1837 __blk_mq_insert_req_list(hctx, rq, at_head); in __blk_mq_insert_request()
1850 void blk_mq_request_bypass_insert(struct request *rq, bool at_head, in blk_mq_request_bypass_insert() argument
1856 if (at_head) in blk_mq_request_bypass_insert()
kyber-iosched.c:588 struct list_head *rq_list, bool at_head) in kyber_insert_requests() argument
599 if (at_head) in kyber_insert_requests()
bfq-iosched.c:5493 bool at_head) in bfq_insert_request() argument
5517 if (!bfqq || at_head || blk_rq_is_passthrough(rq)) { in bfq_insert_request()
5518 if (at_head) in bfq_insert_request()
5552 struct list_head *list, bool at_head) in bfq_insert_requests() argument
5559 bfq_insert_request(hctx, rq, at_head); in bfq_insert_requests()
/Linux-v5.10/drivers/scsi/
sg.c:782 int k, at_head; in sg_common_write() local
826 at_head = 0; in sg_common_write()
828 at_head = 1; in sg_common_write()
833 srp->rq, at_head, sg_rq_end_io); in sg_common_write()
/Linux-v5.10/drivers/nvme/host/
nvme.h:615 unsigned timeout, int qid, int at_head,
core.c:872 struct gendisk *bd_disk, struct request *rq, int at_head) in nvme_execute_rq_polled() argument
880 blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq); in nvme_execute_rq_polled()
894 unsigned timeout, int qid, int at_head, in __nvme_submit_sync_cmd() argument
913 nvme_execute_rq_polled(req->q, NULL, req, at_head); in __nvme_submit_sync_cmd()
915 blk_execute_rq(req->q, NULL, req, at_head); in __nvme_submit_sync_cmd()
/Linux-v5.10/drivers/scsi/smartpqi/
smartpqi_init.c:5075 struct pqi_io_request *io_request, bool at_head) in pqi_add_to_raid_bypass_retry_list() argument
5080 if (at_head) in pqi_add_to_raid_bypass_retry_list()