Searched refs:rq_flags (Results 1 – 25 of 66) sorted by relevance

/Linux-v5.4/block/
blk-pm.h  18 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_mark_last_busy()
26 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_requeue_request()
35 if (q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_add_request()
43 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_put_request()
blk-mq.c  291 return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator; in blk_mq_need_time_stamp()
299 req_flags_t rq_flags = 0; in blk_mq_rq_ctx_init() local
306 rq_flags = RQF_MQ_INFLIGHT; in blk_mq_rq_ctx_init()
318 rq->rq_flags = rq_flags; in blk_mq_rq_ctx_init()
321 rq->rq_flags |= RQF_PREEMPT; in blk_mq_rq_ctx_init()
323 rq->rq_flags |= RQF_IO_STAT; in blk_mq_rq_ctx_init()
415 rq->rq_flags |= RQF_ELVPRIV; in blk_mq_get_request()
516 if (rq->rq_flags & RQF_ELVPRIV) { in blk_mq_free_request()
526 if (rq->rq_flags & RQF_MQ_INFLIGHT) in blk_mq_free_request()
547 if (rq->rq_flags & RQF_STATS) { in __blk_mq_end_request()
[all …]
blk-merge.c  508 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in blk_rq_map_sg()
515 if (unlikely(rq->rq_flags & RQF_COPY_USER) && in blk_rq_map_sg()
663 if (rq->rq_flags & RQF_MIXED_MERGE) in blk_rq_set_mixed_merge()
676 rq->rq_flags |= RQF_MIXED_MERGE; in blk_rq_set_mixed_merge()
778 if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) || in attempt_merge()
blk-flush.c  130 rq->rq_flags &= ~RQF_FLUSH_SEQ; in blk_flush_restore_request()
319 flush_rq->rq_flags |= RQF_FLUSH_SEQ; in blk_kick_flush()
411 rq->rq_flags |= RQF_FLUSH_SEQ; in blk_insert_flush()
blk-core.c  235 if (unlikely(rq->rq_flags & RQF_QUIET)) in req_bio_endio()
241 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) in req_bio_endio()
1287 if (!(rq->rq_flags & RQF_MIXED_MERGE)) in blk_rq_err_bytes()
1329 if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) { in blk_account_io_done()
1447 !(req->rq_flags & RQF_QUIET))) in blk_update_request()
1491 if (req->rq_flags & RQF_MIXED_MERGE) { in blk_update_request()
1496 if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) { in blk_update_request()
1588 if (src->rq_flags & RQF_SPECIAL_PAYLOAD) { in __blk_rq_prep_clone()
1589 dst->rq_flags |= RQF_SPECIAL_PAYLOAD; in __blk_rq_prep_clone()
blk-map.c  150 rq->rq_flags |= RQF_COPY_USER; in blk_rq_map_user_iov()
248 rq->rq_flags |= RQF_COPY_USER; in blk_rq_map_kern()
blk-mq.h  214 if (rq->rq_flags & RQF_MQ_INFLIGHT) { in __blk_mq_put_driver_tag()
215 rq->rq_flags &= ~RQF_MQ_INFLIGHT; in __blk_mq_put_driver_tag()
blk-zoned.c  59 WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED); in __blk_req_zone_write_lock()
60 rq->rq_flags |= RQF_ZONE_WRITE_LOCKED; in __blk_req_zone_write_lock()
66 rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED; in __blk_req_zone_write_unlock()
blk-mq-sched.c  365 if (rq->rq_flags & RQF_FLUSH_SEQ) { in blk_mq_sched_bypass_insert()
373 rq->rq_flags |= RQF_SORTED; in blk_mq_sched_bypass_insert()
387 if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) { in blk_mq_sched_insert_request()
blk-timeout.c  124 req->rq_flags &= ~RQF_TIMED_OUT; in blk_add_timer()
blk.h  190 #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
251 (rq->rq_flags & RQF_IO_STAT) && in blk_do_io_stat()
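
All of the block/ hits above share one pattern: here rq_flags is a req_flags_t bitmask embedded in struct request, and the RQF_* bits are tested and updated with plain (non-atomic) bitwise operations by whoever currently owns the request. A minimal sketch of that pattern, assuming only <linux/blkdev.h>; the helper name is made up for illustration:

    #include <linux/blkdev.h>

    /* Hypothetical helper: mark a request "quiet" so completion paths skip
     * error logging, mirroring the RQF_QUIET checks in blk-core.c above. */
    static void example_mark_quiet(struct request *rq)
    {
            if (!(rq->rq_flags & RQF_QUIET))        /* test a flag bit */
                    rq->rq_flags |= RQF_QUIET;      /* set it, non-atomically */
    }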
/Linux-v5.4/kernel/sched/
sched.h  1126 struct rq_flags { struct
1139 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) in rq_pin_lock() argument
1149 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) in rq_unpin_lock()
1159 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) in rq_repin_lock()
1171 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1174 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1178 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) in __task_rq_unlock()
1186 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in task_rq_unlock()
1196 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) in rq_lock_irqsave()
1204 rq_lock_irq(struct rq *rq, struct rq_flags *rf) in rq_lock_irq()
[all …]
stop_task.c  20 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_stop()
38 pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in pick_next_task_stop()
core.c  78 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) in __task_rq_lock()
102 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) in task_rq_lock()
240 struct rq_flags rf; in hrtick()
267 struct rq_flags rf; in __hrtick_start()
1049 struct rq_flags rf; in uclamp_update_active()
1488 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, in move_queued_task()
1523 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, in __migrate_task()
1546 struct rq_flags rf; in migration_cpu_stop()
1633 struct rq_flags rf; in __set_cpus_allowed_ptr()
1763 struct rq_flags srf, drf; in __migrate_swap_task()
[all …]
idle.c  370 balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_idle()
395 pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in pick_next_task_idle()
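
The kernel/sched/ hits refer to something entirely different: struct rq_flags (sched.h:1126 above) is a small on-stack struct that carries the caller's saved IRQ flags and the lockdep pin cookie across runqueue locking. A sketch of the usual calling pattern, assuming code inside kernel/sched/ where the private sched.h header is visible; the function is illustrative only:

    #include "sched.h"      /* kernel/sched/ internal header */

    /* Illustrative: lock a task's runqueue, inspect it, unlock. */
    static void example_peek_task_rq(struct task_struct *p)
    {
            struct rq_flags rf;
            struct rq *rq;

            rq = task_rq_lock(p, &rf);      /* takes p->pi_lock and rq->lock, pins rq */
            /* ... p's runqueue assignment is stable here ... */
            task_rq_unlock(rq, p, &rf);     /* unpins and drops both locks */
    }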
/Linux-v5.4/drivers/ide/
ide-cd.c  102 if (!sense || !rq || (rq->rq_flags & RQF_QUIET)) in cdrom_log_sense()
303 rq->rq_flags |= RQF_FAILED; in cdrom_decode_status()
323 !(rq->rq_flags & RQF_QUIET)) in cdrom_decode_status()
358 if (!(rq->rq_flags & RQF_QUIET)) in cdrom_decode_status()
367 if (!(rq->rq_flags & RQF_QUIET)) in cdrom_decode_status()
374 if (!(rq->rq_flags & RQF_QUIET)) in cdrom_decode_status()
392 rq->rq_flags |= RQF_FAILED; in cdrom_decode_status()
434 req_flags_t rq_flags) in ide_cd_queue_pc() argument
443 cmd[0], write, timeout, rq_flags); in ide_cd_queue_pc()
458 rq->rq_flags |= rq_flags; in ide_cd_queue_pc()
[all …]
ide-io.c  327 rq->rq_flags |= RQF_FAILED; in start_request()
463 if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) { in ide_issue_rq()
464 rq->rq_flags |= RQF_DONTPREP; in ide_issue_rq()
523 (rq->rq_flags & RQF_PREEMPT) == 0) { in ide_issue_rq()
ide-atapi.c  226 sense_rq->rq_flags |= RQF_PREEMPT; in ide_prep_sense()
319 if (!(rq->rq_flags & RQF_QUIET)) in ide_cd_expiry()
405 rq->rq_flags |= RQF_FAILED; in ide_check_ireason()
/Linux-v5.4/net/sunrpc/
svc_xprt.c  351 if (!test_bit(RQ_DATA, &rqstp->rq_flags)) { in svc_xprt_reserve_slot()
355 set_bit(RQ_DATA, &rqstp->rq_flags); in svc_xprt_reserve_slot()
363 if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) { in svc_xprt_release_slot()
427 if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) in svc_xprt_do_enqueue()
550 if (test_bit(RQ_BUSY, &rqstp->rq_flags)) in svc_wake_up()
718 clear_bit(RQ_BUSY, &rqstp->rq_flags); in svc_get_next_xprt()
728 set_bit(RQ_BUSY, &rqstp->rq_flags); in svc_get_next_xprt()
1178 if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags)) in svc_defer()
1207 set_bit(RQ_DROPME, &rqstp->rq_flags); in svc_defer()
svc.c  612 __set_bit(RQ_BUSY, &rqstp->rq_flags); in svc_rqst_alloc()
697 set_bit(RQ_VICTIM, &rqstp->rq_flags); in choose_victim()
860 if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags)) in svc_exit_thread()
1174 set_bit(RQ_AUTHERR, &rqstp->rq_flags); in svc_return_autherr()
1182 if (test_and_clear_bit(RQ_AUTHERR, &rqstp->rq_flags)) in svc_get_autherr()
1207 test_bit(RQ_DROPME, &rqstp->rq_flags)) in svc_generic_dispatch()
1210 if (test_bit(RQ_AUTHERR, &rqstp->rq_flags)) in svc_generic_dispatch()
1301 set_bit(RQ_SPLICE_OK, &rqstp->rq_flags); in svc_process_common()
1303 set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags); in svc_process_common()
1304 clear_bit(RQ_DROPME, &rqstp->rq_flags); in svc_process_common()
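
The sunrpc hits are a third, unrelated use of the name: rq_flags on struct svc_rqst is an unsigned long treated as an atomic bit field (RQ_BUSY, RQ_DATA, RQ_DROPME, ...) and manipulated with the test_bit()/set_bit() family, since several contexts can touch it concurrently. A minimal sketch, assuming <linux/sunrpc/svc.h>; the helper is hypothetical:

    #include <linux/sunrpc/svc.h>

    /* Hypothetical helper: atomically claim a server thread, the same
     * test_and_set_bit(RQ_BUSY, ...) pattern as svc_xprt_do_enqueue() above. */
    static bool example_try_claim_thread(struct svc_rqst *rqstp)
    {
            return !test_and_set_bit(RQ_BUSY, &rqstp->rq_flags);
    }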
/Linux-v5.4/drivers/scsi/
scsi_lib.c  157 if (cmd->request->rq_flags & RQF_DONTPREP) { in scsi_mq_requeue_cmd()
158 cmd->request->rq_flags &= ~RQF_DONTPREP; in scsi_mq_requeue_cmd()
251 int timeout, int retries, u64 flags, req_flags_t rq_flags, in __scsi_execute() argument
274 req->rq_flags |= rq_flags | RQF_QUIET; in __scsi_execute()
792 if (!(req->rq_flags & RQF_QUIET)) { in scsi_io_completion_action()
881 else if (req->rq_flags & RQF_QUIET) in scsi_io_completion_nz_result()
1098 if (rq->rq_flags & RQF_DONTPREP) { in scsi_cleanup_rq()
1100 rq->rq_flags &= ~RQF_DONTPREP; in scsi_cleanup_rq()
1260 if (req && !(req->rq_flags & RQF_PREEMPT)) in scsi_prep_state_check()
1269 if (req && !(req->rq_flags & RQF_PREEMPT)) in scsi_prep_state_check()
[all …]
/Linux-v5.4/include/scsi/
scsi_device.h  435 req_flags_t rq_flags, int *resid);
438 sshdr, timeout, retries, flags, rq_flags, resid) \ argument
443 sense, sshdr, timeout, retries, flags, rq_flags, \
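
On the SCSI side rq_flags also shows up as a parameter: callers of the scsi_execute() macro pass extra request flags through, and __scsi_execute() ORs them into req->rq_flags together with RQF_QUIET (scsi_lib.c:274 above). A hedged caller sketch; the helper name and command choice are made up, and the argument order follows the scsi_execute() declaration shown above:

    #include <linux/blkdev.h>
    #include <linux/dma-direction.h>
    #include <scsi/scsi_device.h>
    #include <scsi/scsi_proto.h>

    /* Illustrative: issue a TEST UNIT READY with RQF_PM so the request is
     * accepted while the device is runtime-suspended (see blk-pm.h above);
     * __scsi_execute() adds RQF_QUIET on top of whatever is passed here. */
    static int example_quiet_tur(struct scsi_device *sdev)
    {
            unsigned char cmd[6] = { TEST_UNIT_READY };

            return scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, NULL,
                                30 * HZ, 3, 0, RQF_PM, NULL);
    }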
/Linux-v5.4/include/linux/
blkdev.h  138 req_flags_t rq_flags; member
670 return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq); in blk_account_rq()
757 if (rq->rq_flags & RQF_NOMERGE_FLAGS) in rq_mergeable()
977 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in blk_rq_payload_bytes()
988 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in req_bvec()
1127 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in blk_rq_nr_phys_segments()
1736 if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED) in blk_req_zone_write_unlock()
/Linux-v5.4/drivers/mmc/core/
queue.c  260 req->rq_flags |= RQF_QUIET; in mmc_mq_queue_rq()
304 if (!(req->rq_flags & RQF_DONTPREP)) { in mmc_mq_queue_rq()
306 req->rq_flags |= RQF_DONTPREP; in mmc_mq_queue_rq()
/Linux-v5.4/drivers/md/
dm-rq.c  276 if (rq->rq_flags & RQF_FAILED) in dm_softirq_done()
302 rq->rq_flags |= RQF_FAILED; in dm_kill_unmapped_request()
318 clone->rq_flags |= RQF_IO_STAT; in dm_dispatch_clone_request()
