
Searched refs:rq_flags (Results 1 – 25 of 62) sorted by relevance


/Linux-v6.6/block/
blk-mq-sched.h
40 if (rq->rq_flags & RQF_USE_SCHED) { in blk_mq_sched_allow_merge()
51 if (rq->rq_flags & RQF_USE_SCHED) { in blk_mq_sched_completed_request()
61 if (rq->rq_flags & RQF_USE_SCHED) { in blk_mq_sched_requeue_request()
blk-flush.c
135 rq->rq_flags &= ~RQF_FLUSH_SEQ; in blk_flush_restore_request()
334 flush_rq->rq_flags |= RQF_MQ_INFLIGHT; in blk_kick_flush()
340 flush_rq->rq_flags |= RQF_FLUSH_SEQ; in blk_kick_flush()
393 rq->rq_flags |= RQF_FLUSH_SEQ; in blk_rq_init_flush()
blk-mq.c
362 data->rq_flags |= RQF_PM; in blk_mq_rq_ctx_init()
364 data->rq_flags |= RQF_IO_STAT; in blk_mq_rq_ctx_init()
365 rq->rq_flags = data->rq_flags; in blk_mq_rq_ctx_init()
367 if (data->rq_flags & RQF_SCHED_TAGS) { in blk_mq_rq_ctx_init()
392 if (rq->rq_flags & RQF_USE_SCHED) { in blk_mq_rq_ctx_init()
455 data->rq_flags |= RQF_SCHED_TAGS; in __blk_mq_alloc_requests()
467 data->rq_flags |= RQF_USE_SCHED; in __blk_mq_alloc_requests()
476 if (!(data->rq_flags & RQF_SCHED_TAGS)) in __blk_mq_alloc_requests()
480 data->rq_flags |= RQF_RESV; in __blk_mq_alloc_requests()
661 data.rq_flags |= RQF_SCHED_TAGS; in blk_mq_alloc_request_hctx()
[all …]
blk-zoned.c
74 WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED); in blk_req_zone_write_trylock()
75 rq->rq_flags |= RQF_ZONE_WRITE_LOCKED; in blk_req_zone_write_trylock()
87 WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED); in __blk_req_zone_write_lock()
88 rq->rq_flags |= RQF_ZONE_WRITE_LOCKED; in __blk_req_zone_write_lock()
94 rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED; in __blk_req_zone_write_unlock()
blk-mq.h
152 req_flags_t rq_flags; member
224 if (data->rq_flags & RQF_SCHED_TAGS) in blk_mq_tags_from_data()
308 if (rq->rq_flags & RQF_MQ_INFLIGHT) { in __blk_mq_put_driver_tag()
309 rq->rq_flags &= ~RQF_MQ_INFLIGHT; in __blk_mq_put_driver_tag()
blk-pm.h
21 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_mark_last_busy()
blk-merge.c
572 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in __blk_rq_map_sg()
738 if (rq->rq_flags & RQF_MIXED_MERGE) in blk_rq_set_mixed_merge()
751 rq->rq_flags |= RQF_MIXED_MERGE; in blk_rq_set_mixed_merge()
770 if (req->rq_flags & RQF_MIXED_MERGE) { in blk_update_mixed_merge()
847 if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) || in attempt_merge()
blk.h
140 if (rq->rq_flags & RQF_NOMERGE_FLAGS) in rq_mergeable()
272 #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
343 return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq); in blk_do_io_stat()
blk-timeout.c
140 req->rq_flags &= ~RQF_TIMED_OUT; in blk_add_timer()
/Linux-v6.6/include/linux/
blk-mq.h
86 req_flags_t rq_flags; member
835 return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED)); in blk_mq_need_time_stamp()
840 return rq->rq_flags & RQF_RESV; in blk_mq_is_reserved_rq()
855 if (!iob || (req->rq_flags & RQF_SCHED_TAGS) || ioerror || in blk_mq_add_to_batch()
1073 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in blk_rq_payload_bytes()
1084 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in req_bvec()
1123 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in blk_rq_nr_phys_segments()
1184 if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED) in blk_req_zone_write_unlock()
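The block/ and include/linux/ hits above all use the same rq_flags: a req_flags_t bit word on struct request, tested and updated with plain bitwise ops. A minimal sketch of that pattern, mirroring the blk_do_io_stat() hit at blk.h line 343; the helper name my_rq_counts_for_stats is an illustrative assumption, not kernel code:

#include <linux/blk-mq.h>

static bool my_rq_counts_for_stats(struct request *rq)
{
        /*
         * Counted only when I/O stats are enabled for this request and
         * it is not a passthrough command, as in the blk.h hit above.
         */
        return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq);
}
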
/Linux-v6.6/kernel/sched/
sched.h
1577 struct rq_flags { struct
1602 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) in rq_pin_lock()
1615 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) in rq_unpin_lock()
1625 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) in rq_repin_lock()
1637 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1640 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1644 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) in __task_rq_unlock()
1652 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in task_rq_unlock()
1662 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) in rq_lock_irqsave()
1670 rq_lock_irq(struct rq *rq, struct rq_flags *rf) in rq_lock_irq()
[all …]
core.c
627 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) in __task_rq_lock()
651 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) in task_rq_lock()
790 struct rq_flags rf; in hrtick()
818 struct rq_flags rf; in __hrtick_start()
1483 struct rq_flags rf; in uclamp_update_util_min_rt_default()
1735 struct rq_flags rf; in uclamp_update_active()
2288 struct rq_flags rf; in wait_task_inactive()
2516 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, in move_queued_task()
2562 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, in __migrate_task()
2586 struct rq_flags rf; in migration_cpu_stop()
[all …]
stop_task.c
19 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_stop()
core_sched.c
57 struct rq_flags rf; in sched_core_update_cookie()
psi.c
1046 struct rq_flags rf; in psi_memstall_enter()
1077 struct rq_flags rf; in psi_memstall_leave()
1146 struct rq_flags rf; in cgroup_move_task()
1222 struct rq_flags rf; in psi_cgroup_restart()
stats.h
169 struct rq_flags rf; in psi_ttwu_dequeue()
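The kernel/sched/ hits are an unrelated struct rq_flags: a stack cookie threaded through the runqueue lock helpers so lock pin state can be saved and restored. A minimal sketch of that calling convention, assuming a hypothetical my_inspect_task() living inside kernel/sched/ (sched.h here is the scheduler's private header, per the hits above):

#include "sched.h"

static void my_inspect_task(struct task_struct *p)
{
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);      /* lock p's runqueue, pin via rf */
        /* ... examine p's runqueue state while p cannot migrate ... */
        task_rq_unlock(rq, p, &rf);     /* unpin and release */
}
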
/Linux-v6.6/drivers/net/ethernet/fungible/funcore/
fun_queue.h
69 u16 rq_flags; member
120 u16 rq_flags; member
fun_queue.c
459 funq->rq_flags = req->rq_flags | FUN_ADMIN_EPSQ_CREATE_FLAG_RQ; in fun_alloc_queue()
528 rc = fun_sq_create(fdev, funq->rq_flags, funq->rqid, funq->cqid, 0, in fun_create_rq()
/Linux-v6.6/drivers/scsi/
scsi_lib.c
118 if (rq->rq_flags & RQF_DONTPREP) { in scsi_mq_requeue_cmd()
119 rq->rq_flags &= ~RQF_DONTPREP; in scsi_mq_requeue_cmd()
233 req->rq_flags |= RQF_QUIET; in scsi_execute_cmd()
639 if (!(rq->rq_flags & RQF_MIXED_MERGE)) in scsi_rq_err_bytes()
817 if (!(req->rq_flags & RQF_QUIET)) { in scsi_io_completion_action()
908 else if (req->rq_flags & RQF_QUIET) in scsi_io_completion_nz_result()
1148 if (rq->rq_flags & RQF_DONTPREP) { in scsi_cleanup_rq()
1150 rq->rq_flags &= ~RQF_DONTPREP; in scsi_cleanup_rq()
1229 if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM))) in scsi_device_state_check()
1237 if (req && !(req->rq_flags & RQF_PM)) in scsi_device_state_check()
[all …]
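The scsi_lib.c hits trace the RQF_DONTPREP life cycle: set once a command has been prepared, cleared (and the prepared resources released) before a requeue or cleanup. A sketch of that requeue pattern under assumed names; my_requeue() and my_unprep() stand in for a driver's own teardown:

#include <linux/blk-mq.h>

static void my_unprep(struct request *rq)
{
        /* illustrative: release whatever preparation allocated for rq */
}

static void my_requeue(struct request *rq)
{
        if (rq->rq_flags & RQF_DONTPREP) {
                rq->rq_flags &= ~RQF_DONTPREP;  /* must re-prep next time */
                my_unprep(rq);
        }
        blk_mq_requeue_request(rq, true);       /* requeue, kick the queue */
}
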
/Linux-v6.6/net/sunrpc/
svc_xprt.c
403 if (!test_bit(RQ_DATA, &rqstp->rq_flags)) { in svc_xprt_reserve_slot()
407 set_bit(RQ_DATA, &rqstp->rq_flags); in svc_xprt_reserve_slot()
415 if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) { in svc_xprt_release_slot()
739 clear_bit(RQ_BUSY, &rqstp->rq_flags); in svc_get_next_xprt()
749 set_bit(RQ_BUSY, &rqstp->rq_flags); in svc_get_next_xprt()
1188 if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags)) in svc_defer()
1219 set_bit(RQ_DROPME, &rqstp->rq_flags); in svc_defer()
svc.c
645 __set_bit(RQ_BUSY, &rqstp->rq_flags); in svc_rqst_alloc()
707 if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) in svc_pool_wake_idle_thread()
752 set_bit(RQ_VICTIM, &rqstp->rq_flags); in svc_pool_victim()
929 if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags)) in svc_exit_thread()
1306 set_bit(RQ_SPLICE_OK, &rqstp->rq_flags); in svc_process_common()
1308 set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags); in svc_process_common()
1309 clear_bit(RQ_DROPME, &rqstp->rq_flags); in svc_process_common()
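In net/sunrpc, rq_flags takes a third shape: an unsigned long in struct svc_rqst whose RQ_* values are bit numbers, so every hit above goes through the atomic bitops rather than |=/&=. A one-function sketch of that convention; the helper name is illustrative:

#include <linux/sunrpc/svc.h>

static bool my_rqst_mark_busy(struct svc_rqst *rqstp)
{
        /*
         * Atomically set RQ_BUSY; a true return means it was already
         * busy, matching the svc_pool_wake_idle_thread() hit above.
         */
        return test_and_set_bit(RQ_BUSY, &rqstp->rq_flags);
}
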
/Linux-v6.6/drivers/mmc/core/
queue.c
241 req->rq_flags |= RQF_QUIET; in mmc_mq_queue_rq()
293 if (!(req->rq_flags & RQF_DONTPREP)) { in mmc_mq_queue_rq()
295 req->rq_flags |= RQF_DONTPREP; in mmc_mq_queue_rq()
/Linux-v6.6/drivers/nvme/host/
ioctl.c
152 struct nvme_command *cmd, blk_opf_t rq_flags, in nvme_alloc_user_request() argument
157 req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags); in nvme_alloc_user_request()
565 blk_opf_t rq_flags = REQ_ALLOC_CACHE; in nvme_uring_cmd_io() local
601 rq_flags |= REQ_NOWAIT; in nvme_uring_cmd_io()
605 rq_flags |= REQ_POLLED; in nvme_uring_cmd_io()
607 req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags); in nvme_uring_cmd_io()
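In the nvme hits, rq_flags is not a request's flag word at all but a local blk_opf_t of op modifiers (REQ_NOWAIT, REQ_POLLED, ...) OR'd into the opcode passed to blk_mq_alloc_request(). A minimal sketch under that reading; the function name and the REQ_OP_DRV_IN opcode are assumptions for illustration:

#include <linux/blk-mq.h>

static struct request *my_alloc_polled_req(struct request_queue *q)
{
        blk_opf_t rq_flags = REQ_NOWAIT | REQ_POLLED;   /* op modifiers */

        /* may return an ERR_PTR, e.g. -EAGAIN when REQ_NOWAIT is set */
        return blk_mq_alloc_request(q, REQ_OP_DRV_IN | rq_flags, 0);
}
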
/Linux-v6.6/drivers/scsi/device_handler/
scsi_dh_hp_sw.c
173 req->rq_flags |= RQF_QUIET; in hp_sw_prep_fn()
/Linux-v6.6/drivers/md/
dm-rq.c
266 if (rq->rq_flags & RQF_FAILED) in dm_softirq_done()
293 rq->rq_flags |= RQF_FAILED; in dm_kill_unmapped_request()
