Lines Matching refs:rq in include/linux/blk-mq.h
198 static inline bool blk_rq_is_passthrough(struct request *rq)
200 	return blk_op_is_passthrough(rq->cmd_flags);
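A driver that does its own I/O accounting might use this check to skip requests that carry driver-private (e.g. ioctl-originated) commands rather than filesystem data. A minimal sketch; my_account_io() is a hypothetical driver helper:

    static void my_complete(struct request *rq)
    {
            /* Passthrough requests have no fs payload worth accounting. */
            if (!blk_rq_is_passthrough(rq))
                    my_account_io(rq);      /* hypothetical accounting hook */
            blk_mq_end_request(rq, BLK_STS_OK);
    }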
208 #define rq_data_dir(rq)	(op_is_write(req_op(rq)) ? WRITE : READ)
210 #define rq_dma_dir(rq) \
211 	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
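rq_dma_dir() maps the request's direction straight onto enum dma_data_direction, so a driver can feed it to the DMA API without an explicit read/write test. A sketch of code inside a ->queue_rq() path, assuming dev is the driver's struct device and sgl a scatterlist already filled from the request:

    /* Map in whichever direction the transfer needs. */
    int nents = dma_map_sg(dev, sgl, sg_count, rq_dma_dir(rq));

    if (!nents)
            return BLK_STS_RESOURCE;        /* mapping failed, retry later */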
213 #define rq_list_add(listptr, rq)	do {	\
214 	(rq)->rq_next = *(listptr);		\
215 	*(listptr) = rq;			\
218 #define rq_list_add_tail(lastpptr, rq)	do {	\
219 	(rq)->rq_next = NULL;			\
220 	**(lastpptr) = rq;			\
221 	*(lastpptr) = &rq->rq_next;		\
249 #define rq_list_next(rq)	(rq)->rq_next
260 		struct request *rq, struct request *prev)	/* in rq_list_move() */
263 		prev->rq_next = rq->rq_next;
265 		*src = rq->rq_next;
266 	rq_list_add(dst, rq);
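Together these form a singly linked list threaded through rq->rq_next, with a bare struct request * as the head (and a struct request ** tail pointer for rq_list_add_tail()). Roughly how a batch consumer might build and drain one; my_dispatch() is hypothetical:

    struct request *head = NULL, *rq;

    rq_list_add(&head, rq_a);               /* push at the front */
    rq_list_add(&head, rq_b);               /* list is now rq_b -> rq_a */

    while ((rq = head) != NULL) {           /* pop requests one by one */
            head = rq_list_next(rq);
            my_dispatch(rq);                /* hypothetical per-request work */
    }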
527 	struct request *rq;	/* member of struct blk_mq_queue_data */
646 void (*show_rq)(struct seq_file *m, struct request *rq);
711 void blk_mq_free_request(struct request *rq);
712 int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
770 u32 blk_mq_unique_tag(struct request *rq);
786 static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
788 	return READ_ONCE(rq->state);
791 static inline int blk_mq_request_started(struct request *rq)
793 	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
796 static inline int blk_mq_request_completed(struct request *rq)
798 	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
808 static inline void blk_mq_set_request_complete(struct request *rq)
810 	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
817 static inline void blk_mq_complete_request_direct(struct request *rq,
818 		void (*complete)(struct request *rq))
820 	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
821 	complete(rq);
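These state helpers matter most where completion can race with another context, e.g. a driver's ->timeout() handler: if the request already completed, back off; otherwise abort it in hardware. A sketch; my_abort_cmd() is hypothetical:

    static enum blk_eh_timer_return my_timeout(struct request *rq)
    {
            /* Completion won the race; let the normal path finish it. */
            if (blk_mq_request_completed(rq))
                    return BLK_EH_DONE;

            my_abort_cmd(blk_mq_rq_to_pdu(rq));     /* hypothetical HW abort */
            return BLK_EH_RESET_TIMER;
    }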
824 void blk_mq_start_request(struct request *rq);
825 void blk_mq_end_request(struct request *rq, blk_status_t error);
826 void __blk_mq_end_request(struct request *rq, blk_status_t error);
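The usual lifecycle is blk_mq_start_request() in ->queue_rq() before the hardware sees the request, then blk_mq_end_request() once the transfer finishes. A minimal synchronous sketch (think memory-backed device, no interrupt); my_do_transfer() is hypothetical and returns a blk_status_t:

    static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
    {
            struct request *rq = bd->rq;

            blk_mq_start_request(rq);       /* mark in-flight, arm timeout */
            blk_mq_end_request(rq, my_do_transfer(rq));
            return BLK_STS_OK;
    }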
833 static inline bool blk_mq_need_time_stamp(struct request *rq)
835 	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
838 static inline bool blk_mq_is_reserved_rq(struct request *rq)
840 	return rq->rq_flags & RQF_RESV;
868 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
871 void blk_mq_complete_request(struct request *rq);
872 bool blk_mq_complete_request_remote(struct request *rq);
903 unsigned int blk_mq_rq_cpu(struct request *rq);
937 static inline void *blk_mq_rq_to_pdu(struct request *rq)
939 	return rq + 1;
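The per-driver payload (PDU) lives immediately behind struct request; its size comes from the tag set's cmd_size, so `rq + 1` is all the lookup needs. Sketch, with struct my_cmd standing in for a driver's own type:

    struct my_cmd {
            int status;                     /* driver-private per-request state */
    };

    /* At tag-set setup time: reserve the PDU behind every request. */
    set->cmd_size = sizeof(struct my_cmd);

    /* In the I/O path: */
    struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
    cmd->status = 0;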
949 static inline void blk_mq_cleanup_rq(struct request *rq)
951 	if (rq->q->mq_ops->cleanup_rq)
952 		rq->q->mq_ops->cleanup_rq(rq);
955 static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
958 	rq->nr_phys_segments = nr_segs;
959 	rq->__data_len = bio->bi_iter.bi_size;
960 	rq->bio = rq->biotail = bio;
961 	rq->ioprio = bio_prio(bio);
967 static inline bool rq_is_sync(struct request *rq)
969 	return op_is_sync(rq->cmd_flags);
972 void blk_rq_init(struct request_queue *q, struct request *rq);
973 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
976 void blk_rq_unprep_clone(struct request *rq);
977 blk_status_t blk_insert_cloned_request(struct request *rq);
997 int blk_rq_append_bio(struct request *rq, struct bio *bio);
998 void blk_execute_rq_nowait(struct request *rq, bool at_head);
999 blk_status_t blk_execute_rq(struct request *rq, bool at_head);
1000 bool blk_rq_is_poll(struct request *rq);
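A passthrough user allocates a request, fills its PDU, and executes it; blk_execute_rq() sleeps until completion, and on a polled queue (blk_rq_is_poll()) it spins via blk_rq_poll() internally instead. A sketch of the synchronous pattern, assuming q is the target queue:

    struct request *rq;
    blk_status_t status;

    rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
    if (IS_ERR(rq))
            return PTR_ERR(rq);

    /* fill the driver PDU here, see blk_mq_rq_to_pdu() above */

    status = blk_execute_rq(rq, false);     /* insert at tail, wait (or poll) */
    blk_mq_free_request(rq);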
1007 #define __rq_for_each_bio(_bio, rq)	\
1008 	if ((rq->bio)) \
1009 		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
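The iterator visits each bio chained into the request without touching any bio iterators, which suits per-bio bookkeeping. For example, finding the largest bio in a request (sketch):

    struct bio *bio;
    unsigned int max_bytes = 0;

    __rq_for_each_bio(bio, rq)              /* walk rq->bio via bi_next */
            max_bytes = max(max_bytes, bio->bi_iter.bi_size);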
1031 static inline sector_t blk_rq_pos(const struct request *rq)
1033 	return rq->__sector;
1036 static inline unsigned int blk_rq_bytes(const struct request *rq)
1038 	return rq->__data_len;
1041 static inline int blk_rq_cur_bytes(const struct request *rq)
1043 	if (!rq->bio)
1045 	if (!bio_has_data(rq->bio)) /* dataless requests such as discard */
1046 		return rq->bio->bi_iter.bi_size;
1047 	return bio_iovec(rq->bio).bv_len;
1050 static inline unsigned int blk_rq_sectors(const struct request *rq)
1052 	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
1055 static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
1057 	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
1060 static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
1062 	return rq->stats_sectors;
1071 static inline unsigned int blk_rq_payload_bytes(struct request *rq)
1073 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1074 		return rq->special_vec.bv_len;
1075 	return blk_rq_bytes(rq);
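These accessors give a driver the request's position and length without poking __sector/__data_len directly. A sketch filling a hypothetical hardware command descriptor:

    /* cmd is a hypothetical driver command descriptor. */
    cmd->lba    = blk_rq_pos(rq);           /* start, in 512-byte sectors */
    cmd->nsect  = blk_rq_sectors(rq);       /* total length, in sectors */
    cmd->nbytes = blk_rq_payload_bytes(rq); /* bytes actually on the wire */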
1082 static inline struct bio_vec req_bvec(struct request *rq)
1084 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1085 		return rq->special_vec;
1086 	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
1089 static inline unsigned int blk_rq_count_bios(struct request *rq)
1094 	__rq_for_each_bio(bio, rq)
1100 void blk_steal_bios(struct bio_list *list, struct request *rq);
1108 bool blk_update_request(struct request *rq, blk_status_t error,
1121 static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
1123 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1125 	return rq->nr_phys_segments;
1132 static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
1134 	return max_t(unsigned short, rq->nr_phys_segments, 1);
1137 int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
1139 static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
1144 	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
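A driver typically flattens the request into a preallocated scatterlist sized for blk_rq_nr_phys_segments(rq), then DMA-maps it with rq_dma_dir(). Sketch; cmd->sgl is a hypothetical, sg_init_table()-initialized scatterlist and dev the driver's struct device:

    int nseg = blk_rq_map_sg(rq->q, rq, cmd->sgl);  /* fill sgl from rq's bios */

    if (nseg <= 0)
            return BLK_STS_IOERR;
    nseg = dma_map_sg(dev, cmd->sgl, nseg, rq_dma_dir(rq));
    if (!nseg)
            return BLK_STS_RESOURCE;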
1149 static inline unsigned int blk_rq_zone_no(struct request *rq)
1151 	return disk_zone_no(rq->q->disk, blk_rq_pos(rq));
1154 static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
1156 	return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
1165 static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
1167 	return op_needs_zoned_write_locking(req_op(rq)) &&
1168 		blk_rq_zone_is_seq(rq);
1171 bool blk_req_needs_zone_write_lock(struct request *rq);
1172 bool blk_req_zone_write_trylock(struct request *rq);
1173 void __blk_req_zone_write_lock(struct request *rq);
1174 void __blk_req_zone_write_unlock(struct request *rq);
1176 static inline void blk_req_zone_write_lock(struct request *rq)
1178 	if (blk_req_needs_zone_write_lock(rq))
1179 		__blk_req_zone_write_lock(rq);
1182 static inline void blk_req_zone_write_unlock(struct request *rq)
1184 	if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
1185 		__blk_req_zone_write_unlock(rq);
1188 static inline bool blk_req_zone_is_write_locked(struct request *rq)
1190 	return rq->q->disk->seq_zones_wlock &&
1191 		test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock);
1194 static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
1196 	if (!blk_req_needs_zone_write_lock(rq))
1198 	return !blk_req_zone_is_write_locked(rq);
1201 static inline bool blk_rq_is_seq_zoned_write(struct request *rq)	/* !CONFIG_BLK_DEV_ZONED stubs */
1206 static inline bool blk_req_needs_zone_write_lock(struct request *rq)
1211 static inline void blk_req_zone_write_lock(struct request *rq)
1215 static inline void blk_req_zone_write_unlock(struct request *rq)
1218 static inline bool blk_req_zone_is_write_locked(struct request *rq)
1223 static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
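With CONFIG_BLK_DEV_ZONED, a dispatcher serializes writes to a sequential zone: it checks blk_req_can_dispatch_to_zone() before issuing, holds the zone write lock across dispatch, and drops it at completion; the stubs above let the same code compile away on non-zoned builds. A sketch of the dispatch-side gate, loosely after what an I/O scheduler does:

    /* Before handing rq to hardware: */
    if (!blk_req_can_dispatch_to_zone(rq))
            return false;                   /* zone busy, pick another rq */

    blk_req_zone_write_lock(rq);            /* no-op unless rq needs locking */
    /* ... issue rq; completion calls blk_req_zone_write_unlock(rq) ... */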