
Searched refs:rq (Results 1 – 25 of 636) sorted by relevance


/Linux-v6.1/kernel/sched/
pelt.h
   7  int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
   8  int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
  11  int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
  13  static inline u64 thermal_load_avg(struct rq *rq)
  15  return READ_ONCE(rq->avg_thermal.load_avg);  /* in thermal_load_avg() */
  19  update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
  24  static inline u64 thermal_load_avg(struct rq *rq)
  31  int update_irq_load_avg(struct rq *rq, u64 running);
  34  update_irq_load_avg(struct rq *rq, u64 running)
  64  static inline u64 rq_clock_pelt(struct rq *rq)
[all …]
sched.h
 100  struct rq;
 114  extern void calc_global_load_tick(struct rq *this_rq);
 115  extern long calc_load_fold_active(struct rq *this_rq, long adjust);
 117  extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
 617  struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */
 693  struct rq *rq;
 888  extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
 941  struct rq;
 944  void (*func)(struct rq *rq);
 954  struct rq {
[all …]
deadline.c
  60  static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
  62  return container_of(dl_rq, struct rq, dl);  /* in rq_of_dl_rq() */
  68  struct rq *rq = task_rq(p);  /* in dl_rq_of_se() */
  70  return &rq->dl;  /* in dl_rq_of_se() */
 175  struct rq *rq = cpu_rq(i);  /* in __dl_update() */
 177  rq->dl.extra_bw += bw;  /* in __dl_update() */
 311  struct rq *rq;  /* in dl_change_utilization() */
 318  rq = task_rq(p);  /* in dl_change_utilization() */
 320  sub_running_bw(&p->dl, &rq->dl);  /* in dl_change_utilization() */
 332  __sub_rq_bw(p->dl.dl_bw, &rq->dl);  /* in dl_change_utilization() */
[all …]
stop_task.c
  19  balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
  21  return sched_stop_runnable(rq);  /* in balance_stop() */
  26  check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
  31  static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)
  33  stop->se.exec_start = rq_clock_task(rq);  /* in set_next_task_stop() */
  36  static struct task_struct *pick_task_stop(struct rq *rq)
  38  if (!sched_stop_runnable(rq))  /* in pick_task_stop() */
  41  return rq->stop;  /* in pick_task_stop() */
  44  static struct task_struct *pick_next_task_stop(struct rq *rq)
  46  struct task_struct *p = pick_task_stop(rq);  /* in pick_next_task_stop() */
[all …]
stats.h
  13  rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
  15  if (rq) {  /* in rq_sched_info_arrive() */
  16  rq->rq_sched_info.run_delay += delta;  /* in rq_sched_info_arrive() */
  17  rq->rq_sched_info.pcount++;  /* in rq_sched_info_arrive() */
  25  rq_sched_info_depart(struct rq *rq, unsigned long long delta)
  27  if (rq)  /* in rq_sched_info_depart() */
  28  rq->rq_cpu_time += delta;  /* in rq_sched_info_depart() */
  32  rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)
  34  if (rq)  /* in rq_sched_info_dequeue() */
  35  rq->rq_sched_info.run_delay += delta;  /* in rq_sched_info_dequeue() */
[all …]
rt.c
 175  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 177  return rt_rq->rq;  /* in rq_of_rt_rq() */
 185  static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
 189  return rt_rq->rq;  /* in rq_of_rt_se() */
 218  struct rq *rq = cpu_rq(cpu);  /* in init_tg_rt_entry() */
 222  rt_rq->rq = rq;  /* in init_tg_rt_entry() */
 232  rt_se->rt_rq = &rq->rt;  /* in init_tg_rt_entry() */
 290  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 292  return container_of(rt_rq, struct rq, rt);  /* in rq_of_rt_rq() */
 295  static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
[all …]
core.c
 114  DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 233  void sched_core_enqueue(struct rq *rq, struct task_struct *p)
 235  rq->core->core_task_seq++;  /* in sched_core_enqueue() */
 240  rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);  /* in sched_core_enqueue() */
 243  void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
 245  rq->core->core_task_seq++;  /* in sched_core_dequeue() */
 248  rb_erase(&p->core_node, &rq->core_tree);  /* in sched_core_dequeue() */
 257  if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&  /* in sched_core_dequeue() */
 258  rq->core->core_forceidle_count && rq->curr == rq->idle)  /* in sched_core_dequeue() */
 259  resched_curr(rq);  /* in sched_core_dequeue() */
[all …]
/Linux-v6.1/drivers/gpu/drm/i915/
i915_request.c
 115  struct i915_request *rq = to_request(fence);  /* in i915_fence_release() */
 117  GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT &&  /* in i915_fence_release() */
 118  rq->guc_prio != GUC_PRIO_FINI);  /* in i915_fence_release() */
 120  i915_request_free_capture_list(fetch_and_zero(&rq->capture_list));  /* in i915_fence_release() */
 121  if (rq->batch_res) {  /* in i915_fence_release() */
 122  i915_vma_resource_put(rq->batch_res);  /* in i915_fence_release() */
 123  rq->batch_res = NULL;  /* in i915_fence_release() */
 133  i915_sw_fence_fini(&rq->submit);  /* in i915_fence_release() */
 134  i915_sw_fence_fini(&rq->semaphore);  /* in i915_fence_release() */
 169  if (!intel_engine_is_virtual(rq->engine) &&  /* in i915_fence_release() */
[all …]
i915_request.h
  64  #define RQ_TRACE(rq, fmt, ...) do { \
  65  const struct i915_request *rq__ = (rq); \
 373  void __i915_request_skip(struct i915_request *rq);
 374  bool i915_request_set_error_once(struct i915_request *rq, int error);
 375  struct i915_request *i915_request_mark_eio(struct i915_request *rq);
 378  void __i915_request_queue(struct i915_request *rq,
 380  void __i915_request_queue_bh(struct i915_request *rq);
 382  bool i915_request_retire(struct i915_request *rq);
 383  void i915_request_retire_upto(struct i915_request *rq);
 395  i915_request_get(struct i915_request *rq)
[all …]
/Linux-v6.1/drivers/scsi/fnic/
vnic_rq.c
  15  static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
  18  unsigned int i, j, count = rq->ring.desc_count;  /* in vnic_rq_alloc_bufs() */
  22  rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);  /* in vnic_rq_alloc_bufs() */
  23  if (!rq->bufs[i]) {  /* in vnic_rq_alloc_bufs() */
  30  buf = rq->bufs[i];  /* in vnic_rq_alloc_bufs() */
  33  buf->desc = (u8 *)rq->ring.descs +  /* in vnic_rq_alloc_bufs() */
  34  rq->ring.desc_size * buf->index;  /* in vnic_rq_alloc_bufs() */
  36  buf->next = rq->bufs[0];  /* in vnic_rq_alloc_bufs() */
  39  buf->next = rq->bufs[i + 1];  /* in vnic_rq_alloc_bufs() */
  47  rq->to_use = rq->to_clean = rq->bufs[0];  /* in vnic_rq_alloc_bufs() */
[all …]
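
The vnic_rq_alloc_bufs() lines above show each receive buffer being chained through buf->next into one circular list (the last buffer wraps back to bufs[0]) with both to_use and to_clean starting at the head. A stripped-down userspace sketch of that chaining, using a single flat array instead of the driver's kzalloc'd buffer blocks (the ring size and helper name below are illustrative, not the driver's):

#include <stdio.h>

#define RING_SIZE 8  /* stand-in for rq->ring.desc_count */

struct vnic_rq_buf {
    unsigned int index;
    struct vnic_rq_buf *next;
};

struct vnic_rq {
    struct vnic_rq_buf bufs[RING_SIZE];  /* flat array; the driver allocates per-block chunks */
    struct vnic_rq_buf *to_use;          /* next buffer to post */
    struct vnic_rq_buf *to_clean;        /* next buffer to reclaim */
};

static void vnic_rq_link_bufs(struct vnic_rq *rq)
{
    unsigned int i;

    for (i = 0; i < RING_SIZE; i++) {
        rq->bufs[i].index = i;
        /* last buffer wraps to bufs[0], closing the ring */
        rq->bufs[i].next = &rq->bufs[(i + 1) % RING_SIZE];
    }
    rq->to_use = rq->to_clean = &rq->bufs[0];
}

int main(void)
{
    struct vnic_rq rq;
    unsigned int n;

    vnic_rq_link_bufs(&rq);

    /* posting just walks to_use around the ring and wraps automatically */
    for (n = 0; n < RING_SIZE + 2; n++) {
        printf("post buffer %u\n", rq.to_use->index);
        rq.to_use = rq.to_use->next;
    }
    return 0;
}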
vnic_rq.h
  93  static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
  96  return rq->ring.desc_avail;  /* in vnic_rq_desc_avail() */
  99  static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
 102  return rq->ring.desc_count - rq->ring.desc_avail - 1;  /* in vnic_rq_desc_used() */
 105  static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
 107  return rq->to_use->desc;  /* in vnic_rq_next_desc() */
 110  static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
 112  return rq->to_use->index;  /* in vnic_rq_next_index() */
 115  static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
 117  return rq->buf_index++;  /* in vnic_rq_next_buf_index() */
[all …]
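
As a side note, the "- 1" in vnic_rq_desc_used() presumably reflects the common ring convention of keeping one descriptor permanently unposted so a completely full ring can be told apart from an empty one; under that assumption the accounting works out as in this tiny illustration (the values are made up):

#include <stdio.h>

int main(void)
{
    unsigned int desc_count = 8;               /* total ring entries */
    unsigned int desc_avail = desc_count - 1;  /* assumed initial state: nothing posted yet */

    /* mirrors vnic_rq_desc_used(): count - avail - 1 */
    printf("used at init: %u\n", desc_count - desc_avail - 1);    /* 0 */

    desc_avail = 0;  /* every postable descriptor in flight */
    printf("used when full: %u\n", desc_count - desc_avail - 1);  /* 7, one slot reserved */
    return 0;
}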
/Linux-v6.1/drivers/net/ethernet/cisco/enic/
vnic_rq.c
  18  static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
  21  unsigned int i, j, count = rq->ring.desc_count;  /* in vnic_rq_alloc_bufs() */
  25  rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL);  /* in vnic_rq_alloc_bufs() */
  26  if (!rq->bufs[i])  /* in vnic_rq_alloc_bufs() */
  31  buf = rq->bufs[i];  /* in vnic_rq_alloc_bufs() */
  34  buf->desc = (u8 *)rq->ring.descs +  /* in vnic_rq_alloc_bufs() */
  35  rq->ring.desc_size * buf->index;  /* in vnic_rq_alloc_bufs() */
  37  buf->next = rq->bufs[0];  /* in vnic_rq_alloc_bufs() */
  40  buf->next = rq->bufs[i + 1];  /* in vnic_rq_alloc_bufs() */
  48  rq->to_use = rq->to_clean = rq->bufs[0];  /* in vnic_rq_alloc_bufs() */
[all …]
vnic_rq.h
  84  static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
  87  return rq->ring.desc_avail;  /* in vnic_rq_desc_avail() */
  90  static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
  93  return rq->ring.desc_count - rq->ring.desc_avail - 1;  /* in vnic_rq_desc_used() */
  96  static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
  98  return rq->to_use->desc;  /* in vnic_rq_next_desc() */
 101  static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
 103  return rq->to_use->index;  /* in vnic_rq_next_index() */
 106  static inline void vnic_rq_post(struct vnic_rq *rq,
 111  struct vnic_rq_buf *buf = rq->to_use;  /* in vnic_rq_post() */
[all …]
/Linux-v6.1/drivers/gpu/drm/i915/gt/
gen8_engine_cs.c
  13  int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
  42  if (GRAPHICS_VER(rq->engine->i915) == 9)  /* in gen8_emit_flush_rcs() */
  46  if (IS_KBL_GRAPHICS_STEP(rq->engine->i915, 0, STEP_C0))  /* in gen8_emit_flush_rcs() */
  58  cs = intel_ring_begin(rq, len);  /* in gen8_emit_flush_rcs() */
  74  intel_ring_advance(rq, cs);  /* in gen8_emit_flush_rcs() */
  79  int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)
  83  cs = intel_ring_begin(rq, 4);  /* in gen8_emit_flush_xcs() */
  99  if (rq->engine->class == VIDEO_DECODE_CLASS)  /* in gen8_emit_flush_xcs() */
 107  intel_ring_advance(rq, cs);  /* in gen8_emit_flush_xcs() */
 112  int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode)
[all …]
gen6_engine_cs.c
  55  gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
  58  intel_gt_scratch_offset(rq->engine->gt,  /* in gen6_emit_post_sync_nonzero_flush() */
  62  cs = intel_ring_begin(rq, 6);  /* in gen6_emit_post_sync_nonzero_flush() */
  72  intel_ring_advance(rq, cs);  /* in gen6_emit_post_sync_nonzero_flush() */
  74  cs = intel_ring_begin(rq, 6);  /* in gen6_emit_post_sync_nonzero_flush() */
  84  intel_ring_advance(rq, cs);  /* in gen6_emit_post_sync_nonzero_flush() */
  89  int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode)
  92  intel_gt_scratch_offset(rq->engine->gt,  /* in gen6_emit_flush_rcs() */
  98  ret = gen6_emit_post_sync_nonzero_flush(rq);  /* in gen6_emit_flush_rcs() */
 130  cs = intel_ring_begin(rq, 4);  /* in gen6_emit_flush_rcs() */
[all …]
selftest_execlists.c
  28  static bool is_active(struct i915_request *rq)
  30  if (i915_request_is_active(rq))  /* in is_active() */
  33  if (i915_request_on_hold(rq))  /* in is_active() */
  36  if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))  /* in is_active() */
  43  struct i915_request *rq,  /* in wait_for_submit() */
  53  if (i915_request_completed(rq)) /* that was quick! */  /* in wait_for_submit() */
  58  if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))  /* in wait_for_submit() */
  69  struct i915_request *rq,  /* in wait_for_reset() */
  81  if (i915_request_completed(rq))  /* in wait_for_reset() */
  84  if (READ_ONCE(rq->fence.error))  /* in wait_for_reset() */
[all …]
intel_breadcrumbs.c
 106  check_signal_order(struct intel_context *ce, struct i915_request *rq)
 108  if (rq->context != ce)  /* in check_signal_order() */
 111  if (!list_is_last(&rq->signal_link, &ce->signals) &&  /* in check_signal_order() */
 112  i915_seqno_passed(rq->fence.seqno,  /* in check_signal_order() */
 113  list_next_entry(rq, signal_link)->fence.seqno))  /* in check_signal_order() */
 116  if (!list_is_first(&rq->signal_link, &ce->signals) &&  /* in check_signal_order() */
 117  i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno,  /* in check_signal_order() */
 118  rq->fence.seqno))  /* in check_signal_order() */
 207  struct i915_request *rq;  /* in signal_irq_work() */
 209  list_for_each_entry_rcu(rq, &ce->signals, signal_link) {  /* in signal_irq_work() */
[all …]
/Linux-v6.1/fs/erofs/
decompressor.c
  20  struct z_erofs_decompress_req *rq;
  68  struct z_erofs_decompress_req *rq = ctx->rq;  /* in z_erofs_lz4_prepare_dstpages() */
  73  EROFS_SB(rq->sb)->lz4.max_distance_pages;  /* in z_erofs_lz4_prepare_dstpages() */
  79  struct page *const page = rq->out[i];  /* in z_erofs_lz4_prepare_dstpages() */
  86  if (!rq->fillgaps && test_bit(j, bounced)) {  /* in z_erofs_lz4_prepare_dstpages() */
  89  availables[top++] = rq->out[i - lz4_max_distance_pages];  /* in z_erofs_lz4_prepare_dstpages() */
 119  rq->out[i] = victim;  /* in z_erofs_lz4_prepare_dstpages() */
 128  struct z_erofs_decompress_req *rq = ctx->rq;  /* in z_erofs_lz4_handle_overlap() */
 133  if (rq->inplace_io) {  /* in z_erofs_lz4_handle_overlap() */
 135  if (rq->partial_decoding || !may_inplace ||  /* in z_erofs_lz4_handle_overlap() */
[all …]
/Linux-v6.1/include/linux/
blk-mq.h
 211  static inline bool blk_rq_is_passthrough(struct request *rq)
 213  return blk_op_is_passthrough(req_op(rq));  /* in blk_rq_is_passthrough() */
 221  #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
 223  #define rq_dma_dir(rq) \
 224  (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
 226  #define rq_list_add(listptr, rq) do { \
 227  (rq)->rq_next = *(listptr); \
 228  *(listptr) = rq; \
 256  #define rq_list_next(rq) (rq)->rq_next
 267  struct request *rq, struct request *prev)  /* in rq_list_move() */
[all …]
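
The rq_list_add() and rq_list_next() macros listed above chain requests into an intrusive singly linked list through the request's own rq_next pointer, pushing new entries at the head. A minimal userspace C analogue of that pattern (the struct below is a mock with only the fields the macros touch, not the kernel's struct request):

#include <stdio.h>
#include <stddef.h>

struct request {
    int tag;
    struct request *rq_next;  /* intrusive link, as in the kernel macros */
};

/* same shape as the listed kernel macros: push at the head, walk via rq_next */
#define rq_list_add(listptr, rq) do {  \
    (rq)->rq_next = *(listptr);        \
    *(listptr) = (rq);                 \
} while (0)

#define rq_list_next(rq)  ((rq)->rq_next)

int main(void)
{
    struct request a = { .tag = 1 }, b = { .tag = 2 }, c = { .tag = 3 };
    struct request *list = NULL;

    rq_list_add(&list, &a);
    rq_list_add(&list, &b);
    rq_list_add(&list, &c);  /* list is now c -> b -> a */

    for (struct request *rq = list; rq; rq = rq_list_next(rq))
        printf("tag %d\n", rq->tag);
    return 0;
}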
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.c
  11  int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
  13  struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);  /* in mlx5e_xsk_alloc_rx_mpwqe() */
  14  struct mlx5e_icosq *icosq = rq->icosq;  /* in mlx5e_xsk_alloc_rx_mpwqe() */
  21  if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe)))  /* in mlx5e_xsk_alloc_rx_mpwqe() */
  25  batch = xsk_buff_alloc_batch(rq->xsk_pool, (struct xdp_buff **)wi->alloc_units,  /* in mlx5e_xsk_alloc_rx_mpwqe() */
  26  rq->mpwqe.pages_per_wqe);  /* in mlx5e_xsk_alloc_rx_mpwqe() */
  34  for (; batch < rq->mpwqe.pages_per_wqe; batch++) {  /* in mlx5e_xsk_alloc_rx_mpwqe() */
  35  wi->alloc_units[batch].xsk = xsk_buff_alloc(rq->xsk_pool);  /* in mlx5e_xsk_alloc_rx_mpwqe() */
  40  pi = mlx5e_icosq_get_next_pi(icosq, rq->mpwqe.umr_wqebbs);  /* in mlx5e_xsk_alloc_rx_mpwqe() */
  42  memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));  /* in mlx5e_xsk_alloc_rx_mpwqe() */
[all …]
/Linux-v6.1/drivers/scsi/esas2r/
esas2r_disc.c
  49  struct esas2r_request *rq);
  51  struct esas2r_request *rq);
  55  struct esas2r_request *rq);
  59  struct esas2r_request *rq);
  61  struct esas2r_request *rq);
  63  struct esas2r_request *rq);
  65  struct esas2r_request *rq);
  67  struct esas2r_request *rq);
  69  struct esas2r_request *rq);
  71  struct esas2r_request *rq);
[all …]
esas2r_vda.c
  59  static void clear_vda_request(struct esas2r_request *rq);
  62  struct esas2r_request *rq);
  67  struct esas2r_request *rq,  /* in esas2r_process_vda_ioctl() */
  93  clear_vda_request(rq);  /* in esas2r_process_vda_ioctl() */
  95  rq->vrq->scsi.function = vi->function;  /* in esas2r_process_vda_ioctl() */
  96  rq->interrupt_cb = esas2r_complete_vda_ioctl;  /* in esas2r_process_vda_ioctl() */
  97  rq->interrupt_cx = vi;  /* in esas2r_process_vda_ioctl() */
 112  rq->vrq->flash.length = cpu_to_le32(datalen);  /* in esas2r_process_vda_ioctl() */
 113  rq->vrq->flash.sub_func = vi->cmd.flash.sub_func;  /* in esas2r_process_vda_ioctl() */
 115  memcpy(rq->vrq->flash.data.file.file_name,  /* in esas2r_process_vda_ioctl() */
[all …]
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c
  64  mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
  67  mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
  69  static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
  70  static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
  71  static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
  92  static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
  96  struct mlx5e_cq_decomp *cqd = &rq->cqd;  /* in mlx5e_read_title_slot() */
 102  rq->stats->cqe_compress_blks++;  /* in mlx5e_read_title_slot() */
 137  static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
 141  struct mlx5e_cq_decomp *cqd = &rq->cqd;  /* in mlx5e_decompress_cqe() */
[all …]
/Linux-v6.1/block/
blk-flush.c
 105  static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
 109  if (blk_rq_sectors(rq))  /* in blk_flush_policy() */
 113  if (rq->cmd_flags & REQ_PREFLUSH)  /* in blk_flush_policy() */
 116  (rq->cmd_flags & REQ_FUA))  /* in blk_flush_policy() */
 122  static unsigned int blk_flush_cur_seq(struct request *rq)
 124  return 1 << ffz(rq->flush.seq);  /* in blk_flush_cur_seq() */
 127  static void blk_flush_restore_request(struct request *rq)
 134  rq->bio = rq->biotail;  /* in blk_flush_restore_request() */
 137  rq->rq_flags &= ~RQF_FLUSH_SEQ;  /* in blk_flush_restore_request() */
 138  rq->end_io = rq->flush.saved_end_io;  /* in blk_flush_restore_request() */
[all …]
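
For blk_flush_cur_seq() above: ffz() returns the index of the lowest clear bit, so `1 << ffz(rq->flush.seq)` is the lowest flush stage whose bit is not yet set in the sequence mask, i.e. the next stage still outstanding. A small userspace illustration with a trivial stand-in for ffz() (not the kernel's implementation):

#include <stdio.h>

/* minimal stand-in for the kernel's ffz(): index of the lowest zero bit */
static unsigned int ffz(unsigned long word)
{
    unsigned int bit = 0;

    while (word & 1UL) {
        word >>= 1;
        bit++;
    }
    return bit;
}

int main(void)
{
    unsigned long seq = 0x3;  /* pretend the first two stages have completed */

    /* mirrors blk_flush_cur_seq(): next outstanding stage as a single bit */
    printf("current stage bit: 0x%lx\n", 1UL << ffz(seq));  /* prints 0x4 */
    return 0;
}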
blk-mq.c
  52  static int blk_mq_poll_stats_bkt(const struct request *rq)
  56  ddir = rq_data_dir(rq);  /* in blk_mq_poll_stats_bkt() */
  57  sectors = blk_rq_stats_sectors(rq);  /* in blk_mq_poll_stats_bkt() */
  89  static inline blk_qc_t blk_rq_to_qc(struct request *rq)
  91  return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) |  /* in blk_rq_to_qc() */
  92  (rq->tag != -1 ?  /* in blk_rq_to_qc() */
  93  rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL));  /* in blk_rq_to_qc() */
 132  static bool blk_mq_check_inflight(struct request *rq, void *priv)
 136  if (rq->part && blk_do_io_stat(rq) &&  /* in blk_mq_check_inflight() */
 137  (!mi->part->bd_partno || rq->part == mi->part) &&  /* in blk_mq_check_inflight() */
[all …]
