Searched refs:rq (Results 1 – 25 of 636) sorted by relevance

/Linux-v5.15/drivers/gpu/drm/i915/
i915_request.c
111 struct i915_request *rq = to_request(fence); in i915_fence_release() local
113 GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT && in i915_fence_release()
114 rq->guc_prio != GUC_PRIO_FINI); in i915_fence_release()
123 i915_sw_fence_fini(&rq->submit); in i915_fence_release()
124 i915_sw_fence_fini(&rq->semaphore); in i915_fence_release()
131 if (!intel_engine_is_virtual(rq->engine) && in i915_fence_release()
132 !cmpxchg(&rq->engine->request_pool, NULL, rq)) { in i915_fence_release()
133 intel_context_put(rq->context); in i915_fence_release()
137 intel_context_put(rq->context); in i915_fence_release()
139 kmem_cache_free(slab_requests, rq); in i915_fence_release()
[all …]
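
The i915_fence_release() excerpt above parks a freed request in a one-slot per-engine pool via cmpxchg() instead of always returning it to the slab. Below is a minimal userspace sketch of that single-slot cache pattern, with C11 atomics standing in for the kernel's cmpxchg(); the obj/pool names are illustrative, not the i915 API.

#include <stdatomic.h>
#include <stdlib.h>

struct obj { int payload; };

static _Atomic(struct obj *) pool;      /* one-slot cache, NULL when empty */

static void obj_free(struct obj *o)
{
        struct obj *expected = NULL;

        /* Try to park the object in the empty slot; otherwise really free it. */
        if (!atomic_compare_exchange_strong(&pool, &expected, o))
                free(o);
}

static struct obj *obj_alloc(void)
{
        /* Reuse the cached object if one is parked, else allocate fresh. */
        struct obj *o = atomic_exchange(&pool, (struct obj *)NULL);

        return o ? o : malloc(sizeof(*o));
}

int main(void)
{
        struct obj *a = obj_alloc();

        obj_free(a);                    /* lands in the one-slot pool */
        struct obj *b = obj_alloc();    /* reuses the parked object */
        free(b);
        return 0;
}
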
i915_request.h
56 #define RQ_TRACE(rq, fmt, ...) do { \ argument
57 const struct i915_request *rq__ = (rq); \
327 void __i915_request_skip(struct i915_request *rq);
328 bool i915_request_set_error_once(struct i915_request *rq, int error);
329 struct i915_request *i915_request_mark_eio(struct i915_request *rq);
332 void __i915_request_queue(struct i915_request *rq,
334 void __i915_request_queue_bh(struct i915_request *rq);
336 bool i915_request_retire(struct i915_request *rq);
337 void i915_request_retire_upto(struct i915_request *rq);
349 i915_request_get(struct i915_request *rq) in i915_request_get() argument
[all …]
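
RQ_TRACE() above copies its rq argument into a local rq__ so the argument is evaluated exactly once inside the macro body. A small stand-alone illustration of that pattern; the REQ_TRACE name and the printf() sink are stand-ins, not the driver's macro, and ## __VA_ARGS__ is the GNU-style form the kernel itself relies on.

#include <stdio.h>

struct req { unsigned long seqno; };

/* Evaluate 'rq' exactly once, then reuse the local copy in the body. */
#define REQ_TRACE(rq, fmt, ...) do {                            \
        const struct req *rq__ = (rq);                          \
        printf("req %lu: " fmt, rq__->seqno, ##__VA_ARGS__);    \
} while (0)

int main(void)
{
        struct req r = { .seqno = 42 };

        REQ_TRACE(&r, "submitted on engine %d\n", 0);
        return 0;
}
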
/Linux-v5.15/drivers/scsi/fnic/
vnic_rq.c
27 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
30 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
34 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); in vnic_rq_alloc_bufs()
35 if (!rq->bufs[i]) { in vnic_rq_alloc_bufs()
42 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
45 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
46 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
48 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
51 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
59 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
[all …]
vnic_rq.h
105 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
108 return rq->ring.desc_avail; in vnic_rq_desc_avail()
111 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
114 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
117 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
119 return rq->to_use->desc; in vnic_rq_next_desc()
122 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
124 return rq->to_use->index; in vnic_rq_next_index()
127 static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq) in vnic_rq_next_buf_index() argument
129 return rq->buf_index++; in vnic_rq_next_buf_index()
[all …]
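
The fnic receive queue in vnic_rq.c/vnic_rq.h links its buffer descriptors into a circle, walks them with to_use and to_clean cursors, and keeps one slot in reserve, which is why used = desc_count - desc_avail - 1. A compact userspace sketch of that bookkeeping, with DMA and hardware posting left out; the ring_* names are illustrative.

#include <stdio.h>

#define RING_SIZE 8

struct ring_buf {
        struct ring_buf *next;          /* circular link, last entry -> [0] */
        unsigned int index;
};

struct ring {
        struct ring_buf bufs[RING_SIZE];
        struct ring_buf *to_use;        /* next slot to hand to hardware */
        struct ring_buf *to_clean;      /* next slot hardware will complete */
        unsigned int desc_avail;
};

static void ring_init(struct ring *rq)
{
        for (unsigned int i = 0; i < RING_SIZE; i++) {
                rq->bufs[i].index = i;
                rq->bufs[i].next = &rq->bufs[(i + 1) % RING_SIZE];
        }
        rq->to_use = rq->to_clean = &rq->bufs[0];
        rq->desc_avail = RING_SIZE - 1; /* one slot kept in reserve */
}

static unsigned int ring_desc_used(const struct ring *rq)
{
        return RING_SIZE - rq->desc_avail - 1;
}

static void ring_post(struct ring *rq)         /* give a buffer to hardware */
{
        rq->to_use = rq->to_use->next;
        rq->desc_avail--;
}

static void ring_clean(struct ring *rq)        /* reclaim a completed buffer */
{
        rq->to_clean = rq->to_clean->next;
        rq->desc_avail++;
}

int main(void)
{
        struct ring rq;

        ring_init(&rq);
        ring_post(&rq);
        ring_post(&rq);
        printf("used=%u avail=%u\n", ring_desc_used(&rq), rq.desc_avail);
        ring_clean(&rq);
        printf("used=%u avail=%u\n", ring_desc_used(&rq), rq.desc_avail);
        return 0;
}
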
/Linux-v5.15/kernel/sched/
sched.h
88 struct rq;
100 extern void calc_global_load_tick(struct rq *this_rq);
101 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
103 extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
595 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ member
667 struct rq *rq; member
862 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
922 struct rq { struct
1100 struct rq *core; argument
1118 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() argument
[all …]
pelt.h
7 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
8 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
11 int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
13 static inline u64 thermal_load_avg(struct rq *rq) in thermal_load_avg() argument
15 return READ_ONCE(rq->avg_thermal.load_avg); in thermal_load_avg()
19 update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) in update_thermal_load_avg() argument
24 static inline u64 thermal_load_avg(struct rq *rq) in thermal_load_avg() argument
31 int update_irq_load_avg(struct rq *rq, u64 running);
34 update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument
74 static inline void update_rq_clock_pelt(struct rq *rq, s64 delta) in update_rq_clock_pelt() argument
[all …]
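
pelt.h shows the usual kernel shape for optional statistics: when the feature is compiled in, the inline helper reads the running average with READ_ONCE(); otherwise a stub with the same signature returns 0, so callers never need an #ifdef. A trimmed illustration of that shape; CONFIG_THERMAL_AVG and READ_ONCE_ULL below are stand-ins, not the kernel's real names.

#include <stdio.h>

/* Userspace approximation of READ_ONCE() for an unsigned long long field. */
#define READ_ONCE_ULL(x) (*(const volatile unsigned long long *)&(x))

struct rq_avg { unsigned long long load_avg; };
struct rq     { struct rq_avg avg_thermal; };

#ifdef CONFIG_THERMAL_AVG               /* stand-in for the real config symbol */
static inline unsigned long long thermal_load_avg(struct rq *rq)
{
        return READ_ONCE_ULL(rq->avg_thermal.load_avg);
}
#else
static inline unsigned long long thermal_load_avg(struct rq *rq)
{
        (void)rq;
        return 0;                       /* feature compiled out: constant zero */
}
#endif

int main(void)
{
        struct rq r = { .avg_thermal = { .load_avg = 123 } };

        printf("%llu\n", thermal_load_avg(&r));  /* 123 if built in, else 0 */
        return 0;
}
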
deadline.c
28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) in rq_of_dl_rq()
30 return container_of(dl_rq, struct rq, dl); in rq_of_dl_rq()
36 struct rq *rq = task_rq(p); in dl_rq_of_se() local
38 return &rq->dl; in dl_rq_of_se()
235 struct rq *rq; in dl_change_utilization() local
242 rq = task_rq(p); in dl_change_utilization()
244 sub_running_bw(&p->dl, &rq->dl); in dl_change_utilization()
256 __sub_rq_bw(p->dl.dl_bw, &rq->dl); in dl_change_utilization()
257 __add_rq_bw(new_bw, &rq->dl); in dl_change_utilization()
319 struct rq *rq = rq_of_dl_rq(dl_rq); in task_non_contending() local
[all …]
stop_task.c
20 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_stop() argument
22 return sched_stop_runnable(rq); in balance_stop()
27 check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr_stop() argument
32 static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first) in set_next_task_stop() argument
34 stop->se.exec_start = rq_clock_task(rq); in set_next_task_stop()
37 static struct task_struct *pick_task_stop(struct rq *rq) in pick_task_stop() argument
39 if (!sched_stop_runnable(rq)) in pick_task_stop()
42 return rq->stop; in pick_task_stop()
45 static struct task_struct *pick_next_task_stop(struct rq *rq) in pick_next_task_stop() argument
47 struct task_struct *p = pick_task_stop(rq); in pick_next_task_stop()
[all …]
stats.h
9 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) in rq_sched_info_arrive() argument
11 if (rq) { in rq_sched_info_arrive()
12 rq->rq_sched_info.run_delay += delta; in rq_sched_info_arrive()
13 rq->rq_sched_info.pcount++; in rq_sched_info_arrive()
21 rq_sched_info_depart(struct rq *rq, unsigned long long delta) in rq_sched_info_depart() argument
23 if (rq) in rq_sched_info_depart()
24 rq->rq_cpu_time += delta; in rq_sched_info_depart()
28 rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) in rq_sched_info_dequeue() argument
30 if (rq) in rq_sched_info_dequeue()
31 rq->rq_sched_info.run_delay += delta; in rq_sched_info_dequeue()
[all …]
rt.c
123 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
125 return rt_rq->rq; in rq_of_rt_rq()
133 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
137 return rt_rq->rq; in rq_of_rt_se()
162 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry() local
166 rt_rq->rq = rq; in init_tg_rt_entry()
176 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
234 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
236 return container_of(rt_rq, struct rq, rt); in rq_of_rt_rq()
239 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
[all …]
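
The container_of()-based variant of rq_of_rt_rq() above recovers the enclosing struct rq from an embedded rt_rq. A self-contained demonstration of that idiom with simplified structures (not the scheduler's real layout):

#include <stdio.h>
#include <stddef.h>

/* Same arithmetic the kernel macro performs, minus the type checking. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rt_rq { int rt_nr_running; };

struct rq {
        int cpu;
        struct rt_rq rt;        /* embedded, as when group scheduling is off */
};

static struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);
}

int main(void)
{
        struct rq r = { .cpu = 3 };
        struct rt_rq *inner = &r.rt;

        printf("cpu=%d\n", rq_of_rt_rq(inner)->cpu);    /* prints cpu=3 */
        return 0;
}
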
core.c
45 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
170 void sched_core_enqueue(struct rq *rq, struct task_struct *p) in sched_core_enqueue() argument
172 rq->core->core_task_seq++; in sched_core_enqueue()
177 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less); in sched_core_enqueue()
180 void sched_core_dequeue(struct rq *rq, struct task_struct *p) in sched_core_dequeue() argument
182 rq->core->core_task_seq++; in sched_core_dequeue()
187 rb_erase(&p->core_node, &rq->core_tree); in sched_core_dequeue()
194 static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie) in sched_core_find() argument
198 node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp); in sched_core_find()
203 return idle_sched_class.pick_task(rq); in sched_core_find()
[all …]
/Linux-v5.15/drivers/net/ethernet/cisco/enic/
vnic_rq.c
31 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
34 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
38 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL); in vnic_rq_alloc_bufs()
39 if (!rq->bufs[i]) in vnic_rq_alloc_bufs()
44 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
47 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
48 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
50 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
53 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
61 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
[all …]
vnic_rq.h
97 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
100 return rq->ring.desc_avail; in vnic_rq_desc_avail()
103 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
106 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
109 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
111 return rq->to_use->desc; in vnic_rq_next_desc()
114 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
116 return rq->to_use->index; in vnic_rq_next_index()
119 static inline void vnic_rq_post(struct vnic_rq *rq, in vnic_rq_post() argument
124 struct vnic_rq_buf *buf = rq->to_use; in vnic_rq_post()
[all …]
/Linux-v5.15/fs/erofs/
decompressor.c
24 int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
26 int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
66 static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq, in z_erofs_lz4_prepare_destpages() argument
70 PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; in z_erofs_lz4_prepare_destpages()
75 EROFS_SB(rq->sb)->lz4.max_distance_pages; in z_erofs_lz4_prepare_destpages()
81 struct page *const page = rq->out[i]; in z_erofs_lz4_prepare_destpages()
91 availables[top++] = rq->out[i - lz4_max_distance_pages]; in z_erofs_lz4_prepare_destpages()
117 rq->out[i] = victim; in z_erofs_lz4_prepare_destpages()
122 static void *z_erofs_handle_inplace_io(struct z_erofs_decompress_req *rq, in z_erofs_handle_inplace_io() argument
131 inputsize = rq->inputsize; in z_erofs_handle_inplace_io()
[all …]
/Linux-v5.15/drivers/gpu/drm/i915/gt/
gen8_engine_cs.c
12 int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode) in gen8_emit_flush_rcs() argument
41 if (GRAPHICS_VER(rq->engine->i915) == 9) in gen8_emit_flush_rcs()
45 if (IS_KBL_GT_STEP(rq->engine->i915, 0, STEP_C0)) in gen8_emit_flush_rcs()
57 cs = intel_ring_begin(rq, len); in gen8_emit_flush_rcs()
73 intel_ring_advance(rq, cs); in gen8_emit_flush_rcs()
78 int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode) in gen8_emit_flush_xcs() argument
82 cs = intel_ring_begin(rq, 4); in gen8_emit_flush_xcs()
98 if (rq->engine->class == VIDEO_DECODE_CLASS) in gen8_emit_flush_xcs()
106 intel_ring_advance(rq, cs); in gen8_emit_flush_xcs()
111 int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode) in gen11_emit_flush_rcs() argument
[all …]
gen6_engine_cs.c
54 gen6_emit_post_sync_nonzero_flush(struct i915_request *rq) in gen6_emit_post_sync_nonzero_flush() argument
57 intel_gt_scratch_offset(rq->engine->gt, in gen6_emit_post_sync_nonzero_flush()
61 cs = intel_ring_begin(rq, 6); in gen6_emit_post_sync_nonzero_flush()
71 intel_ring_advance(rq, cs); in gen6_emit_post_sync_nonzero_flush()
73 cs = intel_ring_begin(rq, 6); in gen6_emit_post_sync_nonzero_flush()
83 intel_ring_advance(rq, cs); in gen6_emit_post_sync_nonzero_flush()
88 int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode) in gen6_emit_flush_rcs() argument
91 intel_gt_scratch_offset(rq->engine->gt, in gen6_emit_flush_rcs()
97 ret = gen6_emit_post_sync_nonzero_flush(rq); in gen6_emit_flush_rcs()
129 cs = intel_ring_begin(rq, 4); in gen6_emit_flush_rcs()
[all …]
selftest_execlists.c
27 static bool is_active(struct i915_request *rq) in is_active() argument
29 if (i915_request_is_active(rq)) in is_active()
32 if (i915_request_on_hold(rq)) in is_active()
35 if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq)) in is_active()
42 struct i915_request *rq, in wait_for_submit() argument
52 if (i915_request_completed(rq)) /* that was quick! */ in wait_for_submit()
57 if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq)) in wait_for_submit()
68 struct i915_request *rq, in wait_for_reset() argument
80 if (i915_request_completed(rq)) in wait_for_reset()
83 if (READ_ONCE(rq->fence.error)) in wait_for_reset()
[all …]
intel_breadcrumbs.c
105 check_signal_order(struct intel_context *ce, struct i915_request *rq) in check_signal_order() argument
107 if (rq->context != ce) in check_signal_order()
110 if (!list_is_last(&rq->signal_link, &ce->signals) && in check_signal_order()
111 i915_seqno_passed(rq->fence.seqno, in check_signal_order()
112 list_next_entry(rq, signal_link)->fence.seqno)) in check_signal_order()
115 if (!list_is_first(&rq->signal_link, &ce->signals) && in check_signal_order()
116 i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno, in check_signal_order()
117 rq->fence.seqno)) in check_signal_order()
206 struct i915_request *rq; in signal_irq_work() local
208 list_for_each_entry_rcu(rq, &ce->signals, signal_link) { in signal_irq_work()
[all …]
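
check_signal_order() verifies that a context's signal list stays ordered by fence seqno, using i915_seqno_passed() so the comparison survives 32-bit wraparound. A stand-alone version of that wraparound-safe comparison; the signed-difference body below is the common idiom and is assumed here rather than quoted from the driver.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* True if seq1 is at or after seq2, even across a u32 wraparound. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        printf("%d\n", seqno_passed(10, 5));              /* 1 */
        printf("%d\n", seqno_passed(5, 10));              /* 0 */
        printf("%d\n", seqno_passed(3, UINT32_MAX - 2));  /* 1: 3 is "after" the wrap */
        return 0;
}
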
intel_execlists_submission.c
208 struct i915_request *rq, in __active_request() argument
211 struct i915_request *active = rq; in __active_request()
213 list_for_each_entry_from_reverse(rq, &tl->requests, link) { in __active_request()
214 if (__i915_request_is_complete(rq)) in __active_request()
218 i915_request_set_error_once(rq, error); in __active_request()
219 __i915_request_skip(rq); in __active_request()
221 active = rq; in __active_request()
228 active_request(const struct intel_timeline * const tl, struct i915_request *rq) in active_request() argument
230 return __active_request(tl, rq, 0); in active_request()
251 static int rq_prio(const struct i915_request *rq) in rq_prio() argument
[all …]
gen2_engine_cs.c
14 int gen2_emit_flush(struct i915_request *rq, u32 mode) in gen2_emit_flush() argument
23 cs = intel_ring_begin(rq, 2 + 4 * num_store_dw); in gen2_emit_flush()
36 intel_ring_advance(rq, cs); in gen2_emit_flush()
41 int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode) in gen4_emit_flush_rcs() argument
77 if (IS_G4X(rq->engine->i915) || GRAPHICS_VER(rq->engine->i915) == 5) in gen4_emit_flush_rcs()
85 cs = intel_ring_begin(rq, i); in gen4_emit_flush_rcs()
103 *cs++ = intel_gt_scratch_offset(rq->engine->gt, in gen4_emit_flush_rcs()
113 *cs++ = intel_gt_scratch_offset(rq->engine->gt, in gen4_emit_flush_rcs()
122 intel_ring_advance(rq, cs); in gen4_emit_flush_rcs()
127 int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode) in gen4_emit_flush_vcs() argument
[all …]
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c
58 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
61 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
63 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
64 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
84 static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq, in mlx5e_read_title_slot() argument
88 struct mlx5e_cq_decomp *cqd = &rq->cqd; in mlx5e_read_title_slot()
94 rq->stats->cqe_compress_blks++; in mlx5e_read_title_slot()
129 static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq, in mlx5e_decompress_cqe() argument
133 struct mlx5e_cq_decomp *cqd = &rq->cqd; in mlx5e_decompress_cqe()
145 if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) { in mlx5e_decompress_cqe()
[all …]
/Linux-v5.15/drivers/scsi/esas2r/
esas2r_disc.c
49 struct esas2r_request *rq);
51 struct esas2r_request *rq);
55 struct esas2r_request *rq);
59 struct esas2r_request *rq);
61 struct esas2r_request *rq);
63 struct esas2r_request *rq);
65 struct esas2r_request *rq);
67 struct esas2r_request *rq);
69 struct esas2r_request *rq);
71 struct esas2r_request *rq);
[all …]
esas2r_vda.c
59 static void clear_vda_request(struct esas2r_request *rq);
62 struct esas2r_request *rq);
67 struct esas2r_request *rq, in esas2r_process_vda_ioctl() argument
93 clear_vda_request(rq); in esas2r_process_vda_ioctl()
95 rq->vrq->scsi.function = vi->function; in esas2r_process_vda_ioctl()
96 rq->interrupt_cb = esas2r_complete_vda_ioctl; in esas2r_process_vda_ioctl()
97 rq->interrupt_cx = vi; in esas2r_process_vda_ioctl()
112 rq->vrq->flash.length = cpu_to_le32(datalen); in esas2r_process_vda_ioctl()
113 rq->vrq->flash.sub_func = vi->cmd.flash.sub_func; in esas2r_process_vda_ioctl()
115 memcpy(rq->vrq->flash.data.file.file_name, in esas2r_process_vda_ioctl()
[all …]
/Linux-v5.15/block/
blk-flush.c
98 static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq) in blk_flush_policy() argument
102 if (blk_rq_sectors(rq)) in blk_flush_policy()
106 if (rq->cmd_flags & REQ_PREFLUSH) in blk_flush_policy()
109 (rq->cmd_flags & REQ_FUA)) in blk_flush_policy()
115 static unsigned int blk_flush_cur_seq(struct request *rq) in blk_flush_cur_seq() argument
117 return 1 << ffz(rq->flush.seq); in blk_flush_cur_seq()
120 static void blk_flush_restore_request(struct request *rq) in blk_flush_restore_request() argument
127 rq->bio = rq->biotail; in blk_flush_restore_request()
130 rq->rq_flags &= ~RQF_FLUSH_SEQ; in blk_flush_restore_request()
131 rq->end_io = rq->flush.saved_end_io; in blk_flush_restore_request()
[all …]
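
blk_flush_policy() above turns the request flags into a bitmask of flush stages, and blk_flush_cur_seq() picks the next stage as 1 << ffz(seq), i.e. the first stage bit not yet marked done. A toy walk through that bit progression; the FSEQ_* values and the simplified loop are illustrative, not the block layer's actual state machine.

#include <stdio.h>

/* One bit per stage of the flush sequence (illustrative values). */
#define FSEQ_PREFLUSH   (1U << 0)
#define FSEQ_DATA       (1U << 1)
#define FSEQ_POSTFLUSH  (1U << 2)
#define FSEQ_DONE       (1U << 3)

/* ffz(): index of the first zero bit, mirroring the kernel helper. */
static unsigned int ffz(unsigned int x)
{
        unsigned int i = 0;

        while (x & (1U << i))
                i++;
        return i;
}

/* Current stage = first stage whose bit is not yet set in 'seq'. */
static unsigned int flush_cur_seq(unsigned int seq)
{
        return 1U << ffz(seq);
}

int main(void)
{
        /* e.g. a FUA write on a device without native FUA support:
         * preflush, then the data itself, then a postflush. */
        unsigned int policy = FSEQ_PREFLUSH | FSEQ_DATA | FSEQ_POSTFLUSH;
        unsigned int seq = 0;

        while (flush_cur_seq(seq) != FSEQ_DONE) {
                unsigned int stage = flush_cur_seq(seq);

                if (stage & policy)
                        printf("run stage 0x%x\n", stage);
                seq |= stage;           /* mark the stage completed */
        }
        return 0;
}
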
blk-exec.c
20 static void blk_end_sync_rq(struct request *rq, blk_status_t error) in blk_end_sync_rq() argument
22 struct completion *waiting = rq->end_io_data; in blk_end_sync_rq()
24 rq->end_io_data = (void *)(uintptr_t)error; in blk_end_sync_rq()
47 void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq, in blk_execute_rq_nowait() argument
51 WARN_ON(!blk_rq_is_passthrough(rq)); in blk_execute_rq_nowait()
53 rq->rq_disk = bd_disk; in blk_execute_rq_nowait()
54 rq->end_io = done; in blk_execute_rq_nowait()
56 blk_account_io_start(rq); in blk_execute_rq_nowait()
62 blk_mq_sched_insert_request(rq, at_head, true, false); in blk_execute_rq_nowait()
66 static bool blk_rq_is_poll(struct request *rq) in blk_rq_is_poll() argument
[all …]
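
blk_end_sync_rq() above hands the completion status back to the waiter by overwriting the request's end_io_data pointer with the error cast through uintptr_t, once the stashed completion pointer has been read out. A single-threaded toy version of that pointer-packing trick, with the struct completion and the actual wait left out.

#include <stdint.h>
#include <stdio.h>

/* A toy "request" whose end_io_data doubles as the result channel. */
struct request {
        void *end_io_data;
        void (*end_io)(struct request *rq, int error);
};

/* Completion callback: stash the status where the waiter will look for it. */
static void end_sync_rq(struct request *rq, int error)
{
        rq->end_io_data = (void *)(intptr_t)error;
}

int main(void)
{
        struct request rq = { .end_io = end_sync_rq };

        rq.end_io(&rq, -5);     /* pretend the driver completed with an error */
        printf("status %d\n", (int)(intptr_t)rq.end_io_data);
        return 0;
}
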
