
Searched refs:rq (Results 1 – 25 of 588) sorted by relevance


/Linux-v5.4/drivers/scsi/fnic/
vnic_rq.c 27 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
30 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
34 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); in vnic_rq_alloc_bufs()
35 if (!rq->bufs[i]) { in vnic_rq_alloc_bufs()
42 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
45 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
46 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
48 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
51 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
59 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
[all …]
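
The fnic hits above (and their enic twin further down) follow one idiom: buffer metadata is allocated in blocks, each buffer is pointed at its slot in the descriptor ring, and every buffer's next pointer chains forward, with the last wrapping back to the first. A minimal standalone sketch of that idiom, with simplified stand-in types rather than the driver's real layout:

#include <stddef.h>

/* Simplified stand-ins for struct vnic_rq_buf and the ring memory. */
struct ring_buf {
        void *desc;              /* this buffer's descriptor slot    */
        unsigned int index;
        struct ring_buf *next;   /* forward chain, wraps at the end  */
};

static void ring_chain_bufs(struct ring_buf *bufs, unsigned int count,
                            unsigned char *descs, size_t desc_size)
{
        for (unsigned int i = 0; i < count; i++) {
                bufs[i].index = i;
                bufs[i].desc = descs + (size_t)i * desc_size;
                /* last buffer wraps to the first: a circular list */
                bufs[i].next = (i + 1 < count) ? &bufs[i + 1] : &bufs[0];
        }
}

With the circle in place, the driver only needs the two cursors seen at line 59 (to_use and to_clean, both starting at bufs[0]) to post and reclaim descriptors without any modulo arithmetic.
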
vnic_rq.h 105 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
108 return rq->ring.desc_avail; in vnic_rq_desc_avail()
111 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
114 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
117 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
119 return rq->to_use->desc; in vnic_rq_next_desc()
122 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
124 return rq->to_use->index; in vnic_rq_next_index()
127 static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq) in vnic_rq_next_buf_index() argument
129 return rq->buf_index++; in vnic_rq_next_buf_index()
[all …]
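
The desc_used arithmetic above encodes the usual ring convention: one descriptor is never posted, so a completely full ring remains distinguishable from an empty one. A one-liner with worked values:

/* One slot stays reserved: with desc_count entries, at most
 * desc_count - 1 descriptors are ever outstanding. */
static inline unsigned int ring_used(unsigned int desc_count,
                                     unsigned int desc_avail)
{
        return desc_count - desc_avail - 1;
}
/* ring_used(256, 255) == 0   -> ring empty
 * ring_used(256,   0) == 255 -> ring full (one slot held back) */
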
/Linux-v5.4/drivers/net/ethernet/cisco/enic/
vnic_rq.c 31 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
34 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
38 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL); in vnic_rq_alloc_bufs()
39 if (!rq->bufs[i]) in vnic_rq_alloc_bufs()
44 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
47 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
48 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
50 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
53 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
61 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
[all …]
vnic_rq.h 97 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
100 return rq->ring.desc_avail; in vnic_rq_desc_avail()
103 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
106 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
109 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
111 return rq->to_use->desc; in vnic_rq_next_desc()
114 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
116 return rq->to_use->index; in vnic_rq_next_index()
119 static inline void vnic_rq_post(struct vnic_rq *rq, in vnic_rq_post() argument
124 struct vnic_rq_buf *buf = rq->to_use; in vnic_rq_post()
[all …]
/Linux-v5.4/kernel/sched/
sched.h 84 struct rq;
96 extern void calc_global_load_tick(struct rq *this_rq);
97 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
552 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ member
621 struct rq *rq; member
789 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
847 struct rq { struct
1007 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() argument
1009 return cfs_rq->rq; in rq_of()
1014 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of()
[all …]
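
The two rq_of() hits at sched.h lines 1007 and 1014 are the two sides of an #ifdef: with CONFIG_FAIR_GROUP_SCHED each cfs_rq stores an explicit back-pointer (the rq member visible at line 552), while without it the lone cfs_rq is embedded in struct rq and container_of() recovers the parent. An illustrative standalone sketch of the embedded case, with simplified types:

#include <stddef.h>

struct rq_s;                             /* simplified stand-ins */
struct cfs_rq_s { int nr_running; };
struct rq_s { struct cfs_rq_s cfs; };

/* equivalent of: return container_of(cfs_rq, struct rq, cfs); */
static inline struct rq_s *rq_of_embedded(struct cfs_rq_s *cfs_rq)
{
        return (struct rq_s *)((char *)cfs_rq - offsetof(struct rq_s, cfs));
}
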
pelt.h 7 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
8 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
11 int update_irq_load_avg(struct rq *rq, u64 running);
14 update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument
58 static inline void update_rq_clock_pelt(struct rq *rq, s64 delta) in update_rq_clock_pelt() argument
60 if (unlikely(is_idle_task(rq->curr))) { in update_rq_clock_pelt()
62 rq->clock_pelt = rq_clock_task(rq); in update_rq_clock_pelt()
82 delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq))); in update_rq_clock_pelt()
83 delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq))); in update_rq_clock_pelt()
85 rq->clock_pelt += delta; in update_rq_clock_pelt()
[all …]
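
update_rq_clock_pelt() above scales wall-clock deltas by both CPU capacity and current frequency before adding them to clock_pelt, which is what makes PELT signals comparable across big/little cores and DVFS states. A sketch of the scaling arithmetic (illustrative, mirroring the kernel's cap_scale() macro):

#include <stdint.h>

#define SCHED_CAPACITY_SHIFT 10   /* 1024 == full capacity / full freq */

static inline int64_t cap_scale(int64_t delta, uint64_t cap)
{
        return (delta * (int64_t)cap) >> SCHED_CAPACITY_SHIFT;
}
/* 2 ms of wall time on a half-capacity CPU (512) at full frequency
 * (1024): cap_scale(cap_scale(2000000, 512), 1024) == 1000000 ns,
 * i.e. only 1 ms of PELT time accrues. */
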
stats.h 9 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) in rq_sched_info_arrive() argument
11 if (rq) { in rq_sched_info_arrive()
12 rq->rq_sched_info.run_delay += delta; in rq_sched_info_arrive()
13 rq->rq_sched_info.pcount++; in rq_sched_info_arrive()
21 rq_sched_info_depart(struct rq *rq, unsigned long long delta) in rq_sched_info_depart() argument
23 if (rq) in rq_sched_info_depart()
24 rq->rq_cpu_time += delta; in rq_sched_info_depart()
28 rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) in rq_sched_info_dequeued() argument
30 if (rq) in rq_sched_info_dequeued()
31 rq->rq_sched_info.run_delay += delta; in rq_sched_info_dequeued()
[all …]
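
Read together, the three hooks above bracket a task's trip through a runqueue for schedstats: arrive charges time spent waiting, depart charges time spent running, and dequeued covers the wait of a task that left without ever running. A comment-form timeline of that reading (an interpretation, not kernel documentation):

/*
 *  enqueue --(delta waiting)--> scheduled in:
 *          rq_sched_info_arrive():   run_delay += delta, pcount++
 *  running --(delta on CPU)--> switched out:
 *          rq_sched_info_depart():   rq_cpu_time += delta
 *  enqueue --(delta waiting)--> removed without running:
 *          rq_sched_info_dequeued(): run_delay += delta
 */
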
deadline.c 28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) in rq_of_dl_rq()
30 return container_of(dl_rq, struct rq, dl); in rq_of_dl_rq()
36 struct rq *rq = task_rq(p); in dl_rq_of_se() local
38 return &rq->dl; in dl_rq_of_se()
158 struct rq *rq; in dl_change_utilization() local
165 rq = task_rq(p); in dl_change_utilization()
167 sub_running_bw(&p->dl, &rq->dl); in dl_change_utilization()
179 __sub_rq_bw(p->dl.dl_bw, &rq->dl); in dl_change_utilization()
180 __add_rq_bw(new_bw, &rq->dl); in dl_change_utilization()
242 struct rq *rq = rq_of_dl_rq(dl_rq); in task_non_contending() local
[all …]
stop_task.c 20 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_stop() argument
22 return sched_stop_runnable(rq); in balance_stop()
27 check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr_stop() argument
32 static void set_next_task_stop(struct rq *rq, struct task_struct *stop) in set_next_task_stop() argument
34 stop->se.exec_start = rq_clock_task(rq); in set_next_task_stop()
38 pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in pick_next_task_stop() argument
42 if (!sched_stop_runnable(rq)) in pick_next_task_stop()
45 set_next_task_stop(rq, rq->stop); in pick_next_task_stop()
46 return rq->stop; in pick_next_task_stop()
50 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_stop() argument
[all …]
rt.c 121 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
123 return rt_rq->rq; in rq_of_rt_rq()
131 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
135 return rt_rq->rq; in rq_of_rt_se()
160 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry() local
164 rt_rq->rq = rq; in init_tg_rt_entry()
174 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
232 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
234 return container_of(rt_rq, struct rq, rt); in rq_of_rt_rq()
237 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
[all …]
core.c 37 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
78 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) in __task_rq_lock()
79 __acquires(rq->lock) in __task_rq_lock()
81 struct rq *rq; in __task_rq_lock() local
86 rq = task_rq(p); in __task_rq_lock()
87 raw_spin_lock(&rq->lock); in __task_rq_lock()
88 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in __task_rq_lock()
89 rq_pin_lock(rq, rf); in __task_rq_lock()
90 return rq; in __task_rq_lock()
92 raw_spin_unlock(&rq->lock); in __task_rq_lock()
[all …]
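
__task_rq_lock() above is a lock-and-revalidate loop: the task can migrate to another CPU between the unlocked task_rq(p) read and the lock acquisition, so the association is re-checked under the lock and the step retried on mismatch. The same shape in portable C (a sketch; a production version would read h->cur with an atomic/READ_ONCE-style access):

#include <pthread.h>

struct obj { pthread_mutex_t lock; };
struct handle { struct obj *cur; };  /* may be repointed concurrently */

static struct obj *lock_current(struct handle *h)
{
        for (;;) {
                struct obj *o = h->cur;          /* racy read        */
                pthread_mutex_lock(&o->lock);
                if (o == h->cur)                 /* still current?   */
                        return o;                /* locked and valid */
                pthread_mutex_unlock(&o->lock);  /* moved: retry     */
        }
}
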
/Linux-v5.4/drivers/gpu/drm/i915/
i915_request.c 45 void (*hook)(struct i915_request *rq, struct dma_fence *signal);
99 struct i915_request *rq = to_request(fence); in i915_fence_release() local
108 i915_sw_fence_fini(&rq->submit); in i915_fence_release()
109 i915_sw_fence_fini(&rq->semaphore); in i915_fence_release()
111 kmem_cache_free(global.slab_requests, rq); in i915_fence_release()
142 static void __notify_execute_cb(struct i915_request *rq) in __notify_execute_cb() argument
146 lockdep_assert_held(&rq->lock); in __notify_execute_cb()
148 if (list_empty(&rq->execute_cb)) in __notify_execute_cb()
151 list_for_each_entry(cb, &rq->execute_cb, link) in __notify_execute_cb()
164 INIT_LIST_HEAD(&rq->execute_cb); in __notify_execute_cb()
[all …]
i915_request.h 251 void __i915_request_queue(struct i915_request *rq,
254 void i915_request_retire_upto(struct i915_request *rq);
266 i915_request_get(struct i915_request *rq) in i915_request_get() argument
268 return to_request(dma_fence_get(&rq->fence)); in i915_request_get()
272 i915_request_get_rcu(struct i915_request *rq) in i915_request_get_rcu() argument
274 return to_request(dma_fence_get_rcu(&rq->fence)); in i915_request_get_rcu()
278 i915_request_put(struct i915_request *rq) in i915_request_put() argument
280 dma_fence_put(&rq->fence); in i915_request_put()
286 int i915_request_await_dma_fence(struct i915_request *rq,
288 int i915_request_await_execution(struct i915_request *rq,
[all …]
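
i915_request_get()/i915_request_put() above are thin typed wrappers: a request's lifetime rides entirely on the refcount of its embedded dma_fence. The generic shape of that idiom, with stand-in types (not the i915 implementation):

#include <stdatomic.h>
#include <stdlib.h>

struct fence   { atomic_uint refcount; };  /* stand-in for dma_fence */
struct request { struct fence fence; };

static struct request *request_get(struct request *rq)
{
        atomic_fetch_add(&rq->fence.refcount, 1);
        return rq;
}

static void request_put(struct request *rq)
{
        if (atomic_fetch_sub(&rq->fence.refcount, 1) == 1)
                free(rq);    /* dropped the last reference */
}
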
/Linux-v5.4/fs/erofs/
decompressor.c 25 int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
27 int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
31 static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq, in z_erofs_lz4_prepare_destpages() argument
35 PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; in z_erofs_lz4_prepare_destpages()
44 struct page *const page = rq->out[i]; in z_erofs_lz4_prepare_destpages()
54 availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES]; in z_erofs_lz4_prepare_destpages()
81 rq->out[i] = victim; in z_erofs_lz4_prepare_destpages()
86 static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq, in generic_copy_inplace_data() argument
93 struct page **in = rq->in; in generic_copy_inplace_data()
96 unsigned int inlen = rq->inputsize - pageofs_in; in generic_copy_inplace_data()
[all …]
/Linux-v5.4/drivers/scsi/esas2r/
esas2r_disc.c 49 struct esas2r_request *rq);
51 struct esas2r_request *rq);
55 struct esas2r_request *rq);
59 struct esas2r_request *rq);
61 struct esas2r_request *rq);
63 struct esas2r_request *rq);
65 struct esas2r_request *rq);
67 struct esas2r_request *rq);
69 struct esas2r_request *rq);
71 struct esas2r_request *rq);
[all …]
esas2r_vda.c 59 static void clear_vda_request(struct esas2r_request *rq);
62 struct esas2r_request *rq);
67 struct esas2r_request *rq, in esas2r_process_vda_ioctl() argument
93 clear_vda_request(rq); in esas2r_process_vda_ioctl()
95 rq->vrq->scsi.function = vi->function; in esas2r_process_vda_ioctl()
96 rq->interrupt_cb = esas2r_complete_vda_ioctl; in esas2r_process_vda_ioctl()
97 rq->interrupt_cx = vi; in esas2r_process_vda_ioctl()
112 rq->vrq->flash.length = cpu_to_le32(datalen); in esas2r_process_vda_ioctl()
113 rq->vrq->flash.sub_func = vi->cmd.flash.sub_func; in esas2r_process_vda_ioctl()
115 memcpy(rq->vrq->flash.data.file.file_name, in esas2r_process_vda_ioctl()
[all …]
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c 66 static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq, in mlx5e_read_title_slot() argument
70 struct mlx5e_cq_decomp *cqd = &rq->cqd; in mlx5e_read_title_slot()
76 rq->stats->cqe_compress_blks++; in mlx5e_read_title_slot()
111 static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq, in mlx5e_decompress_cqe() argument
115 struct mlx5e_cq_decomp *cqd = &rq->cqd; in mlx5e_decompress_cqe()
125 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) in mlx5e_decompress_cqe()
129 mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1); in mlx5e_decompress_cqe()
132 static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq, in mlx5e_decompress_cqe_no_hash() argument
136 struct mlx5e_cq_decomp *cqd = &rq->cqd; in mlx5e_decompress_cqe_no_hash()
138 mlx5e_decompress_cqe(rq, wq, cqcc); in mlx5e_decompress_cqe_no_hash()
[all …]
/Linux-v5.4/block/
blk-flush.c 98 static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq) in blk_flush_policy() argument
102 if (blk_rq_sectors(rq)) in blk_flush_policy()
106 if (rq->cmd_flags & REQ_PREFLUSH) in blk_flush_policy()
109 (rq->cmd_flags & REQ_FUA)) in blk_flush_policy()
115 static unsigned int blk_flush_cur_seq(struct request *rq) in blk_flush_cur_seq() argument
117 return 1 << ffz(rq->flush.seq); in blk_flush_cur_seq()
120 static void blk_flush_restore_request(struct request *rq) in blk_flush_restore_request() argument
127 rq->bio = rq->biotail; in blk_flush_restore_request()
130 rq->rq_flags &= ~RQF_FLUSH_SEQ; in blk_flush_restore_request()
131 rq->end_io = rq->flush.saved_end_io; in blk_flush_restore_request()
[all …]
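
The flush machinery above drives each request through a small sequence: rq->flush.seq is a bitmask of completed steps, blk_flush_policy() decides which steps a request needs, and blk_flush_cur_seq() picks the lowest not-yet-done step with 1 << ffz(seq). A standalone sketch (bit values mirror v5.4's REQ_FSEQ_* constants in blk-flush.c):

#define REQ_FSEQ_PREFLUSH  (1u << 0)
#define REQ_FSEQ_DATA      (1u << 1)
#define REQ_FSEQ_POSTFLUSH (1u << 2)
#define REQ_FSEQ_DONE      (1u << 3)

static unsigned int ffz32(unsigned int x)  /* index of first zero bit */
{
        unsigned int i = 0;
        while (x & 1u) { x >>= 1; i++; }
        return i;
}
/* cur_seq(seq) = 1 << ffz32(seq):
 *   seq == 0                               -> PREFLUSH is next
 *   seq == REQ_FSEQ_PREFLUSH               -> DATA is next
 *   seq == REQ_FSEQ_PREFLUSH|REQ_FSEQ_DATA -> POSTFLUSH is next */
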
blk-pm.h 16 static inline void blk_pm_mark_last_busy(struct request *rq) in blk_pm_mark_last_busy() argument
18 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_mark_last_busy()
19 pm_runtime_mark_last_busy(rq->q->dev); in blk_pm_mark_last_busy()
22 static inline void blk_pm_requeue_request(struct request *rq) in blk_pm_requeue_request() argument
24 lockdep_assert_held(&rq->q->queue_lock); in blk_pm_requeue_request()
26 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_requeue_request()
27 rq->q->nr_pending--; in blk_pm_requeue_request()
31 struct request *rq) in blk_pm_add_request() argument
35 if (q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_add_request()
39 static inline void blk_pm_put_request(struct request *rq) in blk_pm_put_request() argument
[all …]
blk-mq.c 46 static int blk_mq_poll_stats_bkt(const struct request *rq) in blk_mq_poll_stats_bkt() argument
50 ddir = rq_data_dir(rq); in blk_mq_poll_stats_bkt()
51 sectors = blk_rq_stats_sectors(rq); in blk_mq_poll_stats_bkt()
100 struct request *rq, void *priv, in blk_mq_check_inflight() argument
108 if (rq->part == mi->part) in blk_mq_check_inflight()
126 struct request *rq, void *priv, in blk_mq_check_inflight_rw() argument
131 if (rq->part == mi->part) in blk_mq_check_inflight_rw()
132 mi->inflight[rq_data_dir(rq)]++; in blk_mq_check_inflight_rw()
289 static inline bool blk_mq_need_time_stamp(struct request *rq) in blk_mq_need_time_stamp() argument
291 return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator; in blk_mq_need_time_stamp()
[all …]
mq-deadline.c 68 deadline_rb_root(struct deadline_data *dd, struct request *rq) in deadline_rb_root() argument
70 return &dd->sort_list[rq_data_dir(rq)]; in deadline_rb_root()
77 deadline_latter_request(struct request *rq) in deadline_latter_request() argument
79 struct rb_node *node = rb_next(&rq->rb_node); in deadline_latter_request()
88 deadline_add_rq_rb(struct deadline_data *dd, struct request *rq) in deadline_add_rq_rb() argument
90 struct rb_root *root = deadline_rb_root(dd, rq); in deadline_add_rq_rb()
92 elv_rb_add(root, rq); in deadline_add_rq_rb()
96 deadline_del_rq_rb(struct deadline_data *dd, struct request *rq) in deadline_del_rq_rb() argument
98 const int data_dir = rq_data_dir(rq); in deadline_del_rq_rb()
100 if (dd->next_rq[data_dir] == rq) in deadline_del_rq_rb()
[all …]
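
deadline_rb_root() and deadline_latter_request() above expose mq-deadline's core layout: per-direction red-black trees keyed by sector, scanned in order via rb_next(), with next_rq[dir] remembering where the elevator-style scan left off (and cleared when that request is removed, as at line 100). A hedged sketch of the per-direction bookkeeping, with stand-in types:

struct rb_root   { void *rb_node; };                  /* stand-ins */
struct list_head { struct list_head *next, *prev; };
struct request;

/* One of these per data direction (READ and WRITE): */
struct dd_per_dir {
        struct rb_root   sort_list;  /* requests ordered by sector   */
        struct list_head fifo_list;  /* requests ordered by deadline */
        struct request  *next_rq;    /* resume point of sector scan  */
};
/* Dispatch prefers sector order (next_rq) for throughput, falling
 * back to the FIFO when a request's deadline has expired. */
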
/Linux-v5.4/drivers/gpu/drm/i915/gt/
intel_breadcrumbs.c 81 static inline bool __request_completed(const struct i915_request *rq) in __request_completed() argument
83 return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno); in __request_completed()
87 check_signal_order(struct intel_context *ce, struct i915_request *rq) in check_signal_order() argument
89 if (!list_is_last(&rq->signal_link, &ce->signals) && in check_signal_order()
90 i915_seqno_passed(rq->fence.seqno, in check_signal_order()
91 list_next_entry(rq, signal_link)->fence.seqno)) in check_signal_order()
94 if (!list_is_first(&rq->signal_link, &ce->signals) && in check_signal_order()
95 i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno, in check_signal_order()
96 rq->fence.seqno)) in check_signal_order()
148 struct i915_request *rq = in intel_engine_breadcrumbs_irq() local
[all …]
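
check_signal_order() above verifies that a context's signal list stays seqno-ordered, using i915_seqno_passed(), a wraparound-safe comparison implemented as a signed subtraction. A standalone version (correct while the two seqnos are within 2^31 of each other):

#include <stdbool.h>
#include <stdint.h>

static inline bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;  /* seq1 at/after seq2 */
}
/* seqno_passed(5, 0xfffffffb) == true: seqno 5 came after a wrap. */
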
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.c 10 bool mlx5e_xsk_pages_enough_umem(struct mlx5e_rq *rq, int count) in mlx5e_xsk_pages_enough_umem() argument
15 return xsk_umem_has_addrs_rq(rq->umem, count); in mlx5e_xsk_pages_enough_umem()
18 int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq, in mlx5e_xsk_page_alloc_umem() argument
21 struct xdp_umem *umem = rq->umem; in mlx5e_xsk_page_alloc_umem()
28 rq->buff.umem_headroom); in mlx5e_xsk_page_alloc_umem()
40 dma_sync_single_for_device(rq->pdev, dma_info->addr, PAGE_SIZE, in mlx5e_xsk_page_alloc_umem()
46 static inline void mlx5e_xsk_recycle_frame(struct mlx5e_rq *rq, u64 handle) in mlx5e_xsk_recycle_frame() argument
48 xsk_umem_fq_reuse(rq->umem, handle & rq->umem->chunk_mask); in mlx5e_xsk_recycle_frame()
55 void mlx5e_xsk_page_release(struct mlx5e_rq *rq, in mlx5e_xsk_page_release() argument
58 mlx5e_xsk_recycle_frame(rq, dma_info->xsk.handle); in mlx5e_xsk_page_release()
[all …]
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/en/
reporter_rx.c 61 struct mlx5e_rq *rq; in mlx5e_rx_reporter_err_icosq_cqe_recover() local
66 rq = &icosq->channel->rq; in mlx5e_rx_reporter_err_icosq_cqe_recover()
79 mlx5e_deactivate_rq(rq); in mlx5e_rx_reporter_err_icosq_cqe_recover()
93 mlx5e_free_rx_descs(rq); in mlx5e_rx_reporter_err_icosq_cqe_recover()
96 mlx5e_activate_rq(rq); in mlx5e_rx_reporter_err_icosq_cqe_recover()
98 rq->stats->recover++; in mlx5e_rx_reporter_err_icosq_cqe_recover()
118 static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state) in mlx5e_rq_to_ready() argument
120 struct net_device *dev = rq->netdev; in mlx5e_rq_to_ready()
123 err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST); in mlx5e_rq_to_ready()
125 netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn); in mlx5e_rq_to_ready()
[all …]
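
The reporter hits above outline the RQ health-recovery flow. Pieced together from the calls visible here (a reading, not the verified driver sequence):

/*
 *   mlx5e_deactivate_rq(rq);      stop posting new receive WQEs
 *   mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
 *   mlx5e_free_rx_descs(rq);      drop WQEs the HW will never complete
 *   mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
 *   mlx5e_activate_rq(rq);        resume; rq->stats->recover++
 */
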
/Linux-v5.4/drivers/ide/
ide-io.c 57 int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error, in ide_end_rq() argument
70 if (!blk_update_request(rq, error, nr_bytes)) { in ide_end_rq()
71 if (rq == drive->sense_rq) { in ide_end_rq()
76 __blk_mq_end_request(rq, error); in ide_end_rq()
88 struct request *rq = cmd->rq; in ide_complete_cmd() local
115 if (rq && ata_taskfile_request(rq)) { in ide_complete_cmd()
116 struct ide_cmd *orig_cmd = ide_req(rq)->special; in ide_complete_cmd()
128 struct request *rq = hwif->rq; in ide_complete_rq() local
135 if (blk_noretry_request(rq) && error) in ide_complete_rq()
136 nr_bytes = blk_rq_sectors(rq) << 9; in ide_complete_rq()
[all …]
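
ide_end_rq() above leans on the block layer's partial-completion contract: blk_update_request() retires nr_bytes of the request and returns true while bytes remain outstanding, so the request is only fully ended once it returns false (line 70). A tiny standalone model of that contract (a sketch, not the block layer's implementation):

#include <stdbool.h>

struct req { unsigned int bytes_left; };

static bool req_update(struct req *rq, unsigned int nr_bytes)
{
        rq->bytes_left -= (nr_bytes < rq->bytes_left) ? nr_bytes
                                                      : rq->bytes_left;
        return rq->bytes_left != 0;  /* true: request still in flight */
}
/* Caller pattern, as in ide_end_rq():
 *   if (!req_update(rq, nr_bytes))
 *           end_request(rq);        // everything retired
 */
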
