
Search results for references to "rq" in Linux v4.19 (results 1-25 of 531, sorted by relevance).


/Linux-v4.19/drivers/scsi/fnic/
vnic_rq.c
   27  static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
   31          unsigned int i, j, count = rq->ring.desc_count;
   34          vdev = rq->vdev;
   37          rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
   38          if (!rq->bufs[i]) {
   45          buf = rq->bufs[i];
   48          buf->desc = (u8 *)rq->ring.descs +
   49                  rq->ring.desc_size * buf->index;
   51          buf->next = rq->bufs[0];
   54          buf->next = rq->bufs[i + 1];
  [all …]
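
Aside: the excerpt above shows a classic receive-ring setup. Buffer bookkeeping structs are allocated in blocks, each buffer's desc pointer is aimed at its slot in one contiguous descriptor ring, and the final buffer's next link wraps back to bufs[0], closing the circle. A minimal user-space sketch of the same linking pattern (names are illustrative, not the driver's):

    #include <stdlib.h>

    struct buf {
            void       *desc;   /* slot in the shared descriptor ring */
            unsigned    index;
            struct buf *next;
    };

    /* Link `count` buffers into a ring over an array of
     * `desc_size`-byte descriptors; returns the head or NULL. */
    static struct buf *ring_init(void *descs, size_t desc_size, unsigned count)
    {
            struct buf *bufs = calloc(count, sizeof(*bufs));
            if (!bufs)
                    return NULL;
            for (unsigned i = 0; i < count; i++) {
                    bufs[i].index = i;
                    bufs[i].desc  = (char *)descs + desc_size * i;
                    /* last entry points back at the first */
                    bufs[i].next  = (i == count - 1) ? &bufs[0] : &bufs[i + 1];
            }
            return bufs;
    }
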
vnic_rq.h
  105  static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
  108          return rq->ring.desc_avail;
  111  static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
  114          return rq->ring.desc_count - rq->ring.desc_avail - 1;
  117  static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
  119          return rq->to_use->desc;
  122  static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
  124          return rq->to_use->index;
  127  static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
  129          return rq->buf_index++;
  [all …]
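
Note the "- 1" in vnic_rq_desc_used(): one descriptor slot is deliberately left unused, which looks like the usual ring-buffer convention for keeping a completely full ring distinguishable from an empty one. With desc_count = 16, an empty ring has desc_avail = 15 (used = 0); a full ring has desc_avail = 0 (used = 15, never 16).
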
/Linux-v4.19/drivers/net/ethernet/cisco/enic/
vnic_rq.c
   31  static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
   34          unsigned int i, j, count = rq->ring.desc_count;
   38          rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL);
   39          if (!rq->bufs[i])
   44          buf = rq->bufs[i];
   47          buf->desc = (u8 *)rq->ring.descs +
   48                  rq->ring.desc_size * buf->index;
   50          buf->next = rq->bufs[0];
   53          buf->next = rq->bufs[i + 1];
   61          rq->to_use = rq->to_clean = rq->bufs[0];
  [all …]
vnic_rq.h
   97  static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
  100          return rq->ring.desc_avail;
  103  static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
  106          return rq->ring.desc_count - rq->ring.desc_avail - 1;
  109  static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
  111          return rq->to_use->desc;
  114  static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
  116          return rq->to_use->index;
  119  static inline void vnic_rq_post(struct vnic_rq *rq,
  124          struct vnic_rq_buf *buf = rq->to_use;
  [all …]
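
Note: the enic copy of these helpers is essentially the fnic one, with a sized VNIC_RQ_BUF_BLK_SZ(count) and a GFP_KERNEL allocation. rq->to_use and rq->to_clean (both initialized to rq->bufs[0] at line 61 above) read as the producer and consumer cursors on the buffer ring: vnic_rq_post() advances to_use as descriptors are handed to the hardware, and completion processing advances to_clean behind it.
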
/Linux-v4.19/kernel/sched/
stats.h
    9  rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
   11          if (rq) {
   12                  rq->rq_sched_info.run_delay += delta;
   13                  rq->rq_sched_info.pcount++;
   21  rq_sched_info_depart(struct rq *rq, unsigned long long delta)
   23          if (rq)
   24                  rq->rq_cpu_time += delta;
   28  rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
   30          if (rq)
   31                  rq->rq_sched_info.run_delay += delta;
  [all …]
sched.h
   82  struct rq;
   94  extern void calc_global_load_tick(struct rq *this_rq);
   95  extern long calc_load_fold_active(struct rq *this_rq, long adjust);
   98  extern void cpu_load_update_active(struct rq *this_rq);
  100  static inline void cpu_load_update_active(struct rq *this_rq) { }
  541          struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */
  612          struct rq *rq;
  760  extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
  776  struct rq {
  922  static inline int cpu_of(struct rq *rq)
  [all …]
deadline.c
   28  static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
   30          return container_of(dl_rq, struct rq, dl);
  [in dl_rq_of_se()]
   36          struct rq *rq = task_rq(p);
   38          return &rq->dl;
  [in dl_change_utilization()]
  158          struct rq *rq;
  165          rq = task_rq(p);
  167          sub_running_bw(&p->dl, &rq->dl);
  179          __sub_rq_bw(p->dl.dl_bw, &rq->dl);
  180          __add_rq_bw(new_bw, &rq->dl);
  [in task_non_contending()]
  242          struct rq *rq = rq_of_dl_rq(dl_rq);
  [all …]
stop_task.c
   21  check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
   27  pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
   29          struct task_struct *stop = rq->stop;
   34          put_prev_task(rq, prev);
   36          stop->se.exec_start = rq_clock_task(rq);
   42  enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
   44          add_nr_running(rq, 1);
   48  dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
   50          sub_nr_running(rq, 1);
   53  static void yield_task_stop(struct rq *rq)
  [all …]
rt.c
  120  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
  122          return rt_rq->rq;
  130  static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
  134          return rt_rq->rq;
  [in init_tg_rt_entry()]
  159          struct rq *rq = cpu_rq(cpu);
  163          rt_rq->rq = rq;
  173          rt_se->rt_rq = &rq->rt;
  231  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
  233          return container_of(rt_rq, struct rq, rt);
  236  static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
  [all …]
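
Aside: the rt.c hits show both halves of a common kernel idiom. With CONFIG_RT_GROUP_SCHED enabled, each rt_rq carries an explicit back-pointer to its rq (lines 120-173); without it, the rt_rq is embedded directly in struct rq and the parent is recovered arithmetically with container_of() (lines 231-236). A self-contained sketch of the embedded-member case, simplified from the kernel's macro:

    #include <stddef.h>
    #include <stdio.h>

    /* Recover the address of the enclosing struct from the
     * address of one of its members. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rt_rq { int nr_running; };
    struct rq   { int cpu; struct rt_rq rt; };

    static struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
    {
            return container_of(rt_rq, struct rq, rt);
    }

    int main(void)
    {
            struct rq r = { .cpu = 3 };
            printf("%d\n", rq_of_rt_rq(&r.rt)->cpu);   /* prints 3 */
            return 0;
    }
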
core.c
   25  DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
   66  struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
   67          __acquires(rq->lock)
   69          struct rq *rq;
   74          rq = task_rq(p);
   75          raw_spin_lock(&rq->lock);
   76          if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
   77                  rq_pin_lock(rq, rf);
   78                  return rq;
   80          raw_spin_unlock(&rq->lock);
  [all …]
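
Aside: __task_rq_lock() is a textbook lock/recheck/retry loop. A task can migrate to another CPU's runqueue between reading task_rq(p) and acquiring that runqueue's lock, so the lock is trusted only once the pointer reads the same underneath it. A minimal pthreads sketch of the pattern (not the kernel code; it omits the migration check and the rq_pin_lock() bookkeeping):

    #include <pthread.h>

    struct rq   { pthread_mutex_t lock; /* ... */ };
    struct task { struct rq *rq; /* stable only under rq->lock */ };

    /* Lock the runqueue a task is currently on. */
    static struct rq *task_rq_lock(struct task *t)
    {
            for (;;) {
                    struct rq *rq = t->rq;            /* racy read; real code
                                                         would use READ_ONCE */
                    pthread_mutex_lock(&rq->lock);
                    if (rq == t->rq)                  /* still the same rq? */
                            return rq;                /* locked and stable */
                    pthread_mutex_unlock(&rq->lock);  /* migrated; retry */
            }
    }
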
/Linux-v4.19/drivers/scsi/esas2r/
esas2r_disc.c
  [continuation lines of two-line forward declarations, each taking a struct esas2r_request *rq parameter]
   49          struct esas2r_request *rq);
   51          struct esas2r_request *rq);
   55          struct esas2r_request *rq);
   59          struct esas2r_request *rq);
   61          struct esas2r_request *rq);
   63          struct esas2r_request *rq);
   65          struct esas2r_request *rq);
   67          struct esas2r_request *rq);
   69          struct esas2r_request *rq);
   71          struct esas2r_request *rq);
  [all …]
esas2r_vda.c
   59  static void clear_vda_request(struct esas2r_request *rq);
   62          struct esas2r_request *rq);
  [in esas2r_process_vda_ioctl()]
   67          struct esas2r_request *rq,
   93          clear_vda_request(rq);
   95          rq->vrq->scsi.function = vi->function;
   96          rq->interrupt_cb = esas2r_complete_vda_ioctl;
   97          rq->interrupt_cx = vi;
  112          rq->vrq->flash.length = cpu_to_le32(datalen);
  113          rq->vrq->flash.sub_func = vi->cmd.flash.sub_func;
  115          memcpy(rq->vrq->flash.data.file.file_name,
  [all …]
esas2r_int.c
  [in esas2r_handle_outbound_rsp_err()]
  173          struct esas2r_request *rq,
  181          if (unlikely(rq->req_stat != RS_SUCCESS)) {
  182                  memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp));
  184                  if (rq->req_stat == RS_ABORTED) {
  185                          if (rq->timeout > RQ_MAX_TIMEOUT)
  186                                  rq->req_stat = RS_TIMEOUT;
  187                  } else if (rq->req_stat == RS_SCSI_ERROR) {
  188                          u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat;
  197                          rq->req_stat = RS_SUCCESS;
  198                          rq->func_rsp.scsi_rsp.scsi_stat =
  [all …]
/Linux-v4.19/block/
blk-flush.c
   99  static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
  103          if (blk_rq_sectors(rq))
  107          if (rq->cmd_flags & REQ_PREFLUSH)
  110                  (rq->cmd_flags & REQ_FUA))
  116  static unsigned int blk_flush_cur_seq(struct request *rq)
  118          return 1 << ffz(rq->flush.seq);
  121  static void blk_flush_restore_request(struct request *rq)
  128          rq->bio = rq->biotail;
  131          rq->rq_flags &= ~RQF_FLUSH_SEQ;
  132          rq->end_io = rq->flush.saved_end_io;
  [all …]
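
Aside: blk_flush_cur_seq() encodes the flush state machine's progress as a bitmask in rq->flush.seq. Each completed stage sets its bit, so ffz() (find first zero) names the next stage, and 1 << ffz(seq) yields its flag. A user-space equivalent, assuming seq is never all-ones:

    /* Next flush stage: lowest clear bit of seq.
     * ffz(x) == __builtin_ctz(~x) for x != ~0u. */
    static unsigned int flush_cur_seq(unsigned int seq)
    {
            return 1u << __builtin_ctz(~seq);
    }
    /* seq 0b000 -> 0b001; seq 0b001 -> 0b010; seq 0b011 -> 0b100 */
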
blk-mq.c
   44  static int blk_mq_poll_stats_bkt(const struct request *rq)
   48          ddir = rq_data_dir(rq);
   49          bytes = blk_rq_bytes(rq);
  [in blk_mq_check_inflight()]
   93          struct request *rq, void *priv,
  103          if (rq->part == mi->part)
  [in blk_mq_check_inflight_rw()]
  119          struct request *rq, void *priv,
  124          if (rq->part == mi->part)
  125                  mi->inflight[rq_data_dir(rq)]++;
  [in blk_mq_rq_ctx_init()]
  281          struct request *rq = tags->static_rqs[tag];
  285          rq->tag = -1;
  [all …]
deadline-iosched.c
   54  deadline_rb_root(struct deadline_data *dd, struct request *rq)
   56          return &dd->sort_list[rq_data_dir(rq)];
   63  deadline_latter_request(struct request *rq)
   65          struct rb_node *node = rb_next(&rq->rb_node);
   74  deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
   76          struct rb_root *root = deadline_rb_root(dd, rq);
   78          elv_rb_add(root, rq);
   82  deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
   84          const int data_dir = rq_data_dir(rq);
   86          if (dd->next_rq[data_dir] == rq)
  [all …]
elevator.c
   52  #define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
   58  static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
   60          struct request_queue *q = rq->q;
   64          return e->type->ops.mq.allow_merge(q, rq, bio);
   66          return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);
   74  bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
   76          if (!blk_rq_merge_ok(rq, bio))
   79          if (!elv_iosched_allow_bio_merge(rq, bio))
  255  static inline void __elv_rqhash_del(struct request *rq)
  257          hash_del(&rq->hash);
  [all …]
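
Aside: rq_hash_key() hashes a request by its end sector (start position plus size), which is what makes back-merge lookup cheap: a bio can be appended to a request precisely when the request ends where the bio begins, so the elevator probes the hash with the bio's start sector. Schematically:

    /* A request covering sectors [pos, pos + sectors) can absorb a
     * bio as a back merge iff the bio starts at the request's end. */
    static int can_back_merge(unsigned long long rq_pos,
                              unsigned int rq_sectors,
                              unsigned long long bio_pos)
    {
            return rq_pos + rq_sectors == bio_pos;
    }
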
mq-deadline.c
   67  deadline_rb_root(struct deadline_data *dd, struct request *rq)
   69          return &dd->sort_list[rq_data_dir(rq)];
   76  deadline_latter_request(struct request *rq)
   78          struct rb_node *node = rb_next(&rq->rb_node);
   87  deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
   89          struct rb_root *root = deadline_rb_root(dd, rq);
   91          elv_rb_add(root, rq);
   95  deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
   97          const int data_dir = rq_data_dir(rq);
   99          if (dd->next_rq[data_dir] == rq)
  [all …]
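
Note: deadline-iosched.c and mq-deadline.c above are the legacy and blk-mq variants of the same I/O scheduler, hence the near-identical helpers. deadline_rb_root() keeps one sector-sorted red-black tree per data direction, and deadline_latter_request() is simply rb_next(), i.e. the next request in ascending sector order; that structure lets the scheduler dispatch in disk order while separate per-direction FIFOs (not visible in these excerpts) enforce expiry deadlines.
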
/Linux-v4.19/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c
   63  static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
   69          rq->stats->cqe_compress_blks++;
  103  static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
  112          if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
  117                  mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cq->decmprs_wqe_counter + 1);
  120  static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
  123          mlx5e_decompress_cqe(rq, cq, cqcc);
  128  static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
  144          mlx5e_decompress_cqe_no_hash(rq, cq, cqcc);
  145          rq->handle_rx_cqe(rq, &cq->title);
  [all …]
/Linux-v4.19/drivers/ide/
ide-pm.c
  [in generic_ide_suspend()]
   11          struct request *rq;
   22          rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
   23          ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
   24          rq->special = &rqpm;
   30          blk_execute_rq(drive->queue, NULL, rq, 0);
   31          ret = scsi_req(rq)->result ? -EIO : 0;
   32          blk_put_request(rq);
   43  static void ide_end_sync_rq(struct request *rq, blk_status_t error)
   45          complete(rq->end_io_data);
   48  static int ide_pm_execute_rq(struct request *rq)
  [all …]
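
Aside: generic_ide_suspend() shows the stock pattern for issuing a driver-private request synchronously through the block layer: allocate with blk_get_request(), mark its type and attach the payload through rq->special, submit and wait with blk_execute_rq(), pick the outcome out of scsi_req(rq)->result, then release with blk_put_request().
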
ide-io.c
   57  int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
   70          return blk_end_request(rq, error, nr_bytes);
  [in ide_complete_cmd()]
   78          struct request *rq = cmd->rq;
  105          if (rq && ata_taskfile_request(rq)) {
  106                  struct ide_cmd *orig_cmd = rq->special;
  [in ide_complete_rq()]
  118          struct request *rq = hwif->rq;
  125          if (blk_noretry_request(rq) && error)
  126                  nr_bytes = blk_rq_sectors(rq) << 9;
  128          rc = ide_end_rq(drive, rq, error, nr_bytes);
  130          hwif->rq = NULL;
  [all …]
/Linux-v4.19/drivers/gpu/drm/i915/
i915_request.c
  [in i915_fence_release()]
   74          struct i915_request *rq = to_request(fence);
   83          i915_sw_fence_fini(&rq->submit);
   85          kmem_cache_free(rq->i915->requests, rq);
  [in __retire_engine_request()]
  349          struct i915_request *rq)
  353                  rq->fence.context, rq->fence.seqno,
  354                  rq->global_seqno,
  357          GEM_BUG_ON(!i915_request_completed(rq));
  362          GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
  363          list_del_init(&rq->link);
  366          spin_lock(&rq->lock);
  [all …]
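
Aside: the GEM_BUG_ON(!list_is_first(...)) in __retire_engine_request() asserts an ordering invariant rather than handling a case: requests on an engine's timeline complete in submission order, so only the head of the list should ever be eligible for retirement.
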
/Linux-v4.19/drivers/gpu/drm/i915/selftests/
intel_lrc.c
  [in hws_address()]
   77          const struct i915_request *rq)
   79          return hws->node.start + seqno_offset(rq->fence.context);
  [in emit_recurse_batch()]
   83          struct i915_request *rq,
   86          struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
  107          err = i915_vma_move_to_active(vma, rq, 0);
  116          err = i915_vma_move_to_active(hws, rq, 0);
  128          *batch++ = lower_32_bits(hws_address(hws, rq));
  129          *batch++ = upper_32_bits(hws_address(hws, rq));
  130          *batch++ = rq->fence.seqno;
  141          err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
  [all …]
intel_hangcheck.c
  [in hws_address()]
  100          const struct i915_request *rq)
  102          return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
  [in emit_recurse_batch()]
  106          struct i915_request *rq)
  110          rq->gem_context->ppgtt ?
  111                  &rq->gem_context->ppgtt->vm :
  134          err = i915_vma_move_to_active(vma, rq, 0);
  143          err = i915_vma_move_to_active(hws, rq, 0);
  155          *batch++ = lower_32_bits(hws_address(hws, rq));
  156          *batch++ = upper_32_bits(hws_address(hws, rq));
  157          *batch++ = rq->fence.seqno;
  [all …]
/Linux-v4.19/drivers/net/ethernet/huawei/hinic/
hinic_hw_qp.c
   71  #define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask)
  [in hinic_rq_prepare_ctxt()]
  161          struct hinic_rq *rq, u16 global_qid)
  168          wq = rq->wq;
  187          HINIC_RQ_CTXT_PI_SET(rq->msix_entry, INTR);
  208          rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);
  209          rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);
  253  static int alloc_rq_skb_arr(struct hinic_rq *rq)
  255          struct hinic_wq *wq = rq->wq;
  258          skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb);
  259          rq->saved_skb = vzalloc(skb_arr_size);
  [all …]
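
Aside: RQ_MASKED_IDX() at the top of that excerpt is the usual power-of-two ring trick. When the queue depth is a power of two and mask = depth - 1, (idx) & mask substitutes for the costlier idx % depth, so indices can be incremented freely and wrapped only on use; with q_depth = 256 the mask is 0xff, and an index of 260 lands on slot 4.
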
