Searched refs:rq_depth (Results 1 – 14 of 14) sorted by relevance
/Linux-v5.4/block/
D | blk-wbt.c |
      237  struct rq_depth *rqd = &rwb->rq_depth;                      in latency_exceeded()
      290  struct rq_depth *rqd = &rwb->rq_depth;                      in rwb_trace_step()
      300  } else if (rwb->rq_depth.max_depth <= 2) {                  in calc_wb_limits()
      301  rwb->wb_normal = rwb->rq_depth.max_depth;                   in calc_wb_limits()
      304  rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;         in calc_wb_limits()
      305  rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;     in calc_wb_limits()
      311  if (!rq_depth_scale_up(&rwb->rq_depth))                     in scale_up()
      321  if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))    in scale_down()
      330  struct rq_depth *rqd = &rwb->rq_depth;                      in rwb_arm_timer()
      355  struct rq_depth *rqd = &rwb->rq_depth;                      in wb_timer_fn()
      [all …]
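Pieced together from the calc_wb_limits() hits above, wbt derives its two writeback thresholds from rq_depth.max_depth: roughly half the depth for normal writeback and a quarter for background writeback. A minimal sketch of that function; the min_lat_nsec branch and the small-depth wb_background value are assumptions, not visible in the hits:

    static void calc_wb_limits(struct rq_wb *rwb)
    {
            if (rwb->min_lat_nsec == 0) {
                    /* assumed: throttling disabled, so no limits at all */
                    rwb->wb_normal = rwb->wb_background = 0;
            } else if (rwb->rq_depth.max_depth <= 2) {
                    /* tiny queues: normal writes get the whole depth */
                    rwb->wb_normal = rwb->rq_depth.max_depth;
                    rwb->wb_background = 1;    /* assumed */
            } else {
                    /* ~1/2 of max_depth for normal, ~1/4 for background */
                    rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
                    rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
            }
    }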
D | blk-iolatency.c |
      138  struct rq_depth rq_depth;                                                  member
      277  return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);                  in iolat_acquire_inflight()
      369  unsigned long old = iolat->rq_depth.max_depth;                             in scale_change()
      381  iolat->rq_depth.max_depth = old;                                           in scale_change()
      386  iolat->rq_depth.max_depth = max(old, 1UL);                                 in scale_change()
      444  if (iolat->rq_depth.max_depth == 1 && direction < 0) {                     in check_scale_change()
      452  iolat->rq_depth.max_depth = UINT_MAX;                                      in check_scale_change()
      507  if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {    in iolatency_record_time()
      903  if (iolat->rq_depth.max_depth == UINT_MAX)                                 in iolatency_ssd_stat()
      910  iolat->rq_depth.max_depth);                                                in iolatency_ssd_stat()
      [all …]
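These hits suggest blk-iolatency treats max_depth == UINT_MAX as "unthrottled" (lines 452/507/903) and never lets the depth drop below one request when scaling down. A hedged sketch of the shrink step in scale_change(); the halving is an assumption consistent with the max(old, 1UL) hit:

    /* sketch: shrink a cgroup's allowed inflight depth under latency pressure */
    unsigned long old = iolat->rq_depth.max_depth;

    old >>= 1;                                   /* assumed: halve per step */
    iolat->rq_depth.max_depth = max(old, 1UL);   /* floor of one request */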
D | blk-rq-qos.h |
       50  struct rq_depth {                                                    struct
      130  bool rq_depth_scale_up(struct rq_depth *rqd);
      131  bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
      132  bool rq_depth_calc_max_depth(struct rq_depth *rqd);
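For reference, the definition behind the "struct rq_depth {" hit at line 50 looks roughly like the sketch below in v5.4; the exact field set is recalled from that release, so treat it as an approximation:

    struct rq_depth {
            unsigned int max_depth;      /* current inflight limit */

            int scale_step;              /* net number of QoS scale-down steps */
            bool scaled_max;             /* last scale up hit the ceiling */

            unsigned int queue_depth;    /* depth reported by the device */
            unsigned int default_depth;  /* fallback when queue_depth is unknown */
    };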
D | blk-wbt.h |
       69  struct rq_depth rq_depth;    member
D | blk-rq-qos.c |
      116  bool rq_depth_calc_max_depth(struct rq_depth *rqd)                   in rq_depth_calc_max_depth()
      164  bool rq_depth_scale_up(struct rq_depth *rqd)                         in rq_depth_scale_up()
      183  bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)   in rq_depth_scale_down()
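Both scalers return a bool telling the caller whether anything changed, which is how the wbt call sites at lines 311/321 above decide whether to keep throttling. A sketch of the scale-down side, assuming the v5.4 shape (adjust the step, then recompute max_depth):

    bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
    {
            /* already at the floor: nothing left to take away */
            if (rqd->max_depth == 1)
                    return false;

            /* assumed: a hard throttle snaps a scaled-up step back to neutral */
            if (rqd->scale_step < 0 && hard_throttle)
                    rqd->scale_step = 0;
            else
                    rqd->scale_step++;

            rqd->scaled_max = false;
            rq_depth_calc_max_depth(rqd);   /* re-derive max_depth from the step */
            return true;
    }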
/Linux-v5.4/net/9p/
D | trans_rdma.c |
       88  int rq_depth;                                            member
      126  int rq_depth;                                            member
      158  if (rdma->rq_depth != P9_RDMA_RQ_DEPTH)                  in p9_rdma_show_options()
      159  seq_printf(m, ",rq=%u", rdma->rq_depth);                 in p9_rdma_show_options()
      183  opts->rq_depth = P9_RDMA_RQ_DEPTH;                       in parse_opts()
      220  opts->rq_depth = option;                                 in parse_opts()
      233  opts->rq_depth = max(opts->rq_depth, opts->sq_depth);    in parse_opts()
      573  rdma->rq_depth = opts->rq_depth;                         in alloc_rdma()
      578  sema_init(&rdma->rq_sem, rdma->rq_depth);                in alloc_rdma()
      689  opts.sq_depth + opts.rq_depth + 1,                       in rdma_create_trans()
      [all …]
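In 9p's RDMA transport, rq_depth starts at the P9_RDMA_RQ_DEPTH default, can be overridden by the "rq=<n>" mount option, is clamped to at least sq_depth, and then sizes a counting semaphore that gates posted receive buffers. A condensed sketch of the hand-off from parse_opts() to alloc_rdma(); everything not shown in the hits is elided:

    /* sketch: apply the parsed "rq=<n>" option (condensed from the hits above) */
    opts->rq_depth = max(opts->rq_depth, opts->sq_depth); /* >= send slots */

    rdma->rq_depth = opts->rq_depth;
    sema_init(&rdma->rq_sem, rdma->rq_depth);  /* caps outstanding recv buffers */

The completion queue is then created with room for opts.sq_depth + opts.rq_depth + 1 entries: one slot for every possible send or receive completion, plus a spare.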
/Linux-v5.4/net/sunrpc/xprtrdma/
D | svc_rdma_transport.c |
      395  unsigned int ctxts, rq_depth;                                         in svc_rdma_accept() local
      432  rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests;    in svc_rdma_accept()
      433  if (rq_depth > dev->attrs.max_qp_wr) {                                in svc_rdma_accept()
      436  rq_depth = dev->attrs.max_qp_wr;                                      in svc_rdma_accept()
      437  newxprt->sc_max_requests = rq_depth - 2;                              in svc_rdma_accept()
      443  newxprt->sc_sq_depth = rq_depth + ctxts;                              in svc_rdma_accept()
      463  ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE);           in svc_rdma_accept()
      475  qp_attr.cap.max_recv_wr = rq_depth;                                   in svc_rdma_accept()
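svc_rdma_accept() derives the receive depth from the negotiated request limits, then clamps it to what the HCA can actually queue. A condensed sketch of just that sizing logic; the surrounding transport setup is elided and the reason for the "- 2" reservation is an assumption:

    /* sketch: size the receive queue, clamped to the device limit
     * (condensed from svc_rdma_accept()) */
    rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests;
    if (rq_depth > dev->attrs.max_qp_wr) {
            /* device cannot queue that many recv WRs: shrink and re-derive */
            rq_depth = dev->attrs.max_qp_wr;
            newxprt->sc_max_requests = rq_depth - 2;  /* assumed reservation */
    }
    newxprt->sc_sq_depth = rq_depth + ctxts;  /* send depth follows recv depth */

The same rq_depth then sizes the CQ (line 463) and qp_attr.cap.max_recv_wr (line 475), so the credit accounting, CQ, and QP stay in agreement.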
/Linux-v5.4/drivers/net/ethernet/huawei/hinic/
D | hinic_hw_dev.c |
      286  static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth,   in set_hw_ioctxt() argument
      306  hw_ioctxt.rq_depth = ilog2(rq_depth);                                        in set_hw_ioctxt()
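The hinic firmware interface encodes the depth as a log2 exponent rather than a raw entry count, hence the ilog2(). A tiny illustration with a hypothetical size:

    /* the device is told the exponent, not the entry count */
    unsigned int rq_depth = 4096;           /* hypothetical queue size */
    hw_ioctxt.rq_depth = ilog2(rq_depth);   /* stores 12, since 2^12 == 4096 */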
D | hinic_hw_dev.h |
      157  u16 rq_depth;    member
D | hinic_port.h |
      211  u32 rq_depth;    member
D | hinic_port.c |
      475  rq_num.rq_depth = ilog2(HINIC_SQ_DEPTH);    in hinic_set_max_qnum()
/Linux-v5.4/drivers/infiniband/hw/efa/
D | efa_com_cmd.h |
       26  u32 rq_depth;    member
D | efa_com_cmd.c |
       38  params->rq_depth;    in efa_com_create_qp()
D | efa_verbs.c |
      712  create_qp_params.rq_depth = init_attr->cap.max_recv_wr;    in efa_create_qp()
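In the EFA driver the value simply flows down from the verbs layer: the max_recv_wr that the consumer requested in ib_qp_init_attr becomes rq_depth in the driver's create-QP parameters, which efa_com_create_qp() then carries into the admin command. A condensed sketch of that hand-off:

    /* sketch: verbs request -> create-QP parameters (from efa_create_qp()) */
    create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
    /* efa_com_create_qp() later reads params->rq_depth into the device command */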