Searched refs:rq_depth (Results 1 – 9 of 9) sorted by relevance
/Linux-v4.19/block/
D | blk-wbt.c |
    236  struct rq_depth *rqd = &rwb->rq_depth;                    in latency_exceeded()
    289  struct rq_depth *rqd = &rwb->rq_depth;                    in rwb_trace_step()
    299  } else if (rwb->rq_depth.max_depth <= 2) {                in calc_wb_limits()
    300  rwb->wb_normal = rwb->rq_depth.max_depth;                 in calc_wb_limits()
    303  rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;       in calc_wb_limits()
    304  rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;   in calc_wb_limits()
    310  rq_depth_scale_up(&rwb->rq_depth);                        in scale_up()
    319  rq_depth_scale_down(&rwb->rq_depth, hard_throttle);       in scale_down()
    327  struct rq_depth *rqd = &rwb->rq_depth;                    in rwb_arm_timer()
    352  struct rq_depth *rqd = &rwb->rq_depth;                    in wb_timer_fn()
    [all …]
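The calc_wb_limits() hits above show how writeback throttling carves two limits out of rq_depth.max_depth: normal writeback gets about half the depth, background about a quarter, both rounded up. A minimal standalone sketch of that arithmetic, assuming the max_depth <= 2 branch leaves background writeback a single slot (only lines 299-304 are visible in the matches):

    #include <stdio.h>

    struct wb_limits {
        unsigned int wb_normal;
        unsigned int wb_background;
    };

    /* Split max_depth into "normal" and "background" writeback budgets,
     * mirroring lines 299-304 of blk-wbt.c above. */
    static struct wb_limits calc_wb_limits(unsigned int max_depth)
    {
        struct wb_limits l;

        if (max_depth <= 2) {
            l.wb_normal = max_depth;   /* tiny queue: use everything */
            l.wb_background = 1;       /* assumption: keep one slot */
        } else {
            l.wb_normal = (max_depth + 1) / 2;      /* ~half, rounded up */
            l.wb_background = (max_depth + 3) / 4;  /* ~quarter, rounded up */
        }
        return l;
    }

    int main(void)
    {
        struct wb_limits l = calc_wb_limits(16);
        printf("normal=%u background=%u\n", l.wb_normal, l.wb_background);
        return 0;   /* prints normal=8 background=4 */
    }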
D | blk-rq-qos.h |
     38  struct rq_depth {                                         struct
     97  void rq_depth_scale_up(struct rq_depth *rqd);
     98  void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
     99  bool rq_depth_calc_max_depth(struct rq_depth *rqd);
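A header-style sketch of what this file declares. Only max_depth is confirmed by the matches elsewhere in these results; the remaining fields are assumptions about the state the three scaling helpers would plausibly track:

    #include <stdbool.h>

    struct rq_depth {
        unsigned int max_depth;      /* confirmed: current allowed depth */

        int scale_step;              /* assumption: throttling step counter */
        bool scaled_max;             /* assumption: already at the ceiling */
        unsigned int queue_depth;    /* assumption: device queue depth */
        unsigned int default_depth;  /* assumption: unthrottled depth */
    };

    void rq_depth_scale_up(struct rq_depth *rqd);
    void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
    bool rq_depth_calc_max_depth(struct rq_depth *rqd);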
D | blk-iolatency.c |
    122  struct rq_depth rq_depth;                                 member
    184  return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);  in iolatency_may_queue()
    300  unsigned long old = iolat->rq_depth.max_depth;            in scale_change()
    314  iolat->rq_depth.max_depth = old;                          in scale_change()
    320  iolat->rq_depth.max_depth = max(old, 1UL);                in scale_change()
    378  if (iolat->rq_depth.max_depth == 1 && direction < 0) {    in check_scale_change()
    386  iolat->rq_depth.max_depth = UINT_MAX;                     in check_scale_change()
    465  if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {  in iolatency_record_time()
    845  if (iolat->rq_depth.max_depth == UINT_MAX)                in iolatency_pd_stat()
    850  iolat->rq_depth.max_depth, avg_lat, cur_win);             in iolatency_pd_stat()
    [all …]
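Three behaviors can be read straight off these matches: UINT_MAX on max_depth marks an unthrottled cgroup (lines 386 and 845), a group already at depth 1 refuses to scale down further (line 378), and a downward step clamps to max(old, 1UL) (line 320). A hedged userspace sketch of that policy; the scale-up path is an assumption added for illustration:

    #include <limits.h>
    #include <stdio.h>

    /* direction > 0 relaxes the limit, direction < 0 tightens it */
    static unsigned long scale_change(unsigned long max_depth, int direction,
                                      unsigned long queue_depth)
    {
        unsigned long old = max_depth;

        if (direction > 0) {
            if (old == UINT_MAX)
                return UINT_MAX;        /* already unthrottled */
            old *= 2;                   /* assumption: double on the way up */
            if (old >= queue_depth)
                old = UINT_MAX;         /* lift the limit entirely */
        } else {
            if (old == UINT_MAX)
                old = queue_depth;      /* start throttling from full depth */
            old >>= 1;
            old = old > 1 ? old : 1;    /* never below one in-flight request */
        }
        return old;
    }

    int main(void)
    {
        unsigned long d = UINT_MAX;      /* cgroup starts unthrottled */
        d = scale_change(d, -1, 128);    /* latency exceeded: 64 */
        d = scale_change(d, -1, 128);    /* still exceeded: 32 */
        d = scale_change(d, +1, 128);    /* recovered: back to 64 */
        printf("max_depth=%lu\n", d);
        return 0;
    }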
D | blk-rq-qos.c |
    104  bool rq_depth_calc_max_depth(struct rq_depth *rqd)        in rq_depth_calc_max_depth()
    151  void rq_depth_scale_up(struct rq_depth *rqd)              in rq_depth_scale_up()
    168  void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)  in rq_depth_scale_down()
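A hedged sketch of how these helpers could fit together, reusing the field assumptions from the blk-rq-qos.h entry above: scale_step goes negative to grant extra depth and positive to throttle, and calc_max_depth re-derives max_depth from it. The halving/doubling formula is an illustrative stand-in, not the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    struct rq_depth {
        unsigned int max_depth;
        int scale_step;          /* <0 grants extra depth, >0 throttles */
        bool scaled_max;
        unsigned int queue_depth;
    };

    bool rq_depth_calc_max_depth(struct rq_depth *rqd)
    {
        unsigned int depth = rqd->queue_depth;
        int i;

        /* stand-in formula: halve per positive step, double per
         * negative step, capped at twice the device depth */
        for (i = 0; i < rqd->scale_step; i++)
            depth = depth > 1 ? depth / 2 : 1;
        for (i = 0; i > rqd->scale_step; i--)
            depth = depth < 2 * rqd->queue_depth ? depth * 2 : depth;

        rqd->max_depth = depth;
        return depth >= 2 * rqd->queue_depth;   /* hit the ceiling? */
    }

    void rq_depth_scale_up(struct rq_depth *rqd)
    {
        if (rqd->scaled_max)             /* already at the ceiling */
            return;
        rqd->scale_step--;
        rqd->scaled_max = rq_depth_calc_max_depth(rqd);
    }

    void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
    {
        if (rqd->max_depth == 1)         /* cannot throttle further */
            return;
        if (rqd->scale_step < 0 && hard_throttle)
            rqd->scale_step = 0;         /* snap back to the base depth */
        else
            rqd->scale_step++;
        rqd->scaled_max = false;
        rq_depth_calc_max_depth(rqd);
    }

    int main(void)
    {
        struct rq_depth rqd = { .max_depth = 32, .queue_depth = 32 };

        rq_depth_scale_down(&rqd, false);   /* 32 -> 16 */
        rq_depth_scale_down(&rqd, false);   /* 16 -> 8  */
        rq_depth_scale_up(&rqd);            /* 8 -> 16  */
        printf("max_depth=%u step=%d\n", rqd.max_depth, rqd.scale_step);
        return 0;
    }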
D | blk-wbt.h |
     69  struct rq_depth rq_depth;                                 member
/Linux-v4.19/net/9p/ |
D | trans_rdma.c |
    103  int rq_depth;                                             member
    141  int rq_depth;                                             member
    173  if (rdma->rq_depth != P9_RDMA_RQ_DEPTH)                   in p9_rdma_show_options()
    174  seq_printf(m, ",rq=%u", rdma->rq_depth);                  in p9_rdma_show_options()
    198  opts->rq_depth = P9_RDMA_RQ_DEPTH;                        in parse_opts()
    235  opts->rq_depth = option;                                  in parse_opts()
    248  opts->rq_depth = max(opts->rq_depth, opts->sq_depth);     in parse_opts()
    587  rdma->rq_depth = opts->rq_depth;                          in alloc_rdma()
    592  sema_init(&rdma->rq_sem, rdma->rq_depth);                 in alloc_rdma()
    703  opts.sq_depth + opts.rq_depth + 1,                        in rdma_create_trans()
    [all …]
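From the parse_opts() matches: rq_depth starts at the P9_RDMA_RQ_DEPTH default, an rq=<n> mount option can override it, and line 248 then raises it to at least sq_depth so every request that may be sent has a posted receive. A simplified userspace sketch of that flow; the default values and the option plumbing are assumptions:

    #include <stdio.h>
    #include <stdlib.h>

    #define P9_RDMA_SQ_DEPTH 32   /* assumption: illustrative defaults */
    #define P9_RDMA_RQ_DEPTH 32

    struct p9_rdma_opts {
        int sq_depth;
        int rq_depth;
    };

    static void parse_opts(struct p9_rdma_opts *opts, const char *rq_opt)
    {
        opts->sq_depth = P9_RDMA_SQ_DEPTH;
        opts->rq_depth = P9_RDMA_RQ_DEPTH;

        if (rq_opt)                         /* user passed rq=<n> */
            opts->rq_depth = atoi(rq_opt);

        /* mirror line 248: never fewer receives than sends */
        if (opts->rq_depth < opts->sq_depth)
            opts->rq_depth = opts->sq_depth;
    }

    int main(void)
    {
        struct p9_rdma_opts opts;

        parse_opts(&opts, "16");
        printf("sq=%d rq=%d\n", opts.sq_depth, opts.rq_depth);  /* rq=32 */
        return 0;
    }

Line 592's sema_init(&rdma->rq_sem, rdma->rq_depth) then turns that depth into a counting semaphore, so at most rq_depth receives are ever outstanding.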
/Linux-v4.19/net/sunrpc/xprtrdma/ |
D | svc_rdma_transport.c |
    450  unsigned int ctxts, rq_depth;                             in svc_rdma_accept() local
    488  rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests;  in svc_rdma_accept()
    489  if (rq_depth > dev->attrs.max_qp_wr) {                    in svc_rdma_accept()
    492  rq_depth = dev->attrs.max_qp_wr;                          in svc_rdma_accept()
    493  newxprt->sc_max_requests = rq_depth - 2;                  in svc_rdma_accept()
    499  newxprt->sc_sq_depth = rq_depth + ctxts;                  in svc_rdma_accept()
    518  newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, rq_depth,   in svc_rdma_accept()
    531  qp_attr.cap.max_recv_wr = rq_depth;                       in svc_rdma_accept()
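The svc_rdma_accept() matches give the receive-queue sizing rule: forward plus backchannel request budgets, clamped to the device's max_qp_wr, with the send queue sized on top of that for ctxts extra work requests. A standalone sketch of just that arithmetic with illustrative numbers; the "- 2" on line 493 presumably re-reserves the backchannel slots after clamping:

    #include <stdio.h>

    int main(void)
    {
        unsigned int sc_max_requests = 64;     /* forward request budget */
        unsigned int sc_max_bc_requests = 2;   /* backchannel budget */
        unsigned int max_qp_wr = 50;           /* assumption: device limit */
        unsigned int ctxts = 4;                /* assumption: extra send ctxts */
        unsigned int rq_depth, sc_sq_depth;

        rq_depth = sc_max_requests + sc_max_bc_requests;
        if (rq_depth > max_qp_wr) {
            /* the device cannot post that many recv WRs: shrink */
            rq_depth = max_qp_wr;
            sc_max_requests = rq_depth - 2;
        }
        sc_sq_depth = rq_depth + ctxts;

        /* rq_depth then sizes the recv CQ and qp_attr.cap.max_recv_wr */
        printf("rq_depth=%u sq_depth=%u max_requests=%u\n",
               rq_depth, sc_sq_depth, sc_max_requests);
        return 0;
    }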
/Linux-v4.19/drivers/net/ethernet/huawei/hinic/ |
D | hinic_hw_dev.h |
    110  u16 rq_depth;                                             member
D | hinic_hw_dev.c |
    298  static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth,  in set_hw_ioctxt() argument
    316  hw_ioctxt.rq_depth = ilog2(rq_depth);                     in set_hw_ioctxt()
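Line 316 shows the hardware I/O context storing the depth as its log2 exponent, which implies rq_depth must be a power of two. A userspace sketch with a portable stand-in for the kernel's ilog2():

    #include <stdio.h>

    /* stand-in for the kernel's ilog2(): floor(log2(v)) */
    static unsigned int ilog2(unsigned int v)
    {
        unsigned int log = 0;

        while (v >>= 1)
            log++;
        return log;
    }

    int main(void)
    {
        unsigned int rq_depth = 4096;   /* assumption: a typical depth */
        unsigned int encoded = ilog2(rq_depth);

        printf("rq_depth=%u -> hw_ioctxt.rq_depth=%u\n", rq_depth, encoded);
        return 0;   /* prints 4096 -> 12 */
    }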