/Linux-v5.10/drivers/nvme/target/
  fabrics-cmd.c
      110  u16 sqsize = le16_to_cpu(c->sqsize);   in nvmet_install_queue() (local)
      120  if (!sqsize) {   in nvmet_install_queue()
      122  req->error_loc = offsetof(struct nvmf_connect_command, sqsize);   in nvmet_install_queue()
      128  nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);   in nvmet_install_queue()
      129  nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);   in nvmet_install_queue()
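  The hits in nvmet_install_queue() show the fabrics convention that the Connect command's sqsize is 0's-based: a value of zero is rejected, and the CQ and SQ are set up with sqsize + 1 entries. A minimal standalone sketch of that conversion follows; it is a userspace model with illustrative names, not the kernel API.

    /* Model of the 0's-based sqsize check and conversion seen above. */
    #include <stdint.h>
    #include <stdio.h>

    /* sqsize: value from the Connect command, already byte-swapped to CPU order.
     * Returns the usable queue depth, or 0 if the wire value is invalid. */
    static uint32_t queue_depth_from_sqsize(uint16_t sqsize)
    {
            if (sqsize == 0)
                    return 0;               /* rejected, as nvmet_install_queue() does */
            return (uint32_t)sqsize + 1;    /* 0's-based -> actual entry count */
    }

    int main(void)
    {
            printf("sqsize=31 -> depth %u\n", queue_depth_from_sqsize(31));
            printf("sqsize=0  -> depth %u (invalid)\n", queue_depth_from_sqsize(0));
            return 0;
    }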
  trace.c
      141  u16 sqsize = get_unaligned_le16(spc + 4);   in nvmet_trace_fabrics_connect() (local)
      146  recfmt, qid, sqsize, cattr, kato);   in nvmet_trace_fabrics_connect()
  fc.c
      132  u16 sqsize;   (member)
      631  for (i = 0; i < queue->sqsize; fod++, i++) {   in nvmet_fc_prep_fcp_iodlist()
      666  for (i = 0; i < queue->sqsize; fod++, i++) {   in nvmet_fc_destroy_fcp_iodlist()
      790  u16 qid, u16 sqsize)   in nvmet_fc_alloc_target_queue() (argument)
      799  queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);   in nvmet_fc_alloc_target_queue()
      813  queue->sqsize = sqsize;   in nvmet_fc_alloc_target_queue()
      900  for (i = 0; i < queue->sqsize; fod++, i++) {   in nvmet_fc_delete_target_queue()
     1666  be16_to_cpu(rqst->assoc_cmd.sqsize)))   in nvmet_fc_ls_create_association()
     1677  be16_to_cpu(rqst->assoc_cmd.sqsize));   in nvmet_fc_ls_create_association()
     1755  be16_to_cpu(rqst->connect_cmd.sqsize)))   in nvmet_fc_ls_create_connection()
      [all …]
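  nvmet_fc_alloc_target_queue() allocates one queue structure with a trailing array of per-slot contexts (fod), one element per SQ entry, via struct_size(). Below is a userspace model of that allocation pattern; the struct layout and names are simplified stand-ins, and plain size arithmetic replaces the kernel's overflow-checked struct_size().

    #include <stdint.h>
    #include <stdlib.h>

    struct fcp_iod {                /* stand-in for the per-request context */
            uint32_t state;
    };

    struct fc_tgt_queue {
            uint16_t qid;
            uint16_t sqsize;
            struct fcp_iod fod[];   /* one entry per submission-queue slot */
    };

    static struct fc_tgt_queue *alloc_tgt_queue(uint16_t qid, uint16_t sqsize)
    {
            struct fc_tgt_queue *q;

            /* The kernel hit uses struct_size(), which checks this
             * multiplication for overflow; plain arithmetic here for brevity. */
            q = calloc(1, sizeof(*q) + (size_t)sqsize * sizeof(q->fod[0]));
            if (!q)
                    return NULL;
            q->qid = qid;
            q->sqsize = sqsize;
            return q;
    }

    int main(void)
    {
            struct fc_tgt_queue *q = alloc_tgt_queue(1, 128);

            free(q);
            return 0;
    }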
  loop.c
      589  ctrl->ctrl.sqsize = opts->queue_size - 1;   in nvme_loop_create_ctrl()
/Linux-v5.10/include/linux/
  nvme-fc.h
      260  __be16 sqsize;   (member)
      288  __be16 sqsize;   (member)
  nvme.h
     1328  __le16 sqsize;   (member)
/Linux-v5.10/drivers/nvme/host/
  trace.c
      200  u16 sqsize = get_unaligned_le16(spc + 4);   in nvme_trace_fabrics_connect() (local)
      205  recfmt, qid, sqsize, cattr, kato);   in nvme_trace_fabrics_connect()
  rdma.c
      768  ctrl->ctrl.sqsize + 1);   in nvme_rdma_alloc_io_queues()
      806  set->queue_depth = nctrl->sqsize + 1;   in nvme_rdma_alloc_tagset()
     1100  if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) {   in nvme_rdma_setup_ctrl()
     1103  ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);   in nvme_rdma_setup_ctrl()
     1106  if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {   in nvme_rdma_setup_ctrl()
     1109  ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);   in nvme_rdma_setup_ctrl()
     1110  ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;   in nvme_rdma_setup_ctrl()
     1895  priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);   in nvme_rdma_route_resolved()
     2357  ctrl->ctrl.sqsize = opts->queue_size - 1;   in nvme_rdma_create_ctrl()
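  The setup path in nvme_rdma_setup_ctrl() (mirrored in tcp.c and fc.c below) warns the user-requested queue_size down to what the controller advertises and clamps the 0's-based sqsize so that sqsize + 1 never exceeds maxcmd. A rough userspace model of that check, with illustrative field names:

    #include <stdint.h>
    #include <stdio.h>

    struct ctrl_limits {
            uint16_t sqsize;   /* 0's-based submission queue size */
            uint32_t maxcmd;   /* max outstanding commands reported by the target */
    };

    static void cap_queue_size(struct ctrl_limits *c, uint32_t requested_queue_size)
    {
            if (requested_queue_size > (uint32_t)c->sqsize + 1)
                    fprintf(stderr, "queue_size %u > ctrl sqsize %u, clamping\n",
                            (unsigned)requested_queue_size, (unsigned)(c->sqsize + 1));

            if ((uint32_t)c->sqsize + 1 > c->maxcmd) {
                    fprintf(stderr, "sqsize %u > maxcmd %u, clamping\n",
                            (unsigned)(c->sqsize + 1), (unsigned)c->maxcmd);
                    c->sqsize = c->maxcmd - 1;      /* keep the value 0's-based */
            }
    }

    int main(void)
    {
            struct ctrl_limits c = { .sqsize = 127, .maxcmd = 64 };

            cap_queue_size(&c, 256);
            printf("effective depth: %u\n", (unsigned)(c.sqsize + 1));   /* 64 */
            return 0;
    }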
  tcp.c
     1576  set->queue_depth = nctrl->sqsize + 1;   in nvme_tcp_alloc_tagset()
     1664  ctrl->sqsize + 1);   in __nvme_tcp_alloc_io_queues()
     1955  if (opts->queue_size > ctrl->sqsize + 1)   in nvme_tcp_setup_ctrl()
     1958  opts->queue_size, ctrl->sqsize + 1);   in nvme_tcp_setup_ctrl()
     1960  if (ctrl->sqsize + 1 > ctrl->maxcmd) {   in nvme_tcp_setup_ctrl()
     1963  ctrl->sqsize + 1, ctrl->maxcmd);   in nvme_tcp_setup_ctrl()
     1964  ctrl->sqsize = ctrl->maxcmd - 1;   in nvme_tcp_setup_ctrl()
     2444  ctrl->ctrl.sqsize = opts->queue_size - 1;   in nvme_tcp_create_ctrl()
  fc.c
     1215  assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);   in nvme_fc_connect_admin_queue()
     1337  conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);   in nvme_fc_connect_queue()
     2877  ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);   in nvme_fc_create_io_queues()
     2881  ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);   in nvme_fc_create_io_queues()
     2932  ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);   in nvme_fc_recreate_io_queues()
     2936  ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);   in nvme_fc_recreate_io_queues()
     3102  if (opts->queue_size > ctrl->ctrl.sqsize + 1) {   in nvme_fc_create_association()
     3107  opts->queue_size, ctrl->ctrl.sqsize + 1);   in nvme_fc_create_association()
     3108  opts->queue_size = ctrl->ctrl.sqsize + 1;   in nvme_fc_create_association()
     3460  ctrl->ctrl.sqsize = opts->queue_size - 1;   in nvme_fc_init_ctrl()
  fabrics.c
      378  cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);   in nvmf_connect_admin_queue()
      448  cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);   in nvmf_connect_io_queue()
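  nvmf_connect_admin_queue() and nvmf_connect_io_queue() fill the Connect command's little-endian sqsize field with a 0's-based value: the fixed admin queue depth minus one, or the controller's already-0's-based sqsize. A hedged sketch of that encoding, using htole16() from <endian.h> in place of cpu_to_le16() and a simplified, hypothetical command struct (NVME_AQ_DEPTH is 32 in this tree):

    #include <stdint.h>
    #include <endian.h>

    #define AQ_DEPTH 32                     /* matches NVME_AQ_DEPTH in v5.10 */

    struct connect_cmd {
            uint16_t sqsize_le;             /* wire format: little endian, 0's-based */
    };

    static void fill_admin_connect(struct connect_cmd *cmd)
    {
            cmd->sqsize_le = htole16(AQ_DEPTH - 1);
    }

    static void fill_io_connect(struct connect_cmd *cmd, uint16_t ctrl_sqsize)
    {
            /* ctrl->sqsize is stored 0's-based, so no further adjustment here */
            cmd->sqsize_le = htole16(ctrl_sqsize);
    }

    int main(void)
    {
            struct connect_cmd admin, io;

            fill_admin_connect(&admin);
            fill_io_connect(&io, 127);      /* e.g. a 128-entry I/O queue */
            return 0;
    }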
  nvme.h
      277  u16 sqsize;   (member)
  core.c
     2973  ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);   in nvme_init_identify()
     3434  nvme_show_int_function(sqsize);
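  nvme_init_identify() clamps sqsize to CAP.MQES, the maximum-queue-entries field in bits 15:0 of the controller's CAP register, which is itself 0's-based. A small model of that clamp; the helper names are illustrative.

    #include <stdint.h>

    static inline uint16_t cap_mqes(uint64_t cap)
    {
            return (uint16_t)(cap & 0xffff);        /* CAP.MQES, 0's-based */
    }

    static inline uint16_t clamp_sqsize(uint64_t cap, uint16_t sqsize)
    {
            uint16_t mqes = cap_mqes(cap);

            return sqsize < mqes ? sqsize : mqes;   /* min_t(u16, ...) equivalent */
    }

    int main(void)
    {
            uint64_t cap = 0x3ff;                   /* MQES = 1023 -> depth 1024 */

            return clamp_sqsize(cap, 2047) == 1023 ? 0 : 1;
    }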
  pci.c
     2353  dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */   in nvme_pci_enable()
/Linux-v5.10/drivers/infiniband/hw/i40iw/
  i40iw_main.c
      556  u32 sqsize = I40IW_CQP_SW_SQSIZE_2048;   in i40iw_create_cqp() (local)
      564  cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);   in i40iw_create_cqp()
      567  cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);   in i40iw_create_cqp()
      576  (sizeof(struct i40iw_cqp_sq_wqe) * sqsize),   in i40iw_create_cqp()
      588  cqp_init_info.sq_size = sqsize;   in i40iw_create_cqp()
      611  for (i = 0; i < sqsize; i++) {   in i40iw_create_cqp()
/Linux-v5.10/drivers/infiniband/hw/cxgb4/
  qp.c
     2120  unsigned int sqsize, rqsize = 0;   in c4iw_create_qp() (local)
     2152  sqsize = attrs->cap.max_send_wr + 1;   in c4iw_create_qp()
     2153  if (sqsize < 8)   in c4iw_create_qp()
     2154  sqsize = 8;   in c4iw_create_qp()
     2166  qhp->wq.sq.size = sqsize;   in c4iw_create_qp()
     2168  (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *   in c4iw_create_qp()
     2192  attrs->cap.max_send_wr = sqsize - 1;   in c4iw_create_qp()
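  c4iw_create_qp() derives the send-queue size from the requested max_send_wr: one extra slot is added, a minimum of 8 entries is enforced, and max_send_wr is reported back as sqsize - 1 so the reserved slot stays hidden from the caller. A standalone sketch of that sizing logic with illustrative names:

    #include <stdint.h>

    struct qp_caps {
            uint32_t max_send_wr;
    };

    static unsigned int size_send_queue(struct qp_caps *cap)
    {
            unsigned int sqsize = cap->max_send_wr + 1;

            if (sqsize < 8)
                    sqsize = 8;             /* hardware minimum seen in the hit above */

            cap->max_send_wr = sqsize - 1;  /* report usable WRs back to the caller */
            return sqsize;
    }

    int main(void)
    {
            struct qp_caps cap = { .max_send_wr = 3 };
            unsigned int sqsize = size_send_queue(&cap);

            /* sqsize is now 8 (minimum), cap.max_send_wr reported back as 7 */
            return (sqsize == 8 && cap.max_send_wr == 7) ? 0 : 1;
    }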
/Linux-v5.10/drivers/infiniband/sw/rdmavt/
  qp.c
     1080  size_t sqsize;   in rvt_create_qp() (local)
     1105  sqsize =   in rvt_create_qp()
     1119  swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);   in rvt_create_qp()
     1197  qp->s_size = sqsize;   in rvt_create_qp()