Searched for refs:sqp (results 1 – 11 of 11, sorted by relevance)

/Linux-v5.4/drivers/infiniband/hw/mlx4/
qp.c
    997  struct mlx4_ib_sqp *sqp = NULL;  in create_qp_common() local
   1050  sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);  in create_qp_common()
   1051  if (!sqp)  in create_qp_common()
   1053  qp = &sqp->qp;  in create_qp_common()
   1296  if (!sqp && !*caller_qp)  in create_qp_common()
   1298  kfree(sqp);  in create_qp_common()
   1657  struct mlx4_ib_sqp *sqp = to_msqp((to_mqp(ibqp)));  in mlx4_ib_create_qp() local
   1663  sqp->roce_v2_gsi = ib_create_qp(pd, init_attr);  in mlx4_ib_create_qp()
   1665  if (IS_ERR(sqp->roce_v2_gsi)) {  in mlx4_ib_create_qp()
   1666  pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi));  in mlx4_ib_create_qp()
   [all …]

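Note: the qp.c hits above show the usual kernel embedding pattern: the special-QP state
(struct mlx4_ib_sqp) wraps the driver QP (qp = &sqp->qp at line 1053), and
to_msqp(to_mqp(ibqp)) at line 1657 walks back from the generic ib_qp to that container.
A minimal userspace sketch of the pattern follows; the struct members and the local
container_of() are illustrative stand-ins, not the driver's real definitions.

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-ins for the driver structs; not the real mlx4 layouts. */
    struct ib_qp       { int qp_num; };
    struct mlx4_ib_qp  { struct ib_qp ibqp; };                 /* generic QP embedded in the driver QP */
    struct mlx4_ib_sqp { struct mlx4_ib_qp qp; int is_gsi; };  /* special-QP state wraps the driver QP */

    /* Userspace stand-in for the kernel's container_of(). */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct mlx4_ib_qp *to_mqp(struct ib_qp *ibqp)
    {
            return container_of(ibqp, struct mlx4_ib_qp, ibqp);
    }

    static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
    {
            return container_of(mqp, struct mlx4_ib_sqp, qp);
    }

    int main(void)
    {
            /* Allocate the container, then hand out only the embedded QP (qp = &sqp->qp). */
            struct mlx4_ib_sqp *sqp = calloc(1, sizeof(*sqp));
            struct ib_qp *ibqp = &sqp->qp.ibqp;

            /* Later callers recover the container from the generic handle. */
            printf("round trip ok: %d\n", to_msqp(to_mqp(ibqp)) == sqp);
            free(sqp);
            return 0;
    }
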
mad.c
   1369  struct mlx4_ib_demux_pv_qp *sqp;  in mlx4_ib_send_to_wire() local
   1386  sqp = &sqp_ctx->qp[0];  in mlx4_ib_send_to_wire()
   1390  sqp = &sqp_ctx->qp[1];  in mlx4_ib_send_to_wire()
   1394  send_qp = sqp->qp;  in mlx4_ib_send_to_wire()
   1410  spin_lock(&sqp->tx_lock);  in mlx4_ib_send_to_wire()
   1411  if (sqp->tx_ix_head - sqp->tx_ix_tail >=  in mlx4_ib_send_to_wire()
   1415  wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);  in mlx4_ib_send_to_wire()
   1416  spin_unlock(&sqp->tx_lock);  in mlx4_ib_send_to_wire()
   1420  sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);  in mlx4_ib_send_to_wire()
   1421  kfree(sqp->tx_ring[wire_tx_ix].ah);  in mlx4_ib_send_to_wire()
   [all …]

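Note: the mad.c hits show the tunnel TX ring bookkeeping: head and tail indices grow
monotonically, fullness is checked as head - tail against the ring size (the right-hand
side of the comparison is truncated in the hit at line 1411), and the slot is selected by
masking with the power-of-two size, (++tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1), all under
sqp->tx_lock. A self-contained, single-threaded sketch of that indexing scheme; the ring
size, payload type, and helper names are made up.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 16u                    /* must be a power of two */

    struct tx_ring {
            uint32_t head;                   /* monotonically increasing producer index */
            uint32_t tail;                   /* monotonically increasing consumer index */
            int      buf[RING_SIZE];
    };

    /* Reserve a slot; the driver's exact fullness threshold is elided in the search
     * hit, this sketch simply refuses to overwrite unconsumed entries. */
    static bool ring_push(struct tx_ring *r, int v)
    {
            if (r->head - r->tail >= RING_SIZE)
                    return false;                         /* ring full */
            r->buf[++r->head & (RING_SIZE - 1)] = v;      /* wrap via mask, like & (MLX4_NUM_TUNNEL_BUFS - 1) */
            return true;
    }

    static bool ring_pop(struct tx_ring *r, int *v)
    {
            if (r->head == r->tail)
                    return false;                         /* ring empty */
            *v = r->buf[++r->tail & (RING_SIZE - 1)];
            return true;
    }

    int main(void)
    {
            struct tx_ring r = { 0 };
            int v;

            ring_push(&r, 42);
            if (ring_pop(&r, &v))
                    printf("%d\n", v);
            return 0;
    }
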
/Linux-v5.4/drivers/infiniband/hw/mthca/
mthca_qp.c
    299  static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,  in store_attrs() argument
    303  sqp->pkey_index = attr->pkey_index;  in store_attrs()
    305  sqp->qkey = attr->qkey;  in store_attrs()
    307  sqp->send_psn = attr->sq_psn;  in store_attrs()
   1371  struct mthca_sqp *sqp,  in mthca_alloc_sqp() argument
   1377  sqp->qp.transport = MLX;  in mthca_alloc_sqp()
   1378  err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);  in mthca_alloc_sqp()
   1382  sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;  in mthca_alloc_sqp()
   1383  sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,  in mthca_alloc_sqp()
   1384  &sqp->header_dma, GFP_KERNEL);  in mthca_alloc_sqp()
   [all …]

mthca_dev.h
    561  struct mthca_sqp *sqp,

/Linux-v5.4/drivers/infiniband/sw/rdmavt/
qp.c
   2925  struct rvt_qp *sqp)  in loopback_qp_drop() argument
   2932  return sqp->ibqp.qp_type == IB_QPT_RC ?  in loopback_qp_drop()
   2946  void rvt_ruc_loopback(struct rvt_qp *sqp)  in rvt_ruc_loopback() argument
   2949  struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);  in rvt_ruc_loopback()
   2964  rvp = rdi->ports[sqp->port_num - 1];  in rvt_ruc_loopback()
   2971  qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,  in rvt_ruc_loopback()
   2972  sqp->remote_qpn);  in rvt_ruc_loopback()
   2974  spin_lock_irqsave(&sqp->s_lock, flags);  in rvt_ruc_loopback()
   2977  if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||  in rvt_ruc_loopback()
   2978  !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))  in rvt_ruc_loopback()
   [all …]

/Linux-v5.4/drivers/scsi/
scsi_debug.c
   3714  struct sdebug_queue *sqp;  in sdebug_q_cmd_complete() local
   3723  sqp = sdebug_q_arr + sd_dp->sqa_idx;  in sdebug_q_cmd_complete()
   3733  spin_lock_irqsave(&sqp->qc_lock, iflags);  in sdebug_q_cmd_complete()
   3734  sqcp = &sqp->qc_arr[qc_idx];  in sdebug_q_cmd_complete()
   3737  spin_unlock_irqrestore(&sqp->qc_lock, iflags);  in sdebug_q_cmd_complete()
   3751  if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {  in sdebug_q_cmd_complete()
   3752  spin_unlock_irqrestore(&sqp->qc_lock, iflags);  in sdebug_q_cmd_complete()
   3762  spin_unlock_irqrestore(&sqp->qc_lock, iflags);  in sdebug_q_cmd_complete()
   3766  k = find_last_bit(sqp->in_use_bm, retval);  in sdebug_q_cmd_complete()
   3772  spin_unlock_irqrestore(&sqp->qc_lock, iflags);  in sdebug_q_cmd_complete()
   [all …]

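Note: in scsi_debug.c, sqp is a struct sdebug_queue; the completion path clears the
command's slot in the in_use_bm bitmap with test_and_clear_bit() (line 3751) and probes
the highest busy slot with find_last_bit() (line 3766), all under qc_lock. A rough,
single-threaded userspace sketch of those two bitmap operations; the 64-slot depth and
helper names are illustrative, and no locking or atomics are modelled.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define QUEUE_DEPTH 64u

    /* Stand-in for the per-queue in_use_bm; the driver uses atomic bitops under a lock. */
    static uint64_t in_use_bm;

    /* Clear a slot's in-use bit and report whether it was actually set, which is how
     * the completion path detects a stale or bogus queue index. */
    static bool test_and_clear_slot(unsigned int idx)
    {
            uint64_t mask = (uint64_t)1 << idx;
            bool was_set = in_use_bm & mask;

            in_use_bm &= ~mask;
            return was_set;
    }

    /* Rough analogue of find_last_bit(): highest set bit below 'size', or 'size' if none. */
    static unsigned int find_last_slot(unsigned int size)
    {
            for (unsigned int i = size; i-- > 0; )
                    if (in_use_bm & ((uint64_t)1 << i))
                            return i;
            return size;
    }

    int main(void)
    {
            in_use_bm |= (uint64_t)1 << 3;   /* command queued in slot 3 */
            in_use_bm |= (uint64_t)1 << 9;   /* command queued in slot 9 */

            if (!test_and_clear_slot(9))     /* complete the command in slot 9 */
                    printf("completion for a slot that was not in use\n");
            printf("highest in-use slot: %u\n", find_last_slot(QUEUE_DEPTH));
            return 0;
    }
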
/Linux-v5.4/drivers/infiniband/hw/qib/
qib_ud.c
     51  static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)  in qib_ud_loopback() argument
     53  struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);  in qib_ud_loopback()
     73  sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?  in qib_ud_loopback()
     74  IB_QPT_UD : sqp->ibqp.qp_type;  in qib_ud_loopback()
     92  pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);  in qib_ud_loopback()
     99  sqp->ibqp.qp_num, qp->ibqp.qp_num,  in qib_ud_loopback()
    115  sqp->qkey : rvt_get_swqe_remote_qkey(swqe);  in qib_ud_loopback()
    205  wc.src_qp = sqp->ibqp.qp_num;  in qib_ud_loopback()

/Linux-v5.4/drivers/infiniband/hw/hfi1/
ud.c
     73  static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)  in ud_loopback() argument
     75  struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);  in ud_loopback()
     77  struct hfi1_qp_priv *priv = sqp->priv;  in ud_loopback()
     89  qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,  in ud_loopback()
     97  sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?  in ud_loopback()
     98  IB_QPT_UD : sqp->ibqp.qp_type;  in ud_loopback()
    116  pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index);  in ud_loopback()
    124  sqp->ibqp.qp_num, qp->ibqp.qp_num,  in ud_loopback()
    139  sqp->qkey : rvt_get_swqe_remote_qkey(swqe);  in ud_loopback()
    239  wc.src_qp = sqp->ibqp.qp_num;  in ud_loopback()
    [all …]

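Note: qib_ud.c and hfi1's ud.c implement the same UD self-loopback path: a GSI sender is
matched as a plain UD QP, the P_Key at the sender's s_pkey_index is checked against the
receiver, and the effective Q_Key comes either from the work request or from the sending
QP itself. A small sketch of the type and Q_Key selection; the enum values and struct
fields are stand-ins, and the sign test in effective_qkey() is reconstructed from the IB
rule (a work-request Q_Key with its most-significant bit set means "use the QP's own
Q_Key") rather than being visible in the truncated hits above.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative QP types; the real constants live in <rdma/ib_verbs.h>. */
    enum qp_type { QPT_UD, QPT_SMI, QPT_GSI, QPT_RC };

    struct qp   { enum qp_type type; uint32_t qp_num; uint32_t qkey; };
    struct swqe { uint32_t remote_qkey; };

    /* Match a GSI sender as a plain UD QP when pairing loopback QP types, mirroring
     * "sqptype = qp_type == IB_QPT_GSI ? IB_QPT_UD : qp_type". */
    static enum qp_type loopback_type(const struct qp *sqp)
    {
            return sqp->type == QPT_GSI ? QPT_UD : sqp->type;
    }

    /* Pick the Q_Key used for the loopback delivery (see the note above). */
    static uint32_t effective_qkey(const struct qp *sqp, const struct swqe *swqe)
    {
            return (int32_t)swqe->remote_qkey < 0 ? sqp->qkey : swqe->remote_qkey;
    }

    int main(void)
    {
            struct qp   sqp = { .type = QPT_GSI, .qp_num = 1, .qkey = 0x80010000 };
            struct swqe wqe = { .remote_qkey = 0x80000000 };  /* MSB set: fall back to sqp->qkey */

            printf("type %d, qkey 0x%08x\n", loopback_type(&sqp),
                   (unsigned int)effective_qkey(&sqp, &wqe));
            return 0;
    }
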
/Linux-v5.4/drivers/infiniband/hw/cxgb3/
cxio_hal.c
    367  struct t3_swsq *sqp)  in insert_sq_cqe() argument
    375  V_CQE_OPCODE(sqp->opcode) |  in insert_sq_cqe()
    381  cqe.u.scqe.wrid_hi = sqp->sq_wptr;  in insert_sq_cqe()
    391  struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);  in cxio_flush_sq() local
    394  sqp->signaled = 0;  in cxio_flush_sq()
    395  insert_sq_cqe(wq, cq, sqp);  in cxio_flush_sq()
    397  sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);  in cxio_flush_sq()
   1044  struct t3_swsq *sqp;  in flush_completed_wrs() local
   1048  sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);  in flush_completed_wrs()
   1050  if (!sqp->signaled) {  in flush_completed_wrs()
   [all …]

iwch_qp.c
    363  struct t3_swsq *sqp;  in iwch_post_send() local
    392  sqp = qhp->wq.sq +  in iwch_post_send()
    414  sqp->read_len = wqe->read.local_len;  in iwch_post_send()
    416  qhp->wq.oldest_read = sqp;  in iwch_post_send()
    437  sqp->wr_id = wr->wr_id;  in iwch_post_send()
    438  sqp->opcode = wr2opcode(t3_wr_opcode);  in iwch_post_send()
    439  sqp->sq_wptr = qhp->wq.sq_wptr;  in iwch_post_send()
    440  sqp->complete = 0;  in iwch_post_send()
    441  sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED);  in iwch_post_send()
    450  sqp->opcode);  in iwch_post_send()

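Note: both cxgb3 files operate on the software send-queue shadow (t3_swsq):
iwch_post_send() records wr_id, opcode, the current write pointer, and the signaled flag
for every posted WR, and cxio_flush_sq()/flush_completed_wrs() later walk those shadow
entries, for example to synthesize flush completions. A simplified sketch of that
bookkeeping with Q_PTR2IDX()-style index masking; the struct fields and queue size are
stand-ins, and no real CQE is built here.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SQ_SIZE_LOG2 6u                                   /* 64-entry send queue */
    #define Q_PTR2IDX(ptr, log2) ((ptr) & ((1u << (log2)) - 1))

    /* Simplified stand-in for t3_swsq: one shadow entry per posted send WR. */
    struct swsq {
            uint64_t wr_id;
            uint8_t  opcode;
            uint32_t sq_wptr;     /* write-pointer value when the WR was posted */
            bool     complete;
            bool     signaled;
    };

    struct wq {
            struct swsq sq[1u << SQ_SIZE_LOG2];
            uint32_t    sq_wptr;  /* producer pointer, wrapped via Q_PTR2IDX() */
            uint32_t    sq_rptr;  /* oldest entry that has not completed yet */
    };

    /* Record the shadow entry for a newly posted WR, as iwch_post_send() does. */
    static void post_send(struct wq *wq, uint64_t wr_id, uint8_t opcode, bool signaled)
    {
            struct swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_wptr, SQ_SIZE_LOG2);

            sqp->wr_id    = wr_id;
            sqp->opcode   = opcode;
            sqp->sq_wptr  = wq->sq_wptr;
            sqp->complete = false;
            sqp->signaled = signaled;
            wq->sq_wptr++;
    }

    /* Walk the outstanding entries and account for a synthetic completion for each,
     * roughly what cxio_flush_sq() does when the queue is flushed. */
    static unsigned int flush_sq(struct wq *wq)
    {
            unsigned int flushed = 0;

            for (uint32_t ptr = wq->sq_rptr; ptr != wq->sq_wptr; ptr++) {
                    struct swsq *sqp = wq->sq + Q_PTR2IDX(ptr, SQ_SIZE_LOG2);

                    sqp->signaled = false;
                    flushed++;    /* a real driver builds a flush CQE from sqp here */
            }
            wq->sq_rptr = wq->sq_wptr;
            return flushed;
    }

    int main(void)
    {
            struct wq wq = { 0 };

            post_send(&wq, 0x1001, 0 /* send */, true);
            post_send(&wq, 0x1002, 4 /* read */, false);
            printf("flushed %u WRs\n", flush_sq(&wq));
            return 0;
    }
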
/Linux-v5.4/drivers/infiniband/sw/rxe/
rxe_hdr.h
    493  __be32 sqp;  member
    517  return DETH_SQP_MASK & be32_to_cpu(deth->sqp);  in __deth_sqp()
    520  static inline void __deth_set_sqp(void *arg, u32 sqp)  in __deth_set_sqp() argument
    524  deth->sqp = cpu_to_be32(DETH_SQP_MASK & sqp);  in __deth_set_sqp()
    545  static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)  in deth_set_sqp() argument
    548  + rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);  in deth_set_sqp()

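Note: in rxe_hdr.h, sqp is the 24-bit source QP number of the Datagram Extended Transport
Header (DETH), stored big-endian on the wire and masked with DETH_SQP_MASK on both get and
set. A userspace sketch of the same accessors, using htonl()/ntohl() in place of the
kernel's cpu_to_be32()/be32_to_cpu(); only the sqp member appears in the hits above, the
preceding qkey field is the standard DETH layout shown here for completeness.

    #include <arpa/inet.h>   /* htonl()/ntohl(), stand-ins for cpu_to_be32()/be32_to_cpu() */
    #include <stdint.h>
    #include <stdio.h>

    #define DETH_SQP_MASK 0x00ffffffu        /* source QP number is a 24-bit field */

    /* Wire layout of the DETH: 32-bit Q_Key, then 8 reserved bits + 24-bit source QP. */
    struct rxe_deth {
            uint32_t qkey;
            uint32_t sqp;                    /* big-endian on the wire */
    };

    static uint32_t deth_sqp(const struct rxe_deth *deth)
    {
            return DETH_SQP_MASK & ntohl(deth->sqp);
    }

    static void deth_set_sqp(struct rxe_deth *deth, uint32_t sqp)
    {
            deth->sqp = htonl(DETH_SQP_MASK & sqp);
    }

    int main(void)
    {
            struct rxe_deth deth = { 0 };

            deth_set_sqp(&deth, 0x123456);
            printf("source qp 0x%06x\n", (unsigned int)deth_sqp(&deth));
            return 0;
    }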