
Searched refs:sqp (Results 1 – 12 of 12) sorted by relevance

/Linux-v4.19/drivers/infiniband/hw/qib/
qib_ruc.c
184 static void qib_ruc_loopback(struct rvt_qp *sqp) in qib_ruc_loopback() argument
186 struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num); in qib_ruc_loopback()
206 qp = rvt_lookup_qpn(rdi, &ibp->rvp, sqp->remote_qpn); in qib_ruc_loopback()
210 spin_lock_irqsave(&sqp->s_lock, flags); in qib_ruc_loopback()
213 if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) || in qib_ruc_loopback()
214 !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND)) in qib_ruc_loopback()
217 sqp->s_flags |= RVT_S_BUSY; in qib_ruc_loopback()
220 if (sqp->s_last == READ_ONCE(sqp->s_head)) in qib_ruc_loopback()
222 wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); in qib_ruc_loopback()
225 if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) { in qib_ruc_loopback()
[all …]
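
This hit (and the near-identical ruc.c hit under hfi1 below) shows the rdmavt loopback gatekeeping pattern: take sqp->s_lock, bail out if the QP is already busy or its state does not allow sending, mark it RVT_S_BUSY, and give up again if the software send queue is empty (s_last == s_head). A minimal userspace sketch of that logic, with hypothetical names (struct loop_qp, QP_BUSY, try_start_loopback) standing in for the rdmavt types:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define QP_BUSY      (1u << 0)   /* analog of RVT_S_BUSY */
#define QP_ANY_WAIT  (1u << 1)   /* analog of RVT_S_ANY_WAIT */

struct loop_qp {
    pthread_spinlock_t s_lock;
    uint32_t s_flags;
    uint32_t s_head;   /* producer index into the software send queue */
    uint32_t s_last;   /* oldest not-yet-completed entry */
    bool send_ok;      /* analog of the RVT_PROCESS_OR_FLUSH_SEND state check */
};

/* Returns true if the caller now owns the send side and there is work queued. */
static bool try_start_loopback(struct loop_qp *qp)
{
    bool start = false;

    pthread_spin_lock(&qp->s_lock);
    /* Someone else is processing, or the QP state forbids sending. */
    if ((qp->s_flags & (QP_BUSY | QP_ANY_WAIT)) || !qp->send_ok)
        goto unlock;
    qp->s_flags |= QP_BUSY;          /* claim the send engine */
    if (qp->s_last == qp->s_head) {  /* software send queue is empty */
        qp->s_flags &= ~QP_BUSY;
        goto unlock;
    }
    start = true;
unlock:
    pthread_spin_unlock(&qp->s_lock);
    return start;
}

int main(void)
{
    struct loop_qp qp = { .send_ok = true, .s_head = 1 };

    pthread_spin_init(&qp.s_lock, PTHREAD_PROCESS_PRIVATE);
    printf("work to do: %d\n", try_start_loopback(&qp));
    return 0;
}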
qib_ud.c
50 static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) in qib_ud_loopback() argument
52 struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num); in qib_ud_loopback()
72 sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ? in qib_ud_loopback()
73 IB_QPT_UD : sqp->ibqp.qp_type; in qib_ud_loopback()
91 pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index); in qib_ud_loopback()
98 sqp->ibqp.qp_num, qp->ibqp.qp_num, in qib_ud_loopback()
114 sqp->qkey : swqe->ud_wr.remote_qkey; in qib_ud_loopback()
209 wc.src_qp = sqp->ibqp.qp_num; in qib_ud_loopback()
/Linux-v4.19/drivers/infiniband/hw/hfi1/
ruc.c
169 static void ruc_loopback(struct rvt_qp *sqp) in ruc_loopback() argument
171 struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num); in ruc_loopback()
191 qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp, in ruc_loopback()
192 sqp->remote_qpn); in ruc_loopback()
194 spin_lock_irqsave(&sqp->s_lock, flags); in ruc_loopback()
197 if ((sqp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT)) || in ruc_loopback()
198 !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND)) in ruc_loopback()
201 sqp->s_flags |= RVT_S_BUSY; in ruc_loopback()
204 if (sqp->s_last == READ_ONCE(sqp->s_head)) in ruc_loopback()
206 wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); in ruc_loopback()
[all …]
ud.c
72 static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) in ud_loopback() argument
74 struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num); in ud_loopback()
76 struct hfi1_qp_priv *priv = sqp->priv; in ud_loopback()
88 qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp, in ud_loopback()
96 sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ? in ud_loopback()
97 IB_QPT_UD : sqp->ibqp.qp_type; in ud_loopback()
115 pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index); in ud_loopback()
123 sqp->ibqp.qp_num, qp->ibqp.qp_num, in ud_loopback()
138 sqp->qkey : swqe->ud_wr.remote_qkey; in ud_loopback()
258 wc.src_qp = sqp->ibqp.qp_num; in ud_loopback()
[all …]
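
The two UD loopback hits (qib_ud.c line 114 and ud.c line 138) both end in the same ternary, which implements the InfiniBand "high-order bit" Q_Key rule: if the Q_Key given in the work request has its most significant bit set, the sender's own Q_Key from the QP is used instead. A stand-alone sketch of that selection (effective_qkey is a hypothetical helper):

#include <stdint.h>
#include <stdio.h>

/*
 * Mirrors the ternary in the UD loopback hits: a work-request Q_Key with the
 * top bit set means "use the QP's own Q_Key"; otherwise the WR value wins.
 */
static uint32_t effective_qkey(uint32_t qp_qkey, uint32_t wr_remote_qkey)
{
    return (int32_t)wr_remote_qkey < 0 ? qp_qkey : wr_remote_qkey;
}

int main(void)
{
    printf("0x%x\n", effective_qkey(0x11111111, 0x80000000)); /* top bit set -> QP qkey */
    printf("0x%x\n", effective_qkey(0x11111111, 0x00001234)); /* explicit qkey used */
    return 0;
}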
/Linux-v4.19/drivers/infiniband/hw/mlx4/
qp.c
864 struct mlx4_ib_sqp *sqp = NULL; in create_qp_common() local
916 sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL); in create_qp_common()
917 if (!sqp) in create_qp_common()
919 qp = &sqp->qp; in create_qp_common()
1214 if (sqp) in create_qp_common()
1215 kfree(sqp); in create_qp_common()
1569 struct mlx4_ib_sqp *sqp = to_msqp((to_mqp(ibqp))); in mlx4_ib_create_qp() local
1575 sqp->roce_v2_gsi = ib_create_qp(pd, init_attr); in mlx4_ib_create_qp()
1577 if (IS_ERR(sqp->roce_v2_gsi)) { in mlx4_ib_create_qp()
1578 pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi)); in mlx4_ib_create_qp()
[all …]
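
The mlx4_ib_create_qp hit checks the extra RoCEv2 GSI QP with IS_ERR()/PTR_ERR(), the kernel's error-pointer convention: a failing allocator returns an errno encoded into the pointer value rather than NULL. A userspace re-creation of that idiom, assuming the usual -4095..-1 errno window; err_ptr/is_err/ptr_err and fake_create_qp are illustrative stand-ins:

#include <errno.h>
#include <stdio.h>

/* Illustrative reimplementations of ERR_PTR()/IS_ERR()/PTR_ERR(). */
static inline void *err_ptr(long error)      { return (void *)error; }
static inline int   is_err(const void *ptr)  { return (unsigned long)ptr >= (unsigned long)-4095; }
static inline long  ptr_err(const void *ptr) { return (long)ptr; }

static void *fake_create_qp(int fail)
{
    /* On failure, hand back the errno folded into the pointer. */
    return fail ? err_ptr(-ENOMEM) : (void *)(unsigned long)0x1000;
}

int main(void)
{
    void *qp = fake_create_qp(1);

    if (is_err(qp))
        printf("create_qp failed (%ld)\n", ptr_err(qp));
    return 0;
}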
mad.c
1367 struct mlx4_ib_demux_pv_qp *sqp; in mlx4_ib_send_to_wire() local
1384 sqp = &sqp_ctx->qp[0]; in mlx4_ib_send_to_wire()
1388 sqp = &sqp_ctx->qp[1]; in mlx4_ib_send_to_wire()
1392 send_qp = sqp->qp; in mlx4_ib_send_to_wire()
1400 spin_lock(&sqp->tx_lock); in mlx4_ib_send_to_wire()
1401 if (sqp->tx_ix_head - sqp->tx_ix_tail >= in mlx4_ib_send_to_wire()
1405 wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1); in mlx4_ib_send_to_wire()
1406 spin_unlock(&sqp->tx_lock); in mlx4_ib_send_to_wire()
1410 sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr); in mlx4_ib_send_to_wire()
1411 if (sqp->tx_ring[wire_tx_ix].ah) in mlx4_ib_send_to_wire()
[all …]
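
The mlx4_ib_send_to_wire hits show a power-of-two transmit ring driven by free-running counters: fullness is judged from tx_ix_head - tx_ix_tail, and the slot index is the incremented head masked by MLX4_NUM_TUNNEL_BUFS - 1. A small sketch of the same bookkeeping under assumed names (struct tx_ring, ring_reserve, RING_SIZE):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 16u   /* must be a power of two, like MLX4_NUM_TUNNEL_BUFS */

/* head and tail are free-running counters; the slot index is the counter
 * masked by (size - 1), and fullness comes from the head/tail distance. */
struct tx_ring {
    uint32_t head;   /* bumped when a send is queued */
    uint32_t tail;   /* bumped by the completion path */
};

/* Returns the slot to use, or -1 when the ring is considered full. */
static int ring_reserve(struct tx_ring *r)
{
    if (r->head - r->tail >= RING_SIZE - 1)
        return -1;                        /* leave one slot of slack, as the hit does */
    return (int)(++r->head & (RING_SIZE - 1));
}

int main(void)
{
    struct tx_ring r = { 0, 0 };
    int slot;

    while ((slot = ring_reserve(&r)) >= 0)
        printf("reserved slot %d\n", slot);
    printf("ring full at head=%u tail=%u\n", r.head, r.tail);
    return 0;
}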
/Linux-v4.19/drivers/infiniband/hw/mthca/
mthca_qp.c
298 static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr, in store_attrs() argument
302 sqp->pkey_index = attr->pkey_index; in store_attrs()
304 sqp->qkey = attr->qkey; in store_attrs()
306 sqp->send_psn = attr->sq_psn; in store_attrs()
1363 struct mthca_sqp *sqp) in mthca_alloc_sqp() argument
1368 sqp->qp.transport = MLX; in mthca_alloc_sqp()
1369 err = mthca_set_qp_size(dev, cap, pd, &sqp->qp); in mthca_alloc_sqp()
1373 sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE; in mthca_alloc_sqp()
1374 sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size, in mthca_alloc_sqp()
1375 &sqp->header_dma, GFP_KERNEL); in mthca_alloc_sqp()
[all …]
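
mthca_alloc_sqp sizes one coherent buffer as sq.max * MTHCA_UD_HEADER_SIZE, i.e. one UD header slot per send-queue entry, addressed later by index * header size. A hedged userspace analog (calloc stands in for dma_alloc_coherent, and the header size constant is only a stand-in value):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define UD_HEADER_SIZE 72u   /* stand-in for MTHCA_UD_HEADER_SIZE; the real value may differ */

int main(void)
{
    unsigned int sq_max = 64;                       /* send queue depth */
    size_t header_buf_size = (size_t)sq_max * UD_HEADER_SIZE;
    uint8_t *header_buf = calloc(1, header_buf_size);

    if (!header_buf)
        return 1;

    for (unsigned int i = 0; i < sq_max; i++) {
        uint8_t *hdr = header_buf + i * UD_HEADER_SIZE; /* per-WQE header slot */
        hdr[0] = (uint8_t)i;                            /* pretend to build a header */
    }
    printf("allocated %zu bytes for %u header slots\n", header_buf_size, sq_max);
    free(header_buf);
    return 0;
}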
mthca_dev.h
559 struct mthca_sqp *sqp);
/Linux-v4.19/drivers/scsi/
scsi_debug.c
3801 struct sdebug_queue *sqp; in sdebug_q_cmd_complete() local
3810 sqp = sdebug_q_arr + sd_dp->sqa_idx; in sdebug_q_cmd_complete()
3820 spin_lock_irqsave(&sqp->qc_lock, iflags); in sdebug_q_cmd_complete()
3821 sqcp = &sqp->qc_arr[qc_idx]; in sdebug_q_cmd_complete()
3824 spin_unlock_irqrestore(&sqp->qc_lock, iflags); in sdebug_q_cmd_complete()
3838 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) { in sdebug_q_cmd_complete()
3839 spin_unlock_irqrestore(&sqp->qc_lock, iflags); in sdebug_q_cmd_complete()
3849 spin_unlock_irqrestore(&sqp->qc_lock, iflags); in sdebug_q_cmd_complete()
3853 k = find_last_bit(sqp->in_use_bm, retval); in sdebug_q_cmd_complete()
3859 spin_unlock_irqrestore(&sqp->qc_lock, iflags); in sdebug_q_cmd_complete()
[all …]
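
The scsi_debug hits show each sdebug_queue pairing a spinlock with an in_use_bm bitmap; the completion path must find its bit still set (test_and_clear_bit) or the command was already retired. A simplified userspace analog using a 64-bit bitmap and a mutex (struct fake_queue and complete_slot are hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One lock plus an in-use bitmap per queue, as in the hit above. */
struct fake_queue {
    pthread_mutex_t lock;
    uint64_t in_use_bm;      /* one bit per queued command slot */
};

static bool complete_slot(struct fake_queue *q, unsigned int idx)
{
    bool was_set;

    pthread_mutex_lock(&q->lock);
    was_set = q->in_use_bm & (1ull << idx);
    q->in_use_bm &= ~(1ull << idx);          /* test-and-clear under the lock */
    pthread_mutex_unlock(&q->lock);

    if (!was_set)
        fprintf(stderr, "slot %u was not in use\n", idx);
    return was_set;
}

int main(void)
{
    struct fake_queue q = { PTHREAD_MUTEX_INITIALIZER, 1ull << 3 };

    printf("%d\n", complete_slot(&q, 3));   /* 1: bit was set */
    printf("%d\n", complete_slot(&q, 3));   /* 0: double completion detected */
    return 0;
}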
/Linux-v4.19/drivers/infiniband/hw/cxgb3/
cxio_hal.c
385 struct t3_swsq *sqp) in insert_sq_cqe() argument
393 V_CQE_OPCODE(sqp->opcode) | in insert_sq_cqe()
399 cqe.u.scqe.wrid_hi = sqp->sq_wptr; in insert_sq_cqe()
409 struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); in cxio_flush_sq() local
412 sqp->signaled = 0; in cxio_flush_sq()
413 insert_sq_cqe(wq, cq, sqp); in cxio_flush_sq()
415 sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); in cxio_flush_sq()
1064 struct t3_swsq *sqp; in flush_completed_wrs() local
1068 sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); in flush_completed_wrs()
1070 if (!sqp->signaled) { in flush_completed_wrs()
[all …]
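
Both cxio_hal.c hits fold a free-running queue pointer into an array index with Q_PTR2IDX(ptr, sq_size_log2), which for a power-of-two queue presumably reduces to a mask. A tiny sketch of that assumption (q_ptr2idx is a guess at the macro's expansion):

#include <stdint.h>
#include <stdio.h>

/* Mask a free-running pointer into a power-of-two queue of 2^size_log2 entries. */
static uint32_t q_ptr2idx(uint32_t ptr, uint32_t size_log2)
{
    return ptr & ((1u << size_log2) - 1);
}

int main(void)
{
    uint32_t sq_size_log2 = 6;                 /* 64-entry software send queue */

    for (uint32_t ptr = 62; ptr < 67; ptr++)   /* pointer wraps, index stays in range */
        printf("ptr %u -> idx %u\n", ptr, q_ptr2idx(ptr, sq_size_log2));
    return 0;
}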
iwch_qp.c
363 struct t3_swsq *sqp; in iwch_post_send() local
392 sqp = qhp->wq.sq + in iwch_post_send()
414 sqp->read_len = wqe->read.local_len; in iwch_post_send()
416 qhp->wq.oldest_read = sqp; in iwch_post_send()
437 sqp->wr_id = wr->wr_id; in iwch_post_send()
438 sqp->opcode = wr2opcode(t3_wr_opcode); in iwch_post_send()
439 sqp->sq_wptr = qhp->wq.sq_wptr; in iwch_post_send()
440 sqp->complete = 0; in iwch_post_send()
441 sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED); in iwch_post_send()
450 sqp->opcode); in iwch_post_send()
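
iwch_post_send fills a software shadow entry per posted WQE so the completion path can recover the caller's wr_id, the translated opcode, the write pointer at post time, and whether a completion was requested. A minimal sketch of that bookkeeping with hypothetical names (struct sw_sqe, post_one):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Software shadow of a posted send WQE, filled at post time and consumed
 * later when the hardware completion arrives. */
struct sw_sqe {
    uint64_t wr_id;      /* caller's cookie, echoed back in the completion */
    uint8_t  opcode;     /* translated send opcode */
    uint32_t sq_wptr;    /* write pointer at post time */
    bool     complete;   /* set when the hardware CQE arrives */
    bool     signaled;   /* did the caller ask for a completion? */
};

static void post_one(struct sw_sqe *sqp, uint64_t wr_id, uint8_t opcode,
                     uint32_t sq_wptr, bool signaled)
{
    sqp->wr_id = wr_id;
    sqp->opcode = opcode;
    sqp->sq_wptr = sq_wptr;
    sqp->complete = false;
    sqp->signaled = signaled;
}

int main(void)
{
    struct sw_sqe e;

    post_one(&e, 0xabcd, 0 /* pretend SEND */, 17, true);
    printf("wr_id=0x%llx wptr=%u signaled=%d\n",
           (unsigned long long)e.wr_id, e.sq_wptr, e.signaled);
    return 0;
}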
/Linux-v4.19/drivers/infiniband/sw/rxe/
rxe_hdr.h
493 __be32 sqp; member
517 return DETH_SQP_MASK & be32_to_cpu(deth->sqp); in __deth_sqp()
520 static inline void __deth_set_sqp(void *arg, u32 sqp) in __deth_set_sqp() argument
524 deth->sqp = cpu_to_be32(DETH_SQP_MASK & sqp); in __deth_set_sqp()
545 static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp) in deth_set_sqp() argument
548 + rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp); in deth_set_sqp()
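
The rxe_hdr.h hits are the accessors for the 24-bit source QP number carried big-endian in the DETH. A userspace sketch of the same get/set pair, with htonl/ntohl standing in for cpu_to_be32/be32_to_cpu and struct fake_deth as an assumed layout:

#include <arpa/inet.h>   /* htonl/ntohl stand in for cpu_to_be32/be32_to_cpu */
#include <stdint.h>
#include <stdio.h>

#define DETH_SQP_MASK 0x00ffffffu   /* source QP number is a 24-bit field */

/* The source QP lives in a big-endian 32-bit word and only the low 24 bits
 * are meaningful, so both directions apply the mask. */
struct fake_deth {
    uint32_t qkey;
    uint32_t sqp;    /* stored big-endian on the wire */
};

static uint32_t deth_get_sqp(const struct fake_deth *deth)
{
    return DETH_SQP_MASK & ntohl(deth->sqp);
}

static void deth_set_sqp(struct fake_deth *deth, uint32_t sqp)
{
    deth->sqp = htonl(DETH_SQP_MASK & sqp);
}

int main(void)
{
    struct fake_deth deth = { 0 };

    deth_set_sqp(&deth, 0x12345678);                          /* top byte is masked off */
    printf("sqp on the wire: 0x%06x\n", deth_get_sqp(&deth));  /* prints 0x345678 */
    return 0;
}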