
Searched refs:qp (Results 1 – 25 of 297), sorted by relevance


/Linux-v4.19/drivers/infiniband/hw/qib/
qib_rc.c
67 static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp, in qib_make_rc_ack() argument
77 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) in qib_make_rc_ack()
83 switch (qp->s_ack_state) { in qib_make_rc_ack()
86 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; in qib_make_rc_ack()
98 if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC) in qib_make_rc_ack()
99 qp->s_tail_ack_queue = 0; in qib_make_rc_ack()
104 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) { in qib_make_rc_ack()
105 if (qp->s_flags & RVT_S_ACK_PENDING) in qib_make_rc_ack()
110 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; in qib_make_rc_ack()
120 qp->s_tail_ack_queue = qp->r_head_ack_queue; in qib_make_rc_ack()
[all …]
qib_uc.c
48 int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) in qib_make_uc_req() argument
50 struct qib_qp_priv *priv = qp->priv; in qib_make_uc_req()
56 u32 pmtu = qp->pmtu; in qib_make_uc_req()
59 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { in qib_make_uc_req()
60 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) in qib_make_uc_req()
63 if (qp->s_last == READ_ONCE(qp->s_head)) in qib_make_uc_req()
67 qp->s_flags |= RVT_S_WAIT_DMA; in qib_make_uc_req()
70 wqe = rvt_get_swqe_ptr(qp, qp->s_last); in qib_make_uc_req()
71 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); in qib_make_uc_req()
76 if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) in qib_make_uc_req()
[all …]
qib_ruc.c
44 void qib_migrate_qp(struct rvt_qp *qp) in qib_migrate_qp() argument
48 qp->s_mig_state = IB_MIG_MIGRATED; in qib_migrate_qp()
49 qp->remote_ah_attr = qp->alt_ah_attr; in qib_migrate_qp()
50 qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr); in qib_migrate_qp()
51 qp->s_pkey_index = qp->s_alt_pkey_index; in qib_migrate_qp()
53 ev.device = qp->ibqp.device; in qib_migrate_qp()
54 ev.element.qp = &qp->ibqp; in qib_migrate_qp()
56 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); in qib_migrate_qp()
83 int has_grh, struct rvt_qp *qp, u32 bth0) in qib_ruc_check_hdr() argument
88 if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) { in qib_ruc_check_hdr()
[all …]
qib_ud.c
56 struct rvt_qp *qp; in qib_ud_loopback() local
66 qp = rvt_lookup_qpn(rdi, &ibp->rvp, swqe->ud_wr.remote_qpn); in qib_ud_loopback()
67 if (!qp) { in qib_ud_loopback()
74 dqptype = qp->ibqp.qp_type == IB_QPT_GSI ? in qib_ud_loopback()
75 IB_QPT_UD : qp->ibqp.qp_type; in qib_ud_loopback()
78 !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { in qib_ud_loopback()
86 if (qp->ibqp.qp_num > 1) { in qib_ud_loopback()
92 pkey2 = qib_get_pkey(ibp, qp->s_pkey_index); in qib_ud_loopback()
98 sqp->ibqp.qp_num, qp->ibqp.qp_num, in qib_ud_loopback()
110 if (qp->ibqp.qp_num) { in qib_ud_loopback()
[all …]
qib_qp.c
224 if (rcu_dereference(ibp->rvp.qp[0])) in qib_free_all_qps()
226 if (rcu_dereference(ibp->rvp.qp[1])) in qib_free_all_qps()
233 void qib_notify_qp_reset(struct rvt_qp *qp) in qib_notify_qp_reset() argument
235 struct qib_qp_priv *priv = qp->priv; in qib_notify_qp_reset()
240 void qib_notify_error_qp(struct rvt_qp *qp) in qib_notify_error_qp() argument
242 struct qib_qp_priv *priv = qp->priv; in qib_notify_error_qp()
243 struct qib_ibdev *dev = to_idev(qp->ibqp.device); in qib_notify_error_qp()
246 if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) { in qib_notify_error_qp()
247 qp->s_flags &= ~RVT_S_ANY_WAIT_IO; in qib_notify_error_qp()
252 if (!(qp->s_flags & RVT_S_BUSY)) { in qib_notify_error_qp()
[all …]
/Linux-v4.19/drivers/infiniband/sw/rxe/
rxe_qp.c
123 static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n) in alloc_rd_atomic_resources() argument
125 qp->resp.res_head = 0; in alloc_rd_atomic_resources()
126 qp->resp.res_tail = 0; in alloc_rd_atomic_resources()
127 qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL); in alloc_rd_atomic_resources()
129 if (!qp->resp.resources) in alloc_rd_atomic_resources()
135 static void free_rd_atomic_resources(struct rxe_qp *qp) in free_rd_atomic_resources() argument
137 if (qp->resp.resources) { in free_rd_atomic_resources()
140 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) { in free_rd_atomic_resources()
141 struct resp_res *res = &qp->resp.resources[i]; in free_rd_atomic_resources()
143 free_rd_atomic_resource(qp, res); in free_rd_atomic_resources()
[all …]
rxe_req.c
41 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
44 static inline void retry_first_write_send(struct rxe_qp *qp, in retry_first_write_send() argument
51 int to_send = (wqe->dma.resid > qp->mtu) ? in retry_first_write_send()
52 qp->mtu : wqe->dma.resid; in retry_first_write_send()
54 qp->req.opcode = next_opcode(qp, wqe, in retry_first_write_send()
64 wqe->iova += qp->mtu; in retry_first_write_send()
68 static void req_retry(struct rxe_qp *qp) in req_retry() argument
76 wqe = queue_head(qp->sq.queue); in req_retry()
77 npsn = (qp->comp.psn - wqe->first_psn) & BTH_PSN_MASK; in req_retry()
79 qp->req.wqe_index = consumer_index(qp->sq.queue); in req_retry()
[all …]
rxe_comp.c
141 struct rxe_qp *qp = from_timer(qp, t, retrans_timer); in retransmit_timer() local
143 if (qp->valid) { in retransmit_timer()
144 qp->comp.timeout = 1; in retransmit_timer()
145 rxe_run_task(&qp->comp.task, 1); in retransmit_timer()
149 void rxe_comp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp, in rxe_comp_queue_pkt() argument
154 skb_queue_tail(&qp->resp_pkts, skb); in rxe_comp_queue_pkt()
156 must_sched = skb_queue_len(&qp->resp_pkts) > 1; in rxe_comp_queue_pkt()
159 rxe_run_task(&qp->comp.task, must_sched); in rxe_comp_queue_pkt()
162 static inline enum comp_state get_wqe(struct rxe_qp *qp, in get_wqe() argument
171 wqe = queue_head(qp->sq.queue); in get_wqe()
[all …]
rxe_resp.c
107 void rxe_resp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp, in rxe_resp_queue_pkt() argument
113 skb_queue_tail(&qp->req_pkts, skb); in rxe_resp_queue_pkt()
116 (skb_queue_len(&qp->req_pkts) > 1); in rxe_resp_queue_pkt()
118 rxe_run_task(&qp->resp.task, must_sched); in rxe_resp_queue_pkt()
121 static inline enum resp_states get_req(struct rxe_qp *qp, in get_req() argument
126 if (qp->resp.state == QP_STATE_ERROR) { in get_req()
127 skb = skb_dequeue(&qp->req_pkts); in get_req()
130 rxe_drop_ref(qp); in get_req()
139 skb = skb_peek(&qp->req_pkts); in get_req()
145 return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN; in get_req()
[all …]
/Linux-v4.19/drivers/infiniband/hw/hfi1/
rc.c
85 static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp, in make_rc_ack() argument
95 u32 pmtu = qp->pmtu; in make_rc_ack()
96 struct hfi1_qp_priv *priv = qp->priv; in make_rc_ack()
98 lockdep_assert_held(&qp->s_lock); in make_rc_ack()
100 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) in make_rc_ack()
110 switch (qp->s_ack_state) { in make_rc_ack()
113 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; in make_rc_ack()
125 if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC) in make_rc_ack()
126 qp->s_tail_ack_queue = 0; in make_rc_ack()
131 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) { in make_rc_ack()
[all …]
qp.c
66 static void flush_tx_list(struct rvt_qp *qp);
75 static void qp_pio_drain(struct rvt_qp *qp);
137 static void flush_tx_list(struct rvt_qp *qp) in flush_tx_list() argument
139 struct hfi1_qp_priv *priv = qp->priv; in flush_tx_list()
154 static void flush_iowait(struct rvt_qp *qp) in flush_iowait() argument
156 struct hfi1_qp_priv *priv = qp->priv; in flush_iowait()
166 rvt_put_qp(qp); in flush_iowait()
198 int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, in hfi1_check_modify_qp() argument
201 struct ib_qp *ibqp = &qp->ibqp; in hfi1_check_modify_qp()
211 if (!qp_to_sdma_engine(qp, sc) && in hfi1_check_modify_qp()
[all …]
uc.c
63 int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) in hfi1_make_uc_req() argument
65 struct hfi1_qp_priv *priv = qp->priv; in hfi1_make_uc_req()
71 u32 pmtu = qp->pmtu; in hfi1_make_uc_req()
74 ps->s_txreq = get_txreq(ps->dev, qp); in hfi1_make_uc_req()
78 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { in hfi1_make_uc_req()
79 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) in hfi1_make_uc_req()
82 if (qp->s_last == READ_ONCE(qp->s_head)) in hfi1_make_uc_req()
86 qp->s_flags |= RVT_S_WAIT_DMA; in hfi1_make_uc_req()
89 clear_ahg(qp); in hfi1_make_uc_req()
90 wqe = rvt_get_swqe_ptr(qp, qp->s_last); in hfi1_make_uc_req()
[all …]
ruc.c
73 struct rvt_qp *qp = packet->qp; in hfi1_ruc_check_hdr() local
74 u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)]; in hfi1_ruc_check_hdr()
81 if (qp->s_mig_state == IB_MIG_ARMED && migrated) { in hfi1_ruc_check_hdr()
83 if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) & in hfi1_ruc_check_hdr()
90 if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) & in hfi1_ruc_check_hdr()
93 grh = rdma_ah_read_grh(&qp->alt_ah_attr); in hfi1_ruc_check_hdr()
106 hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num, in hfi1_ruc_check_hdr()
111 if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) || in hfi1_ruc_check_hdr()
113 rdma_ah_get_port_num(&qp->alt_ah_attr)) in hfi1_ruc_check_hdr()
115 spin_lock_irqsave(&qp->s_lock, flags); in hfi1_ruc_check_hdr()
[all …]
qp.h
64 static inline int hfi1_send_ok(struct rvt_qp *qp) in hfi1_send_ok() argument
66 return !(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO)) && in hfi1_send_ok()
67 (verbs_txreq_queued(qp) || in hfi1_send_ok()
68 (qp->s_flags & RVT_S_RESP_PENDING) || in hfi1_send_ok()
69 !(qp->s_flags & RVT_S_ANY_WAIT_SEND)); in hfi1_send_ok()
95 static inline void clear_ahg(struct rvt_qp *qp) in clear_ahg() argument
97 struct hfi1_qp_priv *priv = qp->priv; in clear_ahg()
100 qp->s_flags &= ~(HFI1_S_AHG_VALID | HFI1_S_AHG_CLEAR); in clear_ahg()
101 if (priv->s_sde && qp->s_ahgidx >= 0) in clear_ahg()
102 sdma_ahg_free(priv->s_sde, qp->s_ahgidx); in clear_ahg()
[all …]
ud.c
77 struct rvt_qp *qp; in ud_loopback() local
88 qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp, in ud_loopback()
90 if (!qp) { in ud_loopback()
98 dqptype = qp->ibqp.qp_type == IB_QPT_GSI ? in ud_loopback()
99 IB_QPT_UD : qp->ibqp.qp_type; in ud_loopback()
102 !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { in ud_loopback()
110 if (qp->ibqp.qp_num > 1) { in ud_loopback()
119 qp->s_pkey_index, in ud_loopback()
123 sqp->ibqp.qp_num, qp->ibqp.qp_num, in ud_loopback()
134 if (qp->ibqp.qp_num) { in ud_loopback()
[all …]
/Linux-v4.19/drivers/infiniband/sw/rdmavt/
qp.c
280 struct rvt_qp *qp; in rvt_free_all_qps() local
295 qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n], in rvt_free_all_qps()
299 for (; qp; qp = rcu_dereference_protected(qp->next, in rvt_free_all_qps()
430 static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends) in rvt_clear_mr_refs() argument
433 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); in rvt_clear_mr_refs()
435 if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) in rvt_clear_mr_refs()
436 rvt_put_ss(&qp->s_rdma_read_sge); in rvt_clear_mr_refs()
438 rvt_put_ss(&qp->r_sge); in rvt_clear_mr_refs()
441 while (qp->s_last != qp->s_head) { in rvt_clear_mr_refs()
442 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last); in rvt_clear_mr_refs()
[all …]
/Linux-v4.19/drivers/ntb/
ntb_transport.c
114 struct ntb_transport_qp *qp; member
142 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
152 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
253 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) argument
260 static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
449 struct ntb_transport_qp *qp; in debugfs_read() local
453 qp = filp->private_data; in debugfs_read()
455 if (!qp || !qp->link_is_up) in debugfs_read()
468 "rx_bytes - \t%llu\n", qp->rx_bytes); in debugfs_read()
470 "rx_pkts - \t%llu\n", qp->rx_pkts); in debugfs_read()
[all …]
/Linux-v4.19/drivers/infiniband/hw/mthca/
mthca_qp.c
195 static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp) in is_sqp() argument
197 return qp->qpn >= dev->qp_table.sqp_start && in is_sqp()
198 qp->qpn <= dev->qp_table.sqp_start + 3; in is_sqp()
201 static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp) in is_qp0() argument
203 return qp->qpn >= dev->qp_table.sqp_start && in is_qp0()
204 qp->qpn <= dev->qp_table.sqp_start + 1; in is_qp0()
207 static void *get_recv_wqe(struct mthca_qp *qp, int n) in get_recv_wqe() argument
209 if (qp->is_direct) in get_recv_wqe()
210 return qp->queue.direct.buf + (n << qp->rq.wqe_shift); in get_recv_wqe()
212 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + in get_recv_wqe()
[all …]
/Linux-v4.19/drivers/net/ethernet/qlogic/qed/
qed_roce.c
119 static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid, in qed_rdma_copy_gids() argument
124 if (qp->roce_mode == ROCE_V2_IPV4) { in qed_rdma_copy_gids()
130 src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr); in qed_rdma_copy_gids()
131 dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr); in qed_rdma_copy_gids()
134 for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) { in qed_rdma_copy_gids()
135 src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]); in qed_rdma_copy_gids()
136 dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]); in qed_rdma_copy_gids()
229 static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) in qed_roce_get_qp_tc() argument
233 if (qp->vlan_id) { in qed_roce_get_qp_tc()
234 pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; in qed_roce_get_qp_tc()
[all …]
/Linux-v4.19/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_qp.c
55 static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq, in get_cqs() argument
58 *send_cq = to_vcq(qp->ibqp.send_cq); in get_cqs()
59 *recv_cq = to_vcq(qp->ibqp.recv_cq); in get_cqs()
98 static void pvrdma_reset_qp(struct pvrdma_qp *qp) in pvrdma_reset_qp() argument
104 get_cqs(qp, &scq, &rcq); in pvrdma_reset_qp()
107 _pvrdma_flush_cqe(qp, scq); in pvrdma_reset_qp()
109 _pvrdma_flush_cqe(qp, rcq); in pvrdma_reset_qp()
117 if (qp->rq.ring) { in pvrdma_reset_qp()
118 atomic_set(&qp->rq.ring->cons_head, 0); in pvrdma_reset_qp()
119 atomic_set(&qp->rq.ring->prod_tail, 0); in pvrdma_reset_qp()
[all …]
/Linux-v4.19/drivers/infiniband/hw/mlx4/
qp.c
80 struct mlx4_ib_qp qp; member
126 return container_of(mqp, struct mlx4_ib_sqp, qp); in to_msqp()
129 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_tunnel_qp() argument
134 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && in is_tunnel_qp()
135 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + in is_tunnel_qp()
139 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_sqp() argument
146 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_sqp()
147 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); in is_sqp()
153 if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy || in is_sqp()
154 qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp1_proxy) { in is_sqp()
[all …]
/Linux-v4.19/drivers/infiniband/hw/i40iw/
i40iw_uk.c
47 static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp) in i40iw_nop_1() argument
54 if (!qp->sq_ring.head) in i40iw_nop_1()
57 wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring); in i40iw_nop_1()
58 wqe = qp->sq_base[wqe_idx].elem; in i40iw_nop_1()
60 qp->sq_wrtrk_array[wqe_idx].wqe_size = I40IW_QP_WQE_MIN_SIZE; in i40iw_nop_1()
62 peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size; in i40iw_nop_1()
63 wqe_0 = qp->sq_base[peek_head].elem; in i40iw_nop_1()
65 wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID); in i40iw_nop_1()
67 wqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID); in i40iw_nop_1()
75 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++; in i40iw_nop_1()
[all …]
/Linux-v4.19/drivers/infiniband/hw/bnxt_re/
qplib_fp.c
54 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
57 static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp) in bnxt_qplib_cancel_phantom_processing() argument
59 qp->sq.condition = false; in bnxt_qplib_cancel_phantom_processing()
60 qp->sq.send_phantom = false; in bnxt_qplib_cancel_phantom_processing()
61 qp->sq.single = false; in bnxt_qplib_cancel_phantom_processing()
65 static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) in __bnxt_qplib_add_flush_qp() argument
69 scq = qp->scq; in __bnxt_qplib_add_flush_qp()
70 rcq = qp->rcq; in __bnxt_qplib_add_flush_qp()
72 if (!qp->sq.flushed) { in __bnxt_qplib_add_flush_qp()
75 qp); in __bnxt_qplib_add_flush_qp()
[all …]
/Linux-v4.19/net/ipv4/
ip_fragment.c
129 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
135 struct ipq *qp = container_of(q, struct ipq, q); in ip4_frag_init() local
143 qp->ecn = 0; in ip4_frag_init()
144 qp->peer = q->net->max_dist ? in ip4_frag_init()
151 struct ipq *qp; in ip4_frag_free() local
153 qp = container_of(q, struct ipq, q); in ip4_frag_free()
154 if (qp->peer) in ip4_frag_free()
155 inet_putpeer(qp->peer); in ip4_frag_free()
192 struct ipq *qp; in ip_expire() local
195 qp = container_of(frag, struct ipq, q); in ip_expire()
[all …]
/Linux-v4.19/drivers/infiniband/hw/qedr/
qedr_roce_cm.c
58 void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp, in qedr_store_gsi_qp_cq() argument
64 dev->gsi_qp = qp; in qedr_store_gsi_qp_cq()
76 struct qedr_qp *qp = dev->gsi_qp; in qedr_ll2_complete_tx_packet() local
81 dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons, in qedr_ll2_complete_tx_packet()
88 spin_lock_irqsave(&qp->q_lock, flags); in qedr_ll2_complete_tx_packet()
89 qedr_inc_sw_gsi_cons(&qp->sq); in qedr_ll2_complete_tx_packet()
90 spin_unlock_irqrestore(&qp->q_lock, flags); in qedr_ll2_complete_tx_packet()
101 struct qedr_qp *qp = dev->gsi_qp; in qedr_ll2_complete_rx_packet() local
104 spin_lock_irqsave(&qp->q_lock, flags); in qedr_ll2_complete_rx_packet()
106 qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ? in qedr_ll2_complete_rx_packet()
[all …]
