Lines Matching +full:total +full:- +full:timeout

/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2016 - 2020 Intel Corporation. */

#include <rdma/rvt-abi.h>

#define RVT_AIP_QP_MAX (u32)(RVT_AIP_QP_BASE + RVT_AIP_QPN_MAX - 1)
 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 * RVT_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_ECN - a BECN was queued to the send engine
 * RVT_S_MAX_BIT_MASK - The max bit that can be used by rdmavt
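The RVT_S_* values documented above are independent bits OR'ed into qp->s_flags, so a QP can be busy and waiting on several resources at once. A minimal standalone sketch of the usual test/set/clear pattern follows; the two flag values and the DEMO_* names are illustrative stand-ins, not the real rdmavt constants:

#include <stdio.h>

/* Illustrative stand-ins for two of the RVT_S_* bits; not the real values. */
#define DEMO_S_BUSY      0x01u
#define DEMO_S_WAIT_RNR  0x02u

int main(void)
{
	unsigned int s_flags = 0;

	s_flags |= DEMO_S_BUSY;          /* send engine claims the QP          */
	s_flags |= DEMO_S_WAIT_RNR;      /* later it also blocks on an RNR NAK */

	if (s_flags & DEMO_S_WAIT_RNR)
		printf("QP is waiting for the RNR timeout\n");

	s_flags &= ~DEMO_S_BUSY;         /* clear a single bit when done       */
	printf("s_flags = 0x%x\n", s_flags);
	return 0;
}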
 * rvt_ud_wr - IB UD work plus AH cache
 * in qp->s_max_sge.
	u32 length; /* total length of data in sg_list */
 * struct rvt_krwq - kernel struct receive work request
 * @count: approximate count of the total receive entries posted
 * rvt_get_swqe_ah - Return the pointer to the struct rvt_ah
	return ibah_to_rvtah(swqe->ud_wr.wr.ah);
 * rvt_get_swqe_ah_attr - Return the cached ah attribute information
	return swqe->ud_wr.attr;
 * rvt_get_swqe_remote_qpn - Access the remote QPN value
	return swqe->ud_wr.wr.remote_qpn;
 * rvt_get_swqe_remote_qkey - Access the remote qkey value
	return swqe->ud_wr.wr.remote_qkey;
 * rvt_get_swqe_pkey_index - Access the pkey index
	return swqe->ud_wr.wr.pkey_index;
 * rvt_get_rq_count - count the number of request work queue entries
 * Return - total number of entries in the Receive Queue
	u32 count = head - tail;
		count += rq->size;
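rvt_get_rq_count() derives how many receive entries are posted from the head and tail indices of a circular buffer: the raw difference goes negative once head has wrapped past the end of the ring, in which case the queue size is added back. A standalone sketch of the same arithmetic, with an assumed ring_count() helper standing in for the driver function:

#include <stdio.h>

typedef unsigned int u32;

/* Count entries in a ring of 'size' slots given head/tail indices. */
static u32 ring_count(u32 head, u32 tail, u32 size)
{
	int count = (int)(head - tail);

	if (count < 0)
		count += size;	/* head wrapped around the end of the ring */
	return (u32)count;
}

int main(void)
{
	printf("%u\n", ring_count(5, 2, 8));  /* 3 entries, no wrap  */
	printf("%u\n", ring_count(1, 6, 8));  /* 3 entries, wrapped  */
	return 0;
}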
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
	unsigned long timeout_jiffies;  /* computed from timeout */
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 timeout;             /* Timeout for this QP */
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u32 r_len;              /* total length of r_sge */
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_acked;            /* last un-ACK'ed entry */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_nak_state;         /* non-zero if NAK is pending */
#define RVT_BITS_PER_PAGE_MASK (RVT_BITS_PER_PAGE - 1)
 * QPN-map pages start out as NULL, they get allocated upon
	/* in rvt_get_swqe_ptr(): */
	return (struct rvt_swqe *)((char *)qp->s_wq +
				   qp->s_max_sge *
	/* in rvt_get_rwqe_ptr(): */
		((char *)rq->kwq->curr_wq +
		 rq->max_sge * sizeof(struct ib_sge)) * n);
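rvt_get_swqe_ptr() and rvt_get_rwqe_ptr() cannot index a plain array because every work queue entry carries a variable number of SGEs fixed at QP creation, so entry n lives at byte offset (fixed header + max_sge * sizeof(sge)) * n. The following standalone illustration uses simplified demo_* structures in place of the real rvt_rwqe/ib_sge layouts:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the fixed WQE header and a scatter/gather entry. */
struct demo_sge  { unsigned long long addr; unsigned int length; unsigned int key; };
struct demo_rwqe { unsigned long long wr_id; unsigned char num_sge; struct demo_sge sg_list[]; };

int main(void)
{
	unsigned int max_sge = 4, n = 3;
	size_t stride = sizeof(struct demo_rwqe) + max_sge * sizeof(struct demo_sge);
	char *ring = calloc(8, stride);	/* 8 entries, each 'stride' bytes */

	/* Same pointer arithmetic as rvt_get_rwqe_ptr(): base + stride * n. */
	struct demo_rwqe *wqe = (struct demo_rwqe *)(ring + stride * n);

	wqe->wr_id = 42;
	printf("entry %u lives at offset %zu, wr_id=%llu\n", n, stride * n, wqe->wr_id);
	free(ring);
	return 0;
}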
 * rvt_is_user_qp - return if this is user mode QP
 * @qp - the target QP
	return !!qp->pid;
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
	atomic_inc(&qp->refcount);
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
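rvt_get_qp()/rvt_put_qp() follow the common kernel pattern of an atomic reference count whose final put wakes whoever is sleeping on qp->wait before the QP is torn down. A rough userspace analogue with C11 atomics is sketched below; the wait-queue wakeup is reduced to a printf since that machinery is kernel-only, and all demo_* names are invented for the example:

#include <stdatomic.h>
#include <stdio.h>

struct demo_qp {
	atomic_int refcount;
};

static void demo_get_qp(struct demo_qp *qp)
{
	atomic_fetch_add(&qp->refcount, 1);
}

static void demo_put_qp(struct demo_qp *qp)
{
	/* fetch_sub returns the old value; old == 1 means we dropped the last ref */
	if (qp && atomic_fetch_sub(&qp->refcount, 1) == 1)
		printf("last reference dropped; wake up the destroyer\n");
}

int main(void)
{
	struct demo_qp qp = { .refcount = 1 };

	demo_get_qp(&qp);
	demo_put_qp(&qp);
	demo_put_qp(&qp);	/* this one hits zero and 'wakes' the waiter */
	return 0;
}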
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe - the send wqe
	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
	atomic_inc(&qp->s_reserved_used);
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @flags - send wqe flags
		atomic_dec(&qp->s_reserved_used);
		/* ensure no compiler re-order up to s_last change */
	return (((int)a) - ((int)b)) << 8;	/* in rvt_cmp_msn() */
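rvt_cmp_msn() compares 24-bit serial numbers: shifting the 32-bit difference up by 8 bits discards the unused high byte, so the sign of the result reflects ordering modulo 2^24 even across wraparound. The standalone version below expresses the same idea with unsigned arithmetic; the cmp_msn24() name is made up for the example:

#include <stdio.h>
#include <stdint.h>

/*
 * Compare two 24-bit serial numbers modulo 2^24: the sign of the shifted
 * difference says which value is "newer", even if one has wrapped.
 */
static int cmp_msn24(uint32_t a, uint32_t b)
{
	return (int32_t)((a - b) << 8);
}

int main(void)
{
	printf("%d\n", cmp_msn24(10, 7) > 0);               /* 1: 10 is after 7        */
	printf("%d\n", cmp_msn24(0x000002, 0xFFFFFE) > 0);  /* 1: wrapped, still after */
	printf("%d\n", cmp_msn24(5, 5) == 0);               /* 1: equal                */
	return 0;
}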
 * rvt_div_round_up_mtu - round up divide
 * @qp - the qp pair
 * @len - the length
	return (len + qp->pmtu - 1) >> qp->log_pmtu;
 * @qp - the qp pair
 * @len - the length
	return len >> qp->log_pmtu;	/* in rvt_div_mtu() */
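Both divide helpers rely on the path MTU being a power of two, so division becomes a shift by log_pmtu; the round-up variant adds pmtu - 1 first so a trailing partial segment still counts as one more packet. A quick standalone check of the formula, with illustrative parameter names:

#include <stdio.h>

/* Number of MTU-sized packets needed for 'len' bytes, rounding up. */
static unsigned int div_round_up_mtu(unsigned int len,
				     unsigned int pmtu, unsigned int log_pmtu)
{
	return (len + pmtu - 1) >> log_pmtu;
}

int main(void)
{
	/* pmtu = 4096, log_pmtu = 12 */
	printf("%u\n", div_round_up_mtu(4096, 4096, 12));  /* 1 */
	printf("%u\n", div_round_up_mtu(4097, 4096, 12));  /* 2: one extra partial packet */
	printf("%u\n", div_round_up_mtu(0, 4096, 12));     /* 0 */
	return 0;
}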
 * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
 * @timeout - timeout input (0 - 31).
 * Return a timeout value in jiffies.
static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
	if (timeout > 31)
		timeout = 31;
	return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
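The 5-bit timeout field follows the InfiniBand local ACK timeout encoding, 4.096 us * 2^timeout, which is why the helper scales usecs_to_jiffies(1 << timeout) by 4096/1000. A standalone sketch that evaluates the interval in microseconds; the ib_timeout_usecs() name is assumed for the example:

#include <stdio.h>

/*
 * The 5-bit IB "local ACK timeout" field encodes 4.096 us * 2^timeout.
 * Return that interval in microseconds (values above 31 are clamped,
 * as rvt_timeout_to_jiffies() does before converting to jiffies).
 */
static unsigned long long ib_timeout_usecs(unsigned int timeout)
{
	if (timeout > 31)
		timeout = 31;
	return (1ULL << timeout) * 4096ULL / 1000ULL;
}

int main(void)
{
	printf("timeout=14 -> %llu us\n", ib_timeout_usecs(14)); /* ~67 ms   */
	printf("timeout=31 -> %llu us\n", ib_timeout_usecs(31)); /* ~2.4 hrs */
	return 0;
}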
 * rvt_lookup_qpn - return the QP with the given QPN
		qp = rcu_dereference(rvp->qp[qpn]);
		u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

		for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
		     qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
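rvt_lookup_qpn() resolves ordinary QPNs by hashing the number into a bucket and walking an RCU-protected chain until the qp_num matches (QPs 0 and 1 are special-cased through the per-port array). The sketch below shows the same hash-and-walk shape in plain userspace C without RCU; the table size, the hash32() multiplier and all demo_* names are assumptions for illustration:

#include <stdio.h>

#define TABLE_BITS 4
#define TABLE_SIZE (1u << TABLE_BITS)

struct demo_qp {
	unsigned int qp_num;
	struct demo_qp *next;	/* hash-bucket chain */
};

/* Cheap stand-in for the kernel's hash_32(). */
static unsigned int hash32(unsigned int v, unsigned int bits)
{
	return (v * 0x61C88647u) >> (32 - bits);
}

static struct demo_qp *lookup_qpn(struct demo_qp **table, unsigned int qpn)
{
	struct demo_qp *qp;

	for (qp = table[hash32(qpn, TABLE_BITS)]; qp; qp = qp->next)
		if (qp->qp_num == qpn)
			return qp;
	return NULL;
}

int main(void)
{
	struct demo_qp *table[TABLE_SIZE] = { 0 };
	struct demo_qp a = { .qp_num = 42 };
	unsigned int idx = hash32(a.qp_num, TABLE_BITS);

	a.next = table[idx];
	table[idx] = &a;

	printf("found qp 42? %s\n", lookup_qpn(table, 42) ? "yes" : "no");
	printf("found qp 7?  %s\n", lookup_qpn(table, 7) ? "yes" : "no");
	return 0;
}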
 * rvt_mod_retry_timer_ext - mod a retry timer
 * @qp - the QP
 * @shift - timeout shift to wait for multiple packets
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
		  (qp->timeout_jiffies << shift));
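The expiry the timer is armed with is jiffies + busy_jiffies + (timeout_jiffies << shift); the shift lets the caller wait for several packets' worth of the per-QP timeout in one go. A tiny standalone version of that arithmetic, with assumed example values:

#include <stdio.h>

/* Retry-timer expiry: base timeout in jiffies scaled by 2^shift. */
static unsigned long retry_expiry(unsigned long now_jiffies,
				  unsigned long busy_jiffies,
				  unsigned long timeout_jiffies,
				  unsigned int shift)
{
	return now_jiffies + busy_jiffies + (timeout_jiffies << shift);
}

int main(void)
{
	/* e.g. with HZ=250 a ~67 ms timeout is roughly 17 jiffies */
	printf("%lu\n", retry_expiry(10000, 0, 17, 0));	/* 10017 */
	printf("%lu\n", retry_expiry(10000, 0, 17, 2));	/* 10068: four timeouts' worth */
	return 0;
}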
 * rvt_put_qp_swqe - drop refs held by swqe
	if (qp->allowed_ops == IB_OPCODE_UD)
		rdma_destroy_ah_attr(wqe->ud_wr.attr);
 * rvt_qp_swqe_incr - increment ring index
	if (++val >= qp->s_size)
 * rvt_recv_cq - add a new entry to completion queue
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);
 * rvt_send_cq - add a new entry to completion queue
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);
 * rvt_qp_complete_swqe - insert send completion
 * @qp - the qp
 * @wqe - the send wqe
 * @opcode - wc operation (driver dependent)
 * @status - completion status
	int flags = wqe->wr.send_flags;
		     (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		wr_id = wqe->wr.wr_id;
		byte_len = wqe->length;
	last = rvt_qp_swqe_incr(qp, qp->s_last);
	smp_store_release(&qp->s_last, last);
			.qp = &qp->ibqp,
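rvt_qp_complete_swqe() reads everything it needs from the SWQE (wr_id, byte_len) before it publishes the new consumer index with smp_store_release(); the matching acquire on the posting side then guarantees the slot is not reused while it is still being read. A userspace sketch of that release/acquire pairing with C11 atomics; the ring layout and all names are invented for the example:

#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 8

static unsigned int wr_id[RING_SIZE];
static atomic_uint s_last;	/* consumer index published to the poster */

/* Completion side: read everything from the slot, then publish it as free. */
static void complete_slot(unsigned int last)
{
	unsigned int id = wr_id[last];
	unsigned int next = (last + 1) % RING_SIZE;

	printf("completed wr_id %u\n", id);
	/* Release-store pairs with an acquire load in the code that reuses
	 * the slot (smp_store_release()/smp_load_acquire() in the kernel). */
	atomic_store_explicit(&s_last, next, memory_order_release);
}

/* Posting side: acquire load sees the slot only after completion finished. */
static unsigned int free_up_to(void)
{
	return atomic_load_explicit(&s_last, memory_order_acquire);
}

int main(void)
{
	wr_id[0] = 1234;
	complete_slot(0);
	printf("slots free up to index %u\n", free_up_to());
	return 0;
}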
 * struct rvt_qp_iter - the iterator for QPs
 * @qp - the current QP
 * ib_cq_tail - Return tail index of cq buffer
 * @send_cq - The cq for send
	return ibcq_to_rvtcq(send_cq)->ip ?
	       RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
	       ibcq_to_rvtcq(send_cq)->kqueue->tail;
 * ib_cq_head - Return head index of cq buffer
 * @send_cq - The cq for send
	return ibcq_to_rvtcq(send_cq)->ip ?
	       RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
	       ibcq_to_rvtcq(send_cq)->kqueue->head;
 * rvt_free_rq - free memory allocated for rvt_rq struct
	kvfree(rq->kwq);
	rq->kwq = NULL;
	vfree(rq->wq);
	rq->wq = NULL;
 * rvt_to_iport - Get the ibport pointer
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	return rdi->ports[qp->port_num - 1];
 * rvt_rc_credit_avail - Check if there are enough RC credits for the request
	lockdep_assert_held(&qp->s_lock);
	if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
	    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
		qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
		rvp->n_rc_crwaits++;
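rvt_rc_credit_avail() lets a request proceed only if its send sequence number does not run past the last sequence number the remote peer has granted credit for (s_lsn); otherwise RVT_S_WAIT_SSN_CREDIT is set and the send engine backs off. A standalone sketch of that check, reusing the 24-bit serial comparison from the earlier example; credit_avail() and its arguments are illustrative names:

#include <stdio.h>
#include <stdint.h>

/* 24-bit serial comparison, as sketched earlier for rvt_cmp_msn(). */
static int cmp_msn24(uint32_t a, uint32_t b)
{
	return (int32_t)((a - b) << 8);
}

/*
 * Credit check in the spirit of rvt_rc_credit_avail(): the request with
 * send sequence number 'ssn' may go out only if it does not run past the
 * last sequence number the peer has granted credit for ('lsn').
 */
static int credit_avail(uint32_t ssn, uint32_t lsn, int unlimited)
{
	if (!unlimited && cmp_msn24(ssn, lsn + 1) > 0)
		return 0;	/* would exceed the advertised credits: must wait */
	return 1;
}

int main(void)
{
	printf("%d\n", credit_avail(10, 10, 0));  /* 1: ssn within granted credit */
	printf("%d\n", credit_avail(12, 10, 0));  /* 0: beyond granted credits    */
	printf("%d\n", credit_avail(12, 10, 1));  /* 1: unlimited credit QP       */
	return 0;
}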