/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2016 - 2020 Intel Corporation.
 */

#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>
#include <rdma/rvt-abi.h>
/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID        0
#define RVT_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK   0x04
#define RVT_R_RSP_SEND  0x08
#define RVT_R_COMM_EST  0x10

/*
 * If a packet's QP[23:16] bits match this value, then it is
 * a PSM packet and the hardware will expect a KDETH header
 * following the BTH.
 */
#define RVT_KDETH_QP_PREFIX       0x80
#define RVT_KDETH_QP_SUFFIX       0xffff
#define RVT_KDETH_QP_PREFIX_MASK  0x00ff0000
#define RVT_KDETH_QP_PREFIX_SHIFT 16
#define RVT_KDETH_QP_BASE         (u32)(RVT_KDETH_QP_PREFIX << \
					RVT_KDETH_QP_PREFIX_SHIFT)
#define RVT_KDETH_QP_MAX          (u32)(RVT_KDETH_QP_BASE + RVT_KDETH_QP_SUFFIX)

/*
 * If a packet's LNH == BTH and DEST QPN[23:16] in the BTH match this
 * prefix value, then it is an AIP packet with a DETH containing the entropy
 * value in byte 4 following the BTH.
 */
#define RVT_AIP_QP_PREFIX       0x81
#define RVT_AIP_QP_SUFFIX       0xffff
#define RVT_AIP_QP_PREFIX_MASK  0x00ff0000
#define RVT_AIP_QP_PREFIX_SHIFT 16
#define RVT_AIP_QP_BASE         (u32)(RVT_AIP_QP_PREFIX << \
				      RVT_AIP_QP_PREFIX_SHIFT)
#define RVT_AIP_QPN_MAX         BIT(RVT_AIP_QP_PREFIX_SHIFT)
#define RVT_AIP_QP_MAX          (u32)(RVT_AIP_QP_BASE + RVT_AIP_QPN_MAX - 1)
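
/*
 * Usage sketch: a driver can classify a BTH destination QPN by masking the
 * prefix bits and comparing against the base values above.  The helper
 * below is hypothetical, not part of this header:
 *
 *	static inline bool qpn_is_kdeth(u32 qpn)
 *	{
 *		return (qpn & RVT_KDETH_QP_PREFIX_MASK) == RVT_KDETH_QP_BASE;
 *	}
 *
 * The same masking works for AIP QPNs with RVT_AIP_QP_PREFIX_MASK and
 * RVT_AIP_QP_BASE.
 */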

/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if completions are generated only for send WRs
 *                       with IB_SEND_SIGNALED set (or on error)
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_RESP_PENDING - a responder reply (RDMA read data or atomic ACK)
 *                      is pending
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for the send DMA queue to drain before generating
 *                  the next send completion entry for a send not using
 *                  send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_UNLIMITED_CREDIT - credit limiting is disabled for this QP
 *                          (see rvt_rc_credit_avail())
 * RVT_S_ECN - a BECN was queued to the send engine
 * RVT_S_MAX_BIT_MASK - the highest s_flags bit that rdmavt itself may use
 */
#define RVT_S_SIGNAL_REQ_WR	0x0001
#define RVT_S_BUSY		0x0002
#define RVT_S_TIMER		0x0004
#define RVT_S_RESP_PENDING	0x0008
#define RVT_S_ACK_PENDING	0x0010
#define RVT_S_WAIT_FENCE	0x0020
#define RVT_S_WAIT_RDMAR	0x0040
#define RVT_S_WAIT_RNR		0x0080
#define RVT_S_WAIT_SSN_CREDIT	0x0100
#define RVT_S_WAIT_DMA		0x0200
#define RVT_S_WAIT_PIO		0x0400
#define RVT_S_WAIT_TX		0x0800
#define RVT_S_WAIT_DMA_DESC	0x1000
#define RVT_S_WAIT_KMEM		0x2000
#define RVT_S_WAIT_PSN		0x4000
#define RVT_S_WAIT_ACK		0x8000
#define RVT_S_SEND_ONE		0x10000
#define RVT_S_UNLIMITED_CREDIT	0x20000
#define RVT_S_ECN		0x40000
#define RVT_S_MAX_BIT_MASK	0x800000

/*
 * Drivers should use s_flags starting with bit 31 down to the bit next to
 * RVT_S_MAX_BIT_MASK.
 */

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
	(RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
	 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
	RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
	RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
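
/*
 * Usage sketch (how a driver send engine typically consumes these masks;
 * illustrative, not mandated by this header): RVT_S_ANY_WAIT_IO blocks
 * every packet type, including ACKs and responses, while
 * RVT_S_ANY_WAIT_SEND only blocks fetching new request WQEs:
 *
 *	if (qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO))
 *		return false;
 */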

/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK 0xE0

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK                0x01
#define RVT_POST_RECV_OK                0x02
#define RVT_PROCESS_RECV_OK             0x04
#define RVT_PROCESS_SEND_OK             0x08
#define RVT_PROCESS_NEXT_SEND_OK        0x10
#define RVT_FLUSH_SEND			0x20
#define RVT_FLUSH_RECV			0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)
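
/*
 * These flags are consumed by indexing ib_rvt_state_ops[] (declared later
 * in this header) with the current QP state.  A typical post-send guard
 * looks like:
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 */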

/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED           IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY	(IB_SEND_RESERVED_START << 1)

/**
 * rvt_ud_wr - IB UD work plus AH cache
 * @wr: valid IB work request
 * @attr: pointer to an allocated AH attribute
 *
 * Special case the UD WR so we can keep track of the AH attributes.
 *
 * NOTE: This data structure is strictly ordered wr then attr, i.e. the attr
 * MUST come after wr.  The ib_ud_wr is sized and copied in rvt_post_one_wr.
 * The copy assumes that wr is first.
 */
struct rvt_ud_wr {
	struct ib_ud_wr wr;
	struct rdma_ah_attr *attr;
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr;   /* don't use wr.sg_list */
		struct rvt_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;                /* first packet sequence number */
	u32 lpsn;               /* last packet sequence number */
	u32 ssn;                /* send sequence number */
	u32 length;             /* total length of data in sg_list */
	void *priv;             /* driver dependent field */
	struct rvt_sge sg_list[];
};

/**
 * struct rvt_krwq - kernel struct receive work request
 * @p_lock: lock to protect producer of the kernel buffer
 * @head: index of next entry to fill
 * @c_lock: lock to protect consumer of the kernel buffer
 * @tail: index of next entry to pull
 * @count: approximate count of total receive entries posted
 * @rvt_rwqe: struct of receive work request queue entry
 *
 * This structure is used to contain the head pointer,
 * tail pointer and receive work queue entries for kernel
 * mode user.
 */
struct rvt_krwq {
	spinlock_t p_lock;	/* protect producer */
	u32 head;               /* new work requests posted to the head */

	/* protect consumer */
	spinlock_t c_lock ____cacheline_aligned_in_smp;
	u32 tail;               /* receives pull requests from here. */
	u32 count;		/* approx count of receive entries posted */
	struct rvt_rwqe *curr_wq;
	struct rvt_rwqe wq[];
};
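
/*
 * The split p_lock/c_lock layout lets posters and the receive path run
 * concurrently on separate cachelines.  A producer-side sketch (loosely
 * mirroring what rvt_post_recv() does; illustrative only):
 *
 *	spin_lock_irqsave(&rq->kwq->p_lock, flags);
 *	wqe = rvt_get_rwqe_ptr(rq, rq->kwq->head);
 *	(fill in wqe->wr_id, num_sge and sg_list, then advance head,
 *	 wrapping at rq->size)
 *	spin_unlock_irqrestore(&rq->kwq->p_lock, flags);
 */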

/**
 * rvt_get_swqe_ah - Return the pointer to the struct rvt_ah
 * @swqe: valid Send WQE
 *
 */
static inline struct rvt_ah *rvt_get_swqe_ah(struct rvt_swqe *swqe)
{
	return ibah_to_rvtah(swqe->ud_wr.wr.ah);
}

/**
 * rvt_get_swqe_ah_attr - Return the cached ah attribute information
 * @swqe: valid Send WQE
 *
 */
static inline struct rdma_ah_attr *rvt_get_swqe_ah_attr(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.attr;
}

/**
 * rvt_get_swqe_remote_qpn - Access the remote QPN value
 * @swqe: valid Send WQE
 *
 */
static inline u32 rvt_get_swqe_remote_qpn(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.remote_qpn;
}

/**
 * rvt_get_swqe_remote_qkey - Access the remote qkey value
 * @swqe: valid Send WQE
 *
 */
static inline u32 rvt_get_swqe_remote_qkey(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.remote_qkey;
}

/**
 * rvt_get_swqe_pkey_index - Access the pkey index
 * @swqe: valid Send WQE
 *
 */
static inline u16 rvt_get_swqe_pkey_index(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.pkey_index;
}

struct rvt_rq {
	struct rvt_rwq *wq;
	struct rvt_krwq *kwq;
	u32 size;               /* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};

/**
 * rvt_get_rq_count - count the number of receive work queue entries
 * in the circular buffer
 * @rq: data structure for the receive queue
 * @head: head index of the circular buffer
 * @tail: tail index of the circular buffer
 *
 * Return - total number of entries in the receive queue
 */

static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
{
	u32 count = head - tail;

	if ((s32)count < 0)
		count += rq->size;
	return count;
}
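
/*
 * Worked example: with rq->size == 64, head == 2 and tail == 60, the raw
 * difference 2 - 60 is negative when viewed as s32, so it is corrected to
 * (2 - 60) + 64 == 6 entries outstanding across the wrap.
 */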

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;
	u64 atomic_data;
	u32 psn;
	u32 lpsn;
	u8 opcode;
	u8 sent;
	void *priv;
};

#define	RC_QP_SCALING_INTERVAL	5

#define RVT_OPERATION_PRIV        0x00000001
#define RVT_OPERATION_ATOMIC      0x00000002
#define RVT_OPERATION_ATOMIC_SGE  0x00000004
#define RVT_OPERATION_LOCAL       0x00000008
#define RVT_OPERATION_USE_RESERVE 0x00000010
#define RVT_OPERATION_IGN_RNR_CNT 0x00000020

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)

/**
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
 *
 * This supports table driven post send so that
 * drivers can support differing sets of operations.
 *
 **/

struct rvt_operation_params {
	size_t length;
	u32 qpt_support;
	u32 flags;
};
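
/*
 * A driver populates an RVT_OPERATION_MAX-sized table of these entries,
 * indexed by IB WR opcode.  A sketch of one entry, loosely modeled on the
 * hfi1/qib tables (the table name and exact contents here are
 * illustrative):
 *
 *	static const struct rvt_operation_params my_wr_opcodes[] = {
 *		[IB_WR_RDMA_WRITE] = {
 *			.length = sizeof(struct ib_rdma_wr),
 *			.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *		},
 *	};
 */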

/*
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv; /* Driver private data */
	/* read mostly fields above and below */
	struct rdma_ah_attr remote_ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;           /* link list for QPN hash table */
	struct rvt_swqe *s_wq;  /* send work queue */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies;  /* computed from timeout */

	int srate_mbps;		/* s_srate (below) converted to Mbit/s */
	pid_t pid;		/* pid for user mode QPs */
	u32 remote_qpn;
	u32 qkey;               /* QKEY for this QP (for UD or RD) */
	u32 s_size;             /* send work queue size */

	u16 pmtu;		/* decoded from path_mtu */
	u8 log_pmtu;		/* shift for pmtu */
	u8 state;               /* QP state */
	u8 allowed_ops;		/* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 timeout;             /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;        /* PKEY index to use */
	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;         /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry *s_ack_queue;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
	u32 r_psn;              /* expected rcv packet sequence number */
	unsigned long r_aflags;
	u64 r_wr_id;            /* ID for current receive WQE */
	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
	u32 r_len;              /* total length of r_sge */
	u32 r_rcv_len;          /* receive data len processed */
	u32 r_msn;              /* message sequence number */

	u8 r_state;             /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;    /* index into s_ack_queue[] */
	u8 r_adefered;          /* deferred ack count */

	struct list_head rspwait;       /* link for waiting to respond */

	struct rvt_sge_state r_sge;     /* current receive data */
	struct rvt_rq r_rq;             /* receive work queue */

	/* post send line */
	spinlock_t s_hlock ____cacheline_aligned_in_smp;
	u32 s_head;             /* new entries added here */
	u32 s_next_psn;         /* PSN for next request */
	u32 s_avail;            /* number of entries avail */
	u32 s_ssn;              /* SSN of tail entry */
	atomic_t s_reserved_used; /* reserved entries in use */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	u32 s_flags;
	struct rvt_sge_state *s_cur_sge;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;     /* current send request data */
	struct rvt_mregion *s_rdma_mr;
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_last_psn;         /* last response PSN processed */
	u32 s_sending_psn;      /* lowest PSN that is being sent */
	u32 s_sending_hpsn;     /* highest PSN that is being sent */
	u32 s_psn;              /* current packet sequence number */
	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
	u32 s_tail;             /* next entry to process */
	u32 s_cur;              /* current work queue entry */
	u32 s_acked;            /* last un-ACK'ed entry */
	u32 s_last;             /* last completed entry */
	u32 s_lsn;              /* limit sequence number (credit) */
	u32 s_ahgpsn;           /* set to the psn in the copy of the header */
	u16 s_cur_size;         /* size of send packet in bytes */
	u16 s_rdma_ack_cnt;
	u8 s_hdrwords;         /* size of s_hdr in 32 bit words */
	s8 s_ahgidx;
	u8 s_state;             /* opcode of last packet sent */
	u8 s_ack_state;         /* opcode of packet to ACK */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_nak_state;         /* non-zero if NAK is pending */
	u8 s_retry;             /* requester retry counter */
	u8 s_rnr_retry;         /* requester RNR retry counter */
	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
	u8 s_acked_ack_queue;   /* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;
	struct hrtimer s_rnr_timer;

	atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */

	/*
	 * This sge list MUST be last. Do not add anything below here.
	 */
	struct rvt_sge r_sg_list[] /* verified SGEs */
		____cacheline_aligned_in_smp;
};

struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct rvt_srq, ibsrq);
}

static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct rvt_qp, ibqp);
}

#define RVT_QPN_MAX                 BIT(24)
#define RVT_QPNMAP_ENTRIES          (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE           (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK      (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK		    IB_QPN_MASK

/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
	void *page;
};

struct rvt_qpn_table {
	spinlock_t lock; /* protect changes to the qp table */
	unsigned flags;         /* flags for QP0/1 allocated for each port */
	u32 last;               /* last QP number allocated */
	u32 nmaps;              /* size of the map table */
	u16 limit;
	u8  incr;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

struct rvt_qp_ibdev {
	u32 qp_table_size;
	u32 qp_table_bits;
	struct rvt_qp __rcu **qp_table;
	spinlock_t qpt_lock; /* qptable lock */
	struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
	struct list_head list;
	struct rvt_qp *qp;
};

struct rvt_mcast_addr {
	union ib_gid mgid;
	u16 lid;
};

struct rvt_mcast {
	struct rb_node rb_node;
	struct rvt_mcast_addr mcast_addr;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq.  This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
						unsigned n)
{
	return (struct rvt_swqe *)((char *)qp->s_wq +
				     (sizeof(struct rvt_swqe) +
				      qp->s_max_sge *
				      sizeof(struct rvt_sge)) * n);
}
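
/*
 * Each ring slot occupies sizeof(struct rvt_swqe) plus s_max_sge SGEs, so
 * walking the whole send queue looks like this sketch (callers hold the
 * appropriate qp lock while touching the ring; inspect_wqe() is a
 * hypothetical consumer):
 *
 *	for (n = 0; n < qp->s_size; n++) {
 *		struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
 *
 *		inspect_wqe(wqe);
 *	}
 */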

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq.  This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
	return (struct rvt_rwqe *)
		((char *)rq->kwq->curr_wq +
		 (sizeof(struct rvt_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}

/**
 * rvt_is_user_qp - return true if this is a user mode QP
 * @qp - the target QP
 */
static inline bool rvt_is_user_qp(struct rvt_qp *qp)
{
	return !!qp->pid;
}

/**
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe - the send wqe
 *
 * This drops any mr references held by the swqe
 */
static inline void rvt_put_swqe(struct rvt_swqe *wqe)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
	}
}

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This routine is used in post send to record
 * the use of a wqe relative reserved operation.
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	atomic_inc(&qp->s_reserved_used);
}

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @flags - send wqe flags
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not reorder the s_last
 * ring index update and the decrement of s_reserved_used.
 */
static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
{
	if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
		atomic_dec(&qp->s_reserved_used);
		/* ensure no compiler re-order up to s_last change */
		smp_mb__after_atomic();
	}
}

extern const enum ib_wc_opcode ib_rvt_wc_opcode[];

/*
 * Compare the lower 24 bits of the msn values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int rvt_cmp_msn(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << 8;
}
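
/*
 * Shifting the 32-bit difference left by 8 moves bit 23 into the sign bit,
 * so only the low 24 bits of a and b matter and wraparound compares
 * correctly.  Worked example: rvt_cmp_msn(0x000001, 0xffffff) evaluates to
 * a positive value, i.e. MSN 0x000001 is "after" 0xffffff across the
 * 24-bit wrap.
 */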

__be32 rvt_compute_aeth(struct rvt_qp *qp);

void rvt_get_credit(struct rvt_qp *qp, u32 aeth);

u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);

/**
 * rvt_div_round_up_mtu - round up divide
 * @qp - the qp pair
 * @len - the length
 *
 * Perform a shift based mtu round up divide
 */
static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
{
	return (len + qp->pmtu - 1) >> qp->log_pmtu;
}

/**
 * rvt_div_mtu - mtu divide
 * @qp - the qp pair
 * @len - the length
 *
 * Perform a shift based mtu divide
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
	return len >> qp->log_pmtu;
}
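
/*
 * Worked example: with pmtu == 4096 (log_pmtu == 12) and len == 9000,
 * rvt_div_round_up_mtu() returns (9000 + 4095) >> 12 == 3 packets, while
 * rvt_div_mtu() returns 9000 >> 12 == 2 full-mtu packets.
 */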

/**
 * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
 * @timeout - timeout input (0 - 31).
 *
 * Return a timeout value in jiffies.
 */
static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
{
	if (timeout > 31)
		timeout = 31;

	return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
}
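
/*
 * This follows the IBTA local ACK timeout encoding: the timeout period is
 * 4.096 usec * 2^timeout.  For example, timeout == 14 gives
 * 4.096 usec * 2^14, roughly 67 msec, before conversion to jiffies.
 */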

/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: the rvt device info
 * @rvp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
					    struct rvt_ibport *rvp,
					    u32 qpn) __must_hold(RCU)
{
	struct rvt_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		qp = rcu_dereference(rvp->qp[qpn]);
	} else {
		u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

		for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	return qp;
}
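
/*
 * A caller that needs the QP beyond the RCU read-side critical section
 * takes a reference first; a sketch of the usual pairing (process_qp() is
 * a hypothetical consumer):
 *
 *	rcu_read_lock();
 *	qp = rvt_lookup_qpn(rdi, rvp, qpn);
 *	if (qp)
 *		rvt_get_qp(qp);
 *	rcu_read_unlock();
 *	if (qp) {
 *		process_qp(qp);
 *		rvt_put_qp(qp);
 *	}
 */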

/**
 * rvt_mod_retry_timer_ext - mod a retry timer
 * @qp - the QP
 * @shift - timeout shift to wait for multiple packets
 *
 * Modify a potentially already running retry timer
 */
static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
		  (qp->timeout_jiffies << shift));
}

static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
	return rvt_mod_retry_timer_ext(qp, 0);
}

/**
 * rvt_put_qp_swqe - drop refs held by swqe
 * @qp: the send qp
 * @wqe: the send wqe
 *
 * This drops any references held by the swqe
 */
static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	rvt_put_swqe(wqe);
	if (qp->allowed_ops == IB_OPCODE_UD)
		rdma_destroy_ah_attr(wqe->ud_wr.attr);
}

/**
 * rvt_qp_swqe_incr - increment ring index
 * @qp: the qp
 * @val: the starting value
 *
 * Return: the new value wrapping as appropriate
 */
static inline u32
rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
{
	if (++val >= qp->s_size)
		val = 0;
	return val;
}

int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);

/**
 * rvt_recv_cq - add a new entry to completion queue
 *		 by receive queue
 * @qp: receive queue
 * @wc: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This is a wrapper for rvt_cq_enter() called by the
 * receive queue. If rvt_cq_enter() returns false, the cq is
 * full and the qp is put into the error state.
 */
static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);

	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}

/**
 * rvt_send_cq - add a new entry to completion queue
 *		 by send queue
 * @qp: send queue
 * @wc: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This is a wrapper for rvt_cq_enter() called by the
 * send queue. If rvt_cq_enter() returns false, the cq is
 * full and the qp is put into the error state.
 */
static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);

	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}

/**
 * rvt_qp_complete_swqe - insert send completion
 * @qp - the qp
 * @wqe - the send wqe
 * @opcode - wc operation (driver dependent)
 * @status - completion status
 *
 * Update the s_last information, and then insert a send
 * completion into the completion
 * queue if the qp indicates it should be done.
 *
 * See IBTA 10.7.3.1 for info on completion
 * control.
 *
 * Return: new last
 */
static inline u32
rvt_qp_complete_swqe(struct rvt_qp *qp,
		     struct rvt_swqe *wqe,
		     enum ib_wc_opcode opcode,
		     enum ib_wc_status status)
{
	bool need_completion;
	u64 wr_id;
	u32 byte_len, last;
	int flags = wqe->wr.send_flags;

	rvt_qp_wqe_unreserve(qp, flags);
	rvt_put_qp_swqe(qp, wqe);

	need_completion =
		!(flags & RVT_SEND_RESERVE_USED) &&
		(!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		(flags & IB_SEND_SIGNALED) ||
		status != IB_WC_SUCCESS);
	if (need_completion) {
		wr_id = wqe->wr.wr_id;
		byte_len = wqe->length;
		/* above fields required before writing s_last */
	}
	last = rvt_qp_swqe_incr(qp, qp->s_last);
	/* see rvt_qp_is_avail() */
	smp_store_release(&qp->s_last, last);
	if (need_completion) {
		struct ib_wc w = {
			.wr_id = wr_id,
			.status = status,
			.opcode = opcode,
			.qp = &qp->ibqp,
			.byte_len = byte_len,
		};
		rvt_send_cq(qp, &w, status != IB_WC_SUCCESS);
	}
	return last;
}
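
/*
 * Typical caller pattern, as in a driver's send-completion path (sketch;
 * ib_rvt_wc_opcode[] maps the posted WR opcode to the matching WC opcode):
 *
 *	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
 *	last = rvt_qp_complete_swqe(qp, wqe,
 *				    ib_rvt_wc_opcode[wqe->wr.opcode],
 *				    IB_WC_SUCCESS);
 */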

extern const int  ib_rvt_state_ops[];

struct rvt_dev_info;
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);
static inline void rvt_add_retry_timer(struct rvt_qp *qp)
{
	rvt_add_retry_timer_ext(qp, 0);
}

void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length,
		  bool release, bool copy_last);
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status);
void rvt_ruc_loopback(struct rvt_qp *qp);

/**
 * struct rvt_qp_iter - the iterator for QPs
 * @qp - the current QP
 *
 * This structure defines the current iterator
 * state for sequenced access to all QPs relative
 * to an rvt_dev_info.
 */
struct rvt_qp_iter {
	struct rvt_qp *qp;
	/* private: backpointer */
	struct rvt_dev_info *rdi;
	/* private: callback routine */
	void (*cb)(struct rvt_qp *qp, u64 v);
	/* private: for arg to callback routine */
	u64 v;
	/* private: number of SMI,GSI QPs for device */
	int specials;
	/* private: current iterator index */
	int n;
};

/**
 * ib_cq_tail - Return tail index of cq buffer
 * @send_cq - The cq for send
 *
 * This is called in qp_iter_print to get the tail
 * of the cq buffer.
 */
static inline u32 ib_cq_tail(struct ib_cq *send_cq)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

	return ibcq_to_rvtcq(send_cq)->ip ?
	       RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
	       ibcq_to_rvtcq(send_cq)->kqueue->tail;
}

/**
 * ib_cq_head - Return head index of cq buffer
 * @send_cq - The cq for send
 *
 * This is called in qp_iter_print to get the head
 * of the cq buffer.
 */
static inline u32 ib_cq_head(struct ib_cq *send_cq)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

	return ibcq_to_rvtcq(send_cq)->ip ?
	       RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
	       ibcq_to_rvtcq(send_cq)->kqueue->head;
}

/**
 * rvt_free_rq - free memory allocated for rvt_rq struct
 * @rq: request queue data structure
 *
 * This function should only be called if the rvt_mmap_info()
 * has not succeeded.
 */
static inline void rvt_free_rq(struct rvt_rq *rq)
{
	kvfree(rq->kwq);
	rq->kwq = NULL;
	vfree(rq->wq);
	rq->wq = NULL;
}

/**
 * rvt_to_iport - Get the ibport pointer
 * @qp: the qp pointer
 *
 * This function returns the ibport pointer from the qp pointer.
 */
static inline struct rvt_ibport *rvt_to_iport(struct rvt_qp *qp)
{
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	return rdi->ports[qp->port_num - 1];
}

/**
 * rvt_rc_credit_avail - Check if there are enough RC credits for the request
 * @qp: the qp
 * @wqe: the request
 *
 * This function returns false when there are not enough credits for the given
 * request and true otherwise.
 */
static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	lockdep_assert_held(&qp->s_lock);
	if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
	    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
		struct rvt_ibport *rvp = rvt_to_iport(qp);

		qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
		rvp->n_rc_crwaits++;
		return false;
	}
	return true;
}
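
/*
 * A send engine typically consults this (with s_lock held) before issuing
 * a new RC request; RVT_S_WAIT_SSN_CREDIT is set as a side effect when the
 * QP must stall (sketch; the bail label is illustrative):
 *
 *	if (!rvt_rc_credit_avail(qp, wqe))
 *		goto bail;
 */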

struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v));
int rvt_qp_iter_next(struct rvt_qp_iter *iter);
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v));
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);
#endif          /* DEF_RDMAVT_INCQP_H */