1  /* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
2  /* Copyright (c) 2015 - 2020 Intel Corporation */
3  #ifndef IRDMA_USER_H
4  #define IRDMA_USER_H
5  
/*
 * Opaque handle and scalar type aliases shared by the user-mode and
 * kernel halves of the irdma WQE interface.  These are macros rather
 * than typedefs by existing driver convention.
 */
#define irdma_handle void *
#define irdma_adapter_handle irdma_handle
#define irdma_qp_handle irdma_handle
#define irdma_cq_handle irdma_handle
#define irdma_pd_id irdma_handle
#define irdma_stag_handle irdma_handle
#define irdma_stag_index u32
#define irdma_stag u32
#define irdma_stag_key u8
#define irdma_tagged_offset u64
#define irdma_access_privileges u32
#define irdma_physical_fragment u64
#define irdma_address_list u64 *
#define irdma_sgl struct irdma_sge *
20  
/* Maximum supported memory-region size */
#define	IRDMA_MAX_MR_SIZE       0x200000000000ULL

/*
 * STag access-privilege flag bits.  Note the composite encodings:
 * REMOTEREAD (0x05) = REMOTEREAD_ONLY | LOCALREAD, and
 * REMOTEWRITE (0x0a) = REMOTEWRITE_ONLY | LOCALWRITE.
 */
#define IRDMA_ACCESS_FLAGS_LOCALREAD		0x01
#define IRDMA_ACCESS_FLAGS_LOCALWRITE		0x02
#define IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY	0x04
#define IRDMA_ACCESS_FLAGS_REMOTEREAD		0x05
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY	0x08
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE		0x0a
#define IRDMA_ACCESS_FLAGS_BIND_WINDOW		0x10
#define IRDMA_ACCESS_FLAGS_ZERO_BASED		0x20
#define IRDMA_ACCESS_FLAGS_ALL			0x3f

/*
 * WQE/CQE operation-type encodings.  The values are deliberately
 * non-contiguous (0x02 and 0x07 are absent) — presumably they mirror
 * the hardware opcode encoding, so do not renumber.
 * TODO(review): confirm against the device opcode specification.
 */
#define IRDMA_OP_TYPE_RDMA_WRITE		0x00
#define IRDMA_OP_TYPE_RDMA_READ			0x01
#define IRDMA_OP_TYPE_SEND			0x03
#define IRDMA_OP_TYPE_SEND_INV			0x04
#define IRDMA_OP_TYPE_SEND_SOL			0x05
#define IRDMA_OP_TYPE_SEND_SOL_INV		0x06
#define IRDMA_OP_TYPE_RDMA_WRITE_SOL		0x0d
#define IRDMA_OP_TYPE_BIND_MW			0x08
#define IRDMA_OP_TYPE_FAST_REG_NSMR		0x09
#define IRDMA_OP_TYPE_INV_STAG			0x0a
#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG	0x0b
#define IRDMA_OP_TYPE_NOP			0x0c
#define IRDMA_OP_TYPE_REC	0x3e
#define IRDMA_OP_TYPE_REC_IMM	0x3f

/* Major error code reported for WQEs completed in error by a flush */
#define IRDMA_FLUSH_MAJOR_ERR	1
49  
/*
 * Device capability constants: fixed element sizes and min/max limits
 * for queues, IDs, and message/payload sizes.  The *_SIZE entries for
 * WQEs/CQEs are in units of 64-bit quanta (see struct irdma_cqe and
 * struct irdma_qp_quanta, which use them to size __le64 arrays).
 */
enum irdma_device_caps_const {
	IRDMA_WQE_SIZE =			4,
	IRDMA_CQP_WQE_SIZE =			8,
	IRDMA_CQE_SIZE =			4,
	IRDMA_EXTENDED_CQE_SIZE =		8,
	IRDMA_AEQE_SIZE =			2,
	IRDMA_CEQE_SIZE =			1,
	IRDMA_CQP_CTX_SIZE =			8,
	IRDMA_SHADOW_AREA_SIZE =		8,
	IRDMA_QUERY_FPM_BUF_SIZE =		176,
	IRDMA_COMMIT_FPM_BUF_SIZE =		176,
	IRDMA_GATHER_STATS_BUF_SIZE =		1024,
	IRDMA_MIN_IW_QP_ID =			0,
	IRDMA_MAX_IW_QP_ID =			262143,
	IRDMA_MIN_CEQID =			0,
	IRDMA_MAX_CEQID =			1023,
	IRDMA_CEQ_MAX_COUNT =			IRDMA_MAX_CEQID + 1,
	IRDMA_MIN_CQID =			0,
	IRDMA_MAX_CQID =			524287,
	IRDMA_MIN_AEQ_ENTRIES =			1,
	IRDMA_MAX_AEQ_ENTRIES =			524287,
	IRDMA_MIN_CEQ_ENTRIES =			1,
	IRDMA_MAX_CEQ_ENTRIES =			262143,
	IRDMA_MIN_CQ_SIZE =			1,
	IRDMA_MAX_CQ_SIZE =			1048575,
	IRDMA_DB_ID_ZERO =			0,
	IRDMA_MAX_WQ_FRAGMENT_COUNT =		13,
	IRDMA_MAX_SGE_RD =			13,
	IRDMA_MAX_OUTBOUND_MSG_SIZE =		2147483647,
	IRDMA_MAX_INBOUND_MSG_SIZE =		2147483647,
	IRDMA_MAX_PUSH_PAGE_COUNT =		1024,
	IRDMA_MAX_PE_ENA_VF_COUNT =		32,
	IRDMA_MAX_VF_FPM_ID =			47,
	IRDMA_MAX_SQ_PAYLOAD_SIZE =		2145386496,
	IRDMA_MAX_INLINE_DATA_SIZE =		101,
	IRDMA_MAX_WQ_ENTRIES =			32768,
	IRDMA_Q2_BUF_SIZE =			256,
	IRDMA_QP_CTX_SIZE =			256,
	IRDMA_MAX_PDS =				262144,
};
90  
/* Addressing mode for memory access: zero-based or virtual-address-based */
enum irdma_addressing_type {
	IRDMA_ADDR_TYPE_ZERO_BASED = 0,
	IRDMA_ADDR_TYPE_VA_BASED   = 1,
};
95  
/* Error classes reported for completions generated by a QP flush */
enum irdma_flush_opcode {
	FLUSH_INVALID = 0,
	FLUSH_GENERAL_ERR,
	FLUSH_PROT_ERR,
	FLUSH_REM_ACCESS_ERR,
	FLUSH_LOC_QP_OP_ERR,
	FLUSH_REM_OP_ERR,
	FLUSH_LOC_LEN_ERR,
	FLUSH_FATAL_ERR,
	FLUSH_RETRY_EXC_ERR,
	FLUSH_MW_BIND_ERR,
};
108  
/* Completion status codes surfaced in irdma_cq_poll_info.comp_status */
enum irdma_cmpl_status {
	IRDMA_COMPL_STATUS_SUCCESS = 0,
	IRDMA_COMPL_STATUS_FLUSHED,
	IRDMA_COMPL_STATUS_INVALID_WQE,
	IRDMA_COMPL_STATUS_QP_CATASTROPHIC,
	IRDMA_COMPL_STATUS_REMOTE_TERMINATION,
	IRDMA_COMPL_STATUS_INVALID_STAG,
	IRDMA_COMPL_STATUS_BASE_BOUND_VIOLATION,
	IRDMA_COMPL_STATUS_ACCESS_VIOLATION,
	IRDMA_COMPL_STATUS_INVALID_PD_ID,
	IRDMA_COMPL_STATUS_WRAP_ERROR,
	IRDMA_COMPL_STATUS_STAG_INVALID_PDID,
	IRDMA_COMPL_STATUS_RDMA_READ_ZERO_ORD,
	IRDMA_COMPL_STATUS_QP_NOT_PRIVLEDGED,
	IRDMA_COMPL_STATUS_STAG_NOT_INVALID,
	IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_SIZE,
	IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY,
	IRDMA_COMPL_STATUS_INVALID_FBO,
	IRDMA_COMPL_STATUS_INVALID_LEN,
	IRDMA_COMPL_STATUS_INVALID_ACCESS,
	IRDMA_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG,
	IRDMA_COMPL_STATUS_INVALID_VIRT_ADDRESS,
	IRDMA_COMPL_STATUS_INVALID_REGION,
	IRDMA_COMPL_STATUS_INVALID_WINDOW,
	IRDMA_COMPL_STATUS_INVALID_TOTAL_LEN,
	IRDMA_COMPL_STATUS_UNKNOWN,
};
136  
/* CQ arming modes: notify on any completion, or solicited events only */
enum irdma_cmpl_notify {
	IRDMA_CQ_COMPL_EVENT     = 0,
	IRDMA_CQ_COMPL_SOLICITED = 1,
};
141  
/* QP capability flag bits (combined in irdma_qp_uk.qp_caps) */
enum irdma_qp_caps {
	IRDMA_WRITE_WITH_IMM = 1,
	IRDMA_SEND_WITH_IMM  = 2,
	IRDMA_ROCE	     = 4,
	IRDMA_PUSH_MODE      = 8,
};
148  
/* Forward declarations; the full definitions appear later in this header */
struct irdma_qp_uk;
struct irdma_cq_uk;
struct irdma_qp_uk_init_info;
struct irdma_cq_uk_init_info;
153  
/* Scatter/gather element: tagged offset, byte length, and STag */
struct irdma_sge {
	irdma_tagged_offset tag_off;	/* 64-bit tagged offset (buffer address) */
	u32 len;			/* length in bytes */
	irdma_stag stag;		/* steering tag covering the buffer */
};
159  
/* Circular ring bookkeeping: producer head, consumer tail, entry count */
struct irdma_ring {
	u32 head;
	u32 tail;
	u32 size;
};
165  
/* One completion queue entry: IRDMA_CQE_SIZE (4) little-endian 64-bit words */
struct irdma_cqe {
	__le64 buf[IRDMA_CQE_SIZE];
};
169  
/* Extended CQE: IRDMA_EXTENDED_CQE_SIZE (8) little-endian 64-bit words */
struct irdma_extended_cqe {
	__le64 buf[IRDMA_EXTENDED_CQE_SIZE];
};
173  
/*
 * Send work request payload.  qkey/dest_qp/ah_id carry datagram
 * addressing — presumably used only for UD-style QPs; TODO(review)
 * confirm against the posting code in the .c file.
 */
struct irdma_post_send {
	irdma_sgl sg_list;	/* source scatter/gather list */
	u32 num_sges;		/* number of entries in sg_list */
	u32 qkey;
	u32 dest_qp;
	u32 ah_id;
};
181  
/* Inline-send payload: data is copied into the WQE rather than referenced */
struct irdma_post_inline_send {
	void *data;	/* inline source buffer */
	u32 len;	/* bytes of inline data (bounded by max_inline_data) */
	u32 qkey;
	u32 dest_qp;
	u32 ah_id;
};
189  
/* Receive work request: caller's wr_id plus the sink scatter/gather list */
struct irdma_post_rq_info {
	u64 wr_id;	/* opaque id returned in the matching completion */
	irdma_sgl sg_list;
	u32 num_sges;
};
195  
/* RDMA write payload: local source SGEs and the remote destination SGE */
struct irdma_rdma_write {
	irdma_sgl lo_sg_list;	/* local (source) scatter/gather list */
	u32 num_lo_sges;
	struct irdma_sge rem_addr;	/* remote address + RKey/STag */
};
201  
/* Inline RDMA write payload: data copied into the WQE, remote target SGE */
struct irdma_inline_rdma_write {
	void *data;
	u32 len;
	struct irdma_sge rem_addr;
};
207  
/* RDMA read payload: local sink SGEs and the remote source SGE */
struct irdma_rdma_read {
	irdma_sgl lo_sg_list;	/* local (sink) scatter/gather list */
	u32 num_lo_sges;
	struct irdma_sge rem_addr;	/* remote address + RKey/STag */
};
213  
/* Memory-window bind request parameters */
struct irdma_bind_window {
	irdma_stag mr_stag;	/* STag of the parent memory region */
	u64 bind_len;		/* length of the window in bytes */
	void *va;		/* base VA (interpretation per addressing_type) */
	enum irdma_addressing_type addressing_type;
	bool ena_reads:1;	/* grant remote read through the window */
	bool ena_writes:1;	/* grant remote write through the window */
	irdma_stag mw_stag;	/* STag to assign to the bound window */
	bool mem_window_type_1:1;	/* type-1 (vs type-2) memory window */
};
224  
/* Local-STag-invalidate request: the STag to invalidate */
struct irdma_inv_local_stag {
	irdma_stag target_stag;
};
228  
/*
 * Generic SQ work-request descriptor.  op_type (an IRDMA_OP_TYPE_*
 * value) selects which member of the op union carries the payload.
 */
struct irdma_post_sq_info {
	u64 wr_id;		/* opaque id returned in the completion */
	u8 op_type;		/* IRDMA_OP_TYPE_* selector for the union */
	u8 l4len;
	bool signaled:1;	/* request a completion for this WQE */
	bool read_fence:1;
	bool local_fence:1;
	bool inline_data:1;	/* payload is inline (inline_* union members) */
	bool imm_data_valid:1;	/* imm_data carries immediate data */
	bool push_wqe:1;	/* use push-mode doorbell for this WQE */
	bool report_rtt:1;
	bool udp_hdr:1;
	bool defer_flag:1;
	u32 imm_data;
	u32 stag_to_inv;	/* STag for send-with-invalidate variants */
	union {
		struct irdma_post_send send;
		struct irdma_rdma_write rdma_write;
		struct irdma_rdma_read rdma_read;
		struct irdma_bind_window bind_window;
		struct irdma_inv_local_stag inv_local_stag;
		struct irdma_inline_rdma_write inline_rdma_write;
		struct irdma_post_inline_send inline_send;
	} op;
254  
/* Decoded completion returned by irdma_uk_cq_poll_cmpl() */
struct irdma_cq_poll_info {
	u64 wr_id;		/* wr_id from the originating work request */
	irdma_qp_handle qp_handle;
	u32 bytes_xfered;
	u32 tcp_seq_num_rtt;
	u32 qp_id;
	u32 ud_src_qpn;
	u32 imm_data;		/* valid only when imm_valid is set */
	irdma_stag inv_stag; /* or L_R_Key */
	enum irdma_cmpl_status comp_status;
	u16 major_err;
	u16 minor_err;
	u16 ud_vlan;		/* valid only when ud_vlan_valid is set */
	u8 ud_smac[6];		/* valid only when ud_smac_valid is set */
	u8 op_type;
	bool stag_invalid_set:1; /* or L_R_Key set */
	bool push_dropped:1;
	bool error:1;		/* completion indicates an error status */
	bool solicited_event:1;
	bool ipv4:1;
	bool ud_vlan_valid:1;
	bool ud_smac_valid:1;
	bool imm_valid:1;
};
279  
/*
 * SQ/RQ work-request posting primitives.  Each takes the QP and a
 * filled-in request descriptor; post_sq presumably controls whether
 * the doorbell is rung immediately — TODO(review): confirm in the
 * implementation file.
 */
enum irdma_status_code irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
						  struct irdma_post_sq_info *info,
						  bool post_sq);
enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
					    struct irdma_post_sq_info *info,
					    bool post_sq);
enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
					struct irdma_post_sq_info *info,
					bool post_sq);
enum irdma_status_code irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id,
					 bool signaled, bool post_sq);
enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
					     struct irdma_post_rq_info *info);
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp);
enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
					  struct irdma_post_sq_info *info,
					  bool inv_stag, bool post_sq);
enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
					   struct irdma_post_sq_info *info,
					   bool post_sq);
enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
				     struct irdma_post_sq_info *info, bool post_sq);
enum irdma_status_code irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
						      struct irdma_post_sq_info *info,
						      bool post_sq);
305  
/*
 * Pluggable WQE-construction callbacks (installed in irdma_qp_uk.wqe_ops),
 * allowing the low-level WQE layout to vary without changing callers.
 */
struct irdma_wqe_uk_ops {
	/* copy inline payload into a WQE, stamping the validity polarity */
	void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
	/* number of WQE quanta needed for a given inline data size */
	u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
	/* write one SGE fragment at the given offset within a WQE */
	void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct irdma_sge *sge,
				u8 valid);
	/* populate a memory-window bind WQE from the bind parameters */
	void (*iw_set_mw_bind_wqe)(__le64 *wqe,
				   struct irdma_bind_window *op_info);
};
314  
315  enum irdma_status_code irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
316  					     struct irdma_cq_poll_info *info);
317  void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
318  				      enum irdma_cmpl_notify cq_notify);
319  void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size);
320  void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt);
321  enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
322  					struct irdma_cq_uk_init_info *info);
323  enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
324  					struct irdma_qp_uk_init_info *info);
/* Per-SQ-entry tracking record, one per slot in sq_wrtrk_array */
struct irdma_sq_uk_wr_trk_info {
	u64 wrid;	/* caller's wr_id for this work request */
	u32 wr_len;	/* total payload length in bytes */
	u16 quanta;	/* WQE size in quanta for this request */
	u8 reserved[2];	/* pad to 16 bytes */
};
331  
/* One WQE quantum: IRDMA_WQE_SIZE (4) little-endian 64-bit words */
struct irdma_qp_quanta {
	__le64 elem[IRDMA_WQE_SIZE];
};
335  
/*
 * User-kernel shared QP state: SQ/RQ buffers, doorbells, ring
 * bookkeeping, and polarity/flush tracking for WQE validity.
 */
struct irdma_qp_uk {
	struct irdma_qp_quanta *sq_base;	/* SQ WQE buffer */
	struct irdma_qp_quanta *rq_base;	/* RQ WQE buffer */
	struct irdma_uk_attrs *uk_attrs;
	u32 __iomem *wqe_alloc_db;		/* doorbell register */
	struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;	/* per-SQ-slot tracking */
	u64 *rq_wrid_array;			/* per-RQ-slot wr_ids */
	__le64 *shadow_area;
	__le32 *push_db;			/* push-mode doorbell page */
	__le64 *push_wqe;			/* push-mode WQE buffer */
	struct irdma_ring sq_ring;
	struct irdma_ring rq_ring;
	struct irdma_ring initial_ring;
	u32 qp_id;
	u32 qp_caps;				/* enum irdma_qp_caps bitmask */
	u32 sq_size;
	u32 rq_size;
	u32 max_sq_frag_cnt;
	u32 max_rq_frag_cnt;
	u32 max_inline_data;
	struct irdma_wqe_uk_ops wqe_ops;	/* WQE build callbacks */
	u16 conn_wqes;
	u8 qp_type;
	u8 swqe_polarity;			/* current SQ WQE validity bit */
	u8 swqe_polarity_deferred;
	u8 rwqe_polarity;			/* current RQ WQE validity bit */
	u8 rq_wqe_size;
	u8 rq_wqe_size_multiplier;
	bool deferred_flag:1;
	bool push_mode:1; /* whether the last post wqe was pushed */
	bool push_dropped:1;
	bool first_sq_wq:1;
	bool sq_flush_complete:1; /* Indicates flush was seen and SQ was empty after the flush */
	bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
	bool destroy_pending:1; /* Indicates the QP is being destroyed */
	void *back_qp;		/* back-pointer to the owning QP object */
	spinlock_t *lock;
	u8 dbg_rq_flushed;
	u8 sq_flush_seen;
	u8 rq_flush_seen;
};
377  
/* User-kernel shared CQ state: CQE buffer, doorbells, and ring tracking */
struct irdma_cq_uk {
	struct irdma_cqe *cq_base;	/* CQE buffer */
	u32 __iomem *cqe_alloc_db;
	u32 __iomem *cq_ack_db;
	__le64 *shadow_area;
	u32 cq_id;
	u32 cq_size;
	struct irdma_ring cq_ring;
	u8 polarity;			/* current CQE validity bit */
	bool avoid_mem_cflct:1;
};
389  
/* Caller-supplied parameters for irdma_uk_qp_init() */
struct irdma_qp_uk_init_info {
	struct irdma_qp_quanta *sq;	/* SQ WQE buffer */
	struct irdma_qp_quanta *rq;	/* RQ WQE buffer */
	struct irdma_uk_attrs *uk_attrs;
	u32 __iomem *wqe_alloc_db;
	__le64 *shadow_area;
	struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
	u64 *rq_wrid_array;
	u32 qp_id;
	u32 qp_caps;		/* enum irdma_qp_caps bitmask */
	u32 sq_size;
	u32 rq_size;
	u32 max_sq_frag_cnt;
	u32 max_rq_frag_cnt;
	u32 max_inline_data;
	u8 first_sq_wq;
	u8 type;		/* QP type */
	int abi_ver;		/* user/kernel ABI version negotiated at create */
	bool legacy_mode;
};
410  
/* Caller-supplied parameters for irdma_uk_cq_init() */
struct irdma_cq_uk_init_info {
	u32 __iomem *cqe_alloc_db;
	u32 __iomem *cq_ack_db;
	struct irdma_cqe *cq_base;	/* CQE buffer */
	__le64 *shadow_area;
	u32 cq_size;
	u32 cq_id;
	bool avoid_mem_cflct;
};
420  
/* WQE allocation, CQ cleanup, and sizing/quanta helper declarations. */
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
				   u16 quanta, u32 total_size,
				   struct irdma_post_sq_info *info);
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
/* remove completions belonging to queue q from the CQ */
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,
				 bool signaled, bool post_sq);
/* convert an SGE fragment count to WQE quanta / RQ WQE size */
enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
			 u32 inline_data, u8 *shift);
enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs,
					 u32 sq_size, u8 shift, u32 *wqdepth);
enum irdma_status_code irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs,
					 u32 rq_size, u8 shift, u32 *wqdepth);
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
		       u32 wqe_idx, bool post_sq);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
439  #endif /* IRDMA_USER_H */
440