/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_TYPE_H
#define IRDMA_TYPE_H
#include "status.h"
#include "osdep.h"
#include "irdma.h"
#include "user.h"
#include "hmc.h"
#include "uda.h"
#include "ws.h"
#define IRDMA_DEBUG_ERR		"ERR"
#define IRDMA_DEBUG_INIT	"INIT"
#define IRDMA_DEBUG_DEV		"DEV"
#define IRDMA_DEBUG_CM		"CM"
#define IRDMA_DEBUG_VERBS	"VERBS"
#define IRDMA_DEBUG_PUDA	"PUDA"
#define IRDMA_DEBUG_ILQ		"ILQ"
#define IRDMA_DEBUG_IEQ		"IEQ"
#define IRDMA_DEBUG_QP		"QP"
#define IRDMA_DEBUG_CQ		"CQ"
#define IRDMA_DEBUG_MR		"MR"
#define IRDMA_DEBUG_PBLE	"PBLE"
#define IRDMA_DEBUG_WQE		"WQE"
#define IRDMA_DEBUG_AEQ		"AEQ"
#define IRDMA_DEBUG_CQP		"CQP"
#define IRDMA_DEBUG_HMC		"HMC"
#define IRDMA_DEBUG_USER	"USER"
#define IRDMA_DEBUG_VIRT	"VIRT"
#define IRDMA_DEBUG_DCB		"DCB"
#define IRDMA_DEBUG_CQE		"CQE"
#define IRDMA_DEBUG_CLNT	"CLNT"
#define IRDMA_DEBUG_WS		"WS"
#define IRDMA_DEBUG_STATS	"STATS"

enum irdma_page_size {
	IRDMA_PAGE_SIZE_4K = 0,
	IRDMA_PAGE_SIZE_2M,
	IRDMA_PAGE_SIZE_1G,
};

enum irdma_hdrct_flags {
	DDP_LEN_FLAG  = 0x80,
	DDP_HDR_FLAG  = 0x40,
	RDMA_HDR_FLAG = 0x20,
};

enum irdma_term_layers {
	LAYER_RDMA = 0,
	LAYER_DDP  = 1,
	LAYER_MPA  = 2,
};

enum irdma_term_error_types {
	RDMAP_REMOTE_PROT = 1,
	RDMAP_REMOTE_OP   = 2,
	DDP_CATASTROPHIC  = 0,
	DDP_TAGGED_BUF    = 1,
	DDP_UNTAGGED_BUF  = 2,
	DDP_LLP		  = 3,
};

enum irdma_term_rdma_errors {
	RDMAP_INV_STAG		  = 0x00,
	RDMAP_INV_BOUNDS	  = 0x01,
	RDMAP_ACCESS		  = 0x02,
	RDMAP_UNASSOC_STAG	  = 0x03,
	RDMAP_TO_WRAP		  = 0x04,
	RDMAP_INV_RDMAP_VER       = 0x05,
	RDMAP_UNEXPECTED_OP       = 0x06,
	RDMAP_CATASTROPHIC_LOCAL  = 0x07,
	RDMAP_CATASTROPHIC_GLOBAL = 0x08,
	RDMAP_CANT_INV_STAG       = 0x09,
	RDMAP_UNSPECIFIED	  = 0xff,
};

enum irdma_term_ddp_errors {
	DDP_CATASTROPHIC_LOCAL      = 0x00,
	DDP_TAGGED_INV_STAG	    = 0x00,
	DDP_TAGGED_BOUNDS	    = 0x01,
	DDP_TAGGED_UNASSOC_STAG     = 0x02,
	DDP_TAGGED_TO_WRAP	    = 0x03,
	DDP_TAGGED_INV_DDP_VER      = 0x04,
	DDP_UNTAGGED_INV_QN	    = 0x01,
	DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
	DDP_UNTAGGED_INV_MSN_RANGE  = 0x03,
	DDP_UNTAGGED_INV_MO	    = 0x04,
	DDP_UNTAGGED_INV_TOO_LONG   = 0x05,
	DDP_UNTAGGED_INV_DDP_VER    = 0x06,
};

enum irdma_term_mpa_errors {
	MPA_CLOSED  = 0x01,
	MPA_CRC     = 0x02,
	MPA_MARKER  = 0x03,
	MPA_REQ_RSP = 0x04,
};

enum irdma_qp_event_type {
	IRDMA_QP_EVENT_CATASTROPHIC,
	IRDMA_QP_EVENT_ACCESS_ERR,
};

enum irdma_hw_stats_index_32b {
	IRDMA_HW_STAT_INDEX_IP4RXDISCARD	= 0,
	IRDMA_HW_STAT_INDEX_IP4RXTRUNC		= 1,
	IRDMA_HW_STAT_INDEX_IP4TXNOROUTE	= 2,
	IRDMA_HW_STAT_INDEX_IP6RXDISCARD	= 3,
	IRDMA_HW_STAT_INDEX_IP6RXTRUNC		= 4,
	IRDMA_HW_STAT_INDEX_IP6TXNOROUTE	= 5,
	IRDMA_HW_STAT_INDEX_TCPRTXSEG		= 6,
	IRDMA_HW_STAT_INDEX_TCPRXOPTERR		= 7,
	IRDMA_HW_STAT_INDEX_TCPRXPROTOERR	= 8,
	IRDMA_HW_STAT_INDEX_MAX_32_GEN_1	= 9, /* Must be same value as next entry */
	IRDMA_HW_STAT_INDEX_RXVLANERR		= 9,
	IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED	= 10,
	IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED	= 11,
	IRDMA_HW_STAT_INDEX_TXNPCNPSENT		= 12,
	IRDMA_HW_STAT_INDEX_MAX_32, /* Must be last entry */
};

enum irdma_hw_stats_index_64b {
	IRDMA_HW_STAT_INDEX_IP4RXOCTS	= 0,
	IRDMA_HW_STAT_INDEX_IP4RXPKTS	= 1,
	IRDMA_HW_STAT_INDEX_IP4RXFRAGS	= 2,
	IRDMA_HW_STAT_INDEX_IP4RXMCPKTS	= 3,
	IRDMA_HW_STAT_INDEX_IP4TXOCTS	= 4,
	IRDMA_HW_STAT_INDEX_IP4TXPKTS	= 5,
	IRDMA_HW_STAT_INDEX_IP4TXFRAGS	= 6,
	IRDMA_HW_STAT_INDEX_IP4TXMCPKTS	= 7,
	IRDMA_HW_STAT_INDEX_IP6RXOCTS	= 8,
	IRDMA_HW_STAT_INDEX_IP6RXPKTS	= 9,
	IRDMA_HW_STAT_INDEX_IP6RXFRAGS	= 10,
	IRDMA_HW_STAT_INDEX_IP6RXMCPKTS	= 11,
	IRDMA_HW_STAT_INDEX_IP6TXOCTS	= 12,
	IRDMA_HW_STAT_INDEX_IP6TXPKTS	= 13,
	IRDMA_HW_STAT_INDEX_IP6TXFRAGS	= 14,
	IRDMA_HW_STAT_INDEX_IP6TXMCPKTS	= 15,
	IRDMA_HW_STAT_INDEX_TCPRXSEGS	= 16,
	IRDMA_HW_STAT_INDEX_TCPTXSEG	= 17,
	IRDMA_HW_STAT_INDEX_RDMARXRDS	= 18,
	IRDMA_HW_STAT_INDEX_RDMARXSNDS	= 19,
	IRDMA_HW_STAT_INDEX_RDMARXWRS	= 20,
	IRDMA_HW_STAT_INDEX_RDMATXRDS	= 21,
	IRDMA_HW_STAT_INDEX_RDMATXSNDS	= 22,
	IRDMA_HW_STAT_INDEX_RDMATXWRS	= 23,
	IRDMA_HW_STAT_INDEX_RDMAVBND	= 24,
	IRDMA_HW_STAT_INDEX_RDMAVINV	= 25,
	IRDMA_HW_STAT_INDEX_MAX_64_GEN_1 = 26, /* Must be same value as next entry */
	IRDMA_HW_STAT_INDEX_IP4RXMCOCTS	= 26,
	IRDMA_HW_STAT_INDEX_IP4TXMCOCTS	= 27,
	IRDMA_HW_STAT_INDEX_IP6RXMCOCTS	= 28,
	IRDMA_HW_STAT_INDEX_IP6TXMCOCTS	= 29,
	IRDMA_HW_STAT_INDEX_UDPRXPKTS	= 30,
	IRDMA_HW_STAT_INDEX_UDPTXPKTS	= 31,
	IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 32,
	IRDMA_HW_STAT_INDEX_MAX_64, /* Must be last entry */
};

enum irdma_feature_type {
	IRDMA_FEATURE_FW_INFO = 0,
	IRDMA_HW_VERSION_INFO = 1,
	IRDMA_QSETS_MAX       = 26,
	IRDMA_MAX_FEATURES, /* Must be last entry */
};

enum irdma_sched_prio_type {
	IRDMA_PRIO_WEIGHTED_RR     = 1,
	IRDMA_PRIO_STRICT	   = 2,
	IRDMA_PRIO_WEIGHTED_STRICT = 3,
};

enum irdma_vm_vf_type {
	IRDMA_VF_TYPE = 0,
	IRDMA_VM_TYPE,
	IRDMA_PF_TYPE,
};

enum irdma_cqp_hmc_profile {
	IRDMA_HMC_PROFILE_DEFAULT  = 1,
	IRDMA_HMC_PROFILE_FAVOR_VF = 2,
	IRDMA_HMC_PROFILE_EQUAL    = 3,
};

enum irdma_quad_entry_type {
	IRDMA_QHASH_TYPE_TCP_ESTABLISHED = 1,
	IRDMA_QHASH_TYPE_TCP_SYN,
	IRDMA_QHASH_TYPE_UDP_UNICAST,
	IRDMA_QHASH_TYPE_UDP_MCAST,
	IRDMA_QHASH_TYPE_ROCE_MCAST,
	IRDMA_QHASH_TYPE_ROCEV2_HW,
};

enum irdma_quad_hash_manage_type {
	IRDMA_QHASH_MANAGE_TYPE_DELETE = 0,
	IRDMA_QHASH_MANAGE_TYPE_ADD,
	IRDMA_QHASH_MANAGE_TYPE_MODIFY,
};

enum irdma_syn_rst_handling {
	IRDMA_SYN_RST_HANDLING_HW_TCP_SECURE = 0,
	IRDMA_SYN_RST_HANDLING_HW_TCP,
	IRDMA_SYN_RST_HANDLING_FW_TCP_SECURE,
	IRDMA_SYN_RST_HANDLING_FW_TCP,
};

enum irdma_queue_type {
	IRDMA_QUEUE_TYPE_SQ_RQ = 0,
	IRDMA_QUEUE_TYPE_CQP,
};

struct irdma_sc_dev;
struct irdma_vsi_pestat;

struct irdma_dcqcn_cc_params {
	u8 cc_cfg_valid;
	u8 min_dec_factor;
	u8 min_rate;
	u8 dcqcn_f;
	u16 rai_factor;
	u16 hai_factor;
	u16 dcqcn_t;
	u32 dcqcn_b;
	u32 rreduce_mperiod;
};

struct irdma_cqp_init_info {
	u64 cqp_compl_ctx;
	u64 host_ctx_pa;
	u64 sq_pa;
	struct irdma_sc_dev *dev;
	struct irdma_cqp_quanta *sq;
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;
	u32 sq_size;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 hmc_profile;
	u8 ena_vf_count;
	u8 ceqs_per_vf;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	enum irdma_protocol_used protocol_used;
};

struct irdma_terminate_hdr {
	u8 layer_etype;
	u8 error_code;
	u8 hdrct;
	u8 rsvd;
};

struct irdma_cqp_sq_wqe {
	__le64 buf[IRDMA_CQP_WQE_SIZE];
};

struct irdma_sc_aeqe {
	__le64 buf[IRDMA_AEQE_SIZE];
};

struct irdma_ceqe {
	__le64 buf[IRDMA_CEQE_SIZE];
};

struct irdma_cqp_ctx {
	__le64 buf[IRDMA_CQP_CTX_SIZE];
};

struct irdma_cq_shadow_area {
	__le64 buf[IRDMA_SHADOW_AREA_SIZE];
};

struct irdma_dev_hw_stats_offsets {
	u32 stats_offset_32[IRDMA_HW_STAT_INDEX_MAX_32];
	u32 stats_offset_64[IRDMA_HW_STAT_INDEX_MAX_64];
};

struct irdma_dev_hw_stats {
	u64 stats_val_32[IRDMA_HW_STAT_INDEX_MAX_32];
	u64 stats_val_64[IRDMA_HW_STAT_INDEX_MAX_64];
};

struct irdma_gather_stats {
	u32 rsvd1;
	u32 rxvlanerr;
	u64 ip4rxocts;
	u64 ip4rxpkts;
	u32 ip4rxtrunc;
	u32 ip4rxdiscard;
	u64 ip4rxfrags;
	u64 ip4rxmcocts;
	u64 ip4rxmcpkts;
	u64 ip6rxocts;
	u64 ip6rxpkts;
	u32 ip6rxtrunc;
	u32 ip6rxdiscard;
	u64 ip6rxfrags;
	u64 ip6rxmcocts;
	u64 ip6rxmcpkts;
	u64 ip4txocts;
	u64 ip4txpkts;
	u64 ip4txfrag;
	u64 ip4txmcocts;
	u64 ip4txmcpkts;
	u64 ip6txocts;
	u64 ip6txpkts;
	u64 ip6txfrags;
	u64 ip6txmcocts;
	u64 ip6txmcpkts;
	u32 ip6txnoroute;
	u32 ip4txnoroute;
	u64 tcprxsegs;
	u32 tcprxprotoerr;
	u32 tcprxopterr;
	u64 tcptxsegs;
	u32 rsvd2;
	u32 tcprtxseg;
	u64 udprxpkts;
	u64 udptxpkts;
	u64 rdmarxwrs;
	u64 rdmarxrds;
	u64 rdmarxsnds;
	u64 rdmatxwrs;
	u64 rdmatxrds;
	u64 rdmatxsnds;
	u64 rdmavbn;
	u64 rdmavinv;
	u64 rxnpecnmrkpkts;
	u32 rxrpcnphandled;
	u32 rxrpcnpignored;
	u32 txnpcnpsent;
	u32 rsvd3[88];
};

struct irdma_stats_gather_info {
	bool use_hmc_fcn_index:1;
	bool use_stats_inst:1;
	u8 hmc_fcn_index;
	u8 stats_inst_index;
	struct irdma_dma_mem stats_buff_mem;
	void *gather_stats_va;
	void *last_gather_stats_va;
};

struct irdma_vsi_pestat {
	struct irdma_hw *hw;
	struct irdma_dev_hw_stats hw_stats;
	struct irdma_stats_gather_info gather_info;
	struct timer_list stats_timer;
	struct irdma_sc_vsi *vsi;
	struct irdma_dev_hw_stats last_hw_stats;
	spinlock_t lock; /* rdma stats lock */
};

struct irdma_hw {
	u8 __iomem *hw_addr;
	u8 __iomem *priv_hw_addr;
	struct device *device;
	struct irdma_hmc_info hmc;
};

struct irdma_pfpdu {
	struct list_head rxlist;
	u32 rcv_nxt;
	u32 fps;
	u32 max_fpdu_data;
	u32 nextseqnum;
	u32 rcv_start_seq;
	bool mode:1;
	bool mpa_crc_err:1;
	u8  marker_len;
	u64 total_ieq_bufs;
	u64 fpdu_processed;
	u64 bad_seq_num;
	u64 crc_err;
	u64 no_tx_bufs;
	u64 tx_err;
	u64 out_of_order;
	u64 pmode_count;
	struct irdma_sc_ah *ah;
	struct irdma_puda_buf *ah_buf;
	spinlock_t lock; /* fpdu processing lock */
	struct irdma_puda_buf *lastrcv_buf;
};

struct irdma_sc_pd {
	struct irdma_sc_dev *dev;
	u32 pd_id;
	int abi_ver;
};

struct irdma_cqp_quanta {
	__le64 elem[IRDMA_CQP_WQE_SIZE];
};

struct irdma_sc_cqp {
	u32 size;
	u64 sq_pa;
	u64 host_ctx_pa;
	void *back_cqp;
	struct irdma_sc_dev *dev;
	enum irdma_status_code (*process_cqp_sds)(struct irdma_sc_dev *dev,
						  struct irdma_update_sds_info *info);
	struct irdma_dma_mem sdbuf;
	struct irdma_ring sq_ring;
	struct irdma_cqp_quanta *sq_base;
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;
	u32 cqp_id;
	u32 sq_size;
	u32 hw_sq_size;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 polarity;
	u8 hmc_profile;
	u8 ena_vf_count;
	u8 timeout_count;
	u8 ceqs_per_vf;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	enum irdma_protocol_used protocol_used;
};

struct irdma_sc_aeq {
	u32 size;
	u64 aeq_elem_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_aeqe *aeqe_base;
	void *pbl_list;
	u32 elem_cnt;
	struct irdma_ring aeq_ring;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	u32 msix_idx;
	u8 polarity;
	bool virtual_map:1;
};

struct irdma_sc_ceq {
	u32 size;
	u64 ceq_elem_pa;
	struct irdma_sc_dev *dev;
	struct irdma_ceqe *ceqe_base;
	void *pbl_list;
	u32 ceq_id;
	u32 elem_cnt;
	struct irdma_ring ceq_ring;
	u8 pbl_chunk_size;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	u8 polarity;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_cq **reg_cq;
	u32 reg_cq_size;
	spinlock_t req_cq_lock; /* protect access to reg_cq array */
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
};

struct irdma_sc_cq {
	struct irdma_cq_uk cq_uk;
	u64 cq_pa;
	u64 shadow_area_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_vsi *vsi;
	void *pbl_list;
	void *back_cq;
	u32 ceq_id;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u8 cq_type;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	bool ceqe_mask:1;
	bool virtual_map:1;
	bool check_overflow:1;
	bool ceq_id_valid:1;
	bool tph_en;
};

struct irdma_sc_qp {
	struct irdma_qp_uk qp_uk;
	u64 sq_pa;
	u64 rq_pa;
	u64 hw_host_ctx_pa;
	u64 shadow_area_pa;
	u64 q2_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_pd *pd;
	__le64 *hw_host_ctx;
	void *llp_stream_handle;
	struct irdma_pfpdu pfpdu;
	u32 ieq_qp;
	u8 *q2_buf;
	u64 qp_compl_ctx;
	u32 push_idx;
	u16 qs_handle;
	u16 push_offset;
	u8 flush_wqes_count;
	u8 sq_tph_val;
	u8 rq_tph_val;
	u8 qp_state;
	u8 hw_sq_size;
	u8 hw_rq_size;
	u8 src_mac_addr_idx;
	bool on_qoslist:1;
	bool ieq_pass_thru:1;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
	bool flush_sq:1;
	bool flush_rq:1;
	bool sq_flush_code:1;
	bool rq_flush_code:1;
	enum irdma_flush_opcode flush_code;
	enum irdma_qp_event_type event_type;
	u8 term_flags;
	u8 user_pri;
	struct list_head list;
};

struct irdma_stats_inst_info {
	bool use_hmc_fcn_index;
	u8 hmc_fn_id;
	u8 stats_idx;
};

struct irdma_up_info {
	u8 map[8];
	u8 cnp_up_override;
	u8 hmc_fcn_idx;
	bool use_vlan:1;
	bool use_cnp_up_override:1;
};

#define IRDMA_MAX_WS_NODES	0x3FF
#define IRDMA_WS_NODE_INVALID	0xFFFF

struct irdma_ws_node_info {
	u16 id;
	u16 vsi;
	u16 parent_id;
	u16 qs_handle;
	bool type_leaf:1;
	bool enable:1;
	u8 prio_type;
	u8 tc;
	u8 weight;
};

struct irdma_hmc_fpm_misc {
	u32 max_ceqs;
	u32 max_sds;
	u32 xf_block_size;
	u32 q1_block_size;
	u32 ht_multiplier;
	u32 timer_bucket;
	u32 rrf_block_size;
	u32 ooiscf_block_size;
};

#define IRDMA_LEAF_DEFAULT_REL_BW		64
#define IRDMA_PARENT_DEFAULT_REL_BW		1

struct irdma_qos {
	struct list_head qplist;
	struct mutex qos_mutex; /* protect QoS attributes per QoS level */
	u64 lan_qos_handle;
	u32 l2_sched_node_id;
	u16 qs_handle;
	u8 traffic_class;
	u8 rel_bw;
	u8 prio_type;
	bool valid;
};

#define IRDMA_INVALID_FCN_ID 0xff
struct irdma_sc_vsi {
	u16 vsi_idx;
	struct irdma_sc_dev *dev;
	void *back_vsi;
	u32 ilq_count;
	struct irdma_virt_mem ilq_mem;
	struct irdma_puda_rsrc *ilq;
	u32 ieq_count;
	struct irdma_virt_mem ieq_mem;
	struct irdma_puda_rsrc *ieq;
	u32 exception_lan_q;
	u16 mtu;
	u16 vm_id;
	u8 fcn_id;
	enum irdma_vm_vf_type vm_vf_type;
	bool stats_fcn_id_alloc:1;
	bool tc_change_pending:1;
	struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
	struct irdma_vsi_pestat *pestat;
	atomic_t qp_suspend_reqs;
	enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi,
						struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
	u8 qos_rel_bw;
	u8 qos_prio_type;
};

struct irdma_sc_dev {
	struct list_head cqp_cmd_head; /* head of the CQP command list */
	spinlock_t cqp_lock; /* protect CQP list access */
	bool fcn_id_array[IRDMA_MAX_STATS_COUNT];
	struct irdma_dma_mem vf_fpm_query_buf[IRDMA_MAX_PE_ENA_VF_COUNT];
	u64 fpm_query_buf_pa;
	u64 fpm_commit_buf_pa;
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	u8 __iomem *db_addr;
	u32 __iomem *wqe_alloc_db;
	u32 __iomem *cq_arm_db;
	u32 __iomem *aeq_alloc_db;
	u32 __iomem *cqp_db;
	u32 __iomem *cq_ack_db;
	u32 __iomem *ceq_itr_mask_db;
	u32 __iomem *aeq_itr_mask_db;
	u32 __iomem *hw_regs[IRDMA_MAX_REGS];
	u32 ceq_itr;   /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */
	u64 hw_masks[IRDMA_MAX_MASKS];
	u64 hw_shifts[IRDMA_MAX_SHIFTS];
	u64 hw_stats_regs_32[IRDMA_HW_STAT_INDEX_MAX_32];
	u64 hw_stats_regs_64[IRDMA_HW_STAT_INDEX_MAX_64];
	u64 feature_info[IRDMA_MAX_FEATURES];
	u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
	struct irdma_hw_attrs hw_attrs;
	struct irdma_hmc_info *hmc_info;
	struct irdma_sc_cqp *cqp;
	struct irdma_sc_aeq *aeq;
	struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
	struct irdma_sc_cq *ccq;
	const struct irdma_irq_ops *irq_ops;
	struct irdma_hmc_fpm_misc hmc_fpm_misc;
	struct irdma_ws_node *ws_tree_root;
	struct mutex ws_mutex; /* ws tree mutex */
	u16 num_vfs;
	u8 hmc_fn_id;
	u8 vf_id;
	bool vchnl_up:1;
	bool ceq_valid:1;
	u8 pci_rev;
	enum irdma_status_code (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_reset)(struct irdma_sc_vsi *vsi);
};

struct irdma_modify_cq_info {
	u64 cq_pa;
	struct irdma_cqe *cq_base;
	u32 cq_size;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	bool virtual_map:1;
	bool check_overflow;
	bool cq_resize:1;
};

struct irdma_create_qp_info {
	bool ord_valid:1;
	bool tcp_ctx_valid:1;
	bool cq_num_valid:1;
	bool arp_cache_idx_valid:1;
	bool mac_valid:1;
	bool force_lpb;
	u8 next_iwarp_state;
};

struct irdma_modify_qp_info {
	u64 rx_win0;
	u64 rx_win1;
	u16 new_mss;
	u8 next_iwarp_state;
	u8 curr_iwarp_state;
	u8 termlen;
	bool ord_valid:1;
	bool tcp_ctx_valid:1;
	bool udp_ctx_valid:1;
	bool cq_num_valid:1;
	bool arp_cache_idx_valid:1;
	bool reset_tcp_conn:1;
	bool remove_hash_idx:1;
	bool dont_send_term:1;
	bool dont_send_fin:1;
	bool cached_var_valid:1;
	bool mss_change:1;
	bool force_lpb:1;
	bool mac_valid:1;
};

struct irdma_ccq_cqe_info {
	struct irdma_sc_cqp *cqp;
	u64 scratch;
	u32 op_ret_val;
	u16 maj_err_code;
	u16 min_err_code;
	u8 op_code;
	bool error;
};

struct irdma_dcb_app_info {
	u8 priority;
	u8 selector;
	u16 prot_id;
};

struct irdma_qos_tc_info {
	u64 tc_ctx;
	u8 rel_bw;
	u8 prio_type;
	u8 egress_virt_up;
	u8 ingress_virt_up;
};

struct irdma_l2params {
	struct irdma_qos_tc_info tc_info[IRDMA_MAX_USER_PRIORITY];
	struct irdma_dcb_app_info apps[IRDMA_MAX_APPS];
	u32 num_apps;
	u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY];
	u16 mtu;
	u8 up2tc[IRDMA_MAX_USER_PRIORITY];
	u8 num_tc;
	u8 vsi_rel_bw;
	u8 vsi_prio_type;
	bool mtu_changed:1;
	bool tc_changed:1;
};

struct irdma_vsi_init_info {
	struct irdma_sc_dev *dev;
	void *back_vsi;
	struct irdma_l2params *params;
	u16 exception_lan_q;
	u16 pf_data_vsi_num;
	enum irdma_vm_vf_type vm_vf_type;
	u16 vm_id;
	enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi,
						struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
};

struct irdma_vsi_stats_info {
	struct irdma_vsi_pestat *pestat;
	u8 fcn_id;
	bool alloc_fcn_id;
};

struct irdma_device_init_info {
	u64 fpm_query_buf_pa;
	u64 fpm_commit_buf_pa;
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	void __iomem *bar0;
	u8 hmc_fn_id;
};

struct irdma_ceq_init_info {
	u64 ceqe_pa;
	struct irdma_sc_dev *dev;
	u64 *ceqe_base;
	void *pbl_list;
	u32 elem_cnt;
	u32 ceq_id;
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
	u8 pbl_chunk_size;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_cq **reg_cq;
	u32 reg_cq_idx;
};

struct irdma_aeq_init_info {
	u64 aeq_elem_pa;
	struct irdma_sc_dev *dev;
	u32 *aeqe_base;
	void *pbl_list;
	u32 elem_cnt;
	bool virtual_map;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	u32 msix_idx;
};

struct irdma_ccq_init_info {
	u64 cq_pa;
	u64 shadow_area_pa;
	struct irdma_sc_dev *dev;
	struct irdma_cqe *cq_base;
	__le64 *shadow_area;
	void *pbl_list;
	u32 num_elem;
	u32 ceq_id;
	u32 shadow_read_threshold;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool avoid_mem_cflct:1;
	bool virtual_map:1;
	bool tph_en:1;
	u8 tph_val;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	struct irdma_sc_vsi *vsi;
};

struct irdma_udp_offload_info {
	bool ipv4:1;
	bool insert_vlan_tag:1;
	u8 ttl;
	u8 tos;
	u16 src_port;
	u16 dst_port;
	u32 dest_ip_addr[4];
	u32 snd_mss;
	u16 vlan_tag;
	u16 arp_idx;
	u32 flow_label;
	u8 udp_state;
	u32 psn_nxt;
	u32 lsn;
	u32 epsn;
	u32 psn_max;
	u32 psn_una;
	u32 local_ipaddr[4];
	u32 cwnd;
	u8 rexmit_thresh;
	u8 rnr_nak_thresh;
};

struct irdma_roce_offload_info {
	u16 p_key;
	u16 err_rq_idx;
	u32 qkey;
	u32 dest_qp;
	u32 local_qp;
	u8 roce_tver;
	u8 ack_credits;
	u8 err_rq_idx_valid;
	u32 pd_id;
	u16 ord_size;
	u16 ird_size;
	bool is_qp1:1;
	bool udprivcq_en:1;
	bool dcqcn_en:1;
	bool rcv_no_icrc:1;
	bool wr_rdresp_en:1;
	bool bind_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool dctcp_en:1;
	bool fw_cc_enable:1;
	bool use_stats_inst:1;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETH_ALEN];
	u8 rtomin;
};

struct irdma_iwarp_offload_info {
	u16 rcv_mark_offset;
	u16 snd_mark_offset;
	u8 ddp_ver;
	u8 rdmap_ver;
	u8 iwarp_mode;
	u16 err_rq_idx;
	u32 pd_id;
	u16 ord_size;
	u16 ird_size;
	bool ib_rd_en:1;
	bool align_hdrs:1;
	bool rcv_no_mpa_crc:1;
	bool err_rq_idx_valid:1;
	bool snd_mark_en:1;
	bool rcv_mark_en:1;
	bool wr_rdresp_en:1;
	bool bind_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool use_stats_inst:1;
	bool ecn_en:1;
	bool dctcp_en:1;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETH_ALEN];
	u8 rtomin;
};

struct irdma_tcp_offload_info {
	bool ipv4:1;
	bool no_nagle:1;
	bool insert_vlan_tag:1;
	bool time_stamp:1;
	bool drop_ooo_seg:1;
	bool avoid_stretch_ack:1;
	bool wscale:1;
	bool ignore_tcp_opt:1;
	bool ignore_tcp_uns_opt:1;
	u8 cwnd_inc_limit;
	u8 dup_ack_thresh;
	u8 ttl;
	u8 src_mac_addr_idx;
	u8 tos;
	u16 src_port;
	u16 dst_port;
	u32 dest_ip_addr[4];
	u32 snd_mss;
	u16 syn_rst_handling;
	u16 vlan_tag;
	u16 arp_idx;
	u32 flow_label;
	u8 tcp_state;
	u8 snd_wscale;
	u8 rcv_wscale;
	u32 time_stamp_recent;
	u32 time_stamp_age;
	u32 snd_nxt;
	u32 snd_wnd;
	u32 rcv_nxt;
	u32 rcv_wnd;
	u32 snd_max;
	u32 snd_una;
	u32 srtt;
	u32 rtt_var;
	u32 ss_thresh;
	u32 cwnd;
	u32 snd_wl1;
	u32 snd_wl2;
	u32 max_snd_window;
	u8 rexmit_thresh;
	u32 local_ipaddr[4];
};

struct irdma_qp_host_ctx_info {
	u64 qp_compl_ctx;
	union {
		struct irdma_tcp_offload_info *tcp_info;
		struct irdma_udp_offload_info *udp_info;
	};
	union {
		struct irdma_iwarp_offload_info *iwarp_info;
		struct irdma_roce_offload_info *roce_info;
	};
	u32 send_cq_num;
	u32 rcv_cq_num;
	u32 rem_endpoint_idx;
	u8 stats_idx;
	bool srq_valid:1;
	bool tcp_info_valid:1;
	bool iwarp_info_valid:1;
	bool stats_idx_valid:1;
	u8 user_pri;
};

struct irdma_aeqe_info {
	u64 compl_ctx;
	u32 qp_cq_id;
	u16 ae_id;
	u16 wqe_idx;
	u8 tcp_state;
	u8 iwarp_state;
	bool qp:1;
	bool cq:1;
	bool sq:1;
	bool rq:1;
	bool in_rdrsp_wr:1;
	bool out_rdrsp:1;
	bool aeqe_overflow:1;
	u8 q2_data_written;
	u8 ae_src;
};

struct irdma_allocate_stag_info {
	u64 total_len;
	u64 first_pm_pbl_idx;
	u32 chunk_size;
	u32 stag_idx;
	u32 page_size;
	u32 pd_id;
	u16 access_rights;
	bool remote_access:1;
	bool use_hmc_fcn_index:1;
	bool use_pf_rid:1;
	u8 hmc_fcn_index;
};

struct irdma_mw_alloc_info {
	u32 mw_stag_index;
	u32 page_size;
	u32 pd_id;
	bool remote_access:1;
	bool mw_wide:1;
	bool mw1_bind_dont_vldt_key:1;
};

struct irdma_reg_ns_stag_info {
	u64 reg_addr_pa;
	u64 va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool use_hmc_fcn_index:1;
	u8 hmc_fcn_index;
	bool use_pf_rid:1;
};

struct irdma_fast_reg_stag_info {
	u64 wr_id;
	u64 reg_addr_pa;
	u64 fbo;
	void *va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool local_fence:1;
	bool read_fence:1;
	bool signaled:1;
	bool push_wqe:1;
	bool use_hmc_fcn_index:1;
	u8 hmc_fcn_index;
	bool use_pf_rid:1;
	bool defer_flag:1;
};

struct irdma_dealloc_stag_info {
	u32 stag_idx;
	u32 pd_id;
	bool mr:1;
	bool dealloc_pbl:1;
};

struct irdma_register_shared_stag {
	u64 va;
	enum irdma_addressing_type addr_type;
	irdma_stag_index new_stag_idx;
	irdma_stag_index parent_stag_idx;
	u32 access_rights;
	u32 pd_id;
	u32 page_size;
	irdma_stag_key new_stag_key;
};

struct irdma_qp_init_info {
	struct irdma_qp_uk_init_info qp_uk_init_info;
	struct irdma_sc_pd *pd;
	struct irdma_sc_vsi *vsi;
	__le64 *host_ctx;
	u8 *q2;
	u64 sq_pa;
	u64 rq_pa;
	u64 host_ctx_pa;
	u64 q2_pa;
	u64 shadow_area_pa;
	u8 sq_tph_val;
	u8 rq_tph_val;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
};

struct irdma_cq_init_info {
	struct irdma_sc_dev *dev;
	u64 cq_base_pa;
	u64 shadow_area_pa;
	u32 ceq_id;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	bool virtual_map:1;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool tph_en:1;
	u8 tph_val;
	u8 type;
	struct irdma_cq_uk_init_info cq_uk_init_info;
	struct irdma_sc_vsi *vsi;
};

struct irdma_upload_context_info {
	u64 buf_pa;
	u32 qp_id;
	u8 qp_type;
	bool freeze_qp:1;
	bool raw_format:1;
};

struct irdma_local_mac_entry_info {
	u8 mac_addr[6];
	u16 entry_idx;
};

struct irdma_add_arp_cache_entry_info {
	u8 mac_addr[ETH_ALEN];
	u32 reach_max;
	u16 arp_index;
	bool permanent;
};

struct irdma_apbvt_info {
	u16 port;
	bool add;
};

struct irdma_qhash_table_info {
	struct irdma_sc_vsi *vsi;
	enum irdma_quad_hash_manage_type manage;
	enum irdma_quad_entry_type entry_type;
	bool vlan_valid:1;
	bool ipv4_valid:1;
	u8 mac_addr[ETH_ALEN];
	u16 vlan_id;
	u8 user_pri;
	u32 qp_num;
	u32 dest_ip[4];
	u32 src_ip[4];
	u16 dest_port;
	u16 src_port;
};

struct irdma_cqp_manage_push_page_info {
	u32 push_idx;
	u16 qs_handle;
	u8 free_page;
	u8 push_page_type;
};

struct irdma_qp_flush_info {
	u16 sq_minor_code;
	u16 sq_major_code;
	u16 rq_minor_code;
	u16 rq_major_code;
	u16 ae_code;
	u8 ae_src;
	bool sq:1;
	bool rq:1;
	bool userflushcode:1;
	bool generate_ae:1;
};

struct irdma_gen_ae_info {
	u16 ae_code;
	u8 ae_src;
};

struct irdma_cqp_timeout {
	u64 compl_cqp_cmds;
	u32 count;
};

struct irdma_irq_ops {
	void (*irdma_cfg_aeq)(struct irdma_sc_dev *dev, u32 idx, bool enable);
	void (*irdma_cfg_ceq)(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
			      bool enable);
	void (*irdma_dis_irq)(struct irdma_sc_dev *dev, u32 idx);
	void (*irdma_en_irq)(struct irdma_sc_dev *dev, u32 idx);
};

void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq);
enum irdma_status_code irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
					   bool check_overflow, bool post_sq);
enum irdma_status_code irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch,
					    bool post_sq);
enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
						 struct irdma_ccq_cqe_info *info);
enum irdma_status_code irdma_sc_ccq_init(struct irdma_sc_cq *ccq,
					 struct irdma_ccq_init_info *info);

enum irdma_status_code irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch);
enum irdma_status_code irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);

enum irdma_status_code irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch,
					    bool post_sq);
enum irdma_status_code irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
					 struct irdma_ceq_init_info *info);
void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq);
void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq);

enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
					 struct irdma_aeq_init_info *info);
enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
					      struct irdma_aeqe_info *info);
void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);

void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
		      int abi_ver);
void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
			      struct irdma_sc_dev *dev);
enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err,
					   u16 *min_err);
enum irdma_status_code irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
enum irdma_status_code irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
					 struct irdma_cqp_init_info *info);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
enum irdma_status_code irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
						     struct irdma_ccq_cqe_info *cmpl_info);
enum irdma_status_code irdma_sc_fast_register(struct irdma_sc_qp *qp,
					      struct irdma_fast_reg_stag_info *info,
					      bool post_sq);
enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp,
					  struct irdma_create_qp_info *info,
					  u64 scratch, bool post_sq);
enum irdma_status_code irdma_sc_qp_destroy(struct irdma_sc_qp *qp,
					   u64 scratch, bool remove_hash_idx,
					   bool ignore_mw_bnd, bool post_sq);
enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
					      struct irdma_qp_flush_info *info,
					      u64 scratch, bool post_sq);
enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
					struct irdma_qp_init_info *info);
enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
					  struct irdma_modify_qp_info *info,
					  u64 scratch, bool post_sq);
void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
			irdma_stag stag);
void irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size);
void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			struct irdma_qp_host_ctx_info *info);
void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			     struct irdma_qp_host_ctx_info *info);
enum irdma_status_code irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch,
					   bool post_sq);
enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
					struct irdma_cq_init_info *info);
void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info);
enum irdma_status_code irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp,
							   u64 scratch, u8 hmc_fn_id,
							   bool post_sq, bool poll_registers);

void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
struct cqp_info {
	union {
		struct {
			struct irdma_sc_qp *qp;
			struct irdma_create_qp_info info;
			u64 scratch;
		} qp_create;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_modify_qp_info info;
			u64 scratch;
		} qp_modify;

		struct {
			struct irdma_sc_qp *qp;
			u64 scratch;
			bool remove_hash_idx;
			bool ignore_mw_bnd;
		} qp_destroy;

		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
			bool check_overflow;
		} cq_create;

		struct {
			struct irdma_sc_cq *cq;
			struct irdma_modify_cq_info info;
			u64 scratch;
		} cq_modify;

		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
		} cq_destroy;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_allocate_stag_info info;
			u64 scratch;
		} alloc_stag;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_mw_alloc_info info;
			u64 scratch;
		} mw_alloc;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_reg_ns_stag_info info;
			u64 scratch;
		} mr_reg_non_shared;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_dealloc_stag_info info;
			u64 scratch;
		} dealloc_stag;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_add_arp_cache_entry_info info;
			u64 scratch;
		} add_arp_cache_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u16 arp_index;
		} del_arp_cache_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_local_mac_entry_info info;
			u64 scratch;
		} add_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u8 entry_idx;
			u8 ignore_ref_count;
		} del_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
		} alloc_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_cqp_manage_push_page_info info;
			u64 scratch;
		} manage_push_page;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_upload_context_info info;
			u64 scratch;
		} qp_upload_context;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_hmc_fcn_info info;
			u64 scratch;
		} manage_hmc_pm;

		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_create;

		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_destroy;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_create;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_destroy;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_qp_flush_info info;
			u64 scratch;
		} qp_flush_wqes;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_gen_ae_info info;
			u64 scratch;
		} gen_ae;

		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u8 hmc_fn_id;
			u64 scratch;
		} query_fpm_val;

		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u8 hmc_fn_id;
			u64 scratch;
		} commit_fpm_val;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_apbvt_info info;
			u64 scratch;
		} manage_apbvt_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_qhash_table_info info;
			u64 scratch;
		} manage_qhash_table_entry;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_update_sds_info info;
			u64 scratch;
		} update_pe_sds;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_sc_qp *qp;
			u64 scratch;
		} suspend_resume;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info info;
			u64 scratch;
		} ah_create;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info info;
			u64 scratch;
		} ah_destroy;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_create;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_destroy;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_modify;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_stats_inst_info info;
			u64 scratch;
		} stats_manage;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_stats_gather_info info;
			u64 scratch;
		} stats_gather;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ws_node_info info;
			u64 scratch;
		} ws_node;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_up_info info;
			u64 scratch;
		} up_map;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_dma_mem query_buff_mem;
			u64 scratch;
		} query_rdma;
	} u;
};

struct cqp_cmds_info {
	struct list_head cqp_cmd_entry;
	u8 cqp_cmd;
	u8 post_sq;
	struct cqp_info in;
};

__le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
					   u32 *wqe_idx);

/**
 * irdma_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
 * @cqp: struct for cqp hw
 * @scratch: private data for CQP WQE
 */
static inline __le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch)
{
	u32 wqe_idx;

	return irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
}
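
/*
 * Usage sketch (illustrative, not part of the upstream header): CQP command
 * builders typically fetch the next SQ slot with the helper above, fill the
 * WQE quanta, and then ring the CQP doorbell.  The payload offset and the
 * IRDMA_ERR_RING_FULL return value are assumptions for illustration; the
 * real field encodings live with the CQP command builders in ctrl.c.
 *
 *	__le64 *wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 *
 *	if (!wqe)
 *		return IRDMA_ERR_RING_FULL;
 *	set_64bit_val(wqe, 16, some_field);	(hypothetical payload word)
 *	dma_wmb();				(WQE written before posting)
 *	irdma_sc_cqp_post_sq(cqp);
 */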
#endif /* IRDMA_TYPE_H */