/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include "gdma.h"
#include "hw_channel.h"

/* Definitions for the Microsoft Azure Network Adapter (MANA).
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION	0
#define MANA_MINOR_VERSION	1
#define MANA_MICRO_VERSION	1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* The number of entries in the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_SIZE 64
#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
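
/* Illustrative sketch (not part of the driver API; the helper name is
 * hypothetical): because the table size is a power of two, an RX hash can be
 * folded into an indirection-table slot with a mask instead of a modulo.
 */
static inline u32 mana_indir_table_slot(u32 pkt_hash)
{
	return pkt_hash & MANA_INDIRECT_TABLE_MASK;
}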

/* The Toeplitz hash key's length in bytes; should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40

#define COMP_ENTRY_SIZE 64

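/* MAX_FRAME_SIZE adds the 14-byte Ethernet header (ETH_HLEN) to the MTU. */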
#define ADAPTER_MTU_SIZE 1500
#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14)

#define RX_BUFFERS_PER_QUEUE 512

#define MAX_SEND_BUFFERS_PER_QUEUE 256

#define EQ_SIZE (8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256

struct mana_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};
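
/* Illustrative sketch (not part of the driver API; the helper name is
 * hypothetical): how a per-queue counter update is expected to pair with the
 * u64_stats_sync seqcount above; callers are assumed to be serialized by the
 * queue's xmit or NAPI context.
 */
static inline void mana_stats_add(struct mana_stats *stats, u64 bytes)
{
	u64_stats_update_begin(&stats->syncp);
	stats->packets++;
	stats->bytes += bytes;
	u64_stats_update_end(&stats->syncp);
}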

struct mana_txq {
	struct gdma_queue *gdma_sq;

	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1	: 10;
			u32 vsq_frame	: 14;
			u32 reserved2	: 8;
		};
	};

	u16 vp_offset;

	struct net_device *ndev;

	/* SKBs that have been posted to the HW and are waiting for their CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	struct mana_stats stats;
};

/* DMA mappings of the skb data and frags */
struct mana_skb_head {
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];

	u32 size[MAX_SKB_FRAGS + 1];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)
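
/* Illustrative sketch (assumption, not the driver's code; the helper name is
 * hypothetical): the TX path is expected to reserve MANA_HEADROOM via the
 * netdev's needed_headroom and keep the per-frag DMA mappings in the skb
 * headroom so they can be unmapped on completion.
 */
static inline struct mana_skb_head *mana_skb_head_of(struct sk_buff *skb)
{
	return (struct mana_skb_head *)skb->head;
}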

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT	= 0,
	MANA_LONG_PKT_FMT	= 1,
};

struct mana_tx_short_oob {
	u32 pkt_fmt		: 2;
	u32 is_outer_ipv4	: 1;
	u32 is_outer_ipv6	: 1;
	u32 comp_iphdr_csum	: 1;
	u32 comp_tcp_csum	: 1;
	u32 comp_udp_csum	: 1;
	u32 supress_txcqe_gen	: 1;
	u32 vcq_num		: 24;

	u32 trans_off		: 10; /* Transport header offset */
	u32 vsq_frame		: 14;
	u32 short_vp_offset	: 8;
}; /* HW DATA */

struct mana_tx_long_oob {
	u32 is_encap		: 1;
	u32 inner_is_ipv6	: 1;
	u32 inner_tcp_opt	: 1;
	u32 inject_vlan_pri_tag : 1;
	u32 reserved1		: 12;
	u32 pcp			: 3;  /* 802.1Q */
	u32 dei			: 1;  /* 802.1Q */
	u32 vlan_id		: 12; /* 802.1Q */

	u32 inner_frame_offset	: 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset	: 12;
	u32 reserved2		: 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */
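
/* Illustrative sketch (assumption, not the driver's TX path; the helper name
 * and parameters are hypothetical): a minimal way to populate the short OOB
 * for an outer-IPv4 TCP frame that wants TX checksum offload.
 */
static inline void mana_tx_short_oob_init(struct mana_tx_oob *oob,
					  u32 trans_off, u32 vcq_num,
					  u32 vsq_frame, u32 vp_offset)
{
	memset(oob, 0, sizeof(*oob));
	oob->s_oob.pkt_fmt = MANA_SHORT_PKT_FMT;
	oob->s_oob.is_outer_ipv4 = 1;
	oob->s_oob.comp_tcp_csum = 1;
	oob->s_oob.trans_off = trans_off;	/* offset of the TCP header */
	oob->s_oob.vcq_num = vcq_num;
	oob->s_oob.vsq_frame = vsq_frame;
	oob->s_oob.short_vp_offset = vp_offset;
}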

enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID			= 0,
	CQE_RX_OKAY			= 1,
	CQE_RX_COALESCED_4		= 2,
	CQE_RX_OBJECT_FENCE		= 3,
	CQE_RX_TRUNCATED		= 4,

	CQE_TX_OKAY			= 32,
	CQE_TX_SA_DROP			= 33,
	CQE_TX_MTU_DROP			= 34,
	CQE_TX_INVALID_OOB		= 35,
	CQE_TX_INVALID_ETH_TYPE		= 36,
	CQE_TX_HDR_PROCESSING_ERROR	= 37,
	CQE_TX_VF_DISABLED		= 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE	= 39,
	CQE_TX_VPORT_DISABLED		= 40,
	CQE_TX_VLAN_TAGGING_VIOLATION	= 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
	u32 cqe_type	: 6;
	u32 client_type	: 2;
	u32 vendor_err	: 24;
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4		BIT(0)
#define NDIS_HASH_TCP_IPV4	BIT(1)
#define NDIS_HASH_UDP_IPV4	BIT(2)
#define NDIS_HASH_IPV6		BIT(3)
#define NDIS_HASH_TCP_IPV6	BIT(4)
#define NDIS_HASH_UDP_IPV6	BIT(5)
#define NDIS_HASH_IPV6_EX	BIT(6)
#define NDIS_HASH_TCP_IPV6_EX	BIT(7)
#define NDIS_HASH_UDP_IPV6_EX	BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4                                                         \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
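
/* Illustrative sketch (assumption; the helper name is hypothetical): mapping
 * the 9-bit RX hash type reported in the receive completion OOB onto the
 * stack's hash levels before a call such as skb_set_hash().
 */
static inline enum pkt_hash_types mana_rx_hashtype_to_l3l4(u32 rx_hashtype)
{
	if (rx_hashtype & MANA_HASH_L4)
		return PKT_HASH_TYPE_L4;
	if (rx_hashtype & MANA_HASH_L3)
		return PKT_HASH_TYPE_L3;
	return PKT_HASH_TYPE_NONE;
}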

struct mana_rxcomp_perpkt_info {
	u32 pkt_len	: 16;
	u32 reserved1	: 16;
	u32 reserved2;
	u32 pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id			: 12;
	u32 rx_vlantag_present		: 1;
	u32 rx_outer_iphdr_csum_succeed	: 1;
	u32 rx_outer_iphdr_csum_fail	: 1;
	u32 reserved1			: 1;
	u32 rx_hashtype			: 9;
	u32 rx_iphdr_csum_succeed	: 1;
	u32 rx_iphdr_csum_fail		: 1;
	u32 rx_tcp_csum_succeed		: 1;
	u32 rx_tcp_csum_fail		: 1;
	u32 rx_udp_csum_succeed		: 1;
	u32 rx_udp_csum_fail		: 1;
	u32 reserved2			: 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;
}; /* HW DATA */
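
/* Illustrative sketch (assumption; the helper name is hypothetical): the
 * checksum status bits above are expected to collapse into a single
 * "checksum already verified" decision (CHECKSUM_UNNECESSARY) on the RX
 * fast path.
 */
static inline bool mana_rxcomp_csum_ok(const struct mana_rxcomp_oob *oob)
{
	return oob->rx_iphdr_csum_succeed &&
	       (oob->rx_tcp_csum_succeed || oob->rx_udp_csum_succeed);
}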

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset	: 5;
	u32 tx_wqe_offset	: 27;

	u32 reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Must be non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Must be non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer into which the CQ handler can copy the CQEs. */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	/* NAPI data */
	struct napi_struct napi;
	int work_done;
	int budget;
};
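
/* Illustrative sketch (assumption; the helper name is hypothetical): per-poll
 * accounting for the CQ's NAPI instance. The CQ should only be re-armed when
 * the poll finished under budget and napi_complete_done() accepted the
 * completion, which is the condition this helper reports.
 */
static inline bool mana_cq_napi_idle(struct mana_cq *cq)
{
	return cq->work_done < cq->budget &&
	       napi_complete_done(&cq->napi, cq->work_done);
}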

#define GDMA_MAX_RQE_SGES 15

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	void *buf_va;
	dma_addr_t buf_dma_addr;

	/* SGL of the buffer that is posted as part of the work request. */
	u32 num_sge;
	struct gdma_sge sgl[GDMA_MAX_RQE_SGES];

	/* Required to store the result of mana_gd_post_work_request:
	 * gdma_posted_wqe_info.wqe_size_in_bu is needed to advance the work
	 * queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
};

struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;

	mana_handle_t rxobj;

	struct mana_cq rx_cq;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;

	struct mana_stats stats;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[];
};

struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;
};

struct mana_ethtool_stats {
	u64 stop_queue;
	u64 wake_queue;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;

	struct mana_eq *eqs;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;

	/* Indirection table for RX & TX. The values are queue indexes. */
	u32 indir_table[MANA_INDIRECT_TABLE_SIZE];

	/* Indirection table containing RxObject handles */
	mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* Points to an array of num_queues RQ pointers. */
	struct mana_rxq **rxqs;

	/* num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs are created. */
	unsigned int max_queues;
	unsigned int num_queues;

	mana_handle_t port_handle;

	u16 port_idx;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;
};

int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd);
void mana_remove(struct gdma_dev *gd);

extern const struct ethtool_ops mana_ethtool_ops;

struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;
	u32 modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG	= 0x20001,
	MANA_QUERY_GF_STAT	= 0x20002,
	MANA_CONFIG_VPORT_TX	= 0x20003,
	MANA_CREATE_WQ_OBJ	= 0x20004,
	MANA_DESTROY_WQ_OBJ	= 0x20005,
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,
};

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA NIC driver capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u16 reserved;
	u32 max_num_eqs;
}; /* HW DATA */

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;
	u8 short_form_allowed;
	u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
}; /* HW DATA */
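
/* Illustrative sketch (assumption; the helper name is hypothetical): the
 * RX-steering request is expected to be followed, in the same message buffer,
 * by num_indir_entries RX object handles, with indir_tab_offset giving their
 * byte offset from the start of the request; this helper only computes the
 * total buffer size needed for such a layout.
 */
static inline size_t mana_cfg_rx_steer_req_size(u16 num_indir_entries)
{
	return sizeof(struct mana_cfg_rx_steer_req) +
	       num_indir_entries * sizeof(mana_handle_t);
}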

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
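
/* Illustrative sketch (assumption; the helper name is hypothetical): whether
 * TX can use the short OOB format depends on the vPort TX offset fitting in
 * the 8-bit short_vp_offset field.
 */
static inline bool mana_short_oob_usable(const struct mana_port_context *apc)
{
	return apc->tx_shortform_allowed &&
	       apc->tx_vp_offset <= MANA_SHORT_VPORT_OFFSET_MAX;
}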

struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};

#endif /* _MANA_H */