/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef MBOX_H
#define MBOX_H

#include <linux/etherdevice.h>
#include <linux/sizes.h>

#include "rvu_struct.h"
#include "common.h"

#define MBOX_SIZE		SZ_64K

/* AF/PF: PF initiated, PF/VF VF initiated */
#define MBOX_DOWN_RX_START	0
#define MBOX_DOWN_RX_SIZE	(46 * SZ_1K)
#define MBOX_DOWN_TX_START	(MBOX_DOWN_RX_START + MBOX_DOWN_RX_SIZE)
#define MBOX_DOWN_TX_SIZE	(16 * SZ_1K)
/* AF/PF: AF initiated, PF/VF PF initiated */
#define MBOX_UP_RX_START	(MBOX_DOWN_TX_START + MBOX_DOWN_TX_SIZE)
#define MBOX_UP_RX_SIZE		SZ_1K
#define MBOX_UP_TX_START	(MBOX_UP_RX_START + MBOX_UP_RX_SIZE)
#define MBOX_UP_TX_SIZE		SZ_1K

#if MBOX_UP_TX_SIZE + MBOX_UP_TX_START != MBOX_SIZE
# error "incorrect mailbox area sizes"
#endif
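
/* Putting the sizes above together, the 64KB mailbox is laid out roughly as
 * below (an illustration of the arithmetic only, not an ABI statement):
 *
 *   0K            46K           62K     63K     64K
 *   +-------------+-------------+-------+-------+
 *   |  DOWN RX    |  DOWN TX    | UP RX | UP TX |
 *   |  46K        |  16K        |  1K   |  1K   |
 *   +-------------+-------------+-------+-------+
 *
 * 46K + 16K + 1K + 1K = 64K = MBOX_SIZE, which is exactly what the build-time
 * check above enforces.
 */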

#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))

#define MBOX_RSP_TIMEOUT	1000 /* in ms, Time to wait for mbox response */

#define MBOX_MSG_ALIGN		16 /* Align mbox msg start to 16 bytes */

/* Mailbox directions */
#define MBOX_DIR_AFPF		0 /* AF replies to PF */
#define MBOX_DIR_PFAF		1 /* PF sends messages to AF */
#define MBOX_DIR_PFVF		2 /* PF replies to VF */
#define MBOX_DIR_VFPF		3 /* VF sends messages to PF */
#define MBOX_DIR_AFPF_UP	4 /* AF sends messages to PF */
#define MBOX_DIR_PFAF_UP	5 /* PF replies to AF */
#define MBOX_DIR_PFVF_UP	6 /* PF sends messages to VF */
#define MBOX_DIR_VFPF_UP	7 /* VF replies to PF */

struct otx2_mbox_dev {
	void *mbase;	/* This dev's mbox region */
	spinlock_t mbox_lock;
	u16 msg_size;	/* Total msg size to be sent */
	u16 rsp_size;	/* Total rsp size to be sure the reply is ok */
	u16 num_msgs;	/* No of msgs sent or waiting for response */
	u16 msgs_acked;	/* No of msgs for which response is received */
};

struct otx2_mbox {
	struct pci_dev *pdev;
	void *hwbase;	/* Mbox region advertised by HW */
	void *reg_base;	/* CSR base for this dev */
	u64 trigger;	/* Trigger mbox notification */
	u16 tr_shift;	/* Mbox trigger shift */
	u64 rx_start;	/* Offset of Rx region in mbox memory */
	u64 tx_start;	/* Offset of Tx region in mbox memory */
	u16 rx_size;	/* Size of Rx region */
	u16 tx_size;	/* Size of Tx region */
	u16 ndevs;	/* The number of peers */
	struct otx2_mbox_dev *dev;
};

/* Header which precedes all mbox messages */
struct mbox_hdr {
	u16 num_msgs;	/* No of msgs embedded */
};

/* Header which precedes every msg and is also part of it */
struct mbox_msghdr {
	u16 pcifunc;	/* Who's sending this msg */
	u16 id;		/* Mbox message ID */
#define OTX2_MBOX_REQ_SIG (0xdead)
#define OTX2_MBOX_RSP_SIG (0xbeef)
	u16 sig;	/* Signature, for detecting corrupted msgs */
#define OTX2_MBOX_VERSION (0x0001)
	u16 ver;	/* Version of msg's structure for this ID */
	u16 next_msgoff; /* Offset of next msg within mailbox region */
	int rc;		/* Msg processed response code */
};

void otx2_mbox_reset(struct otx2_mbox *mbox, int devid);
void otx2_mbox_destroy(struct otx2_mbox *mbox);
int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
		   struct pci_dev *pdev, void __force *reg_base,
		   int direction, int ndevs);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
					    int size, int size_rsp);
struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
				      struct mbox_msghdr *msg);
int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid,
			   u16 pcifunc, u16 id);
bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid);
const char *otx2_mbox_id2name(u16 id);
static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
						      int devid, int size)
{
	return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
}

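/* A minimal sketch of how a request/response exchange is expected to flow
 * through the API above (illustrative only; 'mbox' and 'devid' are assumed
 * to have been set up by the caller via otx2_mbox_init(), and error paths
 * are abbreviated):
 *
 *	struct ready_msg_rsp *rsp;
 *	struct msg_req *req;
 *	int err;
 *
 *	req = (struct msg_req *)otx2_mbox_alloc_msg_rsp(mbox, devid,
 *							sizeof(*req),
 *							sizeof(*rsp));
 *	if (!req)
 *		return -ENOMEM;
 *	req->hdr.id = MBOX_MSG_READY;
 *	req->hdr.sig = OTX2_MBOX_REQ_SIG;
 *	req->hdr.ver = OTX2_MBOX_VERSION;
 *
 *	otx2_mbox_msg_send(mbox, devid);
 *	err = otx2_mbox_wait_for_rsp(mbox, devid);
 *	if (err)
 *		return err;
 *
 *	rsp = (struct ready_msg_rsp *)otx2_mbox_get_rsp(mbox, devid, &req->hdr);
 *	if (IS_ERR_OR_NULL(rsp))
 *		return -EIO;
 *	return rsp->hdr.rc;
 */
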
/* Mailbox message types */
#define MBOX_MSG_MASK		0xFFFF
#define MBOX_MSG_INVALID	0xFFFE
#define MBOX_MSG_MAX		0xFFFF

#define MBOX_MESSAGES							\
/* Generic mbox IDs (range 0x000 - 0x1FF) */				\
M(READY, 0x001, ready, msg_req, ready_msg_rsp)				\
M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp)	\
M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp)	\
M(MSIX_OFFSET, 0x004, msix_offset, msg_req, msix_offset_rsp)		\
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp)				\
/* CGX mbox IDs (range 0x200 - 0x3FF) */				\
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp)		\
M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp)		\
M(CGX_STATS, 0x202, cgx_stats, msg_req, cgx_stats_rsp)			\
M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set, cgx_mac_addr_set_or_get,	\
	cgx_mac_addr_set_or_get)					\
M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_get, cgx_mac_addr_set_or_get,	\
	cgx_mac_addr_set_or_get)					\
M(CGX_PROMISC_ENABLE, 0x205, cgx_promisc_enable, msg_req, msg_rsp)	\
M(CGX_PROMISC_DISABLE, 0x206, cgx_promisc_disable, msg_req, msg_rsp)	\
M(CGX_START_LINKEVENTS, 0x207, cgx_start_linkevents, msg_req, msg_rsp)	\
M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp)	\
M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp)	\
M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp)	\
/* NPA mbox IDs (range 0x400 - 0x5FF) */				\
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc,					\
	npa_lf_alloc_req, npa_lf_alloc_rsp)				\
M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp)			\
M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp)	\
M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\
/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */				\
/* TIM mbox IDs (range 0x800 - 0x9FF) */				\
/* CPT mbox IDs (range 0xA00 - 0xBFF) */				\
/* NPC mbox IDs (range 0x6000 - 0x7FFF) */				\
M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\
	npc_mcam_alloc_entry_rsp)					\
M(NPC_MCAM_FREE_ENTRY, 0x6001, npc_mcam_free_entry,			\
	npc_mcam_free_entry_req, msg_rsp)				\
M(NPC_MCAM_WRITE_ENTRY, 0x6002, npc_mcam_write_entry,			\
	npc_mcam_write_entry_req, msg_rsp)				\
M(NPC_MCAM_ENA_ENTRY, 0x6003, npc_mcam_ena_entry,			\
	npc_mcam_ena_dis_entry_req, msg_rsp)				\
M(NPC_MCAM_DIS_ENTRY, 0x6004, npc_mcam_dis_entry,			\
	npc_mcam_ena_dis_entry_req, msg_rsp)				\
M(NPC_MCAM_SHIFT_ENTRY, 0x6005, npc_mcam_shift_entry, npc_mcam_shift_entry_req,\
	npc_mcam_shift_entry_rsp)					\
M(NPC_MCAM_ALLOC_COUNTER, 0x6006, npc_mcam_alloc_counter,		\
	npc_mcam_alloc_counter_req,					\
	npc_mcam_alloc_counter_rsp)					\
M(NPC_MCAM_FREE_COUNTER, 0x6007, npc_mcam_free_counter,			\
	npc_mcam_oper_counter_req, msg_rsp)				\
M(NPC_MCAM_UNMAP_COUNTER, 0x6008, npc_mcam_unmap_counter,		\
	npc_mcam_unmap_counter_req, msg_rsp)				\
M(NPC_MCAM_CLEAR_COUNTER, 0x6009, npc_mcam_clear_counter,		\
	npc_mcam_oper_counter_req, msg_rsp)				\
M(NPC_MCAM_COUNTER_STATS, 0x600a, npc_mcam_counter_stats,		\
	npc_mcam_oper_counter_req,					\
	npc_mcam_oper_counter_rsp)					\
M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry, \
	npc_mcam_alloc_and_write_entry_req,				\
	npc_mcam_alloc_and_write_entry_rsp)				\
M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg,				\
	msg_req, npc_get_kex_cfg_rsp)					\
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */				\
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc,					\
	nix_lf_alloc_req, nix_lf_alloc_rsp)				\
M(NIX_LF_FREE, 0x8001, nix_lf_free, msg_req, msg_rsp)			\
M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp)	\
M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable,				\
	hwctx_disable_req, msg_rsp)					\
M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc,				\
	nix_txsch_alloc_req, nix_txsch_alloc_rsp)			\
M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp)	\
M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp)	\
M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp)		\
M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp)		\
M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg,			\
	nix_rss_flowkey_cfg,						\
	nix_rss_flowkey_cfg_rsp)					\
M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, nix_set_mac_addr, msg_rsp) \
M(NIX_SET_RX_MODE, 0x800b, nix_set_rx_mode, nix_rx_mode, msg_rsp)	\
M(NIX_SET_HW_FRS, 0x800c, nix_set_hw_frs, nix_frs_cfg, msg_rsp)		\
M(NIX_LF_START_RX, 0x800d, nix_lf_start_rx, msg_req, msg_rsp)		\
M(NIX_LF_STOP_RX, 0x800e, nix_lf_stop_rx, msg_req, msg_rsp)		\
M(NIX_MARK_FORMAT_CFG, 0x800f, nix_mark_format_cfg,			\
	nix_mark_format_cfg,						\
	nix_mark_format_cfg_rsp)					\
M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp)		\
M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg,			\
	nix_lso_format_cfg,						\
	nix_lso_format_cfg_rsp)						\
M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp)

/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES						\
M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)

enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
#undef M
};
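
/* MBOX_MESSAGES and MBOX_UP_CGX_MESSAGES are X-macro lists: each consumer
 * defines M() to pick the columns it needs and then expands the list, as the
 * enum above does for the message IDs.  As another illustration (a sketch,
 * not necessarily how mbox.c implements it), otx2_mbox_id2name() could be
 * generated from the same table:
 *
 *	const char *otx2_mbox_id2name(u16 id)
 *	{
 *		switch (id) {
 *	#define M(_name, _id, _1, _2, _3) case _id: return # _name;
 *		MBOX_MESSAGES
 *	#undef M
 *		default:
 *			return "INVALID ID";
 *		}
 *	}
 */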

/* Mailbox message formats */

#define RVU_DEFAULT_PF_FUNC	0xFFFF

/* Generic request msg used for those mbox messages which
 * don't send any data in the request.
 */
struct msg_req {
	struct mbox_msghdr hdr;
};

/* Generic response msg used as an ack or response for those mbox
 * messages which don't have a specific rsp msg format.
 */
struct msg_rsp {
	struct mbox_msghdr hdr;
};

/* RVU mailbox error codes
 * Range 256 - 300.
 */
enum rvu_af_status {
	RVU_INVALID_VF_ID = -256,
};

struct ready_msg_rsp {
	struct mbox_msghdr hdr;
	u16 sclk_feq;	/* SCLK frequency */
};

/* Structure for requesting resource provisioning.
 * 'modify' flag to be used when either requesting more
 * or detaching part of a certain resource type.
 * Rest of the fields specify how many of what type to
 * be attached.
 */
struct rsrc_attach {
	struct mbox_msghdr hdr;
	u8 modify:1;
	u8 npalf:1;
	u8 nixlf:1;
	u16 sso;
	u16 ssow;
	u16 timlfs;
	u16 cptlfs;
};
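
/* For example, a PF that is already provisioned and now also wants a NIX LF
 * plus two SSO LFs could fill an ATTACH_RESOURCES request roughly as below
 * (a sketch; 'req' is assumed to come from otx2_mbox_alloc_msg() and the
 * counts are illustrative):
 *
 *	struct rsrc_attach *req;
 *	...
 *	req->hdr.id = MBOX_MSG_ATTACH_RESOURCES;
 *	req->hdr.sig = OTX2_MBOX_REQ_SIG;
 *	req->hdr.ver = OTX2_MBOX_VERSION;
 *	req->modify = 1;    (keep what is already attached, add to it)
 *	req->nixlf = 1;
 *	req->sso = 2;
 */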

/* Structure for relinquishing resources.
 * 'partial' flag to be used when relinquishing all resources
 * but only of a certain type. If not set, all resources of all
 * types provisioned to the RVU function will be detached.
 */
struct rsrc_detach {
	struct mbox_msghdr hdr;
	u8 partial:1;
	u8 npalf:1;
	u8 nixlf:1;
	u8 sso:1;
	u8 ssow:1;
	u8 timlfs:1;
	u8 cptlfs:1;
};

#define MSIX_VECTOR_INVALID	0xFFFF
#define MAX_RVU_BLKLF_CNT	256

struct msix_offset_rsp {
	struct mbox_msghdr hdr;
	u16 npa_msixoff;
	u16 nix_msixoff;
	u8 sso;
	u8 ssow;
	u8 timlfs;
	u8 cptlfs;
	u16 sso_msixoff[MAX_RVU_BLKLF_CNT];
	u16 ssow_msixoff[MAX_RVU_BLKLF_CNT];
	u16 timlf_msixoff[MAX_RVU_BLKLF_CNT];
	u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
};

/* CGX mbox message formats */

struct cgx_stats_rsp {
	struct mbox_msghdr hdr;
#define CGX_RX_STATS_COUNT	13
#define CGX_TX_STATS_COUNT	18
	u64 rx_stats[CGX_RX_STATS_COUNT];
	u64 tx_stats[CGX_TX_STATS_COUNT];
};

/* Structure for requesting the operation for
 * setting/getting mac address in the CGX interface
 */
struct cgx_mac_addr_set_or_get {
	struct mbox_msghdr hdr;
	u8 mac_addr[ETH_ALEN];
};

struct cgx_link_user_info {
	uint64_t link_up:1;
	uint64_t full_duplex:1;
	uint64_t lmac_type_id:4;
	uint64_t speed:20; /* speed in Mbps */
#define LMACTYPE_STR_LEN 16
	char lmac_type[LMACTYPE_STR_LEN];
};

struct cgx_link_info_msg {
	struct mbox_msghdr hdr;
	struct cgx_link_user_info link_info;
};

/* NPA mbox message formats */

/* NPA mailbox error codes
 * Range 301 - 400.
 */
enum npa_af_status {
	NPA_AF_ERR_PARAM = -301,
	NPA_AF_ERR_AQ_FULL = -302,
	NPA_AF_ERR_AQ_ENQUEUE = -303,
	NPA_AF_ERR_AF_LF_INVALID = -304,
	NPA_AF_ERR_AF_LF_ALLOC = -305,
	NPA_AF_ERR_LF_RESET = -306,
};

/* For NPA LF context alloc and init */
struct npa_lf_alloc_req {
	struct mbox_msghdr hdr;
	int node;
	int aura_sz;	/* No of auras */
	u32 nr_pools;	/* No of pools */
};

struct npa_lf_alloc_rsp {
	struct mbox_msghdr hdr;
	u32 stack_pg_ptrs;  /* No of ptrs per stack page */
	u32 stack_pg_bytes; /* Size of stack page */
	u16 qints;	    /* NPA_AF_CONST::QINTS */
};

/* NPA AQ enqueue msg */
struct npa_aq_enq_req {
	struct mbox_msghdr hdr;
	u32 aura_id;
	u8 ctype;
	u8 op;
	union {
		/* Valid when op == WRITE/INIT and ctype == AURA.
		 * LF fills the pool_id in aura.pool_addr. AF will translate
		 * the pool_id to pool context pointer.
		 */
		struct npa_aura_s aura;
		/* Valid when op == WRITE/INIT and ctype == POOL */
		struct npa_pool_s pool;
	};
	/* Mask data when op == WRITE (1=write, 0=don't write) */
	union {
		/* Valid when op == WRITE and ctype == AURA */
		struct npa_aura_s aura_mask;
		/* Valid when op == WRITE and ctype == POOL */
		struct npa_pool_s pool_mask;
	};
};
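
/* For op == WRITE the second union acts as a per-bit mask: only bits set in
 * aura_mask/pool_mask are written into the context, the rest are preserved.
 * A sketch for updating just an aura's buffer limit (assumes npa_aura_s has
 * a 'limit' field and that NPA_AQ_CTYPE_AURA/NPA_AQ_INSTOP_WRITE come from
 * rvu_struct.h):
 *
 *	struct npa_aq_enq_req *aq;
 *	...
 *	aq->aura_id = aura_id;
 *	aq->ctype = NPA_AQ_CTYPE_AURA;
 *	aq->op = NPA_AQ_INSTOP_WRITE;
 *	aq->aura.limit = new_limit;
 *	memset(&aq->aura_mask, 0, sizeof(aq->aura_mask));
 *	aq->aura_mask.limit = ~0ULL;    (all-ones: write this field only)
 */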

struct npa_aq_enq_rsp {
	struct mbox_msghdr hdr;
	union {
		/* Valid when op == READ and ctype == AURA */
		struct npa_aura_s aura;
		/* Valid when op == READ and ctype == POOL */
		struct npa_pool_s pool;
	};
};

/* Disable all contexts of type 'ctype' */
struct hwctx_disable_req {
	struct mbox_msghdr hdr;
	u8 ctype;
};

/* NIX mbox message formats */

/* NIX mailbox error codes
 * Range 401 - 500.
 */
enum nix_af_status {
	NIX_AF_ERR_PARAM = -401,
	NIX_AF_ERR_AQ_FULL = -402,
	NIX_AF_ERR_AQ_ENQUEUE = -403,
	NIX_AF_ERR_AF_LF_INVALID = -404,
	NIX_AF_ERR_AF_LF_ALLOC = -405,
	NIX_AF_ERR_TLX_ALLOC_FAIL = -406,
	NIX_AF_ERR_TLX_INVALID = -407,
	NIX_AF_ERR_RSS_SIZE_INVALID = -408,
	NIX_AF_ERR_RSS_GRPS_INVALID = -409,
	NIX_AF_ERR_FRS_INVALID = -410,
	NIX_AF_ERR_RX_LINK_INVALID = -411,
	NIX_AF_INVAL_TXSCHQ_CFG = -412,
	NIX_AF_SMQ_FLUSH_FAILED = -413,
	NIX_AF_ERR_LF_RESET = -414,
	NIX_AF_ERR_RSS_NOSPC_FIELD = -415,
	NIX_AF_ERR_RSS_NOSPC_ALGO = -416,
	NIX_AF_ERR_MARK_CFG_FAIL = -417,
	NIX_AF_ERR_LSO_CFG_FAIL = -418,
	NIX_AF_INVAL_NPA_PF_FUNC = -419,
	NIX_AF_INVAL_SSO_PF_FUNC = -420,
};

/* For NIX LF context alloc and init */
struct nix_lf_alloc_req {
	struct mbox_msghdr hdr;
	int node;
	u32 rq_cnt;	/* No of receive queues */
	u32 sq_cnt;	/* No of send queues */
	u32 cq_cnt;	/* No of completion queues */
	u8 xqe_sz;
	u16 rss_sz;
	u8 rss_grps;
	u16 npa_func;
	u16 sso_func;
	u64 rx_cfg;	/* See NIX_AF_LF(0..127)_RX_CFG */
};
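
/* A sketch of a typical NIX LF request (queue counts are illustrative; the
 * use of RVU_DEFAULT_PF_FUNC in npa_func/sso_func is an assumption here,
 * taken to mean "use the sender's own NPA/SSO LF"):
 *
 *	struct nix_lf_alloc_req *req;
 *	...
 *	req->rq_cnt = 16;
 *	req->sq_cnt = 16;
 *	req->cq_cnt = 32;
 *	req->rss_sz = 256;
 *	req->rss_grps = 1;
 *	req->npa_func = RVU_DEFAULT_PF_FUNC;
 *	req->sso_func = RVU_DEFAULT_PF_FUNC;
 */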

struct nix_lf_alloc_rsp {
	struct mbox_msghdr hdr;
	u16 sqb_size;
	u16 rx_chan_base;
	u16 tx_chan_base;
	u8 rx_chan_cnt;	/* total number of RX channels */
	u8 tx_chan_cnt;	/* total number of TX channels */
	u8 lso_tsov4_idx;
	u8 lso_tsov6_idx;
	u8 mac_addr[ETH_ALEN];
	u8 lf_rx_stats;	/* NIX_AF_CONST1::LF_RX_STATS */
	u8 lf_tx_stats;	/* NIX_AF_CONST1::LF_TX_STATS */
	u16 cints;	/* NIX_AF_CONST2::CINTS */
	u16 qints;	/* NIX_AF_CONST2::QINTS */
};

/* NIX AQ enqueue msg */
struct nix_aq_enq_req {
	struct mbox_msghdr hdr;
	u32 qidx;
	u8 ctype;
	u8 op;
	union {
		struct nix_rq_ctx_s rq;
		struct nix_sq_ctx_s sq;
		struct nix_cq_ctx_s cq;
		struct nix_rsse_s rss;
		struct nix_rx_mce_s mce;
	};
	union {
		struct nix_rq_ctx_s rq_mask;
		struct nix_sq_ctx_s sq_mask;
		struct nix_cq_ctx_s cq_mask;
		struct nix_rsse_s rss_mask;
		struct nix_rx_mce_s mce_mask;
	};
};

struct nix_aq_enq_rsp {
	struct mbox_msghdr hdr;
	union {
		struct nix_rq_ctx_s rq;
		struct nix_sq_ctx_s sq;
		struct nix_cq_ctx_s cq;
		struct nix_rsse_s rss;
		struct nix_rx_mce_s mce;
	};
};

/* Tx scheduler/shaper mailbox messages */

#define MAX_TXSCHQ_PER_FUNC	128

struct nix_txsch_alloc_req {
	struct mbox_msghdr hdr;
	/* Scheduler queue count request at each level */
	u16 schq_contig[NIX_TXSCH_LVL_CNT]; /* No of contiguous queues */
	u16 schq[NIX_TXSCH_LVL_CNT]; /* No of non-contiguous queues */
};

struct nix_txsch_alloc_rsp {
	struct mbox_msghdr hdr;
	/* Scheduler queue count allocated at each level */
	u16 schq_contig[NIX_TXSCH_LVL_CNT];
	u16 schq[NIX_TXSCH_LVL_CNT];
	/* Scheduler queue list allocated at each level */
	u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
};

struct nix_txsch_free_req {
	struct mbox_msghdr hdr;
#define TXSCHQ_FREE_ALL BIT_ULL(0)
	u16 flags;
	/* Scheduler queue level to be freed */
	u16 schq_lvl;
	/* List of scheduler queues to be freed */
	u16 schq;
};

struct nix_txschq_config {
	struct mbox_msghdr hdr;
	u8 lvl;	/* SMQ/MDQ/TL4/TL3/TL2/TL1 */
#define TXSCHQ_IDX_SHIFT	16
#define TXSCHQ_IDX_MASK		(BIT_ULL(10) - 1)
#define TXSCHQ_IDX(reg, shift)	(((reg) >> (shift)) & TXSCHQ_IDX_MASK)
	u8 num_regs;
#define MAX_REGS_PER_MBOX_MSG	20
	u64 reg[MAX_REGS_PER_MBOX_MSG];
	u64 regval[MAX_REGS_PER_MBOX_MSG];
};
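
/* Each reg[] entry holds a NIX AF scheduler CSR offset and regval[] the value
 * to program into it.  Given TXSCHQ_IDX_SHIFT and TXSCHQ_IDX_MASK above, the
 * scheduler queue index is encoded in bits [25:16] of that offset, so the AF
 * side can recover it with the helper macro.  A small worked example (the
 * base offset 0x1400 is hypothetical):
 *
 *	u64 reg  = 0x1400 | (5ULL << TXSCHQ_IDX_SHIFT);    (schq index 5)
 *	u16 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);      (schq == 5)
 */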

struct nix_vtag_config {
	struct mbox_msghdr hdr;
	/* '0' for 4 octet VTAG, '1' for 8 octet VTAG */
	u8 vtag_size;
	/* cfg_type is '0' for tx vlan cfg
	 * cfg_type is '1' for rx vlan cfg
	 */
	u8 cfg_type;
	union {
		/* valid when cfg_type is '0' */
		struct {
			/* tx vlan0 tag(C-VLAN) */
			u64 vlan0;
			/* tx vlan1 tag(S-VLAN) */
			u64 vlan1;
			/* insert tx vlan tag */
			u8 insert_vlan :1;
			/* insert tx double vlan tag */
			u8 double_vlan :1;
		} tx;

		/* valid when cfg_type is '1' */
		struct {
			/* rx vtag type index, valid values are in 0..7 range */
			u8 vtag_type;
			/* rx vtag strip */
			u8 strip_vtag :1;
			/* rx vtag capture */
			u8 capture_vtag :1;
		} rx;
	};
};
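
/* A sketch of an Rx-side request asking for a 4-octet VLAN tag to be
 * stripped (values follow the field comments above and are illustrative
 * only):
 *
 *	struct nix_vtag_config *req;
 *	...
 *	req->vtag_size = 0;        (4 octet VTAG)
 *	req->cfg_type = 1;         (rx vlan cfg)
 *	req->rx.vtag_type = 0;
 *	req->rx.strip_vtag = 1;
 *	req->rx.capture_vtag = 0;
 */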

struct nix_rss_flowkey_cfg {
	struct mbox_msghdr hdr;
	int mcam_index;	/* MCAM entry index to modify */
#define NIX_FLOW_KEY_TYPE_PORT	BIT(0)
#define NIX_FLOW_KEY_TYPE_IPV4	BIT(1)
#define NIX_FLOW_KEY_TYPE_IPV6	BIT(2)
#define NIX_FLOW_KEY_TYPE_TCP	BIT(3)
#define NIX_FLOW_KEY_TYPE_UDP	BIT(4)
#define NIX_FLOW_KEY_TYPE_SCTP	BIT(5)
	u32 flowkey_cfg; /* Flowkey types selected */
	u8 group;	 /* RSS context or group */
};
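
/* flowkey_cfg is a bitwise OR of the NIX_FLOW_KEY_TYPE_* flags above.  For
 * instance, a conventional 4-tuple hash over IPv4/IPv6 and TCP/UDP on RSS
 * group 0 could be requested as (illustrative only):
 *
 *	req->flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
 *			   NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP;
 *	req->group = 0;
 */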

struct nix_rss_flowkey_cfg_rsp {
	struct mbox_msghdr hdr;
	u8 alg_idx;	/* Selected algo index */
};

struct nix_set_mac_addr {
	struct mbox_msghdr hdr;
	u8 mac_addr[ETH_ALEN];	/* MAC address to be set for this pcifunc */
};

struct nix_mark_format_cfg {
	struct mbox_msghdr hdr;
	u8 offset;
	u8 y_mask;
	u8 y_val;
	u8 r_mask;
	u8 r_val;
};

struct nix_mark_format_cfg_rsp {
	struct mbox_msghdr hdr;
	u8 mark_format_idx;
};

struct nix_rx_mode {
	struct mbox_msghdr hdr;
#define NIX_RX_MODE_UCAST	BIT(0)
#define NIX_RX_MODE_PROMISC	BIT(1)
#define NIX_RX_MODE_ALLMULTI	BIT(2)
	u16 mode;
};

struct nix_rx_cfg {
	struct mbox_msghdr hdr;
#define NIX_RX_OL3_VERIFY	BIT(0)
#define NIX_RX_OL4_VERIFY	BIT(1)
	u8 len_verify;	/* Outer L3/L4 len check */
#define NIX_RX_CSUM_OL4_VERIFY	BIT(0)
	u8 csum_verify;	/* Outer L4 checksum verification */
};

struct nix_frs_cfg {
	struct mbox_msghdr hdr;
	u8 update_smq;		/* Update SMQ's min/max lens */
	u8 update_minlen;	/* Set minlen also */
	u8 sdp_link;		/* Set SDP RX link */
	u16 maxlen;
	u16 minlen;
};

struct nix_lso_format_cfg {
	struct mbox_msghdr hdr;
	u64 field_mask;
#define NIX_LSO_FIELD_MAX	8
	u64 fields[NIX_LSO_FIELD_MAX];
};

struct nix_lso_format_cfg_rsp {
	struct mbox_msghdr hdr;
	u8 lso_format_idx;
};

/* NPC mbox message structs */

#define NPC_MCAM_ENTRY_INVALID	0xFFFF
#define NPC_MCAM_INVALID_MAP	0xFFFF

/* NPC mailbox error codes
 * Range 701 - 800.
 */
enum npc_af_status {
	NPC_MCAM_INVALID_REQ = -701,
	NPC_MCAM_ALLOC_DENIED = -702,
	NPC_MCAM_ALLOC_FAILED = -703,
	NPC_MCAM_PERM_DENIED = -704,
};

struct npc_mcam_alloc_entry_req {
	struct mbox_msghdr hdr;
#define NPC_MAX_NONCONTIG_ENTRIES	256
	u8 contig;	/* Contiguous entries ? */
#define NPC_MCAM_ANY_PRIO	0
#define NPC_MCAM_LOWER_PRIO	1
#define NPC_MCAM_HIGHER_PRIO	2
	u8 priority;	/* Lower or higher w.r.t ref_entry */
	u16 ref_entry;
	u16 count;	/* Number of entries requested */
};

struct npc_mcam_alloc_entry_rsp {
	struct mbox_msghdr hdr;
	u16 entry;	/* Entry allocated or start index if contiguous.
			 * Invalid in case of non-contiguous.
			 */
	u16 count;	/* Number of entries allocated */
	u16 free_count;	/* Number of entries available */
	u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
};
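
/* A sketch of a non-contiguous allocation and how the response is consumed
 * (illustrative only; mbox plumbing and error handling elided, and
 * 'ref_entry'/'setup_rule_at()' are hypothetical):
 *
 *	struct npc_mcam_alloc_entry_req *req;
 *	struct npc_mcam_alloc_entry_rsp *rsp;
 *	int i;
 *	...
 *	req->contig = 0;
 *	req->count = 4;
 *	req->priority = NPC_MCAM_HIGHER_PRIO;
 *	req->ref_entry = ref_entry;
 *	... send the request and wait for the response ...
 *	for (i = 0; i < rsp->count; i++)
 *		setup_rule_at(rsp->entry_list[i]);
 */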

struct npc_mcam_free_entry_req {
	struct mbox_msghdr hdr;
	u16 entry;	/* Entry index to be freed */
	u8 all;		/* If all entries allocated to this PFVF to be freed */
};

struct mcam_entry {
#define NPC_MAX_KWS_IN_KEY	7 /* Number of keywords in max keywidth */
	u64 kw[NPC_MAX_KWS_IN_KEY];
	u64 kw_mask[NPC_MAX_KWS_IN_KEY];
	u64 action;
	u64 vtag_action;
};

struct npc_mcam_write_entry_req {
	struct mbox_msghdr hdr;
	struct mcam_entry entry_data;
	u16 entry;	/* MCAM entry to write this match key */
	u16 cntr;	/* Counter for this MCAM entry */
	u8 intf;	/* Rx or Tx interface */
	u8 enable_entry;/* Enable this MCAM entry ? */
	u8 set_cntr;	/* Set counter for this entry ? */
};

/* Enable/Disable a given entry */
struct npc_mcam_ena_dis_entry_req {
	struct mbox_msghdr hdr;
	u16 entry;
};

struct npc_mcam_shift_entry_req {
	struct mbox_msghdr hdr;
#define NPC_MCAM_MAX_SHIFTS	64
	u16 curr_entry[NPC_MCAM_MAX_SHIFTS];
	u16 new_entry[NPC_MCAM_MAX_SHIFTS];
	u16 shift_count; /* Number of entries to shift */
};

struct npc_mcam_shift_entry_rsp {
	struct mbox_msghdr hdr;
	u16 failed_entry_idx; /* Index in 'curr_entry', not entry itself */
};

struct npc_mcam_alloc_counter_req {
	struct mbox_msghdr hdr;
	u8 contig;	/* Contiguous counters ? */
#define NPC_MAX_NONCONTIG_COUNTERS	64
	u16 count;	/* Number of counters requested */
};

struct npc_mcam_alloc_counter_rsp {
	struct mbox_msghdr hdr;
	u16 cntr;	/* Counter allocated or start index if contiguous.
			 * Invalid in case of non-contiguous.
			 */
	u16 count;	/* Number of counters allocated */
	u16 cntr_list[NPC_MAX_NONCONTIG_COUNTERS];
};

struct npc_mcam_oper_counter_req {
	struct mbox_msghdr hdr;
	u16 cntr;	/* Free a counter or clear/fetch its stats */
};

struct npc_mcam_oper_counter_rsp {
	struct mbox_msghdr hdr;
	u64 stat;	/* valid only while fetching counter's stats */
};

struct npc_mcam_unmap_counter_req {
	struct mbox_msghdr hdr;
	u16 cntr;
	u16 entry;	/* Entry and counter to be unmapped */
	u8 all;		/* Unmap all entries using this counter ? */
};

struct npc_mcam_alloc_and_write_entry_req {
	struct mbox_msghdr hdr;
	struct mcam_entry entry_data;
	u16 ref_entry;
	u8 priority;	/* Lower or higher w.r.t ref_entry */
	u8 intf;	/* Rx or Tx interface */
	u8 enable_entry;/* Enable this MCAM entry ? */
	u8 alloc_cntr;	/* Allocate counter and map ? */
};

struct npc_mcam_alloc_and_write_entry_rsp {
	struct mbox_msghdr hdr;
	u16 entry;
	u16 cntr;
};

struct npc_get_kex_cfg_rsp {
	struct mbox_msghdr hdr;
	u64 rx_keyx_cfg;	/* NPC_AF_INTF(0)_KEX_CFG */
	u64 tx_keyx_cfg;	/* NPC_AF_INTF(1)_KEX_CFG */
#define NPC_MAX_INTF	2
#define NPC_MAX_LID	8
#define NPC_MAX_LT	16
#define NPC_MAX_LD	2
#define NPC_MAX_LFL	16
	/* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
	u64 kex_ld_flags[NPC_MAX_LD];
	/* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
	u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
	/* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
	u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
#define MKEX_NAME_LEN 128
	u8 mkex_pfl_name[MKEX_NAME_LEN];
};

#endif /* MBOX_H */