1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3 *
4 * Copyright (C) 2018 Marvell International Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11 #include <linux/module.h>
12 #include <linux/pci.h>
13
14 #include "rvu_struct.h"
15 #include "rvu_reg.h"
16 #include "rvu.h"
17 #include "npc.h"
18 #include "cgx.h"
19
20 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
21 int type, int chan_id);
22
23 enum mc_tbl_sz {
24 MC_TBL_SZ_256,
25 MC_TBL_SZ_512,
26 MC_TBL_SZ_1K,
27 MC_TBL_SZ_2K,
28 MC_TBL_SZ_4K,
29 MC_TBL_SZ_8K,
30 MC_TBL_SZ_16K,
31 MC_TBL_SZ_32K,
32 MC_TBL_SZ_64K,
33 };
34
35 enum mc_buf_cnt {
36 MC_BUF_CNT_8,
37 MC_BUF_CNT_16,
38 MC_BUF_CNT_32,
39 MC_BUF_CNT_64,
40 MC_BUF_CNT_128,
41 MC_BUF_CNT_256,
42 MC_BUF_CNT_512,
43 MC_BUF_CNT_1024,
44 MC_BUF_CNT_2048,
45 };
46
47 enum nix_makr_fmt_indexes {
48 NIX_MARK_CFG_IP_DSCP_RED,
49 NIX_MARK_CFG_IP_DSCP_YELLOW,
50 NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
51 NIX_MARK_CFG_IP_ECN_RED,
52 NIX_MARK_CFG_IP_ECN_YELLOW,
53 NIX_MARK_CFG_IP_ECN_YELLOW_RED,
54 NIX_MARK_CFG_VLAN_DEI_RED,
55 NIX_MARK_CFG_VLAN_DEI_YELLOW,
56 NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
57 NIX_MARK_CFG_MAX,
58 };
59
60 /* For now consider only the MC resources needed for broadcast
61 * pkt replication, i.e. 256 HWVFs + 12 PFs (268 MCE entries in all).
62 */
63 #define MC_TBL_SIZE MC_TBL_SZ_512
64 #define MC_BUF_CNT MC_BUF_CNT_128
65
66 struct mce {
67 struct hlist_node node;
68 u16 pcifunc;
69 };
70
71 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
72 {
73 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
74 int blkaddr;
75
76 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
77 if (!pfvf->nixlf || blkaddr < 0)
78 return false;
79 return true;
80 }
81
82 int rvu_get_nixlf_count(struct rvu *rvu)
83 {
84 struct rvu_block *block;
85 int blkaddr;
86
87 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
88 if (blkaddr < 0)
89 return 0;
90 block = &rvu->hw->block[blkaddr];
91 return block->lf.max;
92 }
93
94 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
95 {
96 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
97 struct rvu_hwinfo *hw = rvu->hw;
98 int blkaddr;
99
100 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
101 if (!pfvf->nixlf || blkaddr < 0)
102 return NIX_AF_ERR_AF_LF_INVALID;
103
104 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
105 if (*nixlf < 0)
106 return NIX_AF_ERR_AF_LF_INVALID;
107
108 if (nix_blkaddr)
109 *nix_blkaddr = blkaddr;
110
111 return 0;
112 }
113
114 static void nix_mce_list_init(struct nix_mce_list *list, int max)
115 {
116 INIT_HLIST_HEAD(&list->head);
117 list->count = 0;
118 list->max = max;
119 }
120
121 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
122 {
123 int idx;
124
125 if (!mcast)
126 return 0;
127
128 idx = mcast->next_free_mce;
129 mcast->next_free_mce += count;
130 return idx;
131 }
132
133 static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
134 {
135 if (blkaddr == BLKADDR_NIX0 && hw->nix0)
136 return hw->nix0;
137
138 return NULL;
139 }
140
141 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
142 {
143 int err;
144
145 /* Sync all in-flight RX packets to LLC/DRAM */
146 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
147 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
148 if (err)
149 dev_err(rvu->dev, "NIX RX software sync failed\n");
150 }
151
152 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
153 int lvl, u16 pcifunc, u16 schq)
154 {
155 struct rvu_hwinfo *hw = rvu->hw;
156 struct nix_txsch *txsch;
157 struct nix_hw *nix_hw;
158 u16 map_func;
159
160 nix_hw = get_nix_hw(rvu->hw, blkaddr);
161 if (!nix_hw)
162 return false;
163
164 txsch = &nix_hw->txsch[lvl];
165 /* Check out of bounds */
166 if (schq >= txsch->schq.max)
167 return false;
168
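/* txsch->pfvf_map[] packs the owning PF_FUNC together with config flags;
 * TXSCH_MAP_FUNC()/TXSCH_MAP_FLAGS() extract the two halves (encoding as
 * implied by the TXSCH_MAP_* macro usage throughout this file).
 */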
169 mutex_lock(&rvu->rsrc_lock);
170 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
171 mutex_unlock(&rvu->rsrc_lock);
172
173 /* TLs aggregating traffic are shared across PF and VFs */
174 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
175 if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
176 return false;
177 else
178 return true;
179 }
180
181 if (map_func != pcifunc)
182 return false;
183
184 return true;
185 }
186
187 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
188 {
189 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
190 u8 cgx_id, lmac_id;
191 int pkind, pf, vf;
192 int err;
193
194 pf = rvu_get_pf(pcifunc);
195 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
196 return 0;
197
198 switch (type) {
199 case NIX_INTF_TYPE_CGX:
200 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
201 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
202
203 pkind = rvu_npc_get_pkind(rvu, pf);
204 if (pkind < 0) {
205 dev_err(rvu->dev,
206 "PF_Func 0x%x: Invalid pkind\n", pcifunc);
207 return -EINVAL;
208 }
209 pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
210 pfvf->tx_chan_base = pfvf->rx_chan_base;
211 pfvf->rx_chan_cnt = 1;
212 pfvf->tx_chan_cnt = 1;
213 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
214 rvu_npc_set_pkind(rvu, pkind, pfvf);
215
216 /* By default we enable pause frames */
217 if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
218 cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu),
219 lmac_id, true, true);
220 break;
221 case NIX_INTF_TYPE_LBK:
222 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
223
224 /* Note that AF's VFs work in pairs and talk over consecutive
225 * loopback channels. Therefore, if an odd number of AF VFs is
226 * enabled, the last VF is left without a pair.
227 */
228 pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
229 pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
230 NIX_CHAN_LBK_CHX(0, vf + 1);
231 pfvf->rx_chan_cnt = 1;
232 pfvf->tx_chan_cnt = 1;
233 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
234 pfvf->rx_chan_base, false);
235 break;
236 }
237
238 /* Add a UCAST forwarding rule in MCAM matching this NIXLF's
239 * attached RVU PF/VF's MAC address.
240 */
241 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
242 pfvf->rx_chan_base, pfvf->mac_addr);
243
244 /* Add this PF_FUNC to bcast pkt replication list */
245 err = nix_update_bcast_mce_list(rvu, pcifunc, true);
246 if (err) {
247 dev_err(rvu->dev,
248 "Bcast list, failed to enable PF_FUNC 0x%x\n",
249 pcifunc);
250 return err;
251 }
252
253 rvu_npc_install_bcast_match_entry(rvu, pcifunc,
254 nixlf, pfvf->rx_chan_base);
255 pfvf->maxlen = NIC_HW_MIN_FRS;
256 pfvf->minlen = NIC_HW_MIN_FRS;
257
258 return 0;
259 }
260
261 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
262 {
263 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
264 int err;
265
266 pfvf->maxlen = 0;
267 pfvf->minlen = 0;
268 pfvf->rxvlan = false;
269
270 /* Remove this PF_FUNC from bcast pkt replication list */
271 err = nix_update_bcast_mce_list(rvu, pcifunc, false);
272 if (err) {
273 dev_err(rvu->dev,
274 "Bcast list, failed to disable PF_FUNC 0x%x\n",
275 pcifunc);
276 }
277
278 /* Free and disable any MCAM entries used by this NIX LF */
279 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
280 }
281
282 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
283 struct nix_bp_cfg_req *req,
284 struct msg_rsp *rsp)
285 {
286 u16 pcifunc = req->hdr.pcifunc;
287 struct rvu_pfvf *pfvf;
288 int blkaddr, pf, type;
289 u16 chan_base, chan;
290 u64 cfg;
291
292 pf = rvu_get_pf(pcifunc);
293 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
294 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
295 return 0;
296
297 pfvf = rvu_get_pfvf(rvu, pcifunc);
298 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
299
300 chan_base = pfvf->rx_chan_base + req->chan_base;
301 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
302 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
303 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
304 cfg & ~BIT_ULL(16));
305 }
306 return 0;
307 }
308
309 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
310 int type, int chan_id)
311 {
312 int bpid, blkaddr, lmac_chan_cnt;
313 struct rvu_hwinfo *hw = rvu->hw;
314 u16 cgx_bpid_cnt, lbk_bpid_cnt;
315 struct rvu_pfvf *pfvf;
316 u8 cgx_id, lmac_id;
317 u64 cfg;
318
319 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
320 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
321 lmac_chan_cnt = cfg & 0xFF;
322
323 cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
324 lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
325
326 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
327
328 /* Backpressure ID range division
329 * CGX channels are mapped to (0 - 191) BPIDs
330 * LBK channels are mapped to (192 - 255) BPIDs
331 * SDP channels are mapped to (256 - 511) BPIDs
332 *
333 * LMAC channels and BPIDs are mapped as follows
334 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
335 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
336 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
337 */
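/* Worked example (illustrative, using the mapping above, i.e. 4 LMACs
 * per CGX and 16 channels per LMAC as read from NIX_AF_CONST):
 * cgx(1)_lmac(2)_chan(5) gets bpid = 1 * 4 * 16 + 2 * 16 + 5 = 101.
 */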
338 switch (type) {
339 case NIX_INTF_TYPE_CGX:
340 if ((req->chan_base + req->chan_cnt) > 15)
341 return -EINVAL;
342 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
343 /* Assign bpid based on cgx, lmac and chan id */
344 bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
345 (lmac_id * lmac_chan_cnt) + req->chan_base;
346
347 if (req->bpid_per_chan)
348 bpid += chan_id;
349 if (bpid > cgx_bpid_cnt)
350 return -EINVAL;
351 break;
352
353 case NIX_INTF_TYPE_LBK:
354 if ((req->chan_base + req->chan_cnt) > 63)
355 return -EINVAL;
356 bpid = cgx_bpid_cnt + req->chan_base;
357 if (req->bpid_per_chan)
358 bpid += chan_id;
359 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
360 return -EINVAL;
361 break;
362 default:
363 return -EINVAL;
364 }
365 return bpid;
366 }
367
368 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
369 struct nix_bp_cfg_req *req,
370 struct nix_bp_cfg_rsp *rsp)
371 {
372 int blkaddr, pf, type, chan_id = 0;
373 u16 pcifunc = req->hdr.pcifunc;
374 struct rvu_pfvf *pfvf;
375 u16 chan_base, chan;
376 s16 bpid, bpid_base;
377 u64 cfg;
378
379 pf = rvu_get_pf(pcifunc);
380 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
381
382 /* Enable backpressure only for CGX mapped PFs and LBK interface */
383 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
384 return 0;
385
386 pfvf = rvu_get_pfvf(rvu, pcifunc);
387 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
388
389 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
390 chan_base = pfvf->rx_chan_base + req->chan_base;
391 bpid = bpid_base;
392
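/* Per the writes below, the BPID goes into the low bits of
 * NIX_AF_RX_CHANX_CFG and bit 16 enables backpressure for the channel
 * (the bp_disable handler above clears the same bit).
 */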
393 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
394 if (bpid < 0) {
395 dev_warn(rvu->dev, "Fail to enable backpressure\n");
396 return -EINVAL;
397 }
398
399 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
400 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
401 cfg | (bpid & 0xFF) | BIT_ULL(16));
402 chan_id++;
403 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
404 }
405
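/* Each rsp->chan_bpid[] entry packs the channel number into the upper
 * bits (shifted left by 10) and the assigned BPID into the lower 10 bits.
 */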
406 for (chan = 0; chan < req->chan_cnt; chan++) {
407 /* Map each channel to the BPID assigned to it */
408 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
409 (bpid_base & 0x3FF);
410 if (req->bpid_per_chan)
411 bpid_base++;
412 }
413 rsp->chan_cnt = req->chan_cnt;
414
415 return 0;
416 }
417
418 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
419 u64 format, bool v4, u64 *fidx)
420 {
421 struct nix_lso_format field = {0};
422
423 /* IP's Length field */
424 field.layer = NIX_TXLAYER_OL3;
425 /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
426 field.offset = v4 ? 2 : 4;
427 field.sizem1 = 1; /* i.e. 2 bytes */
428 field.alg = NIX_LSOALG_ADD_PAYLEN;
429 rvu_write64(rvu, blkaddr,
430 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
431 *(u64 *)&field);
432
433 /* No ID field in IPv6 header */
434 if (!v4)
435 return;
436
437 /* IP's ID field */
438 field.layer = NIX_TXLAYER_OL3;
439 field.offset = 4;
440 field.sizem1 = 1; /* i.e. 2 bytes */
441 field.alg = NIX_LSOALG_ADD_SEGNUM;
442 rvu_write64(rvu, blkaddr,
443 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
444 *(u64 *)&field);
445 }
446
447 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
448 u64 format, u64 *fidx)
449 {
450 struct nix_lso_format field = {0};
451
452 /* TCP's sequence number field */
453 field.layer = NIX_TXLAYER_OL4;
454 field.offset = 4;
455 field.sizem1 = 3; /* i.e. 4 bytes */
456 field.alg = NIX_LSOALG_ADD_OFFSET;
457 rvu_write64(rvu, blkaddr,
458 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
459 *(u64 *)&field);
460
461 /* TCP's flags field */
462 field.layer = NIX_TXLAYER_OL4;
463 field.offset = 12;
464 field.sizem1 = 1; /* 2 bytes */
465 field.alg = NIX_LSOALG_TCP_FLAGS;
466 rvu_write64(rvu, blkaddr,
467 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
468 *(u64 *)&field);
469 }
470
471 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
472 {
473 u64 cfg, idx, fidx = 0;
474
475 /* Get max HW supported format indices */
476 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
477 nix_hw->lso.total = cfg;
478
479 /* Enable LSO */
480 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
481 /* For TSO, set first and middle segment flags to
482 * mask out PSH, RST & FIN flags in TCP packet
483 */
484 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
485 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
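/* 0xFFF2 keeps every TCP flag bit except FIN (bit 0), RST (bit 2) and
 * PSH (bit 3), matching the intent stated above (assuming the HW ANDs
 * the segment's TCP flags with this mask).
 */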
486 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
487
488 /* Setup default static LSO formats
489 *
490 * Configure format fields for TCPv4 segmentation offload
491 */
492 idx = NIX_LSO_FORMAT_IDX_TSOV4;
493 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
494 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
495
496 /* Set rest of the fields to NOP */
497 for (; fidx < 8; fidx++) {
498 rvu_write64(rvu, blkaddr,
499 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
500 }
501 nix_hw->lso.in_use++;
502
503 /* Configure format fields for TCPv6 segmentation offload */
504 idx = NIX_LSO_FORMAT_IDX_TSOV6;
505 fidx = 0;
506 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
507 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
508
509 /* Set rest of the fields to NOP */
510 for (; fidx < 8; fidx++) {
511 rvu_write64(rvu, blkaddr,
512 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
513 }
514 nix_hw->lso.in_use++;
515 }
516
517 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
518 {
519 kfree(pfvf->rq_bmap);
520 kfree(pfvf->sq_bmap);
521 kfree(pfvf->cq_bmap);
522 if (pfvf->rq_ctx)
523 qmem_free(rvu->dev, pfvf->rq_ctx);
524 if (pfvf->sq_ctx)
525 qmem_free(rvu->dev, pfvf->sq_ctx);
526 if (pfvf->cq_ctx)
527 qmem_free(rvu->dev, pfvf->cq_ctx);
528 if (pfvf->rss_ctx)
529 qmem_free(rvu->dev, pfvf->rss_ctx);
530 if (pfvf->nix_qints_ctx)
531 qmem_free(rvu->dev, pfvf->nix_qints_ctx);
532 if (pfvf->cq_ints_ctx)
533 qmem_free(rvu->dev, pfvf->cq_ints_ctx);
534
535 pfvf->rq_bmap = NULL;
536 pfvf->cq_bmap = NULL;
537 pfvf->sq_bmap = NULL;
538 pfvf->rq_ctx = NULL;
539 pfvf->sq_ctx = NULL;
540 pfvf->cq_ctx = NULL;
541 pfvf->rss_ctx = NULL;
542 pfvf->nix_qints_ctx = NULL;
543 pfvf->cq_ints_ctx = NULL;
544 }
545
546 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
547 struct rvu_pfvf *pfvf, int nixlf,
548 int rss_sz, int rss_grps, int hwctx_size,
549 u64 way_mask)
550 {
551 int err, grp, num_indices;
552
553 /* RSS is not requested for this NIXLF */
554 if (!rss_sz)
555 return 0;
556 num_indices = rss_sz * rss_grps;
557
558 /* Alloc NIX RSS HW context memory and config the base */
559 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
560 if (err)
561 return err;
562
563 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
564 (u64)pfvf->rss_ctx->iova);
565
566 /* Config full RSS table size, enable RSS and caching */
567 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
568 BIT_ULL(36) | BIT_ULL(4) |
569 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
570 way_mask << 20);
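/* Illustrative example (assuming MAX_RSS_INDIR_TBL_SIZE is 256, as the
 * "256UL << (cfg & 0xF)" check in rvu_nix_aq_enq_inst() suggests):
 * rss_sz = 256 and rss_grps = 8 give num_indices = 2048, so the size
 * field written above is ilog2(2048 / 256) = 3, i.e. 256 << 3 entries.
 */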
571 /* Config RSS group offset and sizes */
572 for (grp = 0; grp < rss_grps; grp++)
573 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
574 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
575 return 0;
576 }
577
578 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
579 struct nix_aq_inst_s *inst)
580 {
581 struct admin_queue *aq = block->aq;
582 struct nix_aq_res_s *result;
583 int timeout = 1000;
584 u64 reg, head;
585
586 result = (struct nix_aq_res_s *)aq->res->base;
587
588 /* Get current head pointer where to append this instruction */
589 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
590 head = (reg >> 4) & AQ_PTR_MASK;
591
592 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
593 (void *)inst, aq->inst->entry_sz);
594 memset(result, 0, sizeof(*result));
595 /* sync into memory */
596 wmb();
597
598 /* Ring the doorbell and wait for result */
599 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
600 while (result->compcode == NIX_AQ_COMP_NOTDONE) {
601 cpu_relax();
602 udelay(1);
603 timeout--;
604 if (!timeout)
605 return -EBUSY;
606 }
607
608 if (result->compcode != NIX_AQ_COMP_GOOD)
609 /* TODO: Replace this with some error code */
610 return -EBUSY;
611
612 return 0;
613 }
614
615 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
616 struct nix_aq_enq_rsp *rsp)
617 {
618 struct rvu_hwinfo *hw = rvu->hw;
619 u16 pcifunc = req->hdr.pcifunc;
620 int nixlf, blkaddr, rc = 0;
621 struct nix_aq_inst_s inst;
622 struct rvu_block *block;
623 struct admin_queue *aq;
624 struct rvu_pfvf *pfvf;
625 void *ctx, *mask;
626 bool ena;
627 u64 cfg;
628
629 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
630 if (blkaddr < 0)
631 return NIX_AF_ERR_AF_LF_INVALID;
632
633 block = &hw->block[blkaddr];
634 aq = block->aq;
635 if (!aq) {
636 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
637 return NIX_AF_ERR_AQ_ENQUEUE;
638 }
639
640 pfvf = rvu_get_pfvf(rvu, pcifunc);
641 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
642
643 /* Skip NIXLF check for broadcast MCE entry init */
644 if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
645 if (!pfvf->nixlf || nixlf < 0)
646 return NIX_AF_ERR_AF_LF_INVALID;
647 }
648
649 switch (req->ctype) {
650 case NIX_AQ_CTYPE_RQ:
651 /* Check if index exceeds max no of queues */
652 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
653 rc = NIX_AF_ERR_AQ_ENQUEUE;
654 break;
655 case NIX_AQ_CTYPE_SQ:
656 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
657 rc = NIX_AF_ERR_AQ_ENQUEUE;
658 break;
659 case NIX_AQ_CTYPE_CQ:
660 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
661 rc = NIX_AF_ERR_AQ_ENQUEUE;
662 break;
663 case NIX_AQ_CTYPE_RSS:
664 /* Check if RSS is enabled and qidx is within range */
665 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
666 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
667 (req->qidx >= (256UL << (cfg & 0xF))))
668 rc = NIX_AF_ERR_AQ_ENQUEUE;
669 break;
670 case NIX_AQ_CTYPE_MCE:
671 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
672 /* Check if index exceeds MCE list length */
673 if (!hw->nix0->mcast.mce_ctx ||
674 (req->qidx >= (256UL << (cfg & 0xF))))
675 rc = NIX_AF_ERR_AQ_ENQUEUE;
676
677 /* Adding multicast lists for requests from PF/VFs is not
678 * yet supported, so ignore this.
679 */
680 if (rsp)
681 rc = NIX_AF_ERR_AQ_ENQUEUE;
682 break;
683 default:
684 rc = NIX_AF_ERR_AQ_ENQUEUE;
685 }
686
687 if (rc)
688 return rc;
689
690 /* Check if SQ pointed SMQ belongs to this PF/VF or not */
691 if (req->ctype == NIX_AQ_CTYPE_SQ &&
692 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
693 (req->op == NIX_AQ_INSTOP_WRITE &&
694 req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
695 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
696 pcifunc, req->sq.smq))
697 return NIX_AF_ERR_AQ_ENQUEUE;
698 }
699
700 memset(&inst, 0, sizeof(struct nix_aq_inst_s));
701 inst.lf = nixlf;
702 inst.cindex = req->qidx;
703 inst.ctype = req->ctype;
704 inst.op = req->op;
705 /* Currently we are not supporting enqueuing multiple instructions,
706 * so always choose first entry in result memory.
707 */
708 inst.res_addr = (u64)aq->res->iova;
709
710 /* Hardware uses same aq->res->base for updating result of
711 * previous instruction hence wait here till it is done.
712 */
713 spin_lock(&aq->lock);
714
715 /* Clean result + context memory */
716 memset(aq->res->base, 0, aq->res->entry_sz);
717 /* Context needs to be written at RES_ADDR + 128 */
718 ctx = aq->res->base + 128;
719 /* Mask needs to be written at RES_ADDR + 256 */
720 mask = aq->res->base + 256;
721
722 switch (req->op) {
723 case NIX_AQ_INSTOP_WRITE:
724 if (req->ctype == NIX_AQ_CTYPE_RQ)
725 memcpy(mask, &req->rq_mask,
726 sizeof(struct nix_rq_ctx_s));
727 else if (req->ctype == NIX_AQ_CTYPE_SQ)
728 memcpy(mask, &req->sq_mask,
729 sizeof(struct nix_sq_ctx_s));
730 else if (req->ctype == NIX_AQ_CTYPE_CQ)
731 memcpy(mask, &req->cq_mask,
732 sizeof(struct nix_cq_ctx_s));
733 else if (req->ctype == NIX_AQ_CTYPE_RSS)
734 memcpy(mask, &req->rss_mask,
735 sizeof(struct nix_rsse_s));
736 else if (req->ctype == NIX_AQ_CTYPE_MCE)
737 memcpy(mask, &req->mce_mask,
738 sizeof(struct nix_rx_mce_s));
739 fallthrough;
740 case NIX_AQ_INSTOP_INIT:
741 if (req->ctype == NIX_AQ_CTYPE_RQ)
742 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
743 else if (req->ctype == NIX_AQ_CTYPE_SQ)
744 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
745 else if (req->ctype == NIX_AQ_CTYPE_CQ)
746 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
747 else if (req->ctype == NIX_AQ_CTYPE_RSS)
748 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
749 else if (req->ctype == NIX_AQ_CTYPE_MCE)
750 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
751 break;
752 case NIX_AQ_INSTOP_NOP:
753 case NIX_AQ_INSTOP_READ:
754 case NIX_AQ_INSTOP_LOCK:
755 case NIX_AQ_INSTOP_UNLOCK:
756 break;
757 default:
758 rc = NIX_AF_ERR_AQ_ENQUEUE;
759 spin_unlock(&aq->lock);
760 return rc;
761 }
762
763 /* Submit the instruction to AQ */
764 rc = nix_aq_enqueue_wait(rvu, block, &inst);
765 if (rc) {
766 spin_unlock(&aq->lock);
767 return rc;
768 }
769
770 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
771 if (req->op == NIX_AQ_INSTOP_INIT) {
772 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
773 __set_bit(req->qidx, pfvf->rq_bmap);
774 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
775 __set_bit(req->qidx, pfvf->sq_bmap);
776 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
777 __set_bit(req->qidx, pfvf->cq_bmap);
778 }
779
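/* For WRITE ops a queue's 'ena' state changes only when the corresponding
 * mask bit is set; otherwise the current state is retained, i.e. this is a
 * read-modify-write of the shadow RQ/SQ/CQ bitmaps.
 */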
780 if (req->op == NIX_AQ_INSTOP_WRITE) {
781 if (req->ctype == NIX_AQ_CTYPE_RQ) {
782 ena = (req->rq.ena & req->rq_mask.ena) |
783 (test_bit(req->qidx, pfvf->rq_bmap) &
784 ~req->rq_mask.ena);
785 if (ena)
786 __set_bit(req->qidx, pfvf->rq_bmap);
787 else
788 __clear_bit(req->qidx, pfvf->rq_bmap);
789 }
790 if (req->ctype == NIX_AQ_CTYPE_SQ) {
791 ena = (req->sq.ena & req->sq_mask.ena) |
792 (test_bit(req->qidx, pfvf->sq_bmap) &
793 ~req->sq_mask.ena);
794 if (ena)
795 __set_bit(req->qidx, pfvf->sq_bmap);
796 else
797 __clear_bit(req->qidx, pfvf->sq_bmap);
798 }
799 if (req->ctype == NIX_AQ_CTYPE_CQ) {
800 ena = (req->cq.ena & req->cq_mask.ena) |
801 (test_bit(req->qidx, pfvf->cq_bmap) &
802 ~req->cq_mask.ena);
803 if (ena)
804 __set_bit(req->qidx, pfvf->cq_bmap);
805 else
806 __clear_bit(req->qidx, pfvf->cq_bmap);
807 }
808 }
809
810 if (rsp) {
811 /* Copy read context into mailbox */
812 if (req->op == NIX_AQ_INSTOP_READ) {
813 if (req->ctype == NIX_AQ_CTYPE_RQ)
814 memcpy(&rsp->rq, ctx,
815 sizeof(struct nix_rq_ctx_s));
816 else if (req->ctype == NIX_AQ_CTYPE_SQ)
817 memcpy(&rsp->sq, ctx,
818 sizeof(struct nix_sq_ctx_s));
819 else if (req->ctype == NIX_AQ_CTYPE_CQ)
820 memcpy(&rsp->cq, ctx,
821 sizeof(struct nix_cq_ctx_s));
822 else if (req->ctype == NIX_AQ_CTYPE_RSS)
823 memcpy(&rsp->rss, ctx,
824 sizeof(struct nix_rsse_s));
825 else if (req->ctype == NIX_AQ_CTYPE_MCE)
826 memcpy(&rsp->mce, ctx,
827 sizeof(struct nix_rx_mce_s));
828 }
829 }
830
831 spin_unlock(&aq->lock);
832 return 0;
833 }
834
835 static const char *nix_get_ctx_name(int ctype)
836 {
837 switch (ctype) {
838 case NIX_AQ_CTYPE_CQ:
839 return "CQ";
840 case NIX_AQ_CTYPE_SQ:
841 return "SQ";
842 case NIX_AQ_CTYPE_RQ:
843 return "RQ";
844 case NIX_AQ_CTYPE_RSS:
845 return "RSS";
846 }
847 return "";
848 }
849
850 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
851 {
852 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
853 struct nix_aq_enq_req aq_req;
854 unsigned long *bmap;
855 int qidx, q_cnt = 0;
856 int err = 0, rc;
857
858 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
859 return NIX_AF_ERR_AQ_ENQUEUE;
860
861 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
862 aq_req.hdr.pcifunc = req->hdr.pcifunc;
863
864 if (req->ctype == NIX_AQ_CTYPE_CQ) {
865 aq_req.cq.ena = 0;
866 aq_req.cq_mask.ena = 1;
867 aq_req.cq.bp_ena = 0;
868 aq_req.cq_mask.bp_ena = 1;
869 q_cnt = pfvf->cq_ctx->qsize;
870 bmap = pfvf->cq_bmap;
871 }
872 if (req->ctype == NIX_AQ_CTYPE_SQ) {
873 aq_req.sq.ena = 0;
874 aq_req.sq_mask.ena = 1;
875 q_cnt = pfvf->sq_ctx->qsize;
876 bmap = pfvf->sq_bmap;
877 }
878 if (req->ctype == NIX_AQ_CTYPE_RQ) {
879 aq_req.rq.ena = 0;
880 aq_req.rq_mask.ena = 1;
881 q_cnt = pfvf->rq_ctx->qsize;
882 bmap = pfvf->rq_bmap;
883 }
884
885 aq_req.ctype = req->ctype;
886 aq_req.op = NIX_AQ_INSTOP_WRITE;
887
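/* Issue one AQ WRITE per queue that was initialized, clearing only the
 * fields selected via the context mask above ('ena', plus 'bp_ena' for CQs).
 */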
888 for (qidx = 0; qidx < q_cnt; qidx++) {
889 if (!test_bit(qidx, bmap))
890 continue;
891 aq_req.qidx = qidx;
892 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
893 if (rc) {
894 err = rc;
895 dev_err(rvu->dev, "Failed to disable %s:%d context\n",
896 nix_get_ctx_name(req->ctype), qidx);
897 }
898 }
899
900 return err;
901 }
902
903 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
904 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
905 {
906 struct nix_aq_enq_req lock_ctx_req;
907 int err;
908
909 if (req->op != NIX_AQ_INSTOP_INIT)
910 return 0;
911
912 if (req->ctype == NIX_AQ_CTYPE_MCE ||
913 req->ctype == NIX_AQ_CTYPE_DYNO)
914 return 0;
915
916 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
917 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
918 lock_ctx_req.ctype = req->ctype;
919 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
920 lock_ctx_req.qidx = req->qidx;
921 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
922 if (err)
923 dev_err(rvu->dev,
924 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
925 req->hdr.pcifunc,
926 nix_get_ctx_name(req->ctype), req->qidx);
927 return err;
928 }
929
930 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
931 struct nix_aq_enq_req *req,
932 struct nix_aq_enq_rsp *rsp)
933 {
934 int err;
935
936 err = rvu_nix_aq_enq_inst(rvu, req, rsp);
937 if (!err)
938 err = nix_lf_hwctx_lockdown(rvu, req);
939 return err;
940 }
941 #else
942
943 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
944 struct nix_aq_enq_req *req,
945 struct nix_aq_enq_rsp *rsp)
946 {
947 return rvu_nix_aq_enq_inst(rvu, req, rsp);
948 }
949 #endif
950
951 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
952 struct hwctx_disable_req *req,
953 struct msg_rsp *rsp)
954 {
955 return nix_lf_hwctx_disable(rvu, req);
956 }
957
958 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
959 struct nix_lf_alloc_req *req,
960 struct nix_lf_alloc_rsp *rsp)
961 {
962 int nixlf, qints, hwctx_size, intf, err, rc = 0;
963 struct rvu_hwinfo *hw = rvu->hw;
964 u16 pcifunc = req->hdr.pcifunc;
965 struct rvu_block *block;
966 struct rvu_pfvf *pfvf;
967 u64 cfg, ctx_cfg;
968 int blkaddr;
969
970 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
971 return NIX_AF_ERR_PARAM;
972
973 if (req->way_mask)
974 req->way_mask &= 0xFFFF;
975
976 pfvf = rvu_get_pfvf(rvu, pcifunc);
977 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
978 if (!pfvf->nixlf || blkaddr < 0)
979 return NIX_AF_ERR_AF_LF_INVALID;
980
981 block = &hw->block[blkaddr];
982 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
983 if (nixlf < 0)
984 return NIX_AF_ERR_AF_LF_INVALID;
985
986 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
987 if (req->npa_func) {
988 /* If default, use 'this' NIXLF's PFFUNC */
989 if (req->npa_func == RVU_DEFAULT_PF_FUNC)
990 req->npa_func = pcifunc;
991 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
992 return NIX_AF_INVAL_NPA_PF_FUNC;
993 }
994
995 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
996 if (req->sso_func) {
997 /* If default, use 'this' NIXLF's PFFUNC */
998 if (req->sso_func == RVU_DEFAULT_PF_FUNC)
999 req->sso_func = pcifunc;
1000 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1001 return NIX_AF_INVAL_SSO_PF_FUNC;
1002 }
1003
1004 /* If RSS is being enabled, check if requested config is valid.
1005 * RSS table size should be power of two, otherwise
1006 * RSS_GRP::OFFSET + adder might go beyond that group or
1007 * won't be able to use entire table.
1008 */
1009 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1010 !is_power_of_2(req->rss_sz)))
1011 return NIX_AF_ERR_RSS_SIZE_INVALID;
1012
1013 if (req->rss_sz &&
1014 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1015 return NIX_AF_ERR_RSS_GRPS_INVALID;
1016
1017 /* Reset this NIX LF */
1018 err = rvu_lf_reset(rvu, block, nixlf);
1019 if (err) {
1020 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1021 block->addr - BLKADDR_NIX0, nixlf);
1022 return NIX_AF_ERR_LF_RESET;
1023 }
1024
1025 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
1026
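/* NIX_AF_CONST3 reports log2 of each HW context size in 4-bit fields;
 * the shifts used below (SQ: 0, RQ: 4, CQ: 8, RSS: 12, QINT: 20,
 * CINT: 24) pick out the respective field.
 */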
1027 /* Alloc NIX RQ HW context memory and config the base */
1028 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1029 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1030 if (err)
1031 goto free_mem;
1032
1033 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1034 if (!pfvf->rq_bmap)
1035 goto free_mem;
1036
1037 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1038 (u64)pfvf->rq_ctx->iova);
1039
1040 /* Set caching and queue count in HW */
1041 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1042 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1043
1044 /* Alloc NIX SQ HW context memory and config the base */
1045 hwctx_size = 1UL << (ctx_cfg & 0xF);
1046 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1047 if (err)
1048 goto free_mem;
1049
1050 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1051 if (!pfvf->sq_bmap)
1052 goto free_mem;
1053
1054 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1055 (u64)pfvf->sq_ctx->iova);
1056
1057 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1058 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1059
1060 /* Alloc NIX CQ HW context memory and config the base */
1061 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1062 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1063 if (err)
1064 goto free_mem;
1065
1066 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1067 if (!pfvf->cq_bmap)
1068 goto free_mem;
1069
1070 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1071 (u64)pfvf->cq_ctx->iova);
1072
1073 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1074 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1075
1076 /* Initialize receive side scaling (RSS) */
1077 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1078 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1079 req->rss_grps, hwctx_size, req->way_mask);
1080 if (err)
1081 goto free_mem;
1082
1083 /* Alloc memory for CQINT's HW contexts */
1084 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1085 qints = (cfg >> 24) & 0xFFF;
1086 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1087 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1088 if (err)
1089 goto free_mem;
1090
1091 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1092 (u64)pfvf->cq_ints_ctx->iova);
1093
1094 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1095 BIT_ULL(36) | req->way_mask << 20);
1096
1097 /* Alloc memory for QINT's HW contexts */
1098 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1099 qints = (cfg >> 12) & 0xFFF;
1100 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1101 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1102 if (err)
1103 goto free_mem;
1104
1105 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1106 (u64)pfvf->nix_qints_ctx->iova);
1107 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1108 BIT_ULL(36) | req->way_mask << 20);
1109
1110 /* Setup VLANX TPIDs.
1111 * Use VLAN1 for 802.1Q
1112 * and VLAN0 for 802.1AD.
1113 */
1114 cfg = (0x8100ULL << 16) | 0x88A8ULL;
1115 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1116
1117 /* Enable LMTST for this NIX LF */
1118 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1119
1120 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1121 if (req->npa_func)
1122 cfg = req->npa_func;
1123 if (req->sso_func)
1124 cfg |= (u64)req->sso_func << 16;
1125
1126 cfg |= (u64)req->xqe_sz << 33;
1127 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1128
1129 /* Config Rx pkt length, csum checks and apad enable / disable */
1130 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1131
1132 intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1133 err = nix_interface_init(rvu, pcifunc, intf, nixlf);
1134 if (err)
1135 goto free_mem;
1136
1137 /* Disable NPC entries as NIXLF's contexts are not initialized yet */
1138 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1139
1140 goto exit;
1141
1142 free_mem:
1143 nix_ctx_free(rvu, pfvf);
1144 rc = -ENOMEM;
1145
1146 exit:
1147 /* Set macaddr of this PF/VF */
1148 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1149
1150 /* set SQB size info */
1151 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1152 rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1153 rsp->rx_chan_base = pfvf->rx_chan_base;
1154 rsp->tx_chan_base = pfvf->tx_chan_base;
1155 rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1156 rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1157 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1158 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1159 /* Get HW supported stat count */
1160 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1161 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1162 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1163 /* Get count of CQ IRQs and error IRQs supported per LF */
1164 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1165 rsp->qints = ((cfg >> 12) & 0xFFF);
1166 rsp->cints = ((cfg >> 24) & 0xFFF);
1167 return rc;
1168 }
1169
1170 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
1171 struct msg_rsp *rsp)
1172 {
1173 struct rvu_hwinfo *hw = rvu->hw;
1174 u16 pcifunc = req->hdr.pcifunc;
1175 struct rvu_block *block;
1176 int blkaddr, nixlf, err;
1177 struct rvu_pfvf *pfvf;
1178
1179 pfvf = rvu_get_pfvf(rvu, pcifunc);
1180 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1181 if (!pfvf->nixlf || blkaddr < 0)
1182 return NIX_AF_ERR_AF_LF_INVALID;
1183
1184 block = &hw->block[blkaddr];
1185 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1186 if (nixlf < 0)
1187 return NIX_AF_ERR_AF_LF_INVALID;
1188
1189 nix_interface_deinit(rvu, pcifunc, nixlf);
1190
1191 /* Reset this NIX LF */
1192 err = rvu_lf_reset(rvu, block, nixlf);
1193 if (err) {
1194 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1195 block->addr - BLKADDR_NIX0, nixlf);
1196 return NIX_AF_ERR_LF_RESET;
1197 }
1198
1199 nix_ctx_free(rvu, pfvf);
1200
1201 return 0;
1202 }
1203
1204 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1205 struct nix_mark_format_cfg *req,
1206 struct nix_mark_format_cfg_rsp *rsp)
1207 {
1208 u16 pcifunc = req->hdr.pcifunc;
1209 struct nix_hw *nix_hw;
1210 struct rvu_pfvf *pfvf;
1211 int blkaddr, rc;
1212 u32 cfg;
1213
1214 pfvf = rvu_get_pfvf(rvu, pcifunc);
1215 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1216 if (!pfvf->nixlf || blkaddr < 0)
1217 return NIX_AF_ERR_AF_LF_INVALID;
1218
1219 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1220 if (!nix_hw)
1221 return -EINVAL;
1222
1223 cfg = (((u32)req->offset & 0x7) << 16) |
1224 (((u32)req->y_mask & 0xF) << 12) |
1225 (((u32)req->y_val & 0xF) << 8) |
1226 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1227
1228 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1229 if (rc < 0) {
1230 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1231 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1232 return NIX_AF_ERR_MARK_CFG_FAIL;
1233 }
1234
1235 rsp->mark_format_idx = rc;
1236 return 0;
1237 }
1238
1239 /* Disable shaping of pkts by a scheduler queue
1240 * at a given scheduler level.
1241 */
1242 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1243 int lvl, int schq)
1244 {
1245 u64 cir_reg = 0, pir_reg = 0;
1246 u64 cfg;
1247
1248 switch (lvl) {
1249 case NIX_TXSCH_LVL_TL1:
1250 cir_reg = NIX_AF_TL1X_CIR(schq);
1251 pir_reg = 0; /* PIR not available at TL1 */
1252 break;
1253 case NIX_TXSCH_LVL_TL2:
1254 cir_reg = NIX_AF_TL2X_CIR(schq);
1255 pir_reg = NIX_AF_TL2X_PIR(schq);
1256 break;
1257 case NIX_TXSCH_LVL_TL3:
1258 cir_reg = NIX_AF_TL3X_CIR(schq);
1259 pir_reg = NIX_AF_TL3X_PIR(schq);
1260 break;
1261 case NIX_TXSCH_LVL_TL4:
1262 cir_reg = NIX_AF_TL4X_CIR(schq);
1263 pir_reg = NIX_AF_TL4X_PIR(schq);
1264 break;
1265 }
1266
1267 if (!cir_reg)
1268 return;
1269 cfg = rvu_read64(rvu, blkaddr, cir_reg);
1270 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1271
1272 if (!pir_reg)
1273 return;
1274 cfg = rvu_read64(rvu, blkaddr, pir_reg);
1275 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1276 }
1277
1278 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1279 int lvl, int schq)
1280 {
1281 struct rvu_hwinfo *hw = rvu->hw;
1282 int link;
1283
1284 if (lvl >= hw->cap.nix_tx_aggr_lvl)
1285 return;
1286
1287 /* Reset TL4's SDP link config */
1288 if (lvl == NIX_TXSCH_LVL_TL4)
1289 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1290
1291 if (lvl != NIX_TXSCH_LVL_TL2)
1292 return;
1293
1294 /* Reset TL2's CGX or LBK link config */
1295 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1296 rvu_write64(rvu, blkaddr,
1297 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1298 }
1299
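/* Transmit link index layout (per the returns below): CGX LMAC links come
 * first (cgx_id * lmacs_per_cgx + lmac_id), followed by the LBK links and
 * finally the SDP link.
 */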
1300 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1301 {
1302 struct rvu_hwinfo *hw = rvu->hw;
1303 int pf = rvu_get_pf(pcifunc);
1304 u8 cgx_id = 0, lmac_id = 0;
1305
1306 if (is_afvf(pcifunc)) {/* LBK links */
1307 return hw->cgx_links;
1308 } else if (is_pf_cgxmapped(rvu, pf)) {
1309 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1310 return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1311 }
1312
1313 /* SDP link */
1314 return hw->cgx_links + hw->lbk_links;
1315 }
1316
1317 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1318 int link, int *start, int *end)
1319 {
1320 struct rvu_hwinfo *hw = rvu->hw;
1321 int pf = rvu_get_pf(pcifunc);
1322
1323 if (is_afvf(pcifunc)) { /* LBK links */
1324 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1325 *end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1326 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1327 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1328 *end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1329 } else { /* SDP link */
1330 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1331 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1332 *end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1333 }
1334 }
1335
1336 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1337 struct nix_hw *nix_hw,
1338 struct nix_txsch_alloc_req *req)
1339 {
1340 struct rvu_hwinfo *hw = rvu->hw;
1341 int schq, req_schq, free_cnt;
1342 struct nix_txsch *txsch;
1343 int link, start, end;
1344
1345 txsch = &nix_hw->txsch[lvl];
1346 req_schq = req->schq_contig[lvl] + req->schq[lvl];
1347
1348 if (!req_schq)
1349 return 0;
1350
1351 link = nix_get_tx_link(rvu, pcifunc);
1352
1353 /* For traffic aggregating scheduler level, one queue is enough */
1354 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1355 if (req_schq != 1)
1356 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1357 return 0;
1358 }
1359
1360 /* Get free SCHQ count and check if request can be accommodated */
1361 if (hw->cap.nix_fixed_txschq_mapping) {
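/* With a fixed mapping each PF_FUNC owns exactly one predetermined
 * queue per level: the start of its link's range plus its function index.
 */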
1362 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1363 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1364 if (end <= txsch->schq.max && schq < end &&
1365 !test_bit(schq, txsch->schq.bmap))
1366 free_cnt = 1;
1367 else
1368 free_cnt = 0;
1369 } else {
1370 free_cnt = rvu_rsrc_free_count(&txsch->schq);
1371 }
1372
1373 if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
1374 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1375
1376 /* If contiguous queues are needed, check for availability */
1377 if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1378 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1379 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1380
1381 return 0;
1382 }
1383
1384 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1385 struct nix_txsch_alloc_rsp *rsp,
1386 int lvl, int start, int end)
1387 {
1388 struct rvu_hwinfo *hw = rvu->hw;
1389 u16 pcifunc = rsp->hdr.pcifunc;
1390 int idx, schq;
1391
1392 /* For traffic aggregating levels, queue alloc is based
1393 * on the transmit link to which the PF_FUNC is mapped.
1394 */
1395 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1396 /* A single TL queue is allocated */
1397 if (rsp->schq_contig[lvl]) {
1398 rsp->schq_contig[lvl] = 1;
1399 rsp->schq_contig_list[lvl][0] = start;
1400 }
1401
1402 /* Contig and non-contig requests don't both make sense here */
1403 if (rsp->schq_contig[lvl])
1404 rsp->schq[lvl] = 0;
1405
1406 if (rsp->schq[lvl]) {
1407 rsp->schq[lvl] = 1;
1408 rsp->schq_list[lvl][0] = start;
1409 }
1410 return;
1411 }
1412
1413 /* Adjust the queue request count if HW supports
1414 * only one queue per level configuration.
1415 */
1416 if (hw->cap.nix_fixed_txschq_mapping) {
1417 idx = pcifunc & RVU_PFVF_FUNC_MASK;
1418 schq = start + idx;
1419 if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1420 rsp->schq_contig[lvl] = 0;
1421 rsp->schq[lvl] = 0;
1422 return;
1423 }
1424
1425 if (rsp->schq_contig[lvl]) {
1426 rsp->schq_contig[lvl] = 1;
1427 set_bit(schq, txsch->schq.bmap);
1428 rsp->schq_contig_list[lvl][0] = schq;
1429 rsp->schq[lvl] = 0;
1430 } else if (rsp->schq[lvl]) {
1431 rsp->schq[lvl] = 1;
1432 set_bit(schq, txsch->schq.bmap);
1433 rsp->schq_list[lvl][0] = schq;
1434 }
1435 return;
1436 }
1437
1438 /* Allocate the requested contiguous queue indices first */
1439 if (rsp->schq_contig[lvl]) {
1440 schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1441 txsch->schq.max, start,
1442 rsp->schq_contig[lvl], 0);
1443 if (schq >= end)
1444 rsp->schq_contig[lvl] = 0;
1445 for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
1446 set_bit(schq, txsch->schq.bmap);
1447 rsp->schq_contig_list[lvl][idx] = schq;
1448 schq++;
1449 }
1450 }
1451
1452 /* Allocate non-contiguous queue indices */
1453 if (rsp->schq[lvl]) {
1454 idx = 0;
1455 for (schq = start; schq < end; schq++) {
1456 if (!test_bit(schq, txsch->schq.bmap)) {
1457 set_bit(schq, txsch->schq.bmap);
1458 rsp->schq_list[lvl][idx++] = schq;
1459 }
1460 if (idx == rsp->schq[lvl])
1461 break;
1462 }
1463 /* Update how many were allocated */
1464 rsp->schq[lvl] = idx;
1465 }
1466 }
1467
1468 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
1469 struct nix_txsch_alloc_req *req,
1470 struct nix_txsch_alloc_rsp *rsp)
1471 {
1472 struct rvu_hwinfo *hw = rvu->hw;
1473 u16 pcifunc = req->hdr.pcifunc;
1474 int link, blkaddr, rc = 0;
1475 int lvl, idx, start, end;
1476 struct nix_txsch *txsch;
1477 struct rvu_pfvf *pfvf;
1478 struct nix_hw *nix_hw;
1479 u32 *pfvf_map;
1480 u16 schq;
1481
1482 pfvf = rvu_get_pfvf(rvu, pcifunc);
1483 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1484 if (!pfvf->nixlf || blkaddr < 0)
1485 return NIX_AF_ERR_AF_LF_INVALID;
1486
1487 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1488 if (!nix_hw)
1489 return -EINVAL;
1490
1491 mutex_lock(&rvu->rsrc_lock);
1492
1493 /* Check if request is valid as per HW capabilities
1494 * and can be accommodated.
1495 */
1496 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1497 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
1498 if (rc)
1499 goto err;
1500 }
1501
1502 /* Allocate requested Tx scheduler queues */
1503 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1504 txsch = &nix_hw->txsch[lvl];
1505 pfvf_map = txsch->pfvf_map;
1506
1507 if (!req->schq[lvl] && !req->schq_contig[lvl])
1508 continue;
1509
1510 rsp->schq[lvl] = req->schq[lvl];
1511 rsp->schq_contig[lvl] = req->schq_contig[lvl];
1512
1513 link = nix_get_tx_link(rvu, pcifunc);
1514
1515 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1516 start = link;
1517 end = link;
1518 } else if (hw->cap.nix_fixed_txschq_mapping) {
1519 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1520 } else {
1521 start = 0;
1522 end = txsch->schq.max;
1523 }
1524
1525 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
1526
1527 /* Reset queue config */
1528 for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1529 schq = rsp->schq_contig_list[lvl][idx];
1530 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1531 NIX_TXSCHQ_CFG_DONE))
1532 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1533 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1534 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1535 }
1536
1537 for (idx = 0; idx < req->schq[lvl]; idx++) {
1538 schq = rsp->schq_list[lvl][idx];
1539 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1540 NIX_TXSCHQ_CFG_DONE))
1541 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1542 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1543 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1544 }
1545 }
1546
1547 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
1548 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
1549 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
1550 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1551 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1552 goto exit;
1553 err:
1554 rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1555 exit:
1556 mutex_unlock(&rvu->rsrc_lock);
1557 return rc;
1558 }
1559
1560 static void nix_smq_flush(struct rvu *rvu, int blkaddr,
1561 int smq, u16 pcifunc, int nixlf)
1562 {
1563 int pf = rvu_get_pf(pcifunc);
1564 u8 cgx_id = 0, lmac_id = 0;
1565 int err, restore_tx_en = 0;
1566 u64 cfg;
1567
1568 /* enable cgx tx if disabled */
1569 if (is_pf_cgxmapped(rvu, pf)) {
1570 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1571 restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
1572 lmac_id, true);
1573 }
1574
1575 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
1576 /* Do SMQ flush and set enqueue xoff */
1577 cfg |= BIT_ULL(50) | BIT_ULL(49);
1578 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
1579
1580 /* Disable backpressure from physical link,
1581 * otherwise SMQ flush may stall.
1582 */
1583 rvu_cgx_enadis_rx_bp(rvu, pf, false);
1584
1585 /* Wait for flush to complete */
1586 err = rvu_poll_reg(rvu, blkaddr,
1587 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
1588 if (err)
1589 dev_err(rvu->dev,
1590 "NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
1591
1592 rvu_cgx_enadis_rx_bp(rvu, pf, true);
1593 /* restore cgx tx state */
1594 if (restore_tx_en)
1595 cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
1596 }
1597
1598 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1599 {
1600 int blkaddr, nixlf, lvl, schq, err;
1601 struct rvu_hwinfo *hw = rvu->hw;
1602 struct nix_txsch *txsch;
1603 struct nix_hw *nix_hw;
1604
1605 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1606 if (blkaddr < 0)
1607 return NIX_AF_ERR_AF_LF_INVALID;
1608
1609 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1610 if (!nix_hw)
1611 return -EINVAL;
1612
1613 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1614 if (nixlf < 0)
1615 return NIX_AF_ERR_AF_LF_INVALID;
1616
1617 /* Disable TL2/3 queue links before SMQ flush */
1618 mutex_lock(&rvu->rsrc_lock);
1619 for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1620 if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1621 continue;
1622
1623 txsch = &nix_hw->txsch[lvl];
1624 for (schq = 0; schq < txsch->schq.max; schq++) {
1625 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1626 continue;
1627 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1628 }
1629 }
1630
1631 /* Flush SMQs */
1632 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1633 for (schq = 0; schq < txsch->schq.max; schq++) {
1634 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1635 continue;
1636 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1637 }
1638
1639 /* Now free scheduler queues to free pool */
1640 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1641 /* TLs above aggregation level are shared across all PF
1642 * and its VFs, hence skip freeing them.
1643 */
1644 if (lvl >= hw->cap.nix_tx_aggr_lvl)
1645 continue;
1646
1647 txsch = &nix_hw->txsch[lvl];
1648 for (schq = 0; schq < txsch->schq.max; schq++) {
1649 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1650 continue;
1651 rvu_free_rsrc(&txsch->schq, schq);
1652 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1653 }
1654 }
1655 mutex_unlock(&rvu->rsrc_lock);
1656
1657 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1658 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1659 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1660 if (err)
1661 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1662
1663 return 0;
1664 }
1665
1666 static int nix_txschq_free_one(struct rvu *rvu,
1667 struct nix_txsch_free_req *req)
1668 {
1669 struct rvu_hwinfo *hw = rvu->hw;
1670 u16 pcifunc = req->hdr.pcifunc;
1671 int lvl, schq, nixlf, blkaddr;
1672 struct nix_txsch *txsch;
1673 struct nix_hw *nix_hw;
1674 u32 *pfvf_map;
1675
1676 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1677 if (blkaddr < 0)
1678 return NIX_AF_ERR_AF_LF_INVALID;
1679
1680 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1681 if (!nix_hw)
1682 return -EINVAL;
1683
1684 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1685 if (nixlf < 0)
1686 return NIX_AF_ERR_AF_LF_INVALID;
1687
1688 lvl = req->schq_lvl;
1689 schq = req->schq;
1690 txsch = &nix_hw->txsch[lvl];
1691
1692 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
1693 return 0;
1694
1695 pfvf_map = txsch->pfvf_map;
1696 mutex_lock(&rvu->rsrc_lock);
1697
1698 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
1699 mutex_unlock(&rvu->rsrc_lock);
1700 goto err;
1701 }
1702
1703 /* Flush if it is an SMQ. The onus of disabling
1704 * TL2/3 queue links before the SMQ flush is on the user.
1705 */
1706 if (lvl == NIX_TXSCH_LVL_SMQ)
1707 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1708
1709 /* Free the resource */
1710 rvu_free_rsrc(&txsch->schq, schq);
1711 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1712 mutex_unlock(&rvu->rsrc_lock);
1713 return 0;
1714 err:
1715 return NIX_AF_ERR_TLX_INVALID;
1716 }
1717
1718 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1719 struct nix_txsch_free_req *req,
1720 struct msg_rsp *rsp)
1721 {
1722 if (req->flags & TXSCHQ_FREE_ALL)
1723 return nix_txschq_free(rvu, req->hdr.pcifunc);
1724 else
1725 return nix_txschq_free_one(rvu, req);
1726 }
1727
1728 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1729 int lvl, u64 reg, u64 regval)
1730 {
1731 u64 regbase = reg & 0xFFFF;
1732 u16 schq, parent;
1733
1734 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1735 return false;
1736
1737 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1738 /* Check if this schq belongs to this PF/VF or not */
1739 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1740 return false;
1741
1742 parent = (regval >> 16) & 0x1FF;
1743 /* Validate MDQ's TL4 parent */
1744 if (regbase == NIX_AF_MDQX_PARENT(0) &&
1745 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1746 return false;
1747
1748 /* Validate TL4's TL3 parent */
1749 if (regbase == NIX_AF_TL4X_PARENT(0) &&
1750 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1751 return false;
1752
1753 /* Validate TL3's TL2 parent */
1754 if (regbase == NIX_AF_TL3X_PARENT(0) &&
1755 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1756 return false;
1757
1758 /* Validate TL2's TL1 parent */
1759 if (regbase == NIX_AF_TL2X_PARENT(0) &&
1760 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1761 return false;
1762
1763 return true;
1764 }
1765
1766 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
1767 {
1768 u64 regbase;
1769
1770 if (hw->cap.nix_shaping)
1771 return true;
1772
1773 /* If shaping and coloring is not supported, then
1774 * *_CIR and *_PIR registers should not be configured.
1775 */
1776 regbase = reg & 0xFFFF;
1777
1778 switch (lvl) {
1779 case NIX_TXSCH_LVL_TL1:
1780 if (regbase == NIX_AF_TL1X_CIR(0))
1781 return false;
1782 break;
1783 case NIX_TXSCH_LVL_TL2:
1784 if (regbase == NIX_AF_TL2X_CIR(0) ||
1785 regbase == NIX_AF_TL2X_PIR(0))
1786 return false;
1787 break;
1788 case NIX_TXSCH_LVL_TL3:
1789 if (regbase == NIX_AF_TL3X_CIR(0) ||
1790 regbase == NIX_AF_TL3X_PIR(0))
1791 return false;
1792 break;
1793 case NIX_TXSCH_LVL_TL4:
1794 if (regbase == NIX_AF_TL4X_CIR(0) ||
1795 regbase == NIX_AF_TL4X_PIR(0))
1796 return false;
1797 break;
1798 }
1799 return true;
1800 }
1801
1802 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
1803 u16 pcifunc, int blkaddr)
1804 {
1805 u32 *pfvf_map;
1806 int schq;
1807
1808 schq = nix_get_tx_link(rvu, pcifunc);
1809 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
1810 /* Skip if PF has already done the config */
1811 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
1812 return;
1813 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
1814 (TXSCH_TL1_DFLT_RR_PRIO << 1));
1815 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
1816 TXSCH_TL1_DFLT_RR_QTM);
1817 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
1818 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
1819 }
1820
1821 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1822 struct nix_txschq_config *req,
1823 struct msg_rsp *rsp)
1824 {
1825 struct rvu_hwinfo *hw = rvu->hw;
1826 u16 pcifunc = req->hdr.pcifunc;
1827 u64 reg, regval, schq_regbase;
1828 struct nix_txsch *txsch;
1829 struct nix_hw *nix_hw;
1830 int blkaddr, idx, err;
1831 int nixlf, schq;
1832 u32 *pfvf_map;
1833
1834 if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1835 req->num_regs > MAX_REGS_PER_MBOX_MSG)
1836 return NIX_AF_INVAL_TXSCHQ_CFG;
1837
1838 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
1839 if (err)
1840 return err;
1841
1842 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1843 if (!nix_hw)
1844 return -EINVAL;
1845
1846 txsch = &nix_hw->txsch[req->lvl];
1847 pfvf_map = txsch->pfvf_map;
1848
1849 if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
1850 pcifunc & RVU_PFVF_FUNC_MASK) {
1851 mutex_lock(&rvu->rsrc_lock);
1852 if (req->lvl == NIX_TXSCH_LVL_TL1)
1853 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
1854 mutex_unlock(&rvu->rsrc_lock);
1855 return 0;
1856 }
1857
1858 for (idx = 0; idx < req->num_regs; idx++) {
1859 reg = req->reg[idx];
1860 regval = req->regval[idx];
1861 schq_regbase = reg & 0xFFFF;
1862
1863 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
1864 txsch->lvl, reg, regval))
1865 return NIX_AF_INVAL_TXSCHQ_CFG;
1866
1867 /* Check if shaping and coloring is supported */
1868 if (!is_txschq_shaping_valid(hw, req->lvl, reg))
1869 continue;
1870
1871 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
1872 if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
1873 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1874 pcifunc, 0);
1875 regval &= ~(0x7FULL << 24);
1876 regval |= ((u64)nixlf << 24);
1877 }
1878
1879 /* Clear 'BP_ENA' config, if it's not allowed */
1880 if (!hw->cap.nix_tx_link_bp) {
1881 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
1882 (schq_regbase & 0xFF00) ==
1883 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
1884 regval &= ~BIT_ULL(13);
1885 }
1886
1887 /* Mark config as done for TL1 by PF */
1888 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
1889 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
1890 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1891 mutex_lock(&rvu->rsrc_lock);
1892 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
1893 NIX_TXSCHQ_CFG_DONE);
1894 mutex_unlock(&rvu->rsrc_lock);
1895 }
1896
1897 		/* SMQ flush is special, hence split the register write:
1898 		 * do the flush first and write the rest of the bits later.
1899 		 */
1900 if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
1901 (regval & BIT_ULL(49))) {
1902 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1903 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1904 regval &= ~BIT_ULL(49);
1905 }
1906 rvu_write64(rvu, blkaddr, reg, regval);
1907 }
1908
1909 return 0;
1910 }
1911
1912 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
1913 struct nix_vtag_config *req)
1914 {
1915 u64 regval = req->vtag_size;
1916
1917 if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
1918 return -EINVAL;
1919
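	/* Build the RX VTAG_TYPE value the way the writes below do: the low
	 * bits carry the vtag size, bit 4 enables stripping and bit 5
	 * enables capture.
	 */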
1920 if (req->rx.capture_vtag)
1921 regval |= BIT_ULL(5);
1922 if (req->rx.strip_vtag)
1923 regval |= BIT_ULL(4);
1924
1925 rvu_write64(rvu, blkaddr,
1926 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
1927 return 0;
1928 }
1929
1930 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
1931 struct nix_vtag_config *req,
1932 struct msg_rsp *rsp)
1933 {
1934 u16 pcifunc = req->hdr.pcifunc;
1935 int blkaddr, nixlf, err;
1936
1937 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
1938 if (err)
1939 return err;
1940
1941 if (req->cfg_type) {
1942 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
1943 if (err)
1944 return NIX_AF_ERR_PARAM;
1945 } else {
1946 /* TODO: handle tx vtag configuration */
1947 return 0;
1948 }
1949
1950 return 0;
1951 }
1952
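/* Issue a NIX AQ INIT/WRITE instruction for one multicast/mirror (MCE)
 * entry: the entry forwards bcast pkts to RQ0 of 'pcifunc', chains to
 * 'next' and marks end-of-list when 'eol' is set.
 */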
1953 static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
1954 u16 pcifunc, int next, bool eol)
1955 {
1956 struct nix_aq_enq_req aq_req;
1957 int err;
1958
1959 aq_req.hdr.pcifunc = 0;
1960 aq_req.ctype = NIX_AQ_CTYPE_MCE;
1961 aq_req.op = op;
1962 aq_req.qidx = mce;
1963
1964 /* Forward bcast pkts to RQ0, RSS not needed */
1965 aq_req.mce.op = 0;
1966 aq_req.mce.index = 0;
1967 aq_req.mce.eol = eol;
1968 aq_req.mce.pf_func = pcifunc;
1969 aq_req.mce.next = next;
1970
1971 /* All fields valid */
1972 *(u64 *)(&aq_req.mce_mask) = ~0ULL;
1973
1974 err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1975 if (err) {
1976 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
1977 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1978 return err;
1979 }
1980 return 0;
1981 }
1982
1983 static int nix_update_mce_list(struct nix_mce_list *mce_list,
1984 u16 pcifunc, bool add)
1985 {
1986 struct mce *mce, *tail = NULL;
1987 bool delete = false;
1988
1989 /* Scan through the current list */
1990 hlist_for_each_entry(mce, &mce_list->head, node) {
1991 /* If already exists, then delete */
1992 if (mce->pcifunc == pcifunc && !add) {
1993 delete = true;
1994 break;
1995 }
1996 tail = mce;
1997 }
1998
1999 if (delete) {
2000 hlist_del(&mce->node);
2001 kfree(mce);
2002 mce_list->count--;
2003 return 0;
2004 }
2005
2006 if (!add)
2007 return 0;
2008
2009 /* Add a new one to the list, at the tail */
2010 mce = kzalloc(sizeof(*mce), GFP_KERNEL);
2011 if (!mce)
2012 return -ENOMEM;
2013 mce->pcifunc = pcifunc;
2014 if (!tail)
2015 hlist_add_head(&mce->node, &mce_list->head);
2016 else
2017 hlist_add_behind(&mce->node, &tail->node);
2018 mce_list->count++;
2019 return 0;
2020 }
2021
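/* Add or remove 'pcifunc' in its PF's broadcast replication list and
 * rewrite the corresponding MCE chain in hardware. When the list becomes
 * empty, the broadcast MCAM entry is disabled instead.
 */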
2022 int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
2023 {
2024 int err = 0, idx, next_idx, last_idx;
2025 struct nix_mce_list *mce_list;
2026 struct nix_mcast *mcast;
2027 struct nix_hw *nix_hw;
2028 struct rvu_pfvf *pfvf;
2029 struct mce *mce;
2030 int blkaddr;
2031
2032 /* Broadcast pkt replication is not needed for AF's VFs, hence skip */
2033 if (is_afvf(pcifunc))
2034 return 0;
2035
2036 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2037 if (blkaddr < 0)
2038 return 0;
2039
2040 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2041 if (!nix_hw)
2042 return 0;
2043
2044 mcast = &nix_hw->mcast;
2045
2046 /* Get this PF/VF func's MCE index */
2047 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
2048 idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
2049
2050 mce_list = &pfvf->bcast_mce_list;
2051 if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
2052 dev_err(rvu->dev,
2053 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
2054 __func__, idx, mce_list->max,
2055 pcifunc >> RVU_PFVF_PF_SHIFT);
2056 return -EINVAL;
2057 }
2058
2059 mutex_lock(&mcast->mce_lock);
2060
2061 err = nix_update_mce_list(mce_list, pcifunc, add);
2062 if (err)
2063 goto end;
2064
2065 /* Disable MCAM entry in NPC */
2066 if (!mce_list->count) {
2067 rvu_npc_enable_bcast_entry(rvu, pcifunc, false);
2068 goto end;
2069 }
2070
2071 /* Dump the updated list to HW */
2072 idx = pfvf->bcast_mce_idx;
2073 last_idx = idx + mce_list->count - 1;
2074 hlist_for_each_entry(mce, &mce_list->head, node) {
2075 if (idx > last_idx)
2076 break;
2077
2078 next_idx = idx + 1;
2079 /* EOL should be set in last MCE */
2080 err = nix_setup_mce(rvu, idx, NIX_AQ_INSTOP_WRITE,
2081 mce->pcifunc, next_idx,
2082 				    next_idx > last_idx);
2083 if (err)
2084 goto end;
2085 idx++;
2086 }
2087
2088 end:
2089 mutex_unlock(&mcast->mce_lock);
2090 return err;
2091 }
2092
2093 static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
2094 {
2095 struct nix_mcast *mcast = &nix_hw->mcast;
2096 int err, pf, numvfs, idx;
2097 struct rvu_pfvf *pfvf;
2098 u16 pcifunc;
2099 u64 cfg;
2100
2101 /* Skip PF0 (i.e AF) */
2102 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
2103 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2104 /* If PF is not enabled, nothing to do */
2105 if (!((cfg >> 20) & 0x01))
2106 continue;
2107 /* Get numVFs attached to this PF */
2108 numvfs = (cfg >> 12) & 0xFF;
2109
2110 pfvf = &rvu->pf[pf];
2111 /* Save the start MCE */
2112 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2113
2114 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
2115
2116 for (idx = 0; idx < (numvfs + 1); idx++) {
2117 /* idx-0 is for PF, followed by VFs */
2118 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2119 pcifunc |= idx;
2120 /* Add dummy entries now, so that we don't have to check
2121 * for whether AQ_OP should be INIT/WRITE later on.
2122 * Will be updated when a NIXLF is attached/detached to
2123 * these PF/VFs.
2124 */
2125 err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
2126 NIX_AQ_INSTOP_INIT,
2127 pcifunc, 0, true);
2128 if (err)
2129 return err;
2130 }
2131 }
2132 return 0;
2133 }
2134
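/* One-time multicast setup: allocate the MCE context table and the
 * replication buffers, program their IOVAs/sizes into the AF registers,
 * reserve a pkind for the internal mcast/mirror replay path and finally
 * build the per-PF broadcast tables.
 */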
2135 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2136 {
2137 struct nix_mcast *mcast = &nix_hw->mcast;
2138 struct rvu_hwinfo *hw = rvu->hw;
2139 int err, size;
2140
2141 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
2142 size = (1ULL << size);
2143
2144 /* Alloc memory for multicast/mirror replication entries */
2145 err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
2146 (256UL << MC_TBL_SIZE), size);
2147 if (err)
2148 return -ENOMEM;
2149
2150 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
2151 (u64)mcast->mce_ctx->iova);
2152
2153 /* Set max list length equal to max no of VFs per PF + PF itself */
2154 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
2155 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
2156
2157 /* Alloc memory for multicast replication buffers */
2158 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
2159 err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
2160 (8UL << MC_BUF_CNT), size);
2161 if (err)
2162 return -ENOMEM;
2163
2164 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
2165 (u64)mcast->mcast_buf->iova);
2166
2167 /* Alloc pkind for NIX internal RX multicast/mirror replay */
2168 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
2169
2170 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
2171 BIT_ULL(63) | (mcast->replay_pkind << 24) |
2172 BIT_ULL(20) | MC_BUF_CNT);
2173
2174 mutex_init(&mcast->mce_lock);
2175
2176 return nix_setup_bcast_tables(rvu, nix_hw);
2177 }
2178
2179 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2180 {
2181 struct nix_txsch *txsch;
2182 int err, lvl, schq;
2183 u64 cfg, reg;
2184
2185 /* Get scheduler queue count of each type and alloc
2186 * bitmap for each for alloc/free/attach operations.
2187 */
2188 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2189 txsch = &nix_hw->txsch[lvl];
2190 txsch->lvl = lvl;
2191 switch (lvl) {
2192 case NIX_TXSCH_LVL_SMQ:
2193 reg = NIX_AF_MDQ_CONST;
2194 break;
2195 case NIX_TXSCH_LVL_TL4:
2196 reg = NIX_AF_TL4_CONST;
2197 break;
2198 case NIX_TXSCH_LVL_TL3:
2199 reg = NIX_AF_TL3_CONST;
2200 break;
2201 case NIX_TXSCH_LVL_TL2:
2202 reg = NIX_AF_TL2_CONST;
2203 break;
2204 case NIX_TXSCH_LVL_TL1:
2205 reg = NIX_AF_TL1_CONST;
2206 break;
2207 }
2208 cfg = rvu_read64(rvu, blkaddr, reg);
2209 txsch->schq.max = cfg & 0xFFFF;
2210 err = rvu_alloc_bitmap(&txsch->schq);
2211 if (err)
2212 return err;
2213
2214 		/* Allocate memory for scheduler queue to
2215 		 * PF/VF pcifunc mapping info.
2216 		 */
2217 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
2218 sizeof(u32), GFP_KERNEL);
2219 if (!txsch->pfvf_map)
2220 return -ENOMEM;
2221 for (schq = 0; schq < txsch->schq.max; schq++)
2222 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2223 }
2224 return 0;
2225 }
2226
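/* Return the index of an existing mark format matching 'cfg', or program
 * 'cfg' into the next free NIX_AF_MARK_FORMATX_CTL entry. Returns -ERANGE
 * when all HW mark formats are already in use.
 */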
2227 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
2228 int blkaddr, u32 cfg)
2229 {
2230 int fmt_idx;
2231
2232 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
2233 if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
2234 return fmt_idx;
2235 }
2236 if (fmt_idx >= nix_hw->mark_format.total)
2237 return -ERANGE;
2238
2239 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
2240 nix_hw->mark_format.cfg[fmt_idx] = cfg;
2241 nix_hw->mark_format.in_use++;
2242 return fmt_idx;
2243 }
2244
2245 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
2246 int blkaddr)
2247 {
2248 u64 cfgs[] = {
2249 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
2250 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
2251 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
2252 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
2253 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
2254 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
2255 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
2256 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
2257 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
2258 };
2259 int i, rc;
2260 u64 total;
2261
2262 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
2263 nix_hw->mark_format.total = (u8)total;
2264 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
2265 GFP_KERNEL);
2266 if (!nix_hw->mark_format.cfg)
2267 return -ENOMEM;
2268 for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
2269 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
2270 if (rc < 0)
2271 dev_err(rvu->dev, "Err %d in setup mark format %d\n",
2272 i, rc);
2273 }
2274
2275 return 0;
2276 }
2277
2278 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
2279 struct msg_rsp *rsp)
2280 {
2281 u16 pcifunc = req->hdr.pcifunc;
2282 int i, nixlf, blkaddr, err;
2283 u64 stats;
2284
2285 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2286 if (err)
2287 return err;
2288
2289 /* Get stats count supported by HW */
2290 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
2291
2292 /* Reset tx stats */
2293 for (i = 0; i < ((stats >> 24) & 0xFF); i++)
2294 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
2295
2296 /* Reset rx stats */
2297 for (i = 0; i < ((stats >> 32) & 0xFF); i++)
2298 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
2299
2300 return 0;
2301 }
2302
2303 /* Returns the ALG index to be set into NPC_RX_ACTION */
2304 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2305 {
2306 int i;
2307
2308 	/* Scan over existing algo entries to find a match */
2309 for (i = 0; i < nix_hw->flowkey.in_use; i++)
2310 if (nix_hw->flowkey.flowkey[i] == flow_cfg)
2311 return i;
2312
2313 return -ERANGE;
2314 }
2315
2316 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2317 {
2318 int idx, nr_field, key_off, field_marker, keyoff_marker;
2319 int max_key_off, max_bit_pos, group_member;
2320 struct nix_rx_flowkey_alg *field;
2321 struct nix_rx_flowkey_alg tmp;
2322 u32 key_type, valid_key;
2323
2324 if (!alg)
2325 return -EINVAL;
2326
2327 #define FIELDS_PER_ALG 5
2328 #define MAX_KEY_OFF 40
2329 /* Clear all fields */
2330 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2331
2332 /* Each of the 32 possible flow key algorithm definitions should
2333 * fall into above incremental config (except ALG0). Otherwise a
2334 * single NPC MCAM entry is not sufficient for supporting RSS.
2335 *
2336 	 * If a different definition or combination is needed then the NPC MCAM
2337 	 * has to be programmed to filter such pkts and its action should
2338 	 * point to this definition to calculate the flowtag or hash.
2339 *
2340 	 * The `for loop` goes over _all_ protocol fields and the following
2341 	 * variables depict the state machine's forward progress logic.
2342 *
2343 * keyoff_marker - Enabled when hash byte length needs to be accounted
2344 * in field->key_offset update.
2345 * field_marker - Enabled when a new field needs to be selected.
2346 * group_member - Enabled when protocol is part of a group.
2347 */
2348 	 */
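	/* A rough illustration (a sketch, not an exhaustive spec) of the loop
	 * below: with flow_cfg = IPV4 | TCP | UDP, field 0 ends up as IPv4
	 * SIP+DIP hashed into key bytes 0..7 (IPv6 would reuse the same key
	 * offset, since only one of the two can match a given packet), and
	 * field 1 as the L4 sport+dport, matching either TCP or UDP, hashed
	 * into key bytes 8..11.
	 */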
2349 keyoff_marker = 0; max_key_off = 0; group_member = 0;
2350 nr_field = 0; key_off = 0; field_marker = 1;
2351 field = &tmp; max_bit_pos = fls(flow_cfg);
2352 for (idx = 0;
2353 idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2354 key_off < MAX_KEY_OFF; idx++) {
2355 key_type = BIT(idx);
2356 valid_key = flow_cfg & key_type;
2357 /* Found a field marker, reset the field values */
2358 if (field_marker)
2359 memset(&tmp, 0, sizeof(tmp));
2360
2361 field_marker = true;
2362 keyoff_marker = true;
2363 switch (key_type) {
2364 case NIX_FLOW_KEY_TYPE_PORT:
2365 field->sel_chan = true;
2366 /* This should be set to 1, when SEL_CHAN is set */
2367 field->bytesm1 = 1;
2368 break;
2369 case NIX_FLOW_KEY_TYPE_IPV4:
2370 case NIX_FLOW_KEY_TYPE_INNR_IPV4:
2371 field->lid = NPC_LID_LC;
2372 field->ltype_match = NPC_LT_LC_IP;
2373 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
2374 field->lid = NPC_LID_LG;
2375 field->ltype_match = NPC_LT_LG_TU_IP;
2376 }
2377 field->hdr_offset = 12; /* SIP offset */
2378 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2379 field->ltype_mask = 0xF; /* Match only IPv4 */
2380 keyoff_marker = false;
2381 break;
2382 case NIX_FLOW_KEY_TYPE_IPV6:
2383 case NIX_FLOW_KEY_TYPE_INNR_IPV6:
2384 field->lid = NPC_LID_LC;
2385 field->ltype_match = NPC_LT_LC_IP6;
2386 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
2387 field->lid = NPC_LID_LG;
2388 field->ltype_match = NPC_LT_LG_TU_IP6;
2389 }
2390 field->hdr_offset = 8; /* SIP offset */
2391 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2392 field->ltype_mask = 0xF; /* Match only IPv6 */
2393 break;
2394 case NIX_FLOW_KEY_TYPE_TCP:
2395 case NIX_FLOW_KEY_TYPE_UDP:
2396 case NIX_FLOW_KEY_TYPE_SCTP:
2397 case NIX_FLOW_KEY_TYPE_INNR_TCP:
2398 case NIX_FLOW_KEY_TYPE_INNR_UDP:
2399 case NIX_FLOW_KEY_TYPE_INNR_SCTP:
2400 field->lid = NPC_LID_LD;
2401 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
2402 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
2403 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
2404 field->lid = NPC_LID_LH;
2405 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2406
2407 			/* Layer type enum values for NPC_LID_LD and NPC_LID_LH
2408 			 * are the same, so no need to change ltype_match, just
2409 			 * change the lid for inner protocols.
2410 			 */
2411 BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
2412 (int)NPC_LT_LH_TU_TCP);
2413 BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
2414 (int)NPC_LT_LH_TU_UDP);
2415 BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
2416 (int)NPC_LT_LH_TU_SCTP);
2417
2418 if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
2419 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
2420 valid_key) {
2421 field->ltype_match |= NPC_LT_LD_TCP;
2422 group_member = true;
2423 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
2424 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
2425 valid_key) {
2426 field->ltype_match |= NPC_LT_LD_UDP;
2427 group_member = true;
2428 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2429 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
2430 valid_key) {
2431 field->ltype_match |= NPC_LT_LD_SCTP;
2432 group_member = true;
2433 }
2434 field->ltype_mask = ~field->ltype_match;
2435 if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2436 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
2437 				/* Handle the case where some member of the
2438 				 * group is enabled but not the final one.
2439 				 */
2440 if (group_member) {
2441 valid_key = true;
2442 group_member = false;
2443 }
2444 } else {
2445 field_marker = false;
2446 keyoff_marker = false;
2447 }
2448 break;
2449 case NIX_FLOW_KEY_TYPE_NVGRE:
2450 field->lid = NPC_LID_LD;
2451 field->hdr_offset = 4; /* VSID offset */
2452 field->bytesm1 = 2;
2453 field->ltype_match = NPC_LT_LD_NVGRE;
2454 field->ltype_mask = 0xF;
2455 break;
2456 case NIX_FLOW_KEY_TYPE_VXLAN:
2457 case NIX_FLOW_KEY_TYPE_GENEVE:
2458 field->lid = NPC_LID_LE;
2459 field->bytesm1 = 2;
2460 field->hdr_offset = 4;
2461 field->ltype_mask = 0xF;
2462 field_marker = false;
2463 keyoff_marker = false;
2464
2465 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
2466 field->ltype_match |= NPC_LT_LE_VXLAN;
2467 group_member = true;
2468 }
2469
2470 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
2471 field->ltype_match |= NPC_LT_LE_GENEVE;
2472 group_member = true;
2473 }
2474
2475 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
2476 if (group_member) {
2477 field->ltype_mask = ~field->ltype_match;
2478 field_marker = true;
2479 keyoff_marker = true;
2480 valid_key = true;
2481 group_member = false;
2482 }
2483 }
2484 break;
2485 case NIX_FLOW_KEY_TYPE_ETH_DMAC:
2486 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
2487 field->lid = NPC_LID_LA;
2488 field->ltype_match = NPC_LT_LA_ETHER;
2489 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
2490 field->lid = NPC_LID_LF;
2491 field->ltype_match = NPC_LT_LF_TU_ETHER;
2492 }
2493 field->hdr_offset = 0;
2494 field->bytesm1 = 5; /* DMAC 6 Byte */
2495 field->ltype_mask = 0xF;
2496 break;
2497 case NIX_FLOW_KEY_TYPE_IPV6_EXT:
2498 field->lid = NPC_LID_LC;
2499 field->hdr_offset = 40; /* IPV6 hdr */
2500 field->bytesm1 = 0; /* 1 Byte ext hdr*/
2501 field->ltype_match = NPC_LT_LC_IP6_EXT;
2502 field->ltype_mask = 0xF;
2503 break;
2504 case NIX_FLOW_KEY_TYPE_GTPU:
2505 field->lid = NPC_LID_LE;
2506 field->hdr_offset = 4;
2507 field->bytesm1 = 3; /* 4 bytes TID*/
2508 field->ltype_match = NPC_LT_LE_GTPU;
2509 field->ltype_mask = 0xF;
2510 break;
2511 case NIX_FLOW_KEY_TYPE_VLAN:
2512 field->lid = NPC_LID_LB;
2513 field->hdr_offset = 2; /* Skip TPID (2-bytes) */
2514 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
2515 field->ltype_match = NPC_LT_LB_CTAG;
2516 field->ltype_mask = 0xF;
2517 field->fn_mask = 1; /* Mask out the first nibble */
2518 break;
2519 }
2520 field->ena = 1;
2521
2522 /* Found a valid flow key type */
2523 if (valid_key) {
2524 field->key_offset = key_off;
2525 memcpy(&alg[nr_field], field, sizeof(*field));
2526 max_key_off = max(max_key_off, field->bytesm1 + 1);
2527
2528 /* Found a field marker, get the next field */
2529 if (field_marker)
2530 nr_field++;
2531 }
2532
2533 /* Found a keyoff marker, update the new key_off */
2534 if (keyoff_marker) {
2535 key_off += max_key_off;
2536 max_key_off = 0;
2537 }
2538 }
2539 /* Processed all the flow key types */
2540 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
2541 return 0;
2542 else
2543 return NIX_AF_ERR_RSS_NOSPC_FIELD;
2544 }
2545
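/* Reserve the next free RSS flow key algorithm slot: generate the five
 * ALGX_FIELDX values for 'flow_cfg', program them into hardware and
 * remember the config so identical requests can reuse this index.
 */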
2546 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
2547 {
2548 u64 field[FIELDS_PER_ALG];
2549 struct nix_hw *hw;
2550 int fid, rc;
2551
2552 hw = get_nix_hw(rvu->hw, blkaddr);
2553 if (!hw)
2554 return -EINVAL;
2555
2556 	/* No room to add a new flow hash algorithm */
2557 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
2558 return NIX_AF_ERR_RSS_NOSPC_ALGO;
2559
2560 /* Generate algo fields for the given flow_cfg */
2561 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
2562 if (rc)
2563 return rc;
2564
2565 /* Update ALGX_FIELDX register with generated fields */
2566 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2567 rvu_write64(rvu, blkaddr,
2568 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
2569 fid), field[fid]);
2570
2571 	/* Store the flow_cfg for further lookup */
2572 rc = hw->flowkey.in_use;
2573 hw->flowkey.flowkey[rc] = flow_cfg;
2574 hw->flowkey.in_use++;
2575
2576 return rc;
2577 }
2578
2579 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
2580 struct nix_rss_flowkey_cfg *req,
2581 struct nix_rss_flowkey_cfg_rsp *rsp)
2582 {
2583 u16 pcifunc = req->hdr.pcifunc;
2584 int alg_idx, nixlf, blkaddr;
2585 struct nix_hw *nix_hw;
2586 int err;
2587
2588 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2589 if (err)
2590 return err;
2591
2592 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2593 if (!nix_hw)
2594 return -EINVAL;
2595
2596 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
2597 	/* Failed to get algo index from the existing list, reserve new */
2598 if (alg_idx < 0) {
2599 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
2600 req->flowkey_cfg);
2601 if (alg_idx < 0)
2602 return alg_idx;
2603 }
2604 rsp->alg_idx = alg_idx;
2605 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
2606 alg_idx, req->mcam_index);
2607 return 0;
2608 }
2609
2610 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
2611 {
2612 u32 flowkey_cfg, minkey_cfg;
2613 int alg, fid, rc;
2614
2615 /* Disable all flow key algx fieldx */
2616 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
2617 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2618 rvu_write64(rvu, blkaddr,
2619 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
2620 0);
2621 }
2622
2623 /* IPv4/IPv6 SIP/DIPs */
2624 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
2625 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2626 if (rc < 0)
2627 return rc;
2628
2629 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2630 minkey_cfg = flowkey_cfg;
2631 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
2632 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2633 if (rc < 0)
2634 return rc;
2635
2636 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2637 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
2638 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2639 if (rc < 0)
2640 return rc;
2641
2642 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2643 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
2644 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2645 if (rc < 0)
2646 return rc;
2647
2648 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
2649 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2650 NIX_FLOW_KEY_TYPE_UDP;
2651 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2652 if (rc < 0)
2653 return rc;
2654
2655 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2656 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2657 NIX_FLOW_KEY_TYPE_SCTP;
2658 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2659 if (rc < 0)
2660 return rc;
2661
2662 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2663 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
2664 NIX_FLOW_KEY_TYPE_SCTP;
2665 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2666 if (rc < 0)
2667 return rc;
2668
2669 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2670 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2671 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
2672 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2673 if (rc < 0)
2674 return rc;
2675
2676 return 0;
2677 }
2678
2679 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
2680 struct nix_set_mac_addr *req,
2681 struct msg_rsp *rsp)
2682 {
2683 u16 pcifunc = req->hdr.pcifunc;
2684 int blkaddr, nixlf, err;
2685 struct rvu_pfvf *pfvf;
2686
2687 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2688 if (err)
2689 return err;
2690
2691 pfvf = rvu_get_pfvf(rvu, pcifunc);
2692
2693 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
2694
2695 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
2696 pfvf->rx_chan_base, req->mac_addr);
2697
2698 rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2699
2700 return 0;
2701 }
2702
2703 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
2704 struct msg_req *req,
2705 struct nix_get_mac_addr_rsp *rsp)
2706 {
2707 u16 pcifunc = req->hdr.pcifunc;
2708 struct rvu_pfvf *pfvf;
2709
2710 if (!is_nixlf_attached(rvu, pcifunc))
2711 return NIX_AF_ERR_AF_LF_INVALID;
2712
2713 pfvf = rvu_get_pfvf(rvu, pcifunc);
2714
2715 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
2716
2717 return 0;
2718 }
2719
2720 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
2721 struct msg_rsp *rsp)
2722 {
2723 bool allmulti = false, disable_promisc = false;
2724 u16 pcifunc = req->hdr.pcifunc;
2725 int blkaddr, nixlf, err;
2726 struct rvu_pfvf *pfvf;
2727
2728 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2729 if (err)
2730 return err;
2731
2732 pfvf = rvu_get_pfvf(rvu, pcifunc);
2733
2734 if (req->mode & NIX_RX_MODE_PROMISC)
2735 allmulti = false;
2736 else if (req->mode & NIX_RX_MODE_ALLMULTI)
2737 allmulti = true;
2738 else
2739 disable_promisc = true;
2740
2741 if (disable_promisc)
2742 rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
2743 else
2744 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
2745 pfvf->rx_chan_base, allmulti);
2746
2747 rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2748
2749 return 0;
2750 }
2751
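/* Work out the max/min frame sizes to program on the RX link: start from
 * the requester's values and widen them so they also cover the PF and
 * every VF sharing the same link.
 */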
2752 static void nix_find_link_frs(struct rvu *rvu,
2753 struct nix_frs_cfg *req, u16 pcifunc)
2754 {
2755 int pf = rvu_get_pf(pcifunc);
2756 struct rvu_pfvf *pfvf;
2757 int maxlen, minlen;
2758 int numvfs, hwvf;
2759 int vf;
2760
2761 /* Update with requester's min/max lengths */
2762 pfvf = rvu_get_pfvf(rvu, pcifunc);
2763 pfvf->maxlen = req->maxlen;
2764 if (req->update_minlen)
2765 pfvf->minlen = req->minlen;
2766
2767 maxlen = req->maxlen;
2768 minlen = req->update_minlen ? req->minlen : 0;
2769
2770 /* Get this PF's numVFs and starting hwvf */
2771 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
2772
2773 /* For each VF, compare requested max/minlen */
2774 for (vf = 0; vf < numvfs; vf++) {
2775 pfvf = &rvu->hwvf[hwvf + vf];
2776 if (pfvf->maxlen > maxlen)
2777 maxlen = pfvf->maxlen;
2778 if (req->update_minlen &&
2779 pfvf->minlen && pfvf->minlen < minlen)
2780 minlen = pfvf->minlen;
2781 }
2782
2783 /* Compare requested max/minlen with PF's max/minlen */
2784 pfvf = &rvu->pf[pf];
2785 if (pfvf->maxlen > maxlen)
2786 maxlen = pfvf->maxlen;
2787 if (req->update_minlen &&
2788 pfvf->minlen && pfvf->minlen < minlen)
2789 minlen = pfvf->minlen;
2790
2791 	/* Update the request with the max/min across the PF and its VFs */
2792 req->maxlen = maxlen;
2793 if (req->update_minlen)
2794 req->minlen = minlen;
2795 }
2796
2797 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
2798 struct msg_rsp *rsp)
2799 {
2800 struct rvu_hwinfo *hw = rvu->hw;
2801 u16 pcifunc = req->hdr.pcifunc;
2802 int pf = rvu_get_pf(pcifunc);
2803 int blkaddr, schq, link = -1;
2804 struct nix_txsch *txsch;
2805 u64 cfg, lmac_fifo_len;
2806 struct nix_hw *nix_hw;
2807 u8 cgx = 0, lmac = 0;
2808
2809 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2810 if (blkaddr < 0)
2811 return NIX_AF_ERR_AF_LF_INVALID;
2812
2813 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2814 if (!nix_hw)
2815 return -EINVAL;
2816
2817 if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
2818 return NIX_AF_ERR_FRS_INVALID;
2819
2820 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
2821 return NIX_AF_ERR_FRS_INVALID;
2822
2823 /* Check if requester wants to update SMQ's */
2824 if (!req->update_smq)
2825 goto rx_frscfg;
2826
2827 /* Update min/maxlen in each of the SMQ attached to this PF/VF */
2828 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2829 mutex_lock(&rvu->rsrc_lock);
2830 for (schq = 0; schq < txsch->schq.max; schq++) {
2831 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2832 continue;
2833 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
2834 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
2835 if (req->update_minlen)
2836 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
2837 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
2838 }
2839 mutex_unlock(&rvu->rsrc_lock);
2840
2841 rx_frscfg:
2842 /* Check if config is for SDP link */
2843 if (req->sdp_link) {
2844 if (!hw->sdp_links)
2845 return NIX_AF_ERR_RX_LINK_INVALID;
2846 link = hw->cgx_links + hw->lbk_links;
2847 goto linkcfg;
2848 }
2849
2850 /* Check if the request is from CGX mapped RVU PF */
2851 if (is_pf_cgxmapped(rvu, pf)) {
2852 /* Get CGX and LMAC to which this PF is mapped and find link */
2853 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
2854 link = (cgx * hw->lmac_per_cgx) + lmac;
2855 } else if (pf == 0) {
2856 /* For VFs of PF0 ingress is LBK port, so config LBK link */
2857 link = hw->cgx_links;
2858 }
2859
2860 if (link < 0)
2861 return NIX_AF_ERR_RX_LINK_INVALID;
2862
2863 nix_find_link_frs(rvu, req, pcifunc);
2864
2865 linkcfg:
2866 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
2867 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
2868 if (req->update_minlen)
2869 cfg = (cfg & ~0xFFFFULL) | req->minlen;
2870 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
2871
2872 if (req->sdp_link || pf == 0)
2873 return 0;
2874
2875 /* Update transmit credits for CGX links */
2876 lmac_fifo_len =
2877 CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
2878 cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
2879 cfg &= ~(0xFFFFFULL << 12);
2880 cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
2881 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
2882 return 0;
2883 }
2884
2885 int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
2886 struct msg_rsp *rsp)
2887 {
2888 struct npc_mcam_alloc_entry_req alloc_req = { };
2889 struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
2890 struct npc_mcam_free_entry_req free_req = { };
2891 u16 pcifunc = req->hdr.pcifunc;
2892 int blkaddr, nixlf, err;
2893 struct rvu_pfvf *pfvf;
2894
2895 	/* LBK VFs do not have a separate MCAM UCAST entry, hence
2896 	 * skip allocating rxvlan for them.
2897 	 */
2898 if (is_afvf(pcifunc))
2899 return 0;
2900
2901 pfvf = rvu_get_pfvf(rvu, pcifunc);
2902 if (pfvf->rxvlan)
2903 return 0;
2904
2905 /* alloc new mcam entry */
2906 alloc_req.hdr.pcifunc = pcifunc;
2907 alloc_req.count = 1;
2908
2909 err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
2910 &alloc_rsp);
2911 if (err)
2912 return err;
2913
2914 /* update entry to enable rxvlan offload */
2915 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2916 if (blkaddr < 0) {
2917 err = NIX_AF_ERR_AF_LF_INVALID;
2918 goto free_entry;
2919 }
2920
2921 nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
2922 if (nixlf < 0) {
2923 err = NIX_AF_ERR_AF_LF_INVALID;
2924 goto free_entry;
2925 }
2926
2927 pfvf->rxvlan_index = alloc_rsp.entry_list[0];
2928 /* all it means is that rxvlan_index is valid */
2929 pfvf->rxvlan = true;
2930
2931 err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2932 if (err)
2933 goto free_entry;
2934
2935 return 0;
2936 free_entry:
2937 free_req.hdr.pcifunc = pcifunc;
2938 free_req.entry = alloc_rsp.entry_list[0];
2939 rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
2940 pfvf->rxvlan = false;
2941 return err;
2942 }
2943
2944 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
2945 struct msg_rsp *rsp)
2946 {
2947 int nixlf, blkaddr, err;
2948 u64 cfg;
2949
2950 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
2951 if (err)
2952 return err;
2953
2954 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
2955 /* Set the interface configuration */
2956 if (req->len_verify & BIT(0))
2957 cfg |= BIT_ULL(41);
2958 else
2959 cfg &= ~BIT_ULL(41);
2960
2961 if (req->len_verify & BIT(1))
2962 cfg |= BIT_ULL(40);
2963 else
2964 cfg &= ~BIT_ULL(40);
2965
2966 if (req->csum_verify & BIT(0))
2967 cfg |= BIT_ULL(37);
2968 else
2969 cfg &= ~BIT_ULL(37);
2970
2971 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
2972
2973 return 0;
2974 }
2975
2976 static void nix_link_config(struct rvu *rvu, int blkaddr)
2977 {
2978 struct rvu_hwinfo *hw = rvu->hw;
2979 int cgx, lmac_cnt, slink, link;
2980 u64 tx_credits;
2981
2982 /* Set default min/max packet lengths allowed on NIX Rx links.
2983 *
2984 	 * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
2985 * as undersize and report them to SW as error pkts, hence
2986 * setting it to 40 bytes.
2987 */
2988 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
2989 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
2990 NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
2991 }
2992
2993 if (hw->sdp_links) {
2994 link = hw->cgx_links + hw->lbk_links;
2995 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
2996 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
2997 }
2998
2999 /* Set credits for Tx links assuming max packet length allowed.
3000 * This will be reconfigured based on MTU set for PF/VF.
3001 */
3002 for (cgx = 0; cgx < hw->cgx; cgx++) {
3003 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3004 tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
3005 /* Enable credits and set credit pkt count to max allowed */
3006 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3007 slink = cgx * hw->lmac_per_cgx;
3008 for (link = slink; link < (slink + lmac_cnt); link++) {
3009 rvu_write64(rvu, blkaddr,
3010 NIX_AF_TX_LINKX_NORM_CREDIT(link),
3011 tx_credits);
3012 }
3013 }
3014
3015 /* Set Tx credits for LBK link */
3016 slink = hw->cgx_links;
3017 for (link = slink; link < (slink + hw->lbk_links); link++) {
3018 tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
3019 /* Enable credits and set credit pkt count to max allowed */
3020 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3021 rvu_write64(rvu, blkaddr,
3022 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
3023 }
3024 }
3025
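/* Run X2P bus calibration and verify that each present CGX block and the
 * LBK responded. Errors are reported, but the calibration bit is always
 * cleared before returning.
 */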
3026 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
3027 {
3028 int idx, err;
3029 u64 status;
3030
3031 /* Start X2P bus calibration */
3032 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3033 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
3034 /* Wait for calibration to complete */
3035 err = rvu_poll_reg(rvu, blkaddr,
3036 NIX_AF_STATUS, BIT_ULL(10), false);
3037 if (err) {
3038 dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
3039 return err;
3040 }
3041
3042 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
3043 /* Check if CGX devices are ready */
3044 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
3045 /* Skip when cgx port is not available */
3046 if (!rvu_cgx_pdata(idx, rvu) ||
3047 (status & (BIT_ULL(16 + idx))))
3048 continue;
3049 dev_err(rvu->dev,
3050 "CGX%d didn't respond to NIX X2P calibration\n", idx);
3051 err = -EBUSY;
3052 }
3053
3054 /* Check if LBK is ready */
3055 if (!(status & BIT_ULL(19))) {
3056 dev_err(rvu->dev,
3057 "LBK didn't respond to NIX X2P calibration\n");
3058 err = -EBUSY;
3059 }
3060
3061 /* Clear 'calibrate_x2p' bit */
3062 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3063 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
3064 if (err || (status & 0x3FFULL))
3065 dev_err(rvu->dev,
3066 "NIX X2P calibration failed, status 0x%llx\n", status);
3067 if (err)
3068 return err;
3069 return 0;
3070 }
3071
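/* Initialize the NIX admin queue: set AQ endianness to match the kernel,
 * tune NDC caching, and allocate instruction/result queue memory sized
 * for the largest context plus its write mask.
 */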
3072 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
3073 {
3074 u64 cfg;
3075 int err;
3076
3077 /* Set admin queue endianness */
3078 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
3079 #ifdef __BIG_ENDIAN
3080 cfg |= BIT_ULL(8);
3081 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3082 #else
3083 cfg &= ~BIT_ULL(8);
3084 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3085 #endif
3086
3087 /* Do not bypass NDC cache */
3088 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
3089 cfg &= ~0x3FFEULL;
3090 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
3091 /* Disable caching of SQB aka SQEs */
3092 cfg |= 0x04ULL;
3093 #endif
3094 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
3095
3096 /* Result structure can be followed by RQ/SQ/CQ context at
3097 	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
3098 * operation type. Alloc sufficient result memory for all operations.
3099 */
3100 err = rvu_aq_alloc(rvu, &block->aq,
3101 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
3102 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
3103 if (err)
3104 return err;
3105
3106 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
3107 rvu_write64(rvu, block->addr,
3108 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
3109 return 0;
3110 }
3111
3112 int rvu_nix_init(struct rvu *rvu)
3113 {
3114 const struct npc_lt_def_cfg *ltdefs;
3115 struct rvu_hwinfo *hw = rvu->hw;
3116 struct rvu_block *block;
3117 int blkaddr, err;
3118 u64 cfg;
3119
3120 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
3121 if (blkaddr < 0)
3122 return 0;
3123 block = &hw->block[blkaddr];
3124
3125 if (is_rvu_96xx_B0(rvu)) {
3126 		/* As per a HW erratum in 96xx A0/B0 silicon, NIX may corrupt
3127 * internal state when conditional clocks are turned off.
3128 * Hence enable them.
3129 */
3130 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3131 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
3132
3133 /* Set chan/link to backpressure TL3 instead of TL2 */
3134 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
3135
3136 		/* Disable SQ manager's sticky mode operation (set TM6 = 0).
3137 		 * This sticky mode is known to cause SQ stalls when multiple
3138 		 * SQs are mapped to the same SMQ and transmit pkts at the same time.
3139 */
3140 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
3141 cfg &= ~BIT_ULL(15);
3142 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
3143 }
3144
3145 ltdefs = rvu->kpu.lt_def;
3146 /* Calibrate X2P bus to check if CGX/LBK links are fine */
3147 err = nix_calibrate_x2p(rvu, blkaddr);
3148 if (err)
3149 return err;
3150
3151 /* Set num of links of each type */
3152 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
3153 hw->cgx = (cfg >> 12) & 0xF;
3154 hw->lmac_per_cgx = (cfg >> 8) & 0xF;
3155 hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
3156 hw->lbk_links = 1;
3157 hw->sdp_links = 1;
3158
3159 /* Initialize admin queue */
3160 err = nix_aq_init(rvu, block);
3161 if (err)
3162 return err;
3163
3164 /* Restore CINT timer delay to HW reset values */
3165 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
3166
3167 if (blkaddr == BLKADDR_NIX0) {
3168 hw->nix0 = devm_kzalloc(rvu->dev,
3169 sizeof(struct nix_hw), GFP_KERNEL);
3170 if (!hw->nix0)
3171 return -ENOMEM;
3172
3173 err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
3174 if (err)
3175 return err;
3176
3177 err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr);
3178 if (err)
3179 return err;
3180
3181 err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
3182 if (err)
3183 return err;
3184
3185 /* Configure segmentation offload formats */
3186 nix_setup_lso(rvu, hw->nix0, blkaddr);
3187
3188 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
3189 * This helps HW protocol checker to identify headers
3190 * and validate length and checksums.
3191 */
3192 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3193 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
3194 ltdefs->rx_ol2.ltype_mask);
3195 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3196 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
3197 ltdefs->rx_oip4.ltype_mask);
3198 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3199 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
3200 ltdefs->rx_iip4.ltype_mask);
3201 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3202 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
3203 ltdefs->rx_oip6.ltype_mask);
3204 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3205 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
3206 ltdefs->rx_iip6.ltype_mask);
3207 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3208 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
3209 ltdefs->rx_otcp.ltype_mask);
3210 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3211 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
3212 ltdefs->rx_itcp.ltype_mask);
3213 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3214 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
3215 ltdefs->rx_oudp.ltype_mask);
3216 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3217 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
3218 ltdefs->rx_iudp.ltype_mask);
3219 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3220 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
3221 ltdefs->rx_osctp.ltype_mask);
3222 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3223 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
3224 ltdefs->rx_isctp.ltype_mask);
3225
3226 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
3227 if (err)
3228 return err;
3229
3230 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
3231 nix_link_config(rvu, blkaddr);
3232
3233 /* Enable Channel backpressure */
3234 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
3235 }
3236 return 0;
3237 }
3238
3239 void rvu_nix_freemem(struct rvu *rvu)
3240 {
3241 struct rvu_hwinfo *hw = rvu->hw;
3242 struct rvu_block *block;
3243 struct nix_txsch *txsch;
3244 struct nix_mcast *mcast;
3245 struct nix_hw *nix_hw;
3246 int blkaddr, lvl;
3247
3248 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
3249 if (blkaddr < 0)
3250 return;
3251
3252 block = &hw->block[blkaddr];
3253 rvu_aq_free(rvu, block->aq);
3254
3255 if (blkaddr == BLKADDR_NIX0) {
3256 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3257 if (!nix_hw)
3258 return;
3259
3260 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3261 txsch = &nix_hw->txsch[lvl];
3262 kfree(txsch->schq.bmap);
3263 }
3264
3265 mcast = &nix_hw->mcast;
3266 qmem_free(rvu->dev, mcast->mce_ctx);
3267 qmem_free(rvu->dev, mcast->mcast_buf);
3268 mutex_destroy(&mcast->mce_lock);
3269 }
3270 }
3271
3272 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
3273 struct msg_rsp *rsp)
3274 {
3275 u16 pcifunc = req->hdr.pcifunc;
3276 int nixlf, err;
3277
3278 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3279 if (err)
3280 return err;
3281
3282 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
3283
3284 return rvu_cgx_start_stop_io(rvu, pcifunc, true);
3285 }
3286
3287 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
3288 struct msg_rsp *rsp)
3289 {
3290 u16 pcifunc = req->hdr.pcifunc;
3291 int nixlf, err;
3292
3293 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3294 if (err)
3295 return err;
3296
3297 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
3298
3299 return rvu_cgx_start_stop_io(rvu, pcifunc, false);
3300 }
3301
3302 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
3303 {
3304 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3305 struct hwctx_disable_req ctx_req;
3306 int err;
3307
3308 ctx_req.hdr.pcifunc = pcifunc;
3309
3310 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
3311 nix_interface_deinit(rvu, pcifunc, nixlf);
3312 nix_rx_sync(rvu, blkaddr);
3313 nix_txschq_free(rvu, pcifunc);
3314
3315 rvu_cgx_start_stop_io(rvu, pcifunc, false);
3316
3317 if (pfvf->sq_ctx) {
3318 ctx_req.ctype = NIX_AQ_CTYPE_SQ;
3319 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3320 if (err)
3321 dev_err(rvu->dev, "SQ ctx disable failed\n");
3322 }
3323
3324 if (pfvf->rq_ctx) {
3325 ctx_req.ctype = NIX_AQ_CTYPE_RQ;
3326 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3327 if (err)
3328 dev_err(rvu->dev, "RQ ctx disable failed\n");
3329 }
3330
3331 if (pfvf->cq_ctx) {
3332 ctx_req.ctype = NIX_AQ_CTYPE_CQ;
3333 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3334 if (err)
3335 dev_err(rvu->dev, "CQ ctx disable failed\n");
3336 }
3337
3338 nix_ctx_free(rvu, pfvf);
3339 }
3340
3341 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
3342
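/* Enable or disable TX PTP timestamping for this pcifunc's NIX LF by
 * toggling the PTP_EN bit in NIX_AF_LFX_TX_CFG.
 */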
3343 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
3344 {
3345 struct rvu_hwinfo *hw = rvu->hw;
3346 struct rvu_block *block;
3347 int blkaddr;
3348 int nixlf;
3349 u64 cfg;
3350
3351 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3352 if (blkaddr < 0)
3353 return NIX_AF_ERR_AF_LF_INVALID;
3354
3355 block = &hw->block[blkaddr];
3356 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
3357 if (nixlf < 0)
3358 return NIX_AF_ERR_AF_LF_INVALID;
3359
3360 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
3361
3362 if (enable)
3363 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
3364 else
3365 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
3366
3367 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
3368
3369 return 0;
3370 }
3371
3372 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
3373 struct msg_rsp *rsp)
3374 {
3375 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
3376 }
3377
3378 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
3379 struct msg_rsp *rsp)
3380 {
3381 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
3382 }
3383
3384 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
3385 struct nix_lso_format_cfg *req,
3386 struct nix_lso_format_cfg_rsp *rsp)
3387 {
3388 u16 pcifunc = req->hdr.pcifunc;
3389 struct nix_hw *nix_hw;
3390 struct rvu_pfvf *pfvf;
3391 int blkaddr, idx, f;
3392 u64 reg;
3393
3394 pfvf = rvu_get_pfvf(rvu, pcifunc);
3395 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3396 if (!pfvf->nixlf || blkaddr < 0)
3397 return NIX_AF_ERR_AF_LF_INVALID;
3398
3399 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3400 if (!nix_hw)
3401 return -EINVAL;
3402
3403 /* Find existing matching LSO format, if any */
3404 for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
3405 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
3406 reg = rvu_read64(rvu, blkaddr,
3407 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
3408 if (req->fields[f] != (reg & req->field_mask))
3409 break;
3410 }
3411
3412 if (f == NIX_LSO_FIELD_MAX)
3413 break;
3414 }
3415
3416 if (idx < nix_hw->lso.in_use) {
3417 /* Match found */
3418 rsp->lso_format_idx = idx;
3419 return 0;
3420 }
3421
3422 if (nix_hw->lso.in_use == nix_hw->lso.total)
3423 return NIX_AF_ERR_LSO_CFG_FAIL;
3424
3425 rsp->lso_format_idx = nix_hw->lso.in_use++;
3426
3427 for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
3428 rvu_write64(rvu, blkaddr,
3429 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
3430 req->fields[f]);
3431
3432 return 0;
3433 }
3434