// SPDX-License-Identifier: GPL-2.0
/* Marvell CN10K MCS driver
 *
 * Copyright (C) 2022 Marvell.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "mcs.h"
#include "rvu.h"
#include "mcs_reg.h"
#include "lmac_common.h"

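/* Expanding M() over MBOX_UP_MCS_MESSAGES generates one
 * otx2_mbox_alloc_msg_<name>() helper per AF-to-PF (uplink) MCS mailbox
 * message. Each helper allocates a request of the right size on the
 * AF-PF up mailbox and fills in the message signature and ID.
 */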
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	return req;							\
}

MBOX_UP_MCS_MESSAGES
#undef M

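/* Configure whether the MCS PEX should skip the 8-byte PTP header that the
 * RPM prepends to received packets when PTP timestamping is enabled on the
 * given LMAC.
 */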
void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena)
{
	struct mcs *mcs;
	u64 cfg;
	u8 port;

	if (!rvu->mcs_blk_cnt)
		return;

	/* When PTP is enabled, the RPM appends an 8-byte header to all
	 * RX packets. The MCS PEX must be configured to skip these 8 bytes
	 * during packet parsing.
	 */

	/* CNF10K-B */
	if (rvu->mcs_blk_cnt > 1) {
		mcs = mcs_get_pdata(rpm_id);
		cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
		if (ena)
			cfg |= BIT_ULL(lmac_id);
		else
			cfg &= ~BIT_ULL(lmac_id);
		mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, cfg);
		return;
	}
	/* CN10K-B */
	mcs = mcs_get_pdata(0);
	port = (rpm_id * rvu->hw->lmac_per_cgx) + lmac_id;
	cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port));
	if (ena)
		cfg |= BIT_ULL(0);
	else
		cfg &= ~BIT_ULL(0);
	mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port), cfg);
}

int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu,
				       struct mcs_set_lmac_mode *req,
				       struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	if (BIT_ULL(req->lmac_id) & mcs->hw->lmac_bmap)
		mcs_set_lmac_mode(mcs, req->lmac_id, req->mode);

	return 0;
}

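/* Queue an MCS interrupt event for the PF/VF that owns it. The event is
 * dropped (successfully) if the owner has not enabled notification for any
 * of the raised interrupt bits. May run in atomic context, hence the
 * GFP_ATOMIC allocation; the queued work is drained by
 * mcs_intr_handler_task().
 */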
int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
{
	struct mcs_intrq_entry *qentry;
	u16 pcifunc = event->pcifunc;
	struct rvu *rvu = mcs->rvu;
	struct mcs_pfvf *pfvf;

	/* Check if it is a PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK)
		pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
	else
		pfvf = &mcs->pf[rvu_get_pf(pcifunc)];

	event->intr_mask &= pfvf->intr_mask;

	/* Check if PF/VF interrupt notification is enabled */
	if (!(pfvf->intr_mask && event->intr_mask))
		return 0;

	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;

	qentry->intr_event = *event;
	spin_lock(&rvu->mcs_intrq_lock);
	list_add_tail(&qentry->node, &rvu->mcs_intrq_head);
	spin_unlock(&rvu->mcs_intrq_lock);
	queue_work(rvu->mcs_intr_wq, &rvu->mcs_intr_work);

	return 0;
}

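/* Forward an MCS interrupt event to the owning PF over the AF-PF up
 * mailbox and wait for the PF to acknowledge it.
 */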
static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
{
	struct mcs_intr_info *req;
	int err, pf;

	pf = rvu_get_pf(event->pcifunc);

	req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
	if (!req)
		return -ENOMEM;

	req->mcs_id = event->mcs_id;
	req->intr_mask = event->intr_mask;
	req->sa_id = event->sa_id;
	req->hdr.pcifunc = event->pcifunc;
	req->lmac_id = event->lmac_id;

	otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
	err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
	if (err)
		dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf);

	return 0;
}

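/* Work handler: drain the interrupt event queue and notify the owning PF
 * for each queued event.
 */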
static void mcs_intr_handler_task(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, mcs_intr_work);
	struct mcs_intrq_entry *qentry;
	struct mcs_intr_event *event;
	unsigned long flags;

	do {
		spin_lock_irqsave(&rvu->mcs_intrq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->mcs_intrq_head,
						  struct mcs_intrq_entry,
						  node);
		if (qentry)
			list_del(&qentry->node);

		spin_unlock_irqrestore(&rvu->mcs_intrq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->intr_event;

		mcs_notify_pfvf(event, rvu);
		kfree(qentry);
	} while (1);
}

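/* Record which MCS interrupts a PF/VF wants to be notified about; the mask
 * is consulted in mcs_add_intr_wq_entry() before an event is queued.
 */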
int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu,
				  struct mcs_intr_cfg *req,
				  struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct mcs_pfvf *pfvf;
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	/* Check if it is a PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK)
		pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
	else
		pfvf = &mcs->pf[rvu_get_pf(pcifunc)];

	mcs->pf_map[0] = pcifunc;
	pfvf->intr_mask = req->intr_mask;

	return 0;
}

int rvu_mbox_handler_mcs_get_hw_info(struct rvu *rvu,
				     struct msg_req *req,
				     struct mcs_hw_info *rsp)
{
	struct mcs *mcs;

	if (!rvu->mcs_blk_cnt)
		return MCS_AF_ERR_NOT_MAPPED;

	/* MCS resources are the same across all blocks */
	mcs = mcs_get_pdata(0);
	rsp->num_mcs_blks = rvu->mcs_blk_cnt;
	rsp->tcam_entries = mcs->hw->tcam_entries;
	rsp->secy_entries = mcs->hw->secy_entries;
	rsp->sc_entries = mcs->hw->sc_entries;
	rsp->sa_entries = mcs->hw->sa_entries;
	return 0;
}

int rvu_mbox_handler_mcs_port_reset(struct rvu *rvu, struct mcs_port_reset_req *req,
				    struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	mcs_reset_port(mcs, req->port_id, req->reset);

	return 0;
}

int rvu_mbox_handler_mcs_clear_stats(struct rvu *rvu,
				     struct mcs_clear_stats *req,
				     struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	mutex_lock(&mcs->stats_lock);
	if (req->all)
		mcs_clear_all_stats(mcs, pcifunc, req->dir);
	else
		mcs_clear_stats(mcs, req->type, req->id, req->dir);

	mutex_unlock(&mcs->stats_lock);
	return 0;
}

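/* The statistics handlers below share one pattern: on CNF10K-B (more than
 * one MCS block) MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP is set before the counters
 * are read so that the values are accurate, the counters are read under
 * stats_lock, and the force-clock bit is cleared again afterwards.
 */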
int rvu_mbox_handler_mcs_get_flowid_stats(struct rvu *rvu,
					  struct mcs_stats_req *req,
					  struct mcs_flowid_stats *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	/* In CNF10K-B, before reading the statistics,
	 * MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP needs to be set
	 * to get accurate statistics
	 */
	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, true);

	mutex_lock(&mcs->stats_lock);
	mcs_get_flowid_stats(mcs, rsp, req->id, req->dir);
	mutex_unlock(&mcs->stats_lock);

	/* Clear MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP after reading
	 * the statistics
	 */
	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, false);

	return 0;
}

int rvu_mbox_handler_mcs_get_secy_stats(struct rvu *rvu,
					struct mcs_stats_req *req,
					struct mcs_secy_stats *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, true);

	mutex_lock(&mcs->stats_lock);

	if (req->dir == MCS_RX)
		mcs_get_rx_secy_stats(mcs, rsp, req->id);
	else
		mcs_get_tx_secy_stats(mcs, rsp, req->id);

	mutex_unlock(&mcs->stats_lock);

	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, false);

	return 0;
}

int rvu_mbox_handler_mcs_get_sc_stats(struct rvu *rvu,
				      struct mcs_stats_req *req,
				      struct mcs_sc_stats *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, true);

	mutex_lock(&mcs->stats_lock);
	mcs_get_sc_stats(mcs, rsp, req->id, req->dir);
	mutex_unlock(&mcs->stats_lock);

	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, false);

	return 0;
}

int rvu_mbox_handler_mcs_get_sa_stats(struct rvu *rvu,
				      struct mcs_stats_req *req,
				      struct mcs_sa_stats *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, true);

	mutex_lock(&mcs->stats_lock);
	mcs_get_sa_stats(mcs, rsp, req->id, req->dir);
	mutex_unlock(&mcs->stats_lock);

	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, false);

	return 0;
}

int rvu_mbox_handler_mcs_get_port_stats(struct rvu *rvu,
					struct mcs_stats_req *req,
					struct mcs_port_stats *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, true);

	mutex_lock(&mcs->stats_lock);
	mcs_get_port_stats(mcs, rsp, req->id, req->dir);
	mutex_unlock(&mcs->stats_lock);

	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, false);

	return 0;
}

int rvu_mbox_handler_mcs_set_active_lmac(struct rvu *rvu,
					 struct mcs_set_active_lmac *req,
					 struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);
	if (!mcs)
		return MCS_AF_ERR_NOT_MAPPED;

	mcs->hw->lmac_bmap = req->lmac_bmap;
	mcs_set_lmac_channels(req->mcs_id, req->chan_base);
	return 0;
}

int rvu_mbox_handler_mcs_port_cfg_set(struct rvu *rvu, struct mcs_port_cfg_set_req *req,
				      struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
		return -EINVAL;

	mcs_set_port_cfg(mcs, req);

	return 0;
}

int rvu_mbox_handler_mcs_port_cfg_get(struct rvu *rvu, struct mcs_port_cfg_get_req *req,
				      struct mcs_port_cfg_get_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
		return -EINVAL;

	mcs_get_port_cfg(mcs, req, rsp);

	return 0;
}

int rvu_mbox_handler_mcs_custom_tag_cfg_get(struct rvu *rvu, struct mcs_custom_tag_cfg_get_req *req,
					    struct mcs_custom_tag_cfg_get_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	mcs_get_custom_tag_cfg(mcs, req, rsp);

	return 0;
}

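/* FLR handler: release every RX and TX resource still owned by the given
 * pcifunc, on all MCS blocks.
 */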
int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc)
{
	struct mcs *mcs;
	int mcs_id;

	/* CNF10K-B: mcs0-6 are mapped to RPM2-8 */
	if (rvu->mcs_blk_cnt > 1) {
		for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
			mcs = mcs_get_pdata(mcs_id);
			mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
			mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
		}
	} else {
		/* CN10K-B has only one MCS block */
		mcs = mcs_get_pdata(0);
		mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
		mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
	}
	return 0;
}

int rvu_mbox_handler_mcs_flowid_ena_entry(struct rvu *rvu,
					  struct mcs_flowid_ena_dis_entry *req,
					  struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);
	mcs_ena_dis_flowid_entry(mcs, req->flow_id, req->dir, req->ena);
	return 0;
}

int rvu_mbox_handler_mcs_pn_table_write(struct rvu *rvu,
					struct mcs_pn_table_write_req *req,
					struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);
	mcs_pn_table_write(mcs, req->pn_id, req->next_pn, req->dir);
	return 0;
}

int rvu_mbox_handler_mcs_set_pn_threshold(struct rvu *rvu,
					  struct mcs_set_pn_threshold *req,
					  struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	mcs_pn_threshold_set(mcs, req);

	return 0;
}

int rvu_mbox_handler_mcs_rx_sc_sa_map_write(struct rvu *rvu,
					    struct mcs_rx_sc_sa_map *req,
					    struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);
	mcs->mcs_ops->mcs_rx_sa_mem_map_write(mcs, req);
	return 0;
}

int rvu_mbox_handler_mcs_tx_sc_sa_map_write(struct rvu *rvu,
					    struct mcs_tx_sc_sa_map *req,
					    struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);
	mcs->mcs_ops->mcs_tx_sa_mem_map_write(mcs, req);
	mcs->tx_sa_active[req->sc_id] = req->tx_sa_active;

	return 0;
}

int rvu_mbox_handler_mcs_sa_plcy_write(struct rvu *rvu,
				       struct mcs_sa_plcy_write_req *req,
				       struct msg_rsp *rsp)
{
	struct mcs *mcs;
	int i;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	for (i = 0; i < req->sa_cnt; i++)
		mcs_sa_plcy_write(mcs, &req->plcy[i][0],
				  req->sa_index[i], req->dir);
	return 0;
}

int rvu_mbox_handler_mcs_rx_sc_cam_write(struct rvu *rvu,
					 struct mcs_rx_sc_cam_write_req *req,
					 struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);
	mcs_rx_sc_cam_write(mcs, req->sci, req->secy_id, req->sc_id);
	return 0;
}

int rvu_mbox_handler_mcs_secy_plcy_write(struct rvu *rvu,
					 struct mcs_secy_plcy_write_req *req,
					 struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	mcs_secy_plcy_write(mcs, req->plcy,
			    req->secy_id, req->dir);
	return 0;
}

int rvu_mbox_handler_mcs_flowid_entry_write(struct rvu *rvu,
					    struct mcs_flowid_entry_write_req *req,
					    struct msg_rsp *rsp)
{
	struct secy_mem_map map;
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	/* TODO: validate the flowid */
	mcs_flowid_entry_write(mcs, req->data, req->mask,
			       req->flow_id, req->dir);
	map.secy = req->secy_id;
	map.sc = req->sc_id;
	map.ctrl_pkt = req->ctrl_pkt;
	map.flow_id = req->flow_id;
	map.sci = req->sci;
	mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, req->dir);
	if (req->ena)
		mcs_ena_dis_flowid_entry(mcs, req->flow_id,
					 req->dir, true);
	return 0;
}

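/* Free MCS resources owned by a PF/VF: either everything it holds in the
 * given direction (req->all) or a single resource of the requested type.
 * The corresponding hardware entry is disabled or cleared where needed.
 */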
int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu,
					struct mcs_free_rsrc_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct mcs_rsrc_map *map;
	struct mcs *mcs;
	int rc = 0;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	if (req->dir == MCS_RX)
		map = &mcs->rx;
	else
		map = &mcs->tx;

	mutex_lock(&rvu->rsrc_lock);
	/* Free all the CAM resources mapped to the PF/VF */
	if (req->all) {
		rc = mcs_free_all_rsrc(mcs, req->dir, pcifunc);
		goto exit;
	}

	switch (req->rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		rc = mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map, req->rsrc_id, pcifunc);
		mcs_ena_dis_flowid_entry(mcs, req->rsrc_id, req->dir, false);
		break;
	case MCS_RSRC_TYPE_SECY:
		rc = mcs_free_rsrc(&map->secy, map->secy2pf_map, req->rsrc_id, pcifunc);
		mcs_clear_secy_plcy(mcs, req->rsrc_id, req->dir);
		break;
	case MCS_RSRC_TYPE_SC:
		rc = mcs_free_rsrc(&map->sc, map->sc2pf_map, req->rsrc_id, pcifunc);
		/* Disable the SC CAM only on the RX side */
		if (req->dir == MCS_RX)
			mcs_ena_dis_sc_cam_entry(mcs, req->rsrc_id, false);
		break;
	case MCS_RSRC_TYPE_SA:
		rc = mcs_free_rsrc(&map->sa, map->sa2pf_map, req->rsrc_id, pcifunc);
		break;
	}
exit:
	mutex_unlock(&rvu->rsrc_lock);
	return rc;
}

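/* Allocate MCS resources for a PF/VF: either one of each type (req->all)
 * or req->rsrc_cnt entries of a single type. The allocated IDs are
 * returned in the response.
 */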
int rvu_mbox_handler_mcs_alloc_resources(struct rvu *rvu,
					 struct mcs_alloc_rsrc_req *req,
					 struct mcs_alloc_rsrc_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct mcs_rsrc_map *map;
	struct mcs *mcs;
	int rsrc_id, i;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	if (req->dir == MCS_RX)
		map = &mcs->rx;
	else
		map = &mcs->tx;

	mutex_lock(&rvu->rsrc_lock);

	if (req->all) {
		rsrc_id = mcs_alloc_all_rsrc(mcs, &rsp->flow_ids[0],
					     &rsp->secy_ids[0],
					     &rsp->sc_ids[0],
					     &rsp->sa_ids[0],
					     &rsp->sa_ids[1],
					     pcifunc, req->dir);
		goto exit;
	}

	switch (req->rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		for (i = 0; i < req->rsrc_cnt; i++) {
			rsrc_id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
			if (rsrc_id < 0)
				goto exit;
			rsp->flow_ids[i] = rsrc_id;
			rsp->rsrc_cnt++;
		}
		break;
	case MCS_RSRC_TYPE_SECY:
		for (i = 0; i < req->rsrc_cnt; i++) {
			rsrc_id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
			if (rsrc_id < 0)
				goto exit;
			rsp->secy_ids[i] = rsrc_id;
			rsp->rsrc_cnt++;
		}
		break;
	case MCS_RSRC_TYPE_SC:
		for (i = 0; i < req->rsrc_cnt; i++) {
			rsrc_id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
			if (rsrc_id < 0)
				goto exit;
			rsp->sc_ids[i] = rsrc_id;
			rsp->rsrc_cnt++;
		}
		break;
	case MCS_RSRC_TYPE_SA:
		for (i = 0; i < req->rsrc_cnt; i++) {
			rsrc_id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
			if (rsrc_id < 0)
				goto exit;
			rsp->sa_ids[i] = rsrc_id;
			rsp->rsrc_cnt++;
		}
		break;
	}

	rsp->rsrc_type = req->rsrc_type;
	rsp->dir = req->dir;
	rsp->mcs_id = req->mcs_id;
	rsp->all = req->all;

exit:
	if (rsrc_id < 0)
		dev_err(rvu->dev, "Failed to allocate the mcs resources for PCIFUNC:%d\n", pcifunc);
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

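/* Control packet classification rules of the different types share one
 * allocator bitmap; the rule type selects a fixed offset into that bitmap
 * before a free rule index is picked.
 */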
int rvu_mbox_handler_mcs_alloc_ctrl_pkt_rule(struct rvu *rvu,
					     struct mcs_alloc_ctrl_pkt_rule_req *req,
					     struct mcs_alloc_ctrl_pkt_rule_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct mcs_rsrc_map *map;
	struct mcs *mcs;
	int rsrc_id;
	u16 offset;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;

	mutex_lock(&rvu->rsrc_lock);

	switch (req->rule_type) {
	case MCS_CTRL_PKT_RULE_TYPE_ETH:
		offset = MCS_CTRLPKT_ETYPE_RULE_OFFSET;
		break;
	case MCS_CTRL_PKT_RULE_TYPE_DA:
		offset = MCS_CTRLPKT_DA_RULE_OFFSET;
		break;
	case MCS_CTRL_PKT_RULE_TYPE_RANGE:
		offset = MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
		break;
	case MCS_CTRL_PKT_RULE_TYPE_COMBO:
		offset = MCS_CTRLPKT_COMBO_RULE_OFFSET;
		break;
	case MCS_CTRL_PKT_RULE_TYPE_MAC:
		offset = MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
		break;
	}

	rsrc_id = mcs_alloc_ctrlpktrule(&map->ctrlpktrule, map->ctrlpktrule2pf_map, offset,
					pcifunc);
	if (rsrc_id < 0)
		goto exit;

	rsp->rule_idx = rsrc_id;
	rsp->rule_type = req->rule_type;
	rsp->dir = req->dir;
	rsp->mcs_id = req->mcs_id;

	mutex_unlock(&rvu->rsrc_lock);
	return 0;
exit:
	if (rsrc_id < 0)
		dev_err(rvu->dev, "Failed to allocate the mcs ctrl pkt rule for PCIFUNC:%d\n",
			pcifunc);
	mutex_unlock(&rvu->rsrc_lock);
	return rsrc_id;
}

int rvu_mbox_handler_mcs_free_ctrl_pkt_rule(struct rvu *rvu,
					    struct mcs_free_ctrl_pkt_rule_req *req,
					    struct msg_rsp *rsp)
{
	struct mcs *mcs;
	int rc;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	mutex_lock(&rvu->rsrc_lock);

	rc = mcs_free_ctrlpktrule(mcs, req);

	mutex_unlock(&rvu->rsrc_lock);

	return rc;
}

int rvu_mbox_handler_mcs_ctrl_pkt_rule_write(struct rvu *rvu,
					     struct mcs_ctrl_pkt_rule_write_req *req,
					     struct msg_rsp *rsp)
{
	struct mcs *mcs;
	int rc;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	rc = mcs_ctrlpktrule_write(mcs, req);

	return rc;
}

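/* Build the bitmap of usable LMACs for the single CN10K-B MCS block by
 * checking every CGX/LMAC pair known to the AF.
 */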
static void rvu_mcs_set_lmac_bmap(struct rvu *rvu)
{
	struct mcs *mcs = mcs_get_pdata(0);
	unsigned long lmac_bmap = 0;
	int cgx, lmac, port;

	for (port = 0; port < mcs->hw->lmac_cnt; port++) {
		cgx = port / rvu->hw->lmac_per_cgx;
		lmac = port % rvu->hw->lmac_per_cgx;
		if (!is_lmac_valid(rvu_cgx_pdata(cgx, rvu), lmac))
			continue;
		set_bit(port, &lmac_bmap);
	}
	mcs->hw->lmac_bmap = lmac_bmap;
}

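/* AF-side MCS initialization: discover the MCS blocks, set up LMAC channels
 * and the active-LMAC bitmap on CN10K-B, install the default TCAM bypass
 * entry per block, allocate PF/VF state and create the interrupt handling
 * workqueue.
 */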
int rvu_mcs_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int lmac, err = 0, mcs_id;
	struct mcs *mcs;

	rvu->mcs_blk_cnt = mcs_get_blkcnt();

	if (!rvu->mcs_blk_cnt)
		return 0;

	/* Needed only for CN10K-B */
	if (rvu->mcs_blk_cnt == 1) {
		err = mcs_set_lmac_channels(0, hw->cgx_chan_base);
		if (err)
			return err;
		/* Set active LMACs */
		rvu_mcs_set_lmac_bmap(rvu);
	}

	/* Install the default TCAM bypass entry and set ports to operational mode */
	for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
		mcs = mcs_get_pdata(mcs_id);
		mcs_install_flowid_bypass_entry(mcs);
		for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
			mcs_set_lmac_mode(mcs, lmac, 0);

		mcs->rvu = rvu;

		/* Allocate memory for PF/VF data */
		mcs->pf = devm_kcalloc(mcs->dev, hw->total_pfs,
				       sizeof(struct mcs_pfvf), GFP_KERNEL);
		if (!mcs->pf)
			return -ENOMEM;

		mcs->vf = devm_kcalloc(mcs->dev, hw->total_vfs,
				       sizeof(struct mcs_pfvf), GFP_KERNEL);
		if (!mcs->vf)
			return -ENOMEM;
	}

	/* Initialize the workqueue for handling MCS interrupts */
	INIT_LIST_HEAD(&rvu->mcs_intrq_head);
	INIT_WORK(&rvu->mcs_intr_work, mcs_intr_handler_task);
	rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", 0, 0);
	if (!rvu->mcs_intr_wq) {
		dev_err(rvu->dev, "mcs alloc workqueue failed\n");
		return -ENOMEM;
	}

	return err;
}

void rvu_mcs_exit(struct rvu *rvu)
{
	if (!rvu->mcs_intr_wq)
		return;

	flush_workqueue(rvu->mcs_intr_wq);
	destroy_workqueue(rvu->mcs_intr_wq);
	rvu->mcs_intr_wq = NULL;
}