1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell MCS driver
3 *
4 * Copyright (C) 2022 Marvell.
5 */
6
7 #include <linux/bitfield.h>
8 #include <linux/delay.h>
9 #include <linux/device.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12
13 #include "mcs.h"
14 #include "mcs_reg.h"
15
16 #define DRV_NAME "Marvell MCS Driver"
17
18 #define PCI_CFG_REG_BAR_NUM 0
19
20 static const struct pci_device_id mcs_id_table[] = {
21 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_MCS) },
22 { 0, } /* end of table */
23 };
24
25 static LIST_HEAD(mcs_list);
26
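/* Snapshot the CSE TX SecY counters (controlled/uncontrolled port packet
 * and octet counts, encrypted/protected octets and error counters) for
 * SecY index @id.
 */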
void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
28 {
29 u64 reg;
30
31 reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(id);
32 stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
33
34 reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(id);
35 stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
36
37 reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(id);
38 stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
39
40 reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(id);
41 stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
42
43 reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(id);
44 stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
45
46 reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(id);
47 stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
48
49 reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(id);
50 stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
51
52 reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(id);
53 stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
54
55 reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(id);
56 stats->octet_encrypted_cnt = mcs_reg_read(mcs, reg);
57
58 reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(id);
59 stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
60
61 reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(id);
62 stats->pkt_noactivesa_cnt = mcs_reg_read(mcs, reg);
63
64 reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(id);
65 stats->pkt_toolong_cnt = mcs_reg_read(mcs, reg);
66
67 reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(id);
68 stats->pkt_untagged_cnt = mcs_reg_read(mcs, reg);
69 }
70
void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
72 {
73 u64 reg;
74
75 reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(id);
76 stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
77
78 reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(id);
79 stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
80
81 reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(id);
82 stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
83
84 reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(id);
85 stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
86
87 reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(id);
88 stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
89
90 reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(id);
91 stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
92
93 reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(id);
94 stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
95
96 reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(id);
97 stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
98
99 reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(id);
100 stats->octet_decrypted_cnt = mcs_reg_read(mcs, reg);
101
102 reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(id);
103 stats->octet_validated_cnt = mcs_reg_read(mcs, reg);
104
105 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(id);
106 stats->pkt_port_disabled_cnt = mcs_reg_read(mcs, reg);
107
108 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(id);
109 stats->pkt_badtag_cnt = mcs_reg_read(mcs, reg);
110
111 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(id);
112 stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
113
114 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(id);
115 stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
116
117 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id);
118 stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);
119
120 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id);
121 stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);
122
123 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id);
124 stats->pkt_ctl_cnt = mcs_reg_read(mcs, reg);
125
126 if (mcs->hw->mcs_blks > 1) {
127 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(id);
128 stats->pkt_notag_cnt = mcs_reg_read(mcs, reg);
129 }
130 }
131
void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats,
			  int id, int dir)
134 {
135 u64 reg;
136
137 if (dir == MCS_RX)
138 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(id);
139 else
140 reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(id);
141
142 stats->tcam_hit_cnt = mcs_reg_read(mcs, reg);
143 }
144
void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats,
			int id, int dir)
147 {
148 u64 reg;
149
150 if (dir == MCS_RX) {
151 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(id);
152 stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
153
154 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(id);
155 stats->parser_err_cnt = mcs_reg_read(mcs, reg);
156 if (mcs->hw->mcs_blks > 1) {
157 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(id);
158 stats->preempt_err_cnt = mcs_reg_read(mcs, reg);
159 }
160 } else {
161 reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(id);
162 stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
163
164 reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(id);
165 stats->parser_err_cnt = mcs_reg_read(mcs, reg);
166
167 reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(id);
168 stats->sectag_insert_err_cnt = mcs_reg_read(mcs, reg);
169 }
170 }
171
void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir)
173 {
174 u64 reg;
175
176 if (dir == MCS_RX) {
177 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(id);
178 stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
179
180 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(id);
181 stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
182
183 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(id);
184 stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
185
186 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(id);
187 stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
188
189 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(id);
190 stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
191 } else {
192 reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(id);
193 stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
194
195 reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(id);
196 stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
197 }
198 }
199
void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
		      int id, int dir)
202 {
203 u64 reg;
204
205 if (dir == MCS_RX) {
206 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(id);
207 stats->hit_cnt = mcs_reg_read(mcs, reg);
208
209 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(id);
210 stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
211
212 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(id);
213 stats->pkt_late_cnt = mcs_reg_read(mcs, reg);
214
215 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id);
216 stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
217
218 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id);
219 stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);
220
221 if (mcs->hw->mcs_blks > 1) {
222 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(id);
223 stats->pkt_delay_cnt = mcs_reg_read(mcs, reg);
224
225 reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(id);
226 stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
227 }
228 if (mcs->hw->mcs_blks == 1) {
229 reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(id);
230 stats->octet_decrypt_cnt = mcs_reg_read(mcs, reg);
231
232 reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(id);
233 stats->octet_validate_cnt = mcs_reg_read(mcs, reg);
234 }
235 } else {
236 reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(id);
237 stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
238
239 reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(id);
240 stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
241
242 if (mcs->hw->mcs_blks == 1) {
243 reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(id);
244 stats->octet_encrypt_cnt = mcs_reg_read(mcs, reg);
245
246 reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(id);
247 stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
248 }
249 }
250 }
251
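/* Clear one stats entry of the given @type. Writing BIT(0) to the CSE
 * RX/TX slave control register presumably puts the stats memory into
 * clear-on-read mode; the entry is then read back (and discarded) so its
 * counters reset, after which the control register is restored to 0.
 */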
void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir)
253 {
254 struct mcs_flowid_stats flowid_st;
255 struct mcs_port_stats port_st;
256 struct mcs_secy_stats secy_st;
257 struct mcs_sc_stats sc_st;
258 struct mcs_sa_stats sa_st;
259 u64 reg;
260
261 if (dir == MCS_RX)
262 reg = MCSX_CSE_RX_SLAVE_CTRL;
263 else
264 reg = MCSX_CSE_TX_SLAVE_CTRL;
265
266 mcs_reg_write(mcs, reg, BIT_ULL(0));
267
268 switch (type) {
269 case MCS_FLOWID_STATS:
270 mcs_get_flowid_stats(mcs, &flowid_st, id, dir);
271 break;
272 case MCS_SECY_STATS:
273 if (dir == MCS_RX)
274 mcs_get_rx_secy_stats(mcs, &secy_st, id);
275 else
276 mcs_get_tx_secy_stats(mcs, &secy_st, id);
277 break;
278 case MCS_SC_STATS:
279 mcs_get_sc_stats(mcs, &sc_st, id, dir);
280 break;
281 case MCS_SA_STATS:
282 mcs_get_sa_stats(mcs, &sa_st, id, dir);
283 break;
284 case MCS_PORT_STATS:
285 mcs_get_port_stats(mcs, &port_st, id, dir);
286 break;
287 }
288
289 mcs_reg_write(mcs, reg, 0x0);
290 }
291
int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir)
293 {
294 struct mcs_rsrc_map *map;
295 int id;
296
297 if (dir == MCS_RX)
298 map = &mcs->rx;
299 else
300 map = &mcs->tx;
301
302 /* Clear FLOWID stats */
303 for (id = 0; id < map->flow_ids.max; id++) {
304 if (map->flowid2pf_map[id] != pcifunc)
305 continue;
306 mcs_clear_stats(mcs, MCS_FLOWID_STATS, id, dir);
307 }
308
309 /* Clear SECY stats */
310 for (id = 0; id < map->secy.max; id++) {
311 if (map->secy2pf_map[id] != pcifunc)
312 continue;
313 mcs_clear_stats(mcs, MCS_SECY_STATS, id, dir);
314 }
315
316 /* Clear SC stats */
317 for (id = 0; id < map->secy.max; id++) {
318 if (map->sc2pf_map[id] != pcifunc)
319 continue;
320 mcs_clear_stats(mcs, MCS_SC_STATS, id, dir);
321 }
322
323 /* Clear SA stats */
324 for (id = 0; id < map->sa.max; id++) {
325 if (map->sa2pf_map[id] != pcifunc)
326 continue;
327 mcs_clear_stats(mcs, MCS_SA_STATS, id, dir);
328 }
329 return 0;
330 }
331
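/* Program the next packet number for SA @pn_id in the RX or TX PN table */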
void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir)
333 {
334 u64 reg;
335
336 if (dir == MCS_RX)
337 reg = MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
338 else
339 reg = MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
340 mcs_reg_write(mcs, reg, next_pn);
341 }
342
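/* CN10KB TX SC to SA mapping. SA_MAP_MEM_0 packs sa_index0 (bits 7:0),
 * sa_index1 (starting at bit 9), rekey_ena (bit 18), the two SA valid
 * bits (19 and 20), tx_sa_active (bit 21) and the low bits of sectag_sci
 * (from bit 22); the remaining SCI bits spill into SA_MAP_MEM_1.
 */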
void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
344 {
345 u64 reg, val;
346
347 val = (map->sa_index0 & 0xFF) |
348 (map->sa_index1 & 0xFF) << 9 |
349 (map->rekey_ena & 0x1) << 18 |
350 (map->sa_index0_vld & 0x1) << 19 |
351 (map->sa_index1_vld & 0x1) << 20 |
352 (map->tx_sa_active & 0x1) << 21 |
353 map->sectag_sci << 22;
354 reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
355 mcs_reg_write(mcs, reg, val);
356
357 val = map->sectag_sci >> 42;
358 reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(map->sc_id);
359 mcs_reg_write(mcs, reg, val);
360 }
361
void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
363 {
364 u64 val, reg;
365
366 val = (map->sa_index & 0xFF) | map->sa_in_use << 9;
367
368 reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
369 mcs_reg_write(mcs, reg, val);
370 }
371
void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa_id, int dir)
373 {
374 int reg_id;
375 u64 reg;
376
377 if (dir == MCS_RX) {
378 for (reg_id = 0; reg_id < 8; reg_id++) {
379 reg = MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
380 mcs_reg_write(mcs, reg, plcy[reg_id]);
381 }
382 } else {
383 for (reg_id = 0; reg_id < 9; reg_id++) {
384 reg = MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
385 mcs_reg_write(mcs, reg, plcy[reg_id]);
386 }
387 }
388 }
389
void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int sc_id, int ena)
391 {
392 u64 reg, val;
393
394 reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(0);
395 if (sc_id > 63)
396 reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(1);
397
398 if (ena)
399 val = mcs_reg_read(mcs, reg) | BIT_ULL(sc_id);
400 else
401 val = mcs_reg_read(mcs, reg) & ~BIT_ULL(sc_id);
402
403 mcs_reg_write(mcs, reg, val);
404 }
405
void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id)
407 {
408 mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(0, sc_id), sci);
409 mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(1, sc_id), secy);
410 /* Enable SC CAM */
411 mcs_ena_dis_sc_cam_entry(mcs, sc_id, true);
412 }
413
void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int secy_id, int dir)
415 {
416 u64 reg;
417
418 if (dir == MCS_RX)
419 reg = MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(secy_id);
420 else
421 reg = MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(secy_id);
422
423 mcs_reg_write(mcs, reg, plcy);
424
425 if (mcs->hw->mcs_blks == 1 && dir == MCS_RX)
426 mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(secy_id), 0x0ull);
427 }
428
void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
430 {
431 u64 reg, val;
432
433 val = (map->secy & 0x7F) | (map->ctrl_pkt & 0x1) << 8;
434 if (dir == MCS_RX) {
435 reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
436 } else {
437 val |= (map->sc & 0x7F) << 9;
438 reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
439 }
440
441 mcs_reg_write(mcs, reg, val);
442 }
443
void mcs_ena_dis_flowid_entry(struct mcs *mcs, int flow_id, int dir, int ena)
445 {
446 u64 reg, val;
447
448 if (dir == MCS_RX) {
449 reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0;
450 if (flow_id > 63)
451 reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1;
452 } else {
453 reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0;
454 if (flow_id > 63)
455 reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1;
456 }
457
458 /* Enable/Disable the tcam entry */
459 if (ena)
460 val = mcs_reg_read(mcs, reg) | BIT_ULL(flow_id);
461 else
462 val = mcs_reg_read(mcs, reg) & ~BIT_ULL(flow_id);
463
464 mcs_reg_write(mcs, reg, val);
465 }
466
void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id, int dir)
468 {
469 int reg_id;
470 u64 reg;
471
472 if (dir == MCS_RX) {
473 for (reg_id = 0; reg_id < 4; reg_id++) {
474 reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
475 mcs_reg_write(mcs, reg, data[reg_id]);
476 reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
477 mcs_reg_write(mcs, reg, mask[reg_id]);
478 }
479 } else {
480 for (reg_id = 0; reg_id < 4; reg_id++) {
481 reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
482 mcs_reg_write(mcs, reg, data[reg_id]);
483 reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
484 mcs_reg_write(mcs, reg, mask[reg_id]);
485 }
486 }
487 }
488
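/* Install the reserved flow id and SecY entries used for bypass traffic:
 * the TCAM masks are programmed to match-all, the RX SecY policy sets
 * validate-frames to null with the controlled port enabled, and the TX
 * policy enables the controlled port with the maximum MTU.
 */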
int mcs_install_flowid_bypass_entry(struct mcs *mcs)
490 {
491 int flow_id, secy_id, reg_id;
492 struct secy_mem_map map;
493 u64 reg, plcy = 0;
494
495 /* Flow entry */
496 flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
497 for (reg_id = 0; reg_id < 4; reg_id++) {
498 reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
499 mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
500 }
501 for (reg_id = 0; reg_id < 4; reg_id++) {
502 reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
503 mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
504 }
505 /* secy */
506 secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;
507
508 /* Set validate frames to NULL and enable control port */
509 plcy = 0x7ull;
510 if (mcs->hw->mcs_blks > 1)
511 plcy = BIT_ULL(0) | 0x3ull << 4;
512 mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_RX);
513
514 /* Enable control port and set mtu to max */
515 plcy = BIT_ULL(0) | GENMASK_ULL(43, 28);
516 if (mcs->hw->mcs_blks > 1)
517 plcy = BIT_ULL(0) | GENMASK_ULL(63, 48);
518 mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_TX);
519
520 /* Map flowid to secy */
521 map.secy = secy_id;
522 map.ctrl_pkt = 0;
523 map.flow_id = flow_id;
524 mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_RX);
525 map.sc = secy_id;
526 mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_TX);
527
528 /* Enable Flowid entry */
529 mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
530 mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);
531 return 0;
532 }
533
void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir)
535 {
536 struct mcs_rsrc_map *map;
537 int flow_id;
538
539 if (dir == MCS_RX)
540 map = &mcs->rx;
541 else
542 map = &mcs->tx;
543
544 /* Clear secy memory to zero */
545 mcs_secy_plcy_write(mcs, 0, secy_id, dir);
546
547 /* Disable the tcam entry using this secy */
548 for (flow_id = 0; flow_id < map->flow_ids.max; flow_id++) {
549 if (map->flowid2secy_map[flow_id] != secy_id)
550 continue;
551 mcs_ena_dis_flowid_entry(mcs, flow_id, dir, false);
552 }
553 }
554
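/* Allocate a control packet rule: find the first free bit at or after
 * @offset in the rule bitmap, mark it used and record the owning pcifunc.
 */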
int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc)
556 {
557 int rsrc_id;
558
559 if (!rsrc->bmap)
560 return -EINVAL;
561
562 rsrc_id = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, offset, 1, 0);
563 if (rsrc_id >= rsrc->max)
564 return -ENOSPC;
565
566 bitmap_set(rsrc->bmap, rsrc_id, 1);
567 pf_map[rsrc_id] = pcifunc;
568
569 return rsrc_id;
570 }
571
int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req)
573 {
574 u16 pcifunc = req->hdr.pcifunc;
575 struct mcs_rsrc_map *map;
576 u64 dis, reg;
577 int id, rc;
578
579 reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
580 map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
581
582 if (req->all) {
583 for (id = 0; id < map->ctrlpktrule.max; id++) {
584 if (map->ctrlpktrule2pf_map[id] != pcifunc)
585 continue;
586 mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, id, pcifunc);
587 dis = mcs_reg_read(mcs, reg);
588 dis &= ~BIT_ULL(id);
589 mcs_reg_write(mcs, reg, dis);
590 }
591 return 0;
592 }
593
594 rc = mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, req->rule_idx, pcifunc);
595 dis = mcs_reg_read(mcs, reg);
596 dis &= ~BIT_ULL(req->rule_idx);
597 mcs_reg_write(mcs, reg, dis);
598
599 return rc;
600 }
601
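/* Program one control packet classification rule (ethertype, DA, DA range,
 * DA + ethertype combo or MAC) into the PEX RX/TX rule registers and set
 * the corresponding bit in the rule enable register.
 */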
int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req)
603 {
604 u64 reg, enb;
605 u64 idx;
606
607 switch (req->rule_type) {
608 case MCS_CTRL_PKT_RULE_TYPE_ETH:
609 req->data0 &= GENMASK(15, 0);
610 if (req->data0 != ETH_P_PAE)
611 return -EINVAL;
612
613 idx = req->rule_idx - MCS_CTRLPKT_ETYPE_RULE_OFFSET;
614 reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(idx) :
615 MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(idx);
616
617 mcs_reg_write(mcs, reg, req->data0);
618 break;
619 case MCS_CTRL_PKT_RULE_TYPE_DA:
620 if (!(req->data0 & BIT_ULL(40)))
621 return -EINVAL;
622
623 idx = req->rule_idx - MCS_CTRLPKT_DA_RULE_OFFSET;
624 reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_DAX(idx) :
625 MCSX_PEX_TX_SLAVE_RULE_DAX(idx);
626
627 mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
628 break;
629 case MCS_CTRL_PKT_RULE_TYPE_RANGE:
630 if (!(req->data0 & BIT_ULL(40)) || !(req->data1 & BIT_ULL(40)))
631 return -EINVAL;
632
633 idx = req->rule_idx - MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
634 if (req->dir == MCS_RX) {
635 reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(idx);
636 mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
637 reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(idx);
638 mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
639 } else {
640 reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(idx);
641 mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
642 reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(idx);
643 mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
644 }
645 break;
646 case MCS_CTRL_PKT_RULE_TYPE_COMBO:
647 req->data2 &= GENMASK(15, 0);
648 if (req->data2 != ETH_P_PAE || !(req->data0 & BIT_ULL(40)) ||
649 !(req->data1 & BIT_ULL(40)))
650 return -EINVAL;
651
652 idx = req->rule_idx - MCS_CTRLPKT_COMBO_RULE_OFFSET;
653 if (req->dir == MCS_RX) {
654 reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(idx);
655 mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
656 reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(idx);
657 mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
658 reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(idx);
659 mcs_reg_write(mcs, reg, req->data2);
660 } else {
661 reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(idx);
662 mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
663 reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(idx);
664 mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
665 reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(idx);
666 mcs_reg_write(mcs, reg, req->data2);
667 }
668 break;
669 case MCS_CTRL_PKT_RULE_TYPE_MAC:
670 if (!(req->data0 & BIT_ULL(40)))
671 return -EINVAL;
672
673 idx = req->rule_idx - MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
674 reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_MAC :
675 MCSX_PEX_TX_SLAVE_RULE_MAC;
676
677 mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
678 break;
679 }
680
681 reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
682
683 enb = mcs_reg_read(mcs, reg);
684 enb |= BIT_ULL(req->rule_idx);
685 mcs_reg_write(mcs, reg, enb);
686
687 return 0;
688 }
689
int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc)
691 {
692 /* Check if the rsrc_id is mapped to PF/VF */
693 if (pf_map[rsrc_id] != pcifunc)
694 return -EINVAL;
695
696 rvu_free_rsrc(rsrc, rsrc_id);
697 pf_map[rsrc_id] = 0;
698 return 0;
699 }
700
701 /* Free all the cam resources mapped to pf */
int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc)
703 {
704 struct mcs_rsrc_map *map;
705 int id;
706
707 if (dir == MCS_RX)
708 map = &mcs->rx;
709 else
710 map = &mcs->tx;
711
712 /* free tcam entries */
713 for (id = 0; id < map->flow_ids.max; id++) {
714 if (map->flowid2pf_map[id] != pcifunc)
715 continue;
716 mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map,
717 id, pcifunc);
718 mcs_ena_dis_flowid_entry(mcs, id, dir, false);
719 }
720
721 /* free secy entries */
722 for (id = 0; id < map->secy.max; id++) {
723 if (map->secy2pf_map[id] != pcifunc)
724 continue;
725 mcs_free_rsrc(&map->secy, map->secy2pf_map,
726 id, pcifunc);
727 mcs_clear_secy_plcy(mcs, id, dir);
728 }
729
730 /* free sc entries */
731 for (id = 0; id < map->secy.max; id++) {
732 if (map->sc2pf_map[id] != pcifunc)
733 continue;
734 mcs_free_rsrc(&map->sc, map->sc2pf_map, id, pcifunc);
735
736 /* Disable SC CAM only on RX side */
737 if (dir == MCS_RX)
738 mcs_ena_dis_sc_cam_entry(mcs, id, false);
739 }
740
741 /* free sa entries */
742 for (id = 0; id < map->sa.max; id++) {
743 if (map->sa2pf_map[id] != pcifunc)
744 continue;
745 mcs_free_rsrc(&map->sa, map->sa2pf_map, id, pcifunc);
746 }
747 return 0;
748 }
749
int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc)
751 {
752 int rsrc_id;
753
754 rsrc_id = rvu_alloc_rsrc(rsrc);
755 if (rsrc_id < 0)
756 return -ENOMEM;
757 pf_map[rsrc_id] = pcifunc;
758 return rsrc_id;
759 }
760
int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flow_id, u8 *secy_id,
		       u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir)
763 {
764 struct mcs_rsrc_map *map;
765 int id;
766
767 if (dir == MCS_RX)
768 map = &mcs->rx;
769 else
770 map = &mcs->tx;
771
772 id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
773 if (id < 0)
774 return -ENOMEM;
775 *flow_id = id;
776
777 id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
778 if (id < 0)
779 return -ENOMEM;
780 *secy_id = id;
781
782 id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
783 if (id < 0)
784 return -ENOMEM;
785 *sc_id = id;
786
787 id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
788 if (id < 0)
789 return -ENOMEM;
790 *sa1_id = id;
791
792 id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
793 if (id < 0)
794 return -ENOMEM;
795 *sa2_id = id;
796
797 return 0;
798 }
799
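/* CN10KB: a TX packet hit XPN == 0. Walk the in-use TX SCs, work out from
 * SA_MAP_MEM_0 which SA of the SC was active (and hence wrapped) and
 * notify the owning PF/VF through the interrupt work queue.
 */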
static void cn10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
801 {
802 struct mcs_intr_event event = { 0 };
803 struct rsrc_bmap *sc_bmap;
804 u64 val;
805 int sc;
806
807 sc_bmap = &mcs->tx.sc;
808
809 event.mcs_id = mcs->mcs_id;
810 event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
811
812 for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
813 val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
814
815 if (mcs->tx_sa_active[sc])
816 /* SA_index1 was used and got expired */
817 event.sa_id = (val >> 9) & 0xFF;
818 else
819 /* SA_index0 was used and got expired */
820 event.sa_id = val & 0xFF;
821
822 event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
823 mcs_add_intr_wq_entry(mcs, &event);
824 }
825 }
826
static void cn10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
828 {
829 struct mcs_intr_event event = { 0 };
830 struct rsrc_bmap *sc_bmap;
831 u64 val, status;
832 int sc;
833
834 sc_bmap = &mcs->tx.sc;
835
836 event.mcs_id = mcs->mcs_id;
837 event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;
838
/* The TX SA interrupt is raised only if auto rekey is enabled.
 * The MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit toggles when
 * one of the two SAs mapped to the SC expires: tx_sa_active == 0 means
 * the SA at SA_index1 expired, otherwise the SA at SA_index0 expired.
 */
844 for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
845 val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
/* Process only SCs that have auto rekey enabled */
847 if (!((val >> 18) & 0x1))
848 continue;
849
850 status = (val >> 21) & 0x1;
851
852 /* Check if tx_sa_active status had changed */
853 if (status == mcs->tx_sa_active[sc])
854 continue;
855 /* SA_index0 is expired */
856 if (status)
857 event.sa_id = val & 0xFF;
858 else
859 event.sa_id = (val >> 9) & 0xFF;
860
861 event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
862 mcs_add_intr_wq_entry(mcs, &event);
863 }
864 }
865
static void mcs_rx_pn_thresh_reached_handler(struct mcs *mcs)
867 {
868 struct mcs_intr_event event = { 0 };
869 int sa, reg;
870 u64 intr;
871
872 /* Check expired SAs */
873 for (reg = 0; reg < (mcs->hw->sa_entries / 64); reg++) {
/* A set bit in *PN_THRESH_REACHEDX means the
 * corresponding SA has expired (crossed its PN threshold).
 */
877 intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(reg));
878 for (sa = 0; sa < 64; sa++) {
879 if (!(intr & BIT_ULL(sa)))
880 continue;
881
882 event.mcs_id = mcs->mcs_id;
883 event.intr_mask = MCS_CPM_RX_PN_THRESH_REACHED_INT;
884 event.sa_id = sa + (reg * 64);
885 event.pcifunc = mcs->rx.sa2pf_map[event.sa_id];
886 mcs_add_intr_wq_entry(mcs, &event);
887 }
888 }
889 }
890
static void mcs_rx_misc_intr_handler(struct mcs *mcs, u64 intr)
892 {
893 struct mcs_intr_event event = { 0 };
894
895 event.mcs_id = mcs->mcs_id;
896 event.pcifunc = mcs->pf_map[0];
897
898 if (intr & MCS_CPM_RX_INT_SECTAG_V_EQ1)
899 event.intr_mask = MCS_CPM_RX_SECTAG_V_EQ1_INT;
900 if (intr & MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1)
901 event.intr_mask |= MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT;
902 if (intr & MCS_CPM_RX_INT_SL_GTE48)
903 event.intr_mask |= MCS_CPM_RX_SECTAG_SL_GTE48_INT;
904 if (intr & MCS_CPM_RX_INT_ES_EQ1_SC_EQ1)
905 event.intr_mask |= MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT;
906 if (intr & MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1)
907 event.intr_mask |= MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT;
908 if (intr & MCS_CPM_RX_INT_PACKET_XPN_EQ0)
909 event.intr_mask |= MCS_CPM_RX_PACKET_XPN_EQ0_INT;
910
911 mcs_add_intr_wq_entry(mcs, &event);
912 }
913
static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
915 {
916 struct mcs_intr_event event = { 0 };
917
918 if (!(intr & MCS_CPM_TX_INT_SA_NOT_VALID))
919 return;
920
921 event.mcs_id = mcs->mcs_id;
922 event.pcifunc = mcs->pf_map[0];
923
924 event.intr_mask = MCS_CPM_TX_SA_NOT_VALID_INT;
925
926 mcs_add_intr_wq_entry(mcs, &event);
927 }
928
static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
930 {
931 struct mcs_intr_event event = { 0 };
932 int i;
933
934 if (!(intr & MCS_BBE_INT_MASK))
935 return;
936
937 event.mcs_id = mcs->mcs_id;
938 event.pcifunc = mcs->pf_map[0];
939
940 for (i = 0; i < MCS_MAX_BBE_INT; i++) {
941 if (!(intr & BIT_ULL(i)))
942 continue;
943
944 /* Lower nibble denotes data fifo overflow interrupts and
945 * upper nibble indicates policy fifo overflow interrupts.
946 */
947 if (intr & 0xFULL)
948 event.intr_mask = (dir == MCS_RX) ?
949 MCS_BBE_RX_DFIFO_OVERFLOW_INT :
950 MCS_BBE_TX_DFIFO_OVERFLOW_INT;
951 else
952 event.intr_mask = (dir == MCS_RX) ?
953 MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
954 MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
955
/* Report the lmac_id that ran into the BBE fatal error */
957 event.lmac_id = i & 0x3ULL;
958 mcs_add_intr_wq_entry(mcs, &event);
959 }
960 }
961
static void mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
963 {
964 struct mcs_intr_event event = { 0 };
965 int i;
966
967 if (!(intr & MCS_PAB_INT_MASK))
968 return;
969
970 event.mcs_id = mcs->mcs_id;
971 event.pcifunc = mcs->pf_map[0];
972
973 for (i = 0; i < MCS_MAX_PAB_INT; i++) {
974 if (!(intr & BIT_ULL(i)))
975 continue;
976
977 event.intr_mask = (dir == MCS_RX) ? MCS_PAB_RX_CHAN_OVERFLOW_INT :
978 MCS_PAB_TX_CHAN_OVERFLOW_INT;
979
/* Report the lmac_id that ran into the PAB fatal error */
981 event.lmac_id = i;
982 mcs_add_intr_wq_entry(mcs, &event);
983 }
984 }
985
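/* Top-level MCS interrupt handler. The IP interrupt is masked and acked
 * first, then MCSX_TOP_SLAVE_INT_SUM is read to find which block (CPM
 * RX/TX, BBE RX/TX or PAB RX/TX) raised the interrupt; each block's
 * status is handled and cleared before the IP interrupt is re-enabled.
 */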
static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
987 {
988 struct mcs *mcs = (struct mcs *)mcs_irq;
989 u64 intr, cpm_intr, bbe_intr, pab_intr;
990
991 /* Disable and clear the interrupt */
992 mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
993 mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
994
/* Check which block raised the interrupt */
996 intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);
997
998 /* CPM RX */
999 if (intr & MCS_CPM_RX_INT_ENA) {
1000 /* Check for PN thresh interrupt bit */
1001 cpm_intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_RX_INT);
1002
1003 if (cpm_intr & MCS_CPM_RX_INT_PN_THRESH_REACHED)
1004 mcs_rx_pn_thresh_reached_handler(mcs);
1005
1006 if (cpm_intr & MCS_CPM_RX_INT_ALL)
1007 mcs_rx_misc_intr_handler(mcs, cpm_intr);
1008
1009 /* Clear the interrupt */
1010 mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT, cpm_intr);
1011 }
1012
1013 /* CPM TX */
1014 if (intr & MCS_CPM_TX_INT_ENA) {
1015 cpm_intr = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_INT);
1016
1017 if (cpm_intr & MCS_CPM_TX_INT_PN_THRESH_REACHED) {
1018 if (mcs->hw->mcs_blks > 1)
1019 cnf10kb_mcs_tx_pn_thresh_reached_handler(mcs);
1020 else
1021 cn10kb_mcs_tx_pn_thresh_reached_handler(mcs);
1022 }
1023
1024 if (cpm_intr & MCS_CPM_TX_INT_SA_NOT_VALID)
1025 mcs_tx_misc_intr_handler(mcs, cpm_intr);
1026
1027 if (cpm_intr & MCS_CPM_TX_INT_PACKET_XPN_EQ0) {
1028 if (mcs->hw->mcs_blks > 1)
1029 cnf10kb_mcs_tx_pn_wrapped_handler(mcs);
1030 else
1031 cn10kb_mcs_tx_pn_wrapped_handler(mcs);
1032 }
1033 /* Clear the interrupt */
1034 mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT, cpm_intr);
1035 }
1036
1037 /* BBE RX */
1038 if (intr & MCS_BBE_RX_INT_ENA) {
1039 bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
1040 mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
1041
1042 /* Clear the interrupt */
1043 mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
1044 mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT, bbe_intr);
1045 }
1046
1047 /* BBE TX */
1048 if (intr & MCS_BBE_TX_INT_ENA) {
1049 bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
1050 mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
1051
1052 /* Clear the interrupt */
1053 mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
1054 mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT, bbe_intr);
1055 }
1056
1057 /* PAB RX */
1058 if (intr & MCS_PAB_RX_INT_ENA) {
1059 pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
1060 mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
1061
1062 /* Clear the interrupt */
1063 mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
1064 mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT, pab_intr);
1065 }
1066
1067 /* PAB TX */
1068 if (intr & MCS_PAB_TX_INT_ENA) {
1069 pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
1070 mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
1071
1072 /* Clear the interrupt */
1073 mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
1074 mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
1075 }
1076
1077 /* Enable the interrupt */
1078 mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
1079
1080 return IRQ_HANDLED;
1081 }
1082
static void *alloc_mem(struct mcs *mcs, int n)
1084 {
1085 return devm_kcalloc(mcs->dev, n, sizeof(u16), GFP_KERNEL);
1086 }
1087
static int mcs_alloc_struct_mem(struct mcs *mcs, struct mcs_rsrc_map *res)
1089 {
1090 struct hwinfo *hw = mcs->hw;
1091 int err;
1092
1093 res->flowid2pf_map = alloc_mem(mcs, hw->tcam_entries);
1094 if (!res->flowid2pf_map)
1095 return -ENOMEM;
1096
1097 res->secy2pf_map = alloc_mem(mcs, hw->secy_entries);
1098 if (!res->secy2pf_map)
1099 return -ENOMEM;
1100
1101 res->sc2pf_map = alloc_mem(mcs, hw->sc_entries);
1102 if (!res->sc2pf_map)
1103 return -ENOMEM;
1104
1105 res->sa2pf_map = alloc_mem(mcs, hw->sa_entries);
1106 if (!res->sa2pf_map)
1107 return -ENOMEM;
1108
1109 res->flowid2secy_map = alloc_mem(mcs, hw->tcam_entries);
1110 if (!res->flowid2secy_map)
1111 return -ENOMEM;
1112
1113 res->ctrlpktrule2pf_map = alloc_mem(mcs, MCS_MAX_CTRLPKT_RULES);
1114 if (!res->ctrlpktrule2pf_map)
1115 return -ENOMEM;
1116
1117 res->flow_ids.max = hw->tcam_entries - MCS_RSRC_RSVD_CNT;
1118 err = rvu_alloc_bitmap(&res->flow_ids);
1119 if (err)
1120 return err;
1121
1122 res->secy.max = hw->secy_entries - MCS_RSRC_RSVD_CNT;
1123 err = rvu_alloc_bitmap(&res->secy);
1124 if (err)
1125 return err;
1126
1127 res->sc.max = hw->sc_entries;
1128 err = rvu_alloc_bitmap(&res->sc);
1129 if (err)
1130 return err;
1131
1132 res->sa.max = hw->sa_entries;
1133 err = rvu_alloc_bitmap(&res->sa);
1134 if (err)
1135 return err;
1136
1137 res->ctrlpktrule.max = MCS_MAX_CTRLPKT_RULES;
1138 err = rvu_alloc_bitmap(&res->ctrlpktrule);
1139 if (err)
1140 return err;
1141
1142 return 0;
1143 }
1144
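/* Allocate the MSI-X vectors, register the IP interrupt handler and enable
 * the CPM, BBE and PAB interrupt sources. Also allocates the tx_sa_active
 * tracking array used by the TX PN threshold handler.
 */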
static int mcs_register_interrupts(struct mcs *mcs)
1146 {
1147 int ret = 0;
1148
1149 mcs->num_vec = pci_msix_vec_count(mcs->pdev);
1150
1151 ret = pci_alloc_irq_vectors(mcs->pdev, mcs->num_vec,
1152 mcs->num_vec, PCI_IRQ_MSIX);
1153 if (ret < 0) {
1154 dev_err(mcs->dev, "MCS Request for %d msix vector failed err:%d\n",
1155 mcs->num_vec, ret);
1156 return ret;
1157 }
1158
1159 ret = request_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP),
1160 mcs_ip_intr_handler, 0, "MCS_IP", mcs);
1161 if (ret) {
1162 dev_err(mcs->dev, "MCS IP irq registration failed\n");
1163 goto exit;
1164 }
1165
/* Enable the MCS IP interrupt */
1167 mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
1168
/* Enable CPM, BBE and PAB Rx/Tx interrupt summaries */
1170 mcs_reg_write(mcs, MCSX_TOP_SLAVE_INT_SUM_ENB,
1171 MCS_CPM_RX_INT_ENA | MCS_CPM_TX_INT_ENA |
1172 MCS_BBE_RX_INT_ENA | MCS_BBE_TX_INT_ENA |
1173 MCS_PAB_RX_INT_ENA | MCS_PAB_TX_INT_ENA);
1174
1175 mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
1176 mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);
1177
1178 mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xff);
1179 mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xff);
1180
1181 mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xff);
1182 mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff);
1183
1184 mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
1185 if (!mcs->tx_sa_active) {
1186 ret = -ENOMEM;
1187 goto exit;
1188 }
1189
1190 return ret;
1191 exit:
1192 pci_free_irq_vectors(mcs->pdev);
1193 mcs->num_vec = 0;
1194 return ret;
1195 }
1196
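/* Return the number of MCS blocks discovered, i.e. the highest mcs_id in
 * the global list plus one, or 0 if no MCS device is present.
 */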
int mcs_get_blkcnt(void)
1198 {
1199 struct mcs *mcs;
1200 int idmax = -ENODEV;
1201
/* Check whether an MCS block is present in hardware */
1203 if (!pci_dev_present(mcs_id_table))
1204 return 0;
1205
1206 list_for_each_entry(mcs, &mcs_list, mcs_list)
1207 if (mcs->mcs_id > idmax)
1208 idmax = mcs->mcs_id;
1209
1210 if (idmax < 0)
1211 return 0;
1212
1213 return idmax + 1;
1214 }
1215
struct mcs *mcs_get_pdata(int mcs_id)
1217 {
1218 struct mcs *mcs_dev;
1219
1220 list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
1221 if (mcs_dev->mcs_id == mcs_id)
1222 return mcs_dev;
1223 }
1224 return NULL;
1225 }
1226
void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
1228 {
1229 u64 val = 0;
1230
1231 mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id),
1232 req->port_mode & MCS_PORT_MODE_MASK);
1233
1234 req->cstm_tag_rel_mode_sel &= 0x3;
1235
1236 if (mcs->hw->mcs_blks > 1) {
1237 req->fifo_skid &= MCS_PORT_FIFO_SKID_MASK;
1238 val = (u32)req->fifo_skid << 0x10;
1239 val |= req->fifo_skid;
1240 mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id), val);
1241 mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id),
1242 req->cstm_tag_rel_mode_sel);
1243 val = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
1244
1245 if (req->custom_hdr_enb)
1246 val |= BIT_ULL(req->port_id);
1247 else
1248 val &= ~BIT_ULL(req->port_id);
1249
1250 mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, val);
1251 } else {
1252 val = mcs_reg_read(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id));
1253 val |= (req->cstm_tag_rel_mode_sel << 2);
1254 mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id), val);
1255 }
1256 }
1257
void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
		      struct mcs_port_cfg_get_rsp *rsp)
1260 {
1261 u64 reg = 0;
1262
1263 rsp->port_mode = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id)) &
1264 MCS_PORT_MODE_MASK;
1265
1266 if (mcs->hw->mcs_blks > 1) {
1267 reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id);
1268 rsp->fifo_skid = mcs_reg_read(mcs, reg) & MCS_PORT_FIFO_SKID_MASK;
1269 reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id);
1270 rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) & 0x3;
1271 if (mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION) & BIT_ULL(req->port_id))
1272 rsp->custom_hdr_enb = 1;
1273 } else {
1274 reg = MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id);
1275 rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) >> 2;
1276 }
1277
1278 rsp->port_id = req->port_id;
1279 rsp->mcs_id = req->mcs_id;
1280 }
1281
void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
			    struct mcs_custom_tag_cfg_get_rsp *rsp)
1284 {
1285 u64 reg = 0, val = 0;
1286 u8 idx;
1287
1288 for (idx = 0; idx < MCS_MAX_CUSTOM_TAGS; idx++) {
1289 if (mcs->hw->mcs_blks > 1)
1290 reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(idx) :
1291 MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(idx);
1292 else
1293 reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_VLAN_CFGX(idx) :
1294 MCSX_PEX_TX_SLAVE_VLAN_CFGX(idx);
1295
1296 val = mcs_reg_read(mcs, reg);
1297 if (mcs->hw->mcs_blks > 1) {
1298 rsp->cstm_etype[idx] = val & GENMASK(15, 0);
1299 rsp->cstm_indx[idx] = (val >> 0x16) & 0x3;
1300 reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_ETYPE_ENABLE :
1301 MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
1302 rsp->cstm_etype_en = mcs_reg_read(mcs, reg) & 0xFF;
1303 } else {
1304 rsp->cstm_etype[idx] = (val >> 0x1) & GENMASK(15, 0);
1305 rsp->cstm_indx[idx] = (val >> 0x11) & 0x3;
1306 rsp->cstm_etype_en |= (val & 0x1) << idx;
1307 }
1308 }
1309
1310 rsp->mcs_id = req->mcs_id;
1311 rsp->dir = req->dir;
1312 }
1313
void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
1315 {
1316 u64 reg = MCSX_MCS_TOP_SLAVE_PORT_RESET(port_id);
1317
1318 mcs_reg_write(mcs, reg, reset & 0x1);
1319 }
1320
1321 /* Set lmac to bypass/operational mode */
void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
1323 {
1324 u64 reg;
1325
1326 reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(lmac_id * 2);
1327 mcs_reg_write(mcs, reg, (u64)mode);
1328 }
1329
void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn)
1331 {
1332 u64 reg;
1333
1334 if (pn->dir == MCS_RX)
1335 reg = pn->xpn ? MCSX_CPM_RX_SLAVE_XPN_THRESHOLD : MCSX_CPM_RX_SLAVE_PN_THRESHOLD;
1336 else
1337 reg = pn->xpn ? MCSX_CPM_TX_SLAVE_XPN_THRESHOLD : MCSX_CPM_TX_SLAVE_PN_THRESHOLD;
1338
1339 mcs_reg_write(mcs, reg, pn->threshold);
1340 }
1341
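/* CN10KB parser setup: program the VLAN C-tag (0x8100) and S-tag (0x88a8)
 * ethertypes into the RX and TX PEX VLAN config entries.
 */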
void cn10kb_mcs_parser_cfg(struct mcs *mcs)
1343 {
1344 u64 reg, val;
1345
1346 /* VLAN CTag */
1347 val = BIT_ULL(0) | (0x8100ull & 0xFFFF) << 1 | BIT_ULL(17);
1348 /* RX */
1349 reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(0);
1350 mcs_reg_write(mcs, reg, val);
1351
1352 /* TX */
1353 reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(0);
1354 mcs_reg_write(mcs, reg, val);
1355
1356 /* VLAN STag */
1357 val = BIT_ULL(0) | (0x88a8ull & 0xFFFF) << 1 | BIT_ULL(18);
1358 /* RX */
1359 reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(1);
1360 mcs_reg_write(mcs, reg, val);
1361
1362 /* TX */
1363 reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(1);
1364 mcs_reg_write(mcs, reg, val);
1365 }
1366
static void mcs_lmac_init(struct mcs *mcs, int lmac_id)
1368 {
1369 u64 reg;
1370
1371 /* Port mode 25GB */
1372 reg = MCSX_PAB_RX_SLAVE_PORT_CFGX(lmac_id);
1373 mcs_reg_write(mcs, reg, 0);
1374
1375 if (mcs->hw->mcs_blks > 1) {
1376 reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(lmac_id);
1377 mcs_reg_write(mcs, reg, 0xe000e);
1378 return;
1379 }
1380
1381 reg = MCSX_PAB_TX_SLAVE_PORT_CFGX(lmac_id);
1382 mcs_reg_write(mcs, reg, 0);
1383 }
1384
int mcs_set_lmac_channels(int mcs_id, u16 base)
1386 {
1387 struct mcs *mcs;
1388 int lmac;
1389 u64 cfg;
1390
1391 mcs = mcs_get_pdata(mcs_id);
1392 if (!mcs)
1393 return -ENODEV;
1394 for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
1395 cfg = mcs_reg_read(mcs, MCSX_LINK_LMACX_CFG(lmac));
1396 cfg &= ~(MCSX_LINK_LMAC_BASE_MASK | MCSX_LINK_LMAC_RANGE_MASK);
1397 cfg |= FIELD_PREP(MCSX_LINK_LMAC_RANGE_MASK, ilog2(16));
1398 cfg |= FIELD_PREP(MCSX_LINK_LMAC_BASE_MASK, base);
1399 mcs_reg_write(mcs, MCSX_LINK_LMACX_CFG(lmac), cfg);
1400 base += 16;
1401 }
1402 return 0;
1403 }
1404
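/* Trigger X2P interface calibration by setting bit 5 of MCSX_MIL_GLOBAL,
 * poll MCSX_MIL_RX_GBL_STATUS for completion (up to ~20ms), verify that
 * every X2P interface responded and then clear the calibration bit.
 */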
static int mcs_x2p_calibration(struct mcs *mcs)
1406 {
1407 unsigned long timeout = jiffies + usecs_to_jiffies(20000);
1408 int i, err = 0;
1409 u64 val;
1410
1411 /* set X2P calibration */
1412 val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
1413 val |= BIT_ULL(5);
1414 mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
1415
1416 /* Wait for calibration to complete */
1417 while (!(mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS) & BIT_ULL(0))) {
1418 if (time_before(jiffies, timeout)) {
1419 usleep_range(80, 100);
1420 continue;
1421 } else {
1422 err = -EBUSY;
1423 dev_err(mcs->dev, "MCS X2P calibration failed..ignoring\n");
1424 return err;
1425 }
1426 }
1427
1428 val = mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS);
1429 for (i = 0; i < mcs->hw->mcs_x2p_intf; i++) {
1430 if (val & BIT_ULL(1 + i))
1431 continue;
1432 err = -EBUSY;
1433 dev_err(mcs->dev, "MCS:%d didn't respond to X2P calibration\n", i);
1434 }
1435 /* Clear X2P calibrate */
1436 mcs_reg_write(mcs, MCSX_MIL_GLOBAL, mcs_reg_read(mcs, MCSX_MIL_GLOBAL) & ~BIT_ULL(5));
1437
1438 return err;
1439 }
1440
static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
1442 {
1443 u64 val;
1444
1445 /* Set MCS to external bypass */
1446 val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
1447 if (bypass)
1448 val |= BIT_ULL(6);
1449 else
1450 val &= ~BIT_ULL(6);
1451 mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
1452 }
1453
static void mcs_global_cfg(struct mcs *mcs)
1455 {
1456 /* Disable external bypass */
1457 mcs_set_external_bypass(mcs, false);
1458
1459 /* Reset TX/RX stats memory */
1460 mcs_reg_write(mcs, MCSX_CSE_RX_SLAVE_STATS_CLEAR, 0x1F);
1461 mcs_reg_write(mcs, MCSX_CSE_TX_SLAVE_STATS_CLEAR, 0x1F);
1462
1463 /* Set MCS to perform standard IEEE802.1AE macsec processing */
1464 if (mcs->hw->mcs_blks == 1) {
1465 mcs_reg_write(mcs, MCSX_IP_MODE, BIT_ULL(3));
1466 return;
1467 }
1468
1469 mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_ENTRY, 0xe4);
1470 mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_LEN, 4);
1471 }
1472
void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
1474 {
1475 struct hwinfo *hw = mcs->hw;
1476
1477 hw->tcam_entries = 128; /* TCAM entries */
1478 hw->secy_entries = 128; /* SecY entries */
1479 hw->sc_entries = 128; /* SC CAM entries */
1480 hw->sa_entries = 256; /* SA entries */
1481 hw->lmac_cnt = 20; /* lmacs/ports per mcs block */
hw->mcs_x2p_intf = 5; /* x2p calibration interfaces */
1483 hw->mcs_blks = 1; /* MCS blocks */
1484 }
1485
1486 static struct mcs_ops cn10kb_mcs_ops = {
1487 .mcs_set_hw_capabilities = cn10kb_mcs_set_hw_capabilities,
1488 .mcs_parser_cfg = cn10kb_mcs_parser_cfg,
1489 .mcs_tx_sa_mem_map_write = cn10kb_mcs_tx_sa_mem_map_write,
1490 .mcs_rx_sa_mem_map_write = cn10kb_mcs_rx_sa_mem_map_write,
1491 .mcs_flowid_secy_map = cn10kb_mcs_flowid_secy_map,
1492 };
1493
static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1495 {
1496 struct device *dev = &pdev->dev;
1497 int lmac, err = 0;
1498 struct mcs *mcs;
1499
1500 mcs = devm_kzalloc(dev, sizeof(*mcs), GFP_KERNEL);
1501 if (!mcs)
1502 return -ENOMEM;
1503
1504 mcs->hw = devm_kzalloc(dev, sizeof(struct hwinfo), GFP_KERNEL);
1505 if (!mcs->hw)
1506 return -ENOMEM;
1507
1508 err = pci_enable_device(pdev);
1509 if (err) {
1510 dev_err(dev, "Failed to enable PCI device\n");
1511 pci_set_drvdata(pdev, NULL);
1512 return err;
1513 }
1514
1515 err = pci_request_regions(pdev, DRV_NAME);
1516 if (err) {
1517 dev_err(dev, "PCI request regions failed 0x%x\n", err);
1518 goto exit;
1519 }
1520
1521 mcs->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
1522 if (!mcs->reg_base) {
1523 dev_err(dev, "mcs: Cannot map CSR memory space, aborting\n");
1524 err = -ENOMEM;
1525 goto exit;
1526 }
1527
1528 pci_set_drvdata(pdev, mcs);
1529 mcs->pdev = pdev;
1530 mcs->dev = &pdev->dev;
1531
1532 if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B)
1533 mcs->mcs_ops = &cn10kb_mcs_ops;
1534 else
1535 mcs->mcs_ops = cnf10kb_get_mac_ops();
1536
1537 /* Set hardware capabilities */
1538 mcs->mcs_ops->mcs_set_hw_capabilities(mcs);
1539
1540 mcs_global_cfg(mcs);
1541
/* Perform X2P calibration */
1543 err = mcs_x2p_calibration(mcs);
1544 if (err)
1545 goto err_x2p;
1546
1547 mcs->mcs_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
1548 & MCS_ID_MASK;
1549
1550 /* Set mcs tx side resources */
1551 err = mcs_alloc_struct_mem(mcs, &mcs->tx);
1552 if (err)
1553 goto err_x2p;
1554
1555 /* Set mcs rx side resources */
1556 err = mcs_alloc_struct_mem(mcs, &mcs->rx);
1557 if (err)
1558 goto err_x2p;
1559
1560 /* per port config */
1561 for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
1562 mcs_lmac_init(mcs, lmac);
1563
1564 /* Parser configuration */
1565 mcs->mcs_ops->mcs_parser_cfg(mcs);
1566
1567 err = mcs_register_interrupts(mcs);
1568 if (err)
1569 goto exit;
1570
1571 list_add(&mcs->mcs_list, &mcs_list);
1572 mutex_init(&mcs->stats_lock);
1573
1574 return 0;
1575
1576 err_x2p:
1577 /* Enable external bypass */
1578 mcs_set_external_bypass(mcs, true);
1579 exit:
1580 pci_release_regions(pdev);
1581 pci_disable_device(pdev);
1582 pci_set_drvdata(pdev, NULL);
1583 return err;
1584 }
1585
static void mcs_remove(struct pci_dev *pdev)
1587 {
1588 struct mcs *mcs = pci_get_drvdata(pdev);
1589
1590 /* Set MCS to external bypass */
1591 mcs_set_external_bypass(mcs, true);
1592 pci_free_irq_vectors(pdev);
1593 pci_release_regions(pdev);
1594 pci_disable_device(pdev);
1595 pci_set_drvdata(pdev, NULL);
1596 }
1597
1598 struct pci_driver mcs_driver = {
1599 .name = DRV_NAME,
1600 .id_table = mcs_id_table,
1601 .probe = mcs_probe,
1602 .remove = mcs_remove,
1603 };
1604