// SPDX-License-Identifier: GPL-2.0
/* Marvell MACSEC hardware offload driver
 *
 * Copyright (C) 2022 Marvell.
 */

#include <linux/rtnetlink.h>
#include <linux/bitfield.h>
#include <net/macsec.h>
#include "otx2_common.h"

#define MCS_TCAM0_MAC_SA_MASK		GENMASK_ULL(63, 48)
#define MCS_TCAM1_MAC_SA_MASK		GENMASK_ULL(31, 0)
#define MCS_TCAM1_ETYPE_MASK		GENMASK_ULL(47, 32)
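/* Bit positions of the source MAC and Ethertype within the 64-bit TCAM data
 * words, as inferred from the masks above: word 0 carries part of the SMAC
 * in its top 16 bits, word 1 the remaining SMAC bits plus the Ethertype
 * (see cn10k_mcs_write_tx_flowid() and cn10k_mcs_write_rx_flowid() below).
 */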

#define MCS_SA_MAP_MEM_SA_USE		BIT_ULL(9)

#define MCS_RX_SECY_PLCY_RW_MASK	GENMASK_ULL(49, 18)
#define MCS_RX_SECY_PLCY_RP		BIT_ULL(17)
#define MCS_RX_SECY_PLCY_AUTH_ENA	BIT_ULL(16)
#define MCS_RX_SECY_PLCY_CIP		GENMASK_ULL(8, 5)
#define MCS_RX_SECY_PLCY_VAL		GENMASK_ULL(2, 1)
#define MCS_RX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_TX_SECY_PLCY_MTU		GENMASK_ULL(43, 28)
#define MCS_TX_SECY_PLCY_ST_TCI		GENMASK_ULL(27, 22)
#define MCS_TX_SECY_PLCY_ST_OFFSET	GENMASK_ULL(21, 15)
#define MCS_TX_SECY_PLCY_INS_MODE	BIT_ULL(14)
#define MCS_TX_SECY_PLCY_AUTH_ENA	BIT_ULL(13)
#define MCS_TX_SECY_PLCY_CIP		GENMASK_ULL(5, 2)
#define MCS_TX_SECY_PLCY_PROTECT	BIT_ULL(1)
#define MCS_TX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_GCM_AES_128			0
#define MCS_GCM_AES_256			1
#define MCS_GCM_AES_XPN_128		2
#define MCS_GCM_AES_XPN_256		3
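/* Only GCM-AES-128 is programmed for now: XPN cipher suites and key lengths
 * other than 16 bytes are rejected in cn10k_mdo_add_secy() below.
 */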

#define MCS_TCI_ES			0x40 /* end station */
#define MCS_TCI_SC			0x20 /* SCI present */
#define MCS_TCI_SCB			0x10 /* epon */
#define MCS_TCI_E			0x08 /* encryption */
#define MCS_TCI_C			0x04 /* changed text */
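
/* These flags map onto the SecTag TCI/AN octet of IEEE 802.1AE: bit 7 is the
 * version (V, always 0), bits 6..2 are ES/SC/SCB/E/C as above, and bits 1..0
 * carry the association number, which is programmed separately through the
 * TX SA policy (see cn10k_mcs_write_tx_sa_plcy()).
 */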

static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy)
{
	struct cn10k_mcs_txsc *txsc;

	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		if (txsc->sw_secy == secy)
			return txsc;
	}

	return NULL;
}

static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy,
						 struct macsec_rx_sc *rx_sc)
{
	struct cn10k_mcs_rxsc *rxsc;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy)
			return rxsc;
	}

	return NULL;
}

static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
{
	switch (rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		return "FLOW";
	case MCS_RSRC_TYPE_SC:
		return "SC";
	case MCS_RSRC_TYPE_SECY:
		return "SECY";
	case MCS_RSRC_TYPE_SA:
		return "SA";
	default:
		return "Unknown";
	}

	return "Unknown";
}

static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 *rsrc_id)
{
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_alloc_rsrc_req *req;
	struct mcs_alloc_rsrc_rsp *rsp;
	int ret = -ENOMEM;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_type = type;
	req->rsrc_cnt = 1;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							     0, &req->hdr);
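	/* Sanity check: the AF response must mirror the request parameters */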
	if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt ||
	    req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) {
		ret = -EINVAL;
		goto fail;
	}

	switch (rsp->rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		*rsrc_id = rsp->flow_ids[0];
		break;
	case MCS_RSRC_TYPE_SC:
		*rsrc_id = rsp->sc_ids[0];
		break;
	case MCS_RSRC_TYPE_SECY:
		*rsrc_id = rsp->secy_ids[0];
		break;
	case MCS_RSRC_TYPE_SA:
		*rsrc_id = rsp->sa_ids[0];
		break;
	default:
		ret = -EINVAL;
		goto fail;
	}

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Failed to allocate %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
	return ret;
}

static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 hw_rsrc_id,
				bool all)
{
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_free_rsrc_req *req;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_id = hw_rsrc_id;
	req->rsrc_type = type;
	req->dir = dir;
	if (all)
		req->all = 1;

	if (otx2_sync_mbox_msg(&pfvf->mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return;
fail:
	dev_err(pfvf->dev, "Failed to free %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
}

static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
{
	return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id);
}

static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
{
	return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id);
}

static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id)
{
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
}

static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id)
{
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
}

static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf,
				   struct macsec_secy *secy, u8 hw_secy_id)
{
	struct mcs_secy_plcy_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 policy;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window);
	if (secy->replay_protect)
		policy |= MCS_RX_SECY_PLCY_RP;

	policy |= MCS_RX_SECY_PLCY_AUTH_ENA;
	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, MCS_GCM_AES_128);
	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames);

	policy |= MCS_RX_SECY_PLCY_ENA;

	req->plcy = policy;
	req->secy_id = hw_secy_id;
	req->dir = MCS_RX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
				     struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
	struct mcs_flowid_entry_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

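	/* A set mask bit appears to mean "don't care": everything is masked
	 * out except the Ethertype bits, so the only match criterion for this
	 * RX flow entry is ETH_P_MACSEC.
	 */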
	req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;

	req->mask[0] = ~0ULL;
	req->mask[2] = ~0ULL;
	req->mask[3] = ~0ULL;

	req->flow_id = rxsc->hw_flow_id;
	req->secy_id = hw_secy_id;
	req->sc_id = rxsc->hw_sc_id;
	req->dir = MCS_RX;

	if (sw_rx_sc->active)
		req->ena = 1;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
	struct mcs_rx_sc_cam_write_req *sc_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
	if (!sc_req) {
		ret = -ENOMEM;
		goto fail;
	}

	sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci);
	sc_req->sc_id = rxsc->hw_sc_id;
	sc_req->secy_id = hw_secy_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_rxsc *rxsc,
				      u8 assoc_num, bool sa_in_use)
{
	unsigned char *src = rxsc->sa_key[assoc_num];
	struct mcs_sa_plcy_write_req *plcy_req;
	struct mcs_rx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	u8 reg, key_len;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

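	/* Copy the SA key into the 64-bit policy words, 8 bytes per register.
	 * key_len is currently always 16 (GCM-AES-128) given the key-length
	 * check in cn10k_mdo_add_secy(), so this loop runs twice.
	 */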
	for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
		memcpy((u8 *)&plcy_req->plcy[0][reg],
		       (src + reg * 8), 8);
		reg++;
	}

	plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_RX;

	map_req->sa_index = rxsc->hw_sa_id[assoc_num];
	map_req->sa_in_use = sa_in_use;
	map_req->sc_id = rxsc->hw_sc_id;
	map_req->an = assoc_num;

	/* Send two messages together */
	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf,
				    struct cn10k_mcs_rxsc *rxsc,
				    u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = rxsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_RX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc)
{
	struct mcs_secy_plcy_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	struct macsec_tx_sc *sw_tx_sc;
	/* Insert SecTag after 12 bytes (DA + SA) */
	u8 tag_offset = 12;
	u8 sectag_tci = 0;
	u64 policy;
	int ret;

	sw_tx_sc = &secy->tx_sc;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

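	/* Per IEEE 802.1AE the ES and SCB TCI bits are only meaningful when
	 * no explicit SCI is carried, hence they are considered only in the
	 * !send_sci case.
	 */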
	if (sw_tx_sc->send_sci) {
		sectag_tci |= MCS_TCI_SC;
	} else {
		if (sw_tx_sc->end_station)
			sectag_tci |= MCS_TCI_ES;
		if (sw_tx_sc->scb)
			sectag_tci |= MCS_TCI_SCB;
	}

	if (sw_tx_sc->encrypt)
		sectag_tci |= (MCS_TCI_E | MCS_TCI_C);

	policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
	/* Write SecTag excluding AN bits (1..0) */
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
	policy |= MCS_TX_SECY_PLCY_INS_MODE;
	policy |= MCS_TX_SECY_PLCY_AUTH_ENA;
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, MCS_GCM_AES_128);

	if (secy->protect_frames)
		policy |= MCS_TX_SECY_PLCY_PROTECT;

	/* If the encoding SA does not exist or is not active and protection
	 * is not requested, frames can be sent out as they are. Hence enable
	 * the policy irrespective of the SecY being operational when !protect.
	 */
	if (!secy->protect_frames || secy->operational)
		policy |= MCS_TX_SECY_PLCY_ENA;

	req->plcy = policy;
	req->secy_id = txsc->hw_secy_id_tx;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf,
				     struct macsec_secy *secy,
				     struct cn10k_mcs_txsc *txsc)
{
	struct mcs_flowid_entry_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 mac_sa;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	mac_sa = ether_addr_to_u64(secy->netdev->dev_addr);

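	/* The 48-bit source MAC is split across two TCAM data words: the low
	 * 16 bits of its u64 form go into word 0 (bits 63:48) and the upper
	 * 32 bits into word 1 (bits 31:0), per the hardware key layout.
	 */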
	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa);
	req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16);

	req->mask[0] = ~0ULL;
	req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK;

	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK;

	req->mask[2] = ~0ULL;
	req->mask[3] = ~0ULL;

	req->flow_id = txsc->hw_flow_id;
	req->secy_id = txsc->hw_secy_id_tx;
	req->sc_id = txsc->hw_sc_id;
	req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	req->dir = MCS_TX;
	/* This can be enabled since the stack xmits packets only when the interface is up */
	req->ena = 1;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc,
				   u8 sa_num, bool sa_active)
{
	struct mcs_tx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	/* Of all the SAs, only the encoding SA is linked to the SC */
	if (txsc->encoding_sa != sa_num)
		return 0;

	mutex_lock(&mbox->lock);

	map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

	map_req->sa_index0 = txsc->hw_sa_id[sa_num];
	map_req->sa_index0_vld = sa_active;
	map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	map_req->sc_id = txsc->hw_sc_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_txsc *txsc,
				      u8 assoc_num)
{
	unsigned char *src = txsc->sa_key[assoc_num];
	struct mcs_sa_plcy_write_req *plcy_req;
	struct mbox *mbox = &pfvf->mbox;
	u8 reg, key_len;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
		memcpy((u8 *)&plcy_req->plcy[0][reg], (src + reg * 8), 8);
		reg++;
	}

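	/* Word 8 of the TX SA policy holds the association number; the SecY
	 * policy TCI deliberately excludes the AN bits, so this is presumably
	 * where the hardware picks up the AN for the SecTag.
	 */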
	plcy_req->plcy[0][8] = assoc_num;
	plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf,
				struct cn10k_mcs_txsc *txsc,
				u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = txsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
				    bool enable, enum mcs_direction dir)
{
	struct mcs_flowid_ena_dis_entry *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->flow_id = hw_flow_id;
	req->ena = enable;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id,
			      struct mcs_sa_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sa_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sa_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sa_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SA;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id,
			      struct mcs_sc_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sc_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sc_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sc_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SC;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id,
				struct mcs_secy_stats *rsp_p,
				enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_secy_stats *rsp;
	struct mcs_stats_req *req;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_secy_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_secy_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SECY;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							 0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_txsc *txsc;
	int ret;

	txsc = kzalloc(sizeof(*txsc), GFP_KERNEL);
	if (!txsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
				   &txsc->hw_flow_id);
	if (ret)
		goto fail;

	/* For each SecY, both a TX SecY and an RX SecY HW resource are needed */
	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_tx);
	if (ret)
		goto free_flowid;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_rx);
	if (ret)
		goto free_tx_secy;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
				   &txsc->hw_sc_id);
	if (ret)
		goto free_rx_secy;

	return txsc;
free_rx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
free_tx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
fail:
	kfree(txsc);
	return ERR_PTR(ret);
}

/* Free the Tx SC and its SAs (if any) resources to AF */
static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_txsc *txsc)
{
	u8 sa_bmap = txsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy,
						   txsc, sa_num);
			cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
			    txsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
}

static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_rxsc *rxsc;
	int ret;

	rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL);
	if (!rxsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
				   &rxsc->hw_flow_id);
	if (ret)
		goto fail;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
				   &rxsc->hw_sc_id);
	if (ret)
		goto free_flowid;

	return rxsc;
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
fail:
	kfree(rxsc);
	return ERR_PTR(ret);
}

/* Free the Rx SC and its SAs (if any) resources to AF */
static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc)
{
	u8 sa_bmap = rxsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc,
						   sa_num, false);
			cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
			    rxsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
}

static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc,
				 struct macsec_tx_sa *sw_tx_sa, u8 sa_num)
{
	if (sw_tx_sa) {
		cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
				     sw_tx_sa->next_pn_halves.lower);
		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num,
					sw_tx_sa->active);
	}

	cn10k_mcs_write_tx_secy(pfvf, secy, txsc);
	cn10k_mcs_write_tx_flowid(pfvf, secy, txsc);
	/* When updating the SecY, update the RX SecY policy as well */
	cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx);

	return 0;
}

static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf,
				 struct macsec_secy *secy, u8 hw_secy_id)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	struct macsec_rx_sa *sw_rx_sa;
	u8 sa_num;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) {
			sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]);
			if (!sw_rx_sa)
				continue;

			cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc,
						   sa_num, sw_rx_sa->active);
			cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num,
						 sw_rx_sa->next_pn_halves.lower);
		}

		cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id);
		cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id);
	}

	return 0;
}

static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   bool delete)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	int ret;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id,
					       false, MCS_RX);
		if (ret)
			dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n",
				mcs_rx_sc->hw_sc_id);
		if (delete) {
			cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc);
			list_del(&mcs_rx_sc->entry);
			kfree(mcs_rx_sc);
		}
	}

	return 0;
}

static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_secy_stats rx_rsp = { 0 };
	struct mcs_sc_stats sc_rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	/* Because the hardware shares counters between some stats, take a
	 * snapshot of the current stats and reset them when the SecY policy
	 * is updated. The stats below are the ones affected by the shared
	 * counters.
	 */

	/* Check if a sync is really needed */
	if (secy->validate_frames == txsc->last_validate_frames &&
	    secy->protect_frames == txsc->last_protect_frames)
		return;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);

	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true);

		rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt;
		rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt;

		rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
		rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;

		if (txsc->last_protect_frames)
			rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
		else
			rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;

		if (txsc->last_validate_frames == MACSEC_VALIDATE_CHECK)
			rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
		else
			rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
	}

	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_protect_frames = secy->protect_frames;
}

static int cn10k_mdo_open(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	sa_num = txsc->encoding_sa;
	sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);

	err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
	if (err)
		return err;

	return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx);
}

static int cn10k_mdo_stop(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	if (err)
		return err;

	return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false);
}

static int cn10k_mdo_add_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
		return -EOPNOTSUPP;

	/* Stick to a 16-byte key length until XPN support is added */
	if (secy->key_len != 16)
		return -EOPNOTSUPP;

	if (secy->xpn)
		return -EOPNOTSUPP;

	txsc = cn10k_mcs_create_txsc(pfvf);
	if (IS_ERR(txsc))
		return -ENOSPC;

	txsc->sw_secy = secy;
	txsc->encoding_sa = secy->tx_sc.encoding_sa;
	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_protect_frames = secy->protect_frames;

	list_add(&txsc->entry, &cfg->txsc_list);

	if (netif_running(secy->netdev))
		return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);

	return 0;
}

static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	txsc->encoding_sa = secy->tx_sc.encoding_sa;

	sa_num = txsc->encoding_sa;
	sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);

	if (netif_running(secy->netdev)) {
		cn10k_mcs_sync_stats(pfvf, secy, txsc);

		err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true);
	cn10k_mcs_delete_txsc(pfvf, txsc);
	list_del(&txsc->entry);
	kfree(txsc);

	return 0;
}

static int cn10k_mdo_add_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len);
	txsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		if (err)
			return err;

		err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
					   sw_tx_sa->next_pn_halves.lower);
		if (err)
			return err;

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		/* Keys cannot be changed after creation */
		err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
					   sw_tx_sa->next_pn_halves.lower);
		if (err)
			return err;

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
	txsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}

static int cn10k_mdo_add_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_rxsc *rxsc;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	rxsc = cn10k_mcs_create_rxsc(pfvf);
	if (IS_ERR(rxsc))
		return -ENOSPC;

	rxsc->sw_secy = ctx->secy;
	rxsc->sw_rxsc = ctx->rx_sc;
	list_add(&rxsc->entry, &cfg->rxsc_list);

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;

		err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	bool enable = ctx->rx_sc->active;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (netif_running(secy->netdev))
		return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id,
						enable, MCS_RX);

	return 0;
}

static int cn10k_mdo_del_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX);
	cn10k_mcs_delete_rxsc(pfvf, rxsc);
	list_del(&rxsc->entry);
	kfree(rxsc);

	return 0;
}

static int cn10k_mdo_add_rxsa(struct macsec_context *ctx)
{
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	u64 next_pn = rx_sa->next_pn_halves.lower;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len);
	rxsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc,
						 sa_num, sa_in_use);
		if (err)
			return err;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
{
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	u64 next_pn = rx_sa->next_pn_halves.lower;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use);
		if (err)
			return err;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_rxsa(struct macsec_context *ctx)
{
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false);
	cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);

	rxsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}

static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx)
{
	struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 };
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false);
	ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt;
	ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (secy->validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
	txsc->stats.InPktsOverrun = 0;

	ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag;
	ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged;
	ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag;
	ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI;
	ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI;
	ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun;

	return 0;
}

static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false);

	ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
	ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt;
	ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt;

	return 0;
}

static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false);

	ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;

	return 0;
}

static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true);

	rxsc->stats.InOctetsValidated += rsp.octet_validate_cnt;
	rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt;

	rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
	rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;

	if (secy->protect_frames)
		rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
	else
		rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;

	if (secy->validate_frames == MACSEC_VALIDATE_CHECK)
		rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
	else
		rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;

	ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated;
	ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted;
	ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid;
	ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid;
	ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate;
	ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed;
	ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked;
	ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK;

	return 0;
}

static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx)
{
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false);

	ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt;
	ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt;
	ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt;

	return 0;
}

static const struct macsec_ops cn10k_mcs_ops = {
	.mdo_dev_open = cn10k_mdo_open,
	.mdo_dev_stop = cn10k_mdo_stop,
	.mdo_add_secy = cn10k_mdo_add_secy,
	.mdo_upd_secy = cn10k_mdo_upd_secy,
	.mdo_del_secy = cn10k_mdo_del_secy,
	.mdo_add_rxsc = cn10k_mdo_add_rxsc,
	.mdo_upd_rxsc = cn10k_mdo_upd_rxsc,
	.mdo_del_rxsc = cn10k_mdo_del_rxsc,
	.mdo_add_rxsa = cn10k_mdo_add_rxsa,
	.mdo_upd_rxsa = cn10k_mdo_upd_rxsa,
	.mdo_del_rxsa = cn10k_mdo_del_rxsa,
	.mdo_add_txsa = cn10k_mdo_add_txsa,
	.mdo_upd_txsa = cn10k_mdo_upd_txsa,
	.mdo_del_txsa = cn10k_mdo_del_txsa,
	.mdo_get_dev_stats = cn10k_mdo_get_dev_stats,
	.mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats,
	.mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats,
	.mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats,
	.mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats,
};

void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_tx_sa *sw_tx_sa = NULL;
	struct macsec_secy *secy = NULL;
	struct cn10k_mcs_txsc *txsc;
	u8 an;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT))
		return;

	/* Find the SecY to which the expired hardware SA is mapped */
	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		for (an = 0; an < CN10K_MCS_SA_PER_SC; an++)
			if (txsc->hw_sa_id[an] == event->sa_id) {
				secy = txsc->sw_secy;
				sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
			}
	}

	if (secy && sw_tx_sa)
		macsec_pn_wrapped(secy, sw_tx_sa);
}

int cn10k_mcs_init(struct otx2_nic *pfvf)
{
	struct mbox *mbox = &pfvf->mbox;
	struct cn10k_mcs_cfg *cfg;
	struct mcs_intr_cfg *req;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return 0;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&cfg->txsc_list);
	INIT_LIST_HEAD(&cfg->rxsc_list);
	pfvf->macsec_cfg = cfg;

	pfvf->netdev->features |= NETIF_F_HW_MACSEC;
	pfvf->netdev->macsec_ops = &cn10k_mcs_ops;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox);
	if (!req)
		goto fail;

	req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;

	if (otx2_sync_mbox_msg(mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
	mutex_unlock(&mbox->lock);
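	/* Not fatal: MACsec offload still works, only the PN-wrap
	 * notification from hardware is lost.
	 */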
	return 0;
}

void cn10k_mcs_free(struct otx2_nic *pfvf)
{
	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true);
	kfree(pfvf->macsec_cfg);
	pfvf->macsec_cfg = NULL;
}