// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitfield.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>

#include "cn10k.h"
#include "otx2_common.h"
#include "qos.h"

#define CN10K_MAX_BURST_MANTISSA	0x7FFFULL
#define CN10K_MAX_BURST_SIZE		8453888ULL

#define CN10K_TLX_BURST_MANTISSA	GENMASK_ULL(43, 29)
#define CN10K_TLX_BURST_EXPONENT	GENMASK_ULL(47, 44)
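/* Note (derived from the burst formula used below): on CN10K the TLx shaper
 * burst mantissa/exponent occupy bits 43:29 and 47:44 of the PIR/CIR register,
 * wider than the OcteonTx2 TLX_BURST_* fields, which is what allows
 * CN10K_MAX_BURST_SIZE = ((256 + 0x7FFF) << (1 + 0xF)) / 256 = 8453888 bytes.
 */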

struct otx2_tc_flow_stats {
	u64 bytes;
	u64 pkts;
	u64 used;
};

struct otx2_tc_flow {
	struct list_head		list;
	unsigned long			cookie;
	struct rcu_head			rcu;
	struct otx2_tc_flow_stats	stats;
	spinlock_t			lock; /* lock for stats */
	u16				rq;		/* RQ used by the police action */
	u16				entry;		/* MCAM entry index */
	u16				leaf_profile;	/* ingress bandwidth profile */
	bool				is_act_police;
	u32				prio;		/* tc filter priority */
	struct npc_install_flow_req	req;
};

static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
				      u32 *burst_exp, u32 *burst_mantissa)
{
	int max_burst, max_mantissa;
	unsigned int tmp;

	if (is_dev_otx2(nic->pdev)) {
		max_burst = MAX_BURST_SIZE;
		max_mantissa = MAX_BURST_MANTISSA;
	} else {
		max_burst = CN10K_MAX_BURST_SIZE;
		max_mantissa = CN10K_MAX_BURST_MANTISSA;
	}

	/* Burst is calculated as
	 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
	 * Max supported burst size is 130,816 bytes.
	 */
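	/* Worked example (illustrative, derived from the formula above):
	 * for burst = 8192, ilog2(8192) = 13 so *burst_exp = 12, and 8192 is
	 * a power of two so *burst_mantissa = 0; the hardware then reproduces
	 * ((256 + 0) << (1 + 12)) / 256 = 8192 bytes exactly.
	 */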
	burst = min_t(u32, burst, max_burst);
	if (burst) {
		*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
		tmp = burst - rounddown_pow_of_two(burst);
		if (burst < max_mantissa)
			*burst_mantissa = tmp * 2;
		else
			*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
	} else {
		*burst_exp = MAX_BURST_EXPONENT;
		*burst_mantissa = max_mantissa;
	}
}

static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
				     u32 *mantissa, u32 *div_exp)
{
	u64 tmp;

	/* Rate calculation by hardware
	 *
	 * PIR_ADD = ((256 + mantissa) << exp) / 256
	 * rate = (2 * PIR_ADD) / (1 << div_exp)
	 * The resultant rate is in Mbps.
	 */
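	/* Worked example (illustrative, from the formulas above, assuming an
	 * 8-bit MAX_RATE_MANTISSA): for maxrate = 300, ilog2(300) = 8 so
	 * exp = 7 and tmp = 300 - 256 = 44; 300 is not below MAX_RATE_MANTISSA,
	 * so the else branch below gives mantissa = 44 / (1 << (7 - 7)) = 44.
	 * Then PIR_ADD = ((256 + 44) << 7) / 256 = 150 and
	 * rate = 2 * 150 = 300 Mbps, reproducing the request exactly.
	 */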

	/* 2Mbps to 100Gbps can be expressed with div_exp = 0.
	 * Setting this to '0' will ease the calculation of
	 * exponent and mantissa.
	 */
	*div_exp = 0;

	if (maxrate) {
		*exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
		tmp = maxrate - rounddown_pow_of_two(maxrate);
		if (maxrate < MAX_RATE_MANTISSA)
			*mantissa = tmp * 2;
		else
			*mantissa = tmp / (1ULL << (*exp - 7));
	} else {
		/* Instead of disabling rate limiting, set all values to max */
		*exp = MAX_RATE_EXPONENT;
		*mantissa = MAX_RATE_MANTISSA;
	}
}

u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
				u64 maxrate, u32 burst)
{
	u32 burst_exp, burst_mantissa;
	u32 exp, mantissa, div_exp;
	u64 regval = 0;

	/* Get exponent and mantissa values from the desired rate */
	otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
	otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);

	if (is_dev_otx2(nic->pdev)) {
		regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
			 FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	} else {
		regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
			 FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	}

	return regval;
}

static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
					 u32 burst, u64 maxrate)
{
	struct otx2_hw *hw = &nic->hw;
	struct nix_txschq_config *req;
	int txschq, err;

	/* All SQs share the same TL4, so pick the first scheduler */
	txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->lvl = NIX_TXSCH_LVL_TL4;
	req->num_regs = 1;
	req->reg[0] = NIX_AF_TL4X_PIR(txschq);
	req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);

	err = otx2_sync_mbox_msg(&nic->mbox);
	mutex_unlock(&nic->mbox.lock);
	return err;
}

static int otx2_tc_validate_flow(struct otx2_nic *nic,
				 struct flow_action *actions,
				 struct netlink_ext_ack *extack)
{
	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload supports only 1 policing action");
		return -EINVAL;
	}
	return 0;
}

static int otx2_policer_validate(const struct flow_action *action,
				 const struct flow_action_entry *act,
				 struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one Egress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		err = otx2_policer_validate(&cls->rule->action, entry, extack);
		if (err)
			return err;

		if (entry->police.rate_pkt_ps) {
			NL_SET_ERR_MSG_MOD(extack, "QoS offload doesn't support packets per second");
			return -EOPNOTSUPP;
		}
		err = otx2_set_matchall_egress_rate(nic, entry->police.burst,
						    otx2_convert_rate(entry->police.rate_bytes_ps));
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action is supported with Egress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = otx2_set_matchall_egress_rate(nic, 0, 0);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
	return err;
}

static int otx2_tc_act_set_police(struct otx2_nic *nic,
				  struct otx2_tc_flow *node,
				  struct flow_cls_offload *f,
				  u64 rate, u32 burst, u32 mark,
				  struct npc_install_flow_req *req, bool pps)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct otx2_hw *hw = &nic->hw;
	int rq_idx, rc;

	rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
	if (rq_idx >= hw->rx_queues) {
		NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
	if (rc) {
		mutex_unlock(&nic->mbox.lock);
		return rc;
	}

	rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
	if (rc)
		goto free_leaf;

	rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
	if (rc)
		goto free_leaf;

	mutex_unlock(&nic->mbox.lock);

	req->match_id = mark & 0xFFFFULL;
	req->index = rq_idx;
	req->op = NIX_RX_ACTIONOP_UCAST;
	set_bit(rq_idx, &nic->rq_bmap);
	node->is_act_police = true;
	node->rq = rq_idx;

	return 0;

free_leaf:
	if (cn10k_free_leaf_profile(nic, node->leaf_profile))
		netdev_err(nic->netdev,
			   "Unable to free leaf bandwidth profile(%d)\n",
			   node->leaf_profile);
	mutex_unlock(&nic->mbox.lock);
	return rc;
}

static int otx2_tc_parse_actions(struct otx2_nic *nic,
				 struct flow_action *flow_action,
				 struct npc_install_flow_req *req,
				 struct flow_cls_offload *f,
				 struct otx2_tc_flow *node)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_action_entry *act;
	struct net_device *target;
	struct otx2_nic *priv;
	u32 burst, mark = 0;
	u8 nr_police = 0;
	bool pps = false;
	u64 rate;
	int err;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
		return -EINVAL;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			req->op = NIX_RX_ACTIONOP_DROP;
			return 0;
		case FLOW_ACTION_ACCEPT:
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_REDIRECT_INGRESS:
			target = act->dev;
			priv = netdev_priv(target);
			/* npc_install_flow_req doesn't support passing a target pcifunc */
			if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't redirect to other pf/vf");
				return -EOPNOTSUPP;
			}
			req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;

			/* If op is already set, avoid overwriting it */
			if (!req->op)
				req->op = NIX_RX_ACTION_DEFAULT;
			break;

		case FLOW_ACTION_VLAN_POP:
			req->vtag0_valid = true;
			/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
			req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
			break;
		case FLOW_ACTION_POLICE:
			/* Ingress ratelimiting is not supported on OcteonTx2 */
			if (is_dev_otx2(nic->pdev)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Ingress policing not supported on this platform");
				return -EOPNOTSUPP;
			}

			err = otx2_policer_validate(flow_action, act, extack);
			if (err)
				return err;

			if (act->police.rate_bytes_ps > 0) {
				rate = act->police.rate_bytes_ps * 8;
				burst = act->police.burst;
			} else if (act->police.rate_pkt_ps > 0) {
				/* The algorithm used to calculate rate
				 * mantissa, exponent values for a given token
				 * rate (token can be byte or packet) requires
				 * token rate to be multiplied by 8.
				 */
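				/* e.g. (illustrative) rate_pkt_ps = 1000 is
				 * passed on as rate = 8000 token units to the
				 * mantissa/exponent computation.
				 */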
				rate = act->police.rate_pkt_ps * 8;
				burst = act->police.burst_pkt;
				pps = true;
			}
			nr_police++;
			break;
		case FLOW_ACTION_MARK:
			mark = act->mark;
			break;

		case FLOW_ACTION_RX_QUEUE_MAPPING:
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = act->rx_queue;
			break;

		default:
			return -EOPNOTSUPP;
		}
	}

	if (nr_police > 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "rate limit police offload requires a single action");
		return -EOPNOTSUPP;
	}

	if (nr_police)
		return otx2_tc_act_set_police(nic, node, f, rate, burst,
					      mark, req, pps);

	return 0;
}

static int otx2_tc_process_vlan(struct otx2_nic *nic, struct flow_msg *flow_spec,
				struct flow_msg *flow_mask, struct flow_rule *rule,
				struct npc_install_flow_req *req, bool is_inner)
{
	struct flow_match_vlan match;
	u16 vlan_tci, vlan_tci_mask;

	if (is_inner)
		flow_rule_match_cvlan(rule, &match);
	else
		flow_rule_match_vlan(rule, &match);

	if (!eth_type_vlan(match.key->vlan_tpid)) {
		netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
			   ntohs(match.key->vlan_tpid));
		return -EOPNOTSUPP;
	}

	if (!match.mask->vlan_id) {
		struct flow_action_entry *act;
		int i;

		flow_action_for_each(i, act, &rule->action) {
			if (act->id == FLOW_ACTION_DROP) {
				netdev_err(nic->netdev,
					   "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
					   ntohs(match.key->vlan_tpid), match.key->vlan_id);
				return -EOPNOTSUPP;
			}
		}
	}

	if (match.mask->vlan_id ||
	    match.mask->vlan_dei ||
	    match.mask->vlan_priority) {
		vlan_tci = match.key->vlan_id |
			   match.key->vlan_dei << 12 |
			   match.key->vlan_priority << 13;

		vlan_tci_mask = match.mask->vlan_id |
				match.mask->vlan_dei << 12 |
				match.mask->vlan_priority << 13;
		if (is_inner) {
			flow_spec->vlan_itci = htons(vlan_tci);
			flow_mask->vlan_itci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_INNER_VID);
		} else {
			flow_spec->vlan_tci = htons(vlan_tci);
			flow_mask->vlan_tci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}
	}

	return 0;
}

static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
				struct flow_cls_offload *f,
				struct npc_install_flow_req *req)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_msg *flow_spec = &req->packet;
	struct flow_msg *flow_mask = &req->mask;
	struct flow_dissector *dissector;
	struct flow_rule *rule;
	u8 ip_proto = 0;

	rule = flow_cls_offload_flow_rule(f);
	dissector = rule->match.dissector;

	if ((dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPSEC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
		netdev_info(nic->netdev, "unsupported flow used key 0x%llx\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		/* All EtherTypes can be matched, no hw limitation */
		flow_spec->etype = match.key->n_proto;
		flow_mask->etype = match.mask->n_proto;
		req->features |= BIT_ULL(NPC_ETYPE);

		if (match.mask->ip_proto &&
		    (match.key->ip_proto != IPPROTO_TCP &&
		     match.key->ip_proto != IPPROTO_UDP &&
		     match.key->ip_proto != IPPROTO_SCTP &&
		     match.key->ip_proto != IPPROTO_ICMP &&
		     match.key->ip_proto != IPPROTO_ESP &&
		     match.key->ip_proto != IPPROTO_AH &&
		     match.key->ip_proto != IPPROTO_ICMPV6)) {
			netdev_info(nic->netdev,
				    "ip_proto=0x%x not supported\n",
				    match.key->ip_proto);
			return -EOPNOTSUPP;
		}
		if (match.mask->ip_proto)
			ip_proto = match.key->ip_proto;

		if (ip_proto == IPPROTO_UDP)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (ip_proto == IPPROTO_TCP)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else if (ip_proto == IPPROTO_SCTP)
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		else if (ip_proto == IPPROTO_ICMP)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
		else if (ip_proto == IPPROTO_ICMPV6)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
		else if (ip_proto == IPPROTO_ESP)
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		else if (ip_proto == IPPROTO_AH)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
			NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later");
			return -EOPNOTSUPP;
		}

		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			if (ntohs(flow_spec->etype) == ETH_P_IP) {
				flow_spec->ip_flag = IPV4_FLAG_MORE;
				flow_mask->ip_flag = IPV4_FLAG_MORE;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
			} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
				flow_spec->next_header = IPPROTO_FRAGMENT;
				flow_mask->next_header = 0xff;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
			} else {
				NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 or IPv6");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		if (!is_zero_ether_addr(match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
			return -EOPNOTSUPP;
		}

		if (!is_zero_ether_addr(match.mask->dst)) {
			ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
			ether_addr_copy(flow_mask->dmac,
					(u8 *)&match.mask->dst);
			req->features |= BIT_ULL(NPC_DMAC);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPSEC)) {
		struct flow_match_ipsec match;

		flow_rule_match_ipsec(rule, &match);
		if (!match.mask->spi) {
			NL_SET_ERR_MSG_MOD(extack, "spi index not specified");
			return -EOPNOTSUPP;
		}
		if (ip_proto != IPPROTO_ESP &&
		    ip_proto != IPPROTO_AH) {
			NL_SET_ERR_MSG_MOD(extack,
					   "SPI index is valid only for ESP/AH proto");
			return -EOPNOTSUPP;
		}

		flow_spec->spi = match.key->spi;
		flow_mask->spi = match.mask->spi;
		req->features |= BIT_ULL(NPC_IPSEC_SPI);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
		    match.mask->tos) {
			NL_SET_ERR_MSG_MOD(extack, "tos not supported");
			return -EOPNOTSUPP;
		}
		if (match.mask->ttl) {
			NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
			return -EOPNOTSUPP;
		}
		flow_spec->tos = match.key->tos;
		flow_mask->tos = match.mask->tos;
		req->features |= BIT_ULL(NPC_TOS);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		int ret;

		ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, false);
		if (ret)
			return ret;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		int ret;

		ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, true);
		if (ret)
			return ret;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);

		flow_spec->ip4dst = match.key->dst;
		flow_mask->ip4dst = match.mask->dst;
		req->features |= BIT_ULL(NPC_DIP_IPV4);

		flow_spec->ip4src = match.key->src;
		flow_mask->ip4src = match.mask->src;
		req->features |= BIT_ULL(NPC_SIP_IPV4);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);

		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow matching IPv6 loopback addr not supported");
			return -EOPNOTSUPP;
		}

		if (!ipv6_addr_any(&match.mask->dst)) {
			memcpy(&flow_spec->ip6dst,
			       (struct in6_addr *)&match.key->dst,
			       sizeof(flow_spec->ip6dst));
			memcpy(&flow_mask->ip6dst,
			       (struct in6_addr *)&match.mask->dst,
			       sizeof(flow_spec->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		if (!ipv6_addr_any(&match.mask->src)) {
			memcpy(&flow_spec->ip6src,
			       (struct in6_addr *)&match.key->src,
			       sizeof(flow_spec->ip6src));
			memcpy(&flow_mask->ip6src,
			       (struct in6_addr *)&match.mask->src,
			       sizeof(flow_spec->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);

		flow_spec->dport = match.key->dst;
		flow_mask->dport = match.mask->dst;

		if (flow_mask->dport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}

		flow_spec->sport = match.key->src;
		flow_mask->sport = match.mask->src;

		if (flow_mask->sport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
	}

	return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}

static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_tc_flow *iter, *tmp;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
}

static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg,
							unsigned long cookie)
{
	struct otx2_tc_flow *tmp;

	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}

	return NULL;
}

static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg,
						       int index)
{
	struct otx2_tc_flow *tmp;
	int i = 0;

	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
		if (i == index)
			return tmp;
		i++;
	}

	return NULL;
}

static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg,
				       struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node == tmp) {
			list_del(&node->list);
			return;
		}
	}
}

static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
				    struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;
	int index = 0;

	/* If the flow list is empty then add the new node */
	if (list_empty(&flow_cfg->flow_list_tc)) {
		list_add(&node->list, &flow_cfg->flow_list_tc);
		return index;
	}

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node->prio < tmp->prio)
			break;
		index++;
	}

	list_add(&node->list, pos->prev);
	return index;
}

static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req)
{
	struct npc_install_flow_req *tmp_req;
	int err;

	mutex_lock(&nic->mbox.lock);
	tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!tmp_req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	memcpy(tmp_req, req, sizeof(struct npc_install_flow_req));
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	mutex_unlock(&nic->mbox.lock);
	return 0;
}

static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val)
{
	struct npc_delete_flow_rsp *rsp;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
			   entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	if (cntr_val) {
		rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								      0, &req->hdr);
		if (IS_ERR(rsp)) {
			netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n",
				   entry);
			mutex_unlock(&nic->mbox.lock);
			return -EFAULT;
		}

		*cntr_val = rsp->cntr_val;
	}

	mutex_unlock(&nic->mbox.lock);
	return 0;
}

static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic,
					     struct otx2_flow_config *flow_cfg,
					     struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;
	int i = 0, index = 0;
	u16 cntr_val = 0;

	/* Find and delete the entry from the list and re-install
	 * all the entries from beginning to the index of the
	 * deleted entry to higher mcam indexes.
	 */
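	/* Illustration (hypothetical entry numbers): if the list holds flows
	 * A -> B -> C at MCAM entries 10, 11, 12 and B is deleted, the first
	 * loop below bumps A's entry from 10 to 11 and the second loop
	 * re-installs it there, keeping the surviving flows contiguous while
	 * preserving their relative match order.
	 */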
	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node == tmp) {
			list_del(&tmp->list);
			break;
		}

		otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
		tmp->entry++;
		tmp->req.entry = tmp->entry;
		tmp->req.cntr_val = cntr_val;
		index++;
	}

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		if (i == index)
			break;

		tmp = list_entry(pos, struct otx2_tc_flow, list);
		otx2_add_mcam_flow_entry(nic, &tmp->req);
		i++;
	}

	return 0;
}

static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic,
					     struct otx2_flow_config *flow_cfg,
					     struct otx2_tc_flow *node)
{
	int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1;
	struct otx2_tc_flow *tmp;
	int list_idx, i;
	u16 cntr_val = 0;

	/* Find the index of the entry (list_idx) whose priority
	 * is greater than the new entry and re-install all
	 * the entries from beginning to list_idx to higher
	 * mcam indexes.
	 */
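	/* Illustration (hypothetical values): with max_flows = 16 and one
	 * flow E already installed at flow_ent[15], adding a lower-priority
	 * node N gives mcam_idx = 14 and list_idx = 1; the loop below moves E
	 * to flow_ent[14] and returns 15, so N lands at flow_ent[15] and the
	 * list order stays in sync with the MCAM order.
	 */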
	list_idx = otx2_tc_add_to_flow_list(flow_cfg, node);
	for (i = 0; i < list_idx; i++) {
		tmp = otx2_tc_get_entry_by_index(flow_cfg, i);
		if (!tmp)
			return -ENOMEM;

		otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
		tmp->entry = flow_cfg->flow_ent[mcam_idx];
		tmp->req.entry = tmp->entry;
		tmp->req.cntr_val = cntr_val;
		otx2_add_mcam_flow_entry(nic, &tmp->req);
		mcam_idx++;
	}

	return mcam_idx;
}

static int otx2_tc_update_mcam_table(struct otx2_nic *nic,
				     struct otx2_flow_config *flow_cfg,
				     struct otx2_tc_flow *node,
				     bool add_req)
{
	if (add_req)
		return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node);

	return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node);
}

static int otx2_tc_del_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
	if (!flow_node) {
		netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			   tc_flow_cmd->cookie);
		return -EINVAL;
	}

	if (flow_node->is_act_police) {
		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
						 flow_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   flow_node->rq, flow_node->leaf_profile);

		err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   flow_node->leaf_profile);

		__clear_bit(flow_node->rq, &nic->rq_bmap);

		mutex_unlock(&nic->mbox.lock);
	}

	otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
	otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
	kfree_rcu(flow_node, rcu);
	flow_cfg->nr_flows--;
	return 0;
}

static int otx2_tc_add_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_flow *new_node, *old_node;
	struct npc_install_flow_req *req, dummy;
	int rc, err, mcam_idx;

	if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
		return -ENOMEM;

	if (flow_cfg->nr_flows == flow_cfg->max_flows) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Free MCAM entry not available to add the flow");
		return -ENOMEM;
	}

	/* allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node)
		return -ENOMEM;
	spin_lock_init(&new_node->lock);
	new_node->cookie = tc_flow_cmd->cookie;
	new_node->prio = tc_flow_cmd->common.prio;

	memset(&dummy, 0, sizeof(struct npc_install_flow_req));

	rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
	if (rc) {
		kfree_rcu(new_node, rcu);
		return rc;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
	if (old_node)
		otx2_tc_del_flow(nic, tc_flow_cmd);

	mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true);
	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		rc = -ENOMEM;
		goto free_leaf;
	}

	memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
	memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
	req->channel = nic->hw.rx_chan_base;
	req->entry = flow_cfg->flow_ent[mcam_idx];
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	new_node->entry = req->entry;

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
		mutex_unlock(&nic->mbox.lock);
		goto free_leaf;
	}

	mutex_unlock(&nic->mbox.lock);
	memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req));

	flow_cfg->nr_flows++;
	return 0;

free_leaf:
	otx2_tc_del_from_flow_list(flow_cfg, new_node);
	if (new_node->is_act_police) {
		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
						 new_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   new_node->rq, new_node->leaf_profile);
		err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   new_node->leaf_profile);

		__clear_bit(new_node->rq, &nic->rq_bmap);

		mutex_unlock(&nic->mbox.lock);
	}
	/* Free the node only after its police state has been read above,
	 * to avoid a use-after-free.
	 */
	kfree_rcu(new_node, rcu);

	return rc;
}

static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
				  struct flow_cls_offload *tc_flow_cmd)
{
	struct npc_mcam_get_stats_req *req;
	struct npc_mcam_get_stats_rsp *rsp;
	struct otx2_tc_flow_stats *stats;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie);
	if (!flow_node) {
		netdev_info(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			    tc_flow_cmd->cookie);
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_node->entry;

	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&nic->mbox.lock);
		return PTR_ERR(rsp);
	}

	mutex_unlock(&nic->mbox.lock);

	if (!rsp->stat_ena)
		return -EINVAL;

	stats = &flow_node->stats;

	spin_lock(&flow_node->lock);
	flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
	stats->pkts = rsp->stat;
	spin_unlock(&flow_node->lock);

	return 0;
}

static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
				    struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return otx2_tc_add_flow(nic, cls_flower);
	case FLOW_CLS_DESTROY:
		return otx2_tc_del_flow(nic, cls_flower);
	case FLOW_CLS_STATS:
		return otx2_tc_get_flow_stats(nic, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
					    struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	u64 rate;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one ingress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		/* Ingress ratelimiting is not supported on OcteonTx2 */
		if (is_dev_otx2(nic->pdev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Ingress policing not supported on this platform");
			return -EOPNOTSUPP;
		}

		err = cn10k_alloc_matchall_ipolicer(nic);
		if (err)
			return err;

		/* Convert to bits per second */
		rate = entry->police.rate_bytes_ps * 8;
		err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action supported with Ingress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = cn10k_free_matchall_ipolicer(nic);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
	return err;
}

static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_ingress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;
	bool ntuple;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	ntuple = nic->netdev->features & NETIF_F_NTUPLE;
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		if (ntuple) {
			netdev_warn(nic->netdev,
				    "Can't install TC flower offload rule when NTUPLE is active");
			return -EOPNOTSUPP;
		}

		return otx2_setup_tc_cls_flower(nic, type_data);
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_ingress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
					 struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_egress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_egress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_egress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static LIST_HEAD(otx2_block_cb_list);

static int otx2_setup_tc_block(struct net_device *netdev,
			       struct flow_block_offload *f)
{
	struct otx2_nic *nic = netdev_priv(netdev);
	flow_setup_cb_t *cb;
	bool ingress;

	if (f->block_shared)
		return -EOPNOTSUPP;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = otx2_setup_tc_block_ingress_cb;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = otx2_setup_tc_block_egress_cb;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
					  nic, nic, ingress);
}

int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return otx2_setup_tc_block(netdev, type_data);
	case TC_SETUP_QDISC_HTB:
		return otx2_setup_tc_htb(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(otx2_setup_tc);

int otx2_init_tc(struct otx2_nic *nic)
{
	/* Exclude receive queue 0 from being used for the police action */
	set_bit(0, &nic->rq_bmap);

	if (!nic->flow_cfg) {
		netdev_err(nic->netdev,
			   "Can't init TC, nic->flow_cfg is not setup\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_init_tc);

void otx2_shutdown_tc(struct otx2_nic *nic)
{
	otx2_destroy_tc_flow_list(nic);
}
EXPORT_SYMBOL(otx2_shutdown_tc);