/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

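/* TCP flags the firmware can match on.  Note that PSH and URG are only
 * accepted in combination with FIN, SYN or RST; this is enforced in
 * nfp_flower_calculate_key_layers() below.
 */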
#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

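/* The complete set of flow dissector keys the driver understands.
 * Filters using any key outside this set are rejected with -EOPNOTSUPP.
 */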
#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP))

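/* The required minimum subset: if any tunnel key is used, at least
 * these keys must be present as well.
 */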
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

static int
nfp_flower_xmit_flow(struct net_device *netdev,
		     struct nfp_fl_payload *nfp_flow, u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct nfp_repr *priv = netdev_priv(netdev);
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
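	/* For example, assuming the usual NFP_FL_LW_SIZ of 2 (4-byte long
	 * words), a 40 byte key is reported to the firmware as 10.
	 */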
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

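	/* Control message payload layout: metadata, then the unmasked key,
	 * mask and action data back to back.
	 */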
	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(priv->app->ctrl, skb);

	return 0;
}

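/* True if the filter also matches on fields above L2: L3 addresses,
 * L4 ports or ICMP.  Used to reject unknown ethtypes whose headers
 * cannot be parsed that far.
 */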
static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	return dissector_uses_key(f->dissector,
				  FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_PORTS) ||
		dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
}

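/* Account for Geneve option TLVs in the key layout.  Option data longer
 * than NFP_FL_MAX_GENEVE_OPT_KEY cannot be offloaded.
 */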
static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
			  u32 *key_layer_two, int *key_size)
{
	if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY)
		return -EOPNOTSUPP;

	if (enc_opts->len > 0) {
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
		*key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return 0;
}

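/* Walk the flower match and work out which NFP key layers are needed
 * and how large the resulting hardware key will be, rejecting any match
 * the firmware cannot express.
 */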
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow,
				bool egress,
				enum nfp_flower_tun_type *tun_type)
{
	struct flow_dissector_key_basic *mask_basic = NULL;
	struct flow_dissector_key_basic *key_basic = NULL;
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
		return -EOPNOTSUPP;

	/* If any tun dissector is used then the required set must be used. */
	if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
		return -EOPNOTSUPP;

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *flow_vlan;

		flow_vlan = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_VLAN,
						      flow->mask);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    flow_vlan->vlan_priority)
			return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
		struct flow_dissector_key_ports *mask_enc_ports = NULL;
		struct flow_dissector_key_enc_opts *enc_op = NULL;
		struct flow_dissector_key_ports *enc_ports = NULL;
		struct flow_dissector_key_control *mask_enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->mask);
		struct flow_dissector_key_control *enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->key);
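		/* Tunnel matches are only accepted from the egress
		 * (tunnel device) callback.
		 */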
		if (!egress)
			return -EOPNOTSUPP;

		if (mask_enc_ctl->addr_type != 0xffff ||
		    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
			return -EOPNOTSUPP;

		/* These fields are already verified as used. */
		mask_ipv4 =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  flow->mask);
		if (mask_ipv4->dst != cpu_to_be32(~0))
			return -EOPNOTSUPP;

		mask_enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->mask);
		enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->key);

		if (mask_enc_ports->dst != cpu_to_be16(~0))
			return -EOPNOTSUPP;

		if (dissector_uses_key(flow->dissector,
				       FLOW_DISSECTOR_KEY_ENC_OPTS)) {
			enc_op = skb_flow_dissector_target(flow->dissector,
							   FLOW_DISSECTOR_KEY_ENC_OPTS,
							   flow->key);
		}

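		/* The well-known UDP destination port selects the tunnel
		 * type.
		 */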
		switch (enc_ports->dst) {
		case htons(NFP_FL_VXLAN_PORT):
			*tun_type = NFP_FL_TUNNEL_VXLAN;
			key_layer |= NFP_FLOWER_LAYER_VXLAN;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

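			/* Tunnel option TLVs are a Geneve-only feature. */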
			if (enc_op)
				return -EOPNOTSUPP;
			break;
		case htons(NFP_FL_GENEVE_PORT):
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
				return -EOPNOTSUPP;
			*tun_type = NFP_FL_TUNNEL_GENEVE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (!enc_op)
				break;
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
				return -EOPNOTSUPP;
			err = nfp_flower_calc_opt_layer(enc_op, &key_layer_two,
							&key_size);
			if (err)
				return err;
			break;
		default:
			return -EOPNOTSUPP;
		}
	} else if (egress) {
		/* Reject non-tunnel matches offloaded to egress repr. */
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		mask_basic = skb_flow_dissector_target(flow->dissector,
						       FLOW_DISSECTOR_KEY_BASIC,
						       flow->mask);

		key_basic = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_BASIC,
						      flow->key);
	}

	if (mask_basic && mask_basic->n_proto) {
		/* Ethernet type is present in the key. */
		switch (key_basic->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow))
				return -EOPNOTSUPP;
			break;
		}
	}


	if (mask_basic && mask_basic->ip_proto) {
		/* IP protocol is present in the key. */
		switch (key_basic->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other ip proto - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			return -EOPNOTSUPP;
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *tcp;
		u32 tcp_flags;

		tcp = skb_flow_dissector_target(flow->dissector,
						FLOW_DISSECTOR_KEY_TCP,
						flow->key);
		tcp_flags = be16_to_cpu(tcp->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
			return -EOPNOTSUPP;

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
			return -EOPNOTSUPP;

		/* We need to store TCP flags in the IPv4 key space, thus
		 * we need to ensure we include an IPv4 key layer if we have
		 * not done so already.
		 */
		if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key_ctl;

		key_ctl = skb_flow_dissector_target(flow->dissector,
						    FLOW_DISSECTOR_KEY_CONTROL,
						    flow->key);

		if (key_ctl->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
			return -EOPNOTSUPP;
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

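/* Allocate a flow payload and its key/mask/action buffers, sized from
 * the key layout computed above.
 */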
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;
	spin_lock_init(&flow_pay->lock);

	flow_pay->ingress_offload = !egress;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

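/* A minimal example of a rule that can reach this path, assuming an NFP
 * flower representor named "eth1" (the netdev name and port number are
 * illustrative only):
 *
 *   tc qdisc add dev eth1 clsact
 *   tc filter add dev eth1 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 */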
/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app: Pointer to the APP handle
 * @netdev: netdev structure.
 * @flow: TC flower classifier offload structure.
 * @egress: NFP netdev is the egress.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct net_device *ingr_dev;
	int err;

	ingr_dev = egress ? NULL : netdev;
	flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (flow_pay) {
		/* Ignore as duplicate if it has been added by different cb. */
		if (flow_pay->ingress_offload && egress)
			return 0;
		else
			return -EOPNOTSUPP;
	}

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
					      &tun_type);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer, egress);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	flow_pay->ingress_dev = egress ? NULL : netdev;

	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
					    tun_type);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay,
					flow_pay->ingress_dev);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_xmit_flow(netdev, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_destroy_flow;

	INIT_HLIST_NODE(&flow_pay->link);
	flow_pay->tc_flower_cookie = flow->cookie;
	hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);
	port->tc_offload_cnt++;

	/* The flow payload is kept until the flower rule is destroyed;
	 * only the temporary key layout is freed here.
	 */
	kfree(key_layer);

	return 0;

err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app: Pointer to the APP handle
 * @netdev: netdev structure.
 * @flow: TC flower classifier offload structure
 * @egress: Netdev is the egress dev.
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_fl_payload *nfp_flow;
	struct net_device *ingr_dev;
	int err;

	ingr_dev = egress ? NULL : netdev;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (!nfp_flow)
		return egress ? 0 : -ENOENT;

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	err = nfp_flower_xmit_flow(netdev, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	if (err)
		goto err_free_flow;

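	/* Fall through on success: the flow is removed from the local
	 * table either way.
	 */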
err_free_flow:
	hash_del_rcu(&nfp_flow->link);
	port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	kfree_rcu(nfp_flow, rcu);
	return err;
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app: Pointer to the APP handle
 * @netdev: Netdev structure.
 * @flow: TC flower classifier offload structure
 * @egress: Netdev is the egress dev.
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct tc_cls_flower_offload *flow, bool egress)
{
	struct nfp_fl_payload *nfp_flow;
	struct net_device *ingr_dev;

	ingr_dev = egress ? NULL : netdev;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (!nfp_flow)
		return -EINVAL;

	if (nfp_flow->ingress_offload && egress)
		return 0;

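	/* Stats accumulate in nfp_flow between reads; report them and
	 * reset the counters under the flow lock.
	 */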
	spin_lock_bh(&nfp_flow->lock);
	tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
			      nfp_flow->stats.pkts, nfp_flow->stats.used);

	nfp_flow->stats.pkts = 0;
	nfp_flow->stats.bytes = 0;
	spin_unlock_bh(&nfp_flow->lock);

	return 0;
}

static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct tc_cls_flower_offload *flower, bool egress)
{
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower, egress);
	case TC_CLSFLOWER_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower, egress);
	case TC_CLSFLOWER_STATS:
		return nfp_flower_get_stats(app, netdev, flower, egress);
	default:
		return -EOPNOTSUPP;
	}
}

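/* Egress path.  This callback is expected to be registered for netdevs
 * on which the repr acts as the flow's egress device (e.g. tunnel
 * decap), so flows are offloaded with egress set.
 */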
int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data, true);
	default:
		return -EOPNOTSUPP;
	}
}

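/* Ingress path: flows attached directly to the repr's own block are
 * offloaded with egress clear.
 */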
static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data, false);
	default:
		return -EOPNOTSUPP;
	}
}

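/* Bind or unbind the flower classifier callback on a repr's ingress
 * block.
 */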
static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct tc_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block,
					     nfp_flower_setup_tc_block_cb,
					     repr, repr, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_tc_block_cb,
					repr);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}