/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below. You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"
#include "../nfp_net.h"

#define NFP_FL_MAX_ROUTES	32

/**
 * struct nfp_tun_active_tuns - periodic message of active tunnels
 * @seq: sequence number of the message
 * @count: number of tunnels reported in message
 * @flags: options part of the request
 * @tun_info.ipv4: dest IPv4 address of active route
 * @tun_info.egress_port: port the encapsulated packet egressed
 * @tun_info.extra: reserved for future use
 * @tun_info: tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns {
	__be32 seq;
	__be32 count;
	__be32 flags;
	struct route_ip_info {
		__be32 ipv4;
		__be32 egress_port;
		__be32 extra[2];
	} tun_info[];
};

/**
 * struct nfp_tun_neigh - neighbour/route entry on the NFP
 * @dst_ipv4: destination IPv4 address
 * @src_ipv4: source IPv4 address
 * @dst_addr: destination MAC address
 * @src_addr: source MAC address
 * @port_id: NFP port to output packet on - associated with source IPv4
 */
struct nfp_tun_neigh {
	__be32 dst_ipv4;
	__be32 src_ipv4;
	u8 dst_addr[ETH_ALEN];
	u8 src_addr[ETH_ALEN];
	__be32 port_id;
};

/**
 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
 * @ingress_port: ingress port of packet that signalled request
 * @ipv4_addr: destination ipv4 address for route
 * @reserved: reserved for future use
 */
struct nfp_tun_req_route_ipv4 {
	__be32 ingress_port;
	__be32 ipv4_addr;
	__be32 reserved[2];
};

/**
 * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
 * @ipv4_addr: destination of route
 * @list: list pointer
 */
struct nfp_ipv4_route_entry {
	__be32 ipv4_addr;
	struct list_head list;
};

#define NFP_FL_IPV4_ADDRS_MAX	32

/**
 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
 * @count: number of IPs populated in the array
 * @ipv4_addr: array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
 */
struct nfp_tun_ipv4_addr {
	__be32 count;
	__be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
};

/**
 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
 * @ipv4_addr: IP address
 * @ref_count: number of rules currently using this IP
 * @list: list pointer
 */
struct nfp_ipv4_addr_entry {
	__be32 ipv4_addr;
	int ref_count;
	struct list_head list;
};

/**
 * struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP
 * @reserved: reserved for future use
 * @count: number of MAC addresses in the message
 * @addresses.index: index of MAC address in the lookup table
 * @addresses.addr: interface MAC address
 * @addresses: series of MACs to offload
 */
struct nfp_tun_mac_addr {
	__be16 reserved;
	__be16 count;
	struct index_mac_addr {
		__be16 index;
		u8 addr[ETH_ALEN];
	} addresses[];
};

/**
 * struct nfp_tun_mac_offload_entry - list of MACs to offload
 * @index: index of MAC address for offloading
 * @addr: interface MAC address
 * @list: list pointer
 */
struct nfp_tun_mac_offload_entry {
	__be16 index;
	u8 addr[ETH_ALEN];
	struct list_head list;
};

#define NFP_MAX_MAC_INDEX	0xff

/**
 * struct nfp_tun_mac_non_nfp_idx - converts non-NFP netdev ifindex to 8-bit id
 * @ifindex: netdev ifindex of the device
 * @index: index of netdev's MAC on NFP
 * @list: list pointer
 */
struct nfp_tun_mac_non_nfp_idx {
	int ifindex;
	u8 index;
	struct list_head list;
};

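/* Process a periodic keep-alive message from the firmware. Each tunnel
 * reported as active has its kernel neighbour entry refreshed so the route
 * used for encapsulation is not aged out while traffic still flows.
 */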
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_active_tuns *payload;
	struct net_device *netdev;
	int count, i, pay_len;
	struct neighbour *n;
	__be32 ipv4_addr;
	u32 port;

	payload = nfp_flower_cmsg_get_data(skb);
	count = be32_to_cpu(payload->count);
	if (count > NFP_FL_MAX_ROUTES) {
		nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
		return;
	}

	pay_len = nfp_flower_cmsg_get_data_len(skb);
	if (pay_len != sizeof(struct nfp_tun_active_tuns) +
	    sizeof(struct route_ip_info) * count) {
		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
		return;
	}

	for (i = 0; i < count; i++) {
		ipv4_addr = payload->tun_info[i].ipv4;
		port = be32_to_cpu(payload->tun_info[i].egress_port);
		netdev = nfp_app_repr_get(app, port);
		if (!netdev)
			continue;

		n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
		if (!n)
			continue;

		/* Update the used timestamp of neighbour */
		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}

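/* Only netdevs that can act as a tunnel endpoint are of interest here:
 * Open vSwitch internal devices and VXLAN devices. Everything else is
 * skipped.
 */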
static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
{
	if (!netdev->rtnl_link_ops)
		return false;
	if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
		return true;
	if (!strcmp(netdev->rtnl_link_ops->kind, "vxlan"))
		return true;

	return false;
}

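/* Allocate a control message of the given type, copy in the payload and
 * send it to the firmware over the control channel.
 */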
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
			 gfp_t flag)
{
	struct sk_buff *skb;
	unsigned char *msg;

	skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));

	nfp_ctrl_tx(app->ctrl, skb);
	return 0;
}

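/* The following helpers maintain a cache of destination IPs for which
 * neighbour entries have been offloaded. A spinlock protects the list
 * because it is also updated from the netevent notifier, which can be
 * called in atomic context.
 */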
static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *entry;
	struct list_head *ptr, *storage;

	spin_lock_bh(&priv->nfp_neigh_off_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
		if (entry->ipv4_addr == ipv4_addr) {
			spin_unlock_bh(&priv->nfp_neigh_off_lock);
			return true;
		}
	}
	spin_unlock_bh(&priv->nfp_neigh_off_lock);
	return false;
}

static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *entry;
	struct list_head *ptr, *storage;

	spin_lock_bh(&priv->nfp_neigh_off_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
		if (entry->ipv4_addr == ipv4_addr) {
			spin_unlock_bh(&priv->nfp_neigh_off_lock);
			return;
		}
	}
	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_bh(&priv->nfp_neigh_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
		return;
	}

	entry->ipv4_addr = ipv4_addr;
	list_add_tail(&entry->list, &priv->nfp_neigh_off_list);
	spin_unlock_bh(&priv->nfp_neigh_off_lock);
}

static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *entry;
	struct list_head *ptr, *storage;

	spin_lock_bh(&priv->nfp_neigh_off_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
		if (entry->ipv4_addr == ipv4_addr) {
			list_del(&entry->list);
			kfree(entry);
			break;
		}
	}
	spin_unlock_bh(&priv->nfp_neigh_off_lock);
}

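/* Build and send a neighbour/route entry to the firmware. An invalid or
 * dead neighbour is reported with only the destination IP set so the
 * firmware can drop its cached copy.
 */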
static void
nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
		    struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
	struct nfp_tun_neigh payload;

	/* Only offload representor IPv4s for now. */
	if (!nfp_netdev_is_nfp_repr(netdev))
		return;

	memset(&payload, 0, sizeof(struct nfp_tun_neigh));
	payload.dst_ipv4 = flow->daddr;

	/* If entry has expired send dst IP with all other fields 0. */
	if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
		nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
		/* Trigger ARP to verify invalid neighbour state. */
		neigh_event_send(neigh, NULL);
		goto send_msg;
	}

	/* Have a valid neighbour so populate rest of entry. */
	payload.src_ipv4 = flow->saddr;
	ether_addr_copy(payload.src_addr, netdev->dev_addr);
	neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
	payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev));
	/* Add destination of new route to NFP cache. */
	nfp_tun_add_route_to_cache(app, payload.dst_ipv4);

send_msg:
	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
				 sizeof(struct nfp_tun_neigh),
				 (unsigned char *)&payload, flag);
}

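/* Netevent notifier: reacts to neighbour updates and redirects for
 * destinations that have already been offloaded, refreshing the entry on
 * the firmware with the new neighbour state.
 */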
static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct nfp_flower_priv *app_priv;
	struct netevent_redirect *redir;
	struct flowi4 flow = {};
	struct neighbour *n;
	struct nfp_app *app;
	struct rtable *rt;
	int err;

	switch (event) {
	case NETEVENT_REDIRECT:
		redir = (struct netevent_redirect *)ptr;
		n = redir->neigh;
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = (struct neighbour *)ptr;
		break;
	default:
		return NOTIFY_DONE;
	}

	flow.daddr = *(__be32 *)n->primary_key;

	/* Only concerned with route changes for representors. */
	if (!nfp_netdev_is_nfp_repr(n->dev))
		return NOTIFY_DONE;

	app_priv = container_of(nb, struct nfp_flower_priv, nfp_tun_neigh_nb);
	app = app_priv->app;

	/* Only concerned with changes to routes already added to NFP. */
	if (!nfp_tun_has_route(app, flow.daddr))
		return NOTIFY_DONE;

#if IS_ENABLED(CONFIG_INET)
	/* Do a route lookup to populate flow data. */
	rt = ip_route_output_key(dev_net(n->dev), &flow);
	err = PTR_ERR_OR_ZERO(rt);
	if (err)
		return NOTIFY_DONE;

	ip_rt_put(rt);
#else
	return NOTIFY_DONE;
#endif

	flow.flowi4_proto = IPPROTO_UDP;
	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);

	return NOTIFY_OK;
}

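/* Handle a route request from the firmware: look up a route and neighbour
 * for the requested destination in the namespace of the ingress port and
 * reply with a neighbour entry, or warn if no route is found.
 */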
void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_req_route_ipv4 *payload;
	struct net_device *netdev;
	struct flowi4 flow = {};
	struct neighbour *n;
	struct rtable *rt;
	int err;

	payload = nfp_flower_cmsg_get_data(skb);

	netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
	if (!netdev)
		goto route_fail_warning;

	flow.daddr = payload->ipv4_addr;
	flow.flowi4_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET)
	/* Do a route lookup on same namespace as ingress port. */
	rt = ip_route_output_key(dev_net(netdev), &flow);
	err = PTR_ERR_OR_ZERO(rt);
	if (err)
		goto route_fail_warning;
#else
	goto route_fail_warning;
#endif

	/* Get the neighbour entry for the lookup */
	n = dst_neigh_lookup(&rt->dst, &flow.daddr);
	ip_rt_put(rt);
	if (!n)
		goto route_fail_warning;
	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL);
	neigh_release(n);
	return;

route_fail_warning:
	nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}

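/* Write the full list of offloaded tunnel endpoint IPv4 addresses to the
 * firmware. The list is always sent in its entirety rather than as deltas.
 */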
static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct nfp_tun_ipv4_addr payload;
	struct list_head *ptr, *storage;
	int count;

	memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
	mutex_lock(&priv->nfp_ipv4_off_lock);
	count = 0;
	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
		if (count >= NFP_FL_IPV4_ADDRS_MAX) {
			mutex_unlock(&priv->nfp_ipv4_off_lock);
			nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
			return;
		}
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		payload.ipv4_addr[count++] = entry->ipv4_addr;
	}
	payload.count = cpu_to_be32(count);
	mutex_unlock(&priv->nfp_ipv4_off_lock);

	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
				 sizeof(struct nfp_tun_ipv4_addr),
				 &payload, GFP_KERNEL);
}

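/* Tunnel endpoint IPs are reference counted so an address shared by
 * several rules is only held in the offload list once.
 */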
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->nfp_ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count++;
			mutex_unlock(&priv->nfp_ipv4_off_lock);
			return;
		}
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		mutex_unlock(&priv->nfp_ipv4_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
		return;
	}
	entry->ipv4_addr = ipv4;
	entry->ref_count = 1;
	list_add_tail(&entry->list, &priv->nfp_ipv4_off_list);
	mutex_unlock(&priv->nfp_ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->nfp_ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count--;
			if (!entry->ref_count) {
				list_del(&entry->list);
				kfree(entry);
			}
			break;
		}
	}
	mutex_unlock(&priv->nfp_ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

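/* Push the pending list of MAC addresses to the firmware. On success the
 * local list is flushed; on failure it is kept so the write can be retried
 * later.
 */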
void nfp_tunnel_write_macs(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_tun_mac_offload_entry *entry;
	struct nfp_tun_mac_addr *payload;
	struct list_head *ptr, *storage;
	int mac_count, err, pay_size;

	mutex_lock(&priv->nfp_mac_off_lock);
	if (!priv->nfp_mac_off_count) {
		mutex_unlock(&priv->nfp_mac_off_lock);
		return;
	}

	pay_size = sizeof(struct nfp_tun_mac_addr) +
		   sizeof(struct index_mac_addr) * priv->nfp_mac_off_count;

	payload = kzalloc(pay_size, GFP_KERNEL);
	if (!payload) {
		mutex_unlock(&priv->nfp_mac_off_lock);
		return;
	}

	payload->count = cpu_to_be16(priv->nfp_mac_off_count);

	mac_count = 0;
	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
		entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
				   list);
		payload->addresses[mac_count].index = entry->index;
		ether_addr_copy(payload->addresses[mac_count].addr,
				entry->addr);
		mac_count++;
	}

	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
				       pay_size, payload, GFP_KERNEL);

	kfree(payload);

	if (err) {
		mutex_unlock(&priv->nfp_mac_off_lock);
		/* Write failed so retain list for future retry. */
		return;
	}

	/* If list was successfully offloaded, flush it. */
	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
		entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
				   list);
		list_del(&entry->list);
		kfree(entry);
	}

	priv->nfp_mac_off_count = 0;
	mutex_unlock(&priv->nfp_mac_off_lock);
}

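/* Netdevs that are not NFP representors have no port id, so an 8-bit index
 * is allocated from an IDA and remembered per ifindex to identify their MAC
 * on the firmware.
 */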
static int nfp_tun_get_mac_idx(struct nfp_app *app, int ifindex)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_tun_mac_non_nfp_idx *entry;
	struct list_head *ptr, *storage;
	int idx;

	mutex_lock(&priv->nfp_mac_index_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
		entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
		if (entry->ifindex == ifindex) {
			idx = entry->index;
			mutex_unlock(&priv->nfp_mac_index_lock);
			return idx;
		}
	}

	idx = ida_simple_get(&priv->nfp_mac_off_ids, 0,
			     NFP_MAX_MAC_INDEX, GFP_KERNEL);
	if (idx < 0) {
		mutex_unlock(&priv->nfp_mac_index_lock);
		return idx;
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		mutex_unlock(&priv->nfp_mac_index_lock);
		return -ENOMEM;
	}
	entry->ifindex = ifindex;
	entry->index = idx;
	list_add_tail(&entry->list, &priv->nfp_mac_index_list);
	mutex_unlock(&priv->nfp_mac_index_lock);

	return idx;
}

static void nfp_tun_del_mac_idx(struct nfp_app *app, int ifindex)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_tun_mac_non_nfp_idx *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->nfp_mac_index_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
		entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
		if (entry->ifindex == ifindex) {
			ida_simple_remove(&priv->nfp_mac_off_ids,
					  entry->index);
			list_del(&entry->list);
			kfree(entry);
			break;
		}
	}
	mutex_unlock(&priv->nfp_mac_index_lock);
}

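/* Queue the MAC address of a netdev for offload. Representor MACs are
 * indexed by their port id; other offloadable netdevs get an index from
 * nfp_tun_get_mac_idx().
 */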
static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
					    struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_tun_mac_offload_entry *entry;
	u16 nfp_mac_idx;
	int port = 0;

	/* Check if MAC should be offloaded. */
	if (!is_valid_ether_addr(netdev->dev_addr))
		return;

	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_repr_get_port_id(netdev);
	else if (!nfp_tun_is_netdev_to_offload(netdev))
		return;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		nfp_flower_cmsg_warn(app, "Mem fail when offloading MAC.\n");
		return;
	}

	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
	    NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT) {
		nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
	} else if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
		   NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT) {
		port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port);
		nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT;
	} else {
		/* Must assign our own unique 8-bit index. */
		int idx = nfp_tun_get_mac_idx(app, netdev->ifindex);

		if (idx < 0) {
			nfp_flower_cmsg_warn(app, "Can't assign non-repr MAC index.\n");
			kfree(entry);
			return;
		}
		nfp_mac_idx = idx << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
	}

	entry->index = cpu_to_be16(nfp_mac_idx);
	ether_addr_copy(entry->addr, netdev->dev_addr);

	mutex_lock(&priv->nfp_mac_off_lock);
	priv->nfp_mac_off_count++;
	list_add_tail(&entry->list, &priv->nfp_mac_off_list);
	mutex_unlock(&priv->nfp_mac_off_lock);
}

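/* Netdev notifier keeping the offloaded MAC list in sync with netdev
 * registration state and address changes.
 */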
static int nfp_tun_mac_event_handler(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct nfp_flower_priv *app_priv;
	struct net_device *netdev;
	struct nfp_app *app;

	if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
		app_priv = container_of(nb, struct nfp_flower_priv,
					nfp_tun_mac_nb);
		app = app_priv->app;
		netdev = netdev_notifier_info_to_dev(ptr);

		/* If non-nfp netdev then free its offload index. */
		if (nfp_tun_is_netdev_to_offload(netdev))
			nfp_tun_del_mac_idx(app, netdev->ifindex);
	} else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
		   event == NETDEV_REGISTER) {
		app_priv = container_of(nb, struct nfp_flower_priv,
					nfp_tun_mac_nb);
		app = app_priv->app;
		netdev = netdev_notifier_info_to_dev(ptr);

		nfp_tun_add_to_mac_offload_list(netdev, app);

		/* Force a list write to keep NFP up to date. */
		nfp_tunnel_write_macs(app);
	}
	return NOTIFY_OK;
}

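/* Set up tunnel offload state: initialise the MAC, IPv4 and neighbour
 * bookkeeping, register the notifiers and seed the MAC list from netdevs
 * that already exist.
 */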
int nfp_tunnel_config_start(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *netdev;
	int err;

	/* Initialise priv data for MAC offloading. */
	priv->nfp_mac_off_count = 0;
	mutex_init(&priv->nfp_mac_off_lock);
	INIT_LIST_HEAD(&priv->nfp_mac_off_list);
	priv->nfp_tun_mac_nb.notifier_call = nfp_tun_mac_event_handler;
	mutex_init(&priv->nfp_mac_index_lock);
	INIT_LIST_HEAD(&priv->nfp_mac_index_list);
	ida_init(&priv->nfp_mac_off_ids);

	/* Initialise priv data for IPv4 offloading. */
	mutex_init(&priv->nfp_ipv4_off_lock);
	INIT_LIST_HEAD(&priv->nfp_ipv4_off_list);

	/* Initialise priv data for neighbour offloading. */
	spin_lock_init(&priv->nfp_neigh_off_lock);
	INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
	priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;

	err = register_netdevice_notifier(&priv->nfp_tun_mac_nb);
	if (err)
		goto err_free_mac_ida;

	err = register_netevent_notifier(&priv->nfp_tun_neigh_nb);
	if (err)
		goto err_unreg_mac_nb;

	/* Parse netdevs already registered for MACs that need to be offloaded. */
	rtnl_lock();
	for_each_netdev(&init_net, netdev)
		nfp_tun_add_to_mac_offload_list(netdev, app);
	rtnl_unlock();

	return 0;

err_unreg_mac_nb:
	unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
err_free_mac_ida:
	ida_destroy(&priv->nfp_mac_off_ids);
	return err;
}

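/* Tear down tunnel offload state: unregister the notifiers and free all
 * cached MAC, MAC index, IPv4 and route entries.
 */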
void nfp_tunnel_config_stop(struct nfp_app *app)
{
	struct nfp_tun_mac_offload_entry *mac_entry;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *route_entry;
	struct nfp_tun_mac_non_nfp_idx *mac_idx;
	struct nfp_ipv4_addr_entry *ip_entry;
	struct list_head *ptr, *storage;

	unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
	unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);

	/* Free any memory that may be occupied by MAC list. */
	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
		mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
				       list);
		list_del(&mac_entry->list);
		kfree(mac_entry);
	}

	/* Free any memory that may be occupied by MAC index list. */
	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
		mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx,
				     list);
		list_del(&mac_idx->list);
		kfree(mac_idx);
	}

	ida_destroy(&priv->nfp_mac_off_ids);

	/* Free any memory that may be occupied by ipv4 list. */
	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
		ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		list_del(&ip_entry->list);
		kfree(ip_entry);
	}

	/* Free any memory that may be occupied by the route list. */
	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
		route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
					 list);
		list_del(&route_entry->list);
		kfree(route_entry);
	}
}