1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2011-2018 B.A.T.M.A.N. contributors:
3 *
4 * Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 */
18
19 #include "bridge_loop_avoidance.h"
20 #include "main.h"
21
22 #include <linux/atomic.h>
23 #include <linux/byteorder/generic.h>
24 #include <linux/compiler.h>
25 #include <linux/crc16.h>
26 #include <linux/errno.h>
27 #include <linux/etherdevice.h>
28 #include <linux/gfp.h>
29 #include <linux/if_arp.h>
30 #include <linux/if_ether.h>
31 #include <linux/if_vlan.h>
32 #include <linux/jhash.h>
33 #include <linux/jiffies.h>
34 #include <linux/kernel.h>
35 #include <linux/kref.h>
36 #include <linux/list.h>
37 #include <linux/lockdep.h>
38 #include <linux/netdevice.h>
39 #include <linux/netlink.h>
40 #include <linux/rculist.h>
41 #include <linux/rcupdate.h>
42 #include <linux/seq_file.h>
43 #include <linux/skbuff.h>
44 #include <linux/slab.h>
45 #include <linux/spinlock.h>
46 #include <linux/stddef.h>
47 #include <linux/string.h>
48 #include <linux/workqueue.h>
49 #include <net/arp.h>
50 #include <net/genetlink.h>
51 #include <net/netlink.h>
52 #include <net/sock.h>
53 #include <uapi/linux/batadv_packet.h>
54 #include <uapi/linux/batman_adv.h>
55
56 #include "hard-interface.h"
57 #include "hash.h"
58 #include "log.h"
59 #include "netlink.h"
60 #include "originator.h"
61 #include "soft-interface.h"
62 #include "sysfs.h"
63 #include "translation-table.h"
64
65 static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
66
67 static void batadv_bla_periodic_work(struct work_struct *work);
68 static void
69 batadv_bla_send_announce(struct batadv_priv *bat_priv,
70 struct batadv_bla_backbone_gw *backbone_gw);
71
72 /**
73 * batadv_choose_claim() - choose the right bucket for a claim.
74 * @data: data to hash
75 * @size: size of the hash table
76 *
77 * Return: the hash index of the claim
78 */
static inline u32 batadv_choose_claim(const void *data, u32 size)
80 {
81 struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
82 u32 hash = 0;
83
84 hash = jhash(&claim->addr, sizeof(claim->addr), hash);
85 hash = jhash(&claim->vid, sizeof(claim->vid), hash);
86
87 return hash % size;
88 }
89
90 /**
91 * batadv_choose_backbone_gw() - choose the right bucket for a backbone gateway.
92 * @data: data to hash
93 * @size: size of the hash table
94 *
95 * Return: the hash index of the backbone gateway
96 */
static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
98 {
99 const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
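	/* The cast above reuses the claim layout for a backbone gateway; this
	 * relies on struct batadv_bla_backbone_gw and struct batadv_bla_claim
	 * both starting with a MAC address followed by the vid, so the same
	 * two fields end up being hashed for either hash table.
	 */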
100 u32 hash = 0;
101
102 hash = jhash(&claim->addr, sizeof(claim->addr), hash);
103 hash = jhash(&claim->vid, sizeof(claim->vid), hash);
104
105 return hash % size;
106 }
107
108 /**
109 * batadv_compare_backbone_gw() - compare address and vid of two backbone gws
110 * @node: list node of the first entry to compare
111 * @data2: pointer to the second backbone gateway
112 *
113 * Return: true if the backbones have the same data, false otherwise
114 */
static bool batadv_compare_backbone_gw(const struct hlist_node *node,
116 const void *data2)
117 {
118 const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
119 hash_entry);
120 const struct batadv_bla_backbone_gw *gw1 = data1;
121 const struct batadv_bla_backbone_gw *gw2 = data2;
122
123 if (!batadv_compare_eth(gw1->orig, gw2->orig))
124 return false;
125
126 if (gw1->vid != gw2->vid)
127 return false;
128
129 return true;
130 }
131
132 /**
133 * batadv_compare_claim() - compare address and vid of two claims
134 * @node: list node of the first entry to compare
 * @data2: pointer to the second claim
 *
 * Return: true if the claims have the same data, false otherwise
138 */
static bool batadv_compare_claim(const struct hlist_node *node,
140 const void *data2)
141 {
142 const void *data1 = container_of(node, struct batadv_bla_claim,
143 hash_entry);
144 const struct batadv_bla_claim *cl1 = data1;
145 const struct batadv_bla_claim *cl2 = data2;
146
147 if (!batadv_compare_eth(cl1->addr, cl2->addr))
148 return false;
149
150 if (cl1->vid != cl2->vid)
151 return false;
152
153 return true;
154 }
155
156 /**
157 * batadv_backbone_gw_release() - release backbone gw from lists and queue for
158 * free after rcu grace period
159 * @ref: kref pointer of the backbone gw
160 */
static void batadv_backbone_gw_release(struct kref *ref)
162 {
163 struct batadv_bla_backbone_gw *backbone_gw;
164
165 backbone_gw = container_of(ref, struct batadv_bla_backbone_gw,
166 refcount);
167
168 kfree_rcu(backbone_gw, rcu);
169 }
170
171 /**
172 * batadv_backbone_gw_put() - decrement the backbone gw refcounter and possibly
173 * release it
174 * @backbone_gw: backbone gateway to be free'd
175 */
static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
177 {
178 kref_put(&backbone_gw->refcount, batadv_backbone_gw_release);
179 }
180
181 /**
182 * batadv_claim_release() - release claim from lists and queue for free after
183 * rcu grace period
184 * @ref: kref pointer of the claim
185 */
static void batadv_claim_release(struct kref *ref)
187 {
188 struct batadv_bla_claim *claim;
189 struct batadv_bla_backbone_gw *old_backbone_gw;
190
191 claim = container_of(ref, struct batadv_bla_claim, refcount);
192
193 spin_lock_bh(&claim->backbone_lock);
194 old_backbone_gw = claim->backbone_gw;
195 claim->backbone_gw = NULL;
196 spin_unlock_bh(&claim->backbone_lock);
197
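	/* XOR-ing the same crc16 value a second time removes this claim's
	 * contribution from the old backbone gateway's running checksum
	 * (it was added with the same XOR in batadv_bla_add_claim()).
	 */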
198 spin_lock_bh(&old_backbone_gw->crc_lock);
199 old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
200 spin_unlock_bh(&old_backbone_gw->crc_lock);
201
202 batadv_backbone_gw_put(old_backbone_gw);
203
204 kfree_rcu(claim, rcu);
205 }
206
207 /**
208 * batadv_claim_put() - decrement the claim refcounter and possibly release it
209 * @claim: claim to be free'd
210 */
static void batadv_claim_put(struct batadv_bla_claim *claim)
212 {
213 kref_put(&claim->refcount, batadv_claim_release);
214 }
215
216 /**
217 * batadv_claim_hash_find() - looks for a claim in the claim hash
218 * @bat_priv: the bat priv with all the soft interface information
219 * @data: search data (may be local/static data)
220 *
221 * Return: claim if found or NULL otherwise.
222 */
223 static struct batadv_bla_claim *
batadv_claim_hash_find(struct batadv_priv *bat_priv,
225 struct batadv_bla_claim *data)
226 {
227 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
228 struct hlist_head *head;
229 struct batadv_bla_claim *claim;
230 struct batadv_bla_claim *claim_tmp = NULL;
231 int index;
232
233 if (!hash)
234 return NULL;
235
236 index = batadv_choose_claim(data, hash->size);
237 head = &hash->table[index];
238
239 rcu_read_lock();
240 hlist_for_each_entry_rcu(claim, head, hash_entry) {
241 if (!batadv_compare_claim(&claim->hash_entry, data))
242 continue;
243
244 if (!kref_get_unless_zero(&claim->refcount))
245 continue;
246
247 claim_tmp = claim;
248 break;
249 }
250 rcu_read_unlock();
251
252 return claim_tmp;
253 }
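
/* A hedged usage sketch (hypothetical caller, not part of this file): the
 * search key may live on the stack because only addr and vid are hashed and
 * compared, and the returned reference must be dropped again.
 *
 *	struct batadv_bla_claim search = { .vid = vid };
 *	struct batadv_bla_claim *claim;
 *
 *	ether_addr_copy(search.addr, mac);
 *	claim = batadv_claim_hash_find(bat_priv, &search);
 *	if (claim) {
 *		... use the claim ...
 *		batadv_claim_put(claim);
 *	}
 */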
254
255 /**
256 * batadv_backbone_hash_find() - looks for a backbone gateway in the hash
257 * @bat_priv: the bat priv with all the soft interface information
258 * @addr: the address of the originator
259 * @vid: the VLAN ID
260 *
261 * Return: backbone gateway if found or NULL otherwise
262 */
263 static struct batadv_bla_backbone_gw *
batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
265 unsigned short vid)
266 {
267 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
268 struct hlist_head *head;
269 struct batadv_bla_backbone_gw search_entry, *backbone_gw;
270 struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
271 int index;
272
273 if (!hash)
274 return NULL;
275
276 ether_addr_copy(search_entry.orig, addr);
277 search_entry.vid = vid;
278
279 index = batadv_choose_backbone_gw(&search_entry, hash->size);
280 head = &hash->table[index];
281
282 rcu_read_lock();
283 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
284 if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
285 &search_entry))
286 continue;
287
288 if (!kref_get_unless_zero(&backbone_gw->refcount))
289 continue;
290
291 backbone_gw_tmp = backbone_gw;
292 break;
293 }
294 rcu_read_unlock();
295
296 return backbone_gw_tmp;
297 }
298
299 /**
300 * batadv_bla_del_backbone_claims() - delete all claims for a backbone
301 * @backbone_gw: backbone gateway where the claims should be removed
302 */
303 static void
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
305 {
306 struct batadv_hashtable *hash;
307 struct hlist_node *node_tmp;
308 struct hlist_head *head;
309 struct batadv_bla_claim *claim;
310 int i;
311 spinlock_t *list_lock; /* protects write access to the hash lists */
312
313 hash = backbone_gw->bat_priv->bla.claim_hash;
314 if (!hash)
315 return;
316
317 for (i = 0; i < hash->size; i++) {
318 head = &hash->table[i];
319 list_lock = &hash->list_locks[i];
320
321 spin_lock_bh(list_lock);
322 hlist_for_each_entry_safe(claim, node_tmp,
323 head, hash_entry) {
324 if (claim->backbone_gw != backbone_gw)
325 continue;
326
327 batadv_claim_put(claim);
328 hlist_del_rcu(&claim->hash_entry);
329 }
330 spin_unlock_bh(list_lock);
331 }
332
333 /* all claims gone, initialize CRC */
334 spin_lock_bh(&backbone_gw->crc_lock);
335 backbone_gw->crc = BATADV_BLA_CRC_INIT;
336 spin_unlock_bh(&backbone_gw->crc_lock);
337 }
338
339 /**
340 * batadv_bla_send_claim() - sends a claim frame according to the provided info
341 * @bat_priv: the bat priv with all the soft interface information
342 * @mac: the mac address to be announced within the claim
343 * @vid: the VLAN ID
344 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
345 */
static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
347 unsigned short vid, int claimtype)
348 {
349 struct sk_buff *skb;
350 struct ethhdr *ethhdr;
351 struct batadv_hard_iface *primary_if;
352 struct net_device *soft_iface;
353 u8 *hw_src;
354 struct batadv_bla_claim_dst local_claim_dest;
355 __be32 zeroip = 0;
356
357 primary_if = batadv_primary_if_get_selected(bat_priv);
358 if (!primary_if)
359 return;
360
361 memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
362 sizeof(local_claim_dest));
363 local_claim_dest.type = claimtype;
364
365 soft_iface = primary_if->soft_iface;
366
367 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
368 /* IP DST: 0.0.0.0 */
369 zeroip,
370 primary_if->soft_iface,
371 /* IP SRC: 0.0.0.0 */
372 zeroip,
373 /* Ethernet DST: Broadcast */
374 NULL,
375 /* Ethernet SRC/HW SRC: originator mac */
376 primary_if->net_dev->dev_addr,
377 /* HW DST: FF:43:05:XX:YY:YY
378 * with XX = claim type
379 * and YY:YY = group id
380 */
381 (u8 *)&local_claim_dest);
382
383 if (!skb)
384 goto out;
385
386 ethhdr = (struct ethhdr *)skb->data;
387 hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
388
389 /* now we pretend that the client would have sent this ... */
390 switch (claimtype) {
391 case BATADV_CLAIM_TYPE_CLAIM:
392 /* normal claim frame
		 * set Ethernet SRC to the client's mac
394 */
395 ether_addr_copy(ethhdr->h_source, mac);
396 batadv_dbg(BATADV_DBG_BLA, bat_priv,
397 "%s(): CLAIM %pM on vid %d\n", __func__, mac,
398 batadv_print_vid(vid));
399 break;
400 case BATADV_CLAIM_TYPE_UNCLAIM:
401 /* unclaim frame
		 * set HW SRC to the client's mac
403 */
404 ether_addr_copy(hw_src, mac);
405 batadv_dbg(BATADV_DBG_BLA, bat_priv,
406 "%s(): UNCLAIM %pM on vid %d\n", __func__, mac,
407 batadv_print_vid(vid));
408 break;
409 case BATADV_CLAIM_TYPE_ANNOUNCE:
410 /* announcement frame
		 * set HW SRC to the special mac containing the crc
412 */
413 ether_addr_copy(hw_src, mac);
414 batadv_dbg(BATADV_DBG_BLA, bat_priv,
415 "%s(): ANNOUNCE of %pM on vid %d\n", __func__,
416 ethhdr->h_source, batadv_print_vid(vid));
417 break;
418 case BATADV_CLAIM_TYPE_REQUEST:
419 /* request frame
420 * set HW SRC and header destination to the receiving backbone
		 * gw's mac
422 */
423 ether_addr_copy(hw_src, mac);
424 ether_addr_copy(ethhdr->h_dest, mac);
425 batadv_dbg(BATADV_DBG_BLA, bat_priv,
426 "%s(): REQUEST of %pM to %pM on vid %d\n", __func__,
427 ethhdr->h_source, ethhdr->h_dest,
428 batadv_print_vid(vid));
429 break;
430 case BATADV_CLAIM_TYPE_LOOPDETECT:
431 ether_addr_copy(ethhdr->h_source, mac);
432 batadv_dbg(BATADV_DBG_BLA, bat_priv,
433 "%s(): LOOPDETECT of %pM to %pM on vid %d\n",
434 __func__, ethhdr->h_source, ethhdr->h_dest,
435 batadv_print_vid(vid));
436
437 break;
438 }
439
440 if (vid & BATADV_VLAN_HAS_TAG) {
441 skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
442 vid & VLAN_VID_MASK);
443 if (!skb)
444 goto out;
445 }
446
447 skb_reset_mac_header(skb);
448 skb->protocol = eth_type_trans(skb, soft_iface);
449 batadv_inc_counter(bat_priv, BATADV_CNT_RX);
450 batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
451 skb->len + ETH_HLEN);
452
453 netif_rx(skb);
454 out:
455 if (primary_if)
456 batadv_hardif_put(primary_if);
457 }
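
/* Summary of the resulting frame (illustrative; the switch above applies the
 * authoritative per-type tweaks): an ARP reply with sender and target IP set
 * to 0.0.0.0, a broadcast Ethernet destination (except for REQUEST, which
 * rewrites it), and the ARP target hardware address carrying
 * struct batadv_bla_claim_dst, i.e. the magic 0xff 0x43 0x05, the claim type
 * and the 16-bit group id in network byte order.
 */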
458
459 /**
460 * batadv_bla_loopdetect_report() - worker for reporting the loop
461 * @work: work queue item
462 *
 * Throws an uevent, as the loopdetect check function can't do that itself
 * since throwing uevents may sleep and the check must not.
465 */
static void batadv_bla_loopdetect_report(struct work_struct *work)
467 {
468 struct batadv_bla_backbone_gw *backbone_gw;
469 struct batadv_priv *bat_priv;
470 char vid_str[6] = { '\0' };
471
472 backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
473 report_work);
474 bat_priv = backbone_gw->bat_priv;
475
476 batadv_info(bat_priv->soft_iface,
477 "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
478 batadv_print_vid(backbone_gw->vid));
479 snprintf(vid_str, sizeof(vid_str), "%d",
480 batadv_print_vid(backbone_gw->vid));
481 vid_str[sizeof(vid_str) - 1] = 0;
482
483 batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
484 vid_str);
485
486 batadv_backbone_gw_put(backbone_gw);
487 }
488
489 /**
490 * batadv_bla_get_backbone_gw() - finds or creates a backbone gateway
491 * @bat_priv: the bat priv with all the soft interface information
492 * @orig: the mac address of the originator
493 * @vid: the VLAN ID
494 * @own_backbone: set if the requested backbone is local
495 *
496 * Return: the (possibly created) backbone gateway or NULL on error
497 */
498 static struct batadv_bla_backbone_gw *
batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
500 unsigned short vid, bool own_backbone)
501 {
502 struct batadv_bla_backbone_gw *entry;
503 struct batadv_orig_node *orig_node;
504 int hash_added;
505
506 entry = batadv_backbone_hash_find(bat_priv, orig, vid);
507
508 if (entry)
509 return entry;
510
511 batadv_dbg(BATADV_DBG_BLA, bat_priv,
512 "%s(): not found (%pM, %d), creating new entry\n", __func__,
513 orig, batadv_print_vid(vid));
514
515 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
516 if (!entry)
517 return NULL;
518
519 entry->vid = vid;
520 entry->lasttime = jiffies;
521 entry->crc = BATADV_BLA_CRC_INIT;
522 entry->bat_priv = bat_priv;
523 spin_lock_init(&entry->crc_lock);
524 atomic_set(&entry->request_sent, 0);
525 atomic_set(&entry->wait_periods, 0);
526 ether_addr_copy(entry->orig, orig);
527 INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);
528 kref_init(&entry->refcount);
529
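	/* one extra reference for the hash table; the initial reference from
	 * kref_init() above is handed over to the caller
	 */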
530 kref_get(&entry->refcount);
531 hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
532 batadv_compare_backbone_gw,
533 batadv_choose_backbone_gw, entry,
534 &entry->hash_entry);
535
536 if (unlikely(hash_added != 0)) {
537 /* hash failed, free the structure */
538 kfree(entry);
539 return NULL;
540 }
541
542 /* this is a gateway now, remove any TT entry on this VLAN */
543 orig_node = batadv_orig_hash_find(bat_priv, orig);
544 if (orig_node) {
545 batadv_tt_global_del_orig(bat_priv, orig_node, vid,
546 "became a backbone gateway");
547 batadv_orig_node_put(orig_node);
548 }
549
550 if (own_backbone) {
551 batadv_bla_send_announce(bat_priv, entry);
552
553 /* this will be decreased in the worker thread */
554 atomic_inc(&entry->request_sent);
555 atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
556 atomic_inc(&bat_priv->bla.num_requests);
557 }
558
559 return entry;
560 }
561
562 /**
563 * batadv_bla_update_own_backbone_gw() - updates the own backbone gw for a VLAN
564 * @bat_priv: the bat priv with all the soft interface information
565 * @primary_if: the selected primary interface
566 * @vid: VLAN identifier
567 *
 * Update or add our own backbone gw to make sure we announce ourselves
 * wherever we receive other backbone gws.
570 */
571 static void
batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
573 struct batadv_hard_iface *primary_if,
574 unsigned short vid)
575 {
576 struct batadv_bla_backbone_gw *backbone_gw;
577
578 backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
579 primary_if->net_dev->dev_addr,
580 vid, true);
581 if (unlikely(!backbone_gw))
582 return;
583
584 backbone_gw->lasttime = jiffies;
585 batadv_backbone_gw_put(backbone_gw);
586 }
587
588 /**
589 * batadv_bla_answer_request() - answer a bla request by sending own claims
590 * @bat_priv: the bat priv with all the soft interface information
591 * @primary_if: interface where the request came on
592 * @vid: the vid where the request came on
593 *
594 * Repeat all of our own claims, and finally send an ANNOUNCE frame
595 * to allow the requester another check if the CRC is correct now.
596 */
static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
598 struct batadv_hard_iface *primary_if,
599 unsigned short vid)
600 {
601 struct hlist_head *head;
602 struct batadv_hashtable *hash;
603 struct batadv_bla_claim *claim;
604 struct batadv_bla_backbone_gw *backbone_gw;
605 int i;
606
607 batadv_dbg(BATADV_DBG_BLA, bat_priv,
608 "%s(): received a claim request, send all of our own claims again\n",
609 __func__);
610
611 backbone_gw = batadv_backbone_hash_find(bat_priv,
612 primary_if->net_dev->dev_addr,
613 vid);
614 if (!backbone_gw)
615 return;
616
617 hash = bat_priv->bla.claim_hash;
618 for (i = 0; i < hash->size; i++) {
619 head = &hash->table[i];
620
621 rcu_read_lock();
622 hlist_for_each_entry_rcu(claim, head, hash_entry) {
623 /* only own claims are interesting */
624 if (claim->backbone_gw != backbone_gw)
625 continue;
626
627 batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
628 BATADV_CLAIM_TYPE_CLAIM);
629 }
630 rcu_read_unlock();
631 }
632
633 /* finally, send an announcement frame */
634 batadv_bla_send_announce(bat_priv, backbone_gw);
635 batadv_backbone_gw_put(backbone_gw);
636 }
637
638 /**
639 * batadv_bla_send_request() - send a request to repeat claims
640 * @backbone_gw: the backbone gateway from whom we are out of sync
641 *
642 * When the crc is wrong, ask the backbone gateway for a full table update.
 * After the request, it will repeat all of its own claims and finally
644 * send an announcement claim with which we can check again.
645 */
static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
647 {
648 /* first, remove all old entries */
649 batadv_bla_del_backbone_claims(backbone_gw);
650
651 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
652 "Sending REQUEST to %pM\n", backbone_gw->orig);
653
654 /* send request */
655 batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
656 backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);
657
658 /* no local broadcasts should be sent or received, for now. */
659 if (!atomic_read(&backbone_gw->request_sent)) {
660 atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
661 atomic_set(&backbone_gw->request_sent, 1);
662 }
663 }
664
665 /**
666 * batadv_bla_send_announce() - Send an announcement frame
667 * @bat_priv: the bat priv with all the soft interface information
668 * @backbone_gw: our backbone gateway which should be announced
669 */
static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
671 struct batadv_bla_backbone_gw *backbone_gw)
672 {
673 u8 mac[ETH_ALEN];
674 __be16 crc;
675
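	/* the announced "client" MAC is 43:05:43:05:XX:XX, i.e. the
	 * batadv_announce_mac prefix followed by the backbone CRC in
	 * network byte order
	 */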
676 memcpy(mac, batadv_announce_mac, 4);
677 spin_lock_bh(&backbone_gw->crc_lock);
678 crc = htons(backbone_gw->crc);
679 spin_unlock_bh(&backbone_gw->crc_lock);
680 memcpy(&mac[4], &crc, 2);
681
682 batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
683 BATADV_CLAIM_TYPE_ANNOUNCE);
684 }
685
686 /**
687 * batadv_bla_add_claim() - Adds a claim in the claim hash
688 * @bat_priv: the bat priv with all the soft interface information
689 * @mac: the mac address of the claim
690 * @vid: the VLAN ID of the frame
691 * @backbone_gw: the backbone gateway which claims it
692 */
static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
694 const u8 *mac, const unsigned short vid,
695 struct batadv_bla_backbone_gw *backbone_gw)
696 {
697 struct batadv_bla_backbone_gw *old_backbone_gw;
698 struct batadv_bla_claim *claim;
699 struct batadv_bla_claim search_claim;
700 bool remove_crc = false;
701 int hash_added;
702
703 ether_addr_copy(search_claim.addr, mac);
704 search_claim.vid = vid;
705 claim = batadv_claim_hash_find(bat_priv, &search_claim);
706
707 /* create a new claim entry if it does not exist yet. */
708 if (!claim) {
709 claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
710 if (!claim)
711 return;
712
713 ether_addr_copy(claim->addr, mac);
714 spin_lock_init(&claim->backbone_lock);
715 claim->vid = vid;
716 claim->lasttime = jiffies;
717 kref_get(&backbone_gw->refcount);
718 claim->backbone_gw = backbone_gw;
719 kref_init(&claim->refcount);
720
721 batadv_dbg(BATADV_DBG_BLA, bat_priv,
722 "%s(): adding new entry %pM, vid %d to hash ...\n",
723 __func__, mac, batadv_print_vid(vid));
724
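		/* one reference for the hash table; the initial reference from
		 * kref_init() is kept by the local claim pointer and dropped
		 * at claim_free_ref below
		 */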
725 kref_get(&claim->refcount);
726 hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
727 batadv_compare_claim,
728 batadv_choose_claim, claim,
729 &claim->hash_entry);
730
731 if (unlikely(hash_added != 0)) {
732 /* only local changes happened. */
733 kfree(claim);
734 return;
735 }
736 } else {
737 claim->lasttime = jiffies;
738 if (claim->backbone_gw == backbone_gw)
739 /* no need to register a new backbone */
740 goto claim_free_ref;
741
742 batadv_dbg(BATADV_DBG_BLA, bat_priv,
743 "%s(): changing ownership for %pM, vid %d to gw %pM\n",
744 __func__, mac, batadv_print_vid(vid),
745 backbone_gw->orig);
746
747 remove_crc = true;
748 }
749
750 /* replace backbone_gw atomically and adjust reference counters */
751 spin_lock_bh(&claim->backbone_lock);
752 old_backbone_gw = claim->backbone_gw;
753 kref_get(&backbone_gw->refcount);
754 claim->backbone_gw = backbone_gw;
755 spin_unlock_bh(&claim->backbone_lock);
756
757 if (remove_crc) {
758 /* remove claim address from old backbone_gw */
759 spin_lock_bh(&old_backbone_gw->crc_lock);
760 old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
761 spin_unlock_bh(&old_backbone_gw->crc_lock);
762 }
763
764 batadv_backbone_gw_put(old_backbone_gw);
765
766 /* add claim address to new backbone_gw */
767 spin_lock_bh(&backbone_gw->crc_lock);
768 backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
769 spin_unlock_bh(&backbone_gw->crc_lock);
770 backbone_gw->lasttime = jiffies;
771
772 claim_free_ref:
773 batadv_claim_put(claim);
774 }
775
776 /**
777 * batadv_bla_claim_get_backbone_gw() - Get valid reference for backbone_gw of
778 * claim
779 * @claim: claim whose backbone_gw should be returned
780 *
781 * Return: valid reference to claim::backbone_gw
782 */
783 static struct batadv_bla_backbone_gw *
batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
785 {
786 struct batadv_bla_backbone_gw *backbone_gw;
787
788 spin_lock_bh(&claim->backbone_lock);
789 backbone_gw = claim->backbone_gw;
790 kref_get(&backbone_gw->refcount);
791 spin_unlock_bh(&claim->backbone_lock);
792
793 return backbone_gw;
794 }
795
796 /**
797 * batadv_bla_del_claim() - delete a claim from the claim hash
798 * @bat_priv: the bat priv with all the soft interface information
799 * @mac: mac address of the claim to be removed
800 * @vid: VLAN id for the claim to be removed
801 */
static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
803 const u8 *mac, const unsigned short vid)
804 {
805 struct batadv_bla_claim search_claim, *claim;
806
807 ether_addr_copy(search_claim.addr, mac);
808 search_claim.vid = vid;
809 claim = batadv_claim_hash_find(bat_priv, &search_claim);
810 if (!claim)
811 return;
812
813 batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
814 mac, batadv_print_vid(vid));
815
816 batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
817 batadv_choose_claim, claim);
818 batadv_claim_put(claim); /* reference from the hash is gone */
819
820 /* don't need the reference from hash_find() anymore */
821 batadv_claim_put(claim);
822 }
823
824 /**
825 * batadv_handle_announce() - check for ANNOUNCE frame
826 * @bat_priv: the bat priv with all the soft interface information
827 * @an_addr: announcement mac address (ARP Sender HW address)
828 * @backbone_addr: originator address of the sender (Ethernet source MAC)
829 * @vid: the VLAN ID of the frame
830 *
831 * Return: true if handled
832 */
static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
834 u8 *backbone_addr, unsigned short vid)
835 {
836 struct batadv_bla_backbone_gw *backbone_gw;
837 u16 backbone_crc, crc;
838
839 if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
840 return false;
841
842 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
843 false);
844
845 if (unlikely(!backbone_gw))
846 return true;
847
848 /* handle as ANNOUNCE frame */
849 backbone_gw->lasttime = jiffies;
850 crc = ntohs(*((__be16 *)(&an_addr[4])));
851
852 batadv_dbg(BATADV_DBG_BLA, bat_priv,
853 "%s(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
854 __func__, batadv_print_vid(vid), backbone_gw->orig, crc);
855
856 spin_lock_bh(&backbone_gw->crc_lock);
857 backbone_crc = backbone_gw->crc;
858 spin_unlock_bh(&backbone_gw->crc_lock);
859
860 if (backbone_crc != crc) {
861 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
862 "%s(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
863 __func__, backbone_gw->orig,
864 batadv_print_vid(backbone_gw->vid),
865 backbone_crc, crc);
866
867 batadv_bla_send_request(backbone_gw);
868 } else {
869 /* if we have sent a request and the crc was OK,
870 * we can allow traffic again.
871 */
872 if (atomic_read(&backbone_gw->request_sent)) {
873 atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
874 atomic_set(&backbone_gw->request_sent, 0);
875 }
876 }
877
878 batadv_backbone_gw_put(backbone_gw);
879 return true;
880 }
881
882 /**
883 * batadv_handle_request() - check for REQUEST frame
884 * @bat_priv: the bat priv with all the soft interface information
885 * @primary_if: the primary hard interface of this batman soft interface
886 * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
887 * @ethhdr: ethernet header of a packet
888 * @vid: the VLAN ID of the frame
889 *
890 * Return: true if handled
891 */
static bool batadv_handle_request(struct batadv_priv *bat_priv,
893 struct batadv_hard_iface *primary_if,
894 u8 *backbone_addr, struct ethhdr *ethhdr,
895 unsigned short vid)
896 {
897 /* check for REQUEST frame */
898 if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
899 return false;
900
901 /* sanity check, this should not happen on a normal switch,
902 * we ignore it in this case.
903 */
904 if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
905 return true;
906
907 batadv_dbg(BATADV_DBG_BLA, bat_priv,
908 "%s(): REQUEST vid %d (sent by %pM)...\n",
909 __func__, batadv_print_vid(vid), ethhdr->h_source);
910
911 batadv_bla_answer_request(bat_priv, primary_if, vid);
912 return true;
913 }
914
915 /**
916 * batadv_handle_unclaim() - check for UNCLAIM frame
917 * @bat_priv: the bat priv with all the soft interface information
918 * @primary_if: the primary hard interface of this batman soft interface
919 * @backbone_addr: originator address of the backbone (Ethernet source)
920 * @claim_addr: Client to be unclaimed (ARP sender HW MAC)
921 * @vid: the VLAN ID of the frame
922 *
923 * Return: true if handled
924 */
static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
926 struct batadv_hard_iface *primary_if,
927 u8 *backbone_addr, u8 *claim_addr,
928 unsigned short vid)
929 {
930 struct batadv_bla_backbone_gw *backbone_gw;
931
932 /* unclaim in any case if it is our own */
933 if (primary_if && batadv_compare_eth(backbone_addr,
934 primary_if->net_dev->dev_addr))
935 batadv_bla_send_claim(bat_priv, claim_addr, vid,
936 BATADV_CLAIM_TYPE_UNCLAIM);
937
938 backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
939
940 if (!backbone_gw)
941 return true;
942
943 /* this must be an UNCLAIM frame */
944 batadv_dbg(BATADV_DBG_BLA, bat_priv,
945 "%s(): UNCLAIM %pM on vid %d (sent by %pM)...\n", __func__,
946 claim_addr, batadv_print_vid(vid), backbone_gw->orig);
947
948 batadv_bla_del_claim(bat_priv, claim_addr, vid);
949 batadv_backbone_gw_put(backbone_gw);
950 return true;
951 }
952
953 /**
954 * batadv_handle_claim() - check for CLAIM frame
955 * @bat_priv: the bat priv with all the soft interface information
956 * @primary_if: the primary hard interface of this batman soft interface
957 * @backbone_addr: originator address of the backbone (Ethernet Source)
958 * @claim_addr: client mac address to be claimed (ARP sender HW MAC)
959 * @vid: the VLAN ID of the frame
960 *
961 * Return: true if handled
962 */
static bool batadv_handle_claim(struct batadv_priv *bat_priv,
964 struct batadv_hard_iface *primary_if,
965 u8 *backbone_addr, u8 *claim_addr,
966 unsigned short vid)
967 {
968 struct batadv_bla_backbone_gw *backbone_gw;
969
970 /* register the gateway if not yet available, and add the claim. */
971
972 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
973 false);
974
975 if (unlikely(!backbone_gw))
976 return true;
977
978 /* this must be a CLAIM frame */
979 batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
980 if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
981 batadv_bla_send_claim(bat_priv, claim_addr, vid,
982 BATADV_CLAIM_TYPE_CLAIM);
983
984 /* TODO: we could call something like tt_local_del() here. */
985
986 batadv_backbone_gw_put(backbone_gw);
987 return true;
988 }
989
990 /**
991 * batadv_check_claim_group() - check for claim group membership
992 * @bat_priv: the bat priv with all the soft interface information
993 * @primary_if: the primary interface of this batman interface
994 * @hw_src: the Hardware source in the ARP Header
995 * @hw_dst: the Hardware destination in the ARP Header
996 * @ethhdr: pointer to the Ethernet header of the claim frame
997 *
 * Checks if it is a claim packet and if it is on the same group.
999 * This function also applies the group ID of the sender
1000 * if it is in the same mesh.
1001 *
1002 * Return:
1003 * 2 - if it is a claim packet and on the same group
 * 1 - if it is a claim packet from another group
1005 * 0 - if it is not a claim packet
1006 */
static int batadv_check_claim_group(struct batadv_priv *bat_priv,
1008 struct batadv_hard_iface *primary_if,
1009 u8 *hw_src, u8 *hw_dst,
1010 struct ethhdr *ethhdr)
1011 {
1012 u8 *backbone_addr;
1013 struct batadv_orig_node *orig_node;
1014 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
1015
1016 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
1017 bla_dst_own = &bat_priv->bla.claim_dest;
1018
1019 /* if announcement packet, use the source,
1020 * otherwise assume it is in the hw_src
1021 */
1022 switch (bla_dst->type) {
1023 case BATADV_CLAIM_TYPE_CLAIM:
1024 backbone_addr = hw_src;
1025 break;
1026 case BATADV_CLAIM_TYPE_REQUEST:
1027 case BATADV_CLAIM_TYPE_ANNOUNCE:
1028 case BATADV_CLAIM_TYPE_UNCLAIM:
1029 backbone_addr = ethhdr->h_source;
1030 break;
1031 default:
1032 return 0;
1033 }
1034
1035 /* don't accept claim frames from ourselves */
1036 if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
1037 return 0;
1038
	/* if it's already the same group, it is fine. */
1040 if (bla_dst->group == bla_dst_own->group)
1041 return 2;
1042
	/* let's see if this originator is in our mesh */
1044 orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);
1045
	/* don't accept claims from gateways which are not in
1047 * the same mesh or group.
1048 */
1049 if (!orig_node)
1050 return 1;
1051
	/* if our mesh friend's mac is bigger, use it for ourselves. */
1053 if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
1054 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1055 "taking other backbones claim group: %#.4x\n",
1056 ntohs(bla_dst->group));
1057 bla_dst_own->group = bla_dst->group;
1058 }
1059
1060 batadv_orig_node_put(orig_node);
1061
1062 return 2;
1063 }
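
/* Example of the group convergence above (illustrative numbers): if our own
 * group id is htons(0x1234) and a backbone gw in the same mesh announces
 * htons(0x8765), we adopt 0x8765 because ntohs(0x8765) > ntohs(0x1234).
 * Since every node applies the same rule, all backbone gateways of one mesh
 * eventually agree on the numerically largest group id.
 */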
1064
1065 /**
1066 * batadv_bla_process_claim() - Check if this is a claim frame, and process it
1067 * @bat_priv: the bat priv with all the soft interface information
1068 * @primary_if: the primary hard interface of this batman soft interface
1069 * @skb: the frame to be checked
1070 *
1071 * Return: true if it was a claim frame, otherwise return false to
1072 * tell the callee that it can use the frame on its own.
1073 */
static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
1075 struct batadv_hard_iface *primary_if,
1076 struct sk_buff *skb)
1077 {
1078 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
1079 u8 *hw_src, *hw_dst;
1080 struct vlan_hdr *vhdr, vhdr_buf;
1081 struct ethhdr *ethhdr;
1082 struct arphdr *arphdr;
1083 unsigned short vid;
1084 int vlan_depth = 0;
1085 __be16 proto;
1086 int headlen;
1087 int ret;
1088
1089 vid = batadv_get_vid(skb, 0);
1090 ethhdr = eth_hdr(skb);
1091
1092 proto = ethhdr->h_proto;
1093 headlen = ETH_HLEN;
1094 if (vid & BATADV_VLAN_HAS_TAG) {
1095 /* Traverse the VLAN/Ethertypes.
1096 *
1097 * At this point it is known that the first protocol is a VLAN
1098 * header, so start checking at the encapsulated protocol.
1099 *
1100 * The depth of the VLAN headers is recorded to drop BLA claim
1101 * frames encapsulated into multiple VLAN headers (QinQ).
1102 */
1103 do {
1104 vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
1105 &vhdr_buf);
1106 if (!vhdr)
1107 return false;
1108
1109 proto = vhdr->h_vlan_encapsulated_proto;
1110 headlen += VLAN_HLEN;
1111 vlan_depth++;
1112 } while (proto == htons(ETH_P_8021Q));
1113 }
1114
1115 if (proto != htons(ETH_P_ARP))
1116 return false; /* not a claim frame */
1117
1118 /* this must be a ARP frame. check if it is a claim. */
1119
1120 if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
1121 return false;
1122
1123 /* pskb_may_pull() may have modified the pointers, get ethhdr again */
1124 ethhdr = eth_hdr(skb);
1125 arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);
1126
1127 /* Check whether the ARP frame carries a valid
1128 * IP information
1129 */
1130 if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
1131 return false;
1132 if (arphdr->ar_pro != htons(ETH_P_IP))
1133 return false;
1134 if (arphdr->ar_hln != ETH_ALEN)
1135 return false;
1136 if (arphdr->ar_pln != 4)
1137 return false;
1138
1139 hw_src = (u8 *)arphdr + sizeof(struct arphdr);
1140 hw_dst = hw_src + ETH_ALEN + 4;
1141 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
1142 bla_dst_own = &bat_priv->bla.claim_dest;
1143
1144 /* check if it is a claim frame in general */
1145 if (memcmp(bla_dst->magic, bla_dst_own->magic,
1146 sizeof(bla_dst->magic)) != 0)
1147 return false;
1148
1149 /* check if there is a claim frame encapsulated deeper in (QinQ) and
1150 * drop that, as this is not supported by BLA but should also not be
1151 * sent via the mesh.
1152 */
1153 if (vlan_depth > 1)
1154 return true;
1155
	/* Let loopdetect frames onto the mesh in any case. */
1157 if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
1158 return false;
1159
1160 /* check if it is a claim frame. */
1161 ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
1162 ethhdr);
1163 if (ret == 1)
1164 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1165 "%s(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
1166 __func__, ethhdr->h_source, batadv_print_vid(vid),
1167 hw_src, hw_dst);
1168
1169 if (ret < 2)
1170 return !!ret;
1171
1172 /* become a backbone gw ourselves on this vlan if not happened yet */
1173 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1174
1175 /* check for the different types of claim frames ... */
1176 switch (bla_dst->type) {
1177 case BATADV_CLAIM_TYPE_CLAIM:
1178 if (batadv_handle_claim(bat_priv, primary_if, hw_src,
1179 ethhdr->h_source, vid))
1180 return true;
1181 break;
1182 case BATADV_CLAIM_TYPE_UNCLAIM:
1183 if (batadv_handle_unclaim(bat_priv, primary_if,
1184 ethhdr->h_source, hw_src, vid))
1185 return true;
1186 break;
1187
1188 case BATADV_CLAIM_TYPE_ANNOUNCE:
1189 if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
1190 vid))
1191 return true;
1192 break;
1193 case BATADV_CLAIM_TYPE_REQUEST:
1194 if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
1195 vid))
1196 return true;
1197 break;
1198 }
1199
1200 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1201 "%s(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
1202 __func__, ethhdr->h_source, batadv_print_vid(vid), hw_src,
1203 hw_dst);
1204 return true;
1205 }
1206
1207 /**
1208 * batadv_bla_purge_backbone_gw() - Remove backbone gateways after a timeout or
1209 * immediately
1210 * @bat_priv: the bat priv with all the soft interface information
1211 * @now: whether the whole hash shall be wiped now
1212 *
1213 * Check when we last heard from other nodes, and remove them in case of
1214 * a time out, or clean all backbone gws if now is set.
1215 */
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
1217 {
1218 struct batadv_bla_backbone_gw *backbone_gw;
1219 struct hlist_node *node_tmp;
1220 struct hlist_head *head;
1221 struct batadv_hashtable *hash;
1222 spinlock_t *list_lock; /* protects write access to the hash lists */
1223 int i;
1224
1225 hash = bat_priv->bla.backbone_hash;
1226 if (!hash)
1227 return;
1228
1229 for (i = 0; i < hash->size; i++) {
1230 head = &hash->table[i];
1231 list_lock = &hash->list_locks[i];
1232
1233 spin_lock_bh(list_lock);
1234 hlist_for_each_entry_safe(backbone_gw, node_tmp,
1235 head, hash_entry) {
1236 if (now)
1237 goto purge_now;
1238 if (!batadv_has_timed_out(backbone_gw->lasttime,
1239 BATADV_BLA_BACKBONE_TIMEOUT))
1240 continue;
1241
1242 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
1243 "%s(): backbone gw %pM timed out\n",
1244 __func__, backbone_gw->orig);
1245
1246 purge_now:
1247 /* don't wait for the pending request anymore */
1248 if (atomic_read(&backbone_gw->request_sent))
1249 atomic_dec(&bat_priv->bla.num_requests);
1250
1251 batadv_bla_del_backbone_claims(backbone_gw);
1252
1253 hlist_del_rcu(&backbone_gw->hash_entry);
1254 batadv_backbone_gw_put(backbone_gw);
1255 }
1256 spin_unlock_bh(list_lock);
1257 }
1258 }
1259
1260 /**
1261 * batadv_bla_purge_claims() - Remove claims after a timeout or immediately
1262 * @bat_priv: the bat priv with all the soft interface information
1263 * @primary_if: the selected primary interface, may be NULL if now is set
1264 * @now: whether the whole hash shall be wiped now
1265 *
 * Check when we last heard from our own claims, and remove them in case of
 * a time out, or clean all claims if now is set.
1268 */
static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
1270 struct batadv_hard_iface *primary_if,
1271 int now)
1272 {
1273 struct batadv_bla_backbone_gw *backbone_gw;
1274 struct batadv_bla_claim *claim;
1275 struct hlist_head *head;
1276 struct batadv_hashtable *hash;
1277 int i;
1278
1279 hash = bat_priv->bla.claim_hash;
1280 if (!hash)
1281 return;
1282
1283 for (i = 0; i < hash->size; i++) {
1284 head = &hash->table[i];
1285
1286 rcu_read_lock();
1287 hlist_for_each_entry_rcu(claim, head, hash_entry) {
1288 backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
1289 if (now)
1290 goto purge_now;
1291
1292 if (!batadv_compare_eth(backbone_gw->orig,
1293 primary_if->net_dev->dev_addr))
1294 goto skip;
1295
1296 if (!batadv_has_timed_out(claim->lasttime,
1297 BATADV_BLA_CLAIM_TIMEOUT))
1298 goto skip;
1299
1300 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1301 "%s(): timed out.\n", __func__);
1302
1303 purge_now:
1304 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1305 "%s(): %pM, vid %d\n", __func__,
1306 claim->addr, claim->vid);
1307
1308 batadv_handle_unclaim(bat_priv, primary_if,
1309 backbone_gw->orig,
1310 claim->addr, claim->vid);
1311 skip:
1312 batadv_backbone_gw_put(backbone_gw);
1313 }
1314 rcu_read_unlock();
1315 }
1316 }
1317
1318 /**
1319 * batadv_bla_update_orig_address() - Update the backbone gateways when the own
1320 * originator address changes
1321 * @bat_priv: the bat priv with all the soft interface information
1322 * @primary_if: the new selected primary_if
1323 * @oldif: the old primary interface, may be NULL
1324 */
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
1326 struct batadv_hard_iface *primary_if,
1327 struct batadv_hard_iface *oldif)
1328 {
1329 struct batadv_bla_backbone_gw *backbone_gw;
1330 struct hlist_head *head;
1331 struct batadv_hashtable *hash;
1332 __be16 group;
1333 int i;
1334
1335 /* reset bridge loop avoidance group id */
1336 group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
1337 bat_priv->bla.claim_dest.group = group;
1338
1339 /* purge everything when bridge loop avoidance is turned off */
1340 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1341 oldif = NULL;
1342
1343 if (!oldif) {
1344 batadv_bla_purge_claims(bat_priv, NULL, 1);
1345 batadv_bla_purge_backbone_gw(bat_priv, 1);
1346 return;
1347 }
1348
1349 hash = bat_priv->bla.backbone_hash;
1350 if (!hash)
1351 return;
1352
1353 for (i = 0; i < hash->size; i++) {
1354 head = &hash->table[i];
1355
1356 rcu_read_lock();
1357 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1358 /* own orig still holds the old value. */
1359 if (!batadv_compare_eth(backbone_gw->orig,
1360 oldif->net_dev->dev_addr))
1361 continue;
1362
1363 ether_addr_copy(backbone_gw->orig,
1364 primary_if->net_dev->dev_addr);
1365 /* send an announce frame so others will ask for our
1366 * claims and update their tables.
1367 */
1368 batadv_bla_send_announce(bat_priv, backbone_gw);
1369 }
1370 rcu_read_unlock();
1371 }
1372 }
1373
1374 /**
1375 * batadv_bla_send_loopdetect() - send a loopdetect frame
1376 * @bat_priv: the bat priv with all the soft interface information
1377 * @backbone_gw: the backbone gateway for which a loop should be detected
1378 *
1379 * To detect loops that the bridge loop avoidance can't handle, send a loop
1380 * detection packet on the backbone. Unlike other BLA frames, this frame will
1381 * be allowed on the mesh by other nodes. If it is received on the mesh, this
1382 * indicates that there is a loop.
1383 */
1384 static void
batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
1386 struct batadv_bla_backbone_gw *backbone_gw)
1387 {
1388 batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n",
1389 backbone_gw->vid);
1390 batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr,
1391 backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT);
1392 }
1393
1394 /**
1395 * batadv_bla_status_update() - purge bla interfaces if necessary
1396 * @net_dev: the soft interface net device
1397 */
void batadv_bla_status_update(struct net_device *net_dev)
1399 {
1400 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1401 struct batadv_hard_iface *primary_if;
1402
1403 primary_if = batadv_primary_if_get_selected(bat_priv);
1404 if (!primary_if)
1405 return;
1406
1407 /* this function already purges everything when bla is disabled,
1408 * so just call that one.
1409 */
1410 batadv_bla_update_orig_address(bat_priv, primary_if, primary_if);
1411 batadv_hardif_put(primary_if);
1412 }
1413
1414 /**
1415 * batadv_bla_periodic_work() - performs periodic bla work
1416 * @work: kernel work struct
1417 *
1418 * periodic work to do:
1419 * * purge structures when they are too old
1420 * * send announcements
1421 */
static void batadv_bla_periodic_work(struct work_struct *work)
1423 {
1424 struct delayed_work *delayed_work;
1425 struct batadv_priv *bat_priv;
1426 struct batadv_priv_bla *priv_bla;
1427 struct hlist_head *head;
1428 struct batadv_bla_backbone_gw *backbone_gw;
1429 struct batadv_hashtable *hash;
1430 struct batadv_hard_iface *primary_if;
1431 bool send_loopdetect = false;
1432 int i;
1433
1434 delayed_work = to_delayed_work(work);
1435 priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
1436 bat_priv = container_of(priv_bla, struct batadv_priv, bla);
1437 primary_if = batadv_primary_if_get_selected(bat_priv);
1438 if (!primary_if)
1439 goto out;
1440
1441 batadv_bla_purge_claims(bat_priv, primary_if, 0);
1442 batadv_bla_purge_backbone_gw(bat_priv, 0);
1443
1444 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1445 goto out;
1446
1447 if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) {
1448 /* set a new random mac address for the next bridge loop
1449 * detection frames. Set the locally administered bit to avoid
		 * collisions with users' mac addresses.
1451 */
1452 eth_random_addr(bat_priv->bla.loopdetect_addr);
1453 bat_priv->bla.loopdetect_addr[0] = 0xba;
1454 bat_priv->bla.loopdetect_addr[1] = 0xbe;
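		/* 0xba has the locally administered bit (0x02) set and the
		 * multicast bit (0x01) clear, so ba:be:xx:xx:xx:xx stays a
		 * valid unicast, non-globally-unique address
		 */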
1455 bat_priv->bla.loopdetect_lasttime = jiffies;
1456 atomic_set(&bat_priv->bla.loopdetect_next,
1457 BATADV_BLA_LOOPDETECT_PERIODS);
1458
1459 /* mark for sending loop detect on all VLANs */
1460 send_loopdetect = true;
1461 }
1462
1463 hash = bat_priv->bla.backbone_hash;
1464 if (!hash)
1465 goto out;
1466
1467 for (i = 0; i < hash->size; i++) {
1468 head = &hash->table[i];
1469
1470 rcu_read_lock();
1471 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1472 if (!batadv_compare_eth(backbone_gw->orig,
1473 primary_if->net_dev->dev_addr))
1474 continue;
1475
1476 backbone_gw->lasttime = jiffies;
1477
1478 batadv_bla_send_announce(bat_priv, backbone_gw);
1479 if (send_loopdetect)
1480 batadv_bla_send_loopdetect(bat_priv,
1481 backbone_gw);
1482
1483 /* request_sent is only set after creation to avoid
1484 * problems when we are not yet known as backbone gw
1485 * in the backbone.
1486 *
1487 * We can reset this now after we waited some periods
1488 * to give bridge forward delays and bla group forming
1489 * some grace time.
1490 */
1491
1492 if (atomic_read(&backbone_gw->request_sent) == 0)
1493 continue;
1494
1495 if (!atomic_dec_and_test(&backbone_gw->wait_periods))
1496 continue;
1497
1498 atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
1499 atomic_set(&backbone_gw->request_sent, 0);
1500 }
1501 rcu_read_unlock();
1502 }
1503 out:
1504 if (primary_if)
1505 batadv_hardif_put(primary_if);
1506
1507 queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
1508 msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1509 }
1510
/* The claim hash and the backbone hash receive the same lock class key
 * because they are both initialized by hash_new with the same key.
 * Reinitializing them with two different keys allows nested locking
 * without generating lockdep warnings.
 */
1516 static struct lock_class_key batadv_claim_hash_lock_class_key;
1517 static struct lock_class_key batadv_backbone_hash_lock_class_key;
1518
1519 /**
1520 * batadv_bla_init() - initialize all bla structures
1521 * @bat_priv: the bat priv with all the soft interface information
1522 *
1523 * Return: 0 on success, < 0 on error.
1524 */
int batadv_bla_init(struct batadv_priv *bat_priv)
1526 {
1527 int i;
1528 u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
1529 struct batadv_hard_iface *primary_if;
1530 u16 crc;
1531 unsigned long entrytime;
1532
1533 spin_lock_init(&bat_priv->bla.bcast_duplist_lock);
1534
1535 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
1536
1537 /* setting claim destination address */
1538 memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
1539 bat_priv->bla.claim_dest.type = 0;
1540 primary_if = batadv_primary_if_get_selected(bat_priv);
1541 if (primary_if) {
1542 crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
1543 bat_priv->bla.claim_dest.group = htons(crc);
1544 batadv_hardif_put(primary_if);
1545 } else {
1546 bat_priv->bla.claim_dest.group = 0; /* will be set later */
1547 }
1548
1549 /* initialize the duplicate list */
1550 entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
1551 for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
1552 bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
1553 bat_priv->bla.bcast_duplist_curr = 0;
1554
1555 atomic_set(&bat_priv->bla.loopdetect_next,
1556 BATADV_BLA_LOOPDETECT_PERIODS);
1557
1558 if (bat_priv->bla.claim_hash)
1559 return 0;
1560
1561 bat_priv->bla.claim_hash = batadv_hash_new(128);
1562 bat_priv->bla.backbone_hash = batadv_hash_new(32);
1563
1564 if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
1565 return -ENOMEM;
1566
1567 batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
1568 &batadv_claim_hash_lock_class_key);
1569 batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
1570 &batadv_backbone_hash_lock_class_key);
1571
1572 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
1573
1574 INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
1575
1576 queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
1577 msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1578 return 0;
1579 }
1580
1581 /**
1582 * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
1583 * @bat_priv: the bat priv with all the soft interface information
1584 * @skb: contains the bcast_packet to be checked
1585 *
1586 * check if it is on our broadcast list. Another gateway might
1587 * have sent the same packet because it is connected to the same backbone,
1588 * so we have to remove this duplicate.
1589 *
1590 * This is performed by checking the CRC, which will tell us
1591 * with a good chance that it is the same packet. If it is furthermore
1592 * sent by another host, drop it. We allow equal packets from
1593 * the same host however as this might be intended.
1594 *
1595 * Return: true if a packet is in the duplicate list, false otherwise.
1596 */
bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
1598 struct sk_buff *skb)
1599 {
1600 int i, curr;
1601 __be32 crc;
1602 struct batadv_bcast_packet *bcast_packet;
1603 struct batadv_bcast_duplist_entry *entry;
1604 bool ret = false;
1605
1606 bcast_packet = (struct batadv_bcast_packet *)skb->data;
1607
1608 /* calculate the crc ... */
1609 crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));
1610
1611 spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
1612
1613 for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
1614 curr = (bat_priv->bla.bcast_duplist_curr + i);
1615 curr %= BATADV_DUPLIST_SIZE;
1616 entry = &bat_priv->bla.bcast_duplist[curr];
1617
		/* we can stop searching if the entry is too old;
1619 * later entries will be even older
1620 */
1621 if (batadv_has_timed_out(entry->entrytime,
1622 BATADV_DUPLIST_TIMEOUT))
1623 break;
1624
1625 if (entry->crc != crc)
1626 continue;
1627
1628 if (batadv_compare_eth(entry->orig, bcast_packet->orig))
1629 continue;
1630
1631 /* this entry seems to match: same crc, not too old,
1632 * and from another gw. therefore return true to forbid it.
1633 */
1634 ret = true;
1635 goto out;
1636 }
1637 /* not found, add a new entry (overwrite the oldest entry)
	 * and allow it, it's the first occurrence.
1639 */
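	/* the slot just "behind" bcast_duplist_curr is the oldest one; writing
	 * there and moving curr back to it keeps the list ordered newest-first
	 * when it is walked from curr as done above
	 */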
1640 curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
1641 curr %= BATADV_DUPLIST_SIZE;
1642 entry = &bat_priv->bla.bcast_duplist[curr];
1643 entry->crc = crc;
1644 entry->entrytime = jiffies;
1645 ether_addr_copy(entry->orig, bcast_packet->orig);
1646 bat_priv->bla.bcast_duplist_curr = curr;
1647
1648 out:
1649 spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);
1650
1651 return ret;
1652 }
1653
1654 /**
1655 * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
1656 * the VLAN identified by vid.
1657 * @bat_priv: the bat priv with all the soft interface information
1658 * @orig: originator mac address
1659 * @vid: VLAN identifier
1660 *
1661 * Return: true if orig is a backbone for this vid, false otherwise.
1662 */
bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
1664 unsigned short vid)
1665 {
1666 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1667 struct hlist_head *head;
1668 struct batadv_bla_backbone_gw *backbone_gw;
1669 int i;
1670
1671 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1672 return false;
1673
1674 if (!hash)
1675 return false;
1676
1677 for (i = 0; i < hash->size; i++) {
1678 head = &hash->table[i];
1679
1680 rcu_read_lock();
1681 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1682 if (batadv_compare_eth(backbone_gw->orig, orig) &&
1683 backbone_gw->vid == vid) {
1684 rcu_read_unlock();
1685 return true;
1686 }
1687 }
1688 rcu_read_unlock();
1689 }
1690
1691 return false;
1692 }
1693
1694 /**
1695 * batadv_bla_is_backbone_gw() - check if originator is a backbone gw for a VLAN
1696 * @skb: the frame to be checked
1697 * @orig_node: the orig_node of the frame
1698 * @hdr_size: size of the encapsulation header preceding the Ethernet frame
1699 *
1700 * Return: true if the orig_node is also a gateway on the soft interface,
1701 * otherwise it returns false.
1702 */
1703 bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
1704 struct batadv_orig_node *orig_node, int hdr_size)
1705 {
1706 struct batadv_bla_backbone_gw *backbone_gw;
1707 unsigned short vid;
1708
1709 if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
1710 return false;
1711
1712 /* first, find out the vid. */
1713 if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
1714 return false;
1715
1716 vid = batadv_get_vid(skb, hdr_size);
1717
1718 /* see if this originator is a backbone gw for this VLAN */
1719 backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
1720 orig_node->orig, vid);
1721 if (!backbone_gw)
1722 return false;
1723
1724 batadv_backbone_gw_put(backbone_gw);
1725 return true;
1726 }
1727
1728 /**
1729 * batadv_bla_free() - free all bla structures
1730 * @bat_priv: the bat priv with all the soft interface information
1731 *
1732 * for soft-interface free or module unload
1733 */
1734 void batadv_bla_free(struct batadv_priv *bat_priv)
1735 {
1736 struct batadv_hard_iface *primary_if;
1737
1738 cancel_delayed_work_sync(&bat_priv->bla.work);
1739 primary_if = batadv_primary_if_get_selected(bat_priv);
1740
1741 if (bat_priv->bla.claim_hash) {
1742 batadv_bla_purge_claims(bat_priv, primary_if, 1);
1743 batadv_hash_destroy(bat_priv->bla.claim_hash);
1744 bat_priv->bla.claim_hash = NULL;
1745 }
1746 if (bat_priv->bla.backbone_hash) {
1747 batadv_bla_purge_backbone_gw(bat_priv, 1);
1748 batadv_hash_destroy(bat_priv->bla.backbone_hash);
1749 bat_priv->bla.backbone_hash = NULL;
1750 }
1751 if (primary_if)
1752 batadv_hardif_put(primary_if);
1753 }
1754
1755 /**
1756 * batadv_bla_loopdetect_check() - check and handle a detected loop
1757 * @bat_priv: the bat priv with all the soft interface information
1758 * @skb: the packet to check
1759 * @primary_if: interface on which the frame was received
1760 * @vid: the VLAN ID of the frame
1761 *
1762 * Checks if this packet is a loop detect frame which has been sent by us;
1763 * if so, an uevent is thrown and the event is logged.
1764 *
1765 * Return: true if it is a loop detect frame which is to be dropped, false
1766 * otherwise.
1767 */
1768 static bool
1769 batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
1770 struct batadv_hard_iface *primary_if,
1771 unsigned short vid)
1772 {
1773 struct batadv_bla_backbone_gw *backbone_gw;
1774 struct ethhdr *ethhdr;
1775 bool ret;
1776
1777 ethhdr = eth_hdr(skb);
1778
1779 /* Only check for the MAC address and skip more checks here for
1780 * performance reasons - this function is on the hotpath, after all.
1781 */
1782 if (!batadv_compare_eth(ethhdr->h_source,
1783 bat_priv->bla.loopdetect_addr))
1784 return false;
1785
1786 /* If the packet came too late, don't forward it on the mesh
1787 * but don't consider that as loop. It might be a coincidence.
1788 */
1789 if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime,
1790 BATADV_BLA_LOOPDETECT_TIMEOUT))
1791 return true;
1792
1793 backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
1794 primary_if->net_dev->dev_addr,
1795 vid, true);
1796 if (unlikely(!backbone_gw))
1797 return true;
1798
1799 ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
1800
1801 /* backbone_gw is unreferenced in the report work function
1802 * if the queue_work() call was successful
1803 */
1804 if (!ret)
1805 batadv_backbone_gw_put(backbone_gw);
1806
1807 return true;
1808 }
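
/* Editor's note: illustrative sketch only, not part of the batman-adv
 * sources and compiled out via #if 0. It models the principle used by
 * batadv_bla_loopdetect_check() above with hypothetical types: a probe frame
 * with a dedicated source address is sent towards the LAN, and if a frame
 * carrying that source address comes back from the mesh before the probe
 * times out, a bridge loop is assumed.
 */
#if 0
struct sketch_loopdetect {
	unsigned char probe_addr[6];	/* source address of our probe */
	unsigned long last_probe;	/* when the probe was sent */
};

/* Return 1 if src indicates a (timely) looped-back probe, 0 otherwise. */
static int sketch_is_loop(const struct sketch_loopdetect *ld,
			  const unsigned char *src, unsigned long now,
			  unsigned long timeout)
{
	int i;

	for (i = 0; i < 6; i++)
		if (src[i] != ld->probe_addr[i])
			return 0;	/* not our probe frame at all */

	/* our probe, but it arrived too late: treat it as a coincidence */
	if (now - ld->last_probe > timeout)
		return 0;

	return 1;
}
#endif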
1809
1810 /**
1811 * batadv_bla_rx() - check packets coming from the mesh.
1812 * @bat_priv: the bat priv with all the soft interface information
1813 * @skb: the frame to be checked
1814 * @vid: the VLAN ID of the frame
1815 * @is_bcast: true if the packet came in via a broadcast packet type
1816 *
1817 * batadv_bla_rx() checks whether:
1818 * * we have to race for a claim
1819 * * the frame is allowed on the LAN
1820 *
1821 * In these cases, the skb is further handled by this function.
1822 *
1823 * Return: true if handled, otherwise it returns false and the caller shall
1824 * further process the skb.
1825 */
1826 bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
1827 unsigned short vid, bool is_bcast)
1828 {
1829 struct batadv_bla_backbone_gw *backbone_gw;
1830 struct ethhdr *ethhdr;
1831 struct batadv_bla_claim search_claim, *claim = NULL;
1832 struct batadv_hard_iface *primary_if;
1833 bool own_claim;
1834 bool ret;
1835
1836 ethhdr = eth_hdr(skb);
1837
1838 primary_if = batadv_primary_if_get_selected(bat_priv);
1839 if (!primary_if)
1840 goto handled;
1841
1842 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1843 goto allow;
1844
1845 if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
1846 goto handled;
1847
1848 if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1849 /* don't allow broadcasts while requests are in flight */
1850 if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
1851 goto handled;
1852
1853 ether_addr_copy(search_claim.addr, ethhdr->h_source);
1854 search_claim.vid = vid;
1855 claim = batadv_claim_hash_find(bat_priv, &search_claim);
1856
1857 if (!claim) {
1858 /* possible optimization: race for a claim */
1859 /* No claim exists yet, claim it for us!
1860 */
1861
1862 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1863 "%s(): Unclaimed MAC %pM found. Claim it. Local: %s\n",
1864 __func__, ethhdr->h_source,
1865 batadv_is_my_client(bat_priv,
1866 ethhdr->h_source, vid) ?
1867 "yes" : "no");
1868 batadv_handle_claim(bat_priv, primary_if,
1869 primary_if->net_dev->dev_addr,
1870 ethhdr->h_source, vid);
1871 goto allow;
1872 }
1873
1874 /* if it is our own claim ... */
1875 backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
1876 own_claim = batadv_compare_eth(backbone_gw->orig,
1877 primary_if->net_dev->dev_addr);
1878 batadv_backbone_gw_put(backbone_gw);
1879
1880 if (own_claim) {
1881 /* ... allow it in any case */
1882 claim->lasttime = jiffies;
1883 goto allow;
1884 }
1885
1886 /* if it is a broadcast ... */
1887 if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
1888 /* ... drop it. the responsible gateway is in charge.
1889 *
1890 * We need to check is_bcast because with the gateway
1891 * feature, broadcasts (like DHCP requests) may be sent
1892 * using a unicast packet type.
1893 */
1894 goto handled;
1895 } else {
1896 /* seems the client considers us as its best gateway.
1897 * send a claim and update the claim table
1898 * immediately.
1899 */
1900 batadv_handle_claim(bat_priv, primary_if,
1901 primary_if->net_dev->dev_addr,
1902 ethhdr->h_source, vid);
1903 goto allow;
1904 }
1905 allow:
1906 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1907 ret = false;
1908 goto out;
1909
1910 handled:
1911 kfree_skb(skb);
1912 ret = true;
1913
1914 out:
1915 if (primary_if)
1916 batadv_hardif_put(primary_if);
1917 if (claim)
1918 batadv_claim_put(claim);
1919 return ret;
1920 }
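
/* Editor's note: illustrative sketch only, not part of the batman-adv
 * sources and compiled out via #if 0. It condenses the claim handling of
 * batadv_bla_rx() above into a hypothetical pure decision function covering
 * the four cases: unclaimed source, our own claim, a foreign claim carrying
 * a broadcast (multicast destination delivered as a broadcast packet), and
 * a foreign claim carrying other traffic.
 */
#if 0
enum sketch_rx_verdict {
	SKETCH_RX_CLAIM_AND_ALLOW,	/* unclaimed: claim it, then forward */
	SKETCH_RX_ALLOW,		/* our claim: refresh and forward */
	SKETCH_RX_DROP,			/* foreign claim + broadcast: drop */
	SKETCH_RX_RECLAIM_AND_ALLOW,	/* foreign claim + unicast: take over */
};

static enum sketch_rx_verdict
sketch_bla_rx_verdict(int has_claim, int claim_is_ours, int is_bcast_frame)
{
	if (!has_claim)
		return SKETCH_RX_CLAIM_AND_ALLOW;

	if (claim_is_ours)
		return SKETCH_RX_ALLOW;

	if (is_bcast_frame)
		return SKETCH_RX_DROP;	/* the responsible gateway delivers it */

	/* the client apparently prefers us: claim it and forward */
	return SKETCH_RX_RECLAIM_AND_ALLOW;
}
#endif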
1921
1922 /**
1923 * batadv_bla_tx() - check packets going into the mesh
1924 * @bat_priv: the bat priv with all the soft interface information
1925 * @skb: the frame to be checked
1926 * @vid: the VLAN ID of the frame
1927 *
1928 * batadv_bla_tx() checks whether:
1929 * * a claim was received which has to be processed
1930 * * the frame is allowed on the mesh
1931 *
1932 * In these cases, the skb is further handled by this function.
1933 *
1934 * This call might reallocate skb data.
1935 *
1936 * Return: true if handled, otherwise it returns false and the caller shall
1937 * further process the skb.
1938 */
1939 bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
1940 unsigned short vid)
1941 {
1942 struct ethhdr *ethhdr;
1943 struct batadv_bla_claim search_claim, *claim = NULL;
1944 struct batadv_bla_backbone_gw *backbone_gw;
1945 struct batadv_hard_iface *primary_if;
1946 bool client_roamed;
1947 bool ret = false;
1948
1949 primary_if = batadv_primary_if_get_selected(bat_priv);
1950 if (!primary_if)
1951 goto out;
1952
1953 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1954 goto allow;
1955
1956 if (batadv_bla_process_claim(bat_priv, primary_if, skb))
1957 goto handled;
1958
1959 ethhdr = eth_hdr(skb);
1960
1961 if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1962 /* don't allow broadcasts while requests are in flight */
1963 if (is_multicast_ether_addr(ethhdr->h_dest))
1964 goto handled;
1965
1966 ether_addr_copy(search_claim.addr, ethhdr->h_source);
1967 search_claim.vid = vid;
1968
1969 claim = batadv_claim_hash_find(bat_priv, &search_claim);
1970
1971 /* if no claim exists, allow it. */
1972 if (!claim)
1973 goto allow;
1974
1975 /* check if we are responsible. */
1976 backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
1977 client_roamed = batadv_compare_eth(backbone_gw->orig,
1978 primary_if->net_dev->dev_addr);
1979 batadv_backbone_gw_put(backbone_gw);
1980
1981 if (client_roamed) {
1982 /* if yes, the client has roamed and we have
1983 * to unclaim it.
1984 */
1985 if (batadv_has_timed_out(claim->lasttime, 100)) {
1986 /* only unclaim if the last claim entry is
1987 * older than 100 ms to make sure we really
1988 * have a roaming client here.
1989 */
1990 batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Roaming client %pM detected. Unclaim it.\n",
1991 __func__, ethhdr->h_source);
1992 batadv_handle_unclaim(bat_priv, primary_if,
1993 primary_if->net_dev->dev_addr,
1994 ethhdr->h_source, vid);
1995 goto allow;
1996 } else {
1997 batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Race for claim %pM detected. Drop packet.\n",
1998 __func__, ethhdr->h_source);
1999 goto handled;
2000 }
2001 }
2002
2003 /* check if it is a multicast/broadcast frame */
2004 if (is_multicast_ether_addr(ethhdr->h_dest)) {
2005 /* drop it. the responsible gateway has forwarded it into
2006 * the backbone network.
2007 */
2008 goto handled;
2009 } else {
2010 /* we must allow it. at least if we are
2011 * responsible for the DESTINATION.
2012 */
2013 goto allow;
2014 }
2015 allow:
2016 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
2017 ret = false;
2018 goto out;
2019 handled:
2020 ret = true;
2021 out:
2022 if (primary_if)
2023 batadv_hardif_put(primary_if);
2024 if (claim)
2025 batadv_claim_put(claim);
2026 return ret;
2027 }
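
/* Editor's note: illustrative sketch only, not part of the batman-adv
 * sources and compiled out via #if 0. It condenses the decision made by
 * batadv_bla_tx() above into a hypothetical pure function: no claim allows
 * the frame; a claim held by us is either unclaimed (roaming client, claim
 * older than the race window) or dropped (claim race); a claim held by
 * another gateway drops multicast but allows unicast frames.
 */
#if 0
enum sketch_tx_verdict {
	SKETCH_TX_ALLOW,		/* no claim, or we are not in charge */
	SKETCH_TX_UNCLAIM_AND_ALLOW,	/* our claim, client has roamed */
	SKETCH_TX_DROP,			/* claim race or foreign multicast */
};

static enum sketch_tx_verdict
sketch_bla_tx_verdict(int has_claim, int claim_is_ours, int is_multicast,
		      unsigned long claim_age_ms, unsigned long race_window_ms)
{
	if (!has_claim)
		return SKETCH_TX_ALLOW;

	if (claim_is_ours) {
		/* only unclaim if the claim is old enough to be sure the
		 * client really roamed instead of racing for the claim
		 */
		if (claim_age_ms > race_window_ms)
			return SKETCH_TX_UNCLAIM_AND_ALLOW;
		return SKETCH_TX_DROP;
	}

	/* another gateway is responsible for this client */
	if (is_multicast)
		return SKETCH_TX_DROP;

	return SKETCH_TX_ALLOW;
}
#endif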
2028
2029 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
2030 /**
2031 * batadv_bla_claim_table_seq_print_text() - print the claim table in a seq file
2032 * @seq: seq file to print on
2033 * @offset: not used
2034 *
2035 * Return: always 0
2036 */
2037 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
2038 {
2039 struct net_device *net_dev = (struct net_device *)seq->private;
2040 struct batadv_priv *bat_priv = netdev_priv(net_dev);
2041 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
2042 struct batadv_bla_backbone_gw *backbone_gw;
2043 struct batadv_bla_claim *claim;
2044 struct batadv_hard_iface *primary_if;
2045 struct hlist_head *head;
2046 u16 backbone_crc;
2047 u32 i;
2048 bool is_own;
2049 u8 *primary_addr;
2050
2051 primary_if = batadv_seq_print_text_primary_if_get(seq);
2052 if (!primary_if)
2053 goto out;
2054
2055 primary_addr = primary_if->net_dev->dev_addr;
2056 seq_printf(seq,
2057 "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
2058 net_dev->name, primary_addr,
2059 ntohs(bat_priv->bla.claim_dest.group));
2060 seq_puts(seq,
2061 " Client VID Originator [o] (CRC )\n");
2062 for (i = 0; i < hash->size; i++) {
2063 head = &hash->table[i];
2064
2065 rcu_read_lock();
2066 hlist_for_each_entry_rcu(claim, head, hash_entry) {
2067 backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
2068
2069 is_own = batadv_compare_eth(backbone_gw->orig,
2070 primary_addr);
2071
2072 spin_lock_bh(&backbone_gw->crc_lock);
2073 backbone_crc = backbone_gw->crc;
2074 spin_unlock_bh(&backbone_gw->crc_lock);
2075 seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
2076 claim->addr, batadv_print_vid(claim->vid),
2077 backbone_gw->orig,
2078 (is_own ? 'x' : ' '),
2079 backbone_crc);
2080
2081 batadv_backbone_gw_put(backbone_gw);
2082 }
2083 rcu_read_unlock();
2084 }
2085 out:
2086 if (primary_if)
2087 batadv_hardif_put(primary_if);
2088 return 0;
2089 }
2090 #endif
2091
2092 /**
2093 * batadv_bla_claim_dump_entry() - dump one entry of the claim table
2094 * to a netlink socket
2095 * @msg: buffer for the message
2096 * @portid: netlink port
2097 * @seq: Sequence number of netlink message
2098 * @primary_if: primary interface
2099 * @claim: entry to dump
2100 *
2101 * Return: 0 or error code.
2102 */
2103 static int
2104 batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
2105 struct batadv_hard_iface *primary_if,
2106 struct batadv_bla_claim *claim)
2107 {
2108 u8 *primary_addr = primary_if->net_dev->dev_addr;
2109 u16 backbone_crc;
2110 bool is_own;
2111 void *hdr;
2112 int ret = -EINVAL;
2113
2114 hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
2115 NLM_F_MULTI, BATADV_CMD_GET_BLA_CLAIM);
2116 if (!hdr) {
2117 ret = -ENOBUFS;
2118 goto out;
2119 }
2120
2121 is_own = batadv_compare_eth(claim->backbone_gw->orig,
2122 primary_addr);
2123
2124 spin_lock_bh(&claim->backbone_gw->crc_lock);
2125 backbone_crc = claim->backbone_gw->crc;
2126 spin_unlock_bh(&claim->backbone_gw->crc_lock);
2127
2128 if (is_own)
2129 if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
2130 genlmsg_cancel(msg, hdr);
2131 goto out;
2132 }
2133
2134 if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) ||
2135 nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) ||
2136 nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
2137 claim->backbone_gw->orig) ||
2138 nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
2139 backbone_crc)) {
2140 genlmsg_cancel(msg, hdr);
2141 goto out;
2142 }
2143
2144 genlmsg_end(msg, hdr);
2145 ret = 0;
2146
2147 out:
2148 return ret;
2149 }
2150
2151 /**
2152 * batadv_bla_claim_dump_bucket() - dump one bucket of the claim table
2153 * to a netlink socket
2154 * @msg: buffer for the message
2155 * @portid: netlink port
2156 * @seq: Sequence number of netlink message
2157 * @primary_if: primary interface
2158 * @head: bucket to dump
2159 * @idx_skip: How many entries to skip
2160 *
2161 * Return: always 0.
2162 */
2163 static int
2164 batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
2165 struct batadv_hard_iface *primary_if,
2166 struct hlist_head *head, int *idx_skip)
2167 {
2168 struct batadv_bla_claim *claim;
2169 int idx = 0;
2170 int ret = 0;
2171
2172 rcu_read_lock();
2173 hlist_for_each_entry_rcu(claim, head, hash_entry) {
2174 if (idx++ < *idx_skip)
2175 continue;
2176
2177 ret = batadv_bla_claim_dump_entry(msg, portid, seq,
2178 primary_if, claim);
2179 if (ret) {
2180 *idx_skip = idx - 1;
2181 goto unlock;
2182 }
2183 }
2184
2185 *idx_skip = 0;
2186 unlock:
2187 rcu_read_unlock();
2188 return ret;
2189 }
2190
2191 /**
2192 * batadv_bla_claim_dump() - dump claim table to a netlink socket
2193 * @msg: buffer for the message
2194 * @cb: callback structure containing arguments
2195 *
2196 * Return: message length.
2197 */
2198 int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
2199 {
2200 struct batadv_hard_iface *primary_if = NULL;
2201 int portid = NETLINK_CB(cb->skb).portid;
2202 struct net *net = sock_net(cb->skb->sk);
2203 struct net_device *soft_iface;
2204 struct batadv_hashtable *hash;
2205 struct batadv_priv *bat_priv;
2206 int bucket = cb->args[0];
2207 struct hlist_head *head;
2208 int idx = cb->args[1];
2209 int ifindex;
2210 int ret = 0;
2211
2212 ifindex = batadv_netlink_get_ifindex(cb->nlh,
2213 BATADV_ATTR_MESH_IFINDEX);
2214 if (!ifindex)
2215 return -EINVAL;
2216
2217 soft_iface = dev_get_by_index(net, ifindex);
2218 if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
2219 ret = -ENODEV;
2220 goto out;
2221 }
2222
2223 bat_priv = netdev_priv(soft_iface);
2224 hash = bat_priv->bla.claim_hash;
2225
2226 primary_if = batadv_primary_if_get_selected(bat_priv);
2227 if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
2228 ret = -ENOENT;
2229 goto out;
2230 }
2231
2232 while (bucket < hash->size) {
2233 head = &hash->table[bucket];
2234
2235 if (batadv_bla_claim_dump_bucket(msg, portid,
2236 cb->nlh->nlmsg_seq,
2237 primary_if, head, &idx))
2238 break;
2239 bucket++;
2240 }
2241
2242 cb->args[0] = bucket;
2243 cb->args[1] = idx;
2244
2245 ret = msg->len;
2246
2247 out:
2248 if (primary_if)
2249 batadv_hardif_put(primary_if);
2250
2251 if (soft_iface)
2252 dev_put(soft_iface);
2253
2254 return ret;
2255 }
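
/* Editor's note: illustrative sketch only, not part of the batman-adv
 * sources and compiled out via #if 0. It models the (bucket, index) cursor
 * that batadv_bla_claim_dump() above keeps in cb->args[] so that a dump can
 * be resumed across several netlink messages; all names are hypothetical.
 */
#if 0
struct sketch_bucket {
	int nentries;
	int entry[32];
};

/* Emit one entry; return 0 on success, -1 once the message is full. */
static int sketch_emit(int value, int *room_left)
{
	(void)value;	/* a real dump would serialize the value here */

	if (*room_left <= 0)
		return -1;
	(*room_left)--;
	return 0;
}

/* Walk all buckets, resuming at (*bucket, *idx); the cursor is updated so a
 * subsequent call continues exactly where this one stopped.
 */
static void sketch_dump(const struct sketch_bucket *buckets, int nbuckets,
			int *bucket, int *idx, int room)
{
	int i;

	while (*bucket < nbuckets) {
		const struct sketch_bucket *b = &buckets[*bucket];

		for (i = *idx; i < b->nentries; i++) {
			if (sketch_emit(b->entry[i], &room)) {
				*idx = i;	/* retry this entry next time */
				return;
			}
		}

		*idx = 0;	/* bucket done: next bucket starts at zero */
		(*bucket)++;
	}
}
#endif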
2256
2257 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
2258 /**
2259 * batadv_bla_backbone_table_seq_print_text() - print the backbone table in a
2260 * seq file
2261 * @seq: seq file to print on
2262 * @offset: not used
2263 *
2264 * Return: always 0
2265 */
2266 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
2267 {
2268 struct net_device *net_dev = (struct net_device *)seq->private;
2269 struct batadv_priv *bat_priv = netdev_priv(net_dev);
2270 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
2271 struct batadv_bla_backbone_gw *backbone_gw;
2272 struct batadv_hard_iface *primary_if;
2273 struct hlist_head *head;
2274 int secs, msecs;
2275 u16 backbone_crc;
2276 u32 i;
2277 bool is_own;
2278 u8 *primary_addr;
2279
2280 primary_if = batadv_seq_print_text_primary_if_get(seq);
2281 if (!primary_if)
2282 goto out;
2283
2284 primary_addr = primary_if->net_dev->dev_addr;
2285 seq_printf(seq,
2286 "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
2287 net_dev->name, primary_addr,
2288 ntohs(bat_priv->bla.claim_dest.group));
2289 seq_puts(seq, " Originator VID last seen (CRC )\n");
2290 for (i = 0; i < hash->size; i++) {
2291 head = &hash->table[i];
2292
2293 rcu_read_lock();
2294 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
2295 msecs = jiffies_to_msecs(jiffies -
2296 backbone_gw->lasttime);
2297 secs = msecs / 1000;
2298 msecs = msecs % 1000;
2299
2300 is_own = batadv_compare_eth(backbone_gw->orig,
2301 primary_addr);
2302 if (is_own)
2303 continue;
2304
2305 spin_lock_bh(&backbone_gw->crc_lock);
2306 backbone_crc = backbone_gw->crc;
2307 spin_unlock_bh(&backbone_gw->crc_lock);
2308
2309 seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
2310 backbone_gw->orig,
2311 batadv_print_vid(backbone_gw->vid), secs,
2312 msecs, backbone_crc);
2313 }
2314 rcu_read_unlock();
2315 }
2316 out:
2317 if (primary_if)
2318 batadv_hardif_put(primary_if);
2319 return 0;
2320 }
2321 #endif
2322
2323 /**
2324 * batadv_bla_backbone_dump_entry() - dump one entry of the backbone table to a
2325 * netlink socket
2326 * @msg: buffer for the message
2327 * @portid: netlink port
2328 * @seq: Sequence number of netlink message
2329 * @primary_if: primary interface
2330 * @backbone_gw: entry to dump
2331 *
2332 * Return: 0 or error code.
2333 */
2334 static int
2335 batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
2336 struct batadv_hard_iface *primary_if,
2337 struct batadv_bla_backbone_gw *backbone_gw)
2338 {
2339 u8 *primary_addr = primary_if->net_dev->dev_addr;
2340 u16 backbone_crc;
2341 bool is_own;
2342 int msecs;
2343 void *hdr;
2344 int ret = -EINVAL;
2345
2346 hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
2347 NLM_F_MULTI, BATADV_CMD_GET_BLA_BACKBONE);
2348 if (!hdr) {
2349 ret = -ENOBUFS;
2350 goto out;
2351 }
2352
2353 is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);
2354
2355 spin_lock_bh(&backbone_gw->crc_lock);
2356 backbone_crc = backbone_gw->crc;
2357 spin_unlock_bh(&backbone_gw->crc_lock);
2358
2359 msecs = jiffies_to_msecs(jiffies - backbone_gw->lasttime);
2360
2361 if (is_own)
2362 if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
2363 genlmsg_cancel(msg, hdr);
2364 goto out;
2365 }
2366
2367 if (nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
2368 backbone_gw->orig) ||
2369 nla_put_u16(msg, BATADV_ATTR_BLA_VID, backbone_gw->vid) ||
2370 nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
2371 backbone_crc) ||
2372 nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, msecs)) {
2373 genlmsg_cancel(msg, hdr);
2374 goto out;
2375 }
2376
2377 genlmsg_end(msg, hdr);
2378 ret = 0;
2379
2380 out:
2381 return ret;
2382 }
2383
2384 /**
2385 * batadv_bla_backbone_dump_bucket() - dump one bucket of the backbone table to
2386 * a netlink socket
2387 * @msg: buffer for the message
2388 * @portid: netlink port
2389 * @seq: Sequence number of netlink message
2390 * @primary_if: primary interface
2391 * @head: bucket to dump
2392 * @idx_skip: How many entries to skip
2393 *
2394 * Return: always 0.
2395 */
2396 static int
2397 batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
2398 struct batadv_hard_iface *primary_if,
2399 struct hlist_head *head, int *idx_skip)
2400 {
2401 struct batadv_bla_backbone_gw *backbone_gw;
2402 int idx = 0;
2403 int ret = 0;
2404
2405 rcu_read_lock();
2406 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
2407 if (idx++ < *idx_skip)
2408 continue;
2409
2410 ret = batadv_bla_backbone_dump_entry(msg, portid, seq,
2411 primary_if, backbone_gw);
2412 if (ret) {
2413 *idx_skip = idx - 1;
2414 goto unlock;
2415 }
2416 }
2417
2418 *idx_skip = 0;
2419 unlock:
2420 rcu_read_unlock();
2421 return ret;
2422 }
2423
2424 /**
2425 * batadv_bla_backbone_dump() - dump backbone table to a netlink socket
2426 * @msg: buffer for the message
2427 * @cb: callback structure containing arguments
2428 *
2429 * Return: message length.
2430 */
2431 int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
2432 {
2433 struct batadv_hard_iface *primary_if = NULL;
2434 int portid = NETLINK_CB(cb->skb).portid;
2435 struct net *net = sock_net(cb->skb->sk);
2436 struct net_device *soft_iface;
2437 struct batadv_hashtable *hash;
2438 struct batadv_priv *bat_priv;
2439 int bucket = cb->args[0];
2440 struct hlist_head *head;
2441 int idx = cb->args[1];
2442 int ifindex;
2443 int ret = 0;
2444
2445 ifindex = batadv_netlink_get_ifindex(cb->nlh,
2446 BATADV_ATTR_MESH_IFINDEX);
2447 if (!ifindex)
2448 return -EINVAL;
2449
2450 soft_iface = dev_get_by_index(net, ifindex);
2451 if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
2452 ret = -ENODEV;
2453 goto out;
2454 }
2455
2456 bat_priv = netdev_priv(soft_iface);
2457 hash = bat_priv->bla.backbone_hash;
2458
2459 primary_if = batadv_primary_if_get_selected(bat_priv);
2460 if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
2461 ret = -ENOENT;
2462 goto out;
2463 }
2464
2465 while (bucket < hash->size) {
2466 head = &hash->table[bucket];
2467
2468 if (batadv_bla_backbone_dump_bucket(msg, portid,
2469 cb->nlh->nlmsg_seq,
2470 primary_if, head, &idx))
2471 break;
2472 bucket++;
2473 }
2474
2475 cb->args[0] = bucket;
2476 cb->args[1] = idx;
2477
2478 ret = msg->len;
2479
2480 out:
2481 if (primary_if)
2482 batadv_hardif_put(primary_if);
2483
2484 if (soft_iface)
2485 dev_put(soft_iface);
2486
2487 return ret;
2488 }
2489
2490 #ifdef CONFIG_BATMAN_ADV_DAT
2491 /**
2492 * batadv_bla_check_claim() - check if address is claimed
2493 *
2494 * @bat_priv: the bat priv with all the soft interface information
2495 * @addr: mac address of which the claim status is checked
2496 * @vid: the VLAN ID
2497 *
2498 * The given address is checked for being claimed by the local device itself.
2499 *
2500 * Return: true if bla is disabled or the mac is claimed by the device itself,
2501 * false if the address is already claimed by another backbone gateway
2502 */
2503 bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
2504 u8 *addr, unsigned short vid)
2505 {
2506 struct batadv_bla_claim search_claim;
2507 struct batadv_bla_claim *claim = NULL;
2508 struct batadv_hard_iface *primary_if = NULL;
2509 bool ret = true;
2510
2511 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
2512 return ret;
2513
2514 primary_if = batadv_primary_if_get_selected(bat_priv);
2515 if (!primary_if)
2516 return ret;
2517
2518 /* First look if the mac address is claimed */
2519 ether_addr_copy(search_claim.addr, addr);
2520 search_claim.vid = vid;
2521
2522 claim = batadv_claim_hash_find(bat_priv, &search_claim);
2523
2524 /* If there is a claim and we are not owner of the claim,
2525 * return false.
2526 */
2527 if (claim) {
2528 if (!batadv_compare_eth(claim->backbone_gw->orig,
2529 primary_if->net_dev->dev_addr))
2530 ret = false;
2531 batadv_claim_put(claim);
2532 }
2533
2534 batadv_hardif_put(primary_if);
2535 return ret;
2536 }
2537 #endif
2538