// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2013-2018  B.A.T.M.A.N. contributors:
 *
 * Martin Hundebøll <martin@hundeboll.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "fragmentation.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <uapi/linux/batadv_packet.h>

#include "hard-interface.h"
#include "originator.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"

/**
 * batadv_frag_clear_chain() - delete entries in the fragment buffer chain
 * @head: head of chain with entries.
 * @dropped: whether the chain is cleared because all fragments are dropped
 *
 * Free fragments in the passed hlist. Should be called with appropriate lock.
 */
static void batadv_frag_clear_chain(struct hlist_head *head, bool dropped)
{
	struct batadv_frag_list_entry *entry;
	struct hlist_node *node;

	hlist_for_each_entry_safe(entry, node, head, list) {
		hlist_del(&entry->list);

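		/* kfree_skb() accounts the skb as dropped for tracing,
		 * consume_skb() as successfully processed.
		 */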
		if (dropped)
			kfree_skb(entry->skb);
		else
			consume_skb(entry->skb);

		kfree(entry);
	}
}

/**
 * batadv_frag_purge_orig() - free fragments associated to an orig
 * @orig_node: originator to free fragments from
 * @check_cb: optional function to tell if an entry should be purged
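 *            (typically a timeout check against chain->timestamp)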
 */
void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
			    bool (*check_cb)(struct batadv_frag_table_entry *))
{
	struct batadv_frag_table_entry *chain;
	u8 i;

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		chain = &orig_node->fragments[i];
		spin_lock_bh(&chain->lock);

		if (!check_cb || check_cb(chain)) {
			batadv_frag_clear_chain(&chain->fragment_list, true);
			chain->size = 0;
		}

		spin_unlock_bh(&chain->lock);
	}
}

/**
 * batadv_frag_size_limit() - maximum possible size of packet to be fragmented
 *
 * Return: the maximum size of payload that can be fragmented.
 */
static int batadv_frag_size_limit(void)
{
	int limit = BATADV_FRAG_MAX_FRAG_SIZE;

	limit -= sizeof(struct batadv_frag_packet);
	limit *= BATADV_FRAG_MAX_FRAGMENTS;
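	/* Example, assuming the current BATADV_FRAG_MAX_FRAG_SIZE of 1400
	 * bytes and a 20 byte struct batadv_frag_packet: a merged packet
	 * can carry up to (1400 - 20) * 16 = 22080 bytes of payload.
	 */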

	return limit;
}

/**
 * batadv_frag_init_chain() - check and prepare fragment chain for new fragment
 * @chain: chain in fragments table to init
 * @seqno: sequence number of the received fragment
 *
 * Make chain ready for a fragment with sequence number "seqno". Delete existing
 * entries if they have an "old" sequence number.
 *
 * Caller must hold chain->lock.
 *
 * Return: true if chain is empty and caller can just insert the new fragment
 * without searching for the right position.
 */
static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
				   u16 seqno)
{
	lockdep_assert_held(&chain->lock);

	if (chain->seqno == seqno)
		return false;

	if (!hlist_empty(&chain->fragment_list))
		batadv_frag_clear_chain(&chain->fragment_list, true);

	chain->size = 0;
	chain->seqno = seqno;

	return true;
}

/**
 * batadv_frag_insert_packet() - insert a fragment into a fragment chain
 * @orig_node: originator that the fragment was received from
 * @skb: skb to insert
 * @chain_out: list head to attach complete chains of fragments to
 *
 * Insert a new fragment into the reverse ordered chain in the right table
 * entry. The hash table entry is cleared if "old" fragments exist in it.
 *
 * Return: true if skb is buffered, false on error. If the chain has all the
 * fragments needed to merge the packet, the chain is moved to the passed head
 * to avoid locking the chain in the table.
 */
static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
				      struct sk_buff *skb,
				      struct hlist_head *chain_out)
{
	struct batadv_frag_table_entry *chain;
	struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
	struct batadv_frag_list_entry *frag_entry_last = NULL;
	struct batadv_frag_packet *frag_packet;
	u8 bucket;
	u16 seqno, hdr_size = sizeof(struct batadv_frag_packet);
	bool ret = false;

	/* Linearize packet to avoid linearizing 16 packets in a row when doing
	 * the later merge. Non-linear merge should be added to remove this
	 * linearization.
	 */
	if (skb_linearize(skb) < 0)
		goto err;

	frag_packet = (struct batadv_frag_packet *)skb->data;
	seqno = ntohs(frag_packet->seqno);
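	/* The bucket below is derived from the sender-chosen seqno; with
	 * BATADV_FRAG_BUFFER_COUNT chains per originator (currently 8), that
	 * many packets can be reassembled concurrently per originator.
	 */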
	bucket = seqno % BATADV_FRAG_BUFFER_COUNT;

	frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
	if (!frag_entry_new)
		goto err;

	frag_entry_new->skb = skb;
	frag_entry_new->no = frag_packet->no;

	/* Select entry in the "chain table" and delete any prior fragments
	 * with another sequence number. batadv_frag_init_chain() returns
	 * true if the chain is empty after the call.
	 */
	chain = &orig_node->fragments[bucket];
	spin_lock_bh(&chain->lock);
	if (batadv_frag_init_chain(chain, seqno)) {
		hlist_add_head(&frag_entry_new->list, &chain->fragment_list);
		chain->size = skb->len - hdr_size;
		chain->timestamp = jiffies;
		chain->total_size = ntohs(frag_packet->total_size);
		ret = true;
		goto out;
	}

	/* Find the position for the new fragment. */
	hlist_for_each_entry(frag_entry_curr, &chain->fragment_list, list) {
		/* Drop packet if fragment already exists. */
		if (frag_entry_curr->no == frag_entry_new->no)
			goto err_unlock;

		/* Order fragments from highest to lowest. */
		if (frag_entry_curr->no < frag_entry_new->no) {
			hlist_add_before(&frag_entry_new->list,
					 &frag_entry_curr->list);
			chain->size += skb->len - hdr_size;
			chain->timestamp = jiffies;
			ret = true;
			goto out;
		}

		/* store current entry because it could be the last in list */
		frag_entry_last = frag_entry_curr;
	}

	/* Reached the end of the list, so insert after 'frag_entry_last'. */
	if (likely(frag_entry_last)) {
		hlist_add_behind(&frag_entry_new->list, &frag_entry_last->list);
		chain->size += skb->len - hdr_size;
		chain->timestamp = jiffies;
		ret = true;
	}

out:
	if (chain->size > batadv_frag_size_limit() ||
	    chain->total_size != ntohs(frag_packet->total_size) ||
	    chain->total_size > batadv_frag_size_limit()) {
		/* Clear chain if total size of either the list or the packet
		 * exceeds the maximum size of one merged packet. Don't allow
		 * packets to have different total_size.
		 */
		batadv_frag_clear_chain(&chain->fragment_list, true);
		chain->size = 0;
	} else if (ntohs(frag_packet->total_size) == chain->size) {
		/* All fragments received. Hand over chain to caller. */
		hlist_move_list(&chain->fragment_list, chain_out);
		chain->size = 0;
	}

err_unlock:
	spin_unlock_bh(&chain->lock);

err:
	if (!ret) {
		kfree(frag_entry_new);
		kfree_skb(skb);
	}

	return ret;
}

/**
 * batadv_frag_merge_packets() - merge a chain of fragments
 * @chain: head of chain with fragments
 *
 * Expand the first skb in the chain and copy the content of the remaining
 * skb's into the expanded one. After doing so, clear the chain.
 *
 * Return: the merged skb or NULL on error.
 */
static struct sk_buff *
batadv_frag_merge_packets(struct hlist_head *chain)
{
	struct batadv_frag_packet *packet;
	struct batadv_frag_list_entry *entry;
	struct sk_buff *skb_out;
	int size, hdr_size = sizeof(struct batadv_frag_packet);
	bool dropped = false;

	/* Remove first entry, as this is the destination for the rest of the
	 * fragments.
	 */
	entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
	hlist_del(&entry->list);
	skb_out = entry->skb;
	kfree(entry);

	packet = (struct batadv_frag_packet *)skb_out->data;
	size = ntohs(packet->total_size);

	/* Make room for the rest of the fragments. */
	if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
		kfree_skb(skb_out);
		skb_out = NULL;
		dropped = true;
		goto free;
	}

	/* Move the existing MAC header to just before the payload
	 * (overwriting the fragment header).
	 */
	skb_pull(skb_out, hdr_size);
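	/* The payload is being reassembled from multiple skbs, so any
	 * checksum already verified for the first fragment no longer covers
	 * the merged packet.
	 */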
	skb_out->ip_summed = CHECKSUM_NONE;
	memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
	skb_set_mac_header(skb_out, -ETH_HLEN);
	skb_reset_network_header(skb_out);
	skb_reset_transport_header(skb_out);

	/* Copy the payload of each remaining fragment into skb_out */
	hlist_for_each_entry(entry, chain, list) {
		size = entry->skb->len - hdr_size;
		skb_put_data(skb_out, entry->skb->data + hdr_size, size);
	}

free:
	/* Locking is not needed, because 'chain' is not part of any orig. */
	batadv_frag_clear_chain(chain, dropped);
	return skb_out;
}

/**
 * batadv_frag_skb_buffer() - buffer fragment for later merge
 * @skb: skb to buffer
 * @orig_node_src: originator that the skb is received from
 *
 * Add fragment to buffer and merge fragments if possible.
 *
 * There are three possible outcomes: 1) Packet is merged: Return true and
 * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
 * to NULL; 3) Error: Return false and free skb.
 *
 * Return: true when the packet is merged or buffered, false when the skb is
 * not used.
 */
bool batadv_frag_skb_buffer(struct sk_buff **skb,
			    struct batadv_orig_node *orig_node_src)
{
	struct sk_buff *skb_out = NULL;
	struct hlist_head head = HLIST_HEAD_INIT;
	bool ret = false;

	/* Add packet to buffer and table entry if merge is possible. */
	if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
		goto out_err;

	/* Leave if more fragments are needed to merge. */
	if (hlist_empty(&head))
		goto out;

	skb_out = batadv_frag_merge_packets(&head);
	if (!skb_out)
		goto out_err;

out:
	ret = true;
out_err:
	*skb = skb_out;
	return ret;
}

/**
 * batadv_frag_skb_fwd() - forward fragments that would exceed MTU when merged
 * @skb: skb to forward
 * @recv_if: interface that the skb is received on
 * @orig_node_src: originator that the skb is received from
 *
 * Look up the next-hop of the fragment's payload and check if the merged
 * packet would exceed the MTU towards the next-hop. If so, the fragment is
 * forwarded without merging it.
 *
 * Return: true if the fragment is consumed/forwarded, false otherwise.
 */
bool batadv_frag_skb_fwd(struct sk_buff *skb,
			 struct batadv_hard_iface *recv_if,
			 struct batadv_orig_node *orig_node_src)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node_dst;
	struct batadv_neigh_node *neigh_node = NULL;
	struct batadv_frag_packet *packet;
	u16 total_size;
	bool ret = false;

	packet = (struct batadv_frag_packet *)skb->data;
	orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest);
	if (!orig_node_dst)
		goto out;

	neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if);
	if (!neigh_node)
		goto out;

	/* Forward the fragment, if the merged packet would be too big to
	 * be assembled.
	 */
	total_size = ntohs(packet->total_size);
	if (total_size > neigh_node->if_incoming->net_dev->mtu) {
		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
				   skb->len + ETH_HLEN);

		packet->ttl--;
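		/* batadv_send_unicast_skb() consumes the skb in every case,
		 * so it must not be referenced afterwards.
		 */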
		batadv_send_unicast_skb(skb, neigh_node);
		ret = true;
	}

out:
	if (orig_node_dst)
		batadv_orig_node_put(orig_node_dst);
	if (neigh_node)
		batadv_neigh_node_put(neigh_node);
	return ret;
}

/**
 * batadv_frag_create() - create a fragment from skb
 * @skb: skb to create fragment from
 * @frag_head: header to use in new fragment
 * @fragment_size: size of new fragment
 *
 * Split the passed skb into two fragments: a new one with the size of the
 * passed fragment_size and the old one with the rest. The new skb contains
 * data from the tail of the old skb.
 *
 * Return: the new fragment, NULL on error.
 */
static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
					  struct batadv_frag_packet *frag_head,
					  unsigned int fragment_size)
{
	struct sk_buff *skb_fragment;
	unsigned int header_size = sizeof(*frag_head);
	unsigned int mtu = fragment_size + header_size;

	skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
	if (!skb_fragment)
		goto err;

	skb_fragment->priority = skb->priority;

	/* Eat the last fragment_size bytes of the skb */
	skb_reserve(skb_fragment, header_size + ETH_HLEN);
	skb_split(skb, skb_fragment, skb->len - fragment_size);

	/* Add the header */
	skb_push(skb_fragment, header_size);
	memcpy(skb_fragment->data, frag_head, header_size);

err:
	return skb_fragment;
}

/**
 * batadv_frag_send_packet() - create up to 16 fragments from the passed skb
 * @skb: skb to create fragments from
 * @orig_node: final destination of the created fragments
 * @neigh_node: next-hop of the created fragments
 *
 * Return: the netdev tx status or a negative errno code on a failure
 */
int batadv_frag_send_packet(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_neigh_node *neigh_node)
{
	struct batadv_priv *bat_priv;
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_frag_packet frag_header;
	struct sk_buff *skb_fragment;
	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
	unsigned int header_size = sizeof(frag_header);
	unsigned int max_fragment_size, num_fragments;
	int ret;

	/* To avoid merge and refragmentation at next-hops we never send
	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
	 */
	mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
	max_fragment_size = mtu - header_size;

	if (skb->len == 0 || max_fragment_size == 0)
		return -EINVAL;

	num_fragments = (skb->len - 1) / max_fragment_size + 1;
	max_fragment_size = (skb->len - 1) / num_fragments + 1;
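	/* Example with hypothetical numbers: a 3000 byte skb and a
	 * max_fragment_size of 1380 yield num_fragments = 3; the second
	 * line above then evens the fragments out to 1000 bytes each
	 * instead of 1380 + 1380 + 240.
	 */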

	/* Don't even try to fragment if we need more than 16 fragments */
	if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) {
		ret = -EAGAIN;
		goto free_skb;
	}

	bat_priv = orig_node->bat_priv;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = -EINVAL;
		goto free_skb;
	}

	/* Create one header to be copied to all fragments */
	frag_header.packet_type = BATADV_UNICAST_FRAG;
	frag_header.version = BATADV_COMPAT_VERSION;
	frag_header.ttl = BATADV_TTL;
	frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
	frag_header.reserved = 0;
	frag_header.no = 0;
	frag_header.total_size = htons(skb->len);

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority.  This is used
	 * to allow 802.1d priority to be passed directly in from VLAN
	 * tags, etc.
	 */
	if (skb->priority >= 256 && skb->priority <= 263)
		frag_header.priority = skb->priority - 256;
	else
		frag_header.priority = 0;

	ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
	ether_addr_copy(frag_header.dest, orig_node->orig);

	/* Eat and send fragments from the tail of skb */
	while (skb->len > max_fragment_size) {
		/* The initial check in this function should cover this case */
		if (unlikely(frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)) {
			ret = -EINVAL;
			goto put_primary_if;
		}

		skb_fragment = batadv_frag_create(skb, &frag_header,
						  max_fragment_size);
		if (!skb_fragment) {
			ret = -ENOMEM;
			goto put_primary_if;
		}

		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
				   skb_fragment->len + ETH_HLEN);
		ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
		if (ret != NET_XMIT_SUCCESS) {
			ret = NET_XMIT_DROP;
			goto put_primary_if;
		}

		frag_header.no++;
	}

	/* Make room for the fragment header. */
	if (batadv_skb_head_push(skb, header_size) < 0 ||
	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
		ret = -ENOMEM;
		goto put_primary_if;
	}

	memcpy(skb->data, &frag_header, header_size);
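	/* This remainder of the original skb carries its first payload bytes
	 * and has the highest fragment number; the receive path keeps chains
	 * ordered from highest to lowest number, so the payload is merged
	 * front to back again.
	 */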

	/* Send the last fragment */
	batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
			   skb->len + ETH_HLEN);
	ret = batadv_send_unicast_skb(skb, neigh_node);
	/* skb was consumed */
	skb = NULL;

put_primary_if:
	batadv_hardif_put(primary_if);
free_skb:
	kfree_skb(skb);

	return ret;
}