/*
 * Copyright (c) 2018 Intel Corporation
 * Copyright (c) 2022 Jamie McCrae
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(net_ipv4, CONFIG_NET_IPV4_LOG_LEVEL);

#include <errno.h>
#include <zephyr/net/net_core.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_stats.h>
#include <zephyr/net/net_context.h>
#include <zephyr/net/net_mgmt.h>
#include <zephyr/random/random.h>
#include "net_private.h"
#include "connection.h"
#include "icmpv4.h"
#include "udp_internal.h"
#include "tcp_internal.h"
#include "ipv4.h"
#include "route.h"
#include "net_stats.h"
#include "pmtu.h"

/* Timeout for various buffer allocations in this file. */
#define NET_BUF_TIMEOUT K_MSEC(100)

static void reassembly_timeout(struct k_work *work);

static struct net_ipv4_reassembly reassembly[CONFIG_NET_IPV4_FRAGMENT_MAX_COUNT];

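/* Find an existing reassembly slot matching the given fragment id, source,
 * destination and protocol, or claim a free slot (one whose timer is not
 * running) and start its timeout. Returns NULL if all slots are in use.
 */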
static struct net_ipv4_reassembly *reassembly_get(uint16_t id, struct in_addr *src,
						  struct in_addr *dst, uint8_t protocol)
{
	int i, avail = -1;

	for (i = 0; i < CONFIG_NET_IPV4_FRAGMENT_MAX_COUNT; i++) {
		if (k_work_delayable_remaining_get(&reassembly[i].timer) &&
		    reassembly[i].id == id &&
		    net_ipv4_addr_cmp(src, &reassembly[i].src) &&
		    net_ipv4_addr_cmp(dst, &reassembly[i].dst) &&
		    reassembly[i].protocol == protocol) {
			return &reassembly[i];
		}

		if (k_work_delayable_remaining_get(&reassembly[i].timer)) {
			continue;
		}

		if (avail < 0) {
			avail = i;
		}
	}

	if (avail < 0) {
		return NULL;
	}

	k_work_reschedule(&reassembly[avail].timer, K_SECONDS(CONFIG_NET_IPV4_FRAGMENT_TIMEOUT));

	net_ipaddr_copy(&reassembly[avail].src, src);
	net_ipaddr_copy(&reassembly[avail].dst, dst);

	reassembly[avail].protocol = protocol;
	reassembly[avail].id = id;

	return &reassembly[avail];
}

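/* Stop the reassembly timer and release all queued fragments for the
 * reassembly matching the given id, source and destination. Returns true
 * if a matching reassembly was found and cancelled.
 */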
static bool reassembly_cancel(uint32_t id, struct in_addr *src, struct in_addr *dst)
{
	int i, j;

	LOG_DBG("Cancel 0x%x", id);

	for (i = 0; i < CONFIG_NET_IPV4_FRAGMENT_MAX_COUNT; i++) {
		int32_t remaining;

		if (reassembly[i].id != id ||
		    !net_ipv4_addr_cmp(src, &reassembly[i].src) ||
		    !net_ipv4_addr_cmp(dst, &reassembly[i].dst)) {
			continue;
		}

		remaining = k_ticks_to_ms_ceil32(
			k_work_delayable_remaining_get(&reassembly[i].timer));
		k_work_cancel_delayable(&reassembly[i].timer);

		LOG_DBG("IPv4 reassembly id 0x%x remaining %d ms", reassembly[i].id, remaining);

		reassembly[i].id = 0U;

		for (j = 0; j < CONFIG_NET_IPV4_FRAGMENT_MAX_PKT; j++) {
			if (!reassembly[i].pkt[j]) {
				continue;
			}

			LOG_DBG("[%d] IPv4 reassembly pkt %p %zd bytes data", j,
				reassembly[i].pkt[j], net_pkt_get_len(reassembly[i].pkt[j]));

			net_pkt_unref(reassembly[i].pkt[j]);
			reassembly[i].pkt[j] = NULL;
		}

		return true;
	}

	return false;
}

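/* Log the id, addresses and remaining timeout of a reassembly for debugging. */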
static void reassembly_info(char *str, struct net_ipv4_reassembly *reass)
{
	LOG_DBG("%s id 0x%x src %s dst %s remain %d ms", str, reass->id,
		net_sprint_ipv4_addr(&reass->src),
		net_sprint_ipv4_addr(&reass->dst),
		k_ticks_to_ms_ceil32(
			k_work_delayable_remaining_get(&reass->timer)));
}

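/* Called when the reassembly timer expires before all fragments arrived. */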
static void reassembly_timeout(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct net_ipv4_reassembly *reass =
		CONTAINER_OF(dwork, struct net_ipv4_reassembly, timer);

	reassembly_info("Reassembly cancelled", reass);

	/* Send an ICMPv4 Time Exceeded only if we received the first fragment */
	if (reass->pkt[0] && net_pkt_ipv4_fragment_offset(reass->pkt[0]) == 0) {
		net_icmpv4_send_error(reass->pkt[0], NET_ICMPV4_TIME_EXCEEDED,
				      NET_ICMPV4_TIME_EXCEEDED_FRAGMENT_REASSEMBLY_TIME);
	}

	reassembly_cancel(reass->id, &reass->src, &reass->dst);
}

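/* Chain all received fragments onto the first one, strip the per-fragment
 * IPv4 headers, fix up the length, offset and checksum of the reassembled
 * header and feed the packet back into the IP stack.
 */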
static void reassemble_packet(struct net_ipv4_reassembly *reass)
{
	NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access, struct net_ipv4_hdr);
	struct net_ipv4_hdr *ipv4_hdr;
	struct net_pkt *pkt;
	struct net_buf *last;
	int i;

	k_work_cancel_delayable(&reass->timer);

	NET_ASSERT(reass->pkt[0]);

	last = net_buf_frag_last(reass->pkt[0]->buffer);

	/* We start from the 2nd packet, which is then appended to the first one */
	for (i = 1; i < CONFIG_NET_IPV4_FRAGMENT_MAX_PKT; i++) {
		pkt = reass->pkt[i];
		if (!pkt) {
			break;
		}

		net_pkt_cursor_init(pkt);

		/* Get rid of the IPv4 header which is at the beginning of the fragment. */
		ipv4_hdr = (struct net_ipv4_hdr *)net_pkt_get_data(pkt, &ipv4_access);
		if (!ipv4_hdr) {
			goto error;
		}

		LOG_DBG("Removing %d bytes from start of pkt %p", net_pkt_ip_hdr_len(pkt),
			pkt->buffer);

		if (net_pkt_pull(pkt, net_pkt_ip_hdr_len(pkt))) {
			LOG_ERR("Failed to pull headers");
			reassembly_cancel(reass->id, &reass->src, &reass->dst);
			return;
		}

		/* Attach the data to the previous packet */
		last->frags = pkt->buffer;
		last = net_buf_frag_last(pkt->buffer);

		pkt->buffer = NULL;
		reass->pkt[i] = NULL;

		net_pkt_unref(pkt);
	}

	pkt = reass->pkt[0];
	reass->pkt[0] = NULL;

	/* Update the header details for the packet */
	net_pkt_cursor_init(pkt);

	ipv4_hdr = (struct net_ipv4_hdr *)net_pkt_get_data(pkt, &ipv4_access);
	if (!ipv4_hdr) {
		goto error;
	}

	/* Fix the total length, offset and checksum of the IPv4 packet */
	ipv4_hdr->len = htons(net_pkt_get_len(pkt));
	ipv4_hdr->offset[0] = 0;
	ipv4_hdr->offset[1] = 0;
	ipv4_hdr->chksum = 0;
	ipv4_hdr->chksum = net_calc_chksum_ipv4(pkt);

	net_pkt_set_data(pkt, &ipv4_access);
	net_pkt_set_ip_reassembled(pkt, true);

	LOG_DBG("New pkt %p IPv4 len is %d bytes", pkt, net_pkt_get_len(pkt));

	/* We need to use the queue when feeding the packet back into the
	 * IP stack as we might run out of stack if we call processing_data()
	 * directly. As the packet does not contain a link layer header, we
	 * MUST NOT pass it to L2, so there is a special check for that
	 * in process_data() when handling the packet.
	 */
	if (net_recv_data(net_pkt_iface(pkt), pkt) >= 0) {
		return;
	}

error:
	net_pkt_unref(pkt);
}

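/* Invoke the callback for every currently active reassembly. */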
void net_ipv4_frag_foreach(net_ipv4_frag_cb_t cb, void *user_data)
{
	int i;

	for (i = 0; i < CONFIG_NET_IPV4_FRAGMENT_MAX_COUNT; i++) {
		if (!k_work_delayable_remaining_get(&reassembly[i].timer)) {
			continue;
		}

		cb(&reassembly[i], user_data);
	}
}

/* Verify that we have received all the fragments and that they are in the
 * correct order.
 * Return:
 * - a negative value if the fragments are erroneous and must be dropped
 * - zero if we are expecting more fragments
 * - a positive value if we can proceed with the reassembly
 */
static int fragments_are_ready(struct net_ipv4_reassembly *reass)
{
	unsigned int expected_offset = 0;
	bool more = true;
	int i;

	/* Fragments can arrive in any order, for example in reverse order:
	 *   1 -> Fragment3(M=0, offset=x2)
	 *   2 -> Fragment2(M=1, offset=x1)
	 *   3 -> Fragment1(M=1, offset=0)
	 * We have to test several requirements before proceeding with the reassembly:
	 * - We received the first fragment (Fragment Offset is 0)
	 * - All intermediate fragments are contiguous
	 * - The More bit of the last fragment is 0
	 */
	for (i = 0; i < CONFIG_NET_IPV4_FRAGMENT_MAX_PKT; i++) {
		struct net_pkt *pkt = reass->pkt[i];
		unsigned int offset;
		int payload_len;

		if (!pkt) {
			break;
		}

		offset = net_pkt_ipv4_fragment_offset(pkt);

		if (offset < expected_offset) {
			/* Overlapping or duplicated, drop it */
			return -EBADMSG;
		} else if (offset != expected_offset) {
			/* Not contiguous, let's wait for more fragments */
			return 0;
		}

		payload_len = net_pkt_get_len(pkt) - net_pkt_ip_hdr_len(pkt);

		if (payload_len < 0) {
			return -EBADMSG;
		}

		expected_offset += payload_len;
		more = net_pkt_ipv4_fragment_more(pkt);
	}

	if (more) {
		return 0;
	}

	return 1;
}

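/* Shift the fragments in pkt[pos]..pkt[i - 1] up by one slot to make room
 * at pkt[pos] for an out-of-order fragment. Returns -ENOMEM if the array
 * is already full.
 */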
static int shift_packets(struct net_ipv4_reassembly *reass, int pos)
{
	int i;

	for (i = pos + 1; i < CONFIG_NET_IPV4_FRAGMENT_MAX_PKT; i++) {
		if (!reass->pkt[i]) {
			LOG_DBG("Moving [%d] %p (offset 0x%x) to [%d]", pos, reass->pkt[pos],
				net_pkt_ipv4_fragment_offset(reass->pkt[pos]), pos + 1);

			/* pkt[i] is free, so shift everything between [pos] and [i - 1] by one
			 * element
			 */
			memmove(&reass->pkt[pos + 1], &reass->pkt[pos],
				sizeof(void *) * (i - pos));

			/* pkt[pos] is now free */
			reass->pkt[pos] = NULL;

			return 0;
		}
	}

	/* We do not have free space left in the array */
	return -ENOMEM;
}

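/* Handle a received IPv4 fragment: insert it into the matching reassembly
 * in fragment-offset order and reassemble the full packet once all
 * fragments have arrived.
 */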
enum net_verdict net_ipv4_handle_fragment_hdr(struct net_pkt *pkt, struct net_ipv4_hdr *hdr)
{
	struct net_ipv4_reassembly *reass = NULL;
	uint16_t flag;
	bool found;
	uint8_t more;
	uint16_t id;
	int ret;
	int i;

	flag = ntohs(*((uint16_t *)&hdr->offset));
	id = ntohs(*((uint16_t *)&hdr->id));

	reass = reassembly_get(id, (struct in_addr *)hdr->src,
			       (struct in_addr *)hdr->dst, hdr->proto);
	if (!reass) {
		LOG_ERR("Cannot get reassembly slot, dropping pkt %p", pkt);
		goto drop;
	}

	more = (flag & NET_IPV4_MORE_FRAG_MASK) ? true : false;
	net_pkt_set_ipv4_fragment_flags(pkt, flag);

	if (more && (net_pkt_get_len(pkt) - net_pkt_ip_hdr_len(pkt)) % 8) {
		/* The fragment length is not a multiple of 8, discard the packet
		 * and send a bad IP header error.
		 */
		net_icmpv4_send_error(pkt, NET_ICMPV4_BAD_IP_HEADER,
				      NET_ICMPV4_BAD_IP_HEADER_LENGTH);
		goto drop;
	}

	/* The fragments might come in the wrong order, so place them in the
	 * reassembly chain in the correct order.
	 */
	for (i = 0, found = false; i < CONFIG_NET_IPV4_FRAGMENT_MAX_PKT; i++) {
		if (reass->pkt[i]) {
			if (net_pkt_ipv4_fragment_offset(reass->pkt[i]) <
			    net_pkt_ipv4_fragment_offset(pkt)) {
				continue;
			}

			/* Make room for this fragment. If there is no room, the
			 * whole reassembly is discarded.
			 */
			if (shift_packets(reass, i)) {
				break;
			}
		}

		LOG_DBG("Storing pkt %p to slot %d offset %d", pkt, i,
			net_pkt_ipv4_fragment_offset(pkt));
		reass->pkt[i] = pkt;
		found = true;

		break;
	}

	if (!found) {
		/* We could not add this fragment into our saved fragment list. The whole packet
		 * must be discarded at this point.
		 */
		LOG_ERR("No slots available for 0x%x", reass->id);
		net_pkt_unref(pkt);
		goto drop;
	}

	ret = fragments_are_ready(reass);
	if (ret < 0) {
		LOG_ERR("Reassembled IPv4 verify failed, dropping id %u", reass->id);

		/* Let the caller release the already inserted pkt */
		if (i < CONFIG_NET_IPV4_FRAGMENT_MAX_PKT) {
			reass->pkt[i] = NULL;
		}

		net_pkt_unref(pkt);
		goto drop;
	} else if (ret == 0) {
		reassembly_info("Reassembly nth pkt", reass);

		LOG_DBG("More fragments to be received");
		goto accept;
	}

	reassembly_info("Reassembly last pkt", reass);

	/* The last fragment was received, reassemble the packet */
	reassemble_packet(reass);

accept:
	return NET_OK;

drop:
	if (reass) {
		if (reassembly_cancel(reass->id, &reass->src, &reass->dst)) {
			return NET_OK;
		}
	}

	return NET_DROP;
}

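/* Allocate and send a single fragment carrying fit_len bytes of the original
 * payload starting at frag_offset. The IPv4 header is copied from the
 * original packet, then its id, offset/flags, length and checksum are
 * updated for the fragment.
 */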
static int send_ipv4_fragment(struct net_pkt *pkt, uint16_t rand_id, uint16_t fit_len,
			      uint16_t frag_offset, bool final)
{
	int ret = -ENOBUFS;
	struct net_pkt *frag_pkt;
	struct net_pkt_cursor cur;
	struct net_pkt_cursor cur_pkt;
	uint16_t offset_pkt;

	frag_pkt = net_pkt_alloc_with_buffer(net_pkt_iface(pkt), fit_len +
					     net_pkt_ip_hdr_len(pkt),
					     AF_INET, 0, NET_BUF_TIMEOUT);
	if (!frag_pkt) {
		return -ENOMEM;
	}

	net_pkt_cursor_init(frag_pkt);
	net_pkt_cursor_backup(pkt, &cur_pkt);
	net_pkt_cursor_backup(frag_pkt, &cur);

	net_pkt_set_ll_proto_type(frag_pkt, net_pkt_ll_proto_type(pkt));

	/* Copy the original IPv4 headers back to the fragment packet */
	if (net_pkt_copy(frag_pkt, pkt, net_pkt_ip_hdr_len(pkt))) {
		goto fail;
	}

	net_pkt_cursor_restore(pkt, &cur_pkt);

	/* Copy the payload part of this fragment from the original packet */
	if (net_pkt_skip(pkt, (frag_offset + net_pkt_ip_hdr_len(pkt))) ||
	    net_pkt_copy(frag_pkt, pkt, fit_len)) {
		goto fail;
	}

	net_pkt_cursor_restore(frag_pkt, &cur);
	net_pkt_cursor_restore(pkt, &cur_pkt);

	net_pkt_set_ip_hdr_len(frag_pkt, net_pkt_ip_hdr_len(pkt));

	net_pkt_set_overwrite(frag_pkt, true);
	net_pkt_cursor_init(frag_pkt);

	/* Update the header of the packet */
	NET_PKT_DATA_ACCESS_DEFINE(ipv4_access, struct net_ipv4_hdr);
	struct net_ipv4_hdr *ipv4_hdr;

	ipv4_hdr = (struct net_ipv4_hdr *)net_pkt_get_data(frag_pkt, &ipv4_access);
	if (!ipv4_hdr) {
		goto fail;
	}

	memcpy(ipv4_hdr->id, &rand_id, sizeof(rand_id));
	offset_pkt = frag_offset / 8;

	if (!final) {
		offset_pkt |= NET_IPV4_MORE_FRAG_MASK;
	}

	sys_put_be16(offset_pkt, ipv4_hdr->offset);
	ipv4_hdr->len = htons((fit_len + net_pkt_ip_hdr_len(pkt)));

	ipv4_hdr->chksum = 0;
	ipv4_hdr->chksum = net_calc_chksum_ipv4(frag_pkt);

	net_pkt_set_chksum_done(frag_pkt, true);

	net_pkt_set_data(frag_pkt, &ipv4_access);

	net_pkt_set_overwrite(frag_pkt, false);
	net_pkt_cursor_restore(frag_pkt, &cur);

	if (final) {
		net_pkt_set_context(frag_pkt, net_pkt_context(pkt));
	}

	/* If everything has been OK so far, we can send the packet. */
	ret = net_send_data(frag_pkt);
	if (ret < 0) {
		goto fail;
	}

	/* Let this packet be sent; hopefully it will release memory that can be
	 * used for the next IPv4 fragment.
	 */
	k_yield();

	return 0;

fail:
	LOG_ERR("Cannot send fragment (%d)", ret);
	net_pkt_unref(frag_pkt);

	return ret;
}

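/* Split a packet larger than the MTU into fragments and send them one by
 * one, finalizing the L4 checksum first if needed. Fails with -EPERM if
 * the Don't Fragment flag is set in the IPv4 header.
 */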
int net_ipv4_send_fragmented_pkt(struct net_if *iface, struct net_pkt *pkt,
				 uint16_t pkt_len, uint16_t mtu)
{
	uint16_t frag_offset = 0;
	uint16_t flag;
	int fit_len;
	int ret;
	struct net_ipv4_hdr *frag_hdr;

	NET_PKT_DATA_ACCESS_DEFINE(frag_access, struct net_ipv4_hdr);
	frag_hdr = (struct net_ipv4_hdr *)net_pkt_get_data(pkt, &frag_access);
	if (!frag_hdr) {
		return -EINVAL;
	}

	/* Check if the DF (Don't Fragment) flag is set; if so, we cannot
	 * fragment the packet.
	 */
	flag = ntohs(*((uint16_t *)&frag_hdr->offset));

	if (flag & NET_IPV4_DO_NOT_FRAG_MASK) {
		/* This packet cannot be fragmented */
		return -EPERM;
	}

	/* Generate a random ID to be used for packet identification, ensuring that it is not 0 */
	uint16_t rand_id = sys_rand16_get();

	if (rand_id == 0) {
		rand_id = 1;
	}

	/* Calculate the maximum payload that can fit into each packet after the
	 * IPv4 header. Offsets are multiples of 8, therefore round down to the
	 * nearest 8-byte boundary.
	 */
	fit_len = (mtu - net_pkt_ip_hdr_len(pkt)) / 8;

	if (fit_len <= 0) {
		LOG_ERR("No room for IPv4 payload MTU %d hdrs_len %d", mtu,
			net_pkt_ip_hdr_len(pkt));
		return -EINVAL;
	}

	fit_len *= 8;

	pkt_len -= net_pkt_ip_hdr_len(pkt);

	/* Calculate the L4 checksum (if not done already) before the fragmentation. */
	if (!net_pkt_is_chksum_done(pkt)) {
		struct net_pkt_cursor backup;

		net_pkt_cursor_backup(pkt, &backup);
		net_pkt_acknowledge_data(pkt, &frag_access);

		switch (frag_hdr->proto) {
		case IPPROTO_ICMP:
			ret = net_icmpv4_finalize(pkt, true);
			break;
		case IPPROTO_TCP:
			ret = net_tcp_finalize(pkt, true);
			break;
		case IPPROTO_UDP:
			ret = net_udp_finalize(pkt, true);
			break;
		default:
			ret = 0;
			break;
		}

		if (ret < 0) {
			return ret;
		}

		net_pkt_cursor_restore(pkt, &backup);
	}

	while (frag_offset < pkt_len) {
		bool final = false;

		if ((frag_offset + fit_len) >= pkt_len) {
			final = true;
			fit_len = (pkt_len - frag_offset);
		}

		ret = send_ipv4_fragment(pkt, rand_id, fit_len, frag_offset, final);
		if (ret < 0) {
			return ret;
		}

		frag_offset += fit_len;
	}

	return 0;
}

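/* Check an outgoing packet against the destination MTU and fragment it if
 * needed. Returns NET_CONTINUE when the packet was split (its fragments are
 * sent separately), NET_OK when the packet can be sent as-is, and NET_DROP
 * on error.
 */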
enum net_verdict net_ipv4_prepare_for_send_fragment(struct net_pkt *pkt)
{
	NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access, struct net_ipv4_hdr);
	struct net_ipv4_hdr *ip_hdr;
	int ret;

	NET_ASSERT(pkt && pkt->buffer);

	ip_hdr = (struct net_ipv4_hdr *)net_pkt_get_data(pkt, &ipv4_access);
	if (!ip_hdr) {
		return NET_DROP;
	}

	/* If we have already fragmented the packet, the ID field will contain a
	 * non-zero value and we can skip the other checks.
	 */
	if (ip_hdr->id[0] == 0 && ip_hdr->id[1] == 0) {
		size_t pkt_len = net_pkt_get_len(pkt);
		uint16_t mtu;

		if (IS_ENABLED(CONFIG_NET_IPV4_PMTU)) {
			struct sockaddr_in dst = {
				.sin_family = AF_INET,
				.sin_addr = *((struct in_addr *)ip_hdr->dst),
			};

			ret = net_pmtu_get_mtu((struct sockaddr *)&dst);
			if (ret <= 0) {
				goto use_interface_mtu;
			}

			mtu = ret;
		} else {
use_interface_mtu:
			mtu = net_if_get_mtu(net_pkt_iface(pkt));
			mtu = MAX(NET_IPV4_MTU, mtu);
		}

		if (pkt_len > mtu) {
			ret = net_ipv4_send_fragmented_pkt(net_pkt_iface(pkt), pkt, pkt_len, mtu);

			if (ret < 0) {
				LOG_DBG("Cannot fragment IPv4 pkt (%d)", ret);

				if (ret == -EPERM) {
					/* The Don't Fragment flag is set: try to send
					 * the packet anyway and hope the original large
					 * packet can be sent OK.
					 */
					goto ignore_frag_error;
				}

				/* Other error, drop the packet */
				return NET_DROP;
			}

			/* We need to unref here because we simulate the packet being sent. */
			net_pkt_unref(pkt);

			/* No need to continue with the sending as the packet is now split and
			 * its fragments will be sent separately to the network.
			 */
			return NET_CONTINUE;
		}
	}

ignore_frag_error:

	return NET_OK;
}

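/* Initialize the delayable work items used as reassembly timers. */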
void net_ipv4_setup_fragment_buffers(void)
{
	/* Static initialisation does not work here because of the array, so we
	 * must do it at runtime.
	 */
	for (int i = 0; i < CONFIG_NET_IPV4_FRAGMENT_MAX_COUNT; i++) {
		k_work_init_delayable(&reassembly[i].timer, reassembly_timeout);
	}
}