/** @file
 * @brief IPv6 Fragment related functions
 */

/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <logging/log.h>
LOG_MODULE_DECLARE(net_ipv6, CONFIG_NET_IPV6_LOG_LEVEL);

#include <errno.h>
#include <net/net_core.h>
#include <net/net_pkt.h>
#include <net/net_stats.h>
#include <net/net_context.h>
#include <net/net_mgmt.h>
#include <random/rand32.h>
#include "net_private.h"
#include "connection.h"
#include "icmpv6.h"
#include "udp_internal.h"
#include "tcp_internal.h"
#include "ipv6.h"
#include "nbr.h"
#include "6lo.h"
#include "route.h"
#include "net_stats.h"

/* Timeout for various buffer allocations in this file. */
#define NET_BUF_TIMEOUT K_MSEC(50)

#if defined(CONFIG_NET_IPV6_FRAGMENT_TIMEOUT)
#define IPV6_REASSEMBLY_TIMEOUT K_SECONDS(CONFIG_NET_IPV6_FRAGMENT_TIMEOUT)
#else
#define IPV6_REASSEMBLY_TIMEOUT K_SECONDS(5)
#endif /* CONFIG_NET_IPV6_FRAGMENT_TIMEOUT */

#define FRAG_BUF_WAIT K_MSEC(10) /* maximum time to wait for a buffer */

static void reassembly_timeout(struct k_work *work);
static bool reassembly_init_done;

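/* Reassembly table: each slot tracks one in-flight reassembly, identified by
 * the fragment id and the src/dst address pair, and holds its fragments in
 * pkt[0..CONFIG_NET_IPV6_FRAGMENT_MAX_PKT - 1]. A slot is considered in use
 * while its reassembly timer is running.
 */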
static struct net_ipv6_reassembly
reassembly[CONFIG_NET_IPV6_FRAGMENT_MAX_COUNT];

int net_ipv6_find_last_ext_hdr(struct net_pkt *pkt, uint16_t *next_hdr_off,
			       uint16_t *last_hdr_off)
{
	NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv6_access, struct net_ipv6_hdr);
	struct net_ipv6_hdr *hdr;
	uint8_t next_nexthdr;
	uint8_t nexthdr;

	if (!pkt || !pkt->frags || !next_hdr_off || !last_hdr_off) {
		return -EINVAL;
	}

	net_pkt_cursor_init(pkt);

	hdr = (struct net_ipv6_hdr *)net_pkt_get_data(pkt, &ipv6_access);
	if (!hdr) {
		return -ENOBUFS;
	}

	net_pkt_acknowledge_data(pkt, &ipv6_access);

	nexthdr = hdr->nexthdr;

	/* Initial values */
	*next_hdr_off = offsetof(struct net_ipv6_hdr, nexthdr);
	*last_hdr_off = sizeof(struct net_ipv6_hdr);

	while (!net_ipv6_is_nexthdr_upper_layer(nexthdr)) {
		if (net_pkt_read_u8(pkt, &next_nexthdr)) {
			goto fail;
		}

		switch (nexthdr) {
		case NET_IPV6_NEXTHDR_HBHO:
		case NET_IPV6_NEXTHDR_DESTO:
		{
			uint8_t val = 0U;
			uint16_t length;

			if (net_pkt_read_u8(pkt, &val)) {
				goto fail;
			}

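			/* The options header length field counts 8-octet
			 * units beyond the first 8 octets; 2 bytes (next
			 * header + length) have already been read, so the
			 * remaining length is (val + 1) * 8 - 2.
			 */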
			length = val * 8U + 8 - 2;

			if (net_pkt_skip(pkt, length)) {
				goto fail;
			}
		}
			break;
		case NET_IPV6_NEXTHDR_FRAG:
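			/* The fragment header is 8 bytes long and its next
			 * header byte has already been consumed above, so
			 * skip the remaining 7 bytes.
			 */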
			if (net_pkt_skip(pkt, 7)) {
				goto fail;
			}

			break;
		case NET_IPV6_NEXTHDR_NONE:
			goto out;
		default:
			/* TODO: Add more IPv6 extension headers to check */
			goto fail;
		}

		*next_hdr_off = *last_hdr_off;
		*last_hdr_off = net_pkt_get_current_offset(pkt);

		nexthdr = next_nexthdr;
	}
out:
	return 0;
fail:
	return -EINVAL;
}

static struct net_ipv6_reassembly *reassembly_get(uint32_t id,
						  struct in6_addr *src,
						  struct in6_addr *dst)
{
	int i, avail = -1;

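	/* A slot whose delayed work still has time remaining is in use.
	 * First look for an active slot matching this fragment id and
	 * address pair; while scanning, remember the first free slot in
	 * case a new reassembly needs to be started.
	 */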
	for (i = 0; i < CONFIG_NET_IPV6_FRAGMENT_MAX_COUNT; i++) {
		if (k_work_delayable_remaining_get(&reassembly[i].timer) &&
		    reassembly[i].id == id &&
		    net_ipv6_addr_cmp(src, &reassembly[i].src) &&
		    net_ipv6_addr_cmp(dst, &reassembly[i].dst)) {
			return &reassembly[i];
		}

		if (k_work_delayable_remaining_get(&reassembly[i].timer)) {
			continue;
		}

		if (avail < 0) {
			avail = i;
		}
	}

	if (avail < 0) {
		return NULL;
	}

	k_work_reschedule(&reassembly[avail].timer, IPV6_REASSEMBLY_TIMEOUT);

	net_ipaddr_copy(&reassembly[avail].src, src);
	net_ipaddr_copy(&reassembly[avail].dst, dst);

	reassembly[avail].id = id;

	return &reassembly[avail];
}

static bool reassembly_cancel(uint32_t id,
			      struct in6_addr *src,
			      struct in6_addr *dst)
{
	int i, j;

	NET_DBG("Cancel 0x%x", id);

	for (i = 0; i < CONFIG_NET_IPV6_FRAGMENT_MAX_COUNT; i++) {
		int32_t remaining;

		if (reassembly[i].id != id ||
		    !net_ipv6_addr_cmp(src, &reassembly[i].src) ||
		    !net_ipv6_addr_cmp(dst, &reassembly[i].dst)) {
			continue;
		}

		remaining = k_ticks_to_ms_ceil32(
			k_work_delayable_remaining_get(&reassembly[i].timer));
		k_work_cancel_delayable(&reassembly[i].timer);

		NET_DBG("IPv6 reassembly id 0x%x remaining %d ms",
			reassembly[i].id, remaining);

		reassembly[i].id = 0U;

		for (j = 0; j < CONFIG_NET_IPV6_FRAGMENT_MAX_PKT; j++) {
			if (!reassembly[i].pkt[j]) {
				continue;
			}

			NET_DBG("[%d] IPv6 reassembly pkt %p %zd bytes data",
				j, reassembly[i].pkt[j],
				net_pkt_get_len(reassembly[i].pkt[j]));

			net_pkt_unref(reassembly[i].pkt[j]);
			reassembly[i].pkt[j] = NULL;
		}

		return true;
	}

	return false;
}


static void reassembly_info(char *str, struct net_ipv6_reassembly *reass)
{
	NET_DBG("%s id 0x%x src %s dst %s remain %d ms", str, reass->id,
		log_strdup(net_sprint_ipv6_addr(&reass->src)),
		log_strdup(net_sprint_ipv6_addr(&reass->dst)),
		k_ticks_to_ms_ceil32(
			k_work_delayable_remaining_get(&reass->timer)));
}

static void reassembly_timeout(struct k_work *work)
{
	struct net_ipv6_reassembly *reass =
		CONTAINER_OF(work, struct net_ipv6_reassembly, timer);

	reassembly_info("Reassembly cancelled", reass);

	/* Send an ICMPv6 Time Exceeded only if we received the first fragment (RFC 2460 Sec. 5) */
	if (reass->pkt[0] && net_pkt_ipv6_fragment_offset(reass->pkt[0]) == 0) {
		net_icmpv6_send_error(reass->pkt[0], NET_ICMPV6_TIME_EXCEEDED, 1, 0);
	}

	reassembly_cancel(reass->id, &reass->src, &reass->dst);
}

static void reassemble_packet(struct net_ipv6_reassembly *reass)
{
	NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv6_access, struct net_ipv6_hdr);
	NET_PKT_DATA_ACCESS_DEFINE(frag_access, struct net_ipv6_frag_hdr);
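	/* The union below holds a single pointer that is used first to
	 * access the fragment header (via frag_access) and later the IPv6
	 * header (via ipv6_access) of the reassembled packet.
	 */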
	union {
		struct net_ipv6_hdr *hdr;
		struct net_ipv6_frag_hdr *frag_hdr;
	} ipv6;

	struct net_pkt *pkt;
	struct net_buf *last;
	uint8_t next_hdr;
	int i, len;

	k_work_cancel_delayable(&reass->timer);

	NET_ASSERT(reass->pkt[0]);

	last = net_buf_frag_last(reass->pkt[0]->buffer);

	/* We start from the 2nd packet, which is then appended to
	 * the first one.
	 */
	for (i = 1; i < CONFIG_NET_IPV6_FRAGMENT_MAX_PKT; i++) {
		int removed_len;

		pkt = reass->pkt[i];
		if (!pkt) {
			break;
		}

		net_pkt_cursor_init(pkt);

		/* Get rid of the IPv6 and fragment headers which are at
		 * the beginning of the fragment.
		 */
		removed_len = net_pkt_ipv6_fragment_start(pkt) +
			      sizeof(struct net_ipv6_frag_hdr);

		NET_DBG("Removing %d bytes from start of pkt %p",
			removed_len, pkt->buffer);

		if (net_pkt_pull(pkt, removed_len)) {
			NET_ERR("Failed to pull headers");
			reassembly_cancel(reass->id, &reass->src, &reass->dst);
			return;
		}

		/* Attach the data to the previous pkt */
		last->frags = pkt->buffer;
		last = net_buf_frag_last(pkt->buffer);

		pkt->buffer = NULL;
		reass->pkt[i] = NULL;

		net_pkt_unref(pkt);
	}

	pkt = reass->pkt[0];
	reass->pkt[0] = NULL;

	/* Next we need to strip away the fragment header from the first packet
	 * and set the various pointers and values in the packet.
	 */
	net_pkt_cursor_init(pkt);

	if (net_pkt_skip(pkt, net_pkt_ipv6_fragment_start(pkt))) {
		NET_ERR("Failed to move to fragment header");
		goto error;
	}

	ipv6.frag_hdr = (struct net_ipv6_frag_hdr *)net_pkt_get_data(
		pkt, &frag_access);
	if (!ipv6.frag_hdr) {
		NET_ERR("Failed to get fragment header");
		goto error;
	}

	next_hdr = ipv6.frag_hdr->nexthdr;

	if (net_pkt_pull(pkt, sizeof(struct net_ipv6_frag_hdr))) {
		NET_ERR("Failed to remove fragment header");
		goto error;
	}

	/* This updates the previous header's nexthdr value so that it points
	 * to the header that followed the removed fragment header.
	 */
	if (net_pkt_skip(pkt, net_pkt_ipv6_hdr_prev(pkt)) ||
	    net_pkt_write_u8(pkt, next_hdr)) {
		goto error;
	}

	net_pkt_cursor_init(pkt);

	ipv6.hdr = (struct net_ipv6_hdr *)net_pkt_get_data(pkt, &ipv6_access);
	if (!ipv6.hdr) {
		goto error;
	}

	/* Fix the total length of the IPv6 packet. */
	len = net_pkt_ipv6_ext_len(pkt);
	if (len > 0) {
		NET_DBG("Old pkt %p IPv6 ext len is %d bytes", pkt, len);
		net_pkt_set_ipv6_ext_len(pkt,
				len - sizeof(struct net_ipv6_frag_hdr));
	}

	len = net_pkt_get_len(pkt) - sizeof(struct net_ipv6_hdr);

	ipv6.hdr->len = htons(len);

	net_pkt_set_data(pkt, &ipv6_access);

	NET_DBG("New pkt %p IPv6 len is %d bytes", pkt,
		len + NET_IPV6H_LEN);

	/* We need to use the queue when feeding the packet back into the
	 * IP stack as we might run out of stack if we call processing_data()
	 * directly. As the packet does not contain a link layer header, we
	 * MUST NOT pass it to L2, so there will be a special check for that
	 * in process_data() when handling the packet.
	 */
	if (net_recv_data(net_pkt_iface(pkt), pkt) >= 0) {
		return;
	}
error:
	net_pkt_unref(pkt);
}

void net_ipv6_frag_foreach(net_ipv6_frag_cb_t cb, void *user_data)
{
	int i;

	for (i = 0; reassembly_init_done &&
		     i < CONFIG_NET_IPV6_FRAGMENT_MAX_COUNT; i++) {
		if (!k_work_delayable_remaining_get(&reassembly[i].timer)) {
			continue;
		}

		cb(&reassembly[i], user_data);
	}
}

/* Verify that we have all the fragments received and in correct order.
 * Return:
 * - a negative value if the fragments are erroneous and must be dropped
 * - zero if we are expecting more fragments
 * - a positive value if we can proceed with the reassembly
 */
static int fragments_are_ready(struct net_ipv6_reassembly *reass)
{
	unsigned int expected_offset = 0;
	bool more = true;
	int i;

	/* Fragments can arrive in any order, for example in reverse order:
	 * 1 -> Fragment3(M=0, offset=x2)
	 * 2 -> Fragment2(M=1, offset=x1)
	 * 3 -> Fragment1(M=1, offset=0)
	 * We have to test several requirements before proceeding with the reassembly:
	 * - We received the first fragment (Fragment Offset is 0)
	 * - All intermediate fragments are contiguous
	 * - The More bit of the last fragment is 0
	 */
	for (i = 0; i < CONFIG_NET_IPV6_FRAGMENT_MAX_PKT; i++) {
		struct net_pkt *pkt = reass->pkt[i];
		unsigned int offset;
		int payload_len;

		if (!pkt) {
			break;
		}

		offset = net_pkt_ipv6_fragment_offset(pkt);

		if (offset < expected_offset) {
			/* Overlapping or duplicated
			 * According to RFC8200 we can drop it
			 */
			return -EBADMSG;
		} else if (offset != expected_offset) {
			/* Not contiguous, let's wait for fragments */
			return 0;
		}

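		/* Payload of this fragment is everything after the fragment
		 * header, i.e. the total length minus the headers preceding
		 * the fragment header and the 8-byte fragment header itself.
		 */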
		payload_len = net_pkt_get_len(pkt) - net_pkt_ipv6_fragment_start(pkt);
		payload_len -= sizeof(struct net_ipv6_frag_hdr);
		if (payload_len < 0) {
			return -EBADMSG;
		}

		expected_offset += payload_len;
		more = net_pkt_ipv6_fragment_more(pkt);
	}

	if (more) {
		return 0;
	}

	return 1;
}

static int shift_packets(struct net_ipv6_reassembly *reass, int pos)
{
	int i;

	for (i = pos + 1; i < CONFIG_NET_IPV6_FRAGMENT_MAX_PKT; i++) {
		if (!reass->pkt[i]) {
			NET_DBG("Moving [%d] %p (offset 0x%x) to [%d]",
				pos, reass->pkt[pos],
				net_pkt_ipv6_fragment_offset(reass->pkt[pos]),
				pos + 1);

			/* pkt[i] is free, so shift everything between
			 * [pos] and [i - 1] by one element
			 */
			memmove(&reass->pkt[pos + 1], &reass->pkt[pos],
				sizeof(void *) * (i - pos));

			/* pkt[pos] is now free */
			reass->pkt[pos] = NULL;

			return 0;
		}
	}

	/* We do not have free space left in the array */
	return -ENOMEM;
}

enum net_verdict net_ipv6_handle_fragment_hdr(struct net_pkt *pkt,
					      struct net_ipv6_hdr *hdr,
					      uint8_t nexthdr)
{
	struct net_ipv6_reassembly *reass = NULL;
	uint16_t flag;
	bool found;
	uint8_t more;
	uint32_t id;
	int ret;
	int i;

	if (!reassembly_init_done) {
		/* Static initialization does not work here because of the
		 * array, so we must do it at runtime.
		 */
		for (i = 0; i < CONFIG_NET_IPV6_FRAGMENT_MAX_COUNT; i++) {
			k_work_init_delayable(&reassembly[i].timer,
					      reassembly_timeout);
		}

		reassembly_init_done = true;
	}

	/* Each fragment has a fragment header, but since we have already
	 * read its nexthdr part, we do not use net_pkt_get_data() to access
	 * the header directly: the cursor is already 1 byte past the start
	 * of the header, so just read the remaining relevant fields.
	 */
	if (net_pkt_skip(pkt, 1) || /* reserved */
	    net_pkt_read_be16(pkt, &flag) ||
	    net_pkt_read_be32(pkt, &id)) {
		goto drop;
	}

	reass = reassembly_get(id, &hdr->src, &hdr->dst);
	if (!reass) {
		NET_DBG("Cannot get reassembly slot, dropping pkt %p", pkt);
		goto drop;
	}

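	/* In the fragment header's 16-bit flags field, the Fragment Offset
	 * occupies the upper 13 bits (in 8-octet units) and the M (more
	 * fragments) flag is bit 0.
	 */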
	more = flag & 0x01;
	net_pkt_set_ipv6_fragment_flags(pkt, flag);

	if (more && net_pkt_get_len(pkt) % 8) {
		/* Fragment length is not a multiple of 8, discard
		 * the packet and send a parameter problem error with the
		 * offset of the "Payload Length" field in the IPv6 header.
		 */
		net_icmpv6_send_error(pkt, NET_ICMPV6_PARAM_PROBLEM,
			NET_ICMPV6_PARAM_PROB_HEADER, NET_IPV6H_LENGTH_OFFSET);
		goto drop;
	}

	/* The fragments might arrive in the wrong order, so place them
	 * into the reassembly chain in the correct order.
	 */
	for (i = 0, found = false; i < CONFIG_NET_IPV6_FRAGMENT_MAX_PKT; i++) {
		if (reass->pkt[i]) {
			if (net_pkt_ipv6_fragment_offset(reass->pkt[i]) <
			    net_pkt_ipv6_fragment_offset(pkt)) {
				continue;
			}

			/* Make room for this fragment. If there is no room,
			 * the whole reassembly will be discarded.
			 */
			if (shift_packets(reass, i)) {
				break;
			}
		}

		NET_DBG("Storing pkt %p to slot %d offset %d",
			pkt, i, net_pkt_ipv6_fragment_offset(pkt));
		reass->pkt[i] = pkt;
		found = true;

		break;
	}

	if (!found) {
		/* We could not add this fragment into our saved fragment
		 * list. We must discard the whole packet at this point.
		 */
		NET_DBG("No slots available for 0x%x", reass->id);
		net_pkt_unref(pkt);
		goto drop;
	}

	ret = fragments_are_ready(reass);
	if (ret < 0) {
		NET_DBG("Reassembled IPv6 verify failed, dropping id %u",
			reass->id);

		/* Let the caller release the already inserted pkt */
		if (i < CONFIG_NET_IPV6_FRAGMENT_MAX_PKT) {
			reass->pkt[i] = NULL;
		}

		net_pkt_unref(pkt);
		goto drop;
	} else if (ret == 0) {
		reassembly_info("Reassembly nth pkt", reass);

		NET_DBG("More fragments to be received");
		goto accept;
	}

	reassembly_info("Reassembly last pkt", reass);

	/* The last fragment was received, so reassemble the packet */
	reassemble_packet(reass);

accept:
	return NET_OK;

drop:
	if (reass) {
		if (reassembly_cancel(reass->id, &reass->src, &reass->dst)) {
			return NET_OK;
		}
	}

	return NET_DROP;
}

#define BUF_ALLOC_TIMEOUT K_MSEC(100)

static int send_ipv6_fragment(struct net_pkt *pkt,
			      uint16_t fit_len,
			      uint16_t frag_offset,
			      uint16_t next_hdr_off,
			      uint8_t next_hdr,
			      bool final)
{
	NET_PKT_DATA_ACCESS_DEFINE(frag_access, struct net_ipv6_frag_hdr);
	uint8_t frag_pkt_next_hdr = NET_IPV6_NEXTHDR_HBHO;
	int ret = -ENOBUFS;
	struct net_ipv6_frag_hdr *frag_hdr;
	struct net_pkt *frag_pkt;

	frag_pkt = net_pkt_alloc_with_buffer(net_pkt_iface(pkt), fit_len +
					     net_pkt_ipv6_ext_len(pkt) +
					     NET_IPV6_FRAGH_LEN,
					     AF_INET6, 0, BUF_ALLOC_TIMEOUT);
	if (!frag_pkt) {
		return -ENOMEM;
	}

	net_pkt_cursor_init(pkt);

	/* We copy the original IPv6 header and extension headers to the
	 * fragment packet, replacing the next-header value at next_hdr_off
	 * so that it points to the fragment header.
	 */
	if (net_pkt_copy(frag_pkt, pkt, next_hdr_off) ||
	    net_pkt_write_u8(frag_pkt, NET_IPV6_NEXTHDR_FRAG) ||
	    net_pkt_skip(pkt, 1) ||
	    net_pkt_copy(frag_pkt, pkt, net_pkt_ip_hdr_len(pkt) +
			 net_pkt_ipv6_ext_len(pkt) - next_hdr_off - 1)) {
		goto fail;
	}

	if (!net_pkt_ipv6_ext_len(pkt)) {
		frag_pkt_next_hdr = NET_IPV6_NEXTHDR_FRAG;
	}

	/* And we append the fragmentation header */
	frag_hdr = (struct net_ipv6_frag_hdr *)net_pkt_get_data(frag_pkt,
								&frag_access);
	if (!frag_hdr) {
		goto fail;
	}

	frag_hdr->nexthdr = next_hdr;
	frag_hdr->reserved = 0U;
	frag_hdr->id = net_pkt_ipv6_fragment_id(pkt);
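	/* The Fragment Offset field is expressed in 8-octet units in the
	 * upper 13 bits of this 16-bit word, with the M flag in bit 0.
	 * frag_offset is a multiple of 8 here (fit_len is computed from
	 * lengths that are all multiples of 8), so (frag_offset / 8) << 3
	 * simply places it in the correct bit positions.
	 */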
	frag_hdr->offset = htons(((frag_offset / 8U) << 3) | !final);

	if (net_pkt_set_data(frag_pkt, &frag_access)) {
		goto fail;
	}

	net_pkt_set_ipv6_ext_len(frag_pkt,
				 net_pkt_ipv6_ext_len(pkt) +
				 sizeof(struct net_ipv6_frag_hdr));

	/* Finally we copy the payload part of this fragment from
	 * the original packet
	 */
	if (net_pkt_skip(pkt, frag_offset) ||
	    net_pkt_copy(frag_pkt, pkt, fit_len)) {
		goto fail;
	}

	net_pkt_cursor_init(frag_pkt);

	if (net_ipv6_finalize(frag_pkt, frag_pkt_next_hdr) < 0) {
		goto fail;
	}

	/* If everything has been ok so far, we can send the packet. */
	ret = net_send_data(frag_pkt);
	if (ret < 0) {
		goto fail;
	}

	/* Let this packet be sent and hopefully it will release
	 * the memory that can be utilized for the next IPv6 fragment.
	 */
	k_yield();

	return 0;

fail:
	NET_DBG("Cannot send fragment (%d)", ret);
	net_pkt_unref(frag_pkt);

	return ret;
}

int net_ipv6_send_fragmented_pkt(struct net_if *iface, struct net_pkt *pkt,
				 uint16_t pkt_len)
{
	uint16_t next_hdr_off;
	uint16_t last_hdr_off;
	uint16_t frag_offset;
	size_t length;
	uint8_t next_hdr;
	uint8_t last_hdr;
	int fit_len;
	int ret;

	net_pkt_set_ipv6_fragment_id(pkt, sys_rand32_get());

	ret = net_ipv6_find_last_ext_hdr(pkt, &next_hdr_off, &last_hdr_off);
	if (ret < 0) {
		return ret;
	}

	net_pkt_cursor_init(pkt);

	if (net_pkt_skip(pkt, next_hdr_off) ||
	    net_pkt_read_u8(pkt, &next_hdr) ||
	    net_pkt_skip(pkt, last_hdr_off) ||
	    net_pkt_read_u8(pkt, &last_hdr)) {
		return -ENOBUFS;
	}

	/* Maximum payload that can fit into each packet after the IPv6
	 * header, extension headers and the fragment header.
	 */
	fit_len = NET_IPV6_MTU - NET_IPV6_FRAGH_LEN -
		(net_pkt_ip_hdr_len(pkt) + net_pkt_ipv6_ext_len(pkt));
	if (fit_len <= 0) {
		/* Must be an invalid extension headers length */
		NET_DBG("No room for IPv6 payload MTU %d hdrs_len %d",
			NET_IPV6_MTU, NET_IPV6_FRAGH_LEN +
			net_pkt_ip_hdr_len(pkt) + net_pkt_ipv6_ext_len(pkt));
		return -EINVAL;
	}

	frag_offset = 0U;

	length = net_pkt_get_len(pkt) -
		(net_pkt_ip_hdr_len(pkt) + net_pkt_ipv6_ext_len(pkt));
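	/* Emit one fragment per iteration, each carrying at most fit_len
	 * bytes of payload; the last one is marked final so that the M flag
	 * gets cleared.
	 */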
	while (length) {
		bool final = false;

		if (fit_len >= length) {
			final = true;
			fit_len = length;
		}

		ret = send_ipv6_fragment(pkt, fit_len, frag_offset,
					 next_hdr_off, next_hdr, final);
		if (ret < 0) {
			return ret;
		}

		length -= fit_len;
		frag_offset += fit_len;
	}

	return 0;
}
744