/** @file
 @brief Network packet buffers for IP stack

 Network data is passed between components using net_pkt.
 */

/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_pkt, CONFIG_NET_PKT_LOG_LEVEL);

/* This enables allocation debugging but does not print much output,
 * as that can slow things down a lot.
 */
#undef NET_LOG_LEVEL
#if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
#define NET_LOG_LEVEL 5
#else
#define NET_LOG_LEVEL CONFIG_NET_PKT_LOG_LEVEL
#endif

#include <zephyr/kernel.h>
#include <zephyr/toolchain.h>
#include <string.h>
#include <zephyr/types.h>
#include <sys/types.h>

#include <zephyr/sys/util.h>

#include <zephyr/net/net_core.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/net/buf.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/ethernet.h>
#include <zephyr/net/udp.h>

#include "net_private.h"
#include "tcp_internal.h"

/* Find the max header size of the IP protocol (IPv4 or IPv6) */
#if defined(CONFIG_NET_IPV6) || defined(CONFIG_NET_RAW_MODE) || \
    defined(CONFIG_NET_SOCKETS_PACKET) || defined(CONFIG_NET_SOCKETS_OFFLOAD)
#define MAX_IP_PROTO_LEN NET_IPV6H_LEN
#else
#if defined(CONFIG_NET_IPV4)
#define MAX_IP_PROTO_LEN NET_IPV4H_LEN
#else
#if defined(CONFIG_NET_SOCKETS_CAN)
/* TODO: Use the CAN MTU here instead of a hard-coded value. There was
 * a weird circular dependency issue, so this needs more TLC.
 */
#define MAX_IP_PROTO_LEN 8
#else
#if defined(CONFIG_NET_ETHERNET_BRIDGE) || \
    defined(CONFIG_NET_L2_IEEE802154) || \
    defined(CONFIG_NET_L2_CUSTOM_IEEE802154)
#define MAX_IP_PROTO_LEN 0
#else
#error "Some packet protocol (e.g. IPv6, IPv4, ETH, IEEE 802.15.4) needs to be selected."
#endif /* ETHERNET_BRIDGE / L2_IEEE802154 */
#endif /* SOCKETS_CAN */
#endif /* IPv4 */
#endif /* IPv6 */

/* Find the max header size of the "next" protocol (TCP, UDP or ICMP) */
#if defined(CONFIG_NET_TCP)
#define MAX_NEXT_PROTO_LEN NET_TCPH_LEN
#else
#if defined(CONFIG_NET_UDP)
#define MAX_NEXT_PROTO_LEN NET_UDPH_LEN
#else
#if defined(CONFIG_NET_SOCKETS_CAN)
#define MAX_NEXT_PROTO_LEN 0
#else
/* If there is no TCP and no UDP, we still want pings to work. */
#define MAX_NEXT_PROTO_LEN NET_ICMPH_LEN
#endif /* SOCKETS_CAN */
#endif /* UDP */
#endif /* TCP */

/* Make sure that IP + TCP/UDP/ICMP headers fit into one fragment. This
 * makes it possible to cast a fragment pointer to a protocol header struct.
 */
#if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
#if CONFIG_NET_BUF_DATA_SIZE < (MAX_IP_PROTO_LEN + MAX_NEXT_PROTO_LEN)
#if defined(STRING2)
#undef STRING2
#endif
#if defined(STRING)
#undef STRING
#endif
#define STRING2(x) #x
#define STRING(x) STRING2(x)
#pragma message "Data len " STRING(CONFIG_NET_BUF_DATA_SIZE)
#pragma message "Minimum len " STRING(MAX_IP_PROTO_LEN + MAX_NEXT_PROTO_LEN)
#error "Too small net_buf fragment size"
#endif
#endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */

#if CONFIG_NET_PKT_RX_COUNT <= 0
#error "Minimum value for CONFIG_NET_PKT_RX_COUNT is 1"
#endif

#if CONFIG_NET_PKT_TX_COUNT <= 0
#error "Minimum value for CONFIG_NET_PKT_TX_COUNT is 1"
#endif

#if CONFIG_NET_BUF_RX_COUNT <= 0
#error "Minimum value for CONFIG_NET_BUF_RX_COUNT is 1"
#endif

#if CONFIG_NET_BUF_TX_COUNT <= 0
#error "Minimum value for CONFIG_NET_BUF_TX_COUNT is 1"
#endif

K_MEM_SLAB_DEFINE(rx_pkts, sizeof(struct net_pkt), CONFIG_NET_PKT_RX_COUNT, 4);
K_MEM_SLAB_DEFINE(tx_pkts, sizeof(struct net_pkt), CONFIG_NET_PKT_TX_COUNT, 4);

#if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)

NET_BUF_POOL_FIXED_DEFINE(rx_bufs, CONFIG_NET_BUF_RX_COUNT, CONFIG_NET_BUF_DATA_SIZE,
                          CONFIG_NET_PKT_BUF_USER_DATA_SIZE, NULL);
NET_BUF_POOL_FIXED_DEFINE(tx_bufs, CONFIG_NET_BUF_TX_COUNT, CONFIG_NET_BUF_DATA_SIZE,
                          CONFIG_NET_PKT_BUF_USER_DATA_SIZE, NULL);

#else /* !CONFIG_NET_BUF_FIXED_DATA_SIZE */

NET_BUF_POOL_VAR_DEFINE(rx_bufs, CONFIG_NET_BUF_RX_COUNT, CONFIG_NET_BUF_DATA_POOL_SIZE,
                        CONFIG_NET_PKT_BUF_USER_DATA_SIZE, NULL);
NET_BUF_POOL_VAR_DEFINE(tx_bufs, CONFIG_NET_BUF_TX_COUNT, CONFIG_NET_BUF_DATA_POOL_SIZE,
                        CONFIG_NET_PKT_BUF_USER_DATA_SIZE, NULL);

#endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */

/* Allocation tracking is only available if separately enabled */
#if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
struct net_pkt_alloc {
        union {
                struct net_pkt *pkt;
                struct net_buf *buf;
                void *alloc_data;
        };
        const char *func_alloc;
        const char *func_free;
        uint16_t line_alloc;
        uint16_t line_free;
        uint8_t in_use;
        bool is_pkt;
};

#define MAX_NET_PKT_ALLOCS (CONFIG_NET_PKT_RX_COUNT + \
                            CONFIG_NET_PKT_TX_COUNT + \
                            CONFIG_NET_BUF_RX_COUNT + \
                            CONFIG_NET_BUF_TX_COUNT + \
                            CONFIG_NET_DEBUG_NET_PKT_EXTERNALS)

static struct net_pkt_alloc net_pkt_allocs[MAX_NET_PKT_ALLOCS];

static void net_pkt_alloc_add(void *alloc_data, bool is_pkt,
                              const char *func, int line)
{
        int i;

        for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
                if (net_pkt_allocs[i].in_use) {
                        continue;
                }

                net_pkt_allocs[i].in_use = true;
                net_pkt_allocs[i].is_pkt = is_pkt;
                net_pkt_allocs[i].alloc_data = alloc_data;
                net_pkt_allocs[i].func_alloc = func;
                net_pkt_allocs[i].line_alloc = line;

                return;
        }
}

static void net_pkt_alloc_del(void *alloc_data, const char *func, int line)
{
        int i;

        for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
                if (net_pkt_allocs[i].in_use &&
                    net_pkt_allocs[i].alloc_data == alloc_data) {
                        net_pkt_allocs[i].func_free = func;
                        net_pkt_allocs[i].line_free = line;
                        net_pkt_allocs[i].in_use = false;

                        return;
                }
        }
}

static bool net_pkt_alloc_find(void *alloc_data,
                               const char **func_free,
                               int *line_free)
{
        int i;

        for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
                if (!net_pkt_allocs[i].in_use &&
                    net_pkt_allocs[i].alloc_data == alloc_data) {
                        *func_free = net_pkt_allocs[i].func_free;
                        *line_free = net_pkt_allocs[i].line_free;

                        return true;
                }
        }

        return false;
}

void net_pkt_allocs_foreach(net_pkt_allocs_cb_t cb, void *user_data)
{
        int i;

        for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
                if (net_pkt_allocs[i].in_use) {
                        cb(net_pkt_allocs[i].is_pkt ?
                           net_pkt_allocs[i].pkt : NULL,
                           net_pkt_allocs[i].is_pkt ?
                           NULL : net_pkt_allocs[i].buf,
                           net_pkt_allocs[i].func_alloc,
                           net_pkt_allocs[i].line_alloc,
                           net_pkt_allocs[i].func_free,
                           net_pkt_allocs[i].line_free,
                           net_pkt_allocs[i].in_use,
                           user_data);
                }
        }

        for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
                if (!net_pkt_allocs[i].in_use) {
                        cb(net_pkt_allocs[i].is_pkt ?
                           net_pkt_allocs[i].pkt : NULL,
                           net_pkt_allocs[i].is_pkt ?
                           NULL : net_pkt_allocs[i].buf,
                           net_pkt_allocs[i].func_alloc,
                           net_pkt_allocs[i].line_alloc,
                           net_pkt_allocs[i].func_free,
                           net_pkt_allocs[i].line_free,
                           net_pkt_allocs[i].in_use,
                           user_data);
                }
        }
}
#else
#define net_pkt_alloc_add(alloc_data, is_pkt, func, line)
#define net_pkt_alloc_del(alloc_data, func, line)
#define net_pkt_alloc_find(alloc_data, func_free, line_free) false
#endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC */
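
/* Illustrative sketch (not compiled): walking the allocation tracking
 * table with net_pkt_allocs_foreach(). The callback name is hypothetical
 * and its parameter list is inferred from the call sites above; only
 * the iterator itself comes from this file. Requires
 * CONFIG_NET_DEBUG_NET_PKT_ALLOC=y.
 */
#if 0
static void pkt_alloc_dump_cb(struct net_pkt *pkt, struct net_buf *buf,
                              const char *func_alloc, int line_alloc,
                              const char *func_free, int line_free,
                              bool in_use, void *user_data)
{
        /* Exactly one of pkt/buf is non-NULL per entry */
        NET_INFO("%s %p allocated by %s():%d",
                 pkt ? "pkt" : "buf", pkt ? (void *)pkt : (void *)buf,
                 func_alloc, line_alloc);
}

net_pkt_allocs_foreach(pkt_alloc_dump_cb, NULL);
#endif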

#if defined(NET_PKT_DEBUG_ENABLED)

#define NET_FRAG_CHECK_IF_NOT_IN_USE(frag, ref)                              \
        do {                                                                 \
                if (!(ref)) {                                                \
                        NET_ERR("**ERROR** frag %p not in use (%s:%s():%d)", \
                                frag, __FILE__, __func__, __LINE__);         \
                }                                                            \
        } while (0)

const char *net_pkt_slab2str(struct k_mem_slab *slab)
{
        if (slab == &rx_pkts) {
                return "RX";
        } else if (slab == &tx_pkts) {
                return "TX";
        }

        return "EXT";
}

const char *net_pkt_pool2str(struct net_buf_pool *pool)
{
        if (pool == &rx_bufs) {
                return "RDATA";
        } else if (pool == &tx_bufs) {
                return "TDATA";
        }

        return "EDATA";
}

static inline int16_t get_frees(struct net_buf_pool *pool)
{
#if defined(CONFIG_NET_BUF_POOL_USAGE)
        return atomic_get(&pool->avail_count);
#else
        return 0;
#endif
}

void net_pkt_print_frags(struct net_pkt *pkt)
{
        struct net_buf *frag;
        size_t total = 0;
        int count = 0, frag_size = 0;

        if (!pkt) {
                NET_INFO("pkt %p", pkt);
                return;
        }

        NET_INFO("pkt %p frags %p", pkt, pkt->frags);

        NET_ASSERT(pkt->frags);

        frag = pkt->frags;
        while (frag) {
                total += frag->len;

                frag_size = frag->size;

                NET_INFO("[%d] frag %p len %d max len %u size %d pool %p",
                         count, frag, frag->len, net_buf_max_len(frag),
                         frag_size, net_buf_pool_get(frag->pool_id));

                count++;

                frag = frag->frags;
        }

        NET_INFO("Total data size %zu, occupied %d bytes, utilization %zu%%",
                 total, count * frag_size,
                 count ? (total * 100) / (count * frag_size) : 0);
}
#endif

#if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
static inline const char *get_name(struct net_buf_pool *pool)
{
#if defined(CONFIG_NET_BUF_POOL_USAGE)
        return pool->name;
#else
        return "?";
#endif
}

static inline int16_t get_size(struct net_buf_pool *pool)
{
#if defined(CONFIG_NET_BUF_POOL_USAGE)
        return pool->pool_size;
#else
        return 0;
#endif
}

static inline const char *slab2str(struct k_mem_slab *slab)
{
        return net_pkt_slab2str(slab);
}

static inline const char *pool2str(struct net_buf_pool *pool)
{
        return net_pkt_pool2str(pool);
}
#endif /* CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG */

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
struct net_buf *net_pkt_get_reserve_data_debug(struct net_buf_pool *pool,
                                               size_t min_len,
                                               k_timeout_t timeout,
                                               const char *caller,
                                               int line)
#else /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
struct net_buf *net_pkt_get_reserve_data(struct net_buf_pool *pool,
                                         size_t min_len, k_timeout_t timeout)
#endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
{
        struct net_buf *frag;

        if (k_is_in_isr()) {
                timeout = K_NO_WAIT;
        }

#if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
        if (min_len > CONFIG_NET_BUF_DATA_SIZE) {
                NET_ERR("Requested too large fragment. Increase CONFIG_NET_BUF_DATA_SIZE.");
                return NULL;
        }

        frag = net_buf_alloc(pool, timeout);
#else
        frag = net_buf_alloc_len(pool, min_len, timeout);
#endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */

        if (!frag) {
                return NULL;
        }

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
        NET_FRAG_CHECK_IF_NOT_IN_USE(frag, frag->ref + 1U);
#endif

        net_pkt_alloc_add(frag, false, caller, line);

#if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
        NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
                pool2str(pool), get_name(pool), get_frees(pool),
                frag, frag->ref, caller, line);
#endif

        return frag;
}

/* Get a fragment, trying to figure out the pool from which to allocate
 * the data.
 */
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
struct net_buf *net_pkt_get_frag_debug(struct net_pkt *pkt, size_t min_len,
                                       k_timeout_t timeout,
                                       const char *caller, int line)
#else
struct net_buf *net_pkt_get_frag(struct net_pkt *pkt, size_t min_len,
                                 k_timeout_t timeout)
#endif
{
#if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL)
        struct net_context *context;

        context = net_pkt_context(pkt);
        if (context && context->data_pool) {
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
                return net_pkt_get_reserve_data_debug(context->data_pool(),
                                                      min_len, timeout,
                                                      caller, line);
#else
                return net_pkt_get_reserve_data(context->data_pool(), min_len,
                                                timeout);
#endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
        }
#endif /* CONFIG_NET_CONTEXT_NET_PKT_POOL */

        if (pkt->slab == &rx_pkts) {
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
                return net_pkt_get_reserve_rx_data_debug(min_len, timeout,
                                                         caller, line);
#else
                return net_pkt_get_reserve_rx_data(min_len, timeout);
#endif
        }

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
        return net_pkt_get_reserve_tx_data_debug(min_len, timeout, caller, line);
#else
        return net_pkt_get_reserve_tx_data(min_len, timeout);
#endif
}

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
struct net_buf *net_pkt_get_reserve_rx_data_debug(size_t min_len, k_timeout_t timeout,
                                                  const char *caller, int line)
{
        return net_pkt_get_reserve_data_debug(&rx_bufs, min_len, timeout, caller, line);
}

struct net_buf *net_pkt_get_reserve_tx_data_debug(size_t min_len, k_timeout_t timeout,
                                                  const char *caller, int line)
{
        return net_pkt_get_reserve_data_debug(&tx_bufs, min_len, timeout, caller, line);
}

#else /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */

struct net_buf *net_pkt_get_reserve_rx_data(size_t min_len, k_timeout_t timeout)
{
        return net_pkt_get_reserve_data(&rx_bufs, min_len, timeout);
}

struct net_buf *net_pkt_get_reserve_tx_data(size_t min_len, k_timeout_t timeout)
{
        return net_pkt_get_reserve_data(&tx_bufs, min_len, timeout);
}

#endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
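
/* Illustrative sketch (not compiled): appending one data fragment to a
 * TX packet from the default TX buffer pool. Assumes "pkt" is a valid
 * packet allocated from tx_pkts; 64 is an arbitrary example length.
 */
#if 0
struct net_buf *frag = net_pkt_get_reserve_tx_data(64, K_MSEC(100));

if (frag) {
        net_pkt_frag_add(pkt, frag);
}
#endif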


#if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL)
static inline struct k_mem_slab *get_tx_slab(struct net_context *context)
{
        if (context->tx_slab) {
                return context->tx_slab();
        }

        return NULL;
}

static inline struct net_buf_pool *get_data_pool(struct net_context *context)
{
        if (context->data_pool) {
                return context->data_pool();
        }

        return NULL;
}
#else
#define get_tx_slab(...) NULL
#define get_data_pool(...) NULL
#endif /* CONFIG_NET_CONTEXT_NET_PKT_POOL */

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
void net_pkt_unref_debug(struct net_pkt *pkt, const char *caller, int line)
{
        struct net_buf *frag;

#else
void net_pkt_unref(struct net_pkt *pkt)
{
#endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
        atomic_val_t ref;

        if (!pkt) {
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
                NET_ERR("*** ERROR *** pkt %p (%s():%d)", pkt, caller, line);
#endif
                return;
        }

        do {
                ref = atomic_get(&pkt->atomic_ref);
                if (!ref) {
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
                        const char *func_freed;
                        int line_freed;

                        if (net_pkt_alloc_find(pkt, &func_freed, &line_freed)) {
                                NET_ERR("*** ERROR *** pkt %p is freed already "
                                        "by %s():%d (%s():%d)",
                                        pkt, func_freed, line_freed, caller,
                                        line);
                        } else {
                                NET_ERR("*** ERROR *** pkt %p is freed already "
                                        "(%s():%d)", pkt, caller, line);
                        }
#endif
                        return;
                }
        } while (!atomic_cas(&pkt->atomic_ref, ref, ref - 1));

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
#if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
        NET_DBG("%s [%d] pkt %p ref %ld frags %p (%s():%d)",
                slab2str(pkt->slab), k_mem_slab_num_free_get(pkt->slab),
                pkt, ref - 1, pkt->frags, caller, line);
#endif
        if (ref > 1) {
                goto done;
        }

        frag = pkt->frags;
        while (frag) {
#if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
                NET_DBG("%s (%s) [%d] frag %p ref %d frags %p (%s():%d)",
                        pool2str(net_buf_pool_get(frag->pool_id)),
                        get_name(net_buf_pool_get(frag->pool_id)),
                        get_frees(net_buf_pool_get(frag->pool_id)), frag,
                        frag->ref - 1U, frag->frags, caller, line);
#endif

                if (!frag->ref) {
                        const char *func_freed;
                        int line_freed;

                        if (net_pkt_alloc_find(frag,
                                               &func_freed, &line_freed)) {
                                NET_ERR("*** ERROR *** frag %p is freed "
                                        "already by %s():%d (%s():%d)",
                                        frag, func_freed, line_freed,
                                        caller, line);
                        } else {
                                NET_ERR("*** ERROR *** frag %p is freed "
                                        "already (%s():%d)",
                                        frag, caller, line);
                        }
                }

                net_pkt_alloc_del(frag, caller, line);

                frag = frag->frags;
        }

        net_pkt_alloc_del(pkt, caller, line);
done:
#endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */

        if (ref > 1) {
                return;
        }

        if (pkt->frags) {
                net_pkt_frag_unref(pkt->frags);
        }

        if (IS_ENABLED(CONFIG_NET_DEBUG_NET_PKT_NON_FRAGILE_ACCESS)) {
                pkt->buffer = NULL;
                net_pkt_cursor_init(pkt);
        }

        k_mem_slab_free(pkt->slab, (void **)&pkt);
}

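/* Illustrative sketch (not compiled): the usual reference-counting
 * pattern. Taking an extra reference keeps a packet alive across an
 * asynchronous hand-off; each successful net_pkt_ref() must be paired
 * with exactly one net_pkt_unref().
 */
#if 0
struct net_pkt *keep = net_pkt_ref(pkt);

if (keep) {
        /* ... hand "keep" to another thread or queue ... */
        net_pkt_unref(keep);
}
#endif
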
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
struct net_pkt *net_pkt_ref_debug(struct net_pkt *pkt, const char *caller,
                                  int line)
#else
struct net_pkt *net_pkt_ref(struct net_pkt *pkt)
#endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
{
        atomic_val_t ref;

        do {
                ref = pkt ? atomic_get(&pkt->atomic_ref) : 0;
                if (!ref) {
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
                        NET_ERR("*** ERROR *** pkt %p (%s():%d)",
                                pkt, caller, line);
#endif
                        return NULL;
                }
        } while (!atomic_cas(&pkt->atomic_ref, ref, ref + 1));

#if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
        NET_DBG("%s [%d] pkt %p ref %ld (%s():%d)",
                slab2str(pkt->slab), k_mem_slab_num_free_get(pkt->slab),
                pkt, ref + 1, caller, line);
#endif

        return pkt;
}

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
struct net_buf *net_pkt_frag_ref_debug(struct net_buf *frag,
                                       const char *caller, int line)
#else
struct net_buf *net_pkt_frag_ref(struct net_buf *frag)
#endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
{
        if (!frag) {
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
                NET_ERR("*** ERROR *** frag %p (%s():%d)", frag, caller, line);
#endif
                return NULL;
        }

#if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
        NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
                pool2str(net_buf_pool_get(frag->pool_id)),
                get_name(net_buf_pool_get(frag->pool_id)),
                get_frees(net_buf_pool_get(frag->pool_id)),
                frag, frag->ref + 1U, caller, line);
#endif

        return net_buf_ref(frag);
}

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
void net_pkt_frag_unref_debug(struct net_buf *frag,
                              const char *caller, int line)
#else
void net_pkt_frag_unref(struct net_buf *frag)
#endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
{
        if (!frag) {
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
                NET_ERR("*** ERROR *** frag %p (%s():%d)", frag, caller, line);
#endif
                return;
        }

#if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
        NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
                pool2str(net_buf_pool_get(frag->pool_id)),
                get_name(net_buf_pool_get(frag->pool_id)),
                get_frees(net_buf_pool_get(frag->pool_id)),
                frag, frag->ref - 1U, caller, line);
#endif

        if (frag->ref == 1U) {
                net_pkt_alloc_del(frag, caller, line);
        }

        net_buf_unref(frag);
}

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
struct net_buf *net_pkt_frag_del_debug(struct net_pkt *pkt,
                                       struct net_buf *parent,
                                       struct net_buf *frag,
                                       const char *caller, int line)
#else
struct net_buf *net_pkt_frag_del(struct net_pkt *pkt,
                                 struct net_buf *parent,
                                 struct net_buf *frag)
#endif
{
#if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
        NET_DBG("pkt %p parent %p frag %p ref %u (%s:%d)",
                pkt, parent, frag, frag->ref, caller, line);
#endif

        if (pkt->frags == frag && !parent) {
                struct net_buf *tmp;

                if (frag->ref == 1U) {
                        net_pkt_alloc_del(frag, caller, line);
                }

                tmp = net_buf_frag_del(NULL, frag);
                pkt->frags = tmp;

                return tmp;
        }

        if (frag->ref == 1U) {
                net_pkt_alloc_del(frag, caller, line);
        }

        return net_buf_frag_del(parent, frag);
}

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
void net_pkt_frag_add_debug(struct net_pkt *pkt, struct net_buf *frag,
                            const char *caller, int line)
#else
void net_pkt_frag_add(struct net_pkt *pkt, struct net_buf *frag)
#endif
{
#if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
        NET_DBG("pkt %p frag %p (%s:%d)", pkt, frag, caller, line);
#endif

        /* We do not use net_buf_frag_add() as it would take one more
         * reference on the frag when !pkt->frags.
         */
        if (!pkt->frags) {
                pkt->frags = frag;
                return;
        }

        net_buf_frag_insert(net_buf_frag_last(pkt->frags), frag);
}

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
void net_pkt_frag_insert_debug(struct net_pkt *pkt, struct net_buf *frag,
                               const char *caller, int line)
#else
void net_pkt_frag_insert(struct net_pkt *pkt, struct net_buf *frag)
#endif
{
#if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
        NET_DBG("pkt %p frag %p (%s:%d)", pkt, frag, caller, line);
#endif

        net_buf_frag_last(frag)->frags = pkt->frags;
        pkt->frags = frag;
}

void net_pkt_compact(struct net_pkt *pkt)
{
        struct net_buf *frag, *prev;

        NET_DBG("Compacting data in pkt %p", pkt);

        frag = pkt->frags;
        prev = NULL;

        while (frag) {
                if (frag->frags) {
                        /* Copy as much data as fits from the next
                         * fragment to this one.
                         */
                        size_t copy_len;

                        copy_len = frag->frags->len;
                        if (copy_len > net_buf_tailroom(frag)) {
                                copy_len = net_buf_tailroom(frag);
                        }

                        memcpy(net_buf_tail(frag), frag->frags->data, copy_len);
                        net_buf_add(frag, copy_len);

                        memmove(frag->frags->data,
                                frag->frags->data + copy_len,
                                frag->frags->len - copy_len);

                        frag->frags->len -= copy_len;

                        /* Is there any more space in this fragment? */
                        if (net_buf_tailroom(frag)) {
                                /* There is. This also means that the next
                                 * fragment is empty, as otherwise we could
                                 * not have copied all the data. Remove the
                                 * next fragment as there is no data left
                                 * in it.
                                 */
                                net_pkt_frag_del(pkt, frag, frag->frags);

                                /* Then check the next fragment */
                                continue;
                        }
                } else {
                        if (!frag->len) {
                                /* Remove the last fragment because there
                                 * is no data in it.
                                 */
                                net_pkt_frag_del(pkt, prev, frag);

                                break;
                        }
                }

                prev = frag;
                frag = frag->frags;
        }
}
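
/* Illustrative sketch (not compiled): after repeated header stripping a
 * packet may hold many partially filled fragments. Compacting moves the
 * data forward and releases emptied fragments back to their pool while
 * leaving the total payload length unchanged.
 */
#if 0
size_t len_before = net_pkt_get_len(pkt);

net_pkt_compact(pkt);
__ASSERT_NO_MSG(net_pkt_get_len(pkt) == len_before);
#endif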

void net_pkt_get_info(struct k_mem_slab **rx,
                      struct k_mem_slab **tx,
                      struct net_buf_pool **rx_data,
                      struct net_buf_pool **tx_data)
{
        if (rx) {
                *rx = &rx_pkts;
        }

        if (tx) {
                *tx = &tx_pkts;
        }

        if (rx_data) {
                *rx_data = &rx_bufs;
        }

        if (tx_data) {
                *tx_data = &tx_bufs;
        }
}
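
/* Illustrative sketch (not compiled): querying the global slabs and
 * pools, e.g. to report free counts from a shell command. Pass NULL for
 * any output pointer you do not need.
 */
#if 0
struct k_mem_slab *rx, *tx;
struct net_buf_pool *rx_data, *tx_data;

net_pkt_get_info(&rx, &tx, &rx_data, &tx_data);
NET_INFO("free RX pkts %u TX pkts %u",
         k_mem_slab_num_free_get(rx), k_mem_slab_num_free_get(tx));
#endif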

#if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
void net_pkt_print(void)
{
        NET_DBG("TX %u RX %u RDATA %d TDATA %d",
                k_mem_slab_num_free_get(&tx_pkts),
                k_mem_slab_num_free_get(&rx_pkts),
                get_frees(&rx_bufs), get_frees(&tx_bufs));
}
#endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC */

/* The new allocator and API start here */

#if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
                                        size_t size, k_timeout_t timeout,
                                        const char *caller, int line)
#else
static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
                                        size_t size, k_timeout_t timeout)
#endif
{
        uint64_t end = sys_clock_timeout_end_calc(timeout);
        struct net_buf *first = NULL;
        struct net_buf *current = NULL;

        do {
                struct net_buf *new;

                new = net_buf_alloc_fixed(pool, timeout);
                if (!new) {
                        goto error;
                }

                if (!first && !current) {
                        first = new;
                } else {
                        current->frags = new;
                }

                current = new;
                if (current->size > size) {
                        current->size = size;
                }

                size -= current->size;

                if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
                    !K_TIMEOUT_EQ(timeout, K_FOREVER)) {
                        int64_t remaining = end - sys_clock_tick_get();

                        if (remaining <= 0) {
                                break;
                        }

                        timeout = Z_TIMEOUT_TICKS(remaining);
                }

#if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
                NET_FRAG_CHECK_IF_NOT_IN_USE(new, new->ref + 1);

                net_pkt_alloc_add(new, false, caller, line);

                NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
                        pool2str(pool), get_name(pool), get_frees(pool),
                        new, new->ref, caller, line);
#endif
        } while (size);

        return first;
error:
        if (first) {
                net_buf_unref(first);
        }

        return NULL;
}

#else /* !CONFIG_NET_BUF_FIXED_DATA_SIZE */

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
                                        size_t size, k_timeout_t timeout,
                                        const char *caller, int line)
#else
static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
                                        size_t size, k_timeout_t timeout)
#endif
{
        struct net_buf *buf;

        buf = net_buf_alloc_len(pool, size, timeout);

#if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
        NET_FRAG_CHECK_IF_NOT_IN_USE(buf, buf->ref + 1);

        net_pkt_alloc_add(buf, false, caller, line);

        NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
                pool2str(pool), get_name(pool), get_frees(pool),
                buf, buf->ref, caller, line);
#endif

        return buf;
}

#endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */

static size_t pkt_buffer_length(struct net_pkt *pkt,
                                size_t size,
                                enum net_ip_protocol proto,
                                size_t existing)
{
        sa_family_t family = net_pkt_family(pkt);
        size_t max_len;

        if (net_pkt_iface(pkt)) {
                max_len = net_if_get_mtu(net_pkt_iface(pkt));
        } else {
                max_len = 0;
        }

        /* Family vs iface MTU */
        if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
                if (IS_ENABLED(CONFIG_NET_IPV6_FRAGMENT) && (size > max_len)) {
                        /* We support larger packets if IPv6 fragmentation is
                         * enabled.
                         */
                        max_len = size;
                }

                max_len = MAX(max_len, NET_IPV6_MTU);
        } else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
                if (IS_ENABLED(CONFIG_NET_IPV4_FRAGMENT) && (size > max_len)) {
                        /* We support larger packets if IPv4 fragmentation is enabled */
                        max_len = size;
                }

                max_len = MAX(max_len, NET_IPV4_MTU);
        } else { /* family == AF_UNSPEC */
#if defined(CONFIG_NET_L2_ETHERNET)
                if (net_if_l2(net_pkt_iface(pkt)) ==
                    &NET_L2_GET_NAME(ETHERNET)) {
                        max_len += NET_ETH_MAX_HDR_SIZE;
                } else
#endif /* CONFIG_NET_L2_ETHERNET */
                {
                        /* Other L2s are not checked as the pkt MTU in this
                         * case is based on the IP layer (IPv6 most of the
                         * time).
                         */
                        max_len = size;
                }
        }

        max_len -= existing;

        return MIN(size, max_len);
}

static size_t pkt_estimate_headers_length(struct net_pkt *pkt,
                                          sa_family_t family,
                                          enum net_ip_protocol proto)
{
        size_t hdr_len = 0;

        if (family == AF_UNSPEC) {
                return 0;
        }

        /* Family header */
        if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
                hdr_len += NET_IPV6H_LEN;
        } else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
                hdr_len += NET_IPV4H_LEN;
        }

        /* + protocol header */
        if (IS_ENABLED(CONFIG_NET_TCP) && proto == IPPROTO_TCP) {
                hdr_len += NET_TCPH_LEN + NET_TCP_MAX_OPT_SIZE;
        } else if (IS_ENABLED(CONFIG_NET_UDP) && proto == IPPROTO_UDP) {
                hdr_len += NET_UDPH_LEN;
        } else if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) {
                hdr_len += NET_ICMPH_LEN;
        }

        NET_DBG("HDRs length estimation %zu", hdr_len);

        return hdr_len;
}

static size_t pkt_get_max_len(struct net_pkt *pkt)
{
        struct net_buf *buf = pkt->buffer;
        size_t size = 0;

        while (buf) {
                size += net_buf_max_len(buf);
                buf = buf->frags;
        }

        return size;
}

size_t net_pkt_available_buffer(struct net_pkt *pkt)
{
        if (!pkt) {
                return 0;
        }

        return pkt_get_max_len(pkt) - net_pkt_get_len(pkt);
}

size_t net_pkt_available_payload_buffer(struct net_pkt *pkt,
                                        enum net_ip_protocol proto)
{
        size_t hdr_len = 0;
        size_t len;

        if (!pkt) {
                return 0;
        }

        hdr_len = pkt_estimate_headers_length(pkt, net_pkt_family(pkt), proto);
        len = net_pkt_get_len(pkt);

        hdr_len = hdr_len <= len ? 0 : hdr_len - len;

        len = net_pkt_available_buffer(pkt) - hdr_len;

        return len;
}

void net_pkt_trim_buffer(struct net_pkt *pkt)
{
        struct net_buf *buf, *prev;

        buf = pkt->buffer;
        prev = buf;

        while (buf) {
                struct net_buf *next = buf->frags;

                if (!buf->len) {
                        if (buf == pkt->buffer) {
                                pkt->buffer = next;
                        } else if (buf == prev->frags) {
                                prev->frags = next;
                        }

                        buf->frags = NULL;
                        net_buf_unref(buf);
                } else {
                        prev = buf;
                }

                buf = next;
        }
}

int net_pkt_remove_tail(struct net_pkt *pkt, size_t length)
{
        struct net_buf *buf = pkt->buffer;
        size_t remaining_len = net_pkt_get_len(pkt);

        if (remaining_len < length) {
                return -EINVAL;
        }

        remaining_len -= length;

        while (buf) {
                if (buf->len >= remaining_len) {
                        buf->len = remaining_len;

                        if (buf->frags) {
                                net_pkt_frag_unref(buf->frags);
                                buf->frags = NULL;
                        }

                        break;
                }

                remaining_len -= buf->len;
                buf = buf->frags;
        }

        return 0;
}
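
/* Illustrative sketch (not compiled): dropping a 4-byte trailer (for
 * example a received FCS or MIC) from the end of a packet. The helper
 * fails with -EINVAL if the packet is shorter than the amount removed.
 */
#if 0
if (net_pkt_remove_tail(pkt, 4) < 0) {
        /* packet was shorter than 4 bytes */
}
#endif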

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
int net_pkt_alloc_buffer_debug(struct net_pkt *pkt,
                               size_t size,
                               enum net_ip_protocol proto,
                               k_timeout_t timeout,
                               const char *caller,
                               int line)
#else
int net_pkt_alloc_buffer(struct net_pkt *pkt,
                         size_t size,
                         enum net_ip_protocol proto,
                         k_timeout_t timeout)
#endif
{
        uint64_t end = sys_clock_timeout_end_calc(timeout);
        struct net_buf_pool *pool = NULL;
        size_t alloc_len = 0;
        size_t hdr_len = 0;
        struct net_buf *buf;

        if (!size && proto == 0 && net_pkt_family(pkt) == AF_UNSPEC) {
                return 0;
        }

        if (k_is_in_isr()) {
                timeout = K_NO_WAIT;
        }

        /* Verify the existing buffer and take the free space there
         * into account.
         */
        alloc_len = net_pkt_available_buffer(pkt);
        if (!alloc_len) {
                /* If there is no free space, account for the estimated
                 * header space.
                 */
                hdr_len = pkt_estimate_headers_length(pkt,
                                                      net_pkt_family(pkt),
                                                      proto);
        }

        /* Calculate the maximum that can be allocated depending on size */
        alloc_len = pkt_buffer_length(pkt, size + hdr_len, proto, alloc_len);

        NET_DBG("Data allocation maximum size %zu (requested %zu)",
                alloc_len, size);

        if (pkt->context) {
                pool = get_data_pool(pkt->context);
        }

        if (!pool) {
                pool = pkt->slab == &tx_pkts ? &tx_bufs : &rx_bufs;
        }

        if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
            !K_TIMEOUT_EQ(timeout, K_FOREVER)) {
                int64_t remaining = end - sys_clock_tick_get();

                if (remaining <= 0) {
                        timeout = K_NO_WAIT;
                } else {
                        timeout = Z_TIMEOUT_TICKS(remaining);
                }
        }

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
        buf = pkt_alloc_buffer(pool, alloc_len, timeout, caller, line);
#else
        buf = pkt_alloc_buffer(pool, alloc_len, timeout);
#endif

        if (!buf) {
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
                NET_ERR("Data buffer (%zd) allocation failed (%s:%d)",
                        alloc_len, caller, line);
#else
                NET_ERR("Data buffer (%zd) allocation failed.", alloc_len);
#endif
                return -ENOMEM;
        }

        net_pkt_append_buffer(pkt, buf);

        return 0;
}

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
static struct net_pkt *pkt_alloc(struct k_mem_slab *slab, k_timeout_t timeout,
                                 const char *caller, int line)
#else
static struct net_pkt *pkt_alloc(struct k_mem_slab *slab, k_timeout_t timeout)
#endif
{
        struct net_pkt *pkt;
        uint32_t create_time;
        int ret;

        if (k_is_in_isr()) {
                timeout = K_NO_WAIT;
        }

        if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS) ||
            IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS)) {
                create_time = k_cycle_get_32();
        } else {
                ARG_UNUSED(create_time);
        }

        ret = k_mem_slab_alloc(slab, (void **)&pkt, timeout);
        if (ret) {
                return NULL;
        }

        memset(pkt, 0, sizeof(struct net_pkt));

        pkt->atomic_ref = ATOMIC_INIT(1);
        pkt->slab = slab;

        if (IS_ENABLED(CONFIG_NET_IPV6)) {
                net_pkt_set_ipv6_next_hdr(pkt, 255);
        }

#if defined(CONFIG_NET_TX_DEFAULT_PRIORITY)
#define TX_DEFAULT_PRIORITY CONFIG_NET_TX_DEFAULT_PRIORITY
#else
#define TX_DEFAULT_PRIORITY 0
#endif

#if defined(CONFIG_NET_RX_DEFAULT_PRIORITY)
#define RX_DEFAULT_PRIORITY CONFIG_NET_RX_DEFAULT_PRIORITY
#else
#define RX_DEFAULT_PRIORITY 0
#endif

        if (&tx_pkts == slab) {
                net_pkt_set_priority(pkt, TX_DEFAULT_PRIORITY);
        } else if (&rx_pkts == slab) {
                net_pkt_set_priority(pkt, RX_DEFAULT_PRIORITY);
        }

        if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS) ||
            IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS)) {
                net_pkt_set_create_time(pkt, create_time);
        }

        net_pkt_set_vlan_tag(pkt, NET_VLAN_TAG_UNSPEC);

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
        net_pkt_alloc_add(pkt, true, caller, line);
#endif

        net_pkt_cursor_init(pkt);

        return pkt;
}

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
struct net_pkt *net_pkt_alloc_debug(k_timeout_t timeout,
                                    const char *caller, int line)
#else
struct net_pkt *net_pkt_alloc(k_timeout_t timeout)
#endif
{
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
        return pkt_alloc(&tx_pkts, timeout, caller, line);
#else
        return pkt_alloc(&tx_pkts, timeout);
#endif
}

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
struct net_pkt *net_pkt_alloc_from_slab_debug(struct k_mem_slab *slab,
                                              k_timeout_t timeout,
                                              const char *caller, int line)
#else
struct net_pkt *net_pkt_alloc_from_slab(struct k_mem_slab *slab,
                                        k_timeout_t timeout)
#endif
{
        if (!slab) {
                return NULL;
        }

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
        return pkt_alloc(slab, timeout, caller, line);
#else
        return pkt_alloc(slab, timeout);
#endif
}

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
struct net_pkt *net_pkt_rx_alloc_debug(k_timeout_t timeout,
                                       const char *caller, int line)
#else
struct net_pkt *net_pkt_rx_alloc(k_timeout_t timeout)
#endif
{
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
        return pkt_alloc(&rx_pkts, timeout, caller, line);
#else
        return pkt_alloc(&rx_pkts, timeout);
#endif
}

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
static struct net_pkt *pkt_alloc_on_iface(struct k_mem_slab *slab,
                                          struct net_if *iface,
                                          k_timeout_t timeout,
                                          const char *caller, int line)
#else
static struct net_pkt *pkt_alloc_on_iface(struct k_mem_slab *slab,
                                          struct net_if *iface,
                                          k_timeout_t timeout)
#endif
{
        struct net_pkt *pkt;

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
        pkt = pkt_alloc(slab, timeout, caller, line);
#else
        pkt = pkt_alloc(slab, timeout);
#endif

        if (pkt) {
                net_pkt_set_iface(pkt, iface);
        }

        return pkt;
}

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
struct net_pkt *net_pkt_alloc_on_iface_debug(struct net_if *iface,
                                             k_timeout_t timeout,
                                             const char *caller,
                                             int line)
#else
struct net_pkt *net_pkt_alloc_on_iface(struct net_if *iface,
                                       k_timeout_t timeout)
#endif
{
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
        return pkt_alloc_on_iface(&tx_pkts, iface, timeout, caller, line);
#else
        return pkt_alloc_on_iface(&tx_pkts, iface, timeout);
#endif
}

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
struct net_pkt *net_pkt_rx_alloc_on_iface_debug(struct net_if *iface,
                                                k_timeout_t timeout,
                                                const char *caller,
                                                int line)
#else
struct net_pkt *net_pkt_rx_alloc_on_iface(struct net_if *iface,
                                          k_timeout_t timeout)
#endif
{
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
        return pkt_alloc_on_iface(&rx_pkts, iface, timeout, caller, line);
#else
        return pkt_alloc_on_iface(&rx_pkts, iface, timeout);
#endif
}

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
static struct net_pkt *
pkt_alloc_with_buffer(struct k_mem_slab *slab,
                      struct net_if *iface,
                      size_t size,
                      sa_family_t family,
                      enum net_ip_protocol proto,
                      k_timeout_t timeout,
                      const char *caller,
                      int line)
#else
static struct net_pkt *
pkt_alloc_with_buffer(struct k_mem_slab *slab,
                      struct net_if *iface,
                      size_t size,
                      sa_family_t family,
                      enum net_ip_protocol proto,
                      k_timeout_t timeout)
#endif
{
        uint64_t end = sys_clock_timeout_end_calc(timeout);
        struct net_pkt *pkt;
        int ret;

        NET_DBG("On iface %p size %zu", iface, size);

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
        pkt = pkt_alloc_on_iface(slab, iface, timeout, caller, line);
#else
        pkt = pkt_alloc_on_iface(slab, iface, timeout);
#endif

        if (!pkt) {
                return NULL;
        }

        net_pkt_set_family(pkt, family);

        if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
            !K_TIMEOUT_EQ(timeout, K_FOREVER)) {
                int64_t remaining = end - sys_clock_tick_get();

                if (remaining <= 0) {
                        timeout = K_NO_WAIT;
                } else {
                        timeout = Z_TIMEOUT_TICKS(remaining);
                }
        }

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
        ret = net_pkt_alloc_buffer_debug(pkt, size, proto, timeout,
                                         caller, line);
#else
        ret = net_pkt_alloc_buffer(pkt, size, proto, timeout);
#endif

        if (ret) {
                net_pkt_unref(pkt);
                return NULL;
        }

        return pkt;
}

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
struct net_pkt *net_pkt_alloc_with_buffer_debug(struct net_if *iface,
                                                size_t size,
                                                sa_family_t family,
                                                enum net_ip_protocol proto,
                                                k_timeout_t timeout,
                                                const char *caller,
                                                int line)
#else
struct net_pkt *net_pkt_alloc_with_buffer(struct net_if *iface,
                                          size_t size,
                                          sa_family_t family,
                                          enum net_ip_protocol proto,
                                          k_timeout_t timeout)
#endif
{
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
        return pkt_alloc_with_buffer(&tx_pkts, iface, size, family,
                                     proto, timeout, caller, line);
#else
        return pkt_alloc_with_buffer(&tx_pkts, iface, size, family,
                                     proto, timeout);
#endif
}

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
struct net_pkt *net_pkt_rx_alloc_with_buffer_debug(struct net_if *iface,
                                                   size_t size,
                                                   sa_family_t family,
                                                   enum net_ip_protocol proto,
                                                   k_timeout_t timeout,
                                                   const char *caller,
                                                   int line)
#else
struct net_pkt *net_pkt_rx_alloc_with_buffer(struct net_if *iface,
                                             size_t size,
                                             sa_family_t family,
                                             enum net_ip_protocol proto,
                                             k_timeout_t timeout)
#endif
{
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
        return pkt_alloc_with_buffer(&rx_pkts, iface, size, family,
                                     proto, timeout, caller, line);
#else
        return pkt_alloc_with_buffer(&rx_pkts, iface, size, family,
                                     proto, timeout);
#endif
}
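
/* Illustrative sketch (not compiled): the typical TX-path allocation.
 * One call reserves both the net_pkt metadata and enough buffer for
 * "payload_len" bytes of UDP/IPv4 payload plus the estimated headers.
 * "iface", "payload" and "payload_len" are assumptions of this example.
 */
#if 0
struct net_pkt *pkt;

pkt = net_pkt_alloc_with_buffer(iface, payload_len, AF_INET,
                                IPPROTO_UDP, K_MSEC(100));
if (!pkt) {
        return -ENOMEM;
}

if (net_pkt_write(pkt, payload, payload_len) < 0) {
        net_pkt_unref(pkt);
        return -ENOBUFS;
}
#endif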

void net_pkt_append_buffer(struct net_pkt *pkt, struct net_buf *buffer)
{
        if (!pkt->buffer) {
                pkt->buffer = buffer;
                net_pkt_cursor_init(pkt);
        } else {
                net_buf_frag_insert(net_buf_frag_last(pkt->buffer), buffer);
        }
}

void net_pkt_cursor_init(struct net_pkt *pkt)
{
        pkt->cursor.buf = pkt->buffer;
        if (pkt->cursor.buf) {
                pkt->cursor.pos = pkt->cursor.buf->data;
        } else {
                pkt->cursor.pos = NULL;
        }
}

static void pkt_cursor_jump(struct net_pkt *pkt, bool write)
{
        struct net_pkt_cursor *cursor = &pkt->cursor;

        cursor->buf = cursor->buf->frags;
        while (cursor->buf) {
                const size_t len =
                        write ? net_buf_max_len(cursor->buf) : cursor->buf->len;

                if (!len) {
                        cursor->buf = cursor->buf->frags;
                } else {
                        break;
                }
        }

        if (cursor->buf) {
                cursor->pos = cursor->buf->data;
        } else {
                cursor->pos = NULL;
        }
}

static void pkt_cursor_advance(struct net_pkt *pkt, bool write)
{
        struct net_pkt_cursor *cursor = &pkt->cursor;
        size_t len;

        if (!cursor->buf) {
                return;
        }

        len = write ? net_buf_max_len(cursor->buf) : cursor->buf->len;
        if ((cursor->pos - cursor->buf->data) == len) {
                pkt_cursor_jump(pkt, write);
        }
}

static void pkt_cursor_update(struct net_pkt *pkt,
                              size_t length, bool write)
{
        struct net_pkt_cursor *cursor = &pkt->cursor;
        size_t len;

        if (net_pkt_is_being_overwritten(pkt)) {
                write = false;
        }

        len = write ? net_buf_max_len(cursor->buf) : cursor->buf->len;
        if (length + (cursor->pos - cursor->buf->data) == len &&
            !(net_pkt_is_being_overwritten(pkt) &&
              len < net_buf_max_len(cursor->buf))) {
                pkt_cursor_jump(pkt, write);
        } else {
                cursor->pos += length;
        }
}

/* Internal function that does all operations (skip/read/write/memset) */
static int net_pkt_cursor_operate(struct net_pkt *pkt,
                                  void *data, size_t length,
                                  bool copy, bool write)
{
        /* We use this variable to avoid lengthy lines */
        struct net_pkt_cursor *c_op = &pkt->cursor;

        while (c_op->buf && length) {
                size_t d_len, len;

                pkt_cursor_advance(pkt, net_pkt_is_being_overwritten(pkt) ?
                                   false : write);
                if (c_op->buf == NULL) {
                        break;
                }

                if (write && !net_pkt_is_being_overwritten(pkt)) {
                        d_len = net_buf_max_len(c_op->buf) -
                                (c_op->pos - c_op->buf->data);
                } else {
                        d_len = c_op->buf->len - (c_op->pos - c_op->buf->data);
                }

                if (!d_len) {
                        break;
                }

                if (length < d_len) {
                        len = length;
                } else {
                        len = d_len;
                }

                if (copy && data) {
                        memcpy(write ? c_op->pos : data,
                               write ? data : c_op->pos,
                               len);
                } else if (data) {
                        memset(c_op->pos, *(int *)data, len);
                }

                if (write && !net_pkt_is_being_overwritten(pkt)) {
                        net_buf_add(c_op->buf, len);
                }

                pkt_cursor_update(pkt, len, write);

                if (copy && data) {
                        data = (uint8_t *)data + len;
                }

                length -= len;
        }

        if (length) {
                NET_DBG("Still some length to go %zu", length);
                return -ENOBUFS;
        }

        return 0;
}

int net_pkt_skip(struct net_pkt *pkt, size_t skip)
{
        NET_DBG("pkt %p skip %zu", pkt, skip);

        return net_pkt_cursor_operate(pkt, NULL, skip, false, true);
}

int net_pkt_memset(struct net_pkt *pkt, int byte, size_t amount)
{
        NET_DBG("pkt %p byte %d amount %zu", pkt, byte, amount);

        return net_pkt_cursor_operate(pkt, &byte, amount, false, true);
}

int net_pkt_read(struct net_pkt *pkt, void *data, size_t length)
{
        NET_DBG("pkt %p data %p length %zu", pkt, data, length);

        return net_pkt_cursor_operate(pkt, data, length, true, false);
}

int net_pkt_read_be16(struct net_pkt *pkt, uint16_t *data)
{
        uint8_t d16[2];
        int ret;

        ret = net_pkt_read(pkt, d16, sizeof(uint16_t));

        *data = d16[0] << 8 | d16[1];

        return ret;
}

int net_pkt_read_le16(struct net_pkt *pkt, uint16_t *data)
{
        uint8_t d16[2];
        int ret;

        ret = net_pkt_read(pkt, d16, sizeof(uint16_t));

        *data = d16[1] << 8 | d16[0];

        return ret;
}

int net_pkt_read_be32(struct net_pkt *pkt, uint32_t *data)
{
        uint8_t d32[4];
        int ret;

        ret = net_pkt_read(pkt, d32, sizeof(uint32_t));

        *data = d32[0] << 24 | d32[1] << 16 | d32[2] << 8 | d32[3];

        return ret;
}
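
/* Illustrative sketch (not compiled): parsing two network-order fields
 * in sequence; the cursor advances with every read. Checking each
 * return value matters because reading past the packet end yields
 * -ENOBUFS.
 */
#if 0
uint16_t src_port;
uint32_t seq;

if (net_pkt_read_be16(pkt, &src_port) < 0 ||
    net_pkt_read_be32(pkt, &seq) < 0) {
        return -ENOBUFS;
}
#endif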

int net_pkt_write(struct net_pkt *pkt, const void *data, size_t length)
{
        NET_DBG("pkt %p data %p length %zu", pkt, data, length);

        if (data == pkt->cursor.pos && net_pkt_is_contiguous(pkt, length)) {
                return net_pkt_skip(pkt, length);
        }

        return net_pkt_cursor_operate(pkt, (void *)data, length, true, true);
}

int net_pkt_copy(struct net_pkt *pkt_dst,
                 struct net_pkt *pkt_src,
                 size_t length)
{
        struct net_pkt_cursor *c_dst = &pkt_dst->cursor;
        struct net_pkt_cursor *c_src = &pkt_src->cursor;

        while (c_dst->buf && c_src->buf && length) {
                size_t s_len, d_len, len;

                pkt_cursor_advance(pkt_dst, true);
                pkt_cursor_advance(pkt_src, false);

                if (!c_dst->buf || !c_src->buf) {
                        break;
                }

                s_len = c_src->buf->len - (c_src->pos - c_src->buf->data);
                d_len = net_buf_max_len(c_dst->buf) - (c_dst->pos - c_dst->buf->data);
                if (length < s_len && length < d_len) {
                        len = length;
                } else {
                        if (d_len < s_len) {
                                len = d_len;
                        } else {
                                len = s_len;
                        }
                }

                if (!len) {
                        break;
                }

                memcpy(c_dst->pos, c_src->pos, len);

                if (!net_pkt_is_being_overwritten(pkt_dst)) {
                        net_buf_add(c_dst->buf, len);
                }

                pkt_cursor_update(pkt_dst, len, true);
                pkt_cursor_update(pkt_src, len, false);

                length -= len;
        }

        if (length) {
                NET_DBG("Still some length to go %zu", length);
                return -ENOBUFS;
        }

        return 0;
}

static int32_t net_pkt_find_offset(struct net_pkt *pkt, uint8_t *ptr)
{
        struct net_buf *buf;
        int32_t ret = -EINVAL;
        uint16_t offset;

        if (!ptr || !pkt || !pkt->buffer) {
                return ret;
        }

        offset = 0U;
        buf = pkt->buffer;

        while (buf) {
                if (buf->data <= ptr && ptr <= (buf->data + buf->len)) {
                        ret = offset + (ptr - buf->data);
                        break;
                }
                offset += buf->len;
                buf = buf->frags;
        }

        return ret;
}

static void clone_pkt_lladdr(struct net_pkt *pkt, struct net_pkt *clone_pkt,
                             struct net_linkaddr *lladdr)
{
        int32_t ll_addr_offset;

        if (!lladdr->addr) {
                return;
        }

        ll_addr_offset = net_pkt_find_offset(pkt, lladdr->addr);

        if (ll_addr_offset >= 0) {
                net_pkt_cursor_init(clone_pkt);
                net_pkt_skip(clone_pkt, ll_addr_offset);
                lladdr->addr = net_pkt_cursor_get_pos(clone_pkt);
        }
}

#if defined(NET_PKT_HAS_CONTROL_BLOCK)
static inline void clone_pkt_cb(struct net_pkt *pkt, struct net_pkt *clone_pkt)
{
        memcpy(net_pkt_cb(clone_pkt), net_pkt_cb(pkt), sizeof(clone_pkt->cb));
}
#else
static inline void clone_pkt_cb(struct net_pkt *pkt, struct net_pkt *clone_pkt)
{
        ARG_UNUSED(pkt);
        ARG_UNUSED(clone_pkt);
}
#endif

static void clone_pkt_attributes(struct net_pkt *pkt, struct net_pkt *clone_pkt)
{
        net_pkt_set_family(clone_pkt, net_pkt_family(pkt));
        net_pkt_set_context(clone_pkt, net_pkt_context(pkt));
        net_pkt_set_ip_hdr_len(clone_pkt, net_pkt_ip_hdr_len(pkt));
        net_pkt_set_ip_dscp(clone_pkt, net_pkt_ip_dscp(pkt));
        net_pkt_set_ip_ecn(clone_pkt, net_pkt_ip_ecn(pkt));
        net_pkt_set_vlan_tag(clone_pkt, net_pkt_vlan_tag(pkt));
        net_pkt_set_timestamp(clone_pkt, net_pkt_timestamp(pkt));
        net_pkt_set_priority(clone_pkt, net_pkt_priority(pkt));
        net_pkt_set_orig_iface(clone_pkt, net_pkt_orig_iface(pkt));
        net_pkt_set_captured(clone_pkt, net_pkt_is_captured(pkt));

        net_pkt_set_l2_bridged(clone_pkt, net_pkt_is_l2_bridged(pkt));
        net_pkt_set_l2_processed(clone_pkt, net_pkt_is_l2_processed(pkt));
        net_pkt_set_ll_proto_type(clone_pkt, net_pkt_ll_proto_type(pkt));

        if (pkt->buffer && clone_pkt->buffer) {
                memcpy(net_pkt_lladdr_src(clone_pkt), net_pkt_lladdr_src(pkt),
                       sizeof(struct net_linkaddr));
                memcpy(net_pkt_lladdr_dst(clone_pkt), net_pkt_lladdr_dst(pkt),
                       sizeof(struct net_linkaddr));
                /* The link header pointers are usable as-is if we
                 * shallow-copied the buffer, even if they point
                 * into the fragment memory of the buffer.
                 * Otherwise we have to set the ll address pointers
                 * relative to the new buffer to avoid dangling
                 * pointers into the source packet.
                 */
                if (pkt->buffer != clone_pkt->buffer) {
                        clone_pkt_lladdr(pkt, clone_pkt, net_pkt_lladdr_src(clone_pkt));
                        clone_pkt_lladdr(pkt, clone_pkt, net_pkt_lladdr_dst(clone_pkt));
                }
        }

        if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) {
                net_pkt_set_ipv4_ttl(clone_pkt, net_pkt_ipv4_ttl(pkt));
                net_pkt_set_ipv4_opts_len(clone_pkt,
                                          net_pkt_ipv4_opts_len(pkt));
        } else if (IS_ENABLED(CONFIG_NET_IPV6) &&
                   net_pkt_family(pkt) == AF_INET6) {
                net_pkt_set_ipv6_hop_limit(clone_pkt,
                                           net_pkt_ipv6_hop_limit(pkt));
                net_pkt_set_ipv6_ext_len(clone_pkt, net_pkt_ipv6_ext_len(pkt));
                net_pkt_set_ipv6_ext_opt_len(clone_pkt,
                                             net_pkt_ipv6_ext_opt_len(pkt));
                net_pkt_set_ipv6_hdr_prev(clone_pkt,
                                          net_pkt_ipv6_hdr_prev(pkt));
                net_pkt_set_ipv6_next_hdr(clone_pkt,
                                          net_pkt_ipv6_next_hdr(pkt));
        }

        clone_pkt_cb(pkt, clone_pkt);
}

static struct net_pkt *net_pkt_clone_internal(struct net_pkt *pkt,
                                              struct k_mem_slab *slab,
                                              k_timeout_t timeout)
{
        size_t cursor_offset = net_pkt_get_current_offset(pkt);
        bool overwrite = net_pkt_is_being_overwritten(pkt);
        struct net_pkt_cursor backup;
        struct net_pkt *clone_pkt;

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
        clone_pkt = pkt_alloc_with_buffer(slab, net_pkt_iface(pkt),
                                          net_pkt_get_len(pkt),
                                          AF_UNSPEC, 0, timeout,
                                          __func__, __LINE__);
#else
        clone_pkt = pkt_alloc_with_buffer(slab, net_pkt_iface(pkt),
                                          net_pkt_get_len(pkt),
                                          AF_UNSPEC, 0, timeout);
#endif
        if (!clone_pkt) {
                return NULL;
        }

        net_pkt_set_overwrite(pkt, true);
        net_pkt_cursor_backup(pkt, &backup);
        net_pkt_cursor_init(pkt);

        if (net_pkt_copy(clone_pkt, pkt, net_pkt_get_len(pkt))) {
                net_pkt_unref(clone_pkt);
                net_pkt_cursor_restore(pkt, &backup);
                net_pkt_set_overwrite(pkt, overwrite);
                return NULL;
        }
        net_pkt_set_overwrite(clone_pkt, true);

        clone_pkt_attributes(pkt, clone_pkt);

        net_pkt_cursor_init(clone_pkt);

        if (cursor_offset) {
                net_pkt_skip(clone_pkt, cursor_offset);
        }
        net_pkt_set_overwrite(clone_pkt, overwrite);

        net_pkt_cursor_restore(pkt, &backup);
        net_pkt_set_overwrite(pkt, overwrite);

        NET_DBG("Cloned %p to %p", pkt, clone_pkt);

        return clone_pkt;
}

struct net_pkt *net_pkt_clone(struct net_pkt *pkt, k_timeout_t timeout)
{
        return net_pkt_clone_internal(pkt, pkt->slab, timeout);
}

struct net_pkt *net_pkt_rx_clone(struct net_pkt *pkt, k_timeout_t timeout)
{
        return net_pkt_clone_internal(pkt, &rx_pkts, timeout);
}

struct net_pkt *net_pkt_shallow_clone(struct net_pkt *pkt, k_timeout_t timeout)
{
        struct net_pkt *clone_pkt;
        struct net_buf *buf;

        clone_pkt = net_pkt_alloc(timeout);
        if (!clone_pkt) {
                return NULL;
        }

        net_pkt_set_iface(clone_pkt, net_pkt_iface(pkt));
        clone_pkt->buffer = pkt->buffer;
        buf = pkt->buffer;

        net_pkt_frag_ref(buf);

        clone_pkt_attributes(pkt, clone_pkt);

        net_pkt_cursor_restore(clone_pkt, &pkt->cursor);

        NET_DBG("Shallow cloned %p to %p", pkt, clone_pkt);

        return clone_pkt;
}
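
/* Illustrative sketch (not compiled): a deep clone duplicates the buffer
 * contents and can be modified independently, while a shallow clone
 * shares (and re-references) the original buffer chain, so it is only
 * safe for read-style consumers such as packet capture.
 */
#if 0
struct net_pkt *deep = net_pkt_clone(pkt, K_MSEC(100));
struct net_pkt *shallow = net_pkt_shallow_clone(pkt, K_MSEC(100));

/* ... */

net_pkt_unref(deep);
net_pkt_unref(shallow);        /* drops the extra buffer reference too */
#endif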

size_t net_pkt_remaining_data(struct net_pkt *pkt)
{
        struct net_buf *buf;
        size_t data_length;

        if (!pkt || !pkt->cursor.buf || !pkt->cursor.pos) {
                return 0;
        }

        buf = pkt->cursor.buf;
        data_length = buf->len - (pkt->cursor.pos - buf->data);

        buf = buf->frags;
        while (buf) {
                data_length += buf->len;
                buf = buf->frags;
        }

        return data_length;
}

int net_pkt_update_length(struct net_pkt *pkt, size_t length)
{
        struct net_buf *buf;

        for (buf = pkt->buffer; buf; buf = buf->frags) {
                if (buf->len < length) {
                        length -= buf->len;
                } else {
                        buf->len = length;
                        length = 0;
                }
        }

        return !length ? 0 : -EINVAL;
}

int net_pkt_pull(struct net_pkt *pkt, size_t length)
{
        struct net_pkt_cursor *c_op = &pkt->cursor;

        while (length) {
                size_t left, rem;

                pkt_cursor_advance(pkt, false);

                if (!c_op->buf) {
                        break;
                }

                left = c_op->buf->len - (c_op->pos - c_op->buf->data);
                if (!left) {
                        break;
                }

                rem = left;
                if (rem > length) {
                        rem = length;
                }

                c_op->buf->len -= rem;
                left -= rem;
                if (left) {
                        memmove(c_op->pos, c_op->pos + rem, left);
                } else {
                        struct net_buf *buf = pkt->buffer;

                        if (buf) {
                                pkt->buffer = buf->frags;
                                buf->frags = NULL;
                                net_buf_unref(buf);
                        }

                        net_pkt_cursor_init(pkt);
                }

                length -= rem;
        }

        net_pkt_cursor_init(pkt);

        if (length) {
                NET_DBG("Still some length to go %zu", length);
                return -ENOBUFS;
        }

        return 0;
}

uint16_t net_pkt_get_current_offset(struct net_pkt *pkt)
{
        struct net_buf *buf = pkt->buffer;
        uint16_t offset;

        if (!pkt->cursor.buf || !pkt->cursor.pos) {
                return 0;
        }

        offset = 0U;

        while (buf != pkt->cursor.buf) {
                offset += buf->len;
                buf = buf->frags;
        }

        offset += pkt->cursor.pos - buf->data;

        return offset;
}

bool net_pkt_is_contiguous(struct net_pkt *pkt, size_t size)
{
        size_t len = net_pkt_get_contiguous_len(pkt);

        return len >= size;
}

size_t net_pkt_get_contiguous_len(struct net_pkt *pkt)
{
        pkt_cursor_advance(pkt, !net_pkt_is_being_overwritten(pkt));

        if (pkt->cursor.buf && pkt->cursor.pos) {
                size_t len;

                len = net_pkt_is_being_overwritten(pkt) ?
                        pkt->cursor.buf->len : pkt->cursor.buf->size;
                len -= pkt->cursor.pos - pkt->cursor.buf->data;
                return len;
        }

        return 0;
}

void *net_pkt_get_data(struct net_pkt *pkt,
                       struct net_pkt_data_access *access)
{
        if (IS_ENABLED(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)) {
                if (!net_pkt_is_contiguous(pkt, access->size)) {
                        return NULL;
                }

                return pkt->cursor.pos;
        } else {
                if (net_pkt_is_contiguous(pkt, access->size)) {
                        access->data = pkt->cursor.pos;
                } else if (net_pkt_is_being_overwritten(pkt)) {
                        struct net_pkt_cursor backup;

                        if (!access->data) {
                                NET_ERR("Uncontiguous data"
                                        " cannot be linearized");
                                return NULL;
                        }

                        net_pkt_cursor_backup(pkt, &backup);

                        if (net_pkt_read(pkt, access->data, access->size)) {
                                net_pkt_cursor_restore(pkt, &backup);
                                return NULL;
                        }

                        net_pkt_cursor_restore(pkt, &backup);
                }

                return access->data;
        }

        return NULL;
}

int net_pkt_set_data(struct net_pkt *pkt,
                     struct net_pkt_data_access *access)
{
        if (IS_ENABLED(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)) {
                return net_pkt_skip(pkt, access->size);
        }

        return net_pkt_write(pkt, access->data, access->size);
}
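
/* Illustrative sketch (not compiled): the usual header access pattern
 * built on net_pkt_get_data()/net_pkt_set_data(). The
 * NET_PKT_DATA_ACCESS_DEFINE() helper comes from net_pkt.h; using the
 * UDP header here is just an example of a contiguous-or-copied access.
 */
#if 0
NET_PKT_DATA_ACCESS_DEFINE(udp_access, struct net_udp_hdr);
struct net_udp_hdr *udp_hdr;

udp_hdr = (struct net_udp_hdr *)net_pkt_get_data(pkt, &udp_access);
if (!udp_hdr) {
        return -ENOBUFS;
}

udp_hdr->src_port = htons(12345);
net_pkt_set_data(pkt, &udp_access);        /* commit and advance cursor */
#endif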

void net_pkt_init(void)
{
#if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
        NET_DBG("Allocating %u RX (%zu bytes), %u TX (%zu bytes), "
                "%d RX data (%u bytes) and %d TX data (%u bytes) buffers",
                k_mem_slab_num_free_get(&rx_pkts),
                (size_t)(k_mem_slab_num_free_get(&rx_pkts) *
                         sizeof(struct net_pkt)),
                k_mem_slab_num_free_get(&tx_pkts),
                (size_t)(k_mem_slab_num_free_get(&tx_pkts) *
                         sizeof(struct net_pkt)),
                get_frees(&rx_bufs), get_size(&rx_bufs),
                get_frees(&tx_bufs), get_size(&tx_bufs));
#endif
}