1 /** @file
2 @brief Network packet buffers for IP stack
3
4 Network data is passed between components using net_pkt.
5 */
6
7 /*
8 * Copyright (c) 2016 Intel Corporation
9 *
10 * SPDX-License-Identifier: Apache-2.0
11 */
12
13 #include <logging/log.h>
14 LOG_MODULE_REGISTER(net_pkt, CONFIG_NET_PKT_LOG_LEVEL);
15
16 /* This enables allocation debugging but does not print much output,
17 * as excessive logging can slow things down a lot.
18 */
19 #undef NET_LOG_LEVEL
20 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
21 #define NET_LOG_LEVEL 5
22 #else
23 #define NET_LOG_LEVEL CONFIG_NET_PKT_LOG_LEVEL
24 #endif
25
26 #include <kernel.h>
27 #include <toolchain.h>
28 #include <string.h>
29 #include <zephyr/types.h>
30 #include <sys/types.h>
31
32 #include <sys/util.h>
33
34 #include <net/net_core.h>
35 #include <net/net_ip.h>
36 #include <net/buf.h>
37 #include <net/net_pkt.h>
38 #include <net/ethernet.h>
39 #include <net/udp.h>
40
41 #include "net_private.h"
42 #include "tcp_internal.h"
43
44 /* Find max header size of IP protocol (IPv4 or IPv6) */
45 #if defined(CONFIG_NET_IPV6) || defined(CONFIG_NET_RAW_MODE) || \
46 defined(CONFIG_NET_SOCKETS_PACKET) || defined(CONFIG_NET_SOCKETS_OFFLOAD)
47 #define MAX_IP_PROTO_LEN NET_IPV6H_LEN
48 #else
49 #if defined(CONFIG_NET_IPV4)
50 #define MAX_IP_PROTO_LEN NET_IPV4H_LEN
51 #else
52 #if defined(CONFIG_NET_SOCKETS_CAN)
53 /* TODO: Use the CAN MTU here instead of a hard-coded value. There was
54 * a weird circular dependency issue, so this needs more TLC.
55 */
56 #define MAX_IP_PROTO_LEN 8
57 #else
58 #if defined(CONFIG_NET_ETHERNET_BRIDGE)
59 #define MAX_IP_PROTO_LEN 0
60 #else
61 #error "Either IPv6 or IPv4 needs to be selected."
62 #endif /* ETHERNET_BRIDGE */
63 #endif /* SOCKETS_CAN */
64 #endif /* IPv4 */
65 #endif /* IPv6 */
66
67 /* Find max header size of "next" protocol (TCP, UDP or ICMP) */
68 #if defined(CONFIG_NET_TCP)
69 #define MAX_NEXT_PROTO_LEN NET_TCPH_LEN
70 #else
71 #if defined(CONFIG_NET_UDP)
72 #define MAX_NEXT_PROTO_LEN NET_UDPH_LEN
73 #else
74 #if defined(CONFIG_NET_SOCKETS_CAN)
75 #define MAX_NEXT_PROTO_LEN 0
76 #else
77 /* If no TCP and no UDP, apparently we still want pings to work. */
78 #define MAX_NEXT_PROTO_LEN NET_ICMPH_LEN
79 #endif /* SOCKETS_CAN */
80 #endif /* UDP */
81 #endif /* TCP */
82
83 /* Make sure that the IP + TCP/UDP/ICMP headers fit into one fragment. This
84 * makes it possible to cast a fragment pointer to a protocol header struct.
85 */
86 #if CONFIG_NET_BUF_DATA_SIZE < (MAX_IP_PROTO_LEN + MAX_NEXT_PROTO_LEN)
87 #if defined(STRING2)
88 #undef STRING2
89 #endif
90 #if defined(STRING)
91 #undef STRING
92 #endif
93 #define STRING2(x) #x
94 #define STRING(x) STRING2(x)
95 #pragma message "Data len " STRING(CONFIG_NET_BUF_DATA_SIZE)
96 #pragma message "Minimum len " STRING(MAX_IP_PROTO_LEN + MAX_NEXT_PROTO_LEN)
97 #error "Too small net_buf fragment size"
98 #endif
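/* Illustrative sketch (not part of the build): because one fragment is
 * guaranteed to hold at least the IP + next protocol headers, stack code
 * can cast the start of a fragment's data to a header struct, e.g.
 *
 *	struct net_ipv6_hdr *hdr = (struct net_ipv6_hdr *)frag->data;
 *
 * where frag is assumed to be the struct net_buf holding the start of
 * the packet.
 */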
99
100 #if CONFIG_NET_PKT_RX_COUNT <= 0
101 #error "Minimum value for CONFIG_NET_PKT_RX_COUNT is 1"
102 #endif
103
104 #if CONFIG_NET_PKT_TX_COUNT <= 0
105 #error "Minimum value for CONFIG_NET_PKT_TX_COUNT is 1"
106 #endif
107
108 #if CONFIG_NET_BUF_RX_COUNT <= 0
109 #error "Minimum value for CONFIG_NET_BUF_RX_COUNT is 1"
110 #endif
111
112 #if CONFIG_NET_BUF_TX_COUNT <= 0
113 #error "Minimum value for CONFIG_NET_BUF_TX_COUNT is 1"
114 #endif
115
116 K_MEM_SLAB_DEFINE(rx_pkts, sizeof(struct net_pkt), CONFIG_NET_PKT_RX_COUNT, 4);
117 K_MEM_SLAB_DEFINE(tx_pkts, sizeof(struct net_pkt), CONFIG_NET_PKT_TX_COUNT, 4);
118
119 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
120
121 NET_BUF_POOL_FIXED_DEFINE(rx_bufs, CONFIG_NET_BUF_RX_COUNT,
122 CONFIG_NET_BUF_DATA_SIZE, NULL);
123 NET_BUF_POOL_FIXED_DEFINE(tx_bufs, CONFIG_NET_BUF_TX_COUNT,
124 CONFIG_NET_BUF_DATA_SIZE, NULL);
125
126 #else /* !CONFIG_NET_BUF_FIXED_DATA_SIZE */
127
128 NET_BUF_POOL_VAR_DEFINE(rx_bufs, CONFIG_NET_BUF_RX_COUNT,
129 CONFIG_NET_BUF_DATA_POOL_SIZE, NULL);
130 NET_BUF_POOL_VAR_DEFINE(tx_bufs, CONFIG_NET_BUF_TX_COUNT,
131 CONFIG_NET_BUF_DATA_POOL_SIZE, NULL);
132
133 #endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */
134
135 /* Allocation tracking is only available if separately enabled */
136 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
137 struct net_pkt_alloc {
138 union {
139 struct net_pkt *pkt;
140 struct net_buf *buf;
141 void *alloc_data;
142 };
143 const char *func_alloc;
144 const char *func_free;
145 uint16_t line_alloc;
146 uint16_t line_free;
147 uint8_t in_use;
148 bool is_pkt;
149 };
150
151 #define MAX_NET_PKT_ALLOCS (CONFIG_NET_PKT_RX_COUNT + \
152 CONFIG_NET_PKT_TX_COUNT + \
153 CONFIG_NET_BUF_RX_COUNT + \
154 CONFIG_NET_BUF_TX_COUNT + \
155 CONFIG_NET_DEBUG_NET_PKT_EXTERNALS)
156
157 static struct net_pkt_alloc net_pkt_allocs[MAX_NET_PKT_ALLOCS];
158
159 static void net_pkt_alloc_add(void *alloc_data, bool is_pkt,
160 const char *func, int line)
161 {
162 int i;
163
164 for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
165 if (net_pkt_allocs[i].in_use) {
166 continue;
167 }
168
169 net_pkt_allocs[i].in_use = true;
170 net_pkt_allocs[i].is_pkt = is_pkt;
171 net_pkt_allocs[i].alloc_data = alloc_data;
172 net_pkt_allocs[i].func_alloc = func;
173 net_pkt_allocs[i].line_alloc = line;
174
175 return;
176 }
177 }
178
179 static void net_pkt_alloc_del(void *alloc_data, const char *func, int line)
180 {
181 int i;
182
183 for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
184 if (net_pkt_allocs[i].in_use &&
185 net_pkt_allocs[i].alloc_data == alloc_data) {
186 net_pkt_allocs[i].func_free = func;
187 net_pkt_allocs[i].line_free = line;
188 net_pkt_allocs[i].in_use = false;
189
190 return;
191 }
192 }
193 }
194
195 static bool net_pkt_alloc_find(void *alloc_data,
196 const char **func_free,
197 int *line_free)
198 {
199 int i;
200
201 for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
202 if (!net_pkt_allocs[i].in_use &&
203 net_pkt_allocs[i].alloc_data == alloc_data) {
204 *func_free = net_pkt_allocs[i].func_free;
205 *line_free = net_pkt_allocs[i].line_free;
206
207 return true;
208 }
209 }
210
211 return false;
212 }
213
214 void net_pkt_allocs_foreach(net_pkt_allocs_cb_t cb, void *user_data)
215 {
216 int i;
217
218 for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
219 if (net_pkt_allocs[i].in_use) {
220 cb(net_pkt_allocs[i].is_pkt ?
221 net_pkt_allocs[i].pkt : NULL,
222 net_pkt_allocs[i].is_pkt ?
223 NULL : net_pkt_allocs[i].buf,
224 net_pkt_allocs[i].func_alloc,
225 net_pkt_allocs[i].line_alloc,
226 net_pkt_allocs[i].func_free,
227 net_pkt_allocs[i].line_free,
228 net_pkt_allocs[i].in_use,
229 user_data);
230 }
231 }
232
233 for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
234 if (!net_pkt_allocs[i].in_use) {
235 cb(net_pkt_allocs[i].is_pkt ?
236 net_pkt_allocs[i].pkt : NULL,
237 net_pkt_allocs[i].is_pkt ?
238 NULL : net_pkt_allocs[i].buf,
239 net_pkt_allocs[i].func_alloc,
240 net_pkt_allocs[i].line_alloc,
241 net_pkt_allocs[i].func_free,
242 net_pkt_allocs[i].line_free,
243 net_pkt_allocs[i].in_use,
244 user_data);
245 }
246 }
247 }
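/* Hedged usage sketch: the allocation table can be dumped with a callback
 * matching net_pkt_allocs_cb_t (argument order assumed from the invocation
 * above); alloc_dump_cb is a hypothetical name:
 *
 *	static void alloc_dump_cb(struct net_pkt *pkt, struct net_buf *buf,
 *				  const char *func_alloc, int line_alloc,
 *				  const char *func_free, int line_free,
 *				  bool in_use, void *user_data)
 *	{
 *		printk("%p %s by %s():%d\n", pkt ? (void *)pkt : (void *)buf,
 *		       in_use ? "in use" : "freed", func_alloc, line_alloc);
 *	}
 *
 *	net_pkt_allocs_foreach(alloc_dump_cb, NULL);
 */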
248 #else
249 #define net_pkt_alloc_add(alloc_data, is_pkt, func, line)
250 #define net_pkt_alloc_del(alloc_data, func, line)
251 #define net_pkt_alloc_find(alloc_data, func_free, line_free) false
252 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC */
253
254 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC) || \
255 CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
256
257 #define NET_FRAG_CHECK_IF_NOT_IN_USE(frag, ref) \
258 do { \
259 if (!(ref)) { \
260 NET_ERR("**ERROR** frag %p not in use (%s:%s():%d)", \
261 frag, __FILE__, __func__, __LINE__); \
262 } \
263 } while (0)
264
265 const char *net_pkt_slab2str(struct k_mem_slab *slab)
266 {
267 if (slab == &rx_pkts) {
268 return "RX";
269 } else if (slab == &tx_pkts) {
270 return "TX";
271 }
272
273 return "EXT";
274 }
275
276 const char *net_pkt_pool2str(struct net_buf_pool *pool)
277 {
278 if (pool == &rx_bufs) {
279 return "RDATA";
280 } else if (pool == &tx_bufs) {
281 return "TDATA";
282 }
283
284 return "EDATA";
285 }
286 #endif
287
288 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC) || \
289 CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
290 static inline int16_t get_frees(struct net_buf_pool *pool)
291 {
292 #if defined(CONFIG_NET_BUF_POOL_USAGE)
293 return atomic_get(&pool->avail_count);
294 #else
295 return 0;
296 #endif
297 }
298 #endif
299
300 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
301 static inline const char *get_name(struct net_buf_pool *pool)
302 {
303 #if defined(CONFIG_NET_BUF_POOL_USAGE)
304 return pool->name;
305 #else
306 return "?";
307 #endif
308 }
309
310 static inline int16_t get_size(struct net_buf_pool *pool)
311 {
312 #if defined(CONFIG_NET_BUF_POOL_USAGE)
313 return pool->pool_size;
314 #else
315 return 0;
316 #endif
317 }
318
319 static inline const char *slab2str(struct k_mem_slab *slab)
320 {
321 return net_pkt_slab2str(slab);
322 }
323
324 static inline const char *pool2str(struct net_buf_pool *pool)
325 {
326 return net_pkt_pool2str(pool);
327 }
328
329 void net_pkt_print_frags(struct net_pkt *pkt)
330 {
331 struct net_buf *frag;
332 size_t total = 0;
333 int count = 0, frag_size = 0;
334
335 if (!pkt) {
336 NET_INFO("pkt %p", pkt);
337 return;
338 }
339
340 NET_INFO("pkt %p frags %p", pkt, pkt->frags);
341
342 NET_ASSERT(pkt->frags);
343
344 frag = pkt->frags;
345 while (frag) {
346 total += frag->len;
347
348 frag_size = frag->size;
349
350 NET_INFO("[%d] frag %p len %d max len %u size %d pool %p",
351 count, frag, frag->len, net_buf_max_len(frag),
352 frag_size, net_buf_pool_get(frag->pool_id));
353
354 count++;
355
356 frag = frag->frags;
357 }
358
359 NET_INFO("Total data size %zu, occupied %d bytes, utilization %zu%%",
360 total, count * frag_size,
361 count ? (total * 100) / (count * frag_size) : 0);
362 }
363 #endif /* CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG */
364
365 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
366 struct net_buf *net_pkt_get_reserve_data_debug(struct net_buf_pool *pool,
367 k_timeout_t timeout,
368 const char *caller,
369 int line)
370 #else /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
371 struct net_buf *net_pkt_get_reserve_data(struct net_buf_pool *pool,
372 k_timeout_t timeout)
373 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
374 {
375 struct net_buf *frag;
376
377 /*
378 * The reserve_head variable in the function will tell
379 * the size of the link layer headers if there are any.
380 */
381
382 if (k_is_in_isr()) {
383 frag = net_buf_alloc(pool, K_NO_WAIT);
384 } else {
385 frag = net_buf_alloc(pool, timeout);
386 }
387
388 if (!frag) {
389 return NULL;
390 }
391
392 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
393 NET_FRAG_CHECK_IF_NOT_IN_USE(frag, frag->ref + 1U);
394 #endif
395
396 net_pkt_alloc_add(frag, false, caller, line);
397
398 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
399 NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
400 pool2str(pool), get_name(pool), get_frees(pool),
401 frag, frag->ref, caller, line);
402 #endif
403
404 return frag;
405 }
406
407 /* Get a fragment, trying to figure out the pool from which to get
408 * the data.
409 */
410 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
411 struct net_buf *net_pkt_get_frag_debug(struct net_pkt *pkt,
412 k_timeout_t timeout,
413 const char *caller, int line)
414 #else
415 struct net_buf *net_pkt_get_frag(struct net_pkt *pkt,
416 k_timeout_t timeout)
417 #endif
418 {
419 #if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL)
420 struct net_context *context;
421
422 context = net_pkt_context(pkt);
423 if (context && context->data_pool) {
424 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
425 return net_pkt_get_reserve_data_debug(context->data_pool(),
426 timeout, caller, line);
427 #else
428 return net_pkt_get_reserve_data(context->data_pool(), timeout);
429 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
430 }
431 #endif /* CONFIG_NET_CONTEXT_NET_PKT_POOL */
432
433 if (pkt->slab == &rx_pkts) {
434 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
435 return net_pkt_get_reserve_rx_data_debug(timeout,
436 caller, line);
437 #else
438 return net_pkt_get_reserve_rx_data(timeout);
439 #endif
440 }
441
442 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
443 return net_pkt_get_reserve_tx_data_debug(timeout, caller, line);
444 #else
445 return net_pkt_get_reserve_tx_data(timeout);
446 #endif
447 }
448
449 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
450 struct net_buf *net_pkt_get_reserve_rx_data_debug(k_timeout_t timeout,
451 const char *caller, int line)
452 {
453 return net_pkt_get_reserve_data_debug(&rx_bufs, timeout, caller, line);
454 }
455
456 struct net_buf *net_pkt_get_reserve_tx_data_debug(k_timeout_t timeout,
457 const char *caller, int line)
458 {
459 return net_pkt_get_reserve_data_debug(&tx_bufs, timeout, caller, line);
460 }
461
462 #else /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
463
464 struct net_buf *net_pkt_get_reserve_rx_data(k_timeout_t timeout)
465 {
466 return net_pkt_get_reserve_data(&rx_bufs, timeout);
467 }
468
469 struct net_buf *net_pkt_get_reserve_tx_data(k_timeout_t timeout)
470 {
471 return net_pkt_get_reserve_data(&tx_bufs, timeout);
472 }
473
474 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
475
476
477 #if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL)
478 static inline struct k_mem_slab *get_tx_slab(struct net_context *context)
479 {
480 if (context->tx_slab) {
481 return context->tx_slab();
482 }
483
484 return NULL;
485 }
486
487 static inline struct net_buf_pool *get_data_pool(struct net_context *context)
488 {
489 if (context->data_pool) {
490 return context->data_pool();
491 }
492
493 return NULL;
494 }
495 #else
496 #define get_tx_slab(...) NULL
497 #define get_data_pool(...) NULL
498 #endif /* CONFIG_NET_CONTEXT_NET_PKT_POOL */
499
500 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
501 void net_pkt_unref_debug(struct net_pkt *pkt, const char *caller, int line)
502 {
503 struct net_buf *frag;
504
505 #else
506 void net_pkt_unref(struct net_pkt *pkt)
507 {
508 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
509 atomic_val_t ref;
510
511 if (!pkt) {
512 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
513 NET_ERR("*** ERROR *** pkt %p (%s():%d)", pkt, caller, line);
514 #endif
515 return;
516 }
517
518 do {
519 ref = atomic_get(&pkt->atomic_ref);
520 if (!ref) {
521 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
522 const char *func_freed;
523 int line_freed;
524
525 if (net_pkt_alloc_find(pkt, &func_freed, &line_freed)) {
526 NET_ERR("*** ERROR *** pkt %p is freed already "
527 "by %s():%d (%s():%d)",
528 pkt, func_freed, line_freed, caller,
529 line);
530 } else {
531 NET_ERR("*** ERROR *** pkt %p is freed already "
532 "(%s():%d)", pkt, caller, line);
533 }
534 #endif
535 return;
536 }
537 } while (!atomic_cas(&pkt->atomic_ref, ref, ref - 1));
538
539 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
540 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
541 NET_DBG("%s [%d] pkt %p ref %d frags %p (%s():%d)",
542 slab2str(pkt->slab), k_mem_slab_num_free_get(pkt->slab),
543 pkt, ref - 1, pkt->frags, caller, line);
544 #endif
545 if (ref > 1) {
546 goto done;
547 }
548
549 frag = pkt->frags;
550 while (frag) {
551 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
552 NET_DBG("%s (%s) [%d] frag %p ref %d frags %p (%s():%d)",
553 pool2str(net_buf_pool_get(frag->pool_id)),
554 get_name(net_buf_pool_get(frag->pool_id)),
555 get_frees(net_buf_pool_get(frag->pool_id)), frag,
556 frag->ref - 1U, frag->frags, caller, line);
557 #endif
558
559 if (!frag->ref) {
560 const char *func_freed;
561 int line_freed;
562
563 if (net_pkt_alloc_find(frag,
564 &func_freed, &line_freed)) {
565 NET_ERR("*** ERROR *** frag %p is freed "
566 "already by %s():%d (%s():%d)",
567 frag, func_freed, line_freed,
568 caller, line);
569 } else {
570 NET_ERR("*** ERROR *** frag %p is freed "
571 "already (%s():%d)",
572 frag, caller, line);
573 }
574 }
575
576 net_pkt_alloc_del(frag, caller, line);
577
578 frag = frag->frags;
579 }
580
581 net_pkt_alloc_del(pkt, caller, line);
582 done:
583 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
584
585 if (ref > 1) {
586 return;
587 }
588
589 if (pkt->frags) {
590 net_pkt_frag_unref(pkt->frags);
591 }
592
593 if (IS_ENABLED(CONFIG_NET_DEBUG_NET_PKT_NON_FRAGILE_ACCESS)) {
594 pkt->buffer = NULL;
595 net_pkt_cursor_init(pkt);
596 }
597
598 k_mem_slab_free(pkt->slab, (void **)&pkt);
599 }
600
601 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
602 struct net_pkt *net_pkt_ref_debug(struct net_pkt *pkt, const char *caller,
603 int line)
604 #else
605 struct net_pkt *net_pkt_ref(struct net_pkt *pkt)
606 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
607 {
608 atomic_val_t ref;
609
610 do {
611 ref = pkt ? atomic_get(&pkt->atomic_ref) : 0;
612 if (!ref) {
613 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
614 NET_ERR("*** ERROR *** pkt %p (%s():%d)",
615 pkt, caller, line);
616 #endif
617 return NULL;
618 }
619 } while (!atomic_cas(&pkt->atomic_ref, ref, ref + 1));
620
621 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
622 NET_DBG("%s [%d] pkt %p ref %d (%s():%d)",
623 slab2str(pkt->slab), k_mem_slab_num_free_get(pkt->slab),
624 pkt, ref + 1, caller, line);
625 #endif
626
627
628 return pkt;
629 }
630
631 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
632 struct net_buf *net_pkt_frag_ref_debug(struct net_buf *frag,
633 const char *caller, int line)
634 #else
635 struct net_buf *net_pkt_frag_ref(struct net_buf *frag)
636 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
637 {
638 if (!frag) {
639 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
640 NET_ERR("*** ERROR *** frag %p (%s():%d)", frag, caller, line);
641 #endif
642 return NULL;
643 }
644
645 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
646 NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
647 pool2str(net_buf_pool_get(frag->pool_id)),
648 get_name(net_buf_pool_get(frag->pool_id)),
649 get_frees(net_buf_pool_get(frag->pool_id)),
650 frag, frag->ref + 1U, caller, line);
651 #endif
652
653 return net_buf_ref(frag);
654 }
655
656
657 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
658 void net_pkt_frag_unref_debug(struct net_buf *frag,
659 const char *caller, int line)
660 #else
661 void net_pkt_frag_unref(struct net_buf *frag)
662 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
663 {
664 if (!frag) {
665 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
666 NET_ERR("*** ERROR *** frag %p (%s():%d)", frag, caller, line);
667 #endif
668 return;
669 }
670
671 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
672 NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
673 pool2str(net_buf_pool_get(frag->pool_id)),
674 get_name(net_buf_pool_get(frag->pool_id)),
675 get_frees(net_buf_pool_get(frag->pool_id)),
676 frag, frag->ref - 1U, caller, line);
677 #endif
678
679 if (frag->ref == 1U) {
680 net_pkt_alloc_del(frag, caller, line);
681 }
682
683 net_buf_unref(frag);
684 }
685
686 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
687 struct net_buf *net_pkt_frag_del_debug(struct net_pkt *pkt,
688 struct net_buf *parent,
689 struct net_buf *frag,
690 const char *caller, int line)
691 #else
692 struct net_buf *net_pkt_frag_del(struct net_pkt *pkt,
693 struct net_buf *parent,
694 struct net_buf *frag)
695 #endif
696 {
697 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
698 NET_DBG("pkt %p parent %p frag %p ref %u (%s:%d)",
699 pkt, parent, frag, frag->ref, caller, line);
700 #endif
701
702 if (pkt->frags == frag && !parent) {
703 struct net_buf *tmp;
704
705 if (frag->ref == 1U) {
706 net_pkt_alloc_del(frag, caller, line);
707 }
708
709 tmp = net_buf_frag_del(NULL, frag);
710 pkt->frags = tmp;
711
712 return tmp;
713 }
714
715 if (frag->ref == 1U) {
716 net_pkt_alloc_del(frag, caller, line);
717 }
718
719 return net_buf_frag_del(parent, frag);
720 }
721
722 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
723 void net_pkt_frag_add_debug(struct net_pkt *pkt, struct net_buf *frag,
724 const char *caller, int line)
725 #else
726 void net_pkt_frag_add(struct net_pkt *pkt, struct net_buf *frag)
727 #endif
728 {
729 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
730 NET_DBG("pkt %p frag %p (%s:%d)", pkt, frag, caller, line);
731 #endif
732
733 /* We do not use net_buf_frag_add() as it would take one more
734 * reference on the frag if !pkt->frags
735 */
736 if (!pkt->frags) {
737 pkt->frags = frag;
738 return;
739 }
740
741 net_buf_frag_insert(net_buf_frag_last(pkt->frags), frag);
742 }
743
744 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
745 void net_pkt_frag_insert_debug(struct net_pkt *pkt, struct net_buf *frag,
746 const char *caller, int line)
747 #else
748 void net_pkt_frag_insert(struct net_pkt *pkt, struct net_buf *frag)
749 #endif
750 {
751 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
752 NET_DBG("pkt %p frag %p (%s:%d)", pkt, frag, caller, line);
753 #endif
754
755 net_buf_frag_last(frag)->frags = pkt->frags;
756 pkt->frags = frag;
757 }
758
759 bool net_pkt_compact(struct net_pkt *pkt)
760 {
761 struct net_buf *frag, *prev;
762
763 NET_DBG("Compacting data in pkt %p", pkt);
764
765 frag = pkt->frags;
766 prev = NULL;
767
768 while (frag) {
769 if (frag->frags) {
770 /* Copy as much data as fits from the next fragment into
771 * this fragment.
772 */
773 size_t copy_len;
774
775 copy_len = frag->frags->len;
776 if (copy_len > net_buf_tailroom(frag)) {
777 copy_len = net_buf_tailroom(frag);
778 }
779
780 memcpy(net_buf_tail(frag), frag->frags->data, copy_len);
781 net_buf_add(frag, copy_len);
782
783 memmove(frag->frags->data,
784 frag->frags->data + copy_len,
785 frag->frags->len - copy_len);
786
787 frag->frags->len -= copy_len;
788
789 /* Is there any more space in this fragment? */
790 if (net_buf_tailroom(frag)) {
791 /* There is. This also means that the next
792 * fragment is empty, as otherwise we could
793 * not have copied all the data. Remove the next
794 * fragment as there is no data in it anymore.
795 */
796 net_pkt_frag_del(pkt, frag, frag->frags);
797
798 /* Then check next fragment */
799 continue;
800 }
801 } else {
802 if (!frag->len) {
803 /* Remove the last fragment because there is no
804 * data in it.
805 */
806 net_pkt_frag_del(pkt, prev, frag);
807
808 break;
809 }
810 }
811
812 prev = frag;
813 frag = frag->frags;
814 }
815
816 return true;
817 }
818
819 void net_pkt_get_info(struct k_mem_slab **rx,
820 struct k_mem_slab **tx,
821 struct net_buf_pool **rx_data,
822 struct net_buf_pool **tx_data)
823 {
824 if (rx) {
825 *rx = &rx_pkts;
826 }
827
828 if (tx) {
829 *tx = &tx_pkts;
830 }
831
832 if (rx_data) {
833 *rx_data = &rx_bufs;
834 }
835
836 if (tx_data) {
837 *tx_data = &tx_bufs;
838 }
839 }
840
841 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
842 void net_pkt_print(void)
843 {
844 NET_DBG("TX %u RX %u RDATA %d TDATA %d",
845 k_mem_slab_num_free_get(&tx_pkts),
846 k_mem_slab_num_free_get(&rx_pkts),
847 get_frees(&rx_bufs), get_frees(&tx_bufs));
848 }
849 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC */
850
851 /* New allocator and API starts here */
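/* Typical use of this API (illustrative only; iface is assumed to be a
 * valid struct net_if pointer, payload a local buffer, and error handling
 * is trimmed):
 *
 *	struct net_pkt *pkt;
 *
 *	pkt = net_pkt_alloc_with_buffer(iface, sizeof(payload), AF_INET,
 *					IPPROTO_UDP, K_MSEC(100));
 *	if (pkt) {
 *		net_pkt_write(pkt, payload, sizeof(payload));
 *		...
 *		net_pkt_unref(pkt);
 *	}
 */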
852
853 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
854
855 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
856 static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
857 size_t size, k_timeout_t timeout,
858 const char *caller, int line)
859 #else
860 static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
861 size_t size, k_timeout_t timeout)
862 #endif
863 {
864 uint64_t end = sys_clock_timeout_end_calc(timeout);
865 struct net_buf *first = NULL;
866 struct net_buf *current = NULL;
867
868 while (size) {
869 struct net_buf *new;
870
871 new = net_buf_alloc_fixed(pool, timeout);
872 if (!new) {
873 goto error;
874 }
875
876 if (!first && !current) {
877 first = new;
878 } else {
879 current->frags = new;
880 }
881
882 current = new;
883 if (current->size > size) {
884 current->size = size;
885 }
886
887 size -= current->size;
888
889 if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
890 !K_TIMEOUT_EQ(timeout, K_FOREVER)) {
891 int64_t remaining = end - sys_clock_tick_get();
892
893 if (remaining <= 0) {
894 break;
895 }
896
897 timeout = Z_TIMEOUT_TICKS(remaining);
898 }
899
900 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
901 NET_FRAG_CHECK_IF_NOT_IN_USE(new, new->ref + 1);
902
903 net_pkt_alloc_add(new, false, caller, line);
904
905 NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
906 pool2str(pool), get_name(pool), get_frees(pool),
907 new, new->ref, caller, line);
908 #endif
909 }
910
911 return first;
912 error:
913 if (first) {
914 net_buf_unref(first);
915 }
916
917 return NULL;
918 }
919
920 #else /* !CONFIG_NET_BUF_FIXED_DATA_SIZE */
921
922 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
923 static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
924 size_t size, k_timeout_t timeout,
925 const char *caller, int line)
926 #else
927 static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
928 size_t size, k_timeout_t timeout)
929 #endif
930 {
931 struct net_buf *buf;
932
933 buf = net_buf_alloc_len(pool, size, timeout);
934
935 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
936 NET_FRAG_CHECK_IF_NOT_IN_USE(buf, buf->ref + 1);
937
938 net_pkt_alloc_add(buf, false, caller, line);
939
940 NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
941 pool2str(pool), get_name(pool), get_frees(pool),
942 buf, buf->ref, caller, line);
943 #endif
944
945 return buf;
946 }
947
948 #endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */
949
950 static size_t pkt_buffer_length(struct net_pkt *pkt,
951 size_t size,
952 enum net_ip_protocol proto,
953 size_t existing)
954 {
955 sa_family_t family = net_pkt_family(pkt);
956 size_t max_len;
957
958 if (net_pkt_iface(pkt)) {
959 max_len = net_if_get_mtu(net_pkt_iface(pkt));
960 } else {
961 max_len = 0;
962 }
963
964 /* Family vs iface MTU */
965 if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
966 if (IS_ENABLED(CONFIG_NET_IPV6_FRAGMENT) && (size > max_len)) {
967 /* We support larger packets if IPv6 fragmentation is
968 * enabled.
969 */
970 max_len = size;
971 }
972
973 max_len = MAX(max_len, NET_IPV6_MTU);
974 } else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
975 max_len = MAX(max_len, NET_IPV4_MTU);
976 } else { /* family == AF_UNSPEC */
977 #if defined (CONFIG_NET_L2_ETHERNET)
978 if (net_if_l2(net_pkt_iface(pkt)) ==
979 &NET_L2_GET_NAME(ETHERNET)) {
980 max_len += NET_ETH_MAX_HDR_SIZE;
981 } else
982 #endif /* CONFIG_NET_L2_ETHERNET */
983 {
984 /* Other L2 types are not checked, as the pkt MTU in this case
985 * is based on the IP layer (IPv6 most of the time).
986 */
987 max_len = size;
988 }
989 }
990
991 max_len -= existing;
992
993 return MIN(size, max_len);
994 }
995
996 static size_t pkt_estimate_headers_length(struct net_pkt *pkt,
997 sa_family_t family,
998 enum net_ip_protocol proto)
999 {
1000 size_t hdr_len = 0;
1001
1002 if (family == AF_UNSPEC) {
1003 return 0;
1004 }
1005
1006 /* Family header */
1007 if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
1008 hdr_len += NET_IPV6H_LEN;
1009 } else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
1010 hdr_len += NET_IPV4H_LEN;
1011 }
1012
1013 /* + protocol header */
1014 if (IS_ENABLED(CONFIG_NET_TCP) && proto == IPPROTO_TCP) {
1015 hdr_len += NET_TCPH_LEN + NET_TCP_MAX_OPT_SIZE;
1016 } else if (IS_ENABLED(CONFIG_NET_UDP) && proto == IPPROTO_UDP) {
1017 hdr_len += NET_UDPH_LEN;
1018 } else if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) {
1019 hdr_len += NET_ICMPH_LEN;
1020 }
1021
1022 NET_DBG("HDRs length estimation %zu", hdr_len);
1023
1024 return hdr_len;
1025 }
1026
1027 static size_t pkt_get_max_len(struct net_pkt *pkt)
1028 {
1029 struct net_buf *buf = pkt->buffer;
1030 size_t size = 0;
1031
1032 while (buf) {
1033 size += net_buf_max_len(buf);
1034 buf = buf->frags;
1035 }
1036
1037 return size;
1038 }
1039
1040 size_t net_pkt_available_buffer(struct net_pkt *pkt)
1041 {
1042 if (!pkt) {
1043 return 0;
1044 }
1045
1046 return pkt_get_max_len(pkt) - net_pkt_get_len(pkt);
1047 }
1048
1049 size_t net_pkt_available_payload_buffer(struct net_pkt *pkt,
1050 enum net_ip_protocol proto)
1051 {
1052 size_t hdr_len = 0;
1053 size_t len;
1054
1055 if (!pkt) {
1056 return 0;
1057 }
1058
1059 hdr_len = pkt_estimate_headers_length(pkt, net_pkt_family(pkt), proto);
1060 len = net_pkt_get_len(pkt);
1061
1062 hdr_len = hdr_len <= len ? 0 : hdr_len - len;
1063
1064 len = net_pkt_available_buffer(pkt) - hdr_len;
1065
1066 return len;
1067 }
1068
1069 void net_pkt_trim_buffer(struct net_pkt *pkt)
1070 {
1071 struct net_buf *buf, *prev;
1072
1073 buf = pkt->buffer;
1074 prev = buf;
1075
1076 while (buf) {
1077 struct net_buf *next = buf->frags;
1078
1079 if (!buf->len) {
1080 if (buf == pkt->buffer) {
1081 pkt->buffer = next;
1082 } else if (buf == prev->frags) {
1083 prev->frags = next;
1084 }
1085
1086 buf->frags = NULL;
1087 net_buf_unref(buf);
1088 } else {
1089 prev = buf;
1090 }
1091
1092 buf = next;
1093 }
1094 }
1095
1096 int net_pkt_remove_tail(struct net_pkt *pkt, size_t length)
1097 {
1098 struct net_buf *buf = pkt->buffer;
1099 size_t remaining_len = net_pkt_get_len(pkt);
1100
1101 if (remaining_len < length) {
1102 return -EINVAL;
1103 }
1104
1105 remaining_len -= length;
1106
1107 while (buf) {
1108 if (buf->len >= remaining_len) {
1109 buf->len = remaining_len;
1110
1111 if (buf->frags) {
1112 net_pkt_frag_unref(buf->frags);
1113 buf->frags = NULL;
1114 }
1115
1116 break;
1117 }
1118
1119 remaining_len -= buf->len;
1120 buf = buf->frags;
1121 }
1122
1123 return 0;
1124 }
1125
1126 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1127 int net_pkt_alloc_buffer_debug(struct net_pkt *pkt,
1128 size_t size,
1129 enum net_ip_protocol proto,
1130 k_timeout_t timeout,
1131 const char *caller,
1132 int line)
1133 #else
1134 int net_pkt_alloc_buffer(struct net_pkt *pkt,
1135 size_t size,
1136 enum net_ip_protocol proto,
1137 k_timeout_t timeout)
1138 #endif
1139 {
1140 uint64_t end = sys_clock_timeout_end_calc(timeout);
1141 struct net_buf_pool *pool = NULL;
1142 size_t alloc_len = 0;
1143 size_t hdr_len = 0;
1144 struct net_buf *buf;
1145
1146 if (!size && proto == 0 && net_pkt_family(pkt) == AF_UNSPEC) {
1147 return 0;
1148 }
1149
1150 if (k_is_in_isr()) {
1151 timeout = K_NO_WAIT;
1152 }
1153
1154 /* Verify the existing buffer and take into account any free space there */
1155 alloc_len = net_pkt_available_buffer(pkt);
1156 if (!alloc_len) {
1157 /* If there is no free space, account for the estimated
1158 * header space.
1159 */
1160 hdr_len = pkt_estimate_headers_length(pkt,
1161 net_pkt_family(pkt),
1162 proto);
1163 }
1164
1165 /* Calculate the maximum that can be allocated depending on size */
1166 alloc_len = pkt_buffer_length(pkt, size + hdr_len, proto, alloc_len);
1167
1168 NET_DBG("Data allocation maximum size %zu (requested %zu)",
1169 alloc_len, size);
1170
1171 if (pkt->context) {
1172 pool = get_data_pool(pkt->context);
1173 }
1174
1175 if (!pool) {
1176 pool = pkt->slab == &tx_pkts ? &tx_bufs : &rx_bufs;
1177 }
1178
1179 if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
1180 !K_TIMEOUT_EQ(timeout, K_FOREVER)) {
1181 int64_t remaining = end - sys_clock_tick_get();
1182
1183 if (remaining <= 0) {
1184 timeout = K_NO_WAIT;
1185 } else {
1186 timeout = Z_TIMEOUT_TICKS(remaining);
1187 }
1188 }
1189
1190 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1191 buf = pkt_alloc_buffer(pool, alloc_len, timeout, caller, line);
1192 #else
1193 buf = pkt_alloc_buffer(pool, alloc_len, timeout);
1194 #endif
1195
1196 if (!buf) {
1197 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1198 NET_ERR("Data buffer (%zd) allocation failed (%s:%d)",
1199 alloc_len, caller, line);
1200 #else
1201 NET_ERR("Data buffer (%zd) allocation failed.", alloc_len);
1202 #endif
1203 return -ENOMEM;
1204 }
1205
1206 net_pkt_append_buffer(pkt, buf);
1207
1208 return 0;
1209 }
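/* Two-step allocation sketch (illustrative; iface and len are assumed):
 * a packet can also be allocated first and its data buffer attached later,
 * which is roughly what net_pkt_alloc_with_buffer() does internally:
 *
 *	struct net_pkt *pkt = net_pkt_alloc_on_iface(iface, K_NO_WAIT);
 *
 *	if (pkt && net_pkt_alloc_buffer(pkt, len, IPPROTO_UDP, K_NO_WAIT)) {
 *		net_pkt_unref(pkt);
 *		pkt = NULL;
 *	}
 */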
1210
1211 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1212 static struct net_pkt *pkt_alloc(struct k_mem_slab *slab, k_timeout_t timeout,
1213 const char *caller, int line)
1214 #else
1215 static struct net_pkt *pkt_alloc(struct k_mem_slab *slab, k_timeout_t timeout)
1216 #endif
1217 {
1218 struct net_pkt *pkt;
1219 uint32_t create_time;
1220 int ret;
1221
1222 if (k_is_in_isr()) {
1223 timeout = K_NO_WAIT;
1224 }
1225
1226 if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS) ||
1227 IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS)) {
1228 create_time = k_cycle_get_32();
1229 } else {
1230 ARG_UNUSED(create_time);
1231 }
1232
1233 ret = k_mem_slab_alloc(slab, (void **)&pkt, timeout);
1234 if (ret) {
1235 return NULL;
1236 }
1237
1238 memset(pkt, 0, sizeof(struct net_pkt));
1239
1240 pkt->atomic_ref = ATOMIC_INIT(1);
1241 pkt->slab = slab;
1242
1243 if (IS_ENABLED(CONFIG_NET_IPV6)) {
1244 net_pkt_set_ipv6_next_hdr(pkt, 255);
1245 }
1246
1247 #if IS_ENABLED(CONFIG_NET_TX_DEFAULT_PRIORITY)
1248 #define TX_DEFAULT_PRIORITY CONFIG_NET_TX_DEFAULT_PRIORITY
1249 #else
1250 #define TX_DEFAULT_PRIORITY 0
1251 #endif
1252
1253 #if IS_ENABLED(CONFIG_NET_RX_DEFAULT_PRIORITY)
1254 #define RX_DEFAULT_PRIORITY CONFIG_NET_RX_DEFAULT_PRIORITY
1255 #else
1256 #define RX_DEFAULT_PRIORITY 0
1257 #endif
1258
1259 if (&tx_pkts == slab) {
1260 net_pkt_set_priority(pkt, TX_DEFAULT_PRIORITY);
1261 } else if (&rx_pkts == slab) {
1262 net_pkt_set_priority(pkt, RX_DEFAULT_PRIORITY);
1263 }
1264
1265 if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS) ||
1266 IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS)) {
1267 net_pkt_set_create_time(pkt, create_time);
1268 }
1269
1270 net_pkt_set_vlan_tag(pkt, NET_VLAN_TAG_UNSPEC);
1271
1272 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1273 net_pkt_alloc_add(pkt, true, caller, line);
1274 #endif
1275
1276 net_pkt_cursor_init(pkt);
1277
1278 return pkt;
1279 }
1280
1281 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1282 struct net_pkt *net_pkt_alloc_debug(k_timeout_t timeout,
1283 const char *caller, int line)
1284 #else
1285 struct net_pkt *net_pkt_alloc(k_timeout_t timeout)
1286 #endif
1287 {
1288 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1289 return pkt_alloc(&tx_pkts, timeout, caller, line);
1290 #else
1291 return pkt_alloc(&tx_pkts, timeout);
1292 #endif
1293 }
1294
1295 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1296 struct net_pkt *net_pkt_alloc_from_slab_debug(struct k_mem_slab *slab,
1297 k_timeout_t timeout,
1298 const char *caller, int line)
1299 #else
1300 struct net_pkt *net_pkt_alloc_from_slab(struct k_mem_slab *slab,
1301 k_timeout_t timeout)
1302 #endif
1303 {
1304 if (!slab) {
1305 return NULL;
1306 }
1307
1308 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1309 return pkt_alloc(slab, timeout, caller, line);
1310 #else
1311 return pkt_alloc(slab, timeout);
1312 #endif
1313 }
1314
1315 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1316 struct net_pkt *net_pkt_rx_alloc_debug(k_timeout_t timeout,
1317 const char *caller, int line)
1318 #else
1319 struct net_pkt *net_pkt_rx_alloc(k_timeout_t timeout)
1320 #endif
1321 {
1322 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1323 return pkt_alloc(&rx_pkts, timeout, caller, line);
1324 #else
1325 return pkt_alloc(&rx_pkts, timeout);
1326 #endif
1327 }
1328
1329 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1330 static struct net_pkt *pkt_alloc_on_iface(struct k_mem_slab *slab,
1331 struct net_if *iface,
1332 k_timeout_t timeout,
1333 const char *caller, int line)
1334 #else
1335 static struct net_pkt *pkt_alloc_on_iface(struct k_mem_slab *slab,
1336 struct net_if *iface,
1337 k_timeout_t timeout)
1338
1339 #endif
1340 {
1341 struct net_pkt *pkt;
1342
1343 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1344 pkt = pkt_alloc(slab, timeout, caller, line);
1345 #else
1346 pkt = pkt_alloc(slab, timeout);
1347 #endif
1348
1349 if (pkt) {
1350 net_pkt_set_iface(pkt, iface);
1351 }
1352
1353 return pkt;
1354 }
1355
1356 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1357 struct net_pkt *net_pkt_alloc_on_iface_debug(struct net_if *iface,
1358 k_timeout_t timeout,
1359 const char *caller,
1360 int line)
1361 #else
1362 struct net_pkt *net_pkt_alloc_on_iface(struct net_if *iface,
1363 k_timeout_t timeout)
1364 #endif
1365 {
1366 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1367 return pkt_alloc_on_iface(&tx_pkts, iface, timeout, caller, line);
1368 #else
1369 return pkt_alloc_on_iface(&tx_pkts, iface, timeout);
1370 #endif
1371 }
1372
1373 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1374 struct net_pkt *net_pkt_rx_alloc_on_iface_debug(struct net_if *iface,
1375 k_timeout_t timeout,
1376 const char *caller,
1377 int line)
1378 #else
1379 struct net_pkt *net_pkt_rx_alloc_on_iface(struct net_if *iface,
1380 k_timeout_t timeout)
1381 #endif
1382 {
1383 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1384 return pkt_alloc_on_iface(&rx_pkts, iface, timeout, caller, line);
1385 #else
1386 return pkt_alloc_on_iface(&rx_pkts, iface, timeout);
1387 #endif
1388 }
1389
1390 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1391 static struct net_pkt *
1392 pkt_alloc_with_buffer(struct k_mem_slab *slab,
1393 struct net_if *iface,
1394 size_t size,
1395 sa_family_t family,
1396 enum net_ip_protocol proto,
1397 k_timeout_t timeout,
1398 const char *caller,
1399 int line)
1400 #else
1401 static struct net_pkt *
1402 pkt_alloc_with_buffer(struct k_mem_slab *slab,
1403 struct net_if *iface,
1404 size_t size,
1405 sa_family_t family,
1406 enum net_ip_protocol proto,
1407 k_timeout_t timeout)
1408 #endif
1409 {
1410 uint64_t end = sys_clock_timeout_end_calc(timeout);
1411 struct net_pkt *pkt;
1412 int ret;
1413
1414 NET_DBG("On iface %p size %zu", iface, size);
1415
1416 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1417 pkt = pkt_alloc_on_iface(slab, iface, timeout, caller, line);
1418 #else
1419 pkt = pkt_alloc_on_iface(slab, iface, timeout);
1420 #endif
1421
1422 if (!pkt) {
1423 return NULL;
1424 }
1425
1426 net_pkt_set_family(pkt, family);
1427
1428 if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
1429 !K_TIMEOUT_EQ(timeout, K_FOREVER)) {
1430 int64_t remaining = end - sys_clock_tick_get();
1431
1432 if (remaining <= 0) {
1433 timeout = K_NO_WAIT;
1434 } else {
1435 timeout = Z_TIMEOUT_TICKS(remaining);
1436 }
1437 }
1438
1439 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1440 ret = net_pkt_alloc_buffer_debug(pkt, size, proto, timeout,
1441 caller, line);
1442 #else
1443 ret = net_pkt_alloc_buffer(pkt, size, proto, timeout);
1444 #endif
1445
1446 if (ret) {
1447 net_pkt_unref(pkt);
1448 return NULL;
1449 }
1450
1451 return pkt;
1452 }
1453
1454 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1455 struct net_pkt *net_pkt_alloc_with_buffer_debug(struct net_if *iface,
1456 size_t size,
1457 sa_family_t family,
1458 enum net_ip_protocol proto,
1459 k_timeout_t timeout,
1460 const char *caller,
1461 int line)
1462 #else
1463 struct net_pkt *net_pkt_alloc_with_buffer(struct net_if *iface,
1464 size_t size,
1465 sa_family_t family,
1466 enum net_ip_protocol proto,
1467 k_timeout_t timeout)
1468 #endif
1469 {
1470 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1471 return pkt_alloc_with_buffer(&tx_pkts, iface, size, family,
1472 proto, timeout, caller, line);
1473 #else
1474 return pkt_alloc_with_buffer(&tx_pkts, iface, size, family,
1475 proto, timeout);
1476 #endif
1477 }
1478
1479 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1480 struct net_pkt *net_pkt_rx_alloc_with_buffer_debug(struct net_if *iface,
1481 size_t size,
1482 sa_family_t family,
1483 enum net_ip_protocol proto,
1484 k_timeout_t timeout,
1485 const char *caller,
1486 int line)
1487 #else
1488 struct net_pkt *net_pkt_rx_alloc_with_buffer(struct net_if *iface,
1489 size_t size,
1490 sa_family_t family,
1491 enum net_ip_protocol proto,
1492 k_timeout_t timeout)
1493 #endif
1494 {
1495 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1496 return pkt_alloc_with_buffer(&rx_pkts, iface, size, family,
1497 proto, timeout, caller, line);
1498 #else
1499 return pkt_alloc_with_buffer(&rx_pkts, iface, size, family,
1500 proto, timeout);
1501 #endif
1502 }
1503
1504 void net_pkt_append_buffer(struct net_pkt *pkt, struct net_buf *buffer)
1505 {
1506 if (!pkt->buffer) {
1507 pkt->buffer = buffer;
1508 net_pkt_cursor_init(pkt);
1509 } else {
1510 net_buf_frag_insert(net_buf_frag_last(pkt->buffer), buffer);
1511 }
1512 }
1513
1514 void net_pkt_cursor_init(struct net_pkt *pkt)
1515 {
1516 pkt->cursor.buf = pkt->buffer;
1517 if (pkt->cursor.buf) {
1518 pkt->cursor.pos = pkt->cursor.buf->data;
1519 } else {
1520 pkt->cursor.pos = NULL;
1521 }
1522 }
1523
1524 static void pkt_cursor_jump(struct net_pkt *pkt, bool write)
1525 {
1526 struct net_pkt_cursor *cursor = &pkt->cursor;
1527
1528 cursor->buf = cursor->buf->frags;
1529 while (cursor->buf) {
1530 const size_t len =
1531 write ? net_buf_max_len(cursor->buf) : cursor->buf->len;
1532
1533 if (!len) {
1534 cursor->buf = cursor->buf->frags;
1535 } else {
1536 break;
1537 }
1538 }
1539
1540 if (cursor->buf) {
1541 cursor->pos = cursor->buf->data;
1542 } else {
1543 cursor->pos = NULL;
1544 }
1545 }
1546
1547 static void pkt_cursor_advance(struct net_pkt *pkt, bool write)
1548 {
1549 struct net_pkt_cursor *cursor = &pkt->cursor;
1550 size_t len;
1551
1552 if (!cursor->buf) {
1553 return;
1554 }
1555
1556 len = write ? net_buf_max_len(cursor->buf) : cursor->buf->len;
1557 if ((cursor->pos - cursor->buf->data) == len) {
1558 pkt_cursor_jump(pkt, write);
1559 }
1560 }
1561
1562 static void pkt_cursor_update(struct net_pkt *pkt,
1563 size_t length, bool write)
1564 {
1565 struct net_pkt_cursor *cursor = &pkt->cursor;
1566 size_t len;
1567
1568 if (net_pkt_is_being_overwritten(pkt)) {
1569 write = false;
1570 }
1571
1572 len = write ? net_buf_max_len(cursor->buf) : cursor->buf->len;
1573 if (length + (cursor->pos - cursor->buf->data) == len &&
1574 !(net_pkt_is_being_overwritten(pkt) &&
1575 len < net_buf_max_len(cursor->buf))) {
1576 pkt_cursor_jump(pkt, write);
1577 } else {
1578 cursor->pos += length;
1579 }
1580 }
1581
1582 /* Internal function that does all operations (skip/read/write/memset) */
1583 static int net_pkt_cursor_operate(struct net_pkt *pkt,
1584 void *data, size_t length,
1585 bool copy, bool write)
1586 {
1587 /* We use this variable to avoid overly long lines */
1588 struct net_pkt_cursor *c_op = &pkt->cursor;
1589
1590 while (c_op->buf && length) {
1591 size_t d_len, len;
1592
1593 pkt_cursor_advance(pkt, net_pkt_is_being_overwritten(pkt) ?
1594 false : write);
1595 if (c_op->buf == NULL) {
1596 break;
1597 }
1598
1599 if (write && !net_pkt_is_being_overwritten(pkt)) {
1600 d_len = net_buf_max_len(c_op->buf) -
1601 (c_op->pos - c_op->buf->data);
1602 } else {
1603 d_len = c_op->buf->len - (c_op->pos - c_op->buf->data);
1604 }
1605
1606 if (!d_len) {
1607 break;
1608 }
1609
1610 if (length < d_len) {
1611 len = length;
1612 } else {
1613 len = d_len;
1614 }
1615
1616 if (copy) {
1617 memcpy(write ? c_op->pos : data,
1618 write ? data : c_op->pos,
1619 len);
1620 } else if (data) {
1621 memset(c_op->pos, *(int *)data, len);
1622 }
1623
1624 if (write && !net_pkt_is_being_overwritten(pkt)) {
1625 net_buf_add(c_op->buf, len);
1626 }
1627
1628 pkt_cursor_update(pkt, len, write);
1629
1630 if (copy && data) {
1631 data = (uint8_t *) data + len;
1632 }
1633
1634 length -= len;
1635 }
1636
1637 if (length) {
1638 NET_DBG("Still some length to go %zu", length);
1639 return -ENOBUFS;
1640 }
1641
1642 return 0;
1643 }
1644
1645 int net_pkt_skip(struct net_pkt *pkt, size_t skip)
1646 {
1647 NET_DBG("pkt %p skip %zu", pkt, skip);
1648
1649 return net_pkt_cursor_operate(pkt, NULL, skip, false, true);
1650 }
1651
1652 int net_pkt_memset(struct net_pkt *pkt, int byte, size_t amount)
1653 {
1654 NET_DBG("pkt %p byte %d amount %zu", pkt, byte, amount);
1655
1656 return net_pkt_cursor_operate(pkt, &byte, amount, false, true);
1657 }
1658
1659 int net_pkt_read(struct net_pkt *pkt, void *data, size_t length)
1660 {
1661 NET_DBG("pkt %p data %p length %zu", pkt, data, length);
1662
1663 return net_pkt_cursor_operate(pkt, data, length, true, false);
1664 }
1665
1666 int net_pkt_read_be16(struct net_pkt *pkt, uint16_t *data)
1667 {
1668 uint8_t d16[2];
1669 int ret;
1670
1671 ret = net_pkt_read(pkt, d16, sizeof(uint16_t));
1672
1673 *data = d16[0] << 8 | d16[1];
1674
1675 return ret;
1676 }
1677
1678 int net_pkt_read_le16(struct net_pkt *pkt, uint16_t *data)
1679 {
1680 uint8_t d16[2];
1681 int ret;
1682
1683 ret = net_pkt_read(pkt, d16, sizeof(uint16_t));
1684
1685 *data = d16[1] << 8 | d16[0];
1686
1687 return ret;
1688 }
1689
1690 int net_pkt_read_be32(struct net_pkt *pkt, uint32_t *data)
1691 {
1692 uint8_t d32[4];
1693 int ret;
1694
1695 ret = net_pkt_read(pkt, d32, sizeof(uint32_t));
1696
1697 *data = d32[0] << 24 | d32[1] << 16 | d32[2] << 8 | d32[3];
1698
1699 return ret;
1700 }
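/* Cursor read sketch (illustrative; pkt is assumed to already hold received
 * data and hdr_offset is a hypothetical offset): the cursor starts at the
 * beginning of the buffer and every skip/read advances it, so a 16-bit
 * field can be extracted like this:
 *
 *	uint16_t port;
 *
 *	net_pkt_cursor_init(pkt);
 *	net_pkt_set_overwrite(pkt, true);
 *	net_pkt_skip(pkt, hdr_offset);
 *	net_pkt_read_be16(pkt, &port);
 */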
1701
1702 int net_pkt_write(struct net_pkt *pkt, const void *data, size_t length)
1703 {
1704 NET_DBG("pkt %p data %p length %zu", pkt, data, length);
1705
1706 if (data == pkt->cursor.pos && net_pkt_is_contiguous(pkt, length)) {
1707 return net_pkt_skip(pkt, length);
1708 }
1709
1710 return net_pkt_cursor_operate(pkt, (void *)data, length, true, true);
1711 }
1712
1713 int net_pkt_copy(struct net_pkt *pkt_dst,
1714 struct net_pkt *pkt_src,
1715 size_t length)
1716 {
1717 struct net_pkt_cursor *c_dst = &pkt_dst->cursor;
1718 struct net_pkt_cursor *c_src = &pkt_src->cursor;
1719
1720 while (c_dst->buf && c_src->buf && length) {
1721 size_t s_len, d_len, len;
1722
1723 pkt_cursor_advance(pkt_dst, true);
1724 pkt_cursor_advance(pkt_src, false);
1725
1726 if (!c_dst->buf || !c_src->buf) {
1727 break;
1728 }
1729
1730 s_len = c_src->buf->len - (c_src->pos - c_src->buf->data);
1731 d_len = net_buf_max_len(c_dst->buf) - (c_dst->pos - c_dst->buf->data);
1732 if (length < s_len && length < d_len) {
1733 len = length;
1734 } else {
1735 if (d_len < s_len) {
1736 len = d_len;
1737 } else {
1738 len = s_len;
1739 }
1740 }
1741
1742 if (!len) {
1743 break;
1744 }
1745
1746 memcpy(c_dst->pos, c_src->pos, len);
1747
1748 if (!net_pkt_is_being_overwritten(pkt_dst)) {
1749 net_buf_add(c_dst->buf, len);
1750 }
1751
1752 pkt_cursor_update(pkt_dst, len, true);
1753 pkt_cursor_update(pkt_src, len, false);
1754
1755 length -= len;
1756 }
1757
1758 if (length) {
1759 NET_DBG("Still some length to go %zu", length);
1760 return -ENOBUFS;
1761 }
1762
1763 return 0;
1764 }
1765
1766 static void clone_pkt_attributes(struct net_pkt *pkt, struct net_pkt *clone_pkt)
1767 {
1768 net_pkt_set_family(clone_pkt, net_pkt_family(pkt));
1769 net_pkt_set_context(clone_pkt, net_pkt_context(pkt));
1770 net_pkt_set_ip_hdr_len(clone_pkt, net_pkt_ip_hdr_len(pkt));
1771 net_pkt_set_vlan_tag(clone_pkt, net_pkt_vlan_tag(pkt));
1772 net_pkt_set_timestamp(clone_pkt, net_pkt_timestamp(pkt));
1773 net_pkt_set_priority(clone_pkt, net_pkt_priority(pkt));
1774 net_pkt_set_orig_iface(clone_pkt, net_pkt_orig_iface(pkt));
1775 net_pkt_set_captured(clone_pkt, net_pkt_is_captured(pkt));
1776 net_pkt_set_l2_bridged(clone_pkt, net_pkt_is_l2_bridged(pkt));
1777
1778 if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) {
1779 net_pkt_set_ipv4_ttl(clone_pkt, net_pkt_ipv4_ttl(pkt));
1780 net_pkt_set_ipv4_opts_len(clone_pkt,
1781 net_pkt_ipv4_opts_len(pkt));
1782 } else if (IS_ENABLED(CONFIG_NET_IPV6) &&
1783 net_pkt_family(pkt) == AF_INET6) {
1784 net_pkt_set_ipv6_hop_limit(clone_pkt,
1785 net_pkt_ipv6_hop_limit(pkt));
1786 net_pkt_set_ipv6_ext_len(clone_pkt, net_pkt_ipv6_ext_len(pkt));
1787 net_pkt_set_ipv6_ext_opt_len(clone_pkt,
1788 net_pkt_ipv6_ext_opt_len(pkt));
1789 net_pkt_set_ipv6_hdr_prev(clone_pkt,
1790 net_pkt_ipv6_hdr_prev(pkt));
1791 net_pkt_set_ipv6_next_hdr(clone_pkt,
1792 net_pkt_ipv6_next_hdr(pkt));
1793 }
1794 }
1795
1796 struct net_pkt *net_pkt_clone(struct net_pkt *pkt, k_timeout_t timeout)
1797 {
1798 size_t cursor_offset = net_pkt_get_current_offset(pkt);
1799 struct net_pkt *clone_pkt;
1800 struct net_pkt_cursor backup;
1801
1802 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1803 clone_pkt = pkt_alloc_with_buffer(pkt->slab, net_pkt_iface(pkt),
1804 net_pkt_get_len(pkt),
1805 AF_UNSPEC, 0, timeout,
1806 __func__, __LINE__);
1807 #else
1808 clone_pkt = pkt_alloc_with_buffer(pkt->slab, net_pkt_iface(pkt),
1809 net_pkt_get_len(pkt),
1810 AF_UNSPEC, 0, timeout);
1811 #endif
1812 if (!clone_pkt) {
1813 return NULL;
1814 }
1815
1816 net_pkt_cursor_backup(pkt, &backup);
1817 net_pkt_cursor_init(pkt);
1818
1819 if (net_pkt_copy(clone_pkt, pkt, net_pkt_get_len(pkt))) {
1820 net_pkt_unref(clone_pkt);
1821 net_pkt_cursor_restore(pkt, &backup);
1822 return NULL;
1823 }
1824
1825 if (clone_pkt->buffer) {
1826 /* The link header pointers are only usable if there is
1827 * a buffer that we copied, because those pointers point
1828 * to the start of the fragment, which we do not have right now.
1829 */
1830 memcpy(&clone_pkt->lladdr_src, &pkt->lladdr_src,
1831 sizeof(clone_pkt->lladdr_src));
1832 memcpy(&clone_pkt->lladdr_dst, &pkt->lladdr_dst,
1833 sizeof(clone_pkt->lladdr_dst));
1834 }
1835
1836 clone_pkt_attributes(pkt, clone_pkt);
1837
1838 net_pkt_cursor_init(clone_pkt);
1839
1840 if (cursor_offset) {
1841 net_pkt_set_overwrite(clone_pkt, true);
1842 net_pkt_skip(clone_pkt, cursor_offset);
1843 }
1844
1845 net_pkt_cursor_restore(pkt, &backup);
1846
1847 NET_DBG("Cloned %p to %p", pkt, clone_pkt);
1848
1849 return clone_pkt;
1850 }
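/* Cloning sketch (illustrative): a deep clone copies the data into newly
 * allocated buffers, whereas net_pkt_shallow_clone() below only takes
 * additional references on the existing fragments:
 *
 *	struct net_pkt *copy = net_pkt_clone(pkt, K_MSEC(100));
 */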
1851
1852 struct net_pkt *net_pkt_shallow_clone(struct net_pkt *pkt, k_timeout_t timeout)
1853 {
1854 struct net_pkt *clone_pkt;
1855 struct net_buf *buf;
1856
1857 clone_pkt = net_pkt_alloc(timeout);
1858 if (!clone_pkt) {
1859 return NULL;
1860 }
1861
1862 net_pkt_set_iface(clone_pkt, net_pkt_iface(pkt));
1863 clone_pkt->buffer = pkt->buffer;
1864 buf = pkt->buffer;
1865
1866 while (buf) {
1867 net_pkt_frag_ref(buf);
1868 buf = buf->frags;
1869 }
1870
1871 if (pkt->buffer) {
1872 /* The link header pointers are only usable if there is
1873 * a buffer that we copied, because those pointers point
1874 * to the start of the fragment, which we do not have right now.
1875 */
1876 memcpy(&clone_pkt->lladdr_src, &pkt->lladdr_src,
1877 sizeof(clone_pkt->lladdr_src));
1878 memcpy(&clone_pkt->lladdr_dst, &pkt->lladdr_dst,
1879 sizeof(clone_pkt->lladdr_dst));
1880 }
1881
1882 clone_pkt_attributes(pkt, clone_pkt);
1883
1884 net_pkt_cursor_restore(clone_pkt, &pkt->cursor);
1885
1886 NET_DBG("Shallow cloned %p to %p", pkt, clone_pkt);
1887
1888 return clone_pkt;
1889 }
1890
1891 size_t net_pkt_remaining_data(struct net_pkt *pkt)
1892 {
1893 struct net_buf *buf;
1894 size_t data_length;
1895
1896 if (!pkt || !pkt->cursor.buf || !pkt->cursor.pos) {
1897 return 0;
1898 }
1899
1900 buf = pkt->cursor.buf;
1901 data_length = buf->len - (pkt->cursor.pos - buf->data);
1902
1903 buf = buf->frags;
1904 while (buf) {
1905 data_length += buf->len;
1906 buf = buf->frags;
1907 }
1908
1909 return data_length;
1910 }
1911
1912 int net_pkt_update_length(struct net_pkt *pkt, size_t length)
1913 {
1914 struct net_buf *buf;
1915
1916 for (buf = pkt->buffer; buf; buf = buf->frags) {
1917 if (buf->len < length) {
1918 length -= buf->len;
1919 } else {
1920 buf->len = length;
1921 length = 0;
1922 }
1923 }
1924
1925 return !length ? 0 : -EINVAL;
1926 }
1927
1928 int net_pkt_pull(struct net_pkt *pkt, size_t length)
1929 {
1930 struct net_pkt_cursor *c_op = &pkt->cursor;
1931
1932 while (length) {
1933 size_t left, rem;
1934
1935 pkt_cursor_advance(pkt, false);
1936
1937 if (!c_op->buf) {
1938 break;
1939 }
1940
1941 left = c_op->buf->len - (c_op->pos - c_op->buf->data);
1942 if (!left) {
1943 break;
1944 }
1945
1946 rem = left;
1947 if (rem > length) {
1948 rem = length;
1949 }
1950
1951 c_op->buf->len -= rem;
1952 left -= rem;
1953 if (left) {
1954 memmove(c_op->pos, c_op->pos+rem, left);
1955 } else {
1956 struct net_buf *buf = pkt->buffer;
1957
1958 if (buf) {
1959 pkt->buffer = buf->frags;
1960 buf->frags = NULL;
1961 net_buf_unref(buf);
1962 }
1963
1964 net_pkt_cursor_init(pkt);
1965 }
1966
1967 length -= rem;
1968 }
1969
1970 net_pkt_cursor_init(pkt);
1971
1972 if (length) {
1973 NET_DBG("Still some length to go %zu", length);
1974 return -ENOBUFS;
1975 }
1976
1977 return 0;
1978 }
1979
1980 uint16_t net_pkt_get_current_offset(struct net_pkt *pkt)
1981 {
1982 struct net_buf *buf = pkt->buffer;
1983 uint16_t offset;
1984
1985 if (!pkt->cursor.buf || !pkt->cursor.pos) {
1986 return 0;
1987 }
1988
1989 offset = 0U;
1990
1991 while (buf != pkt->cursor.buf) {
1992 offset += buf->len;
1993 buf = buf->frags;
1994 }
1995
1996 offset += pkt->cursor.pos - buf->data;
1997
1998 return offset;
1999 }
2000
2001 bool net_pkt_is_contiguous(struct net_pkt *pkt, size_t size)
2002 {
2003 size_t len = net_pkt_get_contiguous_len(pkt);
2004
2005 return len >= size;
2006 }
2007
2008 size_t net_pkt_get_contiguous_len(struct net_pkt *pkt)
2009 {
2010 pkt_cursor_advance(pkt, !net_pkt_is_being_overwritten(pkt));
2011
2012 if (pkt->cursor.buf && pkt->cursor.pos) {
2013 size_t len;
2014
2015 len = net_pkt_is_being_overwritten(pkt) ?
2016 pkt->cursor.buf->len : pkt->cursor.buf->size;
2017 len -= pkt->cursor.pos - pkt->cursor.buf->data;
2018 return len;
2019 }
2020
2021 return 0;
2022 }
2023
2024 void *net_pkt_get_data(struct net_pkt *pkt,
2025 struct net_pkt_data_access *access)
2026 {
2027 if (IS_ENABLED(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)) {
2028 if (!net_pkt_is_contiguous(pkt, access->size)) {
2029 return NULL;
2030 }
2031
2032 return pkt->cursor.pos;
2033 } else {
2034 if (net_pkt_is_contiguous(pkt, access->size)) {
2035 access->data = pkt->cursor.pos;
2036 } else if (net_pkt_is_being_overwritten(pkt)) {
2037 struct net_pkt_cursor backup;
2038
2039 if (!access->data) {
2040 NET_ERR("Uncontiguous data"
2041 " cannot be linearized");
2042 return NULL;
2043 }
2044
2045 net_pkt_cursor_backup(pkt, &backup);
2046
2047 if (net_pkt_read(pkt, access->data, access->size)) {
2048 net_pkt_cursor_restore(pkt, &backup);
2049 return NULL;
2050 }
2051
2052 net_pkt_cursor_restore(pkt, &backup);
2053 }
2054
2055 return access->data;
2056 }
2057
2058 return NULL;
2059 }
2060
2061 int net_pkt_set_data(struct net_pkt *pkt,
2062 struct net_pkt_data_access *access)
2063 {
2064 if (IS_ENABLED(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)) {
2065 return net_pkt_skip(pkt, access->size);
2066 }
2067
2068 return net_pkt_write(pkt, access->data, access->size);
2069 }
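/* Header access sketch (illustrative; NET_PKT_DATA_ACCESS_DEFINE is the
 * descriptor helper from net_pkt.h and the UDP header is only an example):
 *
 *	NET_PKT_DATA_ACCESS_DEFINE(udp_access, struct net_udp_hdr);
 *	struct net_udp_hdr *hdr;
 *
 *	hdr = (struct net_udp_hdr *)net_pkt_get_data(pkt, &udp_access);
 *	if (hdr) {
 *		hdr->src_port = htons(12345);
 *		net_pkt_set_data(pkt, &udp_access);
 *	}
 */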
2070
2071 void net_pkt_init(void)
2072 {
2073 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
2074 NET_DBG("Allocating %u RX (%zu bytes), %u TX (%zu bytes), "
2075 "%d RX data (%u bytes) and %d TX data (%u bytes) buffers",
2076 k_mem_slab_num_free_get(&rx_pkts),
2077 (size_t)(k_mem_slab_num_free_get(&rx_pkts) *
2078 sizeof(struct net_pkt)),
2079 k_mem_slab_num_free_get(&tx_pkts),
2080 (size_t)(k_mem_slab_num_free_get(&tx_pkts) *
2081 sizeof(struct net_pkt)),
2082 get_frees(&rx_bufs), get_size(&rx_bufs),
2083 get_frees(&tx_bufs), get_size(&tx_bufs));
2084 #endif
2085 }
2086