1 /** @file
2 @brief Network packet buffers for IP stack
3
4 Network data is passed between components using net_pkt.
5 */
6
7 /*
8 * Copyright (c) 2016 Intel Corporation
9 *
10 * SPDX-License-Identifier: Apache-2.0
11 */
12
13 #include <zephyr/logging/log.h>
14 LOG_MODULE_REGISTER(net_pkt, CONFIG_NET_PKT_LOG_LEVEL);
15
16 /* This enables allocation debugging but does not print too much output,
17  * as that can slow things down a lot.
18 */
19 #undef NET_LOG_LEVEL
20 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
21 #define NET_LOG_LEVEL 5
22 #else
23 #define NET_LOG_LEVEL CONFIG_NET_PKT_LOG_LEVEL
24 #endif
25
26 #include <zephyr/kernel.h>
27 #include <zephyr/toolchain.h>
28 #include <string.h>
29 #include <zephyr/types.h>
30 #include <sys/types.h>
31
32 #include <zephyr/sys/util.h>
33
34 #include <zephyr/net/net_core.h>
35 #include <zephyr/net/net_ip.h>
36 #include <zephyr/net/buf.h>
37 #include <zephyr/net/net_pkt.h>
38 #include <zephyr/net/ethernet.h>
39 #include <zephyr/net/udp.h>
40
41 #include "net_private.h"
42 #include "tcp_internal.h"
43
44 /* Find max header size of IP protocol (IPv4 or IPv6) */
45 #if defined(CONFIG_NET_IPV6) || defined(CONFIG_NET_RAW_MODE) || \
46 defined(CONFIG_NET_SOCKETS_PACKET) || defined(CONFIG_NET_SOCKETS_OFFLOAD)
47 #define MAX_IP_PROTO_LEN NET_IPV6H_LEN
48 #else
49 #if defined(CONFIG_NET_IPV4)
50 #define MAX_IP_PROTO_LEN NET_IPV4H_LEN
51 #else
52 #if defined(CONFIG_NET_SOCKETS_CAN)
53 /* TODO: Use the CAN MTU here instead of a hard-coded value. There was a
54  * weird circular dependency issue, so this needs more TLC.
55 */
56 #define MAX_IP_PROTO_LEN 8
57 #else
58 #if defined(CONFIG_NET_ETHERNET_BRIDGE) || \
59 defined(CONFIG_NET_L2_IEEE802154) || \
60 defined(CONFIG_NET_L2_CUSTOM_IEEE802154)
61 #define MAX_IP_PROTO_LEN 0
62 #else
63 #error "Some packet protocol (e.g. IPv6, IPv4, ETH, IEEE 802.15.4) needs to be selected."
64 #endif /* ETHERNET_BRIDGE / L2_IEEE802154 */
65 #endif /* SOCKETS_CAN */
66 #endif /* IPv4 */
67 #endif /* IPv6 */
68
69 /* Find max header size of "next" protocol (TCP, UDP or ICMP) */
70 #if defined(CONFIG_NET_TCP)
71 #define MAX_NEXT_PROTO_LEN NET_TCPH_LEN
72 #else
73 #if defined(CONFIG_NET_UDP)
74 #define MAX_NEXT_PROTO_LEN NET_UDPH_LEN
75 #else
76 #if defined(CONFIG_NET_SOCKETS_CAN)
77 #define MAX_NEXT_PROTO_LEN 0
78 #else
79 /* If no TCP and no UDP, apparently we still want pings to work. */
80 #define MAX_NEXT_PROTO_LEN NET_ICMPH_LEN
81 #endif /* SOCKETS_CAN */
82 #endif /* UDP */
83 #endif /* TCP */
84
85 /* Make sure that IP + TCP/UDP/ICMP headers fit into one fragment. This
86  * makes it possible to cast a fragment pointer to a protocol header struct.
87 */
88 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
89 #if CONFIG_NET_BUF_DATA_SIZE < (MAX_IP_PROTO_LEN + MAX_NEXT_PROTO_LEN)
90 #if defined(STRING2)
91 #undef STRING2
92 #endif
93 #if defined(STRING)
94 #undef STRING
95 #endif
96 #define STRING2(x) #x
97 #define STRING(x) STRING2(x)
98 #pragma message "Data len " STRING(CONFIG_NET_BUF_DATA_SIZE)
99 #pragma message "Minimum len " STRING(MAX_IP_PROTO_LEN + MAX_NEXT_PROTO_LEN)
100 #error "Too small net_buf fragment size"
101 #endif
102 #endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */
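
/* Illustrative sketch (kept in a comment, not compiled): because the check
 * above guarantees that the IP + next protocol headers fit into a single
 * fragment, parsing code may safely treat the start of the first fragment
 * as a contiguous header, roughly like this for a received IPv6 packet:
 *
 *	if (pkt->frags->len >= NET_IPV6H_LEN) {
 *		struct net_ipv6_hdr *hdr =
 *			(struct net_ipv6_hdr *)pkt->frags->data;
 *
 *		... parse hdr fields without crossing a fragment boundary ...
 *	}
 */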
103
104 #if CONFIG_NET_PKT_RX_COUNT <= 0
105 #error "Minimum value for CONFIG_NET_PKT_RX_COUNT is 1"
106 #endif
107
108 #if CONFIG_NET_PKT_TX_COUNT <= 0
109 #error "Minimum value for CONFIG_NET_PKT_TX_COUNT is 1"
110 #endif
111
112 #if CONFIG_NET_BUF_RX_COUNT <= 0
113 #error "Minimum value for CONFIG_NET_BUF_RX_COUNT is 1"
114 #endif
115
116 #if CONFIG_NET_BUF_TX_COUNT <= 0
117 #error "Minimum value for CONFIG_NET_BUF_TX_COUNT is 1"
118 #endif
119
120 K_MEM_SLAB_DEFINE(rx_pkts, sizeof(struct net_pkt), CONFIG_NET_PKT_RX_COUNT, 4);
121 K_MEM_SLAB_DEFINE(tx_pkts, sizeof(struct net_pkt), CONFIG_NET_PKT_TX_COUNT, 4);
122
123 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
124
125 NET_BUF_POOL_FIXED_DEFINE(rx_bufs, CONFIG_NET_BUF_RX_COUNT, CONFIG_NET_BUF_DATA_SIZE,
126 CONFIG_NET_PKT_BUF_USER_DATA_SIZE, NULL);
127 NET_BUF_POOL_FIXED_DEFINE(tx_bufs, CONFIG_NET_BUF_TX_COUNT, CONFIG_NET_BUF_DATA_SIZE,
128 CONFIG_NET_PKT_BUF_USER_DATA_SIZE, NULL);
129
130 #else /* !CONFIG_NET_BUF_FIXED_DATA_SIZE */
131
132 NET_BUF_POOL_VAR_DEFINE(rx_bufs, CONFIG_NET_BUF_RX_COUNT, CONFIG_NET_BUF_DATA_POOL_SIZE,
133 CONFIG_NET_PKT_BUF_USER_DATA_SIZE, NULL);
134 NET_BUF_POOL_VAR_DEFINE(tx_bufs, CONFIG_NET_BUF_TX_COUNT, CONFIG_NET_BUF_DATA_POOL_SIZE,
135 CONFIG_NET_PKT_BUF_USER_DATA_SIZE, NULL);
136
137 #endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */
138
139 /* Allocation tracking is only available if separately enabled */
140 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
141 struct net_pkt_alloc {
142 union {
143 struct net_pkt *pkt;
144 struct net_buf *buf;
145 void *alloc_data;
146 };
147 const char *func_alloc;
148 const char *func_free;
149 uint16_t line_alloc;
150 uint16_t line_free;
151 uint8_t in_use;
152 bool is_pkt;
153 };
154
155 #define MAX_NET_PKT_ALLOCS (CONFIG_NET_PKT_RX_COUNT + \
156 CONFIG_NET_PKT_TX_COUNT + \
157 CONFIG_NET_BUF_RX_COUNT + \
158 CONFIG_NET_BUF_TX_COUNT + \
159 CONFIG_NET_DEBUG_NET_PKT_EXTERNALS)
160
161 static struct net_pkt_alloc net_pkt_allocs[MAX_NET_PKT_ALLOCS];
162
163 static void net_pkt_alloc_add(void *alloc_data, bool is_pkt,
164 const char *func, int line)
165 {
166 int i;
167
168 for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
169 if (net_pkt_allocs[i].in_use) {
170 continue;
171 }
172
173 net_pkt_allocs[i].in_use = true;
174 net_pkt_allocs[i].is_pkt = is_pkt;
175 net_pkt_allocs[i].alloc_data = alloc_data;
176 net_pkt_allocs[i].func_alloc = func;
177 net_pkt_allocs[i].line_alloc = line;
178
179 return;
180 }
181 }
182
183 static void net_pkt_alloc_del(void *alloc_data, const char *func, int line)
184 {
185 int i;
186
187 for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
188 if (net_pkt_allocs[i].in_use &&
189 net_pkt_allocs[i].alloc_data == alloc_data) {
190 net_pkt_allocs[i].func_free = func;
191 net_pkt_allocs[i].line_free = line;
192 net_pkt_allocs[i].in_use = false;
193
194 return;
195 }
196 }
197 }
198
199 static bool net_pkt_alloc_find(void *alloc_data,
200 const char **func_free,
201 int *line_free)
202 {
203 int i;
204
205 for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
206 if (!net_pkt_allocs[i].in_use &&
207 net_pkt_allocs[i].alloc_data == alloc_data) {
208 *func_free = net_pkt_allocs[i].func_free;
209 *line_free = net_pkt_allocs[i].line_free;
210
211 return true;
212 }
213 }
214
215 return false;
216 }
217
218 void net_pkt_allocs_foreach(net_pkt_allocs_cb_t cb, void *user_data)
219 {
220 int i;
221
222 for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
223 if (net_pkt_allocs[i].in_use) {
224 cb(net_pkt_allocs[i].is_pkt ?
225 net_pkt_allocs[i].pkt : NULL,
226 net_pkt_allocs[i].is_pkt ?
227 NULL : net_pkt_allocs[i].buf,
228 net_pkt_allocs[i].func_alloc,
229 net_pkt_allocs[i].line_alloc,
230 net_pkt_allocs[i].func_free,
231 net_pkt_allocs[i].line_free,
232 net_pkt_allocs[i].in_use,
233 user_data);
234 }
235 }
236
237 for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
238 if (!net_pkt_allocs[i].in_use) {
239 cb(net_pkt_allocs[i].is_pkt ?
240 net_pkt_allocs[i].pkt : NULL,
241 net_pkt_allocs[i].is_pkt ?
242 NULL : net_pkt_allocs[i].buf,
243 net_pkt_allocs[i].func_alloc,
244 net_pkt_allocs[i].line_alloc,
245 net_pkt_allocs[i].func_free,
246 net_pkt_allocs[i].line_free,
247 net_pkt_allocs[i].in_use,
248 user_data);
249 }
250 }
251 }
252 #else
253 #define net_pkt_alloc_add(alloc_data, is_pkt, func, line)
254 #define net_pkt_alloc_del(alloc_data, func, line)
255 #define net_pkt_alloc_find(alloc_data, func_free, line_free) false
256 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC */
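
/* A minimal sketch of how the tracking hooks above can be consumed from
 * debug code, assuming the net_pkt_allocs_cb_t signature used by
 * net_pkt_allocs_foreach(); the callback name below is hypothetical:
 *
 *	static void dump_alloc(struct net_pkt *pkt, struct net_buf *buf,
 *			       const char *func_alloc, int line_alloc,
 *			       const char *func_free, int line_free,
 *			       bool in_use, void *user_data)
 *	{
 *		if (in_use) {
 *			NET_INFO("%p allocated at %s():%d",
 *				 pkt ? (void *)pkt : (void *)buf,
 *				 func_alloc, line_alloc);
 *		}
 *	}
 *
 *	net_pkt_allocs_foreach(dump_alloc, NULL);
 */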
257
258 #if defined(NET_PKT_DEBUG_ENABLED)
259
260 #define NET_FRAG_CHECK_IF_NOT_IN_USE(frag, ref) \
261 do { \
262 if (!(ref)) { \
263 NET_ERR("**ERROR** frag %p not in use (%s:%s():%d)", \
264 frag, __FILE__, __func__, __LINE__); \
265 } \
266 } while (0)
267
268 const char *net_pkt_slab2str(struct k_mem_slab *slab)
269 {
270 if (slab == &rx_pkts) {
271 return "RX";
272 } else if (slab == &tx_pkts) {
273 return "TX";
274 }
275
276 return "EXT";
277 }
278
279 const char *net_pkt_pool2str(struct net_buf_pool *pool)
280 {
281 if (pool == &rx_bufs) {
282 return "RDATA";
283 } else if (pool == &tx_bufs) {
284 return "TDATA";
285 }
286
287 return "EDATA";
288 }
289
290 static inline int16_t get_frees(struct net_buf_pool *pool)
291 {
292 #if defined(CONFIG_NET_BUF_POOL_USAGE)
293 return atomic_get(&pool->avail_count);
294 #else
295 return 0;
296 #endif
297 }
298
299 void net_pkt_print_frags(struct net_pkt *pkt)
300 {
301 struct net_buf *frag;
302 size_t total = 0;
303 int count = 0, frag_size = 0;
304
305 if (!pkt) {
306 NET_INFO("pkt %p", pkt);
307 return;
308 }
309
310 NET_INFO("pkt %p frags %p", pkt, pkt->frags);
311
312 NET_ASSERT(pkt->frags);
313
314 frag = pkt->frags;
315 while (frag) {
316 total += frag->len;
317
318 frag_size = frag->size;
319
320 NET_INFO("[%d] frag %p len %d max len %u size %d pool %p",
321 count, frag, frag->len, net_buf_max_len(frag),
322 frag_size, net_buf_pool_get(frag->pool_id));
323
324 count++;
325
326 frag = frag->frags;
327 }
328
329 NET_INFO("Total data size %zu, occupied %d bytes, utilization %zu%%",
330 total, count * frag_size,
331 count ? (total * 100) / (count * frag_size) : 0);
332 }
333 #endif
334
335 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
336 static inline const char *get_name(struct net_buf_pool *pool)
337 {
338 #if defined(CONFIG_NET_BUF_POOL_USAGE)
339 return pool->name;
340 #else
341 return "?";
342 #endif
343 }
344
345 static inline int16_t get_size(struct net_buf_pool *pool)
346 {
347 #if defined(CONFIG_NET_BUF_POOL_USAGE)
348 return pool->pool_size;
349 #else
350 return 0;
351 #endif
352 }
353
354 static inline const char *slab2str(struct k_mem_slab *slab)
355 {
356 return net_pkt_slab2str(slab);
357 }
358
359 static inline const char *pool2str(struct net_buf_pool *pool)
360 {
361 return net_pkt_pool2str(pool);
362 }
363 #endif /* CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG */
364
365 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
366 struct net_buf *net_pkt_get_reserve_data_debug(struct net_buf_pool *pool,
367 size_t min_len,
368 k_timeout_t timeout,
369 const char *caller,
370 int line)
371 #else /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
372 struct net_buf *net_pkt_get_reserve_data(struct net_buf_pool *pool,
373 size_t min_len, k_timeout_t timeout)
374 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
375 {
376 struct net_buf *frag;
377
378 if (k_is_in_isr()) {
379 timeout = K_NO_WAIT;
380 }
381
382 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
383 if (min_len > CONFIG_NET_BUF_DATA_SIZE) {
384 NET_ERR("Requested too large fragment. Increase CONFIG_NET_BUF_DATA_SIZE.");
385 return NULL;
386 }
387
388 frag = net_buf_alloc(pool, timeout);
389 #else
390 frag = net_buf_alloc_len(pool, min_len, timeout);
391 #endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */
392
393 if (!frag) {
394 return NULL;
395 }
396
397 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
398 NET_FRAG_CHECK_IF_NOT_IN_USE(frag, frag->ref + 1U);
399 #endif
400
401 net_pkt_alloc_add(frag, false, caller, line);
402
403 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
404 NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
405 pool2str(pool), get_name(pool), get_frees(pool),
406 frag, frag->ref, caller, line);
407 #endif
408
409 return frag;
410 }
411
412 /* Get a fragment and try to figure out the pool from which to get
413 * the data.
414 */
415 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
416 struct net_buf *net_pkt_get_frag_debug(struct net_pkt *pkt, size_t min_len,
417 k_timeout_t timeout,
418 const char *caller, int line)
419 #else
420 struct net_buf *net_pkt_get_frag(struct net_pkt *pkt, size_t min_len,
421 k_timeout_t timeout)
422 #endif
423 {
424 #if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL)
425 struct net_context *context;
426
427 context = net_pkt_context(pkt);
428 if (context && context->data_pool) {
429 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
430 return net_pkt_get_reserve_data_debug(context->data_pool(),
431 min_len, timeout,
432 caller, line);
433 #else
434 return net_pkt_get_reserve_data(context->data_pool(), min_len,
435 timeout);
436 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
437 }
438 #endif /* CONFIG_NET_CONTEXT_NET_PKT_POOL */
439
440 if (pkt->slab == &rx_pkts) {
441 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
442 return net_pkt_get_reserve_rx_data_debug(min_len, timeout,
443 caller, line);
444 #else
445 return net_pkt_get_reserve_rx_data(min_len, timeout);
446 #endif
447 }
448
449 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
450 return net_pkt_get_reserve_tx_data_debug(min_len, timeout, caller, line);
451 #else
452 return net_pkt_get_reserve_tx_data(min_len, timeout);
453 #endif
454 }
455
456 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
457 struct net_buf *net_pkt_get_reserve_rx_data_debug(size_t min_len, k_timeout_t timeout,
458 const char *caller, int line)
459 {
460 return net_pkt_get_reserve_data_debug(&rx_bufs, min_len, timeout, caller, line);
461 }
462
463 struct net_buf *net_pkt_get_reserve_tx_data_debug(size_t min_len, k_timeout_t timeout,
464 const char *caller, int line)
465 {
466 return net_pkt_get_reserve_data_debug(&tx_bufs, min_len, timeout, caller, line);
467 }
468
469 #else /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
470
471 struct net_buf *net_pkt_get_reserve_rx_data(size_t min_len, k_timeout_t timeout)
472 {
473 return net_pkt_get_reserve_data(&rx_bufs, min_len, timeout);
474 }
475
476 struct net_buf *net_pkt_get_reserve_tx_data(size_t min_len, k_timeout_t timeout)
477 {
478 return net_pkt_get_reserve_data(&tx_bufs, min_len, timeout);
479 }
480
481 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
482
483
484 #if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL)
485 static inline struct k_mem_slab *get_tx_slab(struct net_context *context)
486 {
487 if (context->tx_slab) {
488 return context->tx_slab();
489 }
490
491 return NULL;
492 }
493
494 static inline struct net_buf_pool *get_data_pool(struct net_context *context)
495 {
496 if (context->data_pool) {
497 return context->data_pool();
498 }
499
500 return NULL;
501 }
502 #else
503 #define get_tx_slab(...) NULL
504 #define get_data_pool(...) NULL
505 #endif /* CONFIG_NET_CONTEXT_NET_PKT_POOL */
506
507 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
508 void net_pkt_unref_debug(struct net_pkt *pkt, const char *caller, int line)
509 {
510 struct net_buf *frag;
511
512 #else
513 void net_pkt_unref(struct net_pkt *pkt)
514 {
515 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
516 atomic_val_t ref;
517
518 if (!pkt) {
519 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
520 NET_ERR("*** ERROR *** pkt %p (%s():%d)", pkt, caller, line);
521 #endif
522 return;
523 }
524
525 do {
526 ref = atomic_get(&pkt->atomic_ref);
527 if (!ref) {
528 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
529 const char *func_freed;
530 int line_freed;
531
532 if (net_pkt_alloc_find(pkt, &func_freed, &line_freed)) {
533 NET_ERR("*** ERROR *** pkt %p is freed already "
534 "by %s():%d (%s():%d)",
535 pkt, func_freed, line_freed, caller,
536 line);
537 } else {
538 NET_ERR("*** ERROR *** pkt %p is freed already "
539 "(%s():%d)", pkt, caller, line);
540 }
541 #endif
542 return;
543 }
544 } while (!atomic_cas(&pkt->atomic_ref, ref, ref - 1));
545
546 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
547 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
548 NET_DBG("%s [%d] pkt %p ref %ld frags %p (%s():%d)",
549 slab2str(pkt->slab), k_mem_slab_num_free_get(pkt->slab),
550 pkt, ref - 1, pkt->frags, caller, line);
551 #endif
552 if (ref > 1) {
553 goto done;
554 }
555
556 frag = pkt->frags;
557 while (frag) {
558 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
559 NET_DBG("%s (%s) [%d] frag %p ref %d frags %p (%s():%d)",
560 pool2str(net_buf_pool_get(frag->pool_id)),
561 get_name(net_buf_pool_get(frag->pool_id)),
562 get_frees(net_buf_pool_get(frag->pool_id)), frag,
563 frag->ref - 1U, frag->frags, caller, line);
564 #endif
565
566 if (!frag->ref) {
567 const char *func_freed;
568 int line_freed;
569
570 if (net_pkt_alloc_find(frag,
571 &func_freed, &line_freed)) {
572 NET_ERR("*** ERROR *** frag %p is freed "
573 "already by %s():%d (%s():%d)",
574 frag, func_freed, line_freed,
575 caller, line);
576 } else {
577 NET_ERR("*** ERROR *** frag %p is freed "
578 "already (%s():%d)",
579 frag, caller, line);
580 }
581 }
582
583 net_pkt_alloc_del(frag, caller, line);
584
585 frag = frag->frags;
586 }
587
588 net_pkt_alloc_del(pkt, caller, line);
589 done:
590 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
591
592 if (ref > 1) {
593 return;
594 }
595
596 if (pkt->frags) {
597 net_pkt_frag_unref(pkt->frags);
598 }
599
600 if (IS_ENABLED(CONFIG_NET_DEBUG_NET_PKT_NON_FRAGILE_ACCESS)) {
601 pkt->buffer = NULL;
602 net_pkt_cursor_init(pkt);
603 }
604
605 k_mem_slab_free(pkt->slab, (void *)pkt);
606 }
607
608 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
609 struct net_pkt *net_pkt_ref_debug(struct net_pkt *pkt, const char *caller,
610 int line)
611 #else
612 struct net_pkt *net_pkt_ref(struct net_pkt *pkt)
613 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
614 {
615 atomic_val_t ref;
616
617 do {
618 ref = pkt ? atomic_get(&pkt->atomic_ref) : 0;
619 if (!ref) {
620 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
621 NET_ERR("*** ERROR *** pkt %p (%s():%d)",
622 pkt, caller, line);
623 #endif
624 return NULL;
625 }
626 } while (!atomic_cas(&pkt->atomic_ref, ref, ref + 1));
627
628 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
629 NET_DBG("%s [%d] pkt %p ref %ld (%s():%d)",
630 slab2str(pkt->slab), k_mem_slab_num_free_get(pkt->slab),
631 pkt, ref + 1, caller, line);
632 #endif
633
634
635 return pkt;
636 }
637
638 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
639 struct net_buf *net_pkt_frag_ref_debug(struct net_buf *frag,
640 const char *caller, int line)
641 #else
642 struct net_buf *net_pkt_frag_ref(struct net_buf *frag)
643 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
644 {
645 if (!frag) {
646 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
647 NET_ERR("*** ERROR *** frag %p (%s():%d)", frag, caller, line);
648 #endif
649 return NULL;
650 }
651
652 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
653 NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
654 pool2str(net_buf_pool_get(frag->pool_id)),
655 get_name(net_buf_pool_get(frag->pool_id)),
656 get_frees(net_buf_pool_get(frag->pool_id)),
657 frag, frag->ref + 1U, caller, line);
658 #endif
659
660 return net_buf_ref(frag);
661 }
662
663
664 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
665 void net_pkt_frag_unref_debug(struct net_buf *frag,
666 const char *caller, int line)
667 #else
668 void net_pkt_frag_unref(struct net_buf *frag)
669 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
670 {
671 if (!frag) {
672 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
673 NET_ERR("*** ERROR *** frag %p (%s():%d)", frag, caller, line);
674 #endif
675 return;
676 }
677
678 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
679 NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
680 pool2str(net_buf_pool_get(frag->pool_id)),
681 get_name(net_buf_pool_get(frag->pool_id)),
682 get_frees(net_buf_pool_get(frag->pool_id)),
683 frag, frag->ref - 1U, caller, line);
684 #endif
685
686 if (frag->ref == 1U) {
687 net_pkt_alloc_del(frag, caller, line);
688 }
689
690 net_buf_unref(frag);
691 }
692
693 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
694 struct net_buf *net_pkt_frag_del_debug(struct net_pkt *pkt,
695 struct net_buf *parent,
696 struct net_buf *frag,
697 const char *caller, int line)
698 #else
699 struct net_buf *net_pkt_frag_del(struct net_pkt *pkt,
700 struct net_buf *parent,
701 struct net_buf *frag)
702 #endif
703 {
704 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
705 NET_DBG("pkt %p parent %p frag %p ref %u (%s:%d)",
706 pkt, parent, frag, frag->ref, caller, line);
707 #endif
708
709 if (pkt->frags == frag && !parent) {
710 struct net_buf *tmp;
711
712 if (frag->ref == 1U) {
713 net_pkt_alloc_del(frag, caller, line);
714 }
715
716 tmp = net_buf_frag_del(NULL, frag);
717 pkt->frags = tmp;
718
719 return tmp;
720 }
721
722 if (frag->ref == 1U) {
723 net_pkt_alloc_del(frag, caller, line);
724 }
725
726 return net_buf_frag_del(parent, frag);
727 }
728
729 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
730 void net_pkt_frag_add_debug(struct net_pkt *pkt, struct net_buf *frag,
731 const char *caller, int line)
732 #else
733 void net_pkt_frag_add(struct net_pkt *pkt, struct net_buf *frag)
734 #endif
735 {
736 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
737 NET_DBG("pkt %p frag %p (%s:%d)", pkt, frag, caller, line);
738 #endif
739
740 /* We do not use net_buf_frag_add() as it would take an extra
741  * reference on the frag if !pkt->frags.
742 */
743 if (!pkt->frags) {
744 pkt->frags = frag;
745 return;
746 }
747
748 net_buf_frag_insert(net_buf_frag_last(pkt->frags), frag);
749 }
750
751 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
752 void net_pkt_frag_insert_debug(struct net_pkt *pkt, struct net_buf *frag,
753 const char *caller, int line)
754 #else
755 void net_pkt_frag_insert(struct net_pkt *pkt, struct net_buf *frag)
756 #endif
757 {
758 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
759 NET_DBG("pkt %p frag %p (%s:%d)", pkt, frag, caller, line);
760 #endif
761
762 net_buf_frag_last(frag)->frags = pkt->frags;
763 pkt->frags = frag;
764 }
765
766 void net_pkt_compact(struct net_pkt *pkt)
767 {
768 struct net_buf *frag, *prev;
769
770 NET_DBG("Compacting data in pkt %p", pkt);
771
772 frag = pkt->frags;
773 prev = NULL;
774
775 while (frag) {
776 if (frag->frags) {
777 /* Copy as much data as fits from the next fragment into
778  * this fragment.
779 */
780 size_t copy_len;
781
782 copy_len = frag->frags->len;
783 if (copy_len > net_buf_tailroom(frag)) {
784 copy_len = net_buf_tailroom(frag);
785 }
786
787 memcpy(net_buf_tail(frag), frag->frags->data, copy_len);
788 net_buf_add(frag, copy_len);
789
790 memmove(frag->frags->data,
791 frag->frags->data + copy_len,
792 frag->frags->len - copy_len);
793
794 frag->frags->len -= copy_len;
795
796 /* Is there any more space in this fragment */
797 if (net_buf_tailroom(frag)) {
798 /* There is. This also means that the next
799 * fragment is empty as otherwise we could
800 * not have copied all data. Remove next
801 * fragment as there is no data in it any more.
802 */
803 net_pkt_frag_del(pkt, frag, frag->frags);
804
805 /* Then check next fragment */
806 continue;
807 }
808 } else {
809 if (!frag->len) {
810 /* Remove the last fragment because there is no
811 * data in it.
812 */
813 net_pkt_frag_del(pkt, prev, frag);
814
815 break;
816 }
817 }
818
819 prev = frag;
820 frag = frag->frags;
821 }
822 }
823
824 void net_pkt_get_info(struct k_mem_slab **rx,
825 struct k_mem_slab **tx,
826 struct net_buf_pool **rx_data,
827 struct net_buf_pool **tx_data)
828 {
829 if (rx) {
830 *rx = &rx_pkts;
831 }
832
833 if (tx) {
834 *tx = &tx_pkts;
835 }
836
837 if (rx_data) {
838 *rx_data = &rx_bufs;
839 }
840
841 if (tx_data) {
842 *tx_data = &tx_bufs;
843 }
844 }
845
846 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
847 void net_pkt_print(void)
848 {
849 NET_DBG("TX %u RX %u RDATA %d TDATA %d",
850 k_mem_slab_num_free_get(&tx_pkts),
851 k_mem_slab_num_free_get(&rx_pkts),
852 get_frees(&rx_bufs), get_frees(&tx_bufs));
853 }
854 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC */
855
856 /* The new allocator and API start here */
857
858 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
859
860 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
861 static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
862 size_t size, k_timeout_t timeout,
863 const char *caller, int line)
864 #else
865 static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
866 size_t size, k_timeout_t timeout)
867 #endif
868 {
869 k_timepoint_t end = sys_timepoint_calc(timeout);
870 struct net_buf *first = NULL;
871 struct net_buf *current = NULL;
872
873 do {
874 struct net_buf *new;
875
876 new = net_buf_alloc_fixed(pool, timeout);
877 if (!new) {
878 goto error;
879 }
880
881 if (!first && !current) {
882 first = new;
883 } else {
884 current->frags = new;
885 }
886
887 current = new;
888 if (current->size > size) {
889 current->size = size;
890 }
891
892 size -= current->size;
893
894 timeout = sys_timepoint_timeout(end);
895
896 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
897 NET_FRAG_CHECK_IF_NOT_IN_USE(new, new->ref + 1);
898
899 net_pkt_alloc_add(new, false, caller, line);
900
901 NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
902 pool2str(pool), get_name(pool), get_frees(pool),
903 new, new->ref, caller, line);
904 #endif
905 } while (size);
906
907 return first;
908 error:
909 if (first) {
910 net_buf_unref(first);
911 }
912
913 return NULL;
914 }
915
916 #else /* !CONFIG_NET_BUF_FIXED_DATA_SIZE */
917
918 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
919 static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
920 size_t size, k_timeout_t timeout,
921 const char *caller, int line)
922 #else
923 static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
924 size_t size, k_timeout_t timeout)
925 #endif
926 {
927 struct net_buf *buf;
928
929 buf = net_buf_alloc_len(pool, size, timeout);
930
931 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
932 NET_FRAG_CHECK_IF_NOT_IN_USE(buf, buf->ref + 1);
933
934 net_pkt_alloc_add(buf, false, caller, line);
935
936 NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
937 pool2str(pool), get_name(pool), get_frees(pool),
938 buf, buf->ref, caller, line);
939 #endif
940
941 return buf;
942 }
943
944 #endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */
945
946 static size_t pkt_buffer_length(struct net_pkt *pkt,
947 size_t size,
948 enum net_ip_protocol proto,
949 size_t existing)
950 {
951 sa_family_t family = net_pkt_family(pkt);
952 size_t max_len;
953
954 if (net_pkt_iface(pkt)) {
955 max_len = net_if_get_mtu(net_pkt_iface(pkt));
956 } else {
957 max_len = 0;
958 }
959
960 /* Family vs iface MTU */
961 if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
962 if (IS_ENABLED(CONFIG_NET_IPV6_FRAGMENT) && (size > max_len)) {
963 /* We support larger packets if IPv6 fragmentation is
964 * enabled.
965 */
966 max_len = size;
967 }
968
969 max_len = MAX(max_len, NET_IPV6_MTU);
970 } else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
971 if (IS_ENABLED(CONFIG_NET_IPV4_FRAGMENT) && (size > max_len)) {
972 /* We support larger packets if IPv4 fragmentation is enabled */
973 max_len = size;
974 }
975
976 max_len = MAX(max_len, NET_IPV4_MTU);
977 } else { /* family == AF_UNSPEC */
978 #if defined (CONFIG_NET_L2_ETHERNET)
979 if (net_if_l2(net_pkt_iface(pkt)) ==
980 &NET_L2_GET_NAME(ETHERNET)) {
981 max_len += NET_ETH_MAX_HDR_SIZE;
982 } else
983 #endif /* CONFIG_NET_L2_ETHERNET */
984 {
985 /* Other L2s are not checked, as the pkt MTU in this case
986 * is based on the IP layer (IPv6 most of the time).
987 */
988 max_len = size;
989 }
990 }
991
992 max_len -= existing;
993
994 return MIN(size, max_len);
995 }
996
997 static size_t pkt_estimate_headers_length(struct net_pkt *pkt,
998 sa_family_t family,
999 enum net_ip_protocol proto)
1000 {
1001 size_t hdr_len = 0;
1002
1003 if (family == AF_UNSPEC) {
1004 return 0;
1005 }
1006
1007 /* Family header */
1008 if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
1009 hdr_len += NET_IPV6H_LEN;
1010 } else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
1011 hdr_len += NET_IPV4H_LEN;
1012 }
1013
1014 /* + protocol header */
1015 if (IS_ENABLED(CONFIG_NET_TCP) && proto == IPPROTO_TCP) {
1016 hdr_len += NET_TCPH_LEN + NET_TCP_MAX_OPT_SIZE;
1017 } else if (IS_ENABLED(CONFIG_NET_UDP) && proto == IPPROTO_UDP) {
1018 hdr_len += NET_UDPH_LEN;
1019 } else if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) {
1020 hdr_len += NET_ICMPH_LEN;
1021 }
1022
1023 NET_DBG("HDRs length estimation %zu", hdr_len);
1024
1025 return hdr_len;
1026 }
1027
1028 static size_t pkt_get_max_len(struct net_pkt *pkt)
1029 {
1030 struct net_buf *buf = pkt->buffer;
1031 size_t size = 0;
1032
1033 while (buf) {
1034 size += net_buf_max_len(buf);
1035 buf = buf->frags;
1036 }
1037
1038 return size;
1039 }
1040
1041 size_t net_pkt_available_buffer(struct net_pkt *pkt)
1042 {
1043 if (!pkt) {
1044 return 0;
1045 }
1046
1047 return pkt_get_max_len(pkt) - net_pkt_get_len(pkt);
1048 }
1049
1050 size_t net_pkt_available_payload_buffer(struct net_pkt *pkt,
1051 enum net_ip_protocol proto)
1052 {
1053 size_t hdr_len = 0;
1054 size_t len;
1055
1056 if (!pkt) {
1057 return 0;
1058 }
1059
1060 hdr_len = pkt_estimate_headers_length(pkt, net_pkt_family(pkt), proto);
1061 len = net_pkt_get_len(pkt);
1062
1063 hdr_len = hdr_len <= len ? 0 : hdr_len - len;
1064
1065 len = net_pkt_available_buffer(pkt) - hdr_len;
1066
1067 return len;
1068 }
1069
1070 void net_pkt_trim_buffer(struct net_pkt *pkt)
1071 {
1072 struct net_buf *buf, *prev;
1073
1074 buf = pkt->buffer;
1075 prev = buf;
1076
1077 while (buf) {
1078 struct net_buf *next = buf->frags;
1079
1080 if (!buf->len) {
1081 if (buf == pkt->buffer) {
1082 pkt->buffer = next;
1083 } else if (buf == prev->frags) {
1084 prev->frags = next;
1085 }
1086
1087 buf->frags = NULL;
1088 net_buf_unref(buf);
1089 } else {
1090 prev = buf;
1091 }
1092
1093 buf = next;
1094 }
1095 }
1096
1097 int net_pkt_remove_tail(struct net_pkt *pkt, size_t length)
1098 {
1099 struct net_buf *buf = pkt->buffer;
1100 size_t remaining_len = net_pkt_get_len(pkt);
1101
1102 if (remaining_len < length) {
1103 return -EINVAL;
1104 }
1105
1106 remaining_len -= length;
1107
1108 while (buf) {
1109 if (buf->len >= remaining_len) {
1110 buf->len = remaining_len;
1111
1112 if (buf->frags) {
1113 net_pkt_frag_unref(buf->frags);
1114 buf->frags = NULL;
1115 }
1116
1117 break;
1118 }
1119
1120 remaining_len -= buf->len;
1121 buf = buf->frags;
1122 }
1123
1124 return 0;
1125 }
1126
1127 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1128 int net_pkt_alloc_buffer_debug(struct net_pkt *pkt,
1129 size_t size,
1130 enum net_ip_protocol proto,
1131 k_timeout_t timeout,
1132 const char *caller,
1133 int line)
1134 #else
1135 int net_pkt_alloc_buffer(struct net_pkt *pkt,
1136 size_t size,
1137 enum net_ip_protocol proto,
1138 k_timeout_t timeout)
1139 #endif
1140 {
1141 struct net_buf_pool *pool = NULL;
1142 size_t alloc_len = 0;
1143 size_t hdr_len = 0;
1144 struct net_buf *buf;
1145
1146 if (!size && proto == 0 && net_pkt_family(pkt) == AF_UNSPEC) {
1147 return 0;
1148 }
1149
1150 if (k_is_in_isr()) {
1151 timeout = K_NO_WAIT;
1152 }
1153
1154 /* Check the existing buffer and take its free space into account. */
1155 alloc_len = net_pkt_available_buffer(pkt);
1156 if (!alloc_len) {
1157 /* If there is no free space, account for the estimated
1158  * header space.
1159 */
1160 hdr_len = pkt_estimate_headers_length(pkt,
1161 net_pkt_family(pkt),
1162 proto);
1163 }
1164
1165 /* Calculate the maximum that can be allocated depending on size */
1166 alloc_len = pkt_buffer_length(pkt, size + hdr_len, proto, alloc_len);
1167
1168 NET_DBG("Data allocation maximum size %zu (requested %zu)",
1169 alloc_len, size);
1170
1171 if (pkt->context) {
1172 pool = get_data_pool(pkt->context);
1173 }
1174
1175 if (!pool) {
1176 pool = pkt->slab == &tx_pkts ? &tx_bufs : &rx_bufs;
1177 }
1178
1179 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1180 buf = pkt_alloc_buffer(pool, alloc_len, timeout, caller, line);
1181 #else
1182 buf = pkt_alloc_buffer(pool, alloc_len, timeout);
1183 #endif
1184
1185 if (!buf) {
1186 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1187 NET_ERR("Data buffer (%zd) allocation failed (%s:%d)",
1188 alloc_len, caller, line);
1189 #else
1190 NET_ERR("Data buffer (%zd) allocation failed.", alloc_len);
1191 #endif
1192 return -ENOMEM;
1193 }
1194
1195 net_pkt_append_buffer(pkt, buf);
1196
1197 return 0;
1198 }
1199
1200 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1201 static struct net_pkt *pkt_alloc(struct k_mem_slab *slab, k_timeout_t timeout,
1202 const char *caller, int line)
1203 #else
1204 static struct net_pkt *pkt_alloc(struct k_mem_slab *slab, k_timeout_t timeout)
1205 #endif
1206 {
1207 struct net_pkt *pkt;
1208 uint32_t create_time;
1209 int ret;
1210
1211 if (k_is_in_isr()) {
1212 timeout = K_NO_WAIT;
1213 }
1214
1215 if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS) ||
1216 IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS)) {
1217 create_time = k_cycle_get_32();
1218 } else {
1219 ARG_UNUSED(create_time);
1220 }
1221
1222 ret = k_mem_slab_alloc(slab, (void **)&pkt, timeout);
1223 if (ret) {
1224 return NULL;
1225 }
1226
1227 memset(pkt, 0, sizeof(struct net_pkt));
1228
1229 pkt->atomic_ref = ATOMIC_INIT(1);
1230 pkt->slab = slab;
1231
1232 if (IS_ENABLED(CONFIG_NET_IPV6)) {
1233 net_pkt_set_ipv6_next_hdr(pkt, 255);
1234 }
1235
1236 #if defined(CONFIG_NET_TX_DEFAULT_PRIORITY)
1237 #define TX_DEFAULT_PRIORITY CONFIG_NET_TX_DEFAULT_PRIORITY
1238 #else
1239 #define TX_DEFAULT_PRIORITY 0
1240 #endif
1241
1242 #if defined(CONFIG_NET_RX_DEFAULT_PRIORITY)
1243 #define RX_DEFAULT_PRIORITY CONFIG_NET_RX_DEFAULT_PRIORITY
1244 #else
1245 #define RX_DEFAULT_PRIORITY 0
1246 #endif
1247
1248 if (&tx_pkts == slab) {
1249 net_pkt_set_priority(pkt, TX_DEFAULT_PRIORITY);
1250 } else if (&rx_pkts == slab) {
1251 net_pkt_set_priority(pkt, RX_DEFAULT_PRIORITY);
1252 }
1253
1254 if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS) ||
1255 IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS)) {
1256 net_pkt_set_create_time(pkt, create_time);
1257 }
1258
1259 net_pkt_set_vlan_tag(pkt, NET_VLAN_TAG_UNSPEC);
1260
1261 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1262 net_pkt_alloc_add(pkt, true, caller, line);
1263 #endif
1264
1265 net_pkt_cursor_init(pkt);
1266
1267 return pkt;
1268 }
1269
1270 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1271 struct net_pkt *net_pkt_alloc_debug(k_timeout_t timeout,
1272 const char *caller, int line)
1273 #else
1274 struct net_pkt *net_pkt_alloc(k_timeout_t timeout)
1275 #endif
1276 {
1277 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1278 return pkt_alloc(&tx_pkts, timeout, caller, line);
1279 #else
1280 return pkt_alloc(&tx_pkts, timeout);
1281 #endif
1282 }
1283
1284 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1285 struct net_pkt *net_pkt_alloc_from_slab_debug(struct k_mem_slab *slab,
1286 k_timeout_t timeout,
1287 const char *caller, int line)
1288 #else
1289 struct net_pkt *net_pkt_alloc_from_slab(struct k_mem_slab *slab,
1290 k_timeout_t timeout)
1291 #endif
1292 {
1293 if (!slab) {
1294 return NULL;
1295 }
1296
1297 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1298 return pkt_alloc(slab, timeout, caller, line);
1299 #else
1300 return pkt_alloc(slab, timeout);
1301 #endif
1302 }
1303
1304 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1305 struct net_pkt *net_pkt_rx_alloc_debug(k_timeout_t timeout,
1306 const char *caller, int line)
1307 #else
1308 struct net_pkt *net_pkt_rx_alloc(k_timeout_t timeout)
1309 #endif
1310 {
1311 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1312 return pkt_alloc(&rx_pkts, timeout, caller, line);
1313 #else
1314 return pkt_alloc(&rx_pkts, timeout);
1315 #endif
1316 }
1317
1318 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1319 static struct net_pkt *pkt_alloc_on_iface(struct k_mem_slab *slab,
1320 struct net_if *iface,
1321 k_timeout_t timeout,
1322 const char *caller, int line)
1323 #else
1324 static struct net_pkt *pkt_alloc_on_iface(struct k_mem_slab *slab,
1325 struct net_if *iface,
1326 k_timeout_t timeout)
1327
1328 #endif
1329 {
1330 struct net_pkt *pkt;
1331
1332 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1333 pkt = pkt_alloc(slab, timeout, caller, line);
1334 #else
1335 pkt = pkt_alloc(slab, timeout);
1336 #endif
1337
1338 if (pkt) {
1339 net_pkt_set_iface(pkt, iface);
1340 }
1341
1342 return pkt;
1343 }
1344
1345 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1346 struct net_pkt *net_pkt_alloc_on_iface_debug(struct net_if *iface,
1347 k_timeout_t timeout,
1348 const char *caller,
1349 int line)
1350 #else
1351 struct net_pkt *net_pkt_alloc_on_iface(struct net_if *iface,
1352 k_timeout_t timeout)
1353 #endif
1354 {
1355 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1356 return pkt_alloc_on_iface(&tx_pkts, iface, timeout, caller, line);
1357 #else
1358 return pkt_alloc_on_iface(&tx_pkts, iface, timeout);
1359 #endif
1360 }
1361
1362 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1363 struct net_pkt *net_pkt_rx_alloc_on_iface_debug(struct net_if *iface,
1364 k_timeout_t timeout,
1365 const char *caller,
1366 int line)
1367 #else
1368 struct net_pkt *net_pkt_rx_alloc_on_iface(struct net_if *iface,
1369 k_timeout_t timeout)
1370 #endif
1371 {
1372 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1373 return pkt_alloc_on_iface(&rx_pkts, iface, timeout, caller, line);
1374 #else
1375 return pkt_alloc_on_iface(&rx_pkts, iface, timeout);
1376 #endif
1377 }
1378
1379 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1380 static struct net_pkt *
1381 pkt_alloc_with_buffer(struct k_mem_slab *slab,
1382 struct net_if *iface,
1383 size_t size,
1384 sa_family_t family,
1385 enum net_ip_protocol proto,
1386 k_timeout_t timeout,
1387 const char *caller,
1388 int line)
1389 #else
1390 static struct net_pkt *
1391 pkt_alloc_with_buffer(struct k_mem_slab *slab,
1392 struct net_if *iface,
1393 size_t size,
1394 sa_family_t family,
1395 enum net_ip_protocol proto,
1396 k_timeout_t timeout)
1397 #endif
1398 {
1399 k_timepoint_t end = sys_timepoint_calc(timeout);
1400 struct net_pkt *pkt;
1401 int ret;
1402
1403 NET_DBG("On iface %p size %zu", iface, size);
1404
1405 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1406 pkt = pkt_alloc_on_iface(slab, iface, timeout, caller, line);
1407 #else
1408 pkt = pkt_alloc_on_iface(slab, iface, timeout);
1409 #endif
1410
1411 if (!pkt) {
1412 return NULL;
1413 }
1414
1415 net_pkt_set_family(pkt, family);
1416
1417 timeout = sys_timepoint_timeout(end);
1418 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1419 ret = net_pkt_alloc_buffer_debug(pkt, size, proto, timeout,
1420 caller, line);
1421 #else
1422 ret = net_pkt_alloc_buffer(pkt, size, proto, timeout);
1423 #endif
1424
1425 if (ret) {
1426 net_pkt_unref(pkt);
1427 return NULL;
1428 }
1429
1430 return pkt;
1431 }
1432
1433 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1434 struct net_pkt *net_pkt_alloc_with_buffer_debug(struct net_if *iface,
1435 size_t size,
1436 sa_family_t family,
1437 enum net_ip_protocol proto,
1438 k_timeout_t timeout,
1439 const char *caller,
1440 int line)
1441 #else
1442 struct net_pkt *net_pkt_alloc_with_buffer(struct net_if *iface,
1443 size_t size,
1444 sa_family_t family,
1445 enum net_ip_protocol proto,
1446 k_timeout_t timeout)
1447 #endif
1448 {
1449 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1450 return pkt_alloc_with_buffer(&tx_pkts, iface, size, family,
1451 proto, timeout, caller, line);
1452 #else
1453 return pkt_alloc_with_buffer(&tx_pkts, iface, size, family,
1454 proto, timeout);
1455 #endif
1456 }
1457
1458 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1459 struct net_pkt *net_pkt_rx_alloc_with_buffer_debug(struct net_if *iface,
1460 size_t size,
1461 sa_family_t family,
1462 enum net_ip_protocol proto,
1463 k_timeout_t timeout,
1464 const char *caller,
1465 int line)
1466 #else
1467 struct net_pkt *net_pkt_rx_alloc_with_buffer(struct net_if *iface,
1468 size_t size,
1469 sa_family_t family,
1470 enum net_ip_protocol proto,
1471 k_timeout_t timeout)
1472 #endif
1473 {
1474 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1475 return pkt_alloc_with_buffer(&rx_pkts, iface, size, family,
1476 proto, timeout, caller, line);
1477 #else
1478 return pkt_alloc_with_buffer(&rx_pkts, iface, size, family,
1479 proto, timeout);
1480 #endif
1481 }
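
/* A minimal usage sketch for the allocators above (error paths trimmed, the
 * iface pointer is assumed to come from the caller): allocate a TX packet
 * with enough buffer for the payload, write the data through the cursor API
 * and drop the packet on failure.
 *
 *	uint8_t payload[] = { 0x01, 0x02, 0x03, 0x04 };
 *	struct net_pkt *pkt;
 *
 *	pkt = net_pkt_alloc_with_buffer(iface, sizeof(payload),
 *					AF_INET6, IPPROTO_UDP, K_MSEC(100));
 *	if (!pkt) {
 *		return -ENOMEM;
 *	}
 *
 *	if (net_pkt_write(pkt, payload, sizeof(payload)) < 0) {
 *		net_pkt_unref(pkt);
 *		return -ENOMEM;
 *	}
 */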
1482
1483 void net_pkt_append_buffer(struct net_pkt *pkt, struct net_buf *buffer)
1484 {
1485 if (!pkt->buffer) {
1486 pkt->buffer = buffer;
1487 net_pkt_cursor_init(pkt);
1488 } else {
1489 net_buf_frag_insert(net_buf_frag_last(pkt->buffer), buffer);
1490 }
1491 }
1492
1493 void net_pkt_cursor_init(struct net_pkt *pkt)
1494 {
1495 pkt->cursor.buf = pkt->buffer;
1496 if (pkt->cursor.buf) {
1497 pkt->cursor.pos = pkt->cursor.buf->data;
1498 } else {
1499 pkt->cursor.pos = NULL;
1500 }
1501 }
1502
1503 static void pkt_cursor_jump(struct net_pkt *pkt, bool write)
1504 {
1505 struct net_pkt_cursor *cursor = &pkt->cursor;
1506
1507 cursor->buf = cursor->buf->frags;
1508 while (cursor->buf) {
1509 const size_t len =
1510 write ? net_buf_max_len(cursor->buf) : cursor->buf->len;
1511
1512 if (!len) {
1513 cursor->buf = cursor->buf->frags;
1514 } else {
1515 break;
1516 }
1517 }
1518
1519 if (cursor->buf) {
1520 cursor->pos = cursor->buf->data;
1521 } else {
1522 cursor->pos = NULL;
1523 }
1524 }
1525
1526 static void pkt_cursor_advance(struct net_pkt *pkt, bool write)
1527 {
1528 struct net_pkt_cursor *cursor = &pkt->cursor;
1529 size_t len;
1530
1531 if (!cursor->buf) {
1532 return;
1533 }
1534
1535 len = write ? net_buf_max_len(cursor->buf) : cursor->buf->len;
1536 if ((cursor->pos - cursor->buf->data) == len) {
1537 pkt_cursor_jump(pkt, write);
1538 }
1539 }
1540
1541 static void pkt_cursor_update(struct net_pkt *pkt,
1542 size_t length, bool write)
1543 {
1544 struct net_pkt_cursor *cursor = &pkt->cursor;
1545 size_t len;
1546
1547 if (net_pkt_is_being_overwritten(pkt)) {
1548 write = false;
1549 }
1550
1551 len = write ? net_buf_max_len(cursor->buf) : cursor->buf->len;
1552 if (length + (cursor->pos - cursor->buf->data) == len &&
1553 !(net_pkt_is_being_overwritten(pkt) &&
1554 len < net_buf_max_len(cursor->buf))) {
1555 pkt_cursor_jump(pkt, write);
1556 } else {
1557 cursor->pos += length;
1558 }
1559 }
1560
1561 /* Internal function that does all operations (skip/read/write/memset) */
1562 static int net_pkt_cursor_operate(struct net_pkt *pkt,
1563 void *data, size_t length,
1564 bool copy, bool write)
1565 {
1566 /* We use this variable to avoid lengthy lines */
1567 struct net_pkt_cursor *c_op = &pkt->cursor;
1568
1569 while (c_op->buf && length) {
1570 size_t d_len, len;
1571
1572 pkt_cursor_advance(pkt, net_pkt_is_being_overwritten(pkt) ?
1573 false : write);
1574 if (c_op->buf == NULL) {
1575 break;
1576 }
1577
1578 if (write && !net_pkt_is_being_overwritten(pkt)) {
1579 d_len = net_buf_max_len(c_op->buf) -
1580 (c_op->pos - c_op->buf->data);
1581 } else {
1582 d_len = c_op->buf->len - (c_op->pos - c_op->buf->data);
1583 }
1584
1585 if (!d_len) {
1586 break;
1587 }
1588
1589 if (length < d_len) {
1590 len = length;
1591 } else {
1592 len = d_len;
1593 }
1594
1595 if (copy && data) {
1596 memcpy(write ? c_op->pos : data,
1597 write ? data : c_op->pos,
1598 len);
1599 } else if (data) {
1600 memset(c_op->pos, *(int *)data, len);
1601 }
1602
1603 if (write && !net_pkt_is_being_overwritten(pkt)) {
1604 net_buf_add(c_op->buf, len);
1605 }
1606
1607 pkt_cursor_update(pkt, len, write);
1608
1609 if (copy && data) {
1610 data = (uint8_t *) data + len;
1611 }
1612
1613 length -= len;
1614 }
1615
1616 if (length) {
1617 NET_DBG("Still some length to go %zu", length);
1618 return -ENOBUFS;
1619 }
1620
1621 return 0;
1622 }
1623
1624 int net_pkt_skip(struct net_pkt *pkt, size_t skip)
1625 {
1626 NET_DBG("pkt %p skip %zu", pkt, skip);
1627
1628 return net_pkt_cursor_operate(pkt, NULL, skip, false, true);
1629 }
1630
1631 int net_pkt_memset(struct net_pkt *pkt, int byte, size_t amount)
1632 {
1633 NET_DBG("pkt %p byte %d amount %zu", pkt, byte, amount);
1634
1635 return net_pkt_cursor_operate(pkt, &byte, amount, false, true);
1636 }
1637
1638 int net_pkt_read(struct net_pkt *pkt, void *data, size_t length)
1639 {
1640 NET_DBG("pkt %p data %p length %zu", pkt, data, length);
1641
1642 return net_pkt_cursor_operate(pkt, data, length, true, false);
1643 }
1644
1645 int net_pkt_read_be16(struct net_pkt *pkt, uint16_t *data)
1646 {
1647 uint8_t d16[2];
1648 int ret;
1649
1650 ret = net_pkt_read(pkt, d16, sizeof(uint16_t));
1651
1652 *data = d16[0] << 8 | d16[1];
1653
1654 return ret;
1655 }
1656
1657 int net_pkt_read_le16(struct net_pkt *pkt, uint16_t *data)
1658 {
1659 uint8_t d16[2];
1660 int ret;
1661
1662 ret = net_pkt_read(pkt, d16, sizeof(uint16_t));
1663
1664 *data = d16[1] << 8 | d16[0];
1665
1666 return ret;
1667 }
1668
1669 int net_pkt_read_be32(struct net_pkt *pkt, uint32_t *data)
1670 {
1671 uint8_t d32[4];
1672 int ret;
1673
1674 ret = net_pkt_read(pkt, d32, sizeof(uint32_t));
1675
1676 *data = d32[0] << 24 | d32[1] << 16 | d32[2] << 8 | d32[3];
1677
1678 return ret;
1679 }
1680
1681 int net_pkt_write(struct net_pkt *pkt, const void *data, size_t length)
1682 {
1683 NET_DBG("pkt %p data %p length %zu", pkt, data, length);
1684
1685 if (data == pkt->cursor.pos && net_pkt_is_contiguous(pkt, length)) {
1686 return net_pkt_skip(pkt, length);
1687 }
1688
1689 return net_pkt_cursor_operate(pkt, (void *)data, length, true, true);
1690 }
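
/* A small sketch of the cursor based read path implemented above, assuming a
 * plain IPv6 + UDP packet with no extension headers (the offsets are for
 * illustration only):
 *
 *	uint16_t dst_port;
 *
 *	net_pkt_set_overwrite(pkt, true);
 *	net_pkt_cursor_init(pkt);
 *
 *	net_pkt_skip(pkt, NET_IPV6H_LEN + 2);  skip IPv6 header and src port
 *	net_pkt_read_be16(pkt, &dst_port);
 */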
1691
1692 int net_pkt_copy(struct net_pkt *pkt_dst,
1693 struct net_pkt *pkt_src,
1694 size_t length)
1695 {
1696 struct net_pkt_cursor *c_dst = &pkt_dst->cursor;
1697 struct net_pkt_cursor *c_src = &pkt_src->cursor;
1698
1699 while (c_dst->buf && c_src->buf && length) {
1700 size_t s_len, d_len, len;
1701
1702 pkt_cursor_advance(pkt_dst, true);
1703 pkt_cursor_advance(pkt_src, false);
1704
1705 if (!c_dst->buf || !c_src->buf) {
1706 break;
1707 }
1708
1709 s_len = c_src->buf->len - (c_src->pos - c_src->buf->data);
1710 d_len = net_buf_max_len(c_dst->buf) - (c_dst->pos - c_dst->buf->data);
1711 if (length < s_len && length < d_len) {
1712 len = length;
1713 } else {
1714 if (d_len < s_len) {
1715 len = d_len;
1716 } else {
1717 len = s_len;
1718 }
1719 }
1720
1721 if (!len) {
1722 break;
1723 }
1724
1725 memcpy(c_dst->pos, c_src->pos, len);
1726
1727 if (!net_pkt_is_being_overwritten(pkt_dst)) {
1728 net_buf_add(c_dst->buf, len);
1729 }
1730
1731 pkt_cursor_update(pkt_dst, len, true);
1732 pkt_cursor_update(pkt_src, len, false);
1733
1734 length -= len;
1735 }
1736
1737 if (length) {
1738 NET_DBG("Still some length to go %zu", length);
1739 return -ENOBUFS;
1740 }
1741
1742 return 0;
1743 }
1744
1745 static int32_t net_pkt_find_offset(struct net_pkt *pkt, uint8_t *ptr)
1746 {
1747 struct net_buf *buf;
1748 int32_t ret = -EINVAL;
1749 uint16_t offset;
1750
1751 if (!ptr || !pkt || !pkt->buffer) {
1752 return ret;
1753 }
1754
1755 offset = 0U;
1756 buf = pkt->buffer;
1757
1758 while (buf) {
1759 if (buf->data <= ptr && ptr <= (buf->data + buf->len)) {
1760 ret = offset + (ptr - buf->data);
1761 break;
1762 }
1763 offset += buf->len;
1764 buf = buf->frags;
1765 }
1766
1767 return ret;
1768 }
1769
1770 static void clone_pkt_lladdr(struct net_pkt *pkt, struct net_pkt *clone_pkt,
1771 struct net_linkaddr *lladdr)
1772 {
1773 int32_t ll_addr_offset;
1774
1775 if (!lladdr->addr) {
1776 	return;
1777 }

1778 ll_addr_offset = net_pkt_find_offset(pkt, lladdr->addr);
1779
1780 if (ll_addr_offset >= 0) {
1781 net_pkt_cursor_init(clone_pkt);
1782 net_pkt_skip(clone_pkt, ll_addr_offset);
1783 lladdr->addr = net_pkt_cursor_get_pos(clone_pkt);
1784 }
1785 }
1786
1787 #if defined(NET_PKT_HAS_CONTROL_BLOCK)
1788 static inline void clone_pkt_cb(struct net_pkt *pkt, struct net_pkt *clone_pkt)
1789 {
1790 memcpy(net_pkt_cb(clone_pkt), net_pkt_cb(pkt), sizeof(clone_pkt->cb));
1791 }
1792 #else
1793 static inline void clone_pkt_cb(struct net_pkt *pkt, struct net_pkt *clone_pkt)
1794 {
1795 ARG_UNUSED(pkt);
1796 ARG_UNUSED(clone_pkt);
1797 }
1798 #endif
1799
1800 static void clone_pkt_attributes(struct net_pkt *pkt, struct net_pkt *clone_pkt)
1801 {
1802 net_pkt_set_family(clone_pkt, net_pkt_family(pkt));
1803 net_pkt_set_context(clone_pkt, net_pkt_context(pkt));
1804 net_pkt_set_ip_hdr_len(clone_pkt, net_pkt_ip_hdr_len(pkt));
1805 net_pkt_set_ip_dscp(clone_pkt, net_pkt_ip_dscp(pkt));
1806 net_pkt_set_ip_ecn(clone_pkt, net_pkt_ip_ecn(pkt));
1807 net_pkt_set_vlan_tag(clone_pkt, net_pkt_vlan_tag(pkt));
1808 net_pkt_set_timestamp(clone_pkt, net_pkt_timestamp(pkt));
1809 net_pkt_set_priority(clone_pkt, net_pkt_priority(pkt));
1810 net_pkt_set_orig_iface(clone_pkt, net_pkt_orig_iface(pkt));
1811 net_pkt_set_captured(clone_pkt, net_pkt_is_captured(pkt));
1812 net_pkt_set_eof(clone_pkt, net_pkt_eof(pkt));
1813 net_pkt_set_ptp(clone_pkt, net_pkt_is_ptp(pkt));
1814 net_pkt_set_forwarding(clone_pkt, net_pkt_forwarding(pkt));
1815
1816 net_pkt_set_l2_bridged(clone_pkt, net_pkt_is_l2_bridged(pkt));
1817 net_pkt_set_l2_processed(clone_pkt, net_pkt_is_l2_processed(pkt));
1818 net_pkt_set_ll_proto_type(clone_pkt, net_pkt_ll_proto_type(pkt));
1819
1820 if (pkt->buffer && clone_pkt->buffer) {
1821 memcpy(net_pkt_lladdr_src(clone_pkt), net_pkt_lladdr_src(pkt),
1822 sizeof(struct net_linkaddr));
1823 memcpy(net_pkt_lladdr_dst(clone_pkt), net_pkt_lladdr_dst(pkt),
1824 sizeof(struct net_linkaddr));
1825 /* The link header pointers are usable as-is if we
1826  * shallow-copied the buffer, even though they point
1827  * into the fragment memory of that buffer; otherwise
1828  * we have to set the ll address pointers relative to
1829  * the new buffer to avoid dangling pointers into the
1830  * source packet.
1831 */
1832 if (pkt->buffer != clone_pkt->buffer) {
1833 clone_pkt_lladdr(pkt, clone_pkt, net_pkt_lladdr_src(clone_pkt));
1834 clone_pkt_lladdr(pkt, clone_pkt, net_pkt_lladdr_dst(clone_pkt));
1835 }
1836 }
1837
1838 if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) {
1839 net_pkt_set_ipv4_ttl(clone_pkt, net_pkt_ipv4_ttl(pkt));
1840 net_pkt_set_ipv4_opts_len(clone_pkt,
1841 net_pkt_ipv4_opts_len(pkt));
1842 } else if (IS_ENABLED(CONFIG_NET_IPV6) &&
1843 net_pkt_family(pkt) == AF_INET6) {
1844 net_pkt_set_ipv6_hop_limit(clone_pkt,
1845 net_pkt_ipv6_hop_limit(pkt));
1846 net_pkt_set_ipv6_ext_len(clone_pkt, net_pkt_ipv6_ext_len(pkt));
1847 net_pkt_set_ipv6_ext_opt_len(clone_pkt,
1848 net_pkt_ipv6_ext_opt_len(pkt));
1849 net_pkt_set_ipv6_hdr_prev(clone_pkt,
1850 net_pkt_ipv6_hdr_prev(pkt));
1851 net_pkt_set_ipv6_next_hdr(clone_pkt,
1852 net_pkt_ipv6_next_hdr(pkt));
1853 }
1854
1855 clone_pkt_cb(pkt, clone_pkt);
1856 }
1857
1858 static struct net_pkt *net_pkt_clone_internal(struct net_pkt *pkt,
1859 struct k_mem_slab *slab,
1860 k_timeout_t timeout)
1861 {
1862 size_t cursor_offset = net_pkt_get_current_offset(pkt);
1863 bool overwrite = net_pkt_is_being_overwritten(pkt);
1864 struct net_pkt_cursor backup;
1865 struct net_pkt *clone_pkt;
1866
1867 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1868 clone_pkt = pkt_alloc_with_buffer(slab, net_pkt_iface(pkt),
1869 net_pkt_get_len(pkt),
1870 AF_UNSPEC, 0, timeout,
1871 __func__, __LINE__);
1872 #else
1873 clone_pkt = pkt_alloc_with_buffer(slab, net_pkt_iface(pkt),
1874 net_pkt_get_len(pkt),
1875 AF_UNSPEC, 0, timeout);
1876 #endif
1877 if (!clone_pkt) {
1878 return NULL;
1879 }
1880
1881 net_pkt_set_overwrite(pkt, true);
1882 net_pkt_cursor_backup(pkt, &backup);
1883 net_pkt_cursor_init(pkt);
1884
1885 if (net_pkt_copy(clone_pkt, pkt, net_pkt_get_len(pkt))) {
1886 net_pkt_unref(clone_pkt);
1887 net_pkt_cursor_restore(pkt, &backup);
1888 net_pkt_set_overwrite(pkt, overwrite);
1889 return NULL;
1890 }
1891 net_pkt_set_overwrite(clone_pkt, true);
1892
1893 clone_pkt_attributes(pkt, clone_pkt);
1894
1895 net_pkt_cursor_init(clone_pkt);
1896
1897 if (cursor_offset) {
1898 net_pkt_skip(clone_pkt, cursor_offset);
1899 }
1900 net_pkt_set_overwrite(clone_pkt, overwrite);
1901
1902 net_pkt_cursor_restore(pkt, &backup);
1903 net_pkt_set_overwrite(pkt, overwrite);
1904
1905 NET_DBG("Cloned %p to %p", pkt, clone_pkt);
1906
1907 return clone_pkt;
1908 }
1909
1910 struct net_pkt *net_pkt_clone(struct net_pkt *pkt, k_timeout_t timeout)
1911 {
1912 return net_pkt_clone_internal(pkt, pkt->slab, timeout);
1913 }
1914
1915 struct net_pkt *net_pkt_rx_clone(struct net_pkt *pkt, k_timeout_t timeout)
1916 {
1917 return net_pkt_clone_internal(pkt, &rx_pkts, timeout);
1918 }
1919
1920 struct net_pkt *net_pkt_shallow_clone(struct net_pkt *pkt, k_timeout_t timeout)
1921 {
1922 struct net_pkt *clone_pkt;
1923 struct net_buf *buf;
1924
1925 clone_pkt = net_pkt_alloc(timeout);
1926 if (!clone_pkt) {
1927 return NULL;
1928 }
1929
1930 net_pkt_set_iface(clone_pkt, net_pkt_iface(pkt));
1931 clone_pkt->buffer = pkt->buffer;
1932 buf = pkt->buffer;
1933
1934 net_pkt_frag_ref(buf);
1935
1936 clone_pkt_attributes(pkt, clone_pkt);
1937
1938 net_pkt_cursor_restore(clone_pkt, &pkt->cursor);
1939
1940 NET_DBG("Shallow cloned %p to %p", pkt, clone_pkt);
1941
1942 return clone_pkt;
1943 }
1944
1945 size_t net_pkt_remaining_data(struct net_pkt *pkt)
1946 {
1947 struct net_buf *buf;
1948 size_t data_length;
1949
1950 if (!pkt || !pkt->cursor.buf || !pkt->cursor.pos) {
1951 return 0;
1952 }
1953
1954 buf = pkt->cursor.buf;
1955 data_length = buf->len - (pkt->cursor.pos - buf->data);
1956
1957 buf = buf->frags;
1958 while (buf) {
1959 data_length += buf->len;
1960 buf = buf->frags;
1961 }
1962
1963 return data_length;
1964 }
1965
1966 int net_pkt_update_length(struct net_pkt *pkt, size_t length)
1967 {
1968 struct net_buf *buf;
1969
1970 for (buf = pkt->buffer; buf; buf = buf->frags) {
1971 if (buf->len < length) {
1972 length -= buf->len;
1973 } else {
1974 buf->len = length;
1975 length = 0;
1976 }
1977 }
1978
1979 return !length ? 0 : -EINVAL;
1980 }
1981
1982 int net_pkt_pull(struct net_pkt *pkt, size_t length)
1983 {
1984 struct net_pkt_cursor *c_op = &pkt->cursor;
1985
1986 while (length) {
1987 size_t left, rem;
1988
1989 pkt_cursor_advance(pkt, false);
1990
1991 if (!c_op->buf) {
1992 break;
1993 }
1994
1995 left = c_op->buf->len - (c_op->pos - c_op->buf->data);
1996 if (!left) {
1997 break;
1998 }
1999
2000 rem = left;
2001 if (rem > length) {
2002 rem = length;
2003 }
2004
2005 c_op->buf->len -= rem;
2006 left -= rem;
2007 if (left) {
2008 memmove(c_op->pos, c_op->pos+rem, left);
2009 } else {
2010 struct net_buf *buf = pkt->buffer;
2011
2012 if (buf) {
2013 pkt->buffer = buf->frags;
2014 buf->frags = NULL;
2015 net_buf_unref(buf);
2016 }
2017
2018 net_pkt_cursor_init(pkt);
2019 }
2020
2021 length -= rem;
2022 }
2023
2024 net_pkt_cursor_init(pkt);
2025
2026 if (length) {
2027 NET_DBG("Still some length to go %zu", length);
2028 return -ENOBUFS;
2029 }
2030
2031 return 0;
2032 }
2033
2034 uint16_t net_pkt_get_current_offset(struct net_pkt *pkt)
2035 {
2036 struct net_buf *buf = pkt->buffer;
2037 uint16_t offset;
2038
2039 if (!pkt->cursor.buf || !pkt->cursor.pos) {
2040 return 0;
2041 }
2042
2043 offset = 0U;
2044
2045 while (buf != pkt->cursor.buf) {
2046 offset += buf->len;
2047 buf = buf->frags;
2048 }
2049
2050 offset += pkt->cursor.pos - buf->data;
2051
2052 return offset;
2053 }
2054
2055 bool net_pkt_is_contiguous(struct net_pkt *pkt, size_t size)
2056 {
2057 size_t len = net_pkt_get_contiguous_len(pkt);
2058
2059 return len >= size;
2060 }
2061
2062 size_t net_pkt_get_contiguous_len(struct net_pkt *pkt)
2063 {
2064 pkt_cursor_advance(pkt, !net_pkt_is_being_overwritten(pkt));
2065
2066 if (pkt->cursor.buf && pkt->cursor.pos) {
2067 size_t len;
2068
2069 len = net_pkt_is_being_overwritten(pkt) ?
2070 pkt->cursor.buf->len : pkt->cursor.buf->size;
2071 len -= pkt->cursor.pos - pkt->cursor.buf->data;
2072 return len;
2073 }
2074
2075 return 0;
2076 }
2077
2078 void *net_pkt_get_data(struct net_pkt *pkt,
2079 struct net_pkt_data_access *access)
2080 {
2081 if (IS_ENABLED(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)) {
2082 if (!net_pkt_is_contiguous(pkt, access->size)) {
2083 return NULL;
2084 }
2085
2086 return pkt->cursor.pos;
2087 } else {
2088 if (net_pkt_is_contiguous(pkt, access->size)) {
2089 access->data = pkt->cursor.pos;
2090 } else if (net_pkt_is_being_overwritten(pkt)) {
2091 struct net_pkt_cursor backup;
2092
2093 if (!access->data) {
2094 NET_ERR("Uncontiguous data"
2095 " cannot be linearized");
2096 return NULL;
2097 }
2098
2099 net_pkt_cursor_backup(pkt, &backup);
2100
2101 if (net_pkt_read(pkt, access->data, access->size)) {
2102 net_pkt_cursor_restore(pkt, &backup);
2103 return NULL;
2104 }
2105
2106 net_pkt_cursor_restore(pkt, &backup);
2107 }
2108
2109 return access->data;
2110 }
2111
2112 return NULL;
2113 }
2114
2115 int net_pkt_set_data(struct net_pkt *pkt,
2116 struct net_pkt_data_access *access)
2117 {
2118 if (IS_ENABLED(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)) {
2119 return net_pkt_skip(pkt, access->size);
2120 }
2121
2122 return net_pkt_write(pkt, access->data, access->size);
2123 }
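
/* A sketch of how the data access helpers above are typically used when
 * parsing or updating a header in place (assuming the
 * NET_PKT_DATA_ACCESS_DEFINE() helper from net_pkt.h; names are
 * illustrative):
 *
 *	NET_PKT_DATA_ACCESS_DEFINE(udp_access, struct net_udp_hdr);
 *	struct net_udp_hdr *udp_hdr;
 *
 *	udp_hdr = (struct net_udp_hdr *)net_pkt_get_data(pkt, &udp_access);
 *	if (!udp_hdr) {
 *		return -ENOBUFS;
 *	}
 *
 *	... inspect or modify udp_hdr ...
 *
 *	net_pkt_set_data(pkt, &udp_access);
 */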
2124
2125 void net_pkt_init(void)
2126 {
2127 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
2128 NET_DBG("Allocating %u RX (%zu bytes), %u TX (%zu bytes), "
2129 "%d RX data (%u bytes) and %d TX data (%u bytes) buffers",
2130 k_mem_slab_num_free_get(&rx_pkts),
2131 (size_t)(k_mem_slab_num_free_get(&rx_pkts) *
2132 sizeof(struct net_pkt)),
2133 k_mem_slab_num_free_get(&tx_pkts),
2134 (size_t)(k_mem_slab_num_free_get(&tx_pkts) *
2135 sizeof(struct net_pkt)),
2136 get_frees(&rx_bufs), get_size(&rx_bufs),
2137 get_frees(&tx_bufs), get_size(&tx_bufs));
2138 #endif
2139 }
2140