1 /** @file
2  @brief Network packet buffers for IP stack
3 
4  Network data is passed between components using net_pkt.
5  */
6 
7 /*
8  * Copyright (c) 2016 Intel Corporation
9  *
10  * SPDX-License-Identifier: Apache-2.0
11  */
12 
13 #include <zephyr/logging/log.h>
14 LOG_MODULE_REGISTER(net_pkt, CONFIG_NET_PKT_LOG_LEVEL);
15 
16 /* This enables allocation debugging but does not print much output,
17  * as verbose logging can slow things down a lot.
18  */
19 #undef NET_LOG_LEVEL
20 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
21 #define NET_LOG_LEVEL 5
22 #else
23 #define NET_LOG_LEVEL CONFIG_NET_PKT_LOG_LEVEL
24 #endif
25 
26 #include <zephyr/kernel.h>
27 #include <zephyr/toolchain.h>
28 #include <string.h>
29 #include <zephyr/types.h>
30 #include <sys/types.h>
31 
32 #include <zephyr/sys/util.h>
33 
34 #include <zephyr/net/net_core.h>
35 #include <zephyr/net/net_ip.h>
36 #include <zephyr/net_buf.h>
37 #include <zephyr/net/net_pkt.h>
38 #include <zephyr/net/ethernet.h>
39 #include <zephyr/net/udp.h>
40 
41 #include "net_private.h"
42 #include "tcp_internal.h"
43 
44 /* Make sure the net_buf data size is large enough that an IPv6 header
45  * and possible extension headers fit into one network buffer.
46  * The somewhat arbitrary limit of 96 bytes was chosen by monitoring
47  * Wireshark traffic to see what the typical header lengths are.
48  * It is still recommended to use the default value 128, but a smaller
49  * value is allowed if really needed.
50  */
51 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE) && defined(CONFIG_NET_NATIVE_IPV6)
52 BUILD_ASSERT(CONFIG_NET_BUF_DATA_SIZE >= 96);
53 #endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE && CONFIG_NET_NATIVE_IPV6 */
54 
55 /* Find max header size of IP protocol (IPv4 or IPv6) */
56 #if defined(CONFIG_NET_IPV6) || defined(CONFIG_NET_RAW_MODE) || \
57     defined(CONFIG_NET_SOCKETS_PACKET) || defined(CONFIG_NET_SOCKETS_OFFLOAD)
58 #define MAX_IP_PROTO_LEN NET_IPV6H_LEN
59 #else
60 #if defined(CONFIG_NET_IPV4)
61 #define MAX_IP_PROTO_LEN NET_IPV4H_LEN
62 #else
63 #if defined(CONFIG_NET_SOCKETS_CAN)
64 /* TODO: Use the CAN MTU here instead of a hard coded value. There was
65  * a weird circular dependency issue, so this needs more TLC.
66  */
67 #define MAX_IP_PROTO_LEN 8
68 #else
69 #if defined(CONFIG_NET_ETHERNET_BRIDGE) || \
70 	defined(CONFIG_NET_L2_IEEE802154) || \
71 	defined(CONFIG_NET_L2_CUSTOM_IEEE802154)
72 #define MAX_IP_PROTO_LEN 0
73 #else
74 #error "Some packet protocol (e.g. IPv6, IPv4, ETH, IEEE 802.15.4) needs to be selected."
75 #endif /* ETHERNET_BRIDGE / L2_IEEE802154 */
76 #endif /* SOCKETS_CAN */
77 #endif /* IPv4 */
78 #endif /* IPv6 */
79 
80 /* Find max header size of "next" protocol (TCP, UDP or ICMP) */
81 #if defined(CONFIG_NET_TCP)
82 #define MAX_NEXT_PROTO_LEN NET_TCPH_LEN
83 #else
84 #if defined(CONFIG_NET_UDP)
85 #define MAX_NEXT_PROTO_LEN NET_UDPH_LEN
86 #else
87 #if defined(CONFIG_NET_SOCKETS_CAN)
88 #define MAX_NEXT_PROTO_LEN 0
89 #else
90 /* If neither TCP nor UDP is enabled, we still want pings to work. */
91 #define MAX_NEXT_PROTO_LEN NET_ICMPH_LEN
92 #endif /* SOCKETS_CAN */
93 #endif /* UDP */
94 #endif /* TCP */
95 
96 /* Make sure that the IP + TCP/UDP/ICMP headers fit into one fragment. This
97  * makes it possible to cast a fragment pointer to a protocol header struct.
98  */
99 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
100 #if CONFIG_NET_BUF_DATA_SIZE < (MAX_IP_PROTO_LEN + MAX_NEXT_PROTO_LEN)
101 #if defined(STRING2)
102 #undef STRING2
103 #endif
104 #if defined(STRING)
105 #undef STRING
106 #endif
107 #define STRING2(x) #x
108 #define STRING(x) STRING2(x)
109 #pragma message "Data len " STRING(CONFIG_NET_BUF_DATA_SIZE)
110 #pragma message "Minimum len " STRING(MAX_IP_PROTO_LEN + MAX_NEXT_PROTO_LEN)
111 #error "Too small net_buf fragment size"
112 #endif
113 #endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */
114 
115 #if CONFIG_NET_PKT_RX_COUNT <= 0
116 #error "Minimum value for CONFIG_NET_PKT_RX_COUNT is 1"
117 #endif
118 
119 #if CONFIG_NET_PKT_TX_COUNT <= 0
120 #error "Minimum value for CONFIG_NET_PKT_TX_COUNT is 1"
121 #endif
122 
123 #if CONFIG_NET_BUF_RX_COUNT <= 0
124 #error "Minimum value for CONFIG_NET_BUF_RX_COUNT is 1"
125 #endif
126 
127 #if CONFIG_NET_BUF_TX_COUNT <= 0
128 #error "Minimum value for CONFIG_NET_BUF_TX_COUNT is 1"
129 #endif
130 
131 NET_PKT_SLAB_DEFINE(rx_pkts, CONFIG_NET_PKT_RX_COUNT);
132 NET_PKT_SLAB_DEFINE(tx_pkts, CONFIG_NET_PKT_TX_COUNT);
133 
134 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
135 
136 NET_BUF_POOL_FIXED_DEFINE(rx_bufs, CONFIG_NET_BUF_RX_COUNT, CONFIG_NET_BUF_DATA_SIZE,
137 			  CONFIG_NET_PKT_BUF_USER_DATA_SIZE, NULL);
138 NET_BUF_POOL_FIXED_DEFINE(tx_bufs, CONFIG_NET_BUF_TX_COUNT, CONFIG_NET_BUF_DATA_SIZE,
139 			  CONFIG_NET_PKT_BUF_USER_DATA_SIZE, NULL);
140 
141 #else /* !CONFIG_NET_BUF_FIXED_DATA_SIZE */
142 
143 NET_BUF_POOL_VAR_DEFINE(rx_bufs, CONFIG_NET_BUF_RX_COUNT, CONFIG_NET_PKT_BUF_RX_DATA_POOL_SIZE,
144 			CONFIG_NET_PKT_BUF_USER_DATA_SIZE, NULL);
145 NET_BUF_POOL_VAR_DEFINE(tx_bufs, CONFIG_NET_BUF_TX_COUNT, CONFIG_NET_PKT_BUF_TX_DATA_POOL_SIZE,
146 			CONFIG_NET_PKT_BUF_USER_DATA_SIZE, NULL);
147 
148 #endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */
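
/* A note on sizing (illustrative only): the slabs and pools above are sized
 * purely by Kconfig, so a project tunes them in its own .conf file. The
 * values below are example numbers, not recommendations:
 *
 *	CONFIG_NET_PKT_RX_COUNT=14
 *	CONFIG_NET_PKT_TX_COUNT=14
 *	CONFIG_NET_BUF_RX_COUNT=36
 *	CONFIG_NET_BUF_TX_COUNT=36
 *	CONFIG_NET_BUF_DATA_SIZE=128
 */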
149 
150 /* Allocation tracking is only available if separately enabled */
151 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
152 struct net_pkt_alloc {
153 	union {
154 		struct net_pkt *pkt;
155 		struct net_buf *buf;
156 		void *alloc_data;
157 	};
158 	const char *func_alloc;
159 	const char *func_free;
160 	uint16_t line_alloc;
161 	uint16_t line_free;
162 	uint8_t in_use;
163 	bool is_pkt;
164 };
165 
166 #define MAX_NET_PKT_ALLOCS (CONFIG_NET_PKT_RX_COUNT + \
167 			    CONFIG_NET_PKT_TX_COUNT + \
168 			    CONFIG_NET_BUF_RX_COUNT + \
169 			    CONFIG_NET_BUF_TX_COUNT + \
170 			    CONFIG_NET_DEBUG_NET_PKT_EXTERNALS)
171 
172 static struct net_pkt_alloc net_pkt_allocs[MAX_NET_PKT_ALLOCS];
173 
174 static void net_pkt_alloc_add(void *alloc_data, bool is_pkt,
175 			      const char *func, int line)
176 {
177 	int i;
178 
179 	for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
180 		if (net_pkt_allocs[i].in_use) {
181 			continue;
182 		}
183 
184 		net_pkt_allocs[i].in_use = true;
185 		net_pkt_allocs[i].is_pkt = is_pkt;
186 		net_pkt_allocs[i].alloc_data = alloc_data;
187 		net_pkt_allocs[i].func_alloc = func;
188 		net_pkt_allocs[i].line_alloc = line;
189 
190 		return;
191 	}
192 }
193 
194 static void net_pkt_alloc_del(void *alloc_data, const char *func, int line)
195 {
196 	int i;
197 
198 	for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
199 		if (net_pkt_allocs[i].in_use &&
200 		    net_pkt_allocs[i].alloc_data == alloc_data) {
201 			net_pkt_allocs[i].func_free = func;
202 			net_pkt_allocs[i].line_free = line;
203 			net_pkt_allocs[i].in_use = false;
204 
205 			return;
206 		}
207 	}
208 }
209 
210 static bool net_pkt_alloc_find(void *alloc_data,
211 			       const char **func_free,
212 			       int *line_free)
213 {
214 	int i;
215 
216 	for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
217 		if (!net_pkt_allocs[i].in_use &&
218 		    net_pkt_allocs[i].alloc_data == alloc_data) {
219 			*func_free = net_pkt_allocs[i].func_free;
220 			*line_free = net_pkt_allocs[i].line_free;
221 
222 			return true;
223 		}
224 	}
225 
226 	return false;
227 }
228 
229 void net_pkt_allocs_foreach(net_pkt_allocs_cb_t cb, void *user_data)
230 {
231 	int i;
232 
233 	for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
234 		if (net_pkt_allocs[i].in_use) {
235 			cb(net_pkt_allocs[i].is_pkt ?
236 			   net_pkt_allocs[i].pkt : NULL,
237 			   net_pkt_allocs[i].is_pkt ?
238 			   NULL : net_pkt_allocs[i].buf,
239 			   net_pkt_allocs[i].func_alloc,
240 			   net_pkt_allocs[i].line_alloc,
241 			   net_pkt_allocs[i].func_free,
242 			   net_pkt_allocs[i].line_free,
243 			   net_pkt_allocs[i].in_use,
244 			   user_data);
245 		}
246 	}
247 
248 	for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
249 		if (!net_pkt_allocs[i].in_use) {
250 			cb(net_pkt_allocs[i].is_pkt ?
251 			   net_pkt_allocs[i].pkt : NULL,
252 			   net_pkt_allocs[i].is_pkt ?
253 			   NULL : net_pkt_allocs[i].buf,
254 			   net_pkt_allocs[i].func_alloc,
255 			   net_pkt_allocs[i].line_alloc,
256 			   net_pkt_allocs[i].func_free,
257 			   net_pkt_allocs[i].line_free,
258 			   net_pkt_allocs[i].in_use,
259 			   user_data);
260 		}
261 	}
262 }
263 #else
264 #define net_pkt_alloc_add(alloc_data, is_pkt, func, line)
265 #define net_pkt_alloc_del(alloc_data, func, line)
266 #define net_pkt_alloc_find(alloc_data, func_free, line_free) false
267 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC */
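
/* Illustrative sketch (assumed caller code, not part of this file): with
 * CONFIG_NET_DEBUG_NET_PKT_ALLOC enabled, the records above can be walked
 * with a callback whose argument order follows the cb() call in
 * net_pkt_allocs_foreach():
 *
 *	static void dump_alloc(struct net_pkt *pkt, struct net_buf *buf,
 *			       const char *func_alloc, int line_alloc,
 *			       const char *func_free, int line_free,
 *			       bool in_use, void *user_data)
 *	{
 *		printk("%p allocated by %s():%d\n",
 *		       pkt ? (void *)pkt : (void *)buf, func_alloc, line_alloc);
 *	}
 *
 *	net_pkt_allocs_foreach(dump_alloc, NULL);
 */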
268 
269 #if defined(NET_PKT_DEBUG_ENABLED)
270 
271 #define NET_FRAG_CHECK_IF_NOT_IN_USE(frag, ref)				\
272 	do {								\
273 		if (!(ref)) {                                           \
274 			NET_ERR("**ERROR** frag %p not in use (%s:%s():%d)", \
275 				frag, __FILE__, __func__, __LINE__);     \
276 		}                                                       \
277 	} while (false)
278 
279 const char *net_pkt_slab2str(struct k_mem_slab *slab)
280 {
281 	if (slab == &rx_pkts) {
282 		return "RX";
283 	} else if (slab == &tx_pkts) {
284 		return "TX";
285 	}
286 
287 	return "EXT";
288 }
289 
290 const char *net_pkt_pool2str(struct net_buf_pool *pool)
291 {
292 	if (pool == &rx_bufs) {
293 		return "RDATA";
294 	} else if (pool == &tx_bufs) {
295 		return "TDATA";
296 	}
297 
298 	return "EDATA";
299 }
300 
301 static inline int16_t get_frees(struct net_buf_pool *pool)
302 {
303 #if defined(CONFIG_NET_BUF_POOL_USAGE)
304 	return atomic_get(&pool->avail_count);
305 #else
306 	return 0;
307 #endif
308 }
309 
310 void net_pkt_print_frags(struct net_pkt *pkt)
311 {
312 	struct net_buf *frag;
313 	size_t total = 0;
314 	int count = 0, frag_size = 0;
315 
316 	if (!pkt) {
317 		NET_INFO("pkt %p", pkt);
318 		return;
319 	}
320 
321 	NET_INFO("pkt %p frags %p", pkt, pkt->frags);
322 
323 	NET_ASSERT(pkt->frags);
324 
325 	frag = pkt->frags;
326 	while (frag) {
327 		total += frag->len;
328 
329 		frag_size = net_buf_max_len(frag);
330 
331 		NET_INFO("[%d] frag %p len %d max len %u size %d pool %p",
332 			 count, frag, frag->len, frag->size,
333 			 frag_size, net_buf_pool_get(frag->pool_id));
334 
335 		count++;
336 
337 		frag = frag->frags;
338 	}
339 
340 	NET_INFO("Total data size %zu, occupied %d bytes, utilization %zu%%",
341 		 total, count * frag_size,
342 		 count ? (total * 100) / (count * frag_size) : 0);
343 }
344 #endif
345 
346 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
347 static inline const char *get_name(struct net_buf_pool *pool)
348 {
349 #if defined(CONFIG_NET_BUF_POOL_USAGE)
350 	return pool->name;
351 #else
352 	return "?";
353 #endif
354 }
355 
356 static inline int16_t get_size(struct net_buf_pool *pool)
357 {
358 #if defined(CONFIG_NET_BUF_POOL_USAGE)
359 	return pool->pool_size;
360 #else
361 	return 0;
362 #endif
363 }
364 
365 static inline const char *slab2str(struct k_mem_slab *slab)
366 {
367 	return net_pkt_slab2str(slab);
368 }
369 
370 static inline const char *pool2str(struct net_buf_pool *pool)
371 {
372 	return net_pkt_pool2str(pool);
373 }
374 #endif /* CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG */
375 
376 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
377 struct net_buf *net_pkt_get_reserve_data_debug(struct net_buf_pool *pool,
378 					       size_t min_len,
379 					       k_timeout_t timeout,
380 					       const char *caller,
381 					       int line)
382 #else /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
383 struct net_buf *net_pkt_get_reserve_data(struct net_buf_pool *pool,
384 					 size_t min_len, k_timeout_t timeout)
385 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
386 {
387 	struct net_buf *frag;
388 
389 	if (k_is_in_isr()) {
390 		timeout = K_NO_WAIT;
391 	}
392 
393 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
394 	if (min_len > CONFIG_NET_BUF_DATA_SIZE) {
395 		NET_ERR("Requested too large fragment. Increase CONFIG_NET_BUF_DATA_SIZE.");
396 		return NULL;
397 	}
398 
399 	frag = net_buf_alloc(pool, timeout);
400 #else
401 	frag = net_buf_alloc_len(pool, min_len, timeout);
402 #endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */
403 
404 	if (!frag) {
405 		return NULL;
406 	}
407 
408 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
409 	NET_FRAG_CHECK_IF_NOT_IN_USE(frag, frag->ref + 1U);
410 #endif
411 
412 	net_pkt_alloc_add(frag, false, caller, line);
413 
414 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
415 	NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
416 		pool2str(pool), get_name(pool), get_frees(pool),
417 		frag, frag->ref, caller, line);
418 #endif
419 
420 	return frag;
421 }
422 
423 /* Get a fragment, trying to figure out the pool from which to get
424  * the data.
425  */
426 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
427 struct net_buf *net_pkt_get_frag_debug(struct net_pkt *pkt, size_t min_len,
428 				       k_timeout_t timeout,
429 				       const char *caller, int line)
430 #else
431 struct net_buf *net_pkt_get_frag(struct net_pkt *pkt, size_t min_len,
432 				 k_timeout_t timeout)
433 #endif
434 {
435 #if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL)
436 	struct net_context *context;
437 
438 	context = net_pkt_context(pkt);
439 	if (context && context->data_pool) {
440 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
441 		return net_pkt_get_reserve_data_debug(context->data_pool(),
442 						      min_len, timeout,
443 						      caller, line);
444 #else
445 		return net_pkt_get_reserve_data(context->data_pool(), min_len,
446 						timeout);
447 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
448 	}
449 #endif /* CONFIG_NET_CONTEXT_NET_PKT_POOL */
450 
451 	if (pkt->slab == &rx_pkts) {
452 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
453 		return net_pkt_get_reserve_rx_data_debug(min_len, timeout,
454 							 caller, line);
455 #else
456 		return net_pkt_get_reserve_rx_data(min_len, timeout);
457 #endif
458 	}
459 
460 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
461 	return net_pkt_get_reserve_tx_data_debug(min_len, timeout, caller, line);
462 #else
463 	return net_pkt_get_reserve_tx_data(min_len, timeout);
464 #endif
465 }
466 
467 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
468 struct net_buf *net_pkt_get_reserve_rx_data_debug(size_t min_len, k_timeout_t timeout,
469 						  const char *caller, int line)
470 {
471 	return net_pkt_get_reserve_data_debug(&rx_bufs, min_len, timeout, caller, line);
472 }
473 
474 struct net_buf *net_pkt_get_reserve_tx_data_debug(size_t min_len, k_timeout_t timeout,
475 						  const char *caller, int line)
476 {
477 	return net_pkt_get_reserve_data_debug(&tx_bufs, min_len, timeout, caller, line);
478 }
479 
480 #else /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
481 
482 struct net_buf *net_pkt_get_reserve_rx_data(size_t min_len, k_timeout_t timeout)
483 {
484 	return net_pkt_get_reserve_data(&rx_bufs, min_len, timeout);
485 }
486 
487 struct net_buf *net_pkt_get_reserve_tx_data(size_t min_len, k_timeout_t timeout)
488 {
489 	return net_pkt_get_reserve_data(&tx_bufs, min_len, timeout);
490 }
491 
492 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
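
/* Illustrative sketch (assumed caller code): a driver-style user can pull a
 * data fragment from the RX pool and attach it to a packet with the helpers
 * above; frame_len and pkt are assumed to come from the caller:
 *
 *	struct net_buf *frag;
 *
 *	frag = net_pkt_get_reserve_rx_data(frame_len, K_NO_WAIT);
 *	if (!frag) {
 *		return -ENOMEM;
 *	}
 *
 *	net_pkt_frag_add(pkt, frag);
 */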
493 
494 
495 #if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL)
496 static inline struct k_mem_slab *get_tx_slab(struct net_context *context)
497 {
498 	if (context->tx_slab) {
499 		return context->tx_slab();
500 	}
501 
502 	return NULL;
503 }
504 
505 static inline struct net_buf_pool *get_data_pool(struct net_context *context)
506 {
507 	if (context->data_pool) {
508 		return context->data_pool();
509 	}
510 
511 	return NULL;
512 }
513 #else
514 #define get_tx_slab(...) NULL
515 #define get_data_pool(...) NULL
516 #endif /* CONFIG_NET_CONTEXT_NET_PKT_POOL */
517 
518 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
519 void net_pkt_unref_debug(struct net_pkt *pkt, const char *caller, int line)
520 {
521 	struct net_buf *frag;
522 
523 #else
524 void net_pkt_unref(struct net_pkt *pkt)
525 {
526 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
527 	atomic_val_t ref;
528 
529 	if (!pkt) {
530 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
531 		NET_ERR("*** ERROR *** pkt %p (%s():%d)", pkt, caller, line);
532 #endif
533 		return;
534 	}
535 
536 	do {
537 		ref = atomic_get(&pkt->atomic_ref);
538 		if (!ref) {
539 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
540 			const char *func_freed;
541 			int line_freed;
542 
543 			if (net_pkt_alloc_find(pkt, &func_freed, &line_freed)) {
544 				NET_ERR("*** ERROR *** pkt %p is freed already "
545 					"by %s():%d (%s():%d)",
546 					pkt, func_freed, line_freed, caller,
547 					line);
548 			} else {
549 				NET_ERR("*** ERROR *** pkt %p is freed already "
550 					"(%s():%d)", pkt, caller, line);
551 			}
552 #endif
553 			return;
554 		}
555 	} while (!atomic_cas(&pkt->atomic_ref, ref, ref - 1));
556 
557 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
558 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
559 	NET_DBG("%s [%d] pkt %p ref %ld frags %p (%s():%d)",
560 		slab2str(pkt->slab), k_mem_slab_num_free_get(pkt->slab),
561 		pkt, ref - 1, pkt->frags, caller, line);
562 #endif
563 	if (ref > 1) {
564 		goto done;
565 	}
566 
567 	frag = pkt->frags;
568 	while (frag) {
569 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
570 		NET_DBG("%s (%s) [%d] frag %p ref %d frags %p (%s():%d)",
571 			pool2str(net_buf_pool_get(frag->pool_id)),
572 			get_name(net_buf_pool_get(frag->pool_id)),
573 			get_frees(net_buf_pool_get(frag->pool_id)), frag,
574 			frag->ref - 1U, frag->frags, caller, line);
575 #endif
576 
577 		if (!frag->ref) {
578 			const char *func_freed;
579 			int line_freed;
580 
581 			if (net_pkt_alloc_find(frag,
582 					       &func_freed, &line_freed)) {
583 				NET_ERR("*** ERROR *** frag %p is freed "
584 					"already by %s():%d (%s():%d)",
585 					frag, func_freed, line_freed,
586 					caller, line);
587 			} else {
588 				NET_ERR("*** ERROR *** frag %p is freed "
589 					"already (%s():%d)",
590 					frag, caller, line);
591 			}
592 		}
593 
594 		net_pkt_alloc_del(frag, caller, line);
595 
596 		frag = frag->frags;
597 	}
598 
599 	net_pkt_alloc_del(pkt, caller, line);
600 done:
601 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
602 
603 	if (ref > 1) {
604 		return;
605 	}
606 
607 	if (pkt->frags) {
608 		net_pkt_frag_unref(pkt->frags);
609 	}
610 
611 	if (IS_ENABLED(CONFIG_NET_DEBUG_NET_PKT_NON_FRAGILE_ACCESS)) {
612 		pkt->buffer = NULL;
613 		net_pkt_cursor_init(pkt);
614 	}
615 
616 	k_mem_slab_free(pkt->slab, (void *)pkt);
617 }
618 
619 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
620 struct net_pkt *net_pkt_ref_debug(struct net_pkt *pkt, const char *caller,
621 				  int line)
622 #else
623 struct net_pkt *net_pkt_ref(struct net_pkt *pkt)
624 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
625 {
626 	atomic_val_t ref;
627 
628 	do {
629 		ref = pkt ? atomic_get(&pkt->atomic_ref) : 0;
630 		if (!ref) {
631 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
632 			NET_ERR("*** ERROR *** pkt %p (%s():%d)",
633 				pkt, caller, line);
634 #endif
635 			return NULL;
636 		}
637 	} while (!atomic_cas(&pkt->atomic_ref, ref, ref + 1));
638 
639 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
640 	NET_DBG("%s [%d] pkt %p ref %ld (%s():%d)",
641 		slab2str(pkt->slab), k_mem_slab_num_free_get(pkt->slab),
642 		pkt, ref + 1, caller, line);
643 #endif
644 
645 
646 	return pkt;
647 }
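
/* Illustrative sketch: each additional owner of a packet keeps its own
 * net_pkt_ref()/net_pkt_unref() pair; the fragments are released only when
 * the last reference is dropped:
 *
 *	net_pkt_ref(pkt);	// keep pkt alive while it sits in our queue
 *	...
 *	net_pkt_unref(pkt);	// drop our reference, frees frags at zero
 */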
648 
649 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
650 struct net_buf *net_pkt_frag_ref_debug(struct net_buf *frag,
651 				       const char *caller, int line)
652 #else
653 struct net_buf *net_pkt_frag_ref(struct net_buf *frag)
654 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
655 {
656 	if (!frag) {
657 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
658 		NET_ERR("*** ERROR *** frag %p (%s():%d)", frag, caller, line);
659 #endif
660 		return NULL;
661 	}
662 
663 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
664 	NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
665 		pool2str(net_buf_pool_get(frag->pool_id)),
666 		get_name(net_buf_pool_get(frag->pool_id)),
667 		get_frees(net_buf_pool_get(frag->pool_id)),
668 		frag, frag->ref + 1U, caller, line);
669 #endif
670 
671 	return net_buf_ref(frag);
672 }
673 
674 
675 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
676 void net_pkt_frag_unref_debug(struct net_buf *frag,
677 			      const char *caller, int line)
678 #else
679 void net_pkt_frag_unref(struct net_buf *frag)
680 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
681 {
682 	if (!frag) {
683 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
684 		NET_ERR("*** ERROR *** frag %p (%s():%d)", frag, caller, line);
685 #endif
686 		return;
687 	}
688 
689 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
690 	NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
691 		pool2str(net_buf_pool_get(frag->pool_id)),
692 		get_name(net_buf_pool_get(frag->pool_id)),
693 		get_frees(net_buf_pool_get(frag->pool_id)),
694 		frag, frag->ref - 1U, caller, line);
695 #endif
696 
697 	if (frag->ref == 1U) {
698 		net_pkt_alloc_del(frag, caller, line);
699 	}
700 
701 	net_buf_unref(frag);
702 }
703 
704 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
705 struct net_buf *net_pkt_frag_del_debug(struct net_pkt *pkt,
706 				       struct net_buf *parent,
707 				       struct net_buf *frag,
708 				       const char *caller, int line)
709 #else
710 struct net_buf *net_pkt_frag_del(struct net_pkt *pkt,
711 				 struct net_buf *parent,
712 				 struct net_buf *frag)
713 #endif
714 {
715 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
716 	NET_DBG("pkt %p parent %p frag %p ref %u (%s:%d)",
717 		pkt, parent, frag, frag->ref, caller, line);
718 #endif
719 
720 	if (pkt->frags == frag && !parent) {
721 		struct net_buf *tmp;
722 
723 		if (frag->ref == 1U) {
724 			net_pkt_alloc_del(frag, caller, line);
725 		}
726 
727 		tmp = net_buf_frag_del(NULL, frag);
728 		pkt->frags = tmp;
729 
730 		return tmp;
731 	}
732 
733 	if (frag->ref == 1U) {
734 		net_pkt_alloc_del(frag, caller, line);
735 	}
736 
737 	return net_buf_frag_del(parent, frag);
738 }
739 
740 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
741 void net_pkt_frag_add_debug(struct net_pkt *pkt, struct net_buf *frag,
742 			    const char *caller, int line)
743 #else
744 void net_pkt_frag_add(struct net_pkt *pkt, struct net_buf *frag)
745 #endif
746 {
747 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
748 	NET_DBG("pkt %p frag %p (%s:%d)", pkt, frag, caller, line);
749 #endif
750 
751 	/* We do not use net_buf_frag_add() here, as it would take one more
752 	 * reference on the frag when !pkt->frags
753 	 */
754 	if (!pkt->frags) {
755 		pkt->frags = frag;
756 		return;
757 	}
758 
759 	net_buf_frag_insert(net_buf_frag_last(pkt->frags), frag);
760 }
761 
762 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
763 void net_pkt_frag_insert_debug(struct net_pkt *pkt, struct net_buf *frag,
764 			       const char *caller, int line)
765 #else
766 void net_pkt_frag_insert(struct net_pkt *pkt, struct net_buf *frag)
767 #endif
768 {
769 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
770 	NET_DBG("pkt %p frag %p (%s:%d)", pkt, frag, caller, line);
771 #endif
772 
773 	net_buf_frag_last(frag)->frags = pkt->frags;
774 	pkt->frags = frag;
775 }
776 
777 void net_pkt_compact(struct net_pkt *pkt)
778 {
779 	struct net_buf *frag, *prev;
780 
781 	NET_DBG("Compacting data in pkt %p", pkt);
782 
783 	frag = pkt->frags;
784 	prev = NULL;
785 
786 	while (frag) {
787 		if (frag->frags) {
788 			/* Copy as much data as fits from the next fragment
789 			 * into this fragment.
790 			 */
791 			size_t copy_len;
792 
793 			copy_len = frag->frags->len;
794 			if (copy_len > net_buf_tailroom(frag)) {
795 				copy_len = net_buf_tailroom(frag);
796 			}
797 
798 			memcpy(net_buf_tail(frag), frag->frags->data, copy_len);
799 			net_buf_add(frag, copy_len);
800 
801 			memmove(frag->frags->data,
802 				frag->frags->data + copy_len,
803 				frag->frags->len - copy_len);
804 
805 			frag->frags->len -= copy_len;
806 
807 			/* Is there any more space left in this fragment? */
808 			if (net_buf_tailroom(frag)) {
809 				/* There is. This also means that the next
810 				 * fragment is empty as otherwise we could
811 				 * not have copied all the data. Remove the next
812 				 * fragment as there is no data left in it.
813 				 */
814 				net_pkt_frag_del(pkt, frag, frag->frags);
815 
816 				/* Then check next fragment */
817 				continue;
818 			}
819 		} else {
820 			if (!frag->len) {
821 				/* Remove the last fragment because there is no
822 				 * data in it.
823 				 */
824 				net_pkt_frag_del(pkt, prev, frag);
825 
826 				break;
827 			}
828 		}
829 
830 		prev = frag;
831 		frag = frag->frags;
832 	}
833 }
834 
835 void net_pkt_get_info(struct k_mem_slab **rx,
836 		      struct k_mem_slab **tx,
837 		      struct net_buf_pool **rx_data,
838 		      struct net_buf_pool **tx_data)
839 {
840 	if (rx) {
841 		*rx = &rx_pkts;
842 	}
843 
844 	if (tx) {
845 		*tx = &tx_pkts;
846 	}
847 
848 	if (rx_data) {
849 		*rx_data = &rx_bufs;
850 	}
851 
852 	if (tx_data) {
853 		*tx_data = &tx_bufs;
854 	}
855 }
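
/* Illustrative sketch (assumed caller code): net_pkt_get_info() lets e.g. a
 * shell command or a test inspect the slabs and pools defined above:
 *
 *	struct k_mem_slab *rx, *tx;
 *	struct net_buf_pool *rx_data, *tx_data;
 *
 *	net_pkt_get_info(&rx, &tx, &rx_data, &tx_data);
 *	printk("free RX pkts %u free TX pkts %u\n",
 *	       k_mem_slab_num_free_get(rx), k_mem_slab_num_free_get(tx));
 */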
856 
857 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
858 void net_pkt_print(void)
859 {
860 	NET_DBG("TX %u RX %u RDATA %d TDATA %d",
861 		k_mem_slab_num_free_get(&tx_pkts),
862 		k_mem_slab_num_free_get(&rx_pkts),
863 		get_frees(&rx_bufs), get_frees(&tx_bufs));
864 }
865 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC */
866 
867 /* New allocator and API starts here */
868 
869 #if defined(CONFIG_NET_PKT_ALLOC_STATS)
870 static struct net_pkt_alloc_stats_slab *find_alloc_stats(struct k_mem_slab *slab)
871 {
872 	STRUCT_SECTION_FOREACH(net_pkt_alloc_stats_slab, tmp) {
873 		if (tmp->slab == slab) {
874 			return tmp;
875 		}
876 	}
877 
878 	NET_ASSERT("slab not found");
879 
880 	/* This will force a crash which is intended in this case as the
881 	 * slab should always have a valid value.
882 	 */
883 	return NULL;
884 }
885 
886 #define NET_PKT_ALLOC_STATS_UPDATE(pkt, alloc_size, start) ({		\
887 	if (pkt->alloc_stats == NULL) {					\
888 		pkt->alloc_stats = find_alloc_stats(pkt->slab);		\
889 	}								\
890 	pkt->alloc_stats->ok.count++;					\
891 	if (pkt->alloc_stats->ok.count == 0) {				\
892 		pkt->alloc_stats->ok.alloc_sum = 0ULL;			\
893 		pkt->alloc_stats->ok.time_sum = 0ULL;			\
894 	} else {							\
895 		pkt->alloc_stats->ok.alloc_sum += (uint64_t)alloc_size;	\
896 		pkt->alloc_stats->ok.time_sum += (uint64_t)(k_cycle_get_32() - start); \
897 	}								\
898 									\
899 	pkt->alloc_stats->ok.count;					\
900 })
901 
902 #define NET_PKT_ALLOC_STATS_FAIL(pkt, alloc_size, start) ({		\
903 	if (pkt->alloc_stats == NULL) {					\
904 		pkt->alloc_stats = find_alloc_stats(pkt->slab);		\
905 	}								\
906 	pkt->alloc_stats->fail.count++;					\
907 	if (pkt->alloc_stats->fail.count == 0) {			\
908 		pkt->alloc_stats->fail.alloc_sum = 0ULL;		\
909 		pkt->alloc_stats->fail.time_sum = 0ULL;			\
910 	} else {							\
911 		pkt->alloc_stats->fail.alloc_sum += (uint64_t)alloc_size;\
912 		pkt->alloc_stats->fail.time_sum += (uint64_t)(k_cycle_get_32() - start); \
913 	}								\
914 									\
915 	pkt->alloc_stats->fail.count;					\
916 })
917 #else
918 #define NET_PKT_ALLOC_STATS_UPDATE(pkt, alloc_size, start) ({ 0; })
919 #define NET_PKT_ALLOC_STATS_FAIL(pkt, alloc_size, start) ({ 0; })
920 #endif /* CONFIG_NET_PKT_ALLOC_STATS */
921 
922 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
923 
924 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
925 static struct net_buf *pkt_alloc_buffer(struct net_pkt *pkt,
926 					struct net_buf_pool *pool,
927 					size_t size, size_t headroom,
928 					k_timeout_t timeout,
929 					const char *caller, int line)
930 #else
931 static struct net_buf *pkt_alloc_buffer(struct net_pkt *pkt,
932 					struct net_buf_pool *pool,
933 					size_t size, size_t headroom,
934 					k_timeout_t timeout)
935 #endif
936 {
937 #if defined(CONFIG_NET_PKT_ALLOC_STATS)
938 	uint32_t start_time = k_cycle_get_32();
939 	size_t total_size = size;
940 #else
941 	ARG_UNUSED(pkt);
942 #endif
943 
944 	k_timepoint_t end = sys_timepoint_calc(timeout);
945 	struct net_buf *first = NULL;
946 	struct net_buf *current = NULL;
947 
948 	do {
949 		struct net_buf *new;
950 
951 		new = net_buf_alloc_fixed(pool, timeout);
952 		if (!new) {
953 			goto error;
954 		}
955 
956 		if (!first && !current) {
957 			first = new;
958 		} else {
959 			current->frags = new;
960 		}
961 
962 		current = new;
963 
964 		/* If there is headroom reserved, then allocate that to the
965 		 * first buf.
966 		 */
967 		if (current == first && headroom > 0) {
968 			if (current->size > (headroom + size)) {
969 				current->size = size + headroom;
970 
971 				size = 0U;
972 			} else {
973 				size -= current->size - headroom;
974 			}
975 		} else {
976 			if (current->size > size) {
977 				current->size = size;
978 			}
979 
980 			size -= current->size;
981 		}
982 
983 		timeout = sys_timepoint_timeout(end);
984 
985 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
986 		NET_FRAG_CHECK_IF_NOT_IN_USE(new, new->ref + 1);
987 
988 		net_pkt_alloc_add(new, false, caller, line);
989 
990 		NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
991 			pool2str(pool), get_name(pool), get_frees(pool),
992 			new, new->ref, caller, line);
993 #endif
994 	} while (size);
995 
996 #if defined(CONFIG_NET_PKT_ALLOC_STATS)
997 	if (NET_PKT_ALLOC_STATS_UPDATE(pkt, total_size, start_time) == 0) {
998 		NET_DBG("pkt %p %s stats rollover", pkt, "ok");
999 	}
1000 #endif
1001 
1002 	return first;
1003 error:
1004 	if (first) {
1005 		net_buf_unref(first);
1006 	}
1007 
1008 #if defined(CONFIG_NET_PKT_ALLOC_STATS)
1009 	if (NET_PKT_ALLOC_STATS_FAIL(pkt, total_size, start_time) == 0) {
1010 		NET_DBG("pkt %p %s stats rollover", pkt, "fail");
1011 	}
1012 #endif
1013 
1014 	return NULL;
1015 }
1016 
1017 #else /* !CONFIG_NET_BUF_FIXED_DATA_SIZE */
1018 
1019 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1020 static struct net_buf *pkt_alloc_buffer(struct net_pkt *pkt,
1021 					struct net_buf_pool *pool,
1022 					size_t size, size_t headroom,
1023 					k_timeout_t timeout,
1024 					const char *caller, int line)
1025 #else
1026 static struct net_buf *pkt_alloc_buffer(struct net_pkt *pkt,
1027 					struct net_buf_pool *pool,
1028 					size_t size, size_t headroom,
1029 					k_timeout_t timeout)
1030 #endif
1031 {
1032 	struct net_buf *buf;
1033 
1034 #if defined(CONFIG_NET_PKT_ALLOC_STATS)
1035 	uint32_t start_time = k_cycle_get_32();
1036 	size_t total_size = size + headroom;
1037 #else
1038 	ARG_UNUSED(pkt);
1039 #endif
1040 
1041 	buf = net_buf_alloc_len(pool, size + headroom, timeout);
1042 
1043 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
1044 	NET_FRAG_CHECK_IF_NOT_IN_USE(buf, buf->ref + 1);
1045 
1046 	net_pkt_alloc_add(buf, false, caller, line);
1047 
1048 	NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
1049 		pool2str(pool), get_name(pool), get_frees(pool),
1050 		buf, buf->ref, caller, line);
1051 #endif
1052 
1053 #if defined(CONFIG_NET_PKT_ALLOC_STATS)
1054 	if (buf) {
1055 		if (NET_PKT_ALLOC_STATS_UPDATE(pkt, total_size, start_time) == 0) {
1056 			NET_DBG("pkt %p %s stats rollover", pkt, "ok");
1057 		}
1058 	} else {
1059 		if (NET_PKT_ALLOC_STATS_FAIL(pkt, total_size, start_time) == 0) {
1060 			NET_DBG("pkt %p %s stats rollover", pkt, "fail");
1061 		}
1062 	}
1063 #endif /* CONFIG_NET_PKT_ALLOC_STATS */
1064 
1065 	return buf;
1066 }
1067 
1068 #endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */
1069 
1070 static size_t pkt_buffer_length(struct net_pkt *pkt,
1071 				size_t size,
1072 				enum net_ip_protocol proto,
1073 				size_t existing)
1074 {
1075 	sa_family_t family = net_pkt_family(pkt);
1076 	size_t max_len;
1077 
1078 	if (net_pkt_iface(pkt)) {
1079 		max_len = net_if_get_mtu(net_pkt_iface(pkt));
1080 	} else {
1081 		max_len = 0;
1082 	}
1083 
1084 	/* Family vs iface MTU */
1085 	if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
1086 		if (IS_ENABLED(CONFIG_NET_IPV6_FRAGMENT) && (size > max_len)) {
1087 			/* We support larger packets if IPv6 fragmentation is
1088 			 * enabled.
1089 			 */
1090 			max_len = size;
1091 		}
1092 
1093 		max_len = MAX(max_len, NET_IPV6_MTU);
1094 	} else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
1095 		if (IS_ENABLED(CONFIG_NET_IPV4_FRAGMENT) && (size > max_len)) {
1096 			/* We support larger packets if IPv4 fragmentation is enabled */
1097 			max_len = size;
1098 		}
1099 
1100 		max_len = MAX(max_len, NET_IPV4_MTU);
1101 	} else { /* family == AF_UNSPEC */
1102 #if defined (CONFIG_NET_L2_ETHERNET)
1103 		if (net_if_l2(net_pkt_iface(pkt)) ==
1104 		    &NET_L2_GET_NAME(ETHERNET)) {
1105 			max_len += NET_ETH_MAX_HDR_SIZE;
1106 		} else
1107 #endif /* CONFIG_NET_L2_ETHERNET */
1108 		{
1109 			/* Other L2 are not checked as the pkt MTU in this case
1110 			 * is based on the IP layer (IPv6 most of the time).
1111 			 */
1112 			max_len = size;
1113 		}
1114 	}
1115 
1116 	max_len -= existing;
1117 
1118 	return MIN(size, max_len);
1119 }
1120 
1121 static size_t pkt_estimate_headers_length(struct net_pkt *pkt,
1122 					  sa_family_t family,
1123 					  enum net_ip_protocol proto)
1124 {
1125 	size_t hdr_len = 0;
1126 
1127 	if (family == AF_UNSPEC) {
1128 		return 0;
1129 	}
1130 
1131 	/* Family header */
1132 	if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
1133 		hdr_len += NET_IPV6H_LEN;
1134 	} else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
1135 		hdr_len += NET_IPV4H_LEN;
1136 	}
1137 
1138 	/* + protocol header */
1139 	if (IS_ENABLED(CONFIG_NET_TCP) && proto == IPPROTO_TCP) {
1140 		hdr_len += NET_TCPH_LEN + NET_TCP_MAX_OPT_SIZE;
1141 	} else if (IS_ENABLED(CONFIG_NET_UDP) && proto == IPPROTO_UDP) {
1142 		hdr_len += NET_UDPH_LEN;
1143 	} else if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) {
1144 		hdr_len += NET_ICMPH_LEN;
1145 	}
1146 
1147 	NET_DBG("HDRs length estimation %zu", hdr_len);
1148 
1149 	return hdr_len;
1150 }
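
/* For example, with the estimation above an AF_INET/IPPROTO_UDP packet
 * reserves NET_IPV4H_LEN + NET_UDPH_LEN = 20 + 8 = 28 bytes of header space,
 * while an AF_INET6/IPPROTO_TCP packet reserves
 * NET_IPV6H_LEN + NET_TCPH_LEN + NET_TCP_MAX_OPT_SIZE bytes.
 */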
1151 
1152 static size_t pkt_get_max_len(struct net_pkt *pkt)
1153 {
1154 	struct net_buf *buf = pkt->buffer;
1155 	size_t size = 0;
1156 
1157 	while (buf) {
1158 		size += net_buf_max_len(buf);
1159 		buf = buf->frags;
1160 	}
1161 
1162 	return size;
1163 }
1164 
1165 size_t net_pkt_available_buffer(struct net_pkt *pkt)
1166 {
1167 	if (!pkt) {
1168 		return 0;
1169 	}
1170 
1171 	return pkt_get_max_len(pkt) - net_pkt_get_len(pkt);
1172 }
1173 
1174 size_t net_pkt_available_payload_buffer(struct net_pkt *pkt,
1175 					enum net_ip_protocol proto)
1176 {
1177 	size_t hdr_len = 0;
1178 	size_t len;
1179 
1180 	if (!pkt) {
1181 		return 0;
1182 	}
1183 
1184 	hdr_len = pkt_estimate_headers_length(pkt, net_pkt_family(pkt), proto);
1185 	len = net_pkt_get_len(pkt);
1186 
1187 	hdr_len = hdr_len <= len ? 0 : hdr_len - len;
1188 
1189 	len = net_pkt_available_buffer(pkt) - hdr_len;
1190 
1191 	return len;
1192 }
1193 
1194 void net_pkt_trim_buffer(struct net_pkt *pkt)
1195 {
1196 	struct net_buf *buf, *prev;
1197 
1198 	buf = pkt->buffer;
1199 	prev = buf;
1200 
1201 	while (buf) {
1202 		struct net_buf *next = buf->frags;
1203 
1204 		if (!buf->len) {
1205 			if (buf == pkt->buffer) {
1206 				pkt->buffer = next;
1207 			} else if (buf == prev->frags) {
1208 				prev->frags = next;
1209 			}
1210 
1211 			buf->frags = NULL;
1212 			net_buf_unref(buf);
1213 		} else {
1214 			prev = buf;
1215 		}
1216 
1217 		buf = next;
1218 	}
1219 }
1220 
1221 int net_pkt_remove_tail(struct net_pkt *pkt, size_t length)
1222 {
1223 	struct net_buf *buf = pkt->buffer;
1224 	size_t remaining_len = net_pkt_get_len(pkt);
1225 
1226 	if (remaining_len < length) {
1227 		return -EINVAL;
1228 	}
1229 
1230 	remaining_len -= length;
1231 
1232 	while (buf) {
1233 		if (buf->len >= remaining_len) {
1234 			buf->len = remaining_len;
1235 
1236 			if (buf->frags) {
1237 				net_pkt_frag_unref(buf->frags);
1238 				buf->frags = NULL;
1239 			}
1240 
1241 			break;
1242 		}
1243 
1244 		remaining_len -= buf->len;
1245 		buf = buf->frags;
1246 	}
1247 
1248 	return 0;
1249 }
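
/* Illustrative sketch: net_pkt_remove_tail() is typically used to strip a
 * fixed-size trailer, e.g. dropping an assumed 4 byte checksum from the end
 * of a received frame:
 *
 *	if (net_pkt_remove_tail(pkt, 4) < 0) {
 *		// shorter than the trailer, drop the packet
 *	}
 */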
1250 
1251 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1252 int net_pkt_alloc_buffer_with_reserve_debug(struct net_pkt *pkt,
1253 					    size_t size,
1254 					    size_t reserve,
1255 					    enum net_ip_protocol proto,
1256 					    k_timeout_t timeout,
1257 					    const char *caller,
1258 					    int line)
1259 #else
1260 int net_pkt_alloc_buffer_with_reserve(struct net_pkt *pkt,
1261 				      size_t size,
1262 				      size_t reserve,
1263 				      enum net_ip_protocol proto,
1264 				      k_timeout_t timeout)
1265 #endif
1266 {
1267 	struct net_buf_pool *pool = NULL;
1268 	size_t alloc_len = 0;
1269 	size_t hdr_len = 0;
1270 	struct net_buf *buf;
1271 
1272 	if (!size && proto == 0 && net_pkt_family(pkt) == AF_UNSPEC) {
1273 		return 0;
1274 	}
1275 
1276 	if (k_is_in_isr()) {
1277 		timeout = K_NO_WAIT;
1278 	}
1279 
1280 	/* Verify the existing buffer and take into account the free space there */
1281 	alloc_len = net_pkt_available_buffer(pkt);
1282 	if (!alloc_len) {
1283 		/* If there is no free space, account for the estimated
1284 		 * header space.
1285 		 */
1286 		hdr_len = pkt_estimate_headers_length(pkt,
1287 						      net_pkt_family(pkt),
1288 						      proto);
1289 	}
1290 
1291 	/* Calculate the maximum that can be allocated depending on size */
1292 	alloc_len = pkt_buffer_length(pkt, size + hdr_len, proto, alloc_len);
1293 
1294 	NET_DBG("Data allocation maximum size %zu (requested %zu, reserve %zu)",
1295 		alloc_len, size, reserve);
1296 
1297 	if (pkt->context) {
1298 		pool = get_data_pool(pkt->context);
1299 	}
1300 
1301 	if (!pool) {
1302 		pool = pkt->slab == &tx_pkts ? &tx_bufs : &rx_bufs;
1303 	}
1304 
1305 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1306 	buf = pkt_alloc_buffer(pkt, pool, alloc_len, reserve,
1307 			       timeout, caller, line);
1308 #else
1309 	buf = pkt_alloc_buffer(pkt, pool, alloc_len, reserve, timeout);
1310 #endif
1311 
1312 	if (!buf) {
1313 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1314 		NET_ERR("Data buffer (%zu) allocation failed (%s:%d)",
1315 			alloc_len + reserve, caller, line);
1316 #else
1317 		NET_ERR("Data buffer (%zu) allocation failed.",
1318 			alloc_len + reserve);
1319 #endif
1320 		return -ENOMEM;
1321 	}
1322 
1323 	net_pkt_append_buffer(pkt, buf);
1324 
1325 	/* Hide the link layer header for now. The space is used when the
1326 	 * link layer header needs to be written to the packet by the L2 send path.
1327 	 */
1328 	if (reserve > 0U) {
1329 		NET_DBG("Reserving %zu bytes for L2 header", reserve);
1330 
1331 		net_buf_reserve(pkt->buffer, reserve);
1332 
1333 		net_pkt_cursor_init(pkt);
1334 	}
1335 
1336 	return 0;
1337 }
1338 
1339 static bool is_pkt_tx(struct net_pkt *pkt)
1340 {
1341 #if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL)
1342 	if ((pkt->context != NULL) && (get_tx_slab(pkt->context) != NULL)) {
1343 		return pkt->slab == get_tx_slab(pkt->context);
1344 	}
1345 #endif
1346 	return pkt->slab == &tx_pkts;
1347 }
1348 
1349 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1350 int net_pkt_alloc_buffer_debug(struct net_pkt *pkt,
1351 			       size_t size,
1352 			       enum net_ip_protocol proto,
1353 			       k_timeout_t timeout,
1354 			       const char *caller,
1355 			       int line)
1356 #else
1357 int net_pkt_alloc_buffer(struct net_pkt *pkt,
1358 			 size_t size,
1359 			 enum net_ip_protocol proto,
1360 			 k_timeout_t timeout)
1361 #endif
1362 {
1363 	struct net_if *iface;
1364 	int ret;
1365 
1366 	if (!size && proto == 0 && net_pkt_family(pkt) == AF_UNSPEC) {
1367 		return 0;
1368 	}
1369 
1370 	if (k_is_in_isr()) {
1371 		timeout = K_NO_WAIT;
1372 	}
1373 
1374 	iface = net_pkt_iface(pkt);
1375 
1376 	if (iface != NULL && is_pkt_tx(pkt) && net_if_l2(iface)->alloc != NULL) {
1377 		ret = net_if_l2(iface)->alloc(iface, pkt, size, proto, timeout);
1378 		if (ret != -ENOTSUP) {
1379 			return ret;
1380 		}
1381 	}
1382 
1383 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1384 	ret = net_pkt_alloc_buffer_with_reserve_debug(pkt,
1385 						      size,
1386 						      0U,
1387 						      proto,
1388 						      timeout,
1389 						      caller,
1390 						      line);
1391 #else
1392 	ret = net_pkt_alloc_buffer_with_reserve(pkt,
1393 						size,
1394 						0U,
1395 						proto,
1396 						timeout);
1397 #endif
1398 
1399 	return ret;
1400 }
1401 
1402 
1403 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1404 int net_pkt_alloc_buffer_raw_debug(struct net_pkt *pkt, size_t size,
1405 				   k_timeout_t timeout, const char *caller,
1406 				   int line)
1407 #else
1408 int net_pkt_alloc_buffer_raw(struct net_pkt *pkt, size_t size,
1409 			     k_timeout_t timeout)
1410 #endif
1411 {
1412 	struct net_buf_pool *pool = NULL;
1413 	struct net_buf *buf;
1414 
1415 	if (size == 0) {
1416 		return 0;
1417 	}
1418 
1419 	if (k_is_in_isr()) {
1420 		timeout = K_NO_WAIT;
1421 	}
1422 
1423 	NET_DBG("Data allocation size %zu", size);
1424 
1425 	if (pkt->context) {
1426 		pool = get_data_pool(pkt->context);
1427 	}
1428 
1429 	if (!pool) {
1430 		pool = pkt->slab == &tx_pkts ? &tx_bufs : &rx_bufs;
1431 	}
1432 
1433 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1434 	buf = pkt_alloc_buffer(pkt, pool, size, 0U, timeout, caller, line);
1435 #else
1436 	buf = pkt_alloc_buffer(pkt, pool, size, 0U, timeout);
1437 #endif
1438 
1439 	if (!buf) {
1440 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1441 		NET_ERR("Data buffer (%zd) allocation failed (%s:%d)",
1442 			size, caller, line);
1443 #else
1444 		NET_ERR("Data buffer (%zd) allocation failed.", size);
1445 #endif
1446 		return -ENOMEM;
1447 	}
1448 
1449 	net_pkt_append_buffer(pkt, buf);
1450 
1451 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
1452 	/* net_buf allocators shrink the buffer size to the requested size.
1453 	 * We don't want this behavior here, so restore the real size of the
1454 	 * last fragment.
1455 	 */
1456 	buf = net_buf_frag_last(buf);
1457 	buf->size = CONFIG_NET_BUF_DATA_SIZE;
1458 #endif
1459 
1460 	return 0;
1461 }
1462 
1463 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1464 static struct net_pkt *pkt_alloc(struct k_mem_slab *slab, k_timeout_t timeout,
1465 				 const char *caller, int line)
1466 #else
1467 static struct net_pkt *pkt_alloc(struct k_mem_slab *slab, k_timeout_t timeout)
1468 #endif
1469 {
1470 	struct net_pkt *pkt;
1471 	uint32_t create_time;
1472 	int ret;
1473 
1474 	if (k_is_in_isr()) {
1475 		timeout = K_NO_WAIT;
1476 	}
1477 
1478 	if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS) ||
1479 	    IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS) ||
1480 	    IS_ENABLED(CONFIG_TRACING_NET_CORE)) {
1481 		create_time = k_cycle_get_32();
1482 	} else {
1483 		ARG_UNUSED(create_time);
1484 	}
1485 
1486 	ret = k_mem_slab_alloc(slab, (void **)&pkt, timeout);
1487 	if (ret) {
1488 		return NULL;
1489 	}
1490 
1491 	memset(pkt, 0, sizeof(struct net_pkt));
1492 
1493 	pkt->atomic_ref = ATOMIC_INIT(1);
1494 	pkt->slab = slab;
1495 
1496 	if (IS_ENABLED(CONFIG_NET_IPV6)) {
1497 		net_pkt_set_ipv6_next_hdr(pkt, 255);
1498 	}
1499 
1500 #if defined(CONFIG_NET_TX_DEFAULT_PRIORITY)
1501 #define TX_DEFAULT_PRIORITY CONFIG_NET_TX_DEFAULT_PRIORITY
1502 #else
1503 #define TX_DEFAULT_PRIORITY 0
1504 #endif
1505 
1506 #if defined(CONFIG_NET_RX_DEFAULT_PRIORITY)
1507 #define RX_DEFAULT_PRIORITY CONFIG_NET_RX_DEFAULT_PRIORITY
1508 #else
1509 #define RX_DEFAULT_PRIORITY 0
1510 #endif
1511 
1512 	if (&tx_pkts == slab) {
1513 		net_pkt_set_priority(pkt, TX_DEFAULT_PRIORITY);
1514 	} else if (&rx_pkts == slab) {
1515 		net_pkt_set_priority(pkt, RX_DEFAULT_PRIORITY);
1516 	}
1517 
1518 	if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS) ||
1519 	    IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS) ||
1520 	    IS_ENABLED(CONFIG_TRACING_NET_CORE)) {
1521 		net_pkt_set_create_time(pkt, create_time);
1522 	}
1523 
1524 	net_pkt_set_vlan_tag(pkt, NET_VLAN_TAG_UNSPEC);
1525 
1526 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1527 	net_pkt_alloc_add(pkt, true, caller, line);
1528 #endif
1529 
1530 	net_pkt_cursor_init(pkt);
1531 
1532 	return pkt;
1533 }
1534 
1535 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1536 struct net_pkt *net_pkt_alloc_debug(k_timeout_t timeout,
1537 				    const char *caller, int line)
1538 #else
1539 struct net_pkt *net_pkt_alloc(k_timeout_t timeout)
1540 #endif
1541 {
1542 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1543 	return pkt_alloc(&tx_pkts, timeout, caller, line);
1544 #else
1545 	return pkt_alloc(&tx_pkts, timeout);
1546 #endif
1547 }
1548 
1549 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1550 struct net_pkt *net_pkt_alloc_from_slab_debug(struct k_mem_slab *slab,
1551 					      k_timeout_t timeout,
1552 					      const char *caller, int line)
1553 #else
1554 struct net_pkt *net_pkt_alloc_from_slab(struct k_mem_slab *slab,
1555 					k_timeout_t timeout)
1556 #endif
1557 {
1558 	if (!slab) {
1559 		return NULL;
1560 	}
1561 
1562 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1563 	return pkt_alloc(slab, timeout, caller, line);
1564 #else
1565 	return pkt_alloc(slab, timeout);
1566 #endif
1567 }
1568 
1569 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1570 struct net_pkt *net_pkt_rx_alloc_debug(k_timeout_t timeout,
1571 				       const char *caller, int line)
1572 #else
1573 struct net_pkt *net_pkt_rx_alloc(k_timeout_t timeout)
1574 #endif
1575 {
1576 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1577 	return pkt_alloc(&rx_pkts, timeout, caller, line);
1578 #else
1579 	return pkt_alloc(&rx_pkts, timeout);
1580 #endif
1581 }
1582 
1583 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1584 static struct net_pkt *pkt_alloc_on_iface(struct k_mem_slab *slab,
1585 					  struct net_if *iface,
1586 					  k_timeout_t timeout,
1587 					  const char *caller, int line)
1588 #else
1589 static struct net_pkt *pkt_alloc_on_iface(struct k_mem_slab *slab,
1590 					  struct net_if *iface,
1591 					  k_timeout_t timeout)
1592 
1593 #endif
1594 {
1595 	struct net_pkt *pkt;
1596 
1597 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1598 	pkt = pkt_alloc(slab, timeout, caller, line);
1599 #else
1600 	pkt = pkt_alloc(slab, timeout);
1601 #endif
1602 
1603 	if (pkt) {
1604 		net_pkt_set_iface(pkt, iface);
1605 	}
1606 
1607 	return pkt;
1608 }
1609 
1610 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1611 struct net_pkt *net_pkt_alloc_on_iface_debug(struct net_if *iface,
1612 					     k_timeout_t timeout,
1613 					     const char *caller,
1614 					     int line)
1615 #else
1616 struct net_pkt *net_pkt_alloc_on_iface(struct net_if *iface,
1617 				       k_timeout_t timeout)
1618 #endif
1619 {
1620 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1621 	return pkt_alloc_on_iface(&tx_pkts, iface, timeout, caller, line);
1622 #else
1623 	return pkt_alloc_on_iface(&tx_pkts, iface, timeout);
1624 #endif
1625 }
1626 
1627 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1628 struct net_pkt *net_pkt_rx_alloc_on_iface_debug(struct net_if *iface,
1629 						k_timeout_t timeout,
1630 						const char *caller,
1631 						int line)
1632 #else
1633 struct net_pkt *net_pkt_rx_alloc_on_iface(struct net_if *iface,
1634 					  k_timeout_t timeout)
1635 #endif
1636 {
1637 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1638 	return pkt_alloc_on_iface(&rx_pkts, iface, timeout, caller, line);
1639 #else
1640 	return pkt_alloc_on_iface(&rx_pkts, iface, timeout);
1641 #endif
1642 }
1643 
1644 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1645 static struct net_pkt *
1646 pkt_alloc_with_buffer(struct k_mem_slab *slab,
1647 		      struct net_if *iface,
1648 		      size_t size,
1649 		      sa_family_t family,
1650 		      enum net_ip_protocol proto,
1651 		      k_timeout_t timeout,
1652 		      const char *caller,
1653 		      int line)
1654 #else
1655 static struct net_pkt *
1656 pkt_alloc_with_buffer(struct k_mem_slab *slab,
1657 		      struct net_if *iface,
1658 		      size_t size,
1659 		      sa_family_t family,
1660 		      enum net_ip_protocol proto,
1661 		      k_timeout_t timeout)
1662 #endif
1663 {
1664 	k_timepoint_t end = sys_timepoint_calc(timeout);
1665 	struct net_pkt *pkt;
1666 	int ret;
1667 
1668 	NET_DBG("On iface %d (%p) size %zu", net_if_get_by_iface(iface), iface, size);
1669 
1670 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1671 	pkt = pkt_alloc_on_iface(slab, iface, timeout, caller, line);
1672 #else
1673 	pkt = pkt_alloc_on_iface(slab, iface, timeout);
1674 #endif
1675 
1676 	if (!pkt) {
1677 		return NULL;
1678 	}
1679 
1680 	net_pkt_set_family(pkt, family);
1681 
1682 	timeout = sys_timepoint_timeout(end);
1683 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1684 	ret = net_pkt_alloc_buffer_debug(pkt, size, proto, timeout,
1685 					 caller, line);
1686 #else
1687 	ret = net_pkt_alloc_buffer(pkt, size, proto, timeout);
1688 #endif
1689 
1690 	if (ret) {
1691 		net_pkt_unref(pkt);
1692 		return NULL;
1693 	}
1694 
1695 	return pkt;
1696 }
1697 
1698 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1699 struct net_pkt *net_pkt_alloc_with_buffer_debug(struct net_if *iface,
1700 						size_t size,
1701 						sa_family_t family,
1702 						enum net_ip_protocol proto,
1703 						k_timeout_t timeout,
1704 						const char *caller,
1705 						int line)
1706 #else
1707 struct net_pkt *net_pkt_alloc_with_buffer(struct net_if *iface,
1708 					  size_t size,
1709 					  sa_family_t family,
1710 					  enum net_ip_protocol proto,
1711 					  k_timeout_t timeout)
1712 #endif
1713 {
1714 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1715 	return pkt_alloc_with_buffer(&tx_pkts, iface, size, family,
1716 				     proto, timeout, caller, line);
1717 #else
1718 	return pkt_alloc_with_buffer(&tx_pkts, iface, size, family,
1719 				     proto, timeout);
1720 #endif
1721 }
1722 
1723 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1724 struct net_pkt *net_pkt_rx_alloc_with_buffer_debug(struct net_if *iface,
1725 						   size_t size,
1726 						   sa_family_t family,
1727 						   enum net_ip_protocol proto,
1728 						   k_timeout_t timeout,
1729 						   const char *caller,
1730 						   int line)
1731 #else
1732 struct net_pkt *net_pkt_rx_alloc_with_buffer(struct net_if *iface,
1733 					     size_t size,
1734 					     sa_family_t family,
1735 					     enum net_ip_protocol proto,
1736 					     k_timeout_t timeout)
1737 #endif
1738 {
1739 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1740 	return pkt_alloc_with_buffer(&rx_pkts, iface, size, family,
1741 					proto, timeout, caller, line);
1742 #else
1743 	return pkt_alloc_with_buffer(&rx_pkts, iface, size, family,
1744 					proto, timeout);
1745 #endif
1746 }
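
/* Illustrative sketch (assumed caller code): the usual one-shot allocation
 * path for an outgoing UDP payload combines the helpers above; iface,
 * payload and payload_len are assumed to come from the caller:
 *
 *	struct net_pkt *pkt;
 *
 *	pkt = net_pkt_alloc_with_buffer(iface, payload_len, AF_INET,
 *					IPPROTO_UDP, K_MSEC(100));
 *	if (!pkt) {
 *		return -ENOMEM;
 *	}
 *
 *	if (net_pkt_write(pkt, payload, payload_len) < 0) {
 *		net_pkt_unref(pkt);
 *		return -ENOBUFS;
 *	}
 */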
1747 
1748 void net_pkt_append_buffer(struct net_pkt *pkt, struct net_buf *buffer)
1749 {
1750 	if (!pkt->buffer) {
1751 		pkt->buffer = buffer;
1752 		net_pkt_cursor_init(pkt);
1753 	} else {
1754 		net_buf_frag_insert(net_buf_frag_last(pkt->buffer), buffer);
1755 	}
1756 }
1757 
1758 void net_pkt_cursor_init(struct net_pkt *pkt)
1759 {
1760 	pkt->cursor.buf = pkt->buffer;
1761 	if (pkt->cursor.buf) {
1762 		pkt->cursor.pos = pkt->cursor.buf->data;
1763 	} else {
1764 		pkt->cursor.pos = NULL;
1765 	}
1766 }
1767 
1768 static void pkt_cursor_jump(struct net_pkt *pkt, bool write)
1769 {
1770 	struct net_pkt_cursor *cursor = &pkt->cursor;
1771 
1772 	cursor->buf = cursor->buf->frags;
1773 	while (cursor->buf) {
1774 		const size_t len =
1775 			write ? net_buf_max_len(cursor->buf) : cursor->buf->len;
1776 
1777 		if (!len) {
1778 			cursor->buf = cursor->buf->frags;
1779 		} else {
1780 			break;
1781 		}
1782 	}
1783 
1784 	if (cursor->buf) {
1785 		cursor->pos = cursor->buf->data;
1786 	} else {
1787 		cursor->pos = NULL;
1788 	}
1789 }
1790 
1791 static void pkt_cursor_advance(struct net_pkt *pkt, bool write)
1792 {
1793 	struct net_pkt_cursor *cursor = &pkt->cursor;
1794 	size_t len;
1795 
1796 	if (!cursor->buf) {
1797 		return;
1798 	}
1799 
1800 	len = write ? net_buf_max_len(cursor->buf) : cursor->buf->len;
1801 	if ((cursor->pos - cursor->buf->data) == len) {
1802 		pkt_cursor_jump(pkt, write);
1803 	}
1804 }
1805 
1806 static void pkt_cursor_update(struct net_pkt *pkt,
1807 			      size_t length, bool write)
1808 {
1809 	struct net_pkt_cursor *cursor = &pkt->cursor;
1810 	size_t len;
1811 
1812 	if (net_pkt_is_being_overwritten(pkt)) {
1813 		write = false;
1814 	}
1815 
1816 	len = write ? net_buf_max_len(cursor->buf) : cursor->buf->len;
1817 	if (length + (cursor->pos - cursor->buf->data) == len &&
1818 	    !(net_pkt_is_being_overwritten(pkt) &&
1819 	      len < net_buf_max_len(cursor->buf))) {
1820 		pkt_cursor_jump(pkt, write);
1821 	} else {
1822 		cursor->pos += length;
1823 	}
1824 }
1825 
1826 /* Internal function that handles all the operations (skip/read/write/memset) */
1827 static int net_pkt_cursor_operate(struct net_pkt *pkt,
1828 				  void *data, size_t length,
1829 				  bool copy, bool write)
1830 {
1831 	/* We use this alias to avoid overly long lines */
1832 	struct net_pkt_cursor *c_op = &pkt->cursor;
1833 
1834 	while (c_op->buf && length) {
1835 		size_t d_len, len;
1836 
1837 		pkt_cursor_advance(pkt, net_pkt_is_being_overwritten(pkt) ?
1838 				   false : write);
1839 		if (c_op->buf == NULL) {
1840 			break;
1841 		}
1842 
1843 		if (write && !net_pkt_is_being_overwritten(pkt)) {
1844 			d_len = net_buf_max_len(c_op->buf) -
1845 				(c_op->pos - c_op->buf->data);
1846 		} else {
1847 			d_len = c_op->buf->len - (c_op->pos - c_op->buf->data);
1848 		}
1849 
1850 		if (!d_len) {
1851 			break;
1852 		}
1853 
1854 		if (length < d_len) {
1855 			len = length;
1856 		} else {
1857 			len = d_len;
1858 		}
1859 
1860 		if (copy && data) {
1861 			memcpy(write ? c_op->pos : data,
1862 			       write ? data : c_op->pos,
1863 			       len);
1864 		} else if (data) {
1865 			memset(c_op->pos, *(int *)data, len);
1866 		}
1867 
1868 		if (write && !net_pkt_is_being_overwritten(pkt)) {
1869 			net_buf_add(c_op->buf, len);
1870 		}
1871 
1872 		pkt_cursor_update(pkt, len, write);
1873 
1874 		if (copy && data) {
1875 			data = (uint8_t *) data + len;
1876 		}
1877 
1878 		length -= len;
1879 	}
1880 
1881 	if (length) {
1882 		NET_DBG("Still some length to go %zu", length);
1883 		return -ENOBUFS;
1884 	}
1885 
1886 	return 0;
1887 }
1888 
1889 int net_pkt_skip(struct net_pkt *pkt, size_t skip)
1890 {
1891 	NET_DBG("pkt %p skip %zu", pkt, skip);
1892 
1893 	return net_pkt_cursor_operate(pkt, NULL, skip, false, true);
1894 }
1895 
1896 int net_pkt_memset(struct net_pkt *pkt, int byte, size_t amount)
1897 {
1898 	NET_DBG("pkt %p byte %d amount %zu", pkt, byte, amount);
1899 
1900 	return net_pkt_cursor_operate(pkt, &byte, amount, false, true);
1901 }
1902 
1903 int net_pkt_read(struct net_pkt *pkt, void *data, size_t length)
1904 {
1905 	NET_DBG("pkt %p data %p length %zu", pkt, data, length);
1906 
1907 	return net_pkt_cursor_operate(pkt, data, length, true, false);
1908 }
1909 
1910 int net_pkt_read_be16(struct net_pkt *pkt, uint16_t *data)
1911 {
1912 	uint8_t d16[2];
1913 	int ret;
1914 
1915 	ret = net_pkt_read(pkt, d16, sizeof(uint16_t));
1916 
1917 	*data = d16[0] << 8 | d16[1];
1918 
1919 	return ret;
1920 }
1921 
1922 int net_pkt_read_le16(struct net_pkt *pkt, uint16_t *data)
1923 {
1924 	uint8_t d16[2];
1925 	int ret;
1926 
1927 	ret = net_pkt_read(pkt, d16, sizeof(uint16_t));
1928 
1929 	*data = d16[1] << 8 | d16[0];
1930 
1931 	return ret;
1932 }
1933 
1934 int net_pkt_read_be32(struct net_pkt *pkt, uint32_t *data)
1935 {
1936 	uint8_t d32[4];
1937 	int ret;
1938 
1939 	ret = net_pkt_read(pkt, d32, sizeof(uint32_t));
1940 
1941 	*data = d32[0] << 24 | d32[1] << 16 | d32[2] << 8 | d32[3];
1942 
1943 	return ret;
1944 }
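
/* Illustrative sketch (assumed caller code): all reads go through the packet
 * cursor, so a parser normally rewinds first and then consumes fields in
 * order:
 *
 *	uint8_t type;
 *	uint16_t length;
 *
 *	net_pkt_cursor_init(pkt);
 *	if (net_pkt_read(pkt, &type, sizeof(type)) < 0 ||
 *	    net_pkt_read_be16(pkt, &length) < 0) {
 *		return -EMSGSIZE;
 *	}
 */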
1945 
1946 int net_pkt_write(struct net_pkt *pkt, const void *data, size_t length)
1947 {
1948 	NET_DBG("pkt %p data %p length %zu", pkt, data, length);
1949 
1950 	if (data == pkt->cursor.pos && net_pkt_is_contiguous(pkt, length)) {
1951 		return net_pkt_skip(pkt, length);
1952 	}
1953 
1954 	return net_pkt_cursor_operate(pkt, (void *)data, length, true, true);
1955 }
1956 
int net_pkt_copy(struct net_pkt *pkt_dst,
		 struct net_pkt *pkt_src,
		 size_t length)
{
	struct net_pkt_cursor *c_dst = &pkt_dst->cursor;
	struct net_pkt_cursor *c_src = &pkt_src->cursor;

	while (c_dst->buf && c_src->buf && length) {
		size_t s_len, d_len, len;

		pkt_cursor_advance(pkt_dst, true);
		pkt_cursor_advance(pkt_src, false);

		if (!c_dst->buf || !c_src->buf) {
			break;
		}

		s_len = c_src->buf->len - (c_src->pos - c_src->buf->data);
		d_len = net_buf_max_len(c_dst->buf) - (c_dst->pos - c_dst->buf->data);
		if (length < s_len && length < d_len) {
			len = length;
		} else {
			if (d_len < s_len) {
				len = d_len;
			} else {
				len = s_len;
			}
		}

		if (!len) {
			break;
		}

		memcpy(c_dst->pos, c_src->pos, len);

		if (!net_pkt_is_being_overwritten(pkt_dst)) {
			net_buf_add(c_dst->buf, len);
		}

		pkt_cursor_update(pkt_dst, len, true);
		pkt_cursor_update(pkt_src, len, false);

		length -= len;
	}

	if (length) {
		NET_DBG("Still some length to go %zu", length);
		return -ENOBUFS;
	}

	return 0;
}

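/* When the user control block is enabled, clone its contents verbatim
 * as well.
 */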
#if defined(NET_PKT_HAS_CONTROL_BLOCK)
static inline void clone_pkt_cb(struct net_pkt *pkt, struct net_pkt *clone_pkt)
{
	memcpy(net_pkt_cb(clone_pkt), net_pkt_cb(pkt), sizeof(clone_pkt->cb));
}
#else
static inline void clone_pkt_cb(struct net_pkt *pkt, struct net_pkt *clone_pkt)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(clone_pkt);
}
#endif

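/* Copy the packet metadata (family, flags, priority, timestamps, link
 * layer addresses, IP specific fields, ...) from pkt to clone_pkt. The
 * buffer contents themselves are copied separately by the callers.
 */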
static void clone_pkt_attributes(struct net_pkt *pkt, struct net_pkt *clone_pkt)
{
	net_pkt_set_family(clone_pkt, net_pkt_family(pkt));
	net_pkt_set_context(clone_pkt, net_pkt_context(pkt));
	net_pkt_set_ip_hdr_len(clone_pkt, net_pkt_ip_hdr_len(pkt));
	net_pkt_set_ip_dscp(clone_pkt, net_pkt_ip_dscp(pkt));
	net_pkt_set_ip_ecn(clone_pkt, net_pkt_ip_ecn(pkt));
	net_pkt_set_vlan_tag(clone_pkt, net_pkt_vlan_tag(pkt));
	net_pkt_set_timestamp(clone_pkt, net_pkt_timestamp(pkt));
	net_pkt_set_priority(clone_pkt, net_pkt_priority(pkt));
	net_pkt_set_orig_iface(clone_pkt, net_pkt_orig_iface(pkt));
	net_pkt_set_captured(clone_pkt, net_pkt_is_captured(pkt));
	net_pkt_set_eof(clone_pkt, net_pkt_eof(pkt));
	net_pkt_set_ptp(clone_pkt, net_pkt_is_ptp(pkt));
	net_pkt_set_ppp(clone_pkt, net_pkt_is_ppp(pkt));
	net_pkt_set_lldp(clone_pkt, net_pkt_is_lldp(pkt));
	net_pkt_set_ipv4_acd(clone_pkt, net_pkt_ipv4_acd(pkt));
	net_pkt_set_tx_timestamping(clone_pkt, net_pkt_is_tx_timestamping(pkt));
	net_pkt_set_rx_timestamping(clone_pkt, net_pkt_is_rx_timestamping(pkt));
	net_pkt_set_forwarding(clone_pkt, net_pkt_forwarding(pkt));
	net_pkt_set_chksum_done(clone_pkt, net_pkt_is_chksum_done(pkt));
	net_pkt_set_ip_reassembled(clone_pkt, net_pkt_is_ip_reassembled(pkt));
	net_pkt_set_cooked_mode(clone_pkt, net_pkt_is_cooked_mode(pkt));
	net_pkt_set_ipv4_pmtu(clone_pkt, net_pkt_ipv4_pmtu(pkt));
	net_pkt_set_l2_bridged(clone_pkt, net_pkt_is_l2_bridged(pkt));
	net_pkt_set_l2_processed(clone_pkt, net_pkt_is_l2_processed(pkt));
	net_pkt_set_ll_proto_type(clone_pkt, net_pkt_ll_proto_type(pkt));

#if defined(CONFIG_NET_OFFLOAD) || defined(CONFIG_NET_L2_IPIP)
	net_pkt_set_remote_address(clone_pkt, net_pkt_remote_address(pkt),
				   sizeof(struct sockaddr_storage));
#endif

	if (pkt->buffer && clone_pkt->buffer) {
		memcpy(net_pkt_lladdr_src(clone_pkt), net_pkt_lladdr_src(pkt),
		       sizeof(struct net_linkaddr));
		memcpy(net_pkt_lladdr_dst(clone_pkt), net_pkt_lladdr_dst(pkt),
		       sizeof(struct net_linkaddr));
	}

	if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) {
		net_pkt_set_ipv4_ttl(clone_pkt, net_pkt_ipv4_ttl(pkt));
		net_pkt_set_ipv4_opts_len(clone_pkt,
					  net_pkt_ipv4_opts_len(pkt));
	} else if (IS_ENABLED(CONFIG_NET_IPV6) &&
		   net_pkt_family(pkt) == AF_INET6) {
		net_pkt_set_ipv6_hop_limit(clone_pkt,
					   net_pkt_ipv6_hop_limit(pkt));
		net_pkt_set_ipv6_ext_len(clone_pkt, net_pkt_ipv6_ext_len(pkt));
		net_pkt_set_ipv6_ext_opt_len(clone_pkt,
					     net_pkt_ipv6_ext_opt_len(pkt));
		net_pkt_set_ipv6_hdr_prev(clone_pkt,
					  net_pkt_ipv6_hdr_prev(pkt));
		net_pkt_set_ipv6_next_hdr(clone_pkt,
					  net_pkt_ipv6_next_hdr(pkt));
	}

	clone_pkt_cb(pkt, clone_pkt);
}

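/* Deep clone: allocate a new packet with buffer from the given slab, copy
 * the payload and the metadata, and leave the clone's cursor at the same
 * offset as the original's.
 */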
static struct net_pkt *net_pkt_clone_internal(struct net_pkt *pkt,
					      struct k_mem_slab *slab,
					      k_timeout_t timeout)
{
	size_t cursor_offset = net_pkt_get_current_offset(pkt);
	bool overwrite = net_pkt_is_being_overwritten(pkt);
	struct net_pkt_cursor backup;
	struct net_pkt *clone_pkt;

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
	clone_pkt = pkt_alloc_with_buffer(slab, net_pkt_iface(pkt),
					  net_pkt_get_len(pkt),
					  AF_UNSPEC, 0, timeout,
					  __func__, __LINE__);
#else
	clone_pkt = pkt_alloc_with_buffer(slab, net_pkt_iface(pkt),
					  net_pkt_get_len(pkt),
					  AF_UNSPEC, 0, timeout);
#endif
	if (!clone_pkt) {
		return NULL;
	}

	net_pkt_set_overwrite(pkt, true);
	net_pkt_cursor_backup(pkt, &backup);
	net_pkt_cursor_init(pkt);

	if (net_pkt_copy(clone_pkt, pkt, net_pkt_get_len(pkt))) {
		net_pkt_unref(clone_pkt);
		net_pkt_cursor_restore(pkt, &backup);
		net_pkt_set_overwrite(pkt, overwrite);
		return NULL;
	}
	net_pkt_set_overwrite(clone_pkt, true);

	clone_pkt_attributes(pkt, clone_pkt);

	net_pkt_cursor_init(clone_pkt);

	if (cursor_offset) {
		net_pkt_skip(clone_pkt, cursor_offset);
	}
	net_pkt_set_overwrite(clone_pkt, overwrite);

	net_pkt_cursor_restore(pkt, &backup);
	net_pkt_set_overwrite(pkt, overwrite);

	NET_DBG("Cloned %p to %p", pkt, clone_pkt);

	return clone_pkt;
}

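/* net_pkt_clone() allocates the clone from the same slab as the original
 * packet, while net_pkt_rx_clone() always allocates it from the RX slab.
 * A minimal usage sketch (illustrative only, assuming the caller owns
 * 'pkt' and can wait up to 100 ms for the allocation):
 *
 *	struct net_pkt *copy = net_pkt_clone(pkt, K_MSEC(100));
 *
 *	if (copy != NULL) {
 *		... use 'copy' independently of 'pkt' ...
 *		net_pkt_unref(copy);
 *	}
 */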
struct net_pkt *net_pkt_clone(struct net_pkt *pkt, k_timeout_t timeout)
{
	return net_pkt_clone_internal(pkt, pkt->slab, timeout);
}

struct net_pkt *net_pkt_rx_clone(struct net_pkt *pkt, k_timeout_t timeout)
{
	return net_pkt_clone_internal(pkt, &rx_pkts, timeout);
}

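/* A shallow clone gets its own net_pkt and metadata but shares the
 * original fragment chain: the buffers are reference counted rather than
 * copied, so data modifications are visible through both packets.
 */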
struct net_pkt *net_pkt_shallow_clone(struct net_pkt *pkt, k_timeout_t timeout)
{
	struct net_pkt *clone_pkt;
	struct net_buf *buf;

	clone_pkt = net_pkt_alloc(timeout);
	if (!clone_pkt) {
		return NULL;
	}

	net_pkt_set_iface(clone_pkt, net_pkt_iface(pkt));
	clone_pkt->buffer = pkt->buffer;
	buf = pkt->buffer;

	net_pkt_frag_ref(buf);

	clone_pkt_attributes(pkt, clone_pkt);

	net_pkt_cursor_restore(clone_pkt, &pkt->cursor);

	NET_DBG("Shallow cloned %p to %p", pkt, clone_pkt);

	return clone_pkt;
}

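/* Number of payload bytes left between the current cursor position and
 * the end of the packet.
 */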
size_t net_pkt_remaining_data(struct net_pkt *pkt)
{
	struct net_buf *buf;
	size_t data_length;

	if (!pkt || !pkt->cursor.buf || !pkt->cursor.pos) {
		return 0;
	}

	buf = pkt->cursor.buf;
	data_length = buf->len - (pkt->cursor.pos - buf->data);

	buf = buf->frags;
	while (buf) {
		data_length += buf->len;
		buf = buf->frags;
	}

	return data_length;
}

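/* Trim the packet to 'length' bytes by walking the fragment chain and
 * shortening the fragment that crosses the new end; any following
 * fragments keep their buffers but end up with a length of zero. Returns
 * -EINVAL if the packet is shorter than the requested length.
 */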
2190 
2191 int net_pkt_update_length(struct net_pkt *pkt, size_t length)
2192 {
2193 	struct net_buf *buf;
2194 
2195 	for (buf = pkt->buffer; buf; buf = buf->frags) {
2196 		if (buf->len < length) {
2197 			length -= buf->len;
2198 		} else {
2199 			buf->len = length;
2200 			length = 0;
2201 		}
2202 	}
2203 
2204 	return !length ? 0 : -EINVAL;
2205 }
2206 
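/* Remove 'length' bytes from the packet starting at the current cursor
 * position: data following the removed bytes is shifted down within the
 * fragment, a fragment that becomes empty causes the head of the chain to
 * be released, and the cursor is re-initialized before returning.
 */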
int net_pkt_pull(struct net_pkt *pkt, size_t length)
{
	struct net_pkt_cursor *c_op = &pkt->cursor;

	while (length) {
		size_t left, rem;

		pkt_cursor_advance(pkt, false);

		if (!c_op->buf) {
			break;
		}

		left = c_op->buf->len - (c_op->pos - c_op->buf->data);
		if (!left) {
			break;
		}

		rem = left;
		if (rem > length) {
			rem = length;
		}

		c_op->buf->len -= rem;
		left -= rem;
		if (left) {
			memmove(c_op->pos, c_op->pos + rem, left);
		} else {
			struct net_buf *buf = pkt->buffer;

			if (buf) {
				pkt->buffer = buf->frags;
				buf->frags = NULL;
				net_buf_unref(buf);
			}

			net_pkt_cursor_init(pkt);
		}

		length -= rem;
	}

	net_pkt_cursor_init(pkt);

	if (length) {
		NET_DBG("Still some length to go %zu", length);
		return -ENOBUFS;
	}

	return 0;
}

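/* Offset of the current cursor position from the beginning of the packet
 * data, in bytes.
 */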
uint16_t net_pkt_get_current_offset(struct net_pkt *pkt)
{
	struct net_buf *buf = pkt->buffer;
	uint16_t offset;

	if (!pkt->cursor.buf || !pkt->cursor.pos) {
		return 0;
	}

	offset = 0U;

	while (buf != pkt->cursor.buf) {
		offset += buf->len;
		buf = buf->frags;
	}

	offset += pkt->cursor.pos - buf->data;

	return offset;
}

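/* Contiguity helpers: how many bytes can be accessed from the cursor
 * position without crossing a fragment boundary. In overwrite mode this
 * is bounded by the data already present in the fragment, otherwise by
 * the fragment's capacity.
 */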
bool net_pkt_is_contiguous(struct net_pkt *pkt, size_t size)
{
	size_t len = net_pkt_get_contiguous_len(pkt);

	return len >= size;
}

size_t net_pkt_get_contiguous_len(struct net_pkt *pkt)
{
	pkt_cursor_advance(pkt, !net_pkt_is_being_overwritten(pkt));

	if (pkt->cursor.buf && pkt->cursor.pos) {
		size_t len;

		len = net_pkt_is_being_overwritten(pkt) ?
			pkt->cursor.buf->len : net_buf_max_len(pkt->cursor.buf);
		len -= pkt->cursor.pos - pkt->cursor.buf->data;

		return len;
	}

	return 0;
}

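/* Header accessors: when the requested area is contiguous (always the
 * case with CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS), net_pkt_get_data()
 * returns a pointer straight into the buffer; otherwise the header is
 * linearized into the storage referenced by the access descriptor and
 * net_pkt_set_data() writes it back and moves the cursor past it.
 * A minimal usage sketch (illustrative only), assuming the cursor sits
 * at the start of a UDP header:
 *
 *	NET_PKT_DATA_ACCESS_DEFINE(udp_access, struct net_udp_hdr);
 *	struct net_udp_hdr *hdr;
 *
 *	hdr = (struct net_udp_hdr *)net_pkt_get_data(pkt, &udp_access);
 *	if (hdr == NULL) {
 *		return -ENOBUFS;
 *	}
 *	... inspect or modify *hdr ...
 *	net_pkt_set_data(pkt, &udp_access);
 */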
void *net_pkt_get_data(struct net_pkt *pkt,
		       struct net_pkt_data_access *access)
{
	if (IS_ENABLED(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)) {
		if (!net_pkt_is_contiguous(pkt, access->size)) {
			return NULL;
		}

		return pkt->cursor.pos;
	} else {
		if (net_pkt_is_contiguous(pkt, access->size)) {
			access->data = pkt->cursor.pos;
		} else if (net_pkt_is_being_overwritten(pkt)) {
			struct net_pkt_cursor backup;

			if (!access->data) {
				NET_ERR("Non-contiguous data"
					" cannot be linearized");
				return NULL;
			}

			net_pkt_cursor_backup(pkt, &backup);

			if (net_pkt_read(pkt, access->data, access->size)) {
				net_pkt_cursor_restore(pkt, &backup);
				return NULL;
			}

			net_pkt_cursor_restore(pkt, &backup);
		}

		return access->data;
	}

	return NULL;
}

int net_pkt_set_data(struct net_pkt *pkt,
		     struct net_pkt_data_access *access)
{
	if (IS_ENABLED(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)) {
		return net_pkt_skip(pkt, access->size);
	}

	return net_pkt_write(pkt, access->data, access->size);
}

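/* Report the packet and data buffer pool sizes at initialization time
 * when debug logging is enabled.
 */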
void net_pkt_init(void)
{
#if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
	NET_DBG("Allocating %u RX (%zu bytes), %u TX (%zu bytes), "
		"%d RX data (%u bytes) and %d TX data (%u bytes) buffers",
		k_mem_slab_num_free_get(&rx_pkts),
		(size_t)(k_mem_slab_num_free_get(&rx_pkts) *
			 sizeof(struct net_pkt)),
		k_mem_slab_num_free_get(&tx_pkts),
		(size_t)(k_mem_slab_num_free_get(&tx_pkts) *
			 sizeof(struct net_pkt)),
		get_frees(&rx_bufs), get_size(&rx_bufs),
		get_frees(&tx_bufs), get_size(&tx_bufs));
#endif
}