1 /** @file
2  @brief Network packet buffers for IP stack
3 
4  Network data is passed between components using net_pkt.
5  */
6 
7 /*
8  * Copyright (c) 2016 Intel Corporation
9  *
10  * SPDX-License-Identifier: Apache-2.0
11  */
12 
13 #include <zephyr/logging/log.h>
14 LOG_MODULE_REGISTER(net_pkt, CONFIG_NET_PKT_LOG_LEVEL);
15 
16 /* This enables allocation debugging but does not print much output,
17  * as verbose output can slow things down a lot.
18  */
19 #undef NET_LOG_LEVEL
20 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
21 #define NET_LOG_LEVEL 5
22 #else
23 #define NET_LOG_LEVEL CONFIG_NET_PKT_LOG_LEVEL
24 #endif
25 
26 #include <zephyr/kernel.h>
27 #include <zephyr/toolchain.h>
28 #include <string.h>
29 #include <zephyr/types.h>
30 #include <sys/types.h>
31 
32 #include <zephyr/sys/util.h>
33 
34 #include <zephyr/net/net_core.h>
35 #include <zephyr/net/net_ip.h>
36 #include <zephyr/net_buf.h>
37 #include <zephyr/net/net_pkt.h>
38 #include <zephyr/net/ethernet.h>
39 #include <zephyr/net/udp.h>
40 
41 #include "net_private.h"
42 #include "tcp_internal.h"
43 
44 /* Make sure the net_buf data size is large enough that an IPv6 header
45  * and possible extension headers fit into the network buffer.
46  * The check uses an arbitrarily chosen value of 96, picked by monitoring
47  * Wireshark traffic to see what the typical header lengths are.
48  * It is still recommended to use the default value 128, but a smaller
49  * value is allowed if really needed.
50  */
51 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE) && defined(CONFIG_NET_NATIVE_IPV6)
52 BUILD_ASSERT(CONFIG_NET_BUF_DATA_SIZE >= 96);
53 #endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */
54 
55 /* Find max header size of IP protocol (IPv4 or IPv6) */
56 #if defined(CONFIG_NET_IPV6) || defined(CONFIG_NET_RAW_MODE) || \
57     defined(CONFIG_NET_SOCKETS_PACKET) || defined(CONFIG_NET_SOCKETS_OFFLOAD)
58 #define MAX_IP_PROTO_LEN NET_IPV6H_LEN
59 #else
60 #if defined(CONFIG_NET_IPV4)
61 #define MAX_IP_PROTO_LEN NET_IPV4H_LEN
62 #else
63 #if defined(CONFIG_NET_SOCKETS_CAN)
64 /* TODO: Use the CAN MTU here instead of a hard coded value. There was
65  * a weird circular dependency issue, so this needs more TLC.
66  */
67 #define MAX_IP_PROTO_LEN 8
68 #else
69 #if defined(CONFIG_NET_ETHERNET_BRIDGE) || \
70 	defined(CONFIG_NET_L2_IEEE802154) || \
71 	defined(CONFIG_NET_L2_CUSTOM_IEEE802154)
72 #define MAX_IP_PROTO_LEN 0
73 #else
74 #error "Some packet protocol (e.g. IPv6, IPv4, ETH, IEEE 802.15.4) needs to be selected."
75 #endif /* ETHERNET_BRIDGE / L2_IEEE802154 */
76 #endif /* SOCKETS_CAN */
77 #endif /* IPv4 */
78 #endif /* IPv6 */
79 
80 /* Find max header size of "next" protocol (TCP, UDP or ICMP) */
81 #if defined(CONFIG_NET_TCP)
82 #define MAX_NEXT_PROTO_LEN NET_TCPH_LEN
83 #else
84 #if defined(CONFIG_NET_UDP)
85 #define MAX_NEXT_PROTO_LEN NET_UDPH_LEN
86 #else
87 #if defined(CONFIG_NET_SOCKETS_CAN)
88 #define MAX_NEXT_PROTO_LEN 0
89 #else
90 /* If no TCP and no UDP, apparently we still want pings to work. */
91 #define MAX_NEXT_PROTO_LEN NET_ICMPH_LEN
92 #endif /* SOCKETS_CAN */
93 #endif /* UDP */
94 #endif /* TCP */
95 
96 /* Make sure that the IP + TCP/UDP/ICMP headers fit into one fragment. This
97  * makes it possible to cast a fragment pointer to a protocol header struct.
98  */
99 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
100 #if CONFIG_NET_BUF_DATA_SIZE < (MAX_IP_PROTO_LEN + MAX_NEXT_PROTO_LEN)
101 #if defined(STRING2)
102 #undef STRING2
103 #endif
104 #if defined(STRING)
105 #undef STRING
106 #endif
107 #define STRING2(x) #x
108 #define STRING(x) STRING2(x)
109 #pragma message "Data len " STRING(CONFIG_NET_BUF_DATA_SIZE)
110 #pragma message "Minimum len " STRING(MAX_IP_PROTO_LEN + MAX_NEXT_PROTO_LEN)
111 #error "Too small net_buf fragment size"
112 #endif
113 #endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */
114 
115 #if CONFIG_NET_PKT_RX_COUNT <= 0
116 #error "Minimum value for CONFIG_NET_PKT_RX_COUNT is 1"
117 #endif
118 
119 #if CONFIG_NET_PKT_TX_COUNT <= 0
120 #error "Minimum value for CONFIG_NET_PKT_TX_COUNT is 1"
121 #endif
122 
123 #if CONFIG_NET_BUF_RX_COUNT <= 0
124 #error "Minimum value for CONFIG_NET_BUF_RX_COUNT is 1"
125 #endif
126 
127 #if CONFIG_NET_BUF_TX_COUNT <= 0
128 #error "Minimum value for CONFIG_NET_BUF_TX_COUNT is 1"
129 #endif
130 
131 NET_PKT_SLAB_DEFINE(rx_pkts, CONFIG_NET_PKT_RX_COUNT);
132 NET_PKT_SLAB_DEFINE(tx_pkts, CONFIG_NET_PKT_TX_COUNT);
133 
134 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
135 
136 NET_BUF_POOL_FIXED_DEFINE(rx_bufs, CONFIG_NET_BUF_RX_COUNT, CONFIG_NET_BUF_DATA_SIZE,
137 			  CONFIG_NET_PKT_BUF_USER_DATA_SIZE, NULL);
138 NET_BUF_POOL_FIXED_DEFINE(tx_bufs, CONFIG_NET_BUF_TX_COUNT, CONFIG_NET_BUF_DATA_SIZE,
139 			  CONFIG_NET_PKT_BUF_USER_DATA_SIZE, NULL);
140 
141 #else /* !CONFIG_NET_BUF_FIXED_DATA_SIZE */
142 
143 NET_BUF_POOL_VAR_DEFINE(rx_bufs, CONFIG_NET_BUF_RX_COUNT, CONFIG_NET_PKT_BUF_RX_DATA_POOL_SIZE,
144 			CONFIG_NET_PKT_BUF_USER_DATA_SIZE, NULL);
145 NET_BUF_POOL_VAR_DEFINE(tx_bufs, CONFIG_NET_BUF_TX_COUNT, CONFIG_NET_PKT_BUF_TX_DATA_POOL_SIZE,
146 			CONFIG_NET_PKT_BUF_USER_DATA_SIZE, NULL);
147 
148 #endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */
149 
150 /* Allocation tracking is only available if separately enabled */
151 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
152 struct net_pkt_alloc {
153 	union {
154 		struct net_pkt *pkt;
155 		struct net_buf *buf;
156 		void *alloc_data;
157 	};
158 	const char *func_alloc;
159 	const char *func_free;
160 	uint16_t line_alloc;
161 	uint16_t line_free;
162 	uint8_t in_use;
163 	bool is_pkt;
164 };
165 
166 #define MAX_NET_PKT_ALLOCS (CONFIG_NET_PKT_RX_COUNT + \
167 			    CONFIG_NET_PKT_TX_COUNT + \
168 			    CONFIG_NET_BUF_RX_COUNT + \
169 			    CONFIG_NET_BUF_TX_COUNT + \
170 			    CONFIG_NET_DEBUG_NET_PKT_EXTERNALS)
171 
172 static struct net_pkt_alloc net_pkt_allocs[MAX_NET_PKT_ALLOCS];
173 
174 static void net_pkt_alloc_add(void *alloc_data, bool is_pkt,
175 			      const char *func, int line)
176 {
177 	int i;
178 
179 	for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
180 		if (net_pkt_allocs[i].in_use) {
181 			continue;
182 		}
183 
184 		net_pkt_allocs[i].in_use = true;
185 		net_pkt_allocs[i].is_pkt = is_pkt;
186 		net_pkt_allocs[i].alloc_data = alloc_data;
187 		net_pkt_allocs[i].func_alloc = func;
188 		net_pkt_allocs[i].line_alloc = line;
189 
190 		return;
191 	}
192 }
193 
194 static void net_pkt_alloc_del(void *alloc_data, const char *func, int line)
195 {
196 	int i;
197 
198 	for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
199 		if (net_pkt_allocs[i].in_use &&
200 		    net_pkt_allocs[i].alloc_data == alloc_data) {
201 			net_pkt_allocs[i].func_free = func;
202 			net_pkt_allocs[i].line_free = line;
203 			net_pkt_allocs[i].in_use = false;
204 
205 			return;
206 		}
207 	}
208 }
209 
210 static bool net_pkt_alloc_find(void *alloc_data,
211 			       const char **func_free,
212 			       int *line_free)
213 {
214 	int i;
215 
216 	for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
217 		if (!net_pkt_allocs[i].in_use &&
218 		    net_pkt_allocs[i].alloc_data == alloc_data) {
219 			*func_free = net_pkt_allocs[i].func_free;
220 			*line_free = net_pkt_allocs[i].line_free;
221 
222 			return true;
223 		}
224 	}
225 
226 	return false;
227 }
228 
229 void net_pkt_allocs_foreach(net_pkt_allocs_cb_t cb, void *user_data)
230 {
231 	int i;
232 
233 	for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
234 		if (net_pkt_allocs[i].in_use) {
235 			cb(net_pkt_allocs[i].is_pkt ?
236 			   net_pkt_allocs[i].pkt : NULL,
237 			   net_pkt_allocs[i].is_pkt ?
238 			   NULL : net_pkt_allocs[i].buf,
239 			   net_pkt_allocs[i].func_alloc,
240 			   net_pkt_allocs[i].line_alloc,
241 			   net_pkt_allocs[i].func_free,
242 			   net_pkt_allocs[i].line_free,
243 			   net_pkt_allocs[i].in_use,
244 			   user_data);
245 		}
246 	}
247 
248 	for (i = 0; i < MAX_NET_PKT_ALLOCS; i++) {
249 		if (!net_pkt_allocs[i].in_use) {
250 			cb(net_pkt_allocs[i].is_pkt ?
251 			   net_pkt_allocs[i].pkt : NULL,
252 			   net_pkt_allocs[i].is_pkt ?
253 			   NULL : net_pkt_allocs[i].buf,
254 			   net_pkt_allocs[i].func_alloc,
255 			   net_pkt_allocs[i].line_alloc,
256 			   net_pkt_allocs[i].func_free,
257 			   net_pkt_allocs[i].line_free,
258 			   net_pkt_allocs[i].in_use,
259 			   user_data);
260 		}
261 	}
262 }
263 #else
264 #define net_pkt_alloc_add(alloc_data, is_pkt, func, line)
265 #define net_pkt_alloc_del(alloc_data, func, line)
266 #define net_pkt_alloc_find(alloc_data, func_free, line_free) false
267 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC */
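
/* Illustrative sketch (not part of this file): dumping the allocation
 * tracking table with net_pkt_allocs_foreach(). The callback arguments
 * mirror the cb() invocation above; "dump_cb" and its body are made-up
 * names for the example and assume the callback type declared in
 * net_pkt.h.
 *
 *   static void dump_cb(struct net_pkt *pkt, struct net_buf *buf,
 *                       const char *func_alloc, int line_alloc,
 *                       const char *func_free, int line_free,
 *                       bool in_use, void *user_data)
 *   {
 *       printk("%s %p %s by %s():%d\n",
 *              pkt ? "pkt" : "buf", pkt ? (void *)pkt : (void *)buf,
 *              in_use ? "allocated" : "freed",
 *              in_use ? func_alloc : func_free,
 *              in_use ? line_alloc : line_free);
 *   }
 *
 *   net_pkt_allocs_foreach(dump_cb, NULL);
 */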
268 
269 #if defined(NET_PKT_DEBUG_ENABLED)
270 
271 #define NET_FRAG_CHECK_IF_NOT_IN_USE(frag, ref)				\
272 	do {								\
273 		if (!(ref)) {                                           \
274 			NET_ERR("**ERROR** frag %p not in use (%s:%s():%d)", \
275 				frag, __FILE__, __func__, __LINE__);     \
276 		}                                                       \
277 	} while (false)
278 
279 const char *net_pkt_slab2str(struct k_mem_slab *slab)
280 {
281 	if (slab == &rx_pkts) {
282 		return "RX";
283 	} else if (slab == &tx_pkts) {
284 		return "TX";
285 	}
286 
287 	return "EXT";
288 }
289 
290 const char *net_pkt_pool2str(struct net_buf_pool *pool)
291 {
292 	if (pool == &rx_bufs) {
293 		return "RDATA";
294 	} else if (pool == &tx_bufs) {
295 		return "TDATA";
296 	}
297 
298 	return "EDATA";
299 }
300 
301 static inline int16_t get_frees(struct net_buf_pool *pool)
302 {
303 #if defined(CONFIG_NET_BUF_POOL_USAGE)
304 	return atomic_get(&pool->avail_count);
305 #else
306 	return 0;
307 #endif
308 }
309 
310 void net_pkt_print_frags(struct net_pkt *pkt)
311 {
312 	struct net_buf *frag;
313 	size_t total = 0;
314 	int count = 0, frag_size = 0;
315 
316 	if (!pkt) {
317 		NET_INFO("pkt %p", pkt);
318 		return;
319 	}
320 
321 	NET_INFO("pkt %p frags %p", pkt, pkt->frags);
322 
323 	NET_ASSERT(pkt->frags);
324 
325 	frag = pkt->frags;
326 	while (frag) {
327 		total += frag->len;
328 
329 		frag_size = net_buf_max_len(frag);
330 
331 		NET_INFO("[%d] frag %p len %d max len %u size %d pool %p",
332 			 count, frag, frag->len, frag->size,
333 			 frag_size, net_buf_pool_get(frag->pool_id));
334 
335 		count++;
336 
337 		frag = frag->frags;
338 	}
339 
340 	NET_INFO("Total data size %zu, occupied %d bytes, utilization %zu%%",
341 		 total, count * frag_size,
342 		 count ? (total * 100) / (count * frag_size) : 0);
343 }
344 #endif
345 
346 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
347 static inline const char *get_name(struct net_buf_pool *pool)
348 {
349 #if defined(CONFIG_NET_BUF_POOL_USAGE)
350 	return pool->name;
351 #else
352 	return "?";
353 #endif
354 }
355 
356 static inline int16_t get_size(struct net_buf_pool *pool)
357 {
358 #if defined(CONFIG_NET_BUF_POOL_USAGE)
359 	return pool->pool_size;
360 #else
361 	return 0;
362 #endif
363 }
364 
365 static inline const char *slab2str(struct k_mem_slab *slab)
366 {
367 	return net_pkt_slab2str(slab);
368 }
369 
370 static inline const char *pool2str(struct net_buf_pool *pool)
371 {
372 	return net_pkt_pool2str(pool);
373 }
374 #endif /* CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG */
375 
376 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
377 struct net_buf *net_pkt_get_reserve_data_debug(struct net_buf_pool *pool,
378 					       size_t min_len,
379 					       k_timeout_t timeout,
380 					       const char *caller,
381 					       int line)
382 #else /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
383 struct net_buf *net_pkt_get_reserve_data(struct net_buf_pool *pool,
384 					 size_t min_len, k_timeout_t timeout)
385 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
386 {
387 	struct net_buf *frag;
388 
389 	if (k_is_in_isr()) {
390 		timeout = K_NO_WAIT;
391 	}
392 
393 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
394 	if (min_len > CONFIG_NET_BUF_DATA_SIZE) {
395 		NET_ERR("Requested too large fragment. Increase CONFIG_NET_BUF_DATA_SIZE.");
396 		return NULL;
397 	}
398 
399 	frag = net_buf_alloc(pool, timeout);
400 #else
401 	frag = net_buf_alloc_len(pool, min_len, timeout);
402 #endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */
403 
404 	if (!frag) {
405 		return NULL;
406 	}
407 
408 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
409 	NET_FRAG_CHECK_IF_NOT_IN_USE(frag, frag->ref + 1U);
410 #endif
411 
412 	net_pkt_alloc_add(frag, false, caller, line);
413 
414 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
415 	NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
416 		pool2str(pool), get_name(pool), get_frees(pool),
417 		frag, frag->ref, caller, line);
418 #endif
419 
420 	return frag;
421 }
422 
423 /* Get a fragment and try to figure out the pool from which to get
424  * the data.
425  */
426 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
427 struct net_buf *net_pkt_get_frag_debug(struct net_pkt *pkt, size_t min_len,
428 				       k_timeout_t timeout,
429 				       const char *caller, int line)
430 #else
431 struct net_buf *net_pkt_get_frag(struct net_pkt *pkt, size_t min_len,
432 				 k_timeout_t timeout)
433 #endif
434 {
435 #if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL)
436 	struct net_context *context;
437 
438 	context = net_pkt_context(pkt);
439 	if (context && context->data_pool) {
440 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
441 		return net_pkt_get_reserve_data_debug(context->data_pool(),
442 						      min_len, timeout,
443 						      caller, line);
444 #else
445 		return net_pkt_get_reserve_data(context->data_pool(), min_len,
446 						timeout);
447 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
448 	}
449 #endif /* CONFIG_NET_CONTEXT_NET_PKT_POOL */
450 
451 	if (pkt->slab == &rx_pkts) {
452 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
453 		return net_pkt_get_reserve_rx_data_debug(min_len, timeout,
454 							 caller, line);
455 #else
456 		return net_pkt_get_reserve_rx_data(min_len, timeout);
457 #endif
458 	}
459 
460 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
461 	return net_pkt_get_reserve_tx_data_debug(min_len, timeout, caller, line);
462 #else
463 	return net_pkt_get_reserve_tx_data(min_len, timeout);
464 #endif
465 }
466 
467 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
468 struct net_buf *net_pkt_get_reserve_rx_data_debug(size_t min_len, k_timeout_t timeout,
469 						  const char *caller, int line)
470 {
471 	return net_pkt_get_reserve_data_debug(&rx_bufs, min_len, timeout, caller, line);
472 }
473 
474 struct net_buf *net_pkt_get_reserve_tx_data_debug(size_t min_len, k_timeout_t timeout,
475 						  const char *caller, int line)
476 {
477 	return net_pkt_get_reserve_data_debug(&tx_bufs, min_len, timeout, caller, line);
478 }
479 
480 #else /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
481 
482 struct net_buf *net_pkt_get_reserve_rx_data(size_t min_len, k_timeout_t timeout)
483 {
484 	return net_pkt_get_reserve_data(&rx_bufs, min_len, timeout);
485 }
486 
487 struct net_buf *net_pkt_get_reserve_tx_data(size_t min_len, k_timeout_t timeout)
488 {
489 	return net_pkt_get_reserve_data(&tx_bufs, min_len, timeout);
490 }
491 
492 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
493 
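/* Illustrative sketch (not part of this file): a driver appending a data
 * fragment to a received packet using the reserve helpers above. The names
 * "rx_pkt", "data" and "len" are placeholders for the caller's variables.
 *
 *   struct net_buf *frag;
 *
 *   frag = net_pkt_get_reserve_rx_data(len, K_NO_WAIT);
 *   if (!frag) {
 *       return -ENOMEM;
 *   }
 *
 *   net_buf_add_mem(frag, data, len);
 *   net_pkt_frag_add(rx_pkt, frag);
 */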
494 
495 #if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL)
496 static inline struct k_mem_slab *get_tx_slab(struct net_context *context)
497 {
498 	if (context->tx_slab) {
499 		return context->tx_slab();
500 	}
501 
502 	return NULL;
503 }
504 
505 static inline struct net_buf_pool *get_data_pool(struct net_context *context)
506 {
507 	if (context->data_pool) {
508 		return context->data_pool();
509 	}
510 
511 	return NULL;
512 }
513 #else
514 #define get_tx_slab(...) NULL
515 #define get_data_pool(...) NULL
516 #endif /* CONFIG_NET_CONTEXT_NET_PKT_POOL */
517 
518 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
519 void net_pkt_unref_debug(struct net_pkt *pkt, const char *caller, int line)
520 {
521 	struct net_buf *frag;
522 
523 #else
524 void net_pkt_unref(struct net_pkt *pkt)
525 {
526 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
527 	atomic_val_t ref;
528 
529 	if (!pkt) {
530 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
531 		NET_ERR("*** ERROR *** pkt %p (%s():%d)", pkt, caller, line);
532 #endif
533 		return;
534 	}
535 
536 	do {
537 		ref = atomic_get(&pkt->atomic_ref);
538 		if (!ref) {
539 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
540 			const char *func_freed;
541 			int line_freed;
542 
543 			if (net_pkt_alloc_find(pkt, &func_freed, &line_freed)) {
544 				NET_ERR("*** ERROR *** pkt %p is freed already "
545 					"by %s():%d (%s():%d)",
546 					pkt, func_freed, line_freed, caller,
547 					line);
548 			} else {
549 				NET_ERR("*** ERROR *** pkt %p is freed already "
550 					"(%s():%d)", pkt, caller, line);
551 			}
552 #endif
553 			return;
554 		}
555 	} while (!atomic_cas(&pkt->atomic_ref, ref, ref - 1));
556 
557 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
558 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
559 	NET_DBG("%s [%d] pkt %p ref %ld frags %p (%s():%d)",
560 		slab2str(pkt->slab), k_mem_slab_num_free_get(pkt->slab),
561 		pkt, ref - 1, pkt->frags, caller, line);
562 #endif
563 	if (ref > 1) {
564 		goto done;
565 	}
566 
567 	frag = pkt->frags;
568 	while (frag) {
569 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
570 		NET_DBG("%s (%s) [%d] frag %p ref %d frags %p (%s():%d)",
571 			pool2str(net_buf_pool_get(frag->pool_id)),
572 			get_name(net_buf_pool_get(frag->pool_id)),
573 			get_frees(net_buf_pool_get(frag->pool_id)), frag,
574 			frag->ref - 1U, frag->frags, caller, line);
575 #endif
576 
577 		if (!frag->ref) {
578 			const char *func_freed;
579 			int line_freed;
580 
581 			if (net_pkt_alloc_find(frag,
582 					       &func_freed, &line_freed)) {
583 				NET_ERR("*** ERROR *** frag %p is freed "
584 					"already by %s():%d (%s():%d)",
585 					frag, func_freed, line_freed,
586 					caller, line);
587 			} else {
588 				NET_ERR("*** ERROR *** frag %p is freed "
589 					"already (%s():%d)",
590 					frag, caller, line);
591 			}
592 		}
593 
594 		net_pkt_alloc_del(frag, caller, line);
595 
596 		frag = frag->frags;
597 	}
598 
599 	net_pkt_alloc_del(pkt, caller, line);
600 done:
601 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
602 
603 	if (ref > 1) {
604 		return;
605 	}
606 
607 	if (pkt->frags) {
608 		net_pkt_frag_unref(pkt->frags);
609 	}
610 
611 	if (IS_ENABLED(CONFIG_NET_DEBUG_NET_PKT_NON_FRAGILE_ACCESS)) {
612 		pkt->buffer = NULL;
613 		net_pkt_cursor_init(pkt);
614 	}
615 
616 	k_mem_slab_free(pkt->slab, (void *)pkt);
617 }
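
/* Illustrative sketch: reference counting when a packet is handed over to
 * another thread. Every net_pkt_ref() must be balanced by a net_pkt_unref();
 * the final unref releases the fragment chain and returns the packet to its
 * slab. "submit_to_worker" is a made-up hand-off function.
 *
 *   net_pkt_ref(pkt);
 *   submit_to_worker(pkt);
 *
 * and later, in the consumer, once the packet has been processed:
 *
 *   net_pkt_unref(pkt);
 */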
618 
619 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
620 struct net_pkt *net_pkt_ref_debug(struct net_pkt *pkt, const char *caller,
621 				  int line)
622 #else
623 struct net_pkt *net_pkt_ref(struct net_pkt *pkt)
624 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
625 {
626 	atomic_val_t ref;
627 
628 	do {
629 		ref = pkt ? atomic_get(&pkt->atomic_ref) : 0;
630 		if (!ref) {
631 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
632 			NET_ERR("*** ERROR *** pkt %p (%s():%d)",
633 				pkt, caller, line);
634 #endif
635 			return NULL;
636 		}
637 	} while (!atomic_cas(&pkt->atomic_ref, ref, ref + 1));
638 
639 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
640 	NET_DBG("%s [%d] pkt %p ref %ld (%s():%d)",
641 		slab2str(pkt->slab), k_mem_slab_num_free_get(pkt->slab),
642 		pkt, ref + 1, caller, line);
643 #endif
644 
645 
646 	return pkt;
647 }
648 
649 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
650 struct net_buf *net_pkt_frag_ref_debug(struct net_buf *frag,
651 				       const char *caller, int line)
652 #else
653 struct net_buf *net_pkt_frag_ref(struct net_buf *frag)
654 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
655 {
656 	if (!frag) {
657 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
658 		NET_ERR("*** ERROR *** frag %p (%s():%d)", frag, caller, line);
659 #endif
660 		return NULL;
661 	}
662 
663 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
664 	NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
665 		pool2str(net_buf_pool_get(frag->pool_id)),
666 		get_name(net_buf_pool_get(frag->pool_id)),
667 		get_frees(net_buf_pool_get(frag->pool_id)),
668 		frag, frag->ref + 1U, caller, line);
669 #endif
670 
671 	return net_buf_ref(frag);
672 }
673 
674 
675 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
676 void net_pkt_frag_unref_debug(struct net_buf *frag,
677 			      const char *caller, int line)
678 #else
679 void net_pkt_frag_unref(struct net_buf *frag)
680 #endif /* NET_LOG_LEVEL >= LOG_LEVEL_DBG */
681 {
682 	if (!frag) {
683 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
684 		NET_ERR("*** ERROR *** frag %p (%s():%d)", frag, caller, line);
685 #endif
686 		return;
687 	}
688 
689 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
690 	NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
691 		pool2str(net_buf_pool_get(frag->pool_id)),
692 		get_name(net_buf_pool_get(frag->pool_id)),
693 		get_frees(net_buf_pool_get(frag->pool_id)),
694 		frag, frag->ref - 1U, caller, line);
695 #endif
696 
697 	if (frag->ref == 1U) {
698 		net_pkt_alloc_del(frag, caller, line);
699 	}
700 
701 	net_buf_unref(frag);
702 }
703 
704 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
705 struct net_buf *net_pkt_frag_del_debug(struct net_pkt *pkt,
706 				       struct net_buf *parent,
707 				       struct net_buf *frag,
708 				       const char *caller, int line)
709 #else
710 struct net_buf *net_pkt_frag_del(struct net_pkt *pkt,
711 				 struct net_buf *parent,
712 				 struct net_buf *frag)
713 #endif
714 {
715 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
716 	NET_DBG("pkt %p parent %p frag %p ref %u (%s:%d)",
717 		pkt, parent, frag, frag->ref, caller, line);
718 #endif
719 
720 	if (pkt->frags == frag && !parent) {
721 		struct net_buf *tmp;
722 
723 		if (frag->ref == 1U) {
724 			net_pkt_alloc_del(frag, caller, line);
725 		}
726 
727 		tmp = net_buf_frag_del(NULL, frag);
728 		pkt->frags = tmp;
729 
730 		return tmp;
731 	}
732 
733 	if (frag->ref == 1U) {
734 		net_pkt_alloc_del(frag, caller, line);
735 	}
736 
737 	return net_buf_frag_del(parent, frag);
738 }
739 
740 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
741 void net_pkt_frag_add_debug(struct net_pkt *pkt, struct net_buf *frag,
742 			    const char *caller, int line)
743 #else
744 void net_pkt_frag_add(struct net_pkt *pkt, struct net_buf *frag)
745 #endif
746 {
747 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
748 	NET_DBG("pkt %p frag %p (%s:%d)", pkt, frag, caller, line);
749 #endif
750 
751 	/* We do not use net_buf_frag_add() as it would take one more
752 	 * reference on the frag when !pkt->frags.
753 	 */
754 	if (!pkt->frags) {
755 		pkt->frags = frag;
756 		return;
757 	}
758 
759 	net_buf_frag_insert(net_buf_frag_last(pkt->frags), frag);
760 }
761 
762 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
763 void net_pkt_frag_insert_debug(struct net_pkt *pkt, struct net_buf *frag,
764 			       const char *caller, int line)
765 #else
766 void net_pkt_frag_insert(struct net_pkt *pkt, struct net_buf *frag)
767 #endif
768 {
769 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
770 	NET_DBG("pkt %p frag %p (%s:%d)", pkt, frag, caller, line);
771 #endif
772 
773 	net_buf_frag_last(frag)->frags = pkt->frags;
774 	pkt->frags = frag;
775 }
776 
777 void net_pkt_compact(struct net_pkt *pkt)
778 {
779 	struct net_buf *frag, *prev;
780 
781 	NET_DBG("Compacting data in pkt %p", pkt);
782 
783 	frag = pkt->frags;
784 	prev = NULL;
785 
786 	while (frag) {
787 		if (frag->frags) {
788 			/* Copy as much data as fits from the next fragment
789 			 * into this fragment.
790 			 */
791 			size_t copy_len;
792 
793 			copy_len = frag->frags->len;
794 			if (copy_len > net_buf_tailroom(frag)) {
795 				copy_len = net_buf_tailroom(frag);
796 			}
797 
798 			memcpy(net_buf_tail(frag), frag->frags->data, copy_len);
799 			net_buf_add(frag, copy_len);
800 
801 			memmove(frag->frags->data,
802 				frag->frags->data + copy_len,
803 				frag->frags->len - copy_len);
804 
805 			frag->frags->len -= copy_len;
806 
807 			/* Is there any more space in this fragment? */
808 			if (net_buf_tailroom(frag)) {
809 				/* There is. This also means that the next
810 				 * fragment is empty, as otherwise we could
811 				 * not have copied all the data. Remove the
812 				 * next fragment as there is no data left in it.
813 				 */
814 				net_pkt_frag_del(pkt, frag, frag->frags);
815 
816 				/* Then check next fragment */
817 				continue;
818 			}
819 		} else {
820 			if (!frag->len) {
821 				/* Remove the last fragment because there is no
822 				 * data in it.
823 				 */
824 				net_pkt_frag_del(pkt, prev, frag);
825 
826 				break;
827 			}
828 		}
829 
830 		prev = frag;
831 		frag = frag->frags;
832 	}
833 }
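
/* Illustrative sketch: net_pkt_compact() is typically called after data has
 * been pulled from or trimmed off a packet, so that partially filled
 * fragments are merged and emptied fragments are released.
 * "removed_hdr_len" is a placeholder for whatever was pulled by the caller.
 *
 *   net_buf_pull(pkt->frags, removed_hdr_len);
 *   net_pkt_compact(pkt);
 */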
834 
835 void net_pkt_get_info(struct k_mem_slab **rx,
836 		      struct k_mem_slab **tx,
837 		      struct net_buf_pool **rx_data,
838 		      struct net_buf_pool **tx_data)
839 {
840 	if (rx) {
841 		*rx = &rx_pkts;
842 	}
843 
844 	if (tx) {
845 		*tx = &tx_pkts;
846 	}
847 
848 	if (rx_data) {
849 		*rx_data = &rx_bufs;
850 	}
851 
852 	if (tx_data) {
853 		*tx_data = &tx_bufs;
854 	}
855 }
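
/* Illustrative sketch: querying the core slabs and pools, e.g. from a shell
 * command, to report how many RX/TX packets are still available. Data pool
 * usage counters additionally need CONFIG_NET_BUF_POOL_USAGE.
 *
 *   struct k_mem_slab *rx, *tx;
 *   struct net_buf_pool *rx_data, *tx_data;
 *
 *   net_pkt_get_info(&rx, &tx, &rx_data, &tx_data);
 *   printk("free RX pkts %u, free TX pkts %u\n",
 *          k_mem_slab_num_free_get(rx), k_mem_slab_num_free_get(tx));
 */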
856 
857 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
858 void net_pkt_print(void)
859 {
860 	NET_DBG("TX %u RX %u RDATA %d TDATA %d",
861 		k_mem_slab_num_free_get(&tx_pkts),
862 		k_mem_slab_num_free_get(&rx_pkts),
863 		get_frees(&rx_bufs), get_frees(&tx_bufs));
864 }
865 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC */
866 
867 /* The new allocator and API start here */
868 
869 #if defined(CONFIG_NET_PKT_ALLOC_STATS)
870 static struct net_pkt_alloc_stats_slab *find_alloc_stats(struct k_mem_slab *slab)
871 {
872 	STRUCT_SECTION_FOREACH(net_pkt_alloc_stats_slab, tmp) {
873 		if (tmp->slab == slab) {
874 			return tmp;
875 		}
876 	}
877 
878 	NET_ASSERT("slab not found");
879 
880 	/* This will force a crash which is intended in this case as the
881 	 * slab should always have a valid value.
882 	 */
883 	return NULL;
884 }
885 
886 #define NET_PKT_ALLOC_STATS_UPDATE(pkt, alloc_size, start) ({		\
887 	if (pkt->alloc_stats == NULL) {					\
888 		pkt->alloc_stats = find_alloc_stats(pkt->slab);		\
889 	}								\
890 	pkt->alloc_stats->ok.count++;					\
891 	if (pkt->alloc_stats->ok.count == 0) {				\
892 		pkt->alloc_stats->ok.alloc_sum = 0ULL;			\
893 		pkt->alloc_stats->ok.time_sum = 0ULL;			\
894 	} else {							\
895 		pkt->alloc_stats->ok.alloc_sum += (uint64_t)alloc_size;	\
896 		pkt->alloc_stats->ok.time_sum += (uint64_t)(k_cycle_get_32() - start); \
897 	}								\
898 									\
899 	pkt->alloc_stats->ok.count;					\
900 })
901 
902 #define NET_PKT_ALLOC_STATS_FAIL(pkt, alloc_size, start) ({		\
903 	if (pkt->alloc_stats == NULL) {					\
904 		pkt->alloc_stats = find_alloc_stats(pkt->slab);		\
905 	}								\
906 	pkt->alloc_stats->fail.count++;					\
907 	if (pkt->alloc_stats->fail.count == 0) {			\
908 		pkt->alloc_stats->fail.alloc_sum = 0ULL;		\
909 		pkt->alloc_stats->fail.time_sum = 0ULL;			\
910 	} else {							\
911 		pkt->alloc_stats->fail.alloc_sum += (uint64_t)alloc_size;\
912 		pkt->alloc_stats->fail.time_sum += (uint64_t)(k_cycle_get_32() - start); \
913 	}								\
914 									\
915 	pkt->alloc_stats->fail.count;					\
916 })
917 #else
918 #define NET_PKT_ALLOC_STATS_UPDATE(pkt, alloc_size, start) ({ 0; })
919 #define NET_PKT_ALLOC_STATS_FAIL(pkt, alloc_size, start) ({ 0; })
920 #endif /* CONFIG_NET_PKT_ALLOC_STATS */
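
/* Illustrative sketch: the per-slab allocation statistics collected by the
 * macros above can be walked with STRUCT_SECTION_FOREACH. The averages
 * below are in hardware cycles and bytes; cycle-to-time conversion is left
 * out of this example.
 *
 *   STRUCT_SECTION_FOREACH(net_pkt_alloc_stats_slab, stats) {
 *       if (stats->ok.count > 0) {
 *           printk("slab %p: %u allocs, avg %llu cycles, avg %llu bytes\n",
 *                  stats->slab, stats->ok.count,
 *                  stats->ok.time_sum / stats->ok.count,
 *                  stats->ok.alloc_sum / stats->ok.count);
 *       }
 *   }
 */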
921 
922 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
923 
924 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
925 static struct net_buf *pkt_alloc_buffer(struct net_pkt *pkt,
926 					struct net_buf_pool *pool,
927 					size_t size, size_t headroom,
928 					k_timeout_t timeout,
929 					const char *caller, int line)
930 #else
931 static struct net_buf *pkt_alloc_buffer(struct net_pkt *pkt,
932 					struct net_buf_pool *pool,
933 					size_t size, size_t headroom,
934 					k_timeout_t timeout)
935 #endif
936 {
937 #if defined(CONFIG_NET_PKT_ALLOC_STATS)
938 	uint32_t start_time = k_cycle_get_32();
939 	size_t total_size = size;
940 #else
941 	ARG_UNUSED(pkt);
942 #endif
943 
944 	k_timepoint_t end = sys_timepoint_calc(timeout);
945 	struct net_buf *first = NULL;
946 	struct net_buf *current = NULL;
947 
948 	do {
949 		struct net_buf *new;
950 
951 		new = net_buf_alloc_fixed(pool, timeout);
952 		if (!new) {
953 			goto error;
954 		}
955 
956 		if (!first && !current) {
957 			first = new;
958 		} else {
959 			current->frags = new;
960 		}
961 
962 		current = new;
963 
964 		/* If there is headroom reserved, then allocate that to the
965 		 * first buf.
966 		 */
967 		if (current == first && headroom > 0) {
968 			if (current->size > (headroom + size)) {
969 				current->size = size + headroom;
970 
971 				size = 0U;
972 			} else {
973 				size -= current->size;
974 			}
975 		} else {
976 			if (current->size > size) {
977 				current->size = size;
978 			}
979 
980 			size -= current->size;
981 		}
982 
983 		timeout = sys_timepoint_timeout(end);
984 
985 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
986 		NET_FRAG_CHECK_IF_NOT_IN_USE(new, new->ref + 1);
987 
988 		net_pkt_alloc_add(new, false, caller, line);
989 
990 		NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
991 			pool2str(pool), get_name(pool), get_frees(pool),
992 			new, new->ref, caller, line);
993 #endif
994 	} while (size);
995 
996 #if defined(CONFIG_NET_PKT_ALLOC_STATS)
997 	if (NET_PKT_ALLOC_STATS_UPDATE(pkt, total_size, start_time) == 0) {
998 		NET_DBG("pkt %p %s stats rollover", pkt, "ok");
999 	}
1000 #endif
1001 
1002 	return first;
1003 error:
1004 	if (first) {
1005 		net_buf_unref(first);
1006 	}
1007 
1008 #if defined(CONFIG_NET_PKT_ALLOC_STATS)
1009 	if (NET_PKT_ALLOC_STATS_FAIL(pkt, total_size, start_time) == 0) {
1010 		NET_DBG("pkt %p %s stats rollover", pkt, "fail");
1011 	}
1012 #endif
1013 
1014 	return NULL;
1015 }
1016 
1017 #else /* !CONFIG_NET_BUF_FIXED_DATA_SIZE */
1018 
1019 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1020 static struct net_buf *pkt_alloc_buffer(struct net_pkt *pkt,
1021 					struct net_buf_pool *pool,
1022 					size_t size, size_t headroom,
1023 					k_timeout_t timeout,
1024 					const char *caller, int line)
1025 #else
1026 static struct net_buf *pkt_alloc_buffer(struct net_pkt *pkt,
1027 					struct net_buf_pool *pool,
1028 					size_t size, size_t headroom,
1029 					k_timeout_t timeout)
1030 #endif
1031 {
1032 	struct net_buf *buf;
1033 
1034 #if defined(CONFIG_NET_PKT_ALLOC_STATS)
1035 	uint32_t start_time = k_cycle_get_32();
1036 	size_t total_size = size;
1037 #else
1038 	ARG_UNUSED(pkt);
1039 #endif
1040 	ARG_UNUSED(headroom);
1041 
1042 	buf = net_buf_alloc_len(pool, size, timeout);
1043 
1044 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
1045 	NET_FRAG_CHECK_IF_NOT_IN_USE(buf, buf->ref + 1);
1046 
1047 	net_pkt_alloc_add(buf, false, caller, line);
1048 
1049 	NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
1050 		pool2str(pool), get_name(pool), get_frees(pool),
1051 		buf, buf->ref, caller, line);
1052 #endif
1053 
1054 #if defined(CONFIG_NET_PKT_ALLOC_STATS)
1055 	if (buf) {
1056 		if (NET_PKT_ALLOC_STATS_UPDATE(pkt, total_size, start_time) == 0) {
1057 			NET_DBG("pkt %p %s stats rollover", pkt, "ok");
1058 		}
1059 	} else {
1060 		if (NET_PKT_ALLOC_STATS_FAIL(pkt, total_size, start_time) == 0) {
1061 			NET_DBG("pkt %p %s stats rollover", pkt, "fail");
1062 		}
1063 	}
1064 #endif /* CONFIG_NET_PKT_ALLOC_STATS */
1065 
1066 	return buf;
1067 }
1068 
1069 #endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */
1070 
1071 static size_t pkt_buffer_length(struct net_pkt *pkt,
1072 				size_t size,
1073 				enum net_ip_protocol proto,
1074 				size_t existing)
1075 {
1076 	sa_family_t family = net_pkt_family(pkt);
1077 	size_t max_len;
1078 
1079 	if (net_pkt_iface(pkt)) {
1080 		max_len = net_if_get_mtu(net_pkt_iface(pkt));
1081 	} else {
1082 		max_len = 0;
1083 	}
1084 
1085 	/* Family vs iface MTU */
1086 	if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
1087 		if (IS_ENABLED(CONFIG_NET_IPV6_FRAGMENT) && (size > max_len)) {
1088 			/* We support larger packets if IPv6 fragmentation is
1089 			 * enabled.
1090 			 */
1091 			max_len = size;
1092 		}
1093 
1094 		max_len = MAX(max_len, NET_IPV6_MTU);
1095 	} else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
1096 		if (IS_ENABLED(CONFIG_NET_IPV4_FRAGMENT) && (size > max_len)) {
1097 			/* We support larger packets if IPv4 fragmentation is enabled */
1098 			max_len = size;
1099 		}
1100 
1101 		max_len = MAX(max_len, NET_IPV4_MTU);
1102 	} else { /* family == AF_UNSPEC */
1103 #if defined (CONFIG_NET_L2_ETHERNET)
1104 		if (net_if_l2(net_pkt_iface(pkt)) ==
1105 		    &NET_L2_GET_NAME(ETHERNET)) {
1106 			max_len += NET_ETH_MAX_HDR_SIZE;
1107 		} else
1108 #endif /* CONFIG_NET_L2_ETHERNET */
1109 		{
1110 			/* Other L2 are not checked as the pkt MTU in this case
1111 			 * is based on the IP layer (IPv6 most of the time).
1112 			 */
1113 			max_len = size;
1114 		}
1115 	}
1116 
1117 	max_len -= existing;
1118 
1119 	return MIN(size, max_len);
1120 }
1121 
1122 static size_t pkt_estimate_headers_length(struct net_pkt *pkt,
1123 					  sa_family_t family,
1124 					  enum net_ip_protocol proto)
1125 {
1126 	size_t hdr_len = 0;
1127 
1128 	if (family == AF_UNSPEC) {
1129 		return  0;
1130 	}
1131 
1132 	/* Family header */
1133 	if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
1134 		hdr_len += NET_IPV6H_LEN;
1135 	} else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
1136 		hdr_len += NET_IPV4H_LEN;
1137 	}
1138 
1139 	/* + protocol header */
1140 	if (IS_ENABLED(CONFIG_NET_TCP) && proto == IPPROTO_TCP) {
1141 		hdr_len += NET_TCPH_LEN + NET_TCP_MAX_OPT_SIZE;
1142 	} else if (IS_ENABLED(CONFIG_NET_UDP) && proto == IPPROTO_UDP) {
1143 		hdr_len += NET_UDPH_LEN;
1144 	} else if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) {
1145 		hdr_len += NET_ICMPH_LEN;
1146 	}
1147 
1148 	NET_DBG("HDRs length estimation %zu", hdr_len);
1149 
1150 	return hdr_len;
1151 }
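
/* Worked example of the estimate above, assuming the usual header sizes:
 * for family == AF_INET6 and proto == IPPROTO_TCP the function returns
 * NET_IPV6H_LEN (40) + NET_TCPH_LEN (20) + NET_TCP_MAX_OPT_SIZE bytes,
 * while for AF_INET and IPPROTO_UDP it returns
 * NET_IPV4H_LEN (20) + NET_UDPH_LEN (8) = 28 bytes.
 */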
1152 
1153 static size_t pkt_get_max_len(struct net_pkt *pkt)
1154 {
1155 	struct net_buf *buf = pkt->buffer;
1156 	size_t size = 0;
1157 
1158 	while (buf) {
1159 		size += net_buf_max_len(buf);
1160 		buf = buf->frags;
1161 	}
1162 
1163 	return size;
1164 }
1165 
1166 size_t net_pkt_available_buffer(struct net_pkt *pkt)
1167 {
1168 	if (!pkt) {
1169 		return 0;
1170 	}
1171 
1172 	return pkt_get_max_len(pkt) - net_pkt_get_len(pkt);
1173 }
1174 
1175 size_t net_pkt_available_payload_buffer(struct net_pkt *pkt,
1176 					enum net_ip_protocol proto)
1177 {
1178 	size_t hdr_len = 0;
1179 	size_t len;
1180 
1181 	if (!pkt) {
1182 		return 0;
1183 	}
1184 
1185 	hdr_len = pkt_estimate_headers_length(pkt, net_pkt_family(pkt), proto);
1186 	len = net_pkt_get_len(pkt);
1187 
1188 	hdr_len = hdr_len <= len ? 0 : hdr_len - len;
1189 
1190 	len = net_pkt_available_buffer(pkt) - hdr_len;
1191 
1192 	return len;
1193 }
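
/* Illustrative sketch: a caller can use the helper above to size its next
 * write so that the payload plus the estimated protocol headers still fit
 * into the already allocated buffer. "send_len" and "payload_len" are
 * placeholders for the caller's variables.
 *
 *   size_t send_len = MIN(payload_len,
 *                         net_pkt_available_payload_buffer(pkt, IPPROTO_UDP));
 */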
1194 
1195 void net_pkt_trim_buffer(struct net_pkt *pkt)
1196 {
1197 	struct net_buf *buf, *prev;
1198 
1199 	buf = pkt->buffer;
1200 	prev = buf;
1201 
1202 	while (buf) {
1203 		struct net_buf *next = buf->frags;
1204 
1205 		if (!buf->len) {
1206 			if (buf == pkt->buffer) {
1207 				pkt->buffer = next;
1208 			} else if (buf == prev->frags) {
1209 				prev->frags = next;
1210 			}
1211 
1212 			buf->frags = NULL;
1213 			net_buf_unref(buf);
1214 		} else {
1215 			prev = buf;
1216 		}
1217 
1218 		buf = next;
1219 	}
1220 }
1221 
1222 int net_pkt_remove_tail(struct net_pkt *pkt, size_t length)
1223 {
1224 	struct net_buf *buf = pkt->buffer;
1225 	size_t remaining_len = net_pkt_get_len(pkt);
1226 
1227 	if (remaining_len < length) {
1228 		return -EINVAL;
1229 	}
1230 
1231 	remaining_len -= length;
1232 
1233 	while (buf) {
1234 		if (buf->len >= remaining_len) {
1235 			buf->len = remaining_len;
1236 
1237 			if (buf->frags) {
1238 				net_pkt_frag_unref(buf->frags);
1239 				buf->frags = NULL;
1240 			}
1241 
1242 			break;
1243 		}
1244 
1245 		remaining_len -= buf->len;
1246 		buf = buf->frags;
1247 	}
1248 
1249 	return 0;
1250 }
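
/* Illustrative sketch: dropping a 4 byte frame check sequence that a driver
 * left at the end of a received packet.
 *
 *   if (net_pkt_remove_tail(pkt, 4) < 0) {
 *       return -EINVAL;
 *   }
 */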
1251 
1252 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1253 int net_pkt_alloc_buffer_with_reserve_debug(struct net_pkt *pkt,
1254 					    size_t size,
1255 					    size_t reserve,
1256 					    enum net_ip_protocol proto,
1257 					    k_timeout_t timeout,
1258 					    const char *caller,
1259 					    int line)
1260 #else
1261 int net_pkt_alloc_buffer_with_reserve(struct net_pkt *pkt,
1262 				      size_t size,
1263 				      size_t reserve,
1264 				      enum net_ip_protocol proto,
1265 				      k_timeout_t timeout)
1266 #endif
1267 {
1268 	struct net_buf_pool *pool = NULL;
1269 	size_t alloc_len = 0;
1270 	size_t hdr_len = 0;
1271 	struct net_buf *buf;
1272 
1273 	if (!size && proto == 0 && net_pkt_family(pkt) == AF_UNSPEC) {
1274 		return 0;
1275 	}
1276 
1277 	if (k_is_in_isr()) {
1278 		timeout = K_NO_WAIT;
1279 	}
1280 
1281 	/* Verify the existing buffer and take the free space there into account */
1282 	alloc_len = net_pkt_available_buffer(pkt);
1283 	if (!alloc_len) {
1284 		/* In case there is no free space, account for the estimated
1285 		 * header space.
1286 		 */
1287 		hdr_len = pkt_estimate_headers_length(pkt,
1288 						      net_pkt_family(pkt),
1289 						      proto);
1290 	}
1291 
1292 	/* Calculate the maximum that can be allocated depending on size */
1293 	alloc_len = pkt_buffer_length(pkt, size + hdr_len, proto, alloc_len);
1294 
1295 	NET_DBG("Data allocation maximum size %zu (requested %zu, reserve %zu)",
1296 		alloc_len, size, reserve);
1297 
1298 	if (pkt->context) {
1299 		pool = get_data_pool(pkt->context);
1300 	}
1301 
1302 	if (!pool) {
1303 		pool = pkt->slab == &tx_pkts ? &tx_bufs : &rx_bufs;
1304 	}
1305 
1306 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1307 	buf = pkt_alloc_buffer(pkt, pool, alloc_len, reserve,
1308 			       timeout, caller, line);
1309 #else
1310 	buf = pkt_alloc_buffer(pkt, pool, alloc_len, reserve, timeout);
1311 #endif
1312 
1313 	if (!buf) {
1314 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1315 		NET_ERR("Data buffer (%zu) allocation failed (%s:%d)",
1316 			alloc_len + reserve, caller, line);
1317 #else
1318 		NET_ERR("Data buffer (%zu) allocation failed.",
1319 			alloc_len + reserve);
1320 #endif
1321 		return -ENOMEM;
1322 	}
1323 
1324 	net_pkt_append_buffer(pkt, buf);
1325 
1326 	/* Hide the link layer header for now. The space is used when the
1327 	 * link layer header needs to be written to the packet by the L2 send.
1328 	 */
1329 	if (reserve > 0U) {
1330 		NET_DBG("Reserving %zu bytes for L2 header", reserve);
1331 
1332 		net_buf_reserve(pkt->buffer, reserve);
1333 
1334 		net_pkt_cursor_init(pkt);
1335 	}
1336 
1337 	return 0;
1338 }
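
/* Illustrative sketch: allocating payload buffer while reserving headroom
 * for the link layer header that L2 will prepend later. "payload_len" and
 * "l2_hdr_len" are placeholders for values computed by the caller.
 *
 *   if (net_pkt_alloc_buffer_with_reserve(pkt, payload_len, l2_hdr_len,
 *                                         IPPROTO_UDP, K_MSEC(100)) < 0) {
 *       net_pkt_unref(pkt);
 *       return -ENOMEM;
 *   }
 */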
1339 
1340 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1341 int net_pkt_alloc_buffer_debug(struct net_pkt *pkt,
1342 			       size_t size,
1343 			       enum net_ip_protocol proto,
1344 			       k_timeout_t timeout,
1345 			       const char *caller,
1346 			       int line)
1347 #else
1348 int net_pkt_alloc_buffer(struct net_pkt *pkt,
1349 			 size_t size,
1350 			 enum net_ip_protocol proto,
1351 			 k_timeout_t timeout)
1352 #endif
1353 {
1354 	struct net_if *iface;
1355 	int ret;
1356 
1357 	if (!size && proto == 0 && net_pkt_family(pkt) == AF_UNSPEC) {
1358 		return 0;
1359 	}
1360 
1361 	if (k_is_in_isr()) {
1362 		timeout = K_NO_WAIT;
1363 	}
1364 
1365 	iface = net_pkt_iface(pkt);
1366 
1367 	if (iface != NULL && net_if_l2(iface)->alloc != NULL) {
1368 		ret = net_if_l2(iface)->alloc(iface, pkt, size, proto, timeout);
1369 		if (ret != -ENOTSUP) {
1370 			return ret;
1371 		}
1372 	}
1373 
1374 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1375 	ret = net_pkt_alloc_buffer_with_reserve_debug(pkt,
1376 						      size,
1377 						      0U,
1378 						      proto,
1379 						      timeout,
1380 						      caller,
1381 						      line);
1382 #else
1383 	ret = net_pkt_alloc_buffer_with_reserve(pkt,
1384 						size,
1385 						0U,
1386 						proto,
1387 						timeout);
1388 #endif
1389 
1390 	return ret;
1391 }
1392 
1393 
1394 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1395 int net_pkt_alloc_buffer_raw_debug(struct net_pkt *pkt, size_t size,
1396 				   k_timeout_t timeout, const char *caller,
1397 				   int line)
1398 #else
1399 int net_pkt_alloc_buffer_raw(struct net_pkt *pkt, size_t size,
1400 			     k_timeout_t timeout)
1401 #endif
1402 {
1403 	struct net_buf_pool *pool = NULL;
1404 	struct net_buf *buf;
1405 
1406 	if (size == 0) {
1407 		return 0;
1408 	}
1409 
1410 	if (k_is_in_isr()) {
1411 		timeout = K_NO_WAIT;
1412 	}
1413 
1414 	NET_DBG("Data allocation size %zu", size);
1415 
1416 	if (pkt->context) {
1417 		pool = get_data_pool(pkt->context);
1418 	}
1419 
1420 	if (!pool) {
1421 		pool = pkt->slab == &tx_pkts ? &tx_bufs : &rx_bufs;
1422 	}
1423 
1424 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1425 	buf = pkt_alloc_buffer(pkt, pool, size, 0U, timeout, caller, line);
1426 #else
1427 	buf = pkt_alloc_buffer(pkt, pool, size, 0U, timeout);
1428 #endif
1429 
1430 	if (!buf) {
1431 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1432 		NET_ERR("Data buffer (%zd) allocation failed (%s:%d)",
1433 			size, caller, line);
1434 #else
1435 		NET_ERR("Data buffer (%zd) allocation failed.", size);
1436 #endif
1437 		return -ENOMEM;
1438 	}
1439 
1440 	net_pkt_append_buffer(pkt, buf);
1441 
1442 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
1443 	/* net_buf allocators shrink the buffer size to the requested size.
1444 	 * We don't want this behavior here, so restore the real size of the
1445 	 * last fragment.
1446 	 */
1447 	buf = net_buf_frag_last(buf);
1448 	buf->size = CONFIG_NET_BUF_DATA_SIZE;
1449 #endif
1450 
1451 	return 0;
1452 }
1453 
1454 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1455 static struct net_pkt *pkt_alloc(struct k_mem_slab *slab, k_timeout_t timeout,
1456 				 const char *caller, int line)
1457 #else
1458 static struct net_pkt *pkt_alloc(struct k_mem_slab *slab, k_timeout_t timeout)
1459 #endif
1460 {
1461 	struct net_pkt *pkt;
1462 	uint32_t create_time;
1463 	int ret;
1464 
1465 	if (k_is_in_isr()) {
1466 		timeout = K_NO_WAIT;
1467 	}
1468 
1469 	if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS) ||
1470 	    IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS) ||
1471 	    IS_ENABLED(CONFIG_TRACING_NET_CORE)) {
1472 		create_time = k_cycle_get_32();
1473 	} else {
1474 		ARG_UNUSED(create_time);
1475 	}
1476 
1477 	ret = k_mem_slab_alloc(slab, (void **)&pkt, timeout);
1478 	if (ret) {
1479 		return NULL;
1480 	}
1481 
1482 	memset(pkt, 0, sizeof(struct net_pkt));
1483 
1484 	pkt->atomic_ref = ATOMIC_INIT(1);
1485 	pkt->slab = slab;
1486 
1487 	if (IS_ENABLED(CONFIG_NET_IPV6)) {
1488 		net_pkt_set_ipv6_next_hdr(pkt, 255);
1489 	}
1490 
1491 #if defined(CONFIG_NET_TX_DEFAULT_PRIORITY)
1492 #define TX_DEFAULT_PRIORITY CONFIG_NET_TX_DEFAULT_PRIORITY
1493 #else
1494 #define TX_DEFAULT_PRIORITY 0
1495 #endif
1496 
1497 #if defined(CONFIG_NET_RX_DEFAULT_PRIORITY)
1498 #define RX_DEFAULT_PRIORITY CONFIG_NET_RX_DEFAULT_PRIORITY
1499 #else
1500 #define RX_DEFAULT_PRIORITY 0
1501 #endif
1502 
1503 	if (&tx_pkts == slab) {
1504 		net_pkt_set_priority(pkt, TX_DEFAULT_PRIORITY);
1505 	} else if (&rx_pkts == slab) {
1506 		net_pkt_set_priority(pkt, RX_DEFAULT_PRIORITY);
1507 	}
1508 
1509 	if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS) ||
1510 	    IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS) ||
1511 	    IS_ENABLED(CONFIG_TRACING_NET_CORE)) {
1512 		net_pkt_set_create_time(pkt, create_time);
1513 	}
1514 
1515 	net_pkt_set_vlan_tag(pkt, NET_VLAN_TAG_UNSPEC);
1516 
1517 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1518 	net_pkt_alloc_add(pkt, true, caller, line);
1519 #endif
1520 
1521 	net_pkt_cursor_init(pkt);
1522 
1523 	return pkt;
1524 }
1525 
1526 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1527 struct net_pkt *net_pkt_alloc_debug(k_timeout_t timeout,
1528 				    const char *caller, int line)
1529 #else
1530 struct net_pkt *net_pkt_alloc(k_timeout_t timeout)
1531 #endif
1532 {
1533 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1534 	return pkt_alloc(&tx_pkts, timeout, caller, line);
1535 #else
1536 	return pkt_alloc(&tx_pkts, timeout);
1537 #endif
1538 }
1539 
1540 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1541 struct net_pkt *net_pkt_alloc_from_slab_debug(struct k_mem_slab *slab,
1542 					      k_timeout_t timeout,
1543 					      const char *caller, int line)
1544 #else
1545 struct net_pkt *net_pkt_alloc_from_slab(struct k_mem_slab *slab,
1546 					k_timeout_t timeout)
1547 #endif
1548 {
1549 	if (!slab) {
1550 		return NULL;
1551 	}
1552 
1553 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1554 	return pkt_alloc(slab, timeout, caller, line);
1555 #else
1556 	return pkt_alloc(slab, timeout);
1557 #endif
1558 }
1559 
1560 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1561 struct net_pkt *net_pkt_rx_alloc_debug(k_timeout_t timeout,
1562 				       const char *caller, int line)
1563 #else
1564 struct net_pkt *net_pkt_rx_alloc(k_timeout_t timeout)
1565 #endif
1566 {
1567 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1568 	return pkt_alloc(&rx_pkts, timeout, caller, line);
1569 #else
1570 	return pkt_alloc(&rx_pkts, timeout);
1571 #endif
1572 }
1573 
1574 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1575 static struct net_pkt *pkt_alloc_on_iface(struct k_mem_slab *slab,
1576 					  struct net_if *iface,
1577 					  k_timeout_t timeout,
1578 					  const char *caller, int line)
1579 #else
1580 static struct net_pkt *pkt_alloc_on_iface(struct k_mem_slab *slab,
1581 					  struct net_if *iface,
1582 					  k_timeout_t timeout)
1583 
1584 #endif
1585 {
1586 	struct net_pkt *pkt;
1587 
1588 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1589 	pkt = pkt_alloc(slab, timeout, caller, line);
1590 #else
1591 	pkt = pkt_alloc(slab, timeout);
1592 #endif
1593 
1594 	if (pkt) {
1595 		net_pkt_set_iface(pkt, iface);
1596 	}
1597 
1598 	return pkt;
1599 }
1600 
1601 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1602 struct net_pkt *net_pkt_alloc_on_iface_debug(struct net_if *iface,
1603 					     k_timeout_t timeout,
1604 					     const char *caller,
1605 					     int line)
1606 #else
1607 struct net_pkt *net_pkt_alloc_on_iface(struct net_if *iface,
1608 				       k_timeout_t timeout)
1609 #endif
1610 {
1611 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1612 	return pkt_alloc_on_iface(&tx_pkts, iface, timeout, caller, line);
1613 #else
1614 	return pkt_alloc_on_iface(&tx_pkts, iface, timeout);
1615 #endif
1616 }
1617 
1618 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1619 struct net_pkt *net_pkt_rx_alloc_on_iface_debug(struct net_if *iface,
1620 						k_timeout_t timeout,
1621 						const char *caller,
1622 						int line)
1623 #else
1624 struct net_pkt *net_pkt_rx_alloc_on_iface(struct net_if *iface,
1625 					  k_timeout_t timeout)
1626 #endif
1627 {
1628 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1629 	return pkt_alloc_on_iface(&rx_pkts, iface, timeout, caller, line);
1630 #else
1631 	return pkt_alloc_on_iface(&rx_pkts, iface, timeout);
1632 #endif
1633 }
1634 
1635 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1636 static struct net_pkt *
1637 pkt_alloc_with_buffer(struct k_mem_slab *slab,
1638 		      struct net_if *iface,
1639 		      size_t size,
1640 		      sa_family_t family,
1641 		      enum net_ip_protocol proto,
1642 		      k_timeout_t timeout,
1643 		      const char *caller,
1644 		      int line)
1645 #else
1646 static struct net_pkt *
1647 pkt_alloc_with_buffer(struct k_mem_slab *slab,
1648 		      struct net_if *iface,
1649 		      size_t size,
1650 		      sa_family_t family,
1651 		      enum net_ip_protocol proto,
1652 		      k_timeout_t timeout)
1653 #endif
1654 {
1655 	k_timepoint_t end = sys_timepoint_calc(timeout);
1656 	struct net_pkt *pkt;
1657 	int ret;
1658 
1659 	NET_DBG("On iface %d (%p) size %zu", net_if_get_by_iface(iface), iface, size);
1660 
1661 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1662 	pkt = pkt_alloc_on_iface(slab, iface, timeout, caller, line);
1663 #else
1664 	pkt = pkt_alloc_on_iface(slab, iface, timeout);
1665 #endif
1666 
1667 	if (!pkt) {
1668 		return NULL;
1669 	}
1670 
1671 	net_pkt_set_family(pkt, family);
1672 
1673 	timeout = sys_timepoint_timeout(end);
1674 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1675 	ret = net_pkt_alloc_buffer_debug(pkt, size, proto, timeout,
1676 					 caller, line);
1677 #else
1678 	ret = net_pkt_alloc_buffer(pkt, size, proto, timeout);
1679 #endif
1680 
1681 	if (ret) {
1682 		net_pkt_unref(pkt);
1683 		return NULL;
1684 	}
1685 
1686 	return pkt;
1687 }
1688 
1689 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1690 struct net_pkt *net_pkt_alloc_with_buffer_debug(struct net_if *iface,
1691 						size_t size,
1692 						sa_family_t family,
1693 						enum net_ip_protocol proto,
1694 						k_timeout_t timeout,
1695 						const char *caller,
1696 						int line)
1697 #else
1698 struct net_pkt *net_pkt_alloc_with_buffer(struct net_if *iface,
1699 					  size_t size,
1700 					  sa_family_t family,
1701 					  enum net_ip_protocol proto,
1702 					  k_timeout_t timeout)
1703 #endif
1704 {
1705 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1706 	return pkt_alloc_with_buffer(&tx_pkts, iface, size, family,
1707 				     proto, timeout, caller, line);
1708 #else
1709 	return pkt_alloc_with_buffer(&tx_pkts, iface, size, family,
1710 				     proto, timeout);
1711 #endif
1712 }
1713 
1714 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1715 struct net_pkt *net_pkt_rx_alloc_with_buffer_debug(struct net_if *iface,
1716 						   size_t size,
1717 						   sa_family_t family,
1718 						   enum net_ip_protocol proto,
1719 						   k_timeout_t timeout,
1720 						   const char *caller,
1721 						   int line)
1722 #else
1723 struct net_pkt *net_pkt_rx_alloc_with_buffer(struct net_if *iface,
1724 					     size_t size,
1725 					     sa_family_t family,
1726 					     enum net_ip_protocol proto,
1727 					     k_timeout_t timeout)
1728 #endif
1729 {
1730 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
1731 	return pkt_alloc_with_buffer(&rx_pkts, iface, size, family,
1732 					proto, timeout, caller, line);
1733 #else
1734 	return pkt_alloc_with_buffer(&rx_pkts, iface, size, family,
1735 					proto, timeout);
1736 #endif
1737 }
1738 
1739 void net_pkt_append_buffer(struct net_pkt *pkt, struct net_buf *buffer)
1740 {
1741 	if (!pkt->buffer) {
1742 		pkt->buffer = buffer;
1743 		net_pkt_cursor_init(pkt);
1744 	} else {
1745 		net_buf_frag_insert(net_buf_frag_last(pkt->buffer), buffer);
1746 	}
1747 }
1748 
1749 void net_pkt_cursor_init(struct net_pkt *pkt)
1750 {
1751 	pkt->cursor.buf = pkt->buffer;
1752 	if (pkt->cursor.buf) {
1753 		pkt->cursor.pos = pkt->cursor.buf->data;
1754 	} else {
1755 		pkt->cursor.pos = NULL;
1756 	}
1757 }
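
/* Illustrative sketch: the cursor tracks the current read/write position
 * inside the buffer chain. A common pattern is to save and restore it
 * around a peek at some header field.
 *
 *   struct net_pkt_cursor backup;
 *
 *   net_pkt_cursor_backup(pkt, &backup);
 *   ... read or skip data, which moves the cursor ...
 *   net_pkt_cursor_restore(pkt, &backup);
 */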
1758 
1759 static void pkt_cursor_jump(struct net_pkt *pkt, bool write)
1760 {
1761 	struct net_pkt_cursor *cursor = &pkt->cursor;
1762 
1763 	cursor->buf = cursor->buf->frags;
1764 	while (cursor->buf) {
1765 		const size_t len =
1766 			write ? net_buf_max_len(cursor->buf) : cursor->buf->len;
1767 
1768 		if (!len) {
1769 			cursor->buf = cursor->buf->frags;
1770 		} else {
1771 			break;
1772 		}
1773 	}
1774 
1775 	if (cursor->buf) {
1776 		cursor->pos = cursor->buf->data;
1777 	} else {
1778 		cursor->pos = NULL;
1779 	}
1780 }
1781 
1782 static void pkt_cursor_advance(struct net_pkt *pkt, bool write)
1783 {
1784 	struct net_pkt_cursor *cursor = &pkt->cursor;
1785 	size_t len;
1786 
1787 	if (!cursor->buf) {
1788 		return;
1789 	}
1790 
1791 	len = write ? net_buf_max_len(cursor->buf) : cursor->buf->len;
1792 	if ((cursor->pos - cursor->buf->data) == len) {
1793 		pkt_cursor_jump(pkt, write);
1794 	}
1795 }
1796 
1797 static void pkt_cursor_update(struct net_pkt *pkt,
1798 			      size_t length, bool write)
1799 {
1800 	struct net_pkt_cursor *cursor = &pkt->cursor;
1801 	size_t len;
1802 
1803 	if (net_pkt_is_being_overwritten(pkt)) {
1804 		write = false;
1805 	}
1806 
1807 	len = write ? net_buf_max_len(cursor->buf) : cursor->buf->len;
1808 	if (length + (cursor->pos - cursor->buf->data) == len &&
1809 	    !(net_pkt_is_being_overwritten(pkt) &&
1810 	      len < net_buf_max_len(cursor->buf))) {
1811 		pkt_cursor_jump(pkt, write);
1812 	} else {
1813 		cursor->pos += length;
1814 	}
1815 }
1816 
1817 /* Internal function that does all operation (skip/read/write/memset) */
1818 static int net_pkt_cursor_operate(struct net_pkt *pkt,
1819 				  void *data, size_t length,
1820 				  bool copy, bool write)
1821 {
1822 	/* We use this variable to avoid overly long lines */
1823 	struct net_pkt_cursor *c_op = &pkt->cursor;
1824 
1825 	while (c_op->buf && length) {
1826 		size_t d_len, len;
1827 
1828 		pkt_cursor_advance(pkt, net_pkt_is_being_overwritten(pkt) ?
1829 				   false : write);
1830 		if (c_op->buf == NULL) {
1831 			break;
1832 		}
1833 
1834 		if (write && !net_pkt_is_being_overwritten(pkt)) {
1835 			d_len = net_buf_max_len(c_op->buf) -
1836 				(c_op->pos - c_op->buf->data);
1837 		} else {
1838 			d_len = c_op->buf->len - (c_op->pos - c_op->buf->data);
1839 		}
1840 
1841 		if (!d_len) {
1842 			break;
1843 		}
1844 
1845 		if (length < d_len) {
1846 			len = length;
1847 		} else {
1848 			len = d_len;
1849 		}
1850 
1851 		if (copy && data) {
1852 			memcpy(write ? c_op->pos : data,
1853 			       write ? data : c_op->pos,
1854 			       len);
1855 		} else if (data) {
1856 			memset(c_op->pos, *(int *)data, len);
1857 		}
1858 
1859 		if (write && !net_pkt_is_being_overwritten(pkt)) {
1860 			net_buf_add(c_op->buf, len);
1861 		}
1862 
1863 		pkt_cursor_update(pkt, len, write);
1864 
1865 		if (copy && data) {
1866 			data = (uint8_t *) data + len;
1867 		}
1868 
1869 		length -= len;
1870 	}
1871 
1872 	if (length) {
1873 		NET_DBG("Still some length to go %zu", length);
1874 		return -ENOBUFS;
1875 	}
1876 
1877 	return 0;
1878 }
1879 
1880 int net_pkt_skip(struct net_pkt *pkt, size_t skip)
1881 {
1882 	NET_DBG("pkt %p skip %zu", pkt, skip);
1883 
1884 	return net_pkt_cursor_operate(pkt, NULL, skip, false, true);
1885 }
1886 
1887 int net_pkt_memset(struct net_pkt *pkt, int byte, size_t amount)
1888 {
1889 	NET_DBG("pkt %p byte %d amount %zu", pkt, byte, amount);
1890 
1891 	return net_pkt_cursor_operate(pkt, &byte, amount, false, true);
1892 }
1893 
1894 int net_pkt_read(struct net_pkt *pkt, void *data, size_t length)
1895 {
1896 	NET_DBG("pkt %p data %p length %zu", pkt, data, length);
1897 
1898 	return net_pkt_cursor_operate(pkt, data, length, true, false);
1899 }
1900 
1901 int net_pkt_read_be16(struct net_pkt *pkt, uint16_t *data)
1902 {
1903 	uint8_t d16[2];
1904 	int ret;
1905 
1906 	ret = net_pkt_read(pkt, d16, sizeof(uint16_t));
1907 
1908 	*data = d16[0] << 8 | d16[1];
1909 
1910 	return ret;
1911 }
1912 
1913 int net_pkt_read_le16(struct net_pkt *pkt, uint16_t *data)
1914 {
1915 	uint8_t d16[2];
1916 	int ret;
1917 
1918 	ret = net_pkt_read(pkt, d16, sizeof(uint16_t));
1919 
1920 	*data = d16[1] << 8 | d16[0];
1921 
1922 	return ret;
1923 }
1924 
1925 int net_pkt_read_be32(struct net_pkt *pkt, uint32_t *data)
1926 {
1927 	uint8_t d32[4];
1928 	int ret;
1929 
1930 	ret = net_pkt_read(pkt, d32, sizeof(uint32_t));
1931 
1932 	*data = d32[0] << 24 | d32[1] << 16 | d32[2] << 8 | d32[3];
1933 
1934 	return ret;
1935 }
1936 
1937 int net_pkt_write(struct net_pkt *pkt, const void *data, size_t length)
1938 {
1939 	NET_DBG("pkt %p data %p length %zu", pkt, data, length);
1940 
1941 	if (data == pkt->cursor.pos && net_pkt_is_contiguous(pkt, length)) {
1942 		return net_pkt_skip(pkt, length);
1943 	}
1944 
1945 	return net_pkt_cursor_operate(pkt, (void *)data, length, true, true);
1946 }
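
/* Illustrative sketch: parsing a 16-bit field from a received packet with
 * the accessors above. Overwrite mode is enabled so that net_pkt_skip()
 * moves over existing data instead of extending the buffer.
 * "hdr_offset" is a placeholder offset computed by the caller.
 *
 *   uint16_t port;
 *
 *   net_pkt_cursor_init(pkt);
 *   net_pkt_set_overwrite(pkt, true);
 *
 *   if (net_pkt_skip(pkt, hdr_offset) ||
 *       net_pkt_read_be16(pkt, &port)) {
 *       return -ENOBUFS;
 *   }
 */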
1947 
int net_pkt_copy(struct net_pkt *pkt_dst,
		 struct net_pkt *pkt_src,
		 size_t length)
{
	struct net_pkt_cursor *c_dst = &pkt_dst->cursor;
	struct net_pkt_cursor *c_src = &pkt_src->cursor;

	while (c_dst->buf && c_src->buf && length) {
		size_t s_len, d_len, len;

		pkt_cursor_advance(pkt_dst, true);
		pkt_cursor_advance(pkt_src, false);

		if (!c_dst->buf || !c_src->buf) {
			break;
		}

		s_len = c_src->buf->len - (c_src->pos - c_src->buf->data);
		d_len = net_buf_max_len(c_dst->buf) - (c_dst->pos - c_dst->buf->data);
		if (length < s_len && length < d_len) {
			len = length;
		} else {
			if (d_len < s_len) {
				len = d_len;
			} else {
				len = s_len;
			}
		}

		if (!len) {
			break;
		}

		memcpy(c_dst->pos, c_src->pos, len);

		if (!net_pkt_is_being_overwritten(pkt_dst)) {
			net_buf_add(c_dst->buf, len);
		}

		pkt_cursor_update(pkt_dst, len, true);
		pkt_cursor_update(pkt_src, len, false);

		length -= len;
	}

	if (length) {
		NET_DBG("Still some length to go %zu", length);
		return -ENOBUFS;
	}

	return 0;
}

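/* Return the offset of 'ptr' from the start of the packet buffer chain,
 * or -EINVAL if the pointer does not belong to any fragment of the packet.
 */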
static int32_t net_pkt_find_offset(struct net_pkt *pkt, uint8_t *ptr)
{
	struct net_buf *buf;
	int32_t ret = -EINVAL;
	uint16_t offset;

	if (!ptr || !pkt || !pkt->buffer) {
		return ret;
	}

	offset = 0U;
	buf = pkt->buffer;

	while (buf) {
		if (buf->data <= ptr && ptr < (buf->data + buf->len)) {
			ret = offset + (ptr - buf->data);
			break;
		}
		offset += buf->len;
		buf = buf->frags;
	}

	return ret;
}

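/* Remap a link-layer address pointer that points into the original packet
 * so that it points at the same offset inside the cloned packet's buffer.
 */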
static void clone_pkt_lladdr(struct net_pkt *pkt, struct net_pkt *clone_pkt,
			     struct net_linkaddr *lladdr)
{
	int32_t ll_addr_offset;

	if (!lladdr->addr) {
		return;
	}

	ll_addr_offset = net_pkt_find_offset(pkt, lladdr->addr);

	if (ll_addr_offset >= 0) {
		net_pkt_cursor_init(clone_pkt);
		net_pkt_skip(clone_pkt, ll_addr_offset);
		lladdr->addr = net_pkt_cursor_get_pos(clone_pkt);
	}
}

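/* Copy the packet control block to the clone when it is compiled in. */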
#if defined(NET_PKT_HAS_CONTROL_BLOCK)
static inline void clone_pkt_cb(struct net_pkt *pkt, struct net_pkt *clone_pkt)
{
	memcpy(net_pkt_cb(clone_pkt), net_pkt_cb(pkt), sizeof(clone_pkt->cb));
}
#else
static inline void clone_pkt_cb(struct net_pkt *pkt, struct net_pkt *clone_pkt)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(clone_pkt);
}
#endif

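/* Copy all packet metadata (family, header lengths, flags, timestamps,
 * link-layer addresses, etc.) from the original packet to the clone.
 */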
static void clone_pkt_attributes(struct net_pkt *pkt, struct net_pkt *clone_pkt)
{
	net_pkt_set_family(clone_pkt, net_pkt_family(pkt));
	net_pkt_set_context(clone_pkt, net_pkt_context(pkt));
	net_pkt_set_ip_hdr_len(clone_pkt, net_pkt_ip_hdr_len(pkt));
	net_pkt_set_ip_dscp(clone_pkt, net_pkt_ip_dscp(pkt));
	net_pkt_set_ip_ecn(clone_pkt, net_pkt_ip_ecn(pkt));
	net_pkt_set_vlan_tag(clone_pkt, net_pkt_vlan_tag(pkt));
	net_pkt_set_timestamp(clone_pkt, net_pkt_timestamp(pkt));
	net_pkt_set_priority(clone_pkt, net_pkt_priority(pkt));
	net_pkt_set_orig_iface(clone_pkt, net_pkt_orig_iface(pkt));
	net_pkt_set_captured(clone_pkt, net_pkt_is_captured(pkt));
	net_pkt_set_eof(clone_pkt, net_pkt_eof(pkt));
	net_pkt_set_ptp(clone_pkt, net_pkt_is_ptp(pkt));
	net_pkt_set_tx_timestamping(clone_pkt, net_pkt_is_tx_timestamping(pkt));
	net_pkt_set_rx_timestamping(clone_pkt, net_pkt_is_rx_timestamping(pkt));
	net_pkt_set_forwarding(clone_pkt, net_pkt_forwarding(pkt));
	net_pkt_set_chksum_done(clone_pkt, net_pkt_is_chksum_done(pkt));
	net_pkt_set_ip_reassembled(clone_pkt, net_pkt_is_ip_reassembled(pkt));

	net_pkt_set_l2_bridged(clone_pkt, net_pkt_is_l2_bridged(pkt));
	net_pkt_set_l2_processed(clone_pkt, net_pkt_is_l2_processed(pkt));
	net_pkt_set_ll_proto_type(clone_pkt, net_pkt_ll_proto_type(pkt));

	if (pkt->buffer && clone_pkt->buffer) {
		memcpy(net_pkt_lladdr_src(clone_pkt), net_pkt_lladdr_src(pkt),
		       sizeof(struct net_linkaddr));
		memcpy(net_pkt_lladdr_dst(clone_pkt), net_pkt_lladdr_dst(pkt),
		       sizeof(struct net_linkaddr));
		/* If the buffer was shallow-copied, the link-layer address
		 * pointers stay valid even though they point into the shared
		 * fragment memory. Otherwise the pointers must be remapped
		 * into the cloned buffer so that they do not dangle into the
		 * source packet.
		 */
		if (pkt->buffer != clone_pkt->buffer) {
			clone_pkt_lladdr(pkt, clone_pkt, net_pkt_lladdr_src(clone_pkt));
			clone_pkt_lladdr(pkt, clone_pkt, net_pkt_lladdr_dst(clone_pkt));
		}
	}

	if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) {
		net_pkt_set_ipv4_ttl(clone_pkt, net_pkt_ipv4_ttl(pkt));
		net_pkt_set_ipv4_opts_len(clone_pkt,
					  net_pkt_ipv4_opts_len(pkt));
	} else if (IS_ENABLED(CONFIG_NET_IPV6) &&
		   net_pkt_family(pkt) == AF_INET6) {
		net_pkt_set_ipv6_hop_limit(clone_pkt,
					   net_pkt_ipv6_hop_limit(pkt));
		net_pkt_set_ipv6_ext_len(clone_pkt, net_pkt_ipv6_ext_len(pkt));
		net_pkt_set_ipv6_ext_opt_len(clone_pkt,
					     net_pkt_ipv6_ext_opt_len(pkt));
		net_pkt_set_ipv6_hdr_prev(clone_pkt,
					  net_pkt_ipv6_hdr_prev(pkt));
		net_pkt_set_ipv6_next_hdr(clone_pkt,
					  net_pkt_ipv6_next_hdr(pkt));
	}

	clone_pkt_cb(pkt, clone_pkt);
}

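/* Deep-clone a packet: allocate a new packet and buffer from 'slab',
 * copy the data and metadata, and position the clone's cursor at the
 * same offset as in the original packet.
 */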
static struct net_pkt *net_pkt_clone_internal(struct net_pkt *pkt,
					      struct k_mem_slab *slab,
					      k_timeout_t timeout)
{
	size_t cursor_offset = net_pkt_get_current_offset(pkt);
	bool overwrite = net_pkt_is_being_overwritten(pkt);
	struct net_pkt_cursor backup;
	struct net_pkt *clone_pkt;

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
	clone_pkt = pkt_alloc_with_buffer(slab, net_pkt_iface(pkt),
					  net_pkt_get_len(pkt),
					  AF_UNSPEC, 0, timeout,
					  __func__, __LINE__);
#else
	clone_pkt = pkt_alloc_with_buffer(slab, net_pkt_iface(pkt),
					  net_pkt_get_len(pkt),
					  AF_UNSPEC, 0, timeout);
#endif
	if (!clone_pkt) {
		return NULL;
	}

	net_pkt_set_overwrite(pkt, true);
	net_pkt_cursor_backup(pkt, &backup);
	net_pkt_cursor_init(pkt);

	if (net_pkt_copy(clone_pkt, pkt, net_pkt_get_len(pkt))) {
		net_pkt_unref(clone_pkt);
		net_pkt_cursor_restore(pkt, &backup);
		net_pkt_set_overwrite(pkt, overwrite);
		return NULL;
	}
	net_pkt_set_overwrite(clone_pkt, true);

	clone_pkt_attributes(pkt, clone_pkt);

	net_pkt_cursor_init(clone_pkt);

	if (cursor_offset) {
		net_pkt_skip(clone_pkt, cursor_offset);
	}
	net_pkt_set_overwrite(clone_pkt, overwrite);

	net_pkt_cursor_restore(pkt, &backup);
	net_pkt_set_overwrite(pkt, overwrite);

	NET_DBG("Cloned %p to %p", pkt, clone_pkt);

	return clone_pkt;
}

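/* Deep-clone wrappers: net_pkt_clone() allocates from the original packet's
 * slab, while net_pkt_rx_clone() always allocates from the RX slab.
 */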
struct net_pkt *net_pkt_clone(struct net_pkt *pkt, k_timeout_t timeout)
{
	return net_pkt_clone_internal(pkt, pkt->slab, timeout);
}

struct net_pkt *net_pkt_rx_clone(struct net_pkt *pkt, k_timeout_t timeout)
{
	return net_pkt_clone_internal(pkt, &rx_pkts, timeout);
}

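/* Shallow-clone a packet: the clone references the original buffer chain
 * instead of copying the data; only the packet metadata is duplicated.
 */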
struct net_pkt *net_pkt_shallow_clone(struct net_pkt *pkt, k_timeout_t timeout)
{
	struct net_pkt *clone_pkt;
	struct net_buf *buf;

	clone_pkt = net_pkt_alloc(timeout);
	if (!clone_pkt) {
		return NULL;
	}

	net_pkt_set_iface(clone_pkt, net_pkt_iface(pkt));
	clone_pkt->buffer = pkt->buffer;
	buf = pkt->buffer;

	net_pkt_frag_ref(buf);

	clone_pkt_attributes(pkt, clone_pkt);

	net_pkt_cursor_restore(clone_pkt, &pkt->cursor);

	NET_DBG("Shallow cloned %p to %p", pkt, clone_pkt);

	return clone_pkt;
}

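/* Return the number of data bytes left between the cursor position and
 * the end of the packet.
 */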
size_t net_pkt_remaining_data(struct net_pkt *pkt)
{
	struct net_buf *buf;
	size_t data_length;

	if (!pkt || !pkt->cursor.buf || !pkt->cursor.pos) {
		return 0;
	}

	buf = pkt->cursor.buf;
	data_length = buf->len - (pkt->cursor.pos - buf->data);

	buf = buf->frags;
	while (buf) {
		data_length += buf->len;
		buf = buf->frags;
	}

	return data_length;
}

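/* Shrink the total packet length to 'length' by trimming fragment lengths.
 * Returns -EINVAL if the packet holds less data than requested.
 */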
int net_pkt_update_length(struct net_pkt *pkt, size_t length)
{
	struct net_buf *buf;

	for (buf = pkt->buffer; buf; buf = buf->frags) {
		if (buf->len < length) {
			length -= buf->len;
		} else {
			buf->len = length;
			length = 0;
		}
	}

	return !length ? 0 : -EINVAL;
}

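/* Remove 'length' bytes from the packet at the current cursor position,
 * freeing buffers that become empty; the cursor is reset to the start of
 * the packet afterwards.
 */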
int net_pkt_pull(struct net_pkt *pkt, size_t length)
{
	struct net_pkt_cursor *c_op = &pkt->cursor;

	while (length) {
		size_t left, rem;

		pkt_cursor_advance(pkt, false);

		if (!c_op->buf) {
			break;
		}

		left = c_op->buf->len - (c_op->pos - c_op->buf->data);
		if (!left) {
			break;
		}

		rem = left;
		if (rem > length) {
			rem = length;
		}

		c_op->buf->len -= rem;
		left -= rem;
		if (left) {
			memmove(c_op->pos, c_op->pos + rem, left);
		} else {
			struct net_buf *buf = pkt->buffer;

			if (buf) {
				pkt->buffer = buf->frags;
				buf->frags = NULL;
				net_buf_unref(buf);
			}

			net_pkt_cursor_init(pkt);
		}

		length -= rem;
	}

	net_pkt_cursor_init(pkt);

	if (length) {
		NET_DBG("Still some length to go %zu", length);
		return -ENOBUFS;
	}

	return 0;
}

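/* Return the cursor's offset in bytes from the beginning of the packet. */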
uint16_t net_pkt_get_current_offset(struct net_pkt *pkt)
{
	struct net_buf *buf = pkt->buffer;
	uint16_t offset;

	if (!pkt->cursor.buf || !pkt->cursor.pos) {
		return 0;
	}

	offset = 0U;

	while (buf != pkt->cursor.buf) {
		offset += buf->len;
		buf = buf->frags;
	}

	offset += pkt->cursor.pos - buf->data;

	return offset;
}

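/* Contiguity helpers: check whether 'size' bytes are available at the
 * cursor without crossing a fragment boundary, and report how many
 * contiguous bytes remain there.
 */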
bool net_pkt_is_contiguous(struct net_pkt *pkt, size_t size)
{
	size_t len = net_pkt_get_contiguous_len(pkt);

	return len >= size;
}

size_t net_pkt_get_contiguous_len(struct net_pkt *pkt)
{
	pkt_cursor_advance(pkt, !net_pkt_is_being_overwritten(pkt));

	if (pkt->cursor.buf && pkt->cursor.pos) {
		size_t len;

		len = net_pkt_is_being_overwritten(pkt) ?
			pkt->cursor.buf->len : net_buf_max_len(pkt->cursor.buf);
		len -= pkt->cursor.pos - pkt->cursor.buf->data;

		return len;
	}

	return 0;
}

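/* Give access to 'access->size' bytes of header data at the cursor.
 * When the area is contiguous a direct pointer into the buffer is used;
 * when reading non-contiguous data it is linearized into the caller
 * supplied access buffer, which net_pkt_set_data() can later write back.
 */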
void *net_pkt_get_data(struct net_pkt *pkt,
		       struct net_pkt_data_access *access)
{
	if (IS_ENABLED(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)) {
		if (!net_pkt_is_contiguous(pkt, access->size)) {
			return NULL;
		}

		return pkt->cursor.pos;
	} else {
		if (net_pkt_is_contiguous(pkt, access->size)) {
			access->data = pkt->cursor.pos;
		} else if (net_pkt_is_being_overwritten(pkt)) {
			struct net_pkt_cursor backup;

			if (!access->data) {
				NET_ERR("Uncontiguous data cannot be linearized");
				return NULL;
			}

			net_pkt_cursor_backup(pkt, &backup);

			if (net_pkt_read(pkt, access->data, access->size)) {
				net_pkt_cursor_restore(pkt, &backup);
				return NULL;
			}

			net_pkt_cursor_restore(pkt, &backup);
		}

		return access->data;
	}
}

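/* Commit the header obtained via net_pkt_get_data(): either just advance
 * the cursor (contiguous headers) or write the access buffer back into
 * the packet.
 */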
int net_pkt_set_data(struct net_pkt *pkt,
		     struct net_pkt_data_access *access)
{
	if (IS_ENABLED(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)) {
		return net_pkt_skip(pkt, access->size);
	}

	return net_pkt_write(pkt, access->data, access->size);
}

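/* One-time initialization hook; currently it only logs the sizes of the
 * RX/TX packet slabs and data buffer pools when debug logging is enabled.
 */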
void net_pkt_init(void)
{
#if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
	NET_DBG("Allocating %u RX (%zu bytes), %u TX (%zu bytes), "
		"%d RX data (%u bytes) and %d TX data (%u bytes) buffers",
		k_mem_slab_num_free_get(&rx_pkts),
		(size_t)(k_mem_slab_num_free_get(&rx_pkts) *
			 sizeof(struct net_pkt)),
		k_mem_slab_num_free_get(&tx_pkts),
		(size_t)(k_mem_slab_num_free_get(&tx_pkts) *
			 sizeof(struct net_pkt)),
		get_frees(&rx_bufs), get_size(&rx_bufs),
		get_frees(&tx_bufs), get_size(&tx_bufs));
#endif
}