1 /** @file
2  * @brief Network packet buffer descriptor API
3  *
4  * Network data is passed between different parts of the stack via
5  * net_buf struct.
6  */
7 
8 /*
9  * Copyright (c) 2016 Intel Corporation
10  *
11  * SPDX-License-Identifier: Apache-2.0
12  */
13 
14 /* Data buffer API - used for all data to/from net */
15 
16 #ifndef ZEPHYR_INCLUDE_NET_NET_PKT_H_
17 #define ZEPHYR_INCLUDE_NET_NET_PKT_H_
18 
19 #include <zephyr/types.h>
20 #include <stdbool.h>
21 
22 #include <zephyr/net/buf.h>
23 
24 #if defined(CONFIG_IEEE802154)
25 #include <zephyr/net/ieee802154_pkt.h>
26 #endif
27 #include <zephyr/net/net_core.h>
28 #include <zephyr/net/net_linkaddr.h>
29 #include <zephyr/net/net_ip.h>
30 #include <zephyr/net/net_if.h>
31 #include <zephyr/net/net_context.h>
32 #include <zephyr/net/net_time.h>
33 #include <zephyr/net/ethernet_vlan.h>
34 #include <zephyr/net/ptp_time.h>
35 
36 #ifdef __cplusplus
37 extern "C" {
38 #endif
39 
40 /**
41  * @brief Network packet management library
42  * @defgroup net_pkt Network Packet Library
43  * @ingroup networking
44  * @{
45  */
46 
47 struct net_context;
48 
49 /* buffer cursor used in net_pkt */
50 struct net_pkt_cursor {
51 	/** The net_buf the cursor is currently pointing to */
52 	struct net_buf *buf;
53 	/** Current position in the data buffer of the net_buf */
54 	uint8_t *pos;
55 };
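
/* Illustrative note (not part of the original header text): the cursor tracks
 * the read/write position used by the net_pkt_read()/net_pkt_write() family of
 * functions declared later in this file. A typical pattern is to rewind it
 * before accessing packet data, for example:
 *
 *   uint8_t buf[8];
 *   int ret;
 *
 *   net_pkt_cursor_init(pkt);
 *   net_pkt_set_overwrite(pkt, true);
 *   ret = net_pkt_read(pkt, buf, sizeof(buf));
 *
 * A negative return value means the packet did not contain enough data.
 */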
56 
57 /**
58  * @brief Network packet.
59  *
60  * Note that if you add new fields into net_pkt, remember to update
61  * net_pkt_clone() function.
62  */
63 struct net_pkt {
64 	/**
65 	 * The fifo is used by the RX/TX threads and by the socket layer. The
66 	 * net_pkt is queued via the fifo to the processing thread.
67 	 */
68 	intptr_t fifo;
69 
70 	/** Slab from which this net_pkt was allocated */
71 	struct k_mem_slab *slab;
72 
73 	/** buffer holding the packet */
74 	union {
75 		struct net_buf *frags;
76 		struct net_buf *buffer;
77 	};
78 
79 	/** Internal buffer iterator used for reading/writing */
80 	struct net_pkt_cursor cursor;
81 
82 	/** Network connection context */
83 	struct net_context *context;
84 
85 	/** Network interface */
86 	struct net_if *iface;
87 
88 	/** @cond ignore */
89 
90 #if defined(CONFIG_NET_TCP)
91 	/** Allow placing the packet into sys_slist_t */
92 	sys_snode_t next;
93 #endif
94 #if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
95 	struct net_if *orig_iface; /* Original network interface */
96 #endif
97 
98 #if defined(CONFIG_NET_PKT_TIMESTAMP) || defined(CONFIG_NET_PKT_TXTIME)
99 	/**
100 	 * TX or RX timestamp if available
101 	 *
102 	 * For packets that have been sent over the medium, the timestamp refers
103 	 * to the time the message timestamp point was encountered at the
104 	 * reference plane.
105 	 *
106 	 * Unsent packets can be scheduled by setting the timestamp to a future
107 	 * point in time.
108 	 *
109 	 * All timestamps refer to the network subsystem's local clock.
110 	 *
111 	 * See @ref net_ptp_time for definitions of local clock, message
112 	 * timestamp point and reference plane. See @ref net_time_t for
113 	 * semantics of the network reference clock.
114 	 *
115 	 * TODO: Replace with net_time_t to decouple from PTP.
116 	 */
117 	struct net_ptp_time timestamp;
118 #endif
119 
120 #if defined(CONFIG_NET_PKT_RXTIME_STATS) || defined(CONFIG_NET_PKT_TXTIME_STATS)
121 	struct {
122 		/** Create time in cycles */
123 		uint32_t create_time;
124 
125 #if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL) || \
126 	defined(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)
127 		/** Collect extra statistics for net_pkt processing
128 		 * from various points in the IP stack. See networking
129 		 * documentation where these points are located and how
130 		 * to interpret the results.
131 		 */
132 		struct {
133 			uint32_t stat[NET_PKT_DETAIL_STATS_COUNT];
134 			int count;
135 		} detail;
136 #endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL ||
137 	  CONFIG_NET_PKT_RXTIME_STATS_DETAIL */
138 	};
139 #endif /* CONFIG_NET_PKT_RXTIME_STATS || CONFIG_NET_PKT_TXTIME_STATS */
140 
141 	/** Reference counter */
142 	atomic_t atomic_ref;
143 
144 	/* Filled by layer 2 when network packet is received. */
145 	struct net_linkaddr lladdr_src;
146 	struct net_linkaddr lladdr_dst;
147 	uint16_t ll_proto_type;
148 
149 #if defined(CONFIG_NET_IP)
150 	uint8_t ip_hdr_len;	/* pre-filled in order to avoid func call */
151 #endif
152 
153 	uint8_t overwrite : 1;	 /* Is packet content being overwritten? */
154 	uint8_t eof : 1;	 /* Last packet before EOF */
155 	uint8_t ptp_pkt : 1;	 /* For outgoing packet: is this packet
156 				  * an L2 PTP packet.
157 				  * Used only if defined(CONFIG_NET_L2_PTP)
158 				  */
159 	uint8_t forwarding : 1;	 /* Are we forwarding this pkt?
160 				  * Used only if defined(CONFIG_NET_ROUTE)
161 				  */
162 	uint8_t family : 3;	 /* Address family, see net_ip.h */
163 
164 	/* bitfield byte alignment boundary */
165 
166 #if defined(CONFIG_NET_IPV4_AUTO)
167 	uint8_t ipv4_auto_arp_msg : 1; /* Is this pkt IPv4 autoconf ARP
168 					* message.
169 					* Note: family needs to be
170 					* AF_INET.
171 					*/
172 #endif
173 #if defined(CONFIG_NET_LLDP)
174 	uint8_t lldp_pkt : 1; /* Is this pkt an LLDP message.
175 			       * Note: family needs to be
176 			       * AF_UNSPEC.
177 			       */
178 #endif
179 	uint8_t ppp_msg : 1; /* This is a PPP message */
180 #if defined(CONFIG_NET_TCP)
181 	uint8_t tcp_first_msg : 1; /* Is this the first time this pkt is
182 				    * sent, or is this a resend of a TCP
183 				    * segment.
184 				    */
185 #endif
186 	uint8_t captured : 1;	  /* Set to 1 if this packet is already being
187 				   * captured
188 				   */
189 	uint8_t l2_bridged : 1;	  /* set to 1 if this packet comes from a bridge
190 				   * and already contains its L2 header to be
191 				   * preserved. Useful only if
192 				   * defined(CONFIG_NET_ETHERNET_BRIDGE).
193 				   */
194 	uint8_t l2_processed : 1; /* Set to 1 if this packet has already been
195 				   * processed by the L2
196 				   */
197 
198 	/* bitfield byte alignment boundary */
199 
200 #if defined(CONFIG_NET_IP)
201 	union {
202 		/* IPv6 hop limit or IPv4 ttl for this network packet.
203 		 * The value is shared between IPv6 and IPv4.
204 		 */
205 #if defined(CONFIG_NET_IPV6)
206 		uint8_t ipv6_hop_limit;
207 #endif
208 #if defined(CONFIG_NET_IPV4)
209 		uint8_t ipv4_ttl;
210 #endif
211 	};
212 
213 	union {
214 #if defined(CONFIG_NET_IPV4)
215 		uint8_t ipv4_opts_len; /* length of IPv4 header options */
216 #endif
217 #if defined(CONFIG_NET_IPV6)
218 		uint16_t ipv6_ext_len; /* length of extension headers */
219 #endif
220 	};
221 
222 #if defined(CONFIG_NET_IPV4_FRAGMENT) || defined(CONFIG_NET_IPV6_FRAGMENT)
223 	union {
224 #if defined(CONFIG_NET_IPV4_FRAGMENT)
225 		struct {
226 			uint16_t flags;		/* Fragment offset and M (More Fragment) flag */
227 			uint16_t id;		/* Fragment ID */
228 		} ipv4_fragment;
229 #endif /* CONFIG_NET_IPV4_FRAGMENT */
230 #if defined(CONFIG_NET_IPV6_FRAGMENT)
231 		struct {
232 			uint16_t flags;		/* Fragment offset and M (More Fragment) flag */
233 			uint32_t id;		/* Fragment id */
234 			uint16_t hdr_start;	/* Where the fragment header starts */
235 		} ipv6_fragment;
236 #endif /* CONFIG_NET_IPV6_FRAGMENT */
237 	};
238 #endif /* CONFIG_NET_IPV4_FRAGMENT || CONFIG_NET_IPV6_FRAGMENT */
239 
240 #if defined(CONFIG_NET_IPV6)
241 	/* Offset from the start of the IPv6 packet to the last header
242 	 * that precedes the payload data. Note that whoever adds IPv6
243 	 * extension headers to the network packet must also update this
244 	 * value.
245 	 */
246 	uint16_t ipv6_prev_hdr_start;
247 
248 	uint8_t ipv6_ext_opt_len; /* IPv6 ND option length */
249 	uint8_t ipv6_next_hdr;	/* What is the very first next header */
250 #endif /* CONFIG_NET_IPV6 */
251 
252 #if defined(CONFIG_NET_IP_DSCP_ECN)
253 	/** IPv4/IPv6 Differentiated Services Code Point value. */
254 	uint8_t ip_dscp : 6;
255 
256 	/** IPv4/IPv6 Explicit Congestion Notification value. */
257 	uint8_t ip_ecn : 2;
258 #endif /* CONFIG_NET_IP_DSCP_ECN */
259 #endif /* CONFIG_NET_IP */
260 
261 #if defined(CONFIG_NET_VLAN)
262 	/* VLAN TCI (Tag Control Information). This contains the Priority
263 	 * Code Point (PCP), Drop Eligible Indicator (DEI) and VLAN
264 	 * Identifier (VID, called more commonly VLAN tag). This value is
265 	 * kept in host byte order.
266 	 */
267 	uint16_t vlan_tci;
268 #endif /* CONFIG_NET_VLAN */
269 
270 #if defined(NET_PKT_HAS_CONTROL_BLOCK)
271 	/* TODO: Evolve this into a union of orthogonal
272 	 *       control block declarations if further L2
273 	 *       stacks require L2-specific attributes.
274 	 */
275 #if defined(CONFIG_IEEE802154)
276 	/* The following structure requires a 4-byte alignment
277 	 * boundary to avoid padding.
278 	 */
279 	struct net_pkt_cb_ieee802154 cb;
280 #endif /* CONFIG_IEEE802154 */
281 #endif /* NET_PKT_HAS_CONTROL_BLOCK */
282 
283 	/** Network packet priority. May be left unset, in which case the
284 	 * packet is not prioritised.
285 	 */
286 	uint8_t priority;
287 
288 	/* @endcond */
289 };
290 
291 /** @cond ignore */
292 
293 /* The interface real ll address */
294 static inline struct net_linkaddr *net_pkt_lladdr_if(struct net_pkt *pkt)
295 {
296 	return net_if_get_link_addr(pkt->iface);
297 }
298 
299 static inline struct net_context *net_pkt_context(struct net_pkt *pkt)
300 {
301 	return pkt->context;
302 }
303 
304 static inline void net_pkt_set_context(struct net_pkt *pkt,
305 				       struct net_context *ctx)
306 {
307 	pkt->context = ctx;
308 }
309 
310 static inline struct net_if *net_pkt_iface(struct net_pkt *pkt)
311 {
312 	return pkt->iface;
313 }
314 
315 static inline void net_pkt_set_iface(struct net_pkt *pkt, struct net_if *iface)
316 {
317 	pkt->iface = iface;
318 
319 	/* If the network interface is set in pkt, then also set the type of
320 	 * the network address that is stored in pkt. This is done here so
321 	 * that the address type is properly set and is not forgotten.
322 	 */
323 	if (iface) {
324 		uint8_t type = net_if_get_link_addr(iface)->type;
325 
326 		pkt->lladdr_src.type = type;
327 		pkt->lladdr_dst.type = type;
328 	}
329 }
330 
331 static inline struct net_if *net_pkt_orig_iface(struct net_pkt *pkt)
332 {
333 #if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
334 	return pkt->orig_iface;
335 #else
336 	return pkt->iface;
337 #endif
338 }
339 
340 static inline void net_pkt_set_orig_iface(struct net_pkt *pkt,
341 					  struct net_if *iface)
342 {
343 #if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
344 	pkt->orig_iface = iface;
345 #endif
346 }
347 
348 static inline uint8_t net_pkt_family(struct net_pkt *pkt)
349 {
350 	return pkt->family;
351 }
352 
353 static inline void net_pkt_set_family(struct net_pkt *pkt, uint8_t family)
354 {
355 	pkt->family = family;
356 }
357 
358 static inline bool net_pkt_is_ptp(struct net_pkt *pkt)
359 {
360 	return !!(pkt->ptp_pkt);
361 }
362 
363 static inline void net_pkt_set_ptp(struct net_pkt *pkt, bool is_ptp)
364 {
365 	pkt->ptp_pkt = is_ptp;
366 }
367 
368 static inline bool net_pkt_is_captured(struct net_pkt *pkt)
369 {
370 	return !!(pkt->captured);
371 }
372 
373 static inline void net_pkt_set_captured(struct net_pkt *pkt, bool is_captured)
374 {
375 	pkt->captured = is_captured;
376 }
377 
378 static inline bool net_pkt_is_l2_bridged(struct net_pkt *pkt)
379 {
380 	return IS_ENABLED(CONFIG_NET_ETHERNET_BRIDGE) ? !!(pkt->l2_bridged) : 0;
381 }
382 
383 static inline void net_pkt_set_l2_bridged(struct net_pkt *pkt, bool is_l2_bridged)
384 {
385 	if (IS_ENABLED(CONFIG_NET_ETHERNET_BRIDGE)) {
386 		pkt->l2_bridged = is_l2_bridged;
387 	}
388 }
389 
390 static inline bool net_pkt_is_l2_processed(struct net_pkt *pkt)
391 {
392 	return !!(pkt->l2_processed);
393 }
394 
395 static inline void net_pkt_set_l2_processed(struct net_pkt *pkt,
396 					    bool is_l2_processed)
397 {
398 	pkt->l2_processed = is_l2_processed;
399 }
400 
401 static inline uint8_t net_pkt_ip_hdr_len(struct net_pkt *pkt)
402 {
403 #if defined(CONFIG_NET_IP)
404 	return pkt->ip_hdr_len;
405 #else
406 	return 0;
407 #endif
408 }
409 
410 static inline void net_pkt_set_ip_hdr_len(struct net_pkt *pkt, uint8_t len)
411 {
412 #if defined(CONFIG_NET_IP)
413 	pkt->ip_hdr_len = len;
414 #endif
415 }
416 
417 static inline uint8_t net_pkt_ip_dscp(struct net_pkt *pkt)
418 {
419 #if defined(CONFIG_NET_IP_DSCP_ECN)
420 	return pkt->ip_dscp;
421 #else
422 	return 0;
423 #endif
424 }
425 
426 static inline void net_pkt_set_ip_dscp(struct net_pkt *pkt, uint8_t dscp)
427 {
428 #if defined(CONFIG_NET_IP_DSCP_ECN)
429 	pkt->ip_dscp = dscp;
430 #endif
431 }
432 
433 static inline uint8_t net_pkt_ip_ecn(struct net_pkt *pkt)
434 {
435 #if defined(CONFIG_NET_IP_DSCP_ECN)
436 	return pkt->ip_ecn;
437 #else
438 	return 0;
439 #endif
440 }
441 
442 static inline void net_pkt_set_ip_ecn(struct net_pkt *pkt, uint8_t ecn)
443 {
444 #if defined(CONFIG_NET_IP_DSCP_ECN)
445 	pkt->ip_ecn = ecn;
446 #endif
447 }
448 
449 static inline uint8_t net_pkt_tcp_1st_msg(struct net_pkt *pkt)
450 {
451 #if defined(CONFIG_NET_TCP)
452 	return pkt->tcp_first_msg;
453 #else
454 	return true;
455 #endif
456 }
457 
458 static inline void net_pkt_set_tcp_1st_msg(struct net_pkt *pkt, bool is_1st)
459 {
460 #if defined(CONFIG_NET_TCP)
461 	pkt->tcp_first_msg = is_1st;
462 #else
463 	ARG_UNUSED(pkt);
464 	ARG_UNUSED(is_1st);
465 #endif
466 }
467 
468 static inline uint8_t net_pkt_eof(struct net_pkt *pkt)
469 {
470 	return pkt->eof;
471 }
472 
473 static inline void net_pkt_set_eof(struct net_pkt *pkt, bool eof)
474 {
475 	pkt->eof = eof;
476 }
477 
478 static inline bool net_pkt_forwarding(struct net_pkt *pkt)
479 {
480 	return !!(pkt->forwarding);
481 }
482 
483 static inline void net_pkt_set_forwarding(struct net_pkt *pkt, bool forward)
484 {
485 	pkt->forwarding = forward;
486 }
487 
488 #if defined(CONFIG_NET_IPV4)
489 static inline uint8_t net_pkt_ipv4_ttl(struct net_pkt *pkt)
490 {
491 	return pkt->ipv4_ttl;
492 }
493 
494 static inline void net_pkt_set_ipv4_ttl(struct net_pkt *pkt,
495 					uint8_t ttl)
496 {
497 	pkt->ipv4_ttl = ttl;
498 }
499 
500 static inline uint8_t net_pkt_ipv4_opts_len(struct net_pkt *pkt)
501 {
502 	return pkt->ipv4_opts_len;
503 }
504 
505 static inline void net_pkt_set_ipv4_opts_len(struct net_pkt *pkt,
506 					     uint8_t opts_len)
507 {
508 	pkt->ipv4_opts_len = opts_len;
509 }
510 #else
511 static inline uint8_t net_pkt_ipv4_ttl(struct net_pkt *pkt)
512 {
513 	ARG_UNUSED(pkt);
514 
515 	return 0;
516 }
517 
518 static inline void net_pkt_set_ipv4_ttl(struct net_pkt *pkt,
519 					uint8_t ttl)
520 {
521 	ARG_UNUSED(pkt);
522 	ARG_UNUSED(ttl);
523 }
524 
525 static inline uint8_t net_pkt_ipv4_opts_len(struct net_pkt *pkt)
526 {
527 	ARG_UNUSED(pkt);
528 	return 0;
529 }
530 
531 static inline void net_pkt_set_ipv4_opts_len(struct net_pkt *pkt,
532 					     uint8_t opts_len)
533 {
534 	ARG_UNUSED(pkt);
535 	ARG_UNUSED(opts_len);
536 }
537 #endif
538 
539 #if defined(CONFIG_NET_IPV6)
540 static inline uint8_t net_pkt_ipv6_ext_opt_len(struct net_pkt *pkt)
541 {
542 	return pkt->ipv6_ext_opt_len;
543 }
544 
545 static inline void net_pkt_set_ipv6_ext_opt_len(struct net_pkt *pkt,
546 						uint8_t len)
547 {
548 	pkt->ipv6_ext_opt_len = len;
549 }
550 
551 static inline uint8_t net_pkt_ipv6_next_hdr(struct net_pkt *pkt)
552 {
553 	return pkt->ipv6_next_hdr;
554 }
555 
556 static inline void net_pkt_set_ipv6_next_hdr(struct net_pkt *pkt,
557 					     uint8_t next_hdr)
558 {
559 	pkt->ipv6_next_hdr = next_hdr;
560 }
561 
562 static inline uint16_t net_pkt_ipv6_ext_len(struct net_pkt *pkt)
563 {
564 	return pkt->ipv6_ext_len;
565 }
566 
567 static inline void net_pkt_set_ipv6_ext_len(struct net_pkt *pkt, uint16_t len)
568 {
569 	pkt->ipv6_ext_len = len;
570 }
571 
572 static inline uint16_t net_pkt_ipv6_hdr_prev(struct net_pkt *pkt)
573 {
574 	return pkt->ipv6_prev_hdr_start;
575 }
576 
577 static inline void net_pkt_set_ipv6_hdr_prev(struct net_pkt *pkt,
578 					     uint16_t offset)
579 {
580 	pkt->ipv6_prev_hdr_start = offset;
581 }
582 
583 static inline uint8_t net_pkt_ipv6_hop_limit(struct net_pkt *pkt)
584 {
585 	return pkt->ipv6_hop_limit;
586 }
587 
588 static inline void net_pkt_set_ipv6_hop_limit(struct net_pkt *pkt,
589 					      uint8_t hop_limit)
590 {
591 	pkt->ipv6_hop_limit = hop_limit;
592 }
593 #else /* CONFIG_NET_IPV6 */
594 static inline uint8_t net_pkt_ipv6_ext_opt_len(struct net_pkt *pkt)
595 {
596 	ARG_UNUSED(pkt);
597 
598 	return 0;
599 }
600 
601 static inline void net_pkt_set_ipv6_ext_opt_len(struct net_pkt *pkt,
602 						uint8_t len)
603 {
604 	ARG_UNUSED(pkt);
605 	ARG_UNUSED(len);
606 }
607 
608 static inline uint8_t net_pkt_ipv6_next_hdr(struct net_pkt *pkt)
609 {
610 	ARG_UNUSED(pkt);
611 
612 	return 0;
613 }
614 
615 static inline void net_pkt_set_ipv6_next_hdr(struct net_pkt *pkt,
616 					     uint8_t next_hdr)
617 {
618 	ARG_UNUSED(pkt);
619 	ARG_UNUSED(next_hdr);
620 }
621 
622 static inline uint16_t net_pkt_ipv6_ext_len(struct net_pkt *pkt)
623 {
624 	ARG_UNUSED(pkt);
625 
626 	return 0;
627 }
628 
629 static inline void net_pkt_set_ipv6_ext_len(struct net_pkt *pkt, uint16_t len)
630 {
631 	ARG_UNUSED(pkt);
632 	ARG_UNUSED(len);
633 }
634 
635 static inline uint16_t net_pkt_ipv6_hdr_prev(struct net_pkt *pkt)
636 {
637 	ARG_UNUSED(pkt);
638 
639 	return 0;
640 }
641 
642 static inline void net_pkt_set_ipv6_hdr_prev(struct net_pkt *pkt,
643 					     uint16_t offset)
644 {
645 	ARG_UNUSED(pkt);
646 	ARG_UNUSED(offset);
647 }
648 
649 static inline uint8_t net_pkt_ipv6_hop_limit(struct net_pkt *pkt)
650 {
651 	ARG_UNUSED(pkt);
652 
653 	return 0;
654 }
655 
656 static inline void net_pkt_set_ipv6_hop_limit(struct net_pkt *pkt,
657 					      uint8_t hop_limit)
658 {
659 	ARG_UNUSED(pkt);
660 	ARG_UNUSED(hop_limit);
661 }
662 #endif /* CONFIG_NET_IPV6 */
663 
664 static inline uint16_t net_pkt_ip_opts_len(struct net_pkt *pkt)
665 {
666 #if defined(CONFIG_NET_IPV6)
667 	return pkt->ipv6_ext_len;
668 #elif defined(CONFIG_NET_IPV4)
669 	return pkt->ipv4_opts_len;
670 #else
671 	ARG_UNUSED(pkt);
672 
673 	return 0;
674 #endif
675 }
676 
677 #if defined(CONFIG_NET_IPV4_FRAGMENT)
678 static inline uint16_t net_pkt_ipv4_fragment_offset(struct net_pkt *pkt)
679 {
680 	return (pkt->ipv4_fragment.flags & NET_IPV4_FRAGH_OFFSET_MASK) * 8;
681 }
682 
683 static inline bool net_pkt_ipv4_fragment_more(struct net_pkt *pkt)
684 {
685 	return (pkt->ipv4_fragment.flags & NET_IPV4_MORE_FRAG_MASK) != 0;
686 }
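
/* Illustrative note (assumption, based on the standard IPv4 header layout in
 * which the More Fragments bit is 0x2000 and the fragment offset occupies the
 * low 13 bits in units of 8 octets): with flags == 0x2005,
 * net_pkt_ipv4_fragment_more() returns true and
 * net_pkt_ipv4_fragment_offset() returns 5 * 8 = 40 bytes.
 */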
687 
688 static inline void net_pkt_set_ipv4_fragment_flags(struct net_pkt *pkt, uint16_t flags)
689 {
690 	pkt->ipv4_fragment.flags = flags;
691 }
692 
693 static inline uint32_t net_pkt_ipv4_fragment_id(struct net_pkt *pkt)
694 {
695 	return pkt->ipv4_fragment.id;
696 }
697 
698 static inline void net_pkt_set_ipv4_fragment_id(struct net_pkt *pkt, uint32_t id)
699 {
700 	pkt->ipv4_fragment.id = id;
701 }
702 #else /* CONFIG_NET_IPV4_FRAGMENT */
703 static inline uint16_t net_pkt_ipv4_fragment_offset(struct net_pkt *pkt)
704 {
705 	ARG_UNUSED(pkt);
706 
707 	return 0;
708 }
709 
710 static inline bool net_pkt_ipv4_fragment_more(struct net_pkt *pkt)
711 {
712 	ARG_UNUSED(pkt);
713 
714 	return 0;
715 }
716 
717 static inline void net_pkt_set_ipv4_fragment_flags(struct net_pkt *pkt, uint16_t flags)
718 {
719 	ARG_UNUSED(pkt);
720 	ARG_UNUSED(flags);
721 }
722 
723 static inline uint32_t net_pkt_ipv4_fragment_id(struct net_pkt *pkt)
724 {
725 	ARG_UNUSED(pkt);
726 
727 	return 0;
728 }
729 
730 static inline void net_pkt_set_ipv4_fragment_id(struct net_pkt *pkt, uint32_t id)
731 {
732 	ARG_UNUSED(pkt);
733 	ARG_UNUSED(id);
734 }
735 #endif /* CONFIG_NET_IPV4_FRAGMENT */
736 
737 #if defined(CONFIG_NET_IPV6_FRAGMENT)
738 static inline uint16_t net_pkt_ipv6_fragment_start(struct net_pkt *pkt)
739 {
740 	return pkt->ipv6_fragment.hdr_start;
741 }
742 
743 static inline void net_pkt_set_ipv6_fragment_start(struct net_pkt *pkt,
744 						   uint16_t start)
745 {
746 	pkt->ipv6_fragment.hdr_start = start;
747 }
748 
749 static inline uint16_t net_pkt_ipv6_fragment_offset(struct net_pkt *pkt)
750 {
751 	return pkt->ipv6_fragment.flags & NET_IPV6_FRAGH_OFFSET_MASK;
752 }
753 static inline bool net_pkt_ipv6_fragment_more(struct net_pkt *pkt)
754 {
755 	return (pkt->ipv6_fragment.flags & 0x01) != 0;
756 }
757 
758 static inline void net_pkt_set_ipv6_fragment_flags(struct net_pkt *pkt,
759 						   uint16_t flags)
760 {
761 	pkt->ipv6_fragment.flags = flags;
762 }
763 
764 static inline uint32_t net_pkt_ipv6_fragment_id(struct net_pkt *pkt)
765 {
766 	return pkt->ipv6_fragment.id;
767 }
768 
769 static inline void net_pkt_set_ipv6_fragment_id(struct net_pkt *pkt,
770 						uint32_t id)
771 {
772 	pkt->ipv6_fragment.id = id;
773 }
774 #else /* CONFIG_NET_IPV6_FRAGMENT */
775 static inline uint16_t net_pkt_ipv6_fragment_start(struct net_pkt *pkt)
776 {
777 	ARG_UNUSED(pkt);
778 
779 	return 0;
780 }
781 
782 static inline void net_pkt_set_ipv6_fragment_start(struct net_pkt *pkt,
783 						   uint16_t start)
784 {
785 	ARG_UNUSED(pkt);
786 	ARG_UNUSED(start);
787 }
788 
789 static inline uint16_t net_pkt_ipv6_fragment_offset(struct net_pkt *pkt)
790 {
791 	ARG_UNUSED(pkt);
792 
793 	return 0;
794 }
795 
796 static inline bool net_pkt_ipv6_fragment_more(struct net_pkt *pkt)
797 {
798 	ARG_UNUSED(pkt);
799 
800 	return 0;
801 }
802 
803 static inline void net_pkt_set_ipv6_fragment_flags(struct net_pkt *pkt,
804 						   uint16_t flags)
805 {
806 	ARG_UNUSED(pkt);
807 	ARG_UNUSED(flags);
808 }
809 
810 static inline uint32_t net_pkt_ipv6_fragment_id(struct net_pkt *pkt)
811 {
812 	ARG_UNUSED(pkt);
813 
814 	return 0;
815 }
816 
817 static inline void net_pkt_set_ipv6_fragment_id(struct net_pkt *pkt,
818 						uint32_t id)
819 {
820 	ARG_UNUSED(pkt);
821 	ARG_UNUSED(id);
822 }
823 #endif /* CONFIG_NET_IPV6_FRAGMENT */
824 
825 static inline uint8_t net_pkt_priority(struct net_pkt *pkt)
826 {
827 	return pkt->priority;
828 }
829 
830 static inline void net_pkt_set_priority(struct net_pkt *pkt,
831 					uint8_t priority)
832 {
833 	pkt->priority = priority;
834 }
835 
836 #if defined(CONFIG_NET_VLAN)
837 static inline uint16_t net_pkt_vlan_tag(struct net_pkt *pkt)
838 {
839 	return net_eth_vlan_get_vid(pkt->vlan_tci);
840 }
841 
842 static inline void net_pkt_set_vlan_tag(struct net_pkt *pkt, uint16_t tag)
843 {
844 	pkt->vlan_tci = net_eth_vlan_set_vid(pkt->vlan_tci, tag);
845 }
846 
847 static inline uint8_t net_pkt_vlan_priority(struct net_pkt *pkt)
848 {
849 	return net_eth_vlan_get_pcp(pkt->vlan_tci);
850 }
851 
852 static inline void net_pkt_set_vlan_priority(struct net_pkt *pkt,
853 					     uint8_t priority)
854 {
855 	pkt->vlan_tci = net_eth_vlan_set_pcp(pkt->vlan_tci, priority);
856 }
857 
858 static inline bool net_pkt_vlan_dei(struct net_pkt *pkt)
859 {
860 	return net_eth_vlan_get_dei(pkt->vlan_tci);
861 }
862 
863 static inline void net_pkt_set_vlan_dei(struct net_pkt *pkt, bool dei)
864 {
865 	pkt->vlan_tci = net_eth_vlan_set_dei(pkt->vlan_tci, dei);
866 }
867 
868 static inline void net_pkt_set_vlan_tci(struct net_pkt *pkt, uint16_t tci)
869 {
870 	pkt->vlan_tci = tci;
871 }
872 
873 static inline uint16_t net_pkt_vlan_tci(struct net_pkt *pkt)
874 {
875 	return pkt->vlan_tci;
876 }
877 #else
878 static inline uint16_t net_pkt_vlan_tag(struct net_pkt *pkt)
879 {
880 	return NET_VLAN_TAG_UNSPEC;
881 }
882 
883 static inline void net_pkt_set_vlan_tag(struct net_pkt *pkt, uint16_t tag)
884 {
885 	ARG_UNUSED(pkt);
886 	ARG_UNUSED(tag);
887 }
888 
889 static inline uint8_t net_pkt_vlan_priority(struct net_pkt *pkt)
890 {
891 	ARG_UNUSED(pkt);
892 	return 0;
893 }
894 
895 static inline bool net_pkt_vlan_dei(struct net_pkt *pkt)
896 {
897 	return false;
898 }
899 
900 static inline void net_pkt_set_vlan_dei(struct net_pkt *pkt, bool dei)
901 {
902 	ARG_UNUSED(pkt);
903 	ARG_UNUSED(dei);
904 }
905 
906 static inline uint16_t net_pkt_vlan_tci(struct net_pkt *pkt)
907 {
908 	return NET_VLAN_TAG_UNSPEC; /* assumes priority is 0 */
909 }
910 
911 static inline void net_pkt_set_vlan_tci(struct net_pkt *pkt, uint16_t tci)
912 {
913 	ARG_UNUSED(pkt);
914 	ARG_UNUSED(tci);
915 }
916 #endif
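
/* Illustrative usage sketch (values are examples only): tag an outgoing
 * packet with VLAN ID 100, priority 3 and the DEI bit cleared.
 *
 *   net_pkt_set_vlan_tag(pkt, 100);
 *   net_pkt_set_vlan_priority(pkt, 3);
 *   net_pkt_set_vlan_dei(pkt, false);
 */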
917 
918 #if defined(CONFIG_NET_PKT_TIMESTAMP) || defined(CONFIG_NET_PKT_TXTIME)
919 static inline struct net_ptp_time *net_pkt_timestamp(struct net_pkt *pkt)
920 {
921 	return &pkt->timestamp;
922 }
923 
924 static inline void net_pkt_set_timestamp(struct net_pkt *pkt,
925 					 struct net_ptp_time *timestamp)
926 {
927 	pkt->timestamp.second = timestamp->second;
928 	pkt->timestamp.nanosecond = timestamp->nanosecond;
929 }
930 
931 static inline net_time_t net_pkt_timestamp_ns(struct net_pkt *pkt)
932 {
933 	return net_ptp_time_to_ns(&pkt->timestamp);
934 }
935 
936 static inline void net_pkt_set_timestamp_ns(struct net_pkt *pkt, net_time_t timestamp)
937 {
938 	pkt->timestamp = ns_to_net_ptp_time(timestamp);
939 }
940 #else
941 static inline struct net_ptp_time *net_pkt_timestamp(struct net_pkt *pkt)
942 {
943 	ARG_UNUSED(pkt);
944 
945 	return NULL;
946 }
947 
948 static inline void net_pkt_set_timestamp(struct net_pkt *pkt,
949 					 struct net_ptp_time *timestamp)
950 {
951 	ARG_UNUSED(pkt);
952 	ARG_UNUSED(timestamp);
953 }
954 
955 static inline net_time_t net_pkt_timestamp_ns(struct net_pkt *pkt)
956 {
957 	ARG_UNUSED(pkt);
958 
959 	return 0;
960 }
961 
962 static inline void net_pkt_set_timestamp_ns(struct net_pkt *pkt, net_time_t timestamp)
963 {
964 	ARG_UNUSED(pkt);
965 	ARG_UNUSED(timestamp);
966 }
967 #endif /* CONFIG_NET_PKT_TIMESTAMP || CONFIG_NET_PKT_TXTIME */
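
/* Illustrative sketch (the clock helper name and offset are hypothetical): to
 * schedule a TX packet one millisecond into the future on the network
 * subsystem's local clock, set the timestamp in nanoseconds:
 *
 *   net_time_t now = local_clock_now_ns();   (hypothetical helper)
 *
 *   net_pkt_set_timestamp_ns(pkt, now + 1000000);
 */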
968 
969 #if defined(CONFIG_NET_PKT_RXTIME_STATS) || defined(CONFIG_NET_PKT_TXTIME_STATS)
970 static inline uint32_t net_pkt_create_time(struct net_pkt *pkt)
971 {
972 	return pkt->create_time;
973 }
974 
975 static inline void net_pkt_set_create_time(struct net_pkt *pkt,
976 					   uint32_t create_time)
977 {
978 	pkt->create_time = create_time;
979 }
980 #else
981 static inline uint32_t net_pkt_create_time(struct net_pkt *pkt)
982 {
983 	ARG_UNUSED(pkt);
984 
985 	return 0U;
986 }
987 
988 static inline void net_pkt_set_create_time(struct net_pkt *pkt,
989 					   uint32_t create_time)
990 {
991 	ARG_UNUSED(pkt);
992 	ARG_UNUSED(create_time);
993 }
994 #endif /* CONFIG_NET_PKT_RXTIME_STATS || CONFIG_NET_PKT_TXTIME_STATS */
995 
996 /**
997  * @deprecated Use @ref net_pkt_timestamp or @ref net_pkt_timestamp_ns instead.
998  */
999 static inline uint64_t net_pkt_txtime(struct net_pkt *pkt)
1000 {
1001 #if defined(CONFIG_NET_PKT_TXTIME)
1002 	return pkt->timestamp.second * NSEC_PER_SEC + pkt->timestamp.nanosecond;
1003 #else
1004 	ARG_UNUSED(pkt);
1005 
1006 	return 0;
1007 #endif /* CONFIG_NET_PKT_TXTIME */
1008 }
1009 
1010 /**
1011  * @deprecated Use @ref net_pkt_set_timestamp or @ref net_pkt_set_timestamp_ns
1012  * instead.
1013  */
1014 static inline void net_pkt_set_txtime(struct net_pkt *pkt, uint64_t txtime)
1015 {
1016 #if defined(CONFIG_NET_PKT_TXTIME)
1017 	pkt->timestamp.second = txtime / NSEC_PER_SEC;
1018 	pkt->timestamp.nanosecond = txtime % NSEC_PER_SEC;
1019 #else
1020 	ARG_UNUSED(pkt);
1021 	ARG_UNUSED(txtime);
1022 #endif /* CONFIG_NET_PKT_TXTIME */
1023 }
1024 
1025 #if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL) || \
1026 	defined(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)
1027 static inline uint32_t *net_pkt_stats_tick(struct net_pkt *pkt)
1028 {
1029 	return pkt->detail.stat;
1030 }
1031 
1032 static inline int net_pkt_stats_tick_count(struct net_pkt *pkt)
1033 {
1034 	return pkt->detail.count;
1035 }
1036 
1037 static inline void net_pkt_stats_tick_reset(struct net_pkt *pkt)
1038 {
1039 	memset(&pkt->detail, 0, sizeof(pkt->detail));
1040 }
1041 
1042 static ALWAYS_INLINE void net_pkt_set_stats_tick(struct net_pkt *pkt,
1043 						 uint32_t tick)
1044 {
1045 	if (pkt->detail.count >= NET_PKT_DETAIL_STATS_COUNT) {
1046 		NET_ERR("Detail stats count overflow (%d >= %d)",
1047 			pkt->detail.count, NET_PKT_DETAIL_STATS_COUNT);
1048 		return;
1049 	}
1050 
1051 	pkt->detail.stat[pkt->detail.count++] = tick;
1052 }
1053 
1054 #define net_pkt_set_tx_stats_tick(pkt, tick) net_pkt_set_stats_tick(pkt, tick)
1055 #define net_pkt_set_rx_stats_tick(pkt, tick) net_pkt_set_stats_tick(pkt, tick)
1056 #else
1057 static inline uint32_t *net_pkt_stats_tick(struct net_pkt *pkt)
1058 {
1059 	ARG_UNUSED(pkt);
1060 
1061 	return NULL;
1062 }
1063 
1064 static inline int net_pkt_stats_tick_count(struct net_pkt *pkt)
1065 {
1066 	ARG_UNUSED(pkt);
1067 
1068 	return 0;
1069 }
1070 
1071 static inline void net_pkt_stats_tick_reset(struct net_pkt *pkt)
1072 {
1073 	ARG_UNUSED(pkt);
1074 }
1075 
1076 static inline void net_pkt_set_stats_tick(struct net_pkt *pkt, uint32_t tick)
1077 {
1078 	ARG_UNUSED(pkt);
1079 	ARG_UNUSED(tick);
1080 }
1081 
1082 #define net_pkt_set_tx_stats_tick(pkt, tick)
1083 #define net_pkt_set_rx_stats_tick(pkt, tick)
1084 #endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL ||
1085 	  CONFIG_NET_PKT_RXTIME_STATS_DETAIL */
1086 
1087 static inline size_t net_pkt_get_len(struct net_pkt *pkt)
1088 {
1089 	return net_buf_frags_len(pkt->frags);
1090 }
1091 
1092 static inline uint8_t *net_pkt_data(struct net_pkt *pkt)
1093 {
1094 	return pkt->frags->data;
1095 }
1096 
1097 static inline uint8_t *net_pkt_ip_data(struct net_pkt *pkt)
1098 {
1099 	return pkt->frags->data;
1100 }
1101 
1102 static inline bool net_pkt_is_empty(struct net_pkt *pkt)
1103 {
1104 	return !pkt->buffer || !net_pkt_data(pkt) || pkt->buffer->len == 0;
1105 }
1106 
1107 static inline struct net_linkaddr *net_pkt_lladdr_src(struct net_pkt *pkt)
1108 {
1109 	return &pkt->lladdr_src;
1110 }
1111 
1112 static inline struct net_linkaddr *net_pkt_lladdr_dst(struct net_pkt *pkt)
1113 {
1114 	return &pkt->lladdr_dst;
1115 }
1116 
1117 static inline void net_pkt_lladdr_swap(struct net_pkt *pkt)
1118 {
1119 	uint8_t *addr = net_pkt_lladdr_src(pkt)->addr;
1120 
1121 	net_pkt_lladdr_src(pkt)->addr = net_pkt_lladdr_dst(pkt)->addr;
1122 	net_pkt_lladdr_dst(pkt)->addr = addr;
1123 }
1124 
1125 static inline void net_pkt_lladdr_clear(struct net_pkt *pkt)
1126 {
1127 	net_pkt_lladdr_src(pkt)->addr = NULL;
1128 	net_pkt_lladdr_src(pkt)->len = 0U;
1129 }
1130 
1131 static inline uint16_t net_pkt_ll_proto_type(struct net_pkt *pkt)
1132 {
1133 	return pkt->ll_proto_type;
1134 }
1135 
1136 static inline void net_pkt_set_ll_proto_type(struct net_pkt *pkt, uint16_t type)
1137 {
1138 	pkt->ll_proto_type = type;
1139 }
1140 
1141 #if defined(CONFIG_NET_IPV4_AUTO)
1142 static inline bool net_pkt_ipv4_auto(struct net_pkt *pkt)
1143 {
1144 	return !!(pkt->ipv4_auto_arp_msg);
1145 }
1146 
1147 static inline void net_pkt_set_ipv4_auto(struct net_pkt *pkt,
1148 					 bool is_auto_arp_msg)
1149 {
1150 	pkt->ipv4_auto_arp_msg = is_auto_arp_msg;
1151 }
1152 #else /* CONFIG_NET_IPV4_AUTO */
1153 static inline bool net_pkt_ipv4_auto(struct net_pkt *pkt)
1154 {
1155 	ARG_UNUSED(pkt);
1156 
1157 	return false;
1158 }
1159 
1160 static inline void net_pkt_set_ipv4_auto(struct net_pkt *pkt,
1161 					 bool is_auto_arp_msg)
1162 {
1163 	ARG_UNUSED(pkt);
1164 	ARG_UNUSED(is_auto_arp_msg);
1165 }
1166 #endif /* CONFIG_NET_IPV4_AUTO */
1167 
1168 #if defined(CONFIG_NET_LLDP)
1169 static inline bool net_pkt_is_lldp(struct net_pkt *pkt)
1170 {
1171 	return !!(pkt->lldp_pkt);
1172 }
1173 
1174 static inline void net_pkt_set_lldp(struct net_pkt *pkt, bool is_lldp)
1175 {
1176 	pkt->lldp_pkt = is_lldp;
1177 }
1178 #else
1179 static inline bool net_pkt_is_lldp(struct net_pkt *pkt)
1180 {
1181 	ARG_UNUSED(pkt);
1182 
1183 	return false;
1184 }
1185 
1186 static inline void net_pkt_set_lldp(struct net_pkt *pkt, bool is_lldp)
1187 {
1188 	ARG_UNUSED(pkt);
1189 	ARG_UNUSED(is_lldp);
1190 }
1191 #endif /* CONFIG_NET_LLDP */
1192 
1193 #if defined(CONFIG_NET_L2_PPP)
1194 static inline bool net_pkt_is_ppp(struct net_pkt *pkt)
1195 {
1196 	return !!(pkt->ppp_msg);
1197 }
1198 
1199 static inline void net_pkt_set_ppp(struct net_pkt *pkt,
1200 				   bool is_ppp_msg)
1201 {
1202 	pkt->ppp_msg = is_ppp_msg;
1203 }
1204 #else /* CONFIG_NET_L2_PPP */
1205 static inline bool net_pkt_is_ppp(struct net_pkt *pkt)
1206 {
1207 	ARG_UNUSED(pkt);
1208 
1209 	return false;
1210 }
1211 
1212 static inline void net_pkt_set_ppp(struct net_pkt *pkt,
1213 				   bool is_ppp_msg)
1214 {
1215 	ARG_UNUSED(pkt);
1216 	ARG_UNUSED(is_ppp_msg);
1217 }
1218 #endif /* CONFIG_NET_L2_PPP */
1219 
1220 #if defined(NET_PKT_HAS_CONTROL_BLOCK)
1221 static inline void *net_pkt_cb(struct net_pkt *pkt)
1222 {
1223 	return &pkt->cb;
1224 }
1225 #else
1226 static inline void *net_pkt_cb(struct net_pkt *pkt)
1227 {
1228 	ARG_UNUSED(pkt);
1229 
1230 	return NULL;
1231 }
1232 #endif
1233 
1234 #define NET_IPV6_HDR(pkt) ((struct net_ipv6_hdr *)net_pkt_ip_data(pkt))
1235 #define NET_IPV4_HDR(pkt) ((struct net_ipv4_hdr *)net_pkt_ip_data(pkt))
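
/* Illustrative sketch: these macros assume the IP header is located at the
 * start of the first fragment. For example, reading the TTL of a received
 * IPv4 packet (field name as defined by struct net_ipv4_hdr in net_ip.h):
 *
 *   struct net_ipv4_hdr *hdr = NET_IPV4_HDR(pkt);
 *   uint8_t ttl = hdr->ttl;
 */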
1236 
1237 static inline void net_pkt_set_src_ipv6_addr(struct net_pkt *pkt)
1238 {
1239 	net_if_ipv6_select_src_addr(net_context_get_iface(
1240 					    net_pkt_context(pkt)),
1241 				    (struct in6_addr *)NET_IPV6_HDR(pkt)->src);
1242 }
1243 
1244 static inline void net_pkt_set_overwrite(struct net_pkt *pkt, bool overwrite)
1245 {
1246 	pkt->overwrite = overwrite;
1247 }
1248 
1249 static inline bool net_pkt_is_being_overwritten(struct net_pkt *pkt)
1250 {
1251 	return !!(pkt->overwrite);
1252 }
1253 
1254 #ifdef CONFIG_NET_PKT_FILTER
1255 
1256 bool net_pkt_filter_send_ok(struct net_pkt *pkt);
1257 bool net_pkt_filter_recv_ok(struct net_pkt *pkt);
1258 
1259 #else
1260 
1261 static inline bool net_pkt_filter_send_ok(struct net_pkt *pkt)
1262 {
1263 	ARG_UNUSED(pkt);
1264 
1265 	return true;
1266 }
1267 
1268 static inline bool net_pkt_filter_recv_ok(struct net_pkt *pkt)
1269 {
1270 	ARG_UNUSED(pkt);
1271 
1272 	return true;
1273 }
1274 
1275 #endif /* CONFIG_NET_PKT_FILTER */
1276 
1277 #if defined(CONFIG_NET_PKT_FILTER) && \
1278 	(defined(CONFIG_NET_PKT_FILTER_IPV4_HOOK) || defined(CONFIG_NET_PKT_FILTER_IPV6_HOOK))
1279 
1280 bool net_pkt_filter_ip_recv_ok(struct net_pkt *pkt);
1281 
1282 #else
1283 
1284 static inline bool net_pkt_filter_ip_recv_ok(struct net_pkt *pkt)
1285 {
1286 	ARG_UNUSED(pkt);
1287 
1288 	return true;
1289 }
1290 
1291 #endif /* CONFIG_NET_PKT_FILTER_IPV4_HOOK || CONFIG_NET_PKT_FILTER_IPV6_HOOK */
1292 
1293 #if defined(CONFIG_NET_PKT_FILTER) && defined(CONFIG_NET_PKT_FILTER_LOCAL_IN_HOOK)
1294 
1295 bool net_pkt_filter_local_in_recv_ok(struct net_pkt *pkt);
1296 
1297 #else
1298 
1299 static inline bool net_pkt_filter_local_in_recv_ok(struct net_pkt *pkt)
1300 {
1301 	ARG_UNUSED(pkt);
1302 
1303 	return true;
1304 }
1305 
1306 #endif /* CONFIG_NET_PKT_FILTER && CONFIG_NET_PKT_FILTER_LOCAL_IN_HOOK */
1307 
1308 /* @endcond */
1309 
1310 /**
1311  * @brief Create a net_pkt slab
1312  *
1313  * A net_pkt slab is used to store meta-information about
1314  * network packets. It must be coupled with a data fragment pool
1315  * (@ref NET_PKT_DATA_POOL_DEFINE) used to store the actual
1316  * packet data. The macro can be used by an application to define
1317  * additional custom per-context TX packet slabs (see
1318  * net_context_setup_pools()).
1319  *
1320  * @param name Name of the slab.
1321  * @param count Number of net_pkt in this slab.
1322  */
1323 #define NET_PKT_SLAB_DEFINE(name, count)				\
1324 	K_MEM_SLAB_DEFINE(name, sizeof(struct net_pkt), count, 4)
1325 
1326 /* Backward compatibility macro */
1327 #define NET_PKT_TX_SLAB_DEFINE(name, count) NET_PKT_SLAB_DEFINE(name, count)
1328 
1329 /**
1330  * @brief Create a data fragment net_buf pool
1331  *
1332  * A net_buf pool is used to store actual data for
1333  * network packets. It must be coupled with a net_pkt slab
1334  * (@ref NET_PKT_SLAB_DEFINE) used to store the packet
1335  * meta-information. The macro can be used by an application to
1336  * define additional custom per-context TX packet pools (see
1337  * net_context_setup_pools()).
1338  *
1339  * @param name Name of the pool.
1340  * @param count Number of net_buf in this pool.
1341  */
1342 #define NET_PKT_DATA_POOL_DEFINE(name, count)				\
1343 	NET_BUF_POOL_DEFINE(name, count, CONFIG_NET_BUF_DATA_SIZE,	\
1344 			    0, NULL)
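
/* Illustrative sketch (names and counts are examples only): a custom TX
 * packet slab and matching data pool pair that an application could register
 * via net_context_setup_pools().
 *
 *   NET_PKT_SLAB_DEFINE(my_tx_pkts, 15);
 *   NET_PKT_DATA_POOL_DEFINE(my_tx_bufs, 30);
 */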
1345 
1346 /** @cond INTERNAL_HIDDEN */
1347 
1348 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC) || \
1349 	(CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG)
1350 #define NET_PKT_DEBUG_ENABLED
1351 #endif
1352 
1353 #if defined(NET_PKT_DEBUG_ENABLED)
1354 
1355 /* Debug versions of the net_pkt functions that are used when tracking
1356  * buffer usage.
1357  */
1358 
1359 struct net_buf *net_pkt_get_reserve_data_debug(struct net_buf_pool *pool,
1360 					       size_t min_len,
1361 					       k_timeout_t timeout,
1362 					       const char *caller,
1363 					       int line);
1364 
1365 #define net_pkt_get_reserve_data(pool, min_len, timeout)				\
1366 	net_pkt_get_reserve_data_debug(pool, min_len, timeout, __func__, __LINE__)
1367 
1368 struct net_buf *net_pkt_get_reserve_rx_data_debug(size_t min_len,
1369 						  k_timeout_t timeout,
1370 						  const char *caller,
1371 						  int line);
1372 #define net_pkt_get_reserve_rx_data(min_len, timeout)				\
1373 	net_pkt_get_reserve_rx_data_debug(min_len, timeout, __func__, __LINE__)
1374 
1375 struct net_buf *net_pkt_get_reserve_tx_data_debug(size_t min_len,
1376 						  k_timeout_t timeout,
1377 						  const char *caller,
1378 						  int line);
1379 #define net_pkt_get_reserve_tx_data(min_len, timeout)				\
1380 	net_pkt_get_reserve_tx_data_debug(min_len, timeout, __func__, __LINE__)
1381 
1382 struct net_buf *net_pkt_get_frag_debug(struct net_pkt *pkt, size_t min_len,
1383 				       k_timeout_t timeout,
1384 				       const char *caller, int line);
1385 #define net_pkt_get_frag(pkt, min_len, timeout)					\
1386 	net_pkt_get_frag_debug(pkt, min_len, timeout, __func__, __LINE__)
1387 
1388 void net_pkt_unref_debug(struct net_pkt *pkt, const char *caller, int line);
1389 #define net_pkt_unref(pkt) net_pkt_unref_debug(pkt, __func__, __LINE__)
1390 
1391 struct net_pkt *net_pkt_ref_debug(struct net_pkt *pkt, const char *caller,
1392 				  int line);
1393 #define net_pkt_ref(pkt) net_pkt_ref_debug(pkt, __func__, __LINE__)
1394 
1395 struct net_buf *net_pkt_frag_ref_debug(struct net_buf *frag,
1396 				       const char *caller, int line);
1397 #define net_pkt_frag_ref(frag) net_pkt_frag_ref_debug(frag, __func__, __LINE__)
1398 
1399 void net_pkt_frag_unref_debug(struct net_buf *frag,
1400 			      const char *caller, int line);
1401 #define net_pkt_frag_unref(frag)				\
1402 	net_pkt_frag_unref_debug(frag, __func__, __LINE__)
1403 
1404 struct net_buf *net_pkt_frag_del_debug(struct net_pkt *pkt,
1405 				       struct net_buf *parent,
1406 				       struct net_buf *frag,
1407 				       const char *caller, int line);
1408 #define net_pkt_frag_del(pkt, parent, frag)				\
1409 	net_pkt_frag_del_debug(pkt, parent, frag, __func__, __LINE__)
1410 
1411 void net_pkt_frag_add_debug(struct net_pkt *pkt, struct net_buf *frag,
1412 			    const char *caller, int line);
1413 #define net_pkt_frag_add(pkt, frag)				\
1414 	net_pkt_frag_add_debug(pkt, frag, __func__, __LINE__)
1415 
1416 void net_pkt_frag_insert_debug(struct net_pkt *pkt, struct net_buf *frag,
1417 			       const char *caller, int line);
1418 #define net_pkt_frag_insert(pkt, frag)					\
1419 	net_pkt_frag_insert_debug(pkt, frag, __func__, __LINE__)
1420 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC ||
1421 	* CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
1422 	*/
1423 /** @endcond */
1424 
1425 /**
1426  * @brief Print fragment list and the fragment sizes
1427  *
1428  * @details Only available if debugging is activated.
1429  *
1430  * @param pkt Network pkt.
1431  */
1432 #if defined(NET_PKT_DEBUG_ENABLED)
1433 void net_pkt_print_frags(struct net_pkt *pkt);
1434 #else
1435 #define net_pkt_print_frags(pkt)
1436 #endif
1437 
1438 /**
1439  * @brief Get RX DATA buffer from pool.
1440  * Normally you should use net_pkt_get_frag() instead.
1441  *
1442  * @details Normally this version is not useful for applications
1443  * but is mainly used by network fragmentation code.
1444  *
1445  * @param min_len Minimum length of the requested fragment.
1446  * @param timeout Affects the action taken should the net buf pool be empty.
1447  *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
1448  *        wait as long as necessary. Otherwise, wait up to the specified time.
1449  *
1450  * @return Network buffer if successful, NULL otherwise.
1451  */
1452 #if !defined(NET_PKT_DEBUG_ENABLED)
1453 struct net_buf *net_pkt_get_reserve_rx_data(size_t min_len, k_timeout_t timeout);
1454 #endif
1455 
1456 /**
1457  * @brief Get TX DATA buffer from pool.
1458  * Normally you should use net_pkt_get_frag() instead.
1459  *
1460  * @details Normally this version is not useful for applications
1461  * but is mainly used by network fragmentation code.
1462  *
1463  * @param min_len Minimum length of the requested fragment.
1464  * @param timeout Affects the action taken should the net buf pool be empty.
1465  *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
1466  *        wait as long as necessary. Otherwise, wait up to the specified time.
1467  *
1468  * @return Network buffer if successful, NULL otherwise.
1469  */
1470 #if !defined(NET_PKT_DEBUG_ENABLED)
1471 struct net_buf *net_pkt_get_reserve_tx_data(size_t min_len, k_timeout_t timeout);
1472 #endif
1473 
1474 /**
1475  * @brief Get a data fragment that might come from a user-specific
1476  * buffer pool or from the global DATA pool.
1477  *
1478  * @param pkt Network packet.
1479  * @param min_len Minimum length of the requested fragment.
1480  * @param timeout Affects the action taken should the net buf pool be empty.
1481  *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
1482  *        wait as long as necessary. Otherwise, wait up to the specified time.
1483  *
1484  * @return Network buffer if successful, NULL otherwise.
1485  */
1486 #if !defined(NET_PKT_DEBUG_ENABLED)
1487 struct net_buf *net_pkt_get_frag(struct net_pkt *pkt, size_t min_len,
1488 				 k_timeout_t timeout);
1489 #endif
1490 
1491 /**
1492  * @brief Place packet back into the available packets slab
1493  *
1494  * @details Releases the packet for other use. This needs to be
1495  * called by the application after it has finished with the packet.
1496  *
1497  * @param pkt Network packet to release.
1498  *
1499  */
1500 #if !defined(NET_PKT_DEBUG_ENABLED)
1501 void net_pkt_unref(struct net_pkt *pkt);
1502 #endif
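
/* Illustrative lifecycle sketch (the timeout value is an example only): once
 * the last reference is dropped with net_pkt_unref(), the packet goes back to
 * its slab and its buffers return to their pool.
 *
 *   struct net_pkt *pkt = net_pkt_alloc(K_MSEC(100));
 *
 *   if (pkt != NULL) {
 *           (use the packet)
 *           net_pkt_unref(pkt);
 *   }
 */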
1503 
1504 /**
1505  * @brief Increase the packet ref count
1506  *
1507  * @details Mark the packet as still being in use.
1508  *
1509  * @param pkt Network packet to ref.
1510  *
1511  * @return Network packet if successful, NULL otherwise.
1512  */
1513 #if !defined(NET_PKT_DEBUG_ENABLED)
1514 struct net_pkt *net_pkt_ref(struct net_pkt *pkt);
1515 #endif
1516 
1517 /**
1518  * @brief Increase the packet fragment ref count
1519  *
1520  * @details Mark the fragment as still being in use.
1521  *
1522  * @param frag Network fragment to ref.
1523  *
1524  * @return a pointer to the referenced network fragment.
1525  */
1526 #if !defined(NET_PKT_DEBUG_ENABLED)
1527 struct net_buf *net_pkt_frag_ref(struct net_buf *frag);
1528 #endif
1529 
1530 /**
1531  * @brief Decrease the packet fragment ref count
1532  *
1533  * @param frag Network fragment to unref.
1534  */
1535 #if !defined(NET_PKT_DEBUG_ENABLED)
1536 void net_pkt_frag_unref(struct net_buf *frag);
1537 #endif
1538 
1539 /**
1540  * @brief Delete existing fragment from a packet
1541  *
1542  * @param pkt Network packet from which frag belongs to.
1543  * @param parent parent fragment of frag, or NULL if none.
1544  * @param frag Fragment to delete.
1545  *
1546  * @return Pointer to the following fragment, or NULL if it had no
1547  *         further fragments.
1548  */
1549 #if !defined(NET_PKT_DEBUG_ENABLED)
1550 struct net_buf *net_pkt_frag_del(struct net_pkt *pkt,
1551 				 struct net_buf *parent,
1552 				 struct net_buf *frag);
1553 #endif
1554 
1555 /**
1556  * @brief Add a fragment to a packet at the end of its fragment list
1557  *
1558  * @param pkt Network packet to which the fragment is added
1559  * @param frag Fragment to add
1560  */
1561 #if !defined(NET_PKT_DEBUG_ENABLED)
1562 void net_pkt_frag_add(struct net_pkt *pkt, struct net_buf *frag);
1563 #endif
1564 
1565 /**
1566  * @brief Insert a fragment to a packet at the beginning of its fragment list
1567  *
1568  * @param pkt Network packet into which the fragment is inserted
1569  * @param frag Fragment to insert
1570  */
1571 #if !defined(NET_PKT_DEBUG_ENABLED)
1572 void net_pkt_frag_insert(struct net_pkt *pkt, struct net_buf *frag);
1573 #endif
1574 
1575 /**
1576  * @brief Compact the fragment list of a packet.
1577  *
1578  * @details After this there is no free space left in the individual fragments.
1579  * @param pkt Network packet.
1580  */
1581 void net_pkt_compact(struct net_pkt *pkt);
1582 
1583 /**
1584  * @brief Get information about predefined RX, TX and DATA pools.
1585  *
1586  * @param rx Pointer to RX pool is returned.
1587  * @param tx Pointer to TX pool is returned.
1588  * @param rx_data Pointer to RX DATA pool is returned.
1589  * @param tx_data Pointer to TX DATA pool is returned.
1590  */
1591 void net_pkt_get_info(struct k_mem_slab **rx,
1592 		      struct k_mem_slab **tx,
1593 		      struct net_buf_pool **rx_data,
1594 		      struct net_buf_pool **tx_data);
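
/* Illustrative usage sketch:
 *
 *   struct k_mem_slab *rx_pkts, *tx_pkts;
 *   struct net_buf_pool *rx_bufs, *tx_bufs;
 *
 *   net_pkt_get_info(&rx_pkts, &tx_pkts, &rx_bufs, &tx_bufs);
 */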
1595 
1596 /** @cond INTERNAL_HIDDEN */
1597 
1598 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
1599 /**
1600  * @brief Debug helper to print out the buffer allocations
1601  */
1602 void net_pkt_print(void);
1603 
1604 typedef void (*net_pkt_allocs_cb_t)(struct net_pkt *pkt,
1605 				    struct net_buf *buf,
1606 				    const char *func_alloc,
1607 				    int line_alloc,
1608 				    const char *func_free,
1609 				    int line_free,
1610 				    bool in_use,
1611 				    void *user_data);
1612 
1613 void net_pkt_allocs_foreach(net_pkt_allocs_cb_t cb, void *user_data);
1614 
1615 const char *net_pkt_slab2str(struct k_mem_slab *slab);
1616 const char *net_pkt_pool2str(struct net_buf_pool *pool);
1617 
1618 #else
1619 #define net_pkt_print(...)
1620 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC */
1621 
1622 /* The new allocator and API are defined below.
1623  * This will become simpler once the former API above is removed.
1624  */
1625 #if defined(NET_PKT_DEBUG_ENABLED)
1626 
1627 struct net_pkt *net_pkt_alloc_debug(k_timeout_t timeout,
1628 				    const char *caller, int line);
1629 #define net_pkt_alloc(_timeout)					\
1630 	net_pkt_alloc_debug(_timeout, __func__, __LINE__)
1631 
1632 struct net_pkt *net_pkt_alloc_from_slab_debug(struct k_mem_slab *slab,
1633 					      k_timeout_t timeout,
1634 					      const char *caller, int line);
1635 #define net_pkt_alloc_from_slab(_slab, _timeout)			\
1636 	net_pkt_alloc_from_slab_debug(_slab, _timeout, __func__, __LINE__)
1637 
1638 struct net_pkt *net_pkt_rx_alloc_debug(k_timeout_t timeout,
1639 				       const char *caller, int line);
1640 #define net_pkt_rx_alloc(_timeout)				\
1641 	net_pkt_rx_alloc_debug(_timeout, __func__, __LINE__)
1642 
1643 struct net_pkt *net_pkt_alloc_on_iface_debug(struct net_if *iface,
1644 					     k_timeout_t timeout,
1645 					     const char *caller,
1646 					     int line);
1647 #define net_pkt_alloc_on_iface(_iface, _timeout)			\
1648 	net_pkt_alloc_on_iface_debug(_iface, _timeout, __func__, __LINE__)
1649 
1650 struct net_pkt *net_pkt_rx_alloc_on_iface_debug(struct net_if *iface,
1651 						k_timeout_t timeout,
1652 						const char *caller,
1653 						int line);
1654 #define net_pkt_rx_alloc_on_iface(_iface, _timeout)			\
1655 	net_pkt_rx_alloc_on_iface_debug(_iface, _timeout,		\
1656 					__func__, __LINE__)
1657 
1658 int net_pkt_alloc_buffer_debug(struct net_pkt *pkt,
1659 			       size_t size,
1660 			       enum net_ip_protocol proto,
1661 			       k_timeout_t timeout,
1662 			       const char *caller, int line);
1663 #define net_pkt_alloc_buffer(_pkt, _size, _proto, _timeout)		\
1664 	net_pkt_alloc_buffer_debug(_pkt, _size, _proto, _timeout,	\
1665 				   __func__, __LINE__)
1666 
1667 struct net_pkt *net_pkt_alloc_with_buffer_debug(struct net_if *iface,
1668 						size_t size,
1669 						sa_family_t family,
1670 						enum net_ip_protocol proto,
1671 						k_timeout_t timeout,
1672 						const char *caller,
1673 						int line);
1674 #define net_pkt_alloc_with_buffer(_iface, _size, _family,		\
1675 				  _proto, _timeout)			\
1676 	net_pkt_alloc_with_buffer_debug(_iface, _size, _family,		\
1677 					_proto, _timeout,		\
1678 					__func__, __LINE__)
1679 
1680 struct net_pkt *net_pkt_rx_alloc_with_buffer_debug(struct net_if *iface,
1681 						   size_t size,
1682 						   sa_family_t family,
1683 						   enum net_ip_protocol proto,
1684 						   k_timeout_t timeout,
1685 						   const char *caller,
1686 						   int line);
1687 #define net_pkt_rx_alloc_with_buffer(_iface, _size, _family,		\
1688 				     _proto, _timeout)			\
1689 	net_pkt_rx_alloc_with_buffer_debug(_iface, _size, _family,	\
1690 					   _proto, _timeout,		\
1691 					   __func__, __LINE__)
1692 #endif /* NET_PKT_DEBUG_ENABLED */
1693 /** @endcond */
1694 
1695 /**
1696  * @brief Allocate an initialized net_pkt
1697  *
1698  * @details For the time being, two pools are used: one for TX and one for RX.
1699  *          This allocator must be used for TX.
1700  *
1701  * @param timeout Maximum time to wait for an allocation.
1702  *
1703  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1704  */
1705 #if !defined(NET_PKT_DEBUG_ENABLED)
1706 struct net_pkt *net_pkt_alloc(k_timeout_t timeout);
1707 #endif
1708 
1709 /**
1710  * @brief Allocate an initialized net_pkt from a specific slab
1711  *
1712  * @details Unlike net_pkt_alloc(), which uses the core slabs, this one uses
1713  *          an external slab (see NET_PKT_SLAB_DEFINE()).
1714  *          Do _not_ use it unless you know what you are doing. Basically, only
1715  *          net_context should be using this, in order to allocate the packet
1716  *          and then the buffer from its local slab/pool (if any).
1717  *
1718  * @param slab    The slab to use for allocating the packet
1719  * @param timeout Maximum time to wait for an allocation.
1720  *
1721  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1722  */
1723 #if !defined(NET_PKT_DEBUG_ENABLED)
1724 struct net_pkt *net_pkt_alloc_from_slab(struct k_mem_slab *slab,
1725 					k_timeout_t timeout);
1726 #endif
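
/* Example (illustrative sketch, not part of the original API description):
 * defining a context-local packet slab with NET_PKT_SLAB_DEFINE() and
 * allocating from it. The slab name and packet count are hypothetical.
 *
 *	NET_PKT_SLAB_DEFINE(my_ctx_tx_slab, 4);
 *
 *	struct net_pkt *pkt = net_pkt_alloc_from_slab(&my_ctx_tx_slab,
 *						      K_NO_WAIT);
 *	if (pkt == NULL) {
 *		return -ENOMEM;
 *	}
 */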
1727 
1728 /**
1729  * @brief Allocate an initialized net_pkt for RX
1730  *
1731  * @details For the time being, two pools are used: one for TX and one for RX.
1732  *          This allocator must be used for RX.
1733  *
1734  * @param timeout Maximum time to wait for an allocation.
1735  *
1736  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1737  */
1738 #if !defined(NET_PKT_DEBUG_ENABLED)
1739 struct net_pkt *net_pkt_rx_alloc(k_timeout_t timeout);
1740 #endif
1741 
1742 /**
1743  * @brief Allocate a network packet for a specific network interface.
1744  *
1745  * @param iface The network interface the packet is supposed to go through.
1746  * @param timeout Maximum time to wait for an allocation.
1747  *
1748  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1749  */
1750 #if !defined(NET_PKT_DEBUG_ENABLED)
1751 struct net_pkt *net_pkt_alloc_on_iface(struct net_if *iface,
1752 				       k_timeout_t timeout);
1753 
1754 /* Same as above but specifically for RX packet */
1755 struct net_pkt *net_pkt_rx_alloc_on_iface(struct net_if *iface,
1756 					  k_timeout_t timeout);
1757 #endif
1758 
1759 /**
1760  * @brief Allocate buffer for a net_pkt
1761  *
1762  * @details This allocator takes into account the space necessary for headers,
1763  *          the MTU, and any existing buffer. Beware that, due to these
1764  *          criteria, the allocated size might be smaller or bigger than the
1765  *          requested one.
1766  *
1767  * @param pkt     The network packet requiring buffer to be allocated.
1768  * @param size    The size of buffer being requested.
1769  * @param proto   The IP protocol type (can be 0 for none).
1770  * @param timeout Maximum time to wait for an allocation.
1771  *
1772  * @return 0 on success, negative errno code otherwise.
1773  */
1774 #if !defined(NET_PKT_DEBUG_ENABLED)
1775 int net_pkt_alloc_buffer(struct net_pkt *pkt,
1776 			 size_t size,
1777 			 enum net_ip_protocol proto,
1778 			 k_timeout_t timeout);
1779 #endif
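
/* Example (illustrative sketch): allocating a packet bound to an interface and
 * then its buffer in a second step. "iface" is assumed to be a valid
 * struct net_if pointer; size, protocol and timeout are arbitrary.
 * net_pkt_unref(), declared earlier in this file, releases the packet on error.
 *
 *	struct net_pkt *pkt;
 *
 *	pkt = net_pkt_alloc_on_iface(iface, K_MSEC(100));
 *	if (pkt == NULL) {
 *		return -ENOMEM;
 *	}
 *
 *	if (net_pkt_alloc_buffer(pkt, 128, IPPROTO_UDP, K_MSEC(100)) < 0) {
 *		net_pkt_unref(pkt);
 *		return -ENOMEM;
 *	}
 */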
1780 
1781 /**
1782  * @brief Allocate a network packet and buffer at once
1783  *
1784  * @param iface   The network interface the packet is supposed to go through.
1785  * @param size    The size of buffer.
1786  * @param family  The family to which the packet belongs.
1787  * @param proto   The IP protocol type (can be 0 for none).
1788  * @param timeout Maximum time to wait for an allocation.
1789  *
1790  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1791  */
1792 #if !defined(NET_PKT_DEBUG_ENABLED)
1793 struct net_pkt *net_pkt_alloc_with_buffer(struct net_if *iface,
1794 					  size_t size,
1795 					  sa_family_t family,
1796 					  enum net_ip_protocol proto,
1797 					  k_timeout_t timeout);
1798 
1799 /* Same as above but specifically for RX packet */
1800 struct net_pkt *net_pkt_rx_alloc_with_buffer(struct net_if *iface,
1801 					     size_t size,
1802 					     sa_family_t family,
1803 					     enum net_ip_protocol proto,
1804 					     k_timeout_t timeout);
1805 #endif
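
/* Example (illustrative sketch): one-shot allocation of a packet together with
 * enough buffer for a small IPv6/UDP payload, then writing that payload with
 * net_pkt_write() (documented further below). "iface" is assumed to be a valid
 * struct net_if pointer; sizes and timeout are arbitrary.
 *
 *	static const uint8_t payload[] = { 0xde, 0xad, 0xbe, 0xef };
 *	struct net_pkt *pkt;
 *
 *	pkt = net_pkt_alloc_with_buffer(iface, sizeof(payload), AF_INET6,
 *					IPPROTO_UDP, K_MSEC(100));
 *	if (pkt == NULL) {
 *		return -ENOMEM;
 *	}
 *
 *	if (net_pkt_write(pkt, payload, sizeof(payload)) < 0) {
 *		net_pkt_unref(pkt);
 *		return -ENOMEM;
 *	}
 */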
1806 
1807 /**
1808  * @brief Append a buffer in packet
1809  *
1810  * @param pkt    Network packet where to append the buffer
1811  * @param buffer Buffer to append
1812  */
1813 void net_pkt_append_buffer(struct net_pkt *pkt, struct net_buf *buffer);
1814 
1815 /**
1816  * @brief Get available buffer space from a pkt
1817  *
1818  * @note Reserved bytes (headroom) in any of the fragments are not considered to
1819  *       be available.
1820  *
1821  * @param pkt The net_pkt whose buffer availability should be evaluated
1822  *
1823  * @return the amount of buffer available
1824  */
1825 size_t net_pkt_available_buffer(struct net_pkt *pkt);
1826 
1827 /**
1828  * @brief Get available buffer space for payload from a pkt
1829  *
1830  * @note Reserved bytes (headroom) in any of the fragments are not considered to
1831  *       be available.
1832  *
1833  * @details Unlike net_pkt_available_buffer(), this one takes the header
1834  *          space into account.
1835  *
1836  * @param pkt   The net_pkt whose payload buffer availability should
1837  *              be evaluated
1838  * @param proto The IP protocol type (can be 0 for none).
1839  *
1840  * @return the amount of buffer available for payload
1841  */
1842 size_t net_pkt_available_payload_buffer(struct net_pkt *pkt,
1843 					enum net_ip_protocol proto);
1844 
1845 /**
1846  * @brief Trim net_pkt buffer
1847  *
1848  * @details This checks for unused buffers and deallocates
1849  *          them accordingly.
1850  *
1851  * @param pkt The net_pkt whose buffer will be trimmed
1852  */
1853 void net_pkt_trim_buffer(struct net_pkt *pkt);
1854 
1855 /**
1856  * @brief Remove @a length bytes from tail of packet
1857  *
1858  * @details This function does not take the packet cursor into account. It is
1859  *          a helper to remove unneeded bytes from the tail of the packet (such
1860  *          as an appended CRC). It takes care of buffer deallocation if the
1861  *          removed bytes span whole buffer(s).
1862  *
1863  * @param pkt    Network packet
1864  * @param length Number of bytes to be removed
1865  *
1866  * @retval 0       On success.
1867  * @retval -EINVAL If packet length is shorter than @a length.
1868  */
1869 int net_pkt_remove_tail(struct net_pkt *pkt, size_t length);
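
/* Example (illustrative sketch): stripping a 4-byte checksum that a driver
 * left at the end of a received packet. The checksum length is hypothetical.
 *
 *	if (net_pkt_remove_tail(pkt, 4) < 0) {
 *		return -EINVAL;
 *	}
 */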
1870 
1871 /**
1872  * @brief Initialize net_pkt cursor
1873  *
1874  * @details This will initialize the net_pkt cursor from its buffer.
1875  *
1876  * @param pkt The net_pkt whose cursor is going to be initialized
1877  */
1878 void net_pkt_cursor_init(struct net_pkt *pkt);
1879 
1880 /**
1881  * @brief Backup net_pkt cursor
1882  *
1883  * @param pkt    The net_pkt whose cursor is going to be backed up
1884  * @param backup The cursor where to backup net_pkt cursor
1885  */
1886 static inline void net_pkt_cursor_backup(struct net_pkt *pkt,
1887 					 struct net_pkt_cursor *backup)
1888 {
1889 	backup->buf = pkt->cursor.buf;
1890 	backup->pos = pkt->cursor.pos;
1891 }
1892 
1893 /**
1894  * @brief Restore net_pkt cursor from a backup
1895  *
1896  * @param pkt    The net_pkt whose cursor is going to be restored
1897  * @param backup The cursor from where to restore net_pkt cursor
1898  */
1899 static inline void net_pkt_cursor_restore(struct net_pkt *pkt,
1900 					  struct net_pkt_cursor *backup)
1901 {
1902 	pkt->cursor.buf = backup->buf;
1903 	pkt->cursor.pos = backup->pos;
1904 }
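
/* Example (illustrative sketch): peeking at data without permanently moving
 * the packet cursor, by backing the cursor up before reading and restoring it
 * afterwards. net_pkt_read_be16() is documented further below.
 *
 *	struct net_pkt_cursor backup;
 *	uint16_t field;
 *
 *	net_pkt_cursor_backup(pkt, &backup);
 *
 *	if (net_pkt_read_be16(pkt, &field) < 0) {
 *		return -ENOBUFS;
 *	}
 *
 *	net_pkt_cursor_restore(pkt, &backup);
 */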
1905 
1906 /**
1907  * @brief Returns current position of the cursor
1908  *
1909  * @param pkt The net_pkt whose cursor position is going to be returned
1910  *
1911  * @return cursor's position
1912  */
1913 static inline void *net_pkt_cursor_get_pos(struct net_pkt *pkt)
1914 {
1915 	return pkt->cursor.pos;
1916 }
1917 
1918 /**
1919  * @brief Skip some data from a net_pkt
1920  *
1921  * @details net_pkt's cursor should be properly initialized.
1922  *          Cursor position will be updated after the operation.
1923  *          Depending on the value of the pkt->overwrite bit, this function
1924  *          may also affect the buffer length. If overwrite is true, it only
1925  *          advances the cursor by the requested length. If it is false, it
1926  *          does the same, but whenever the cursor is already at the end of
1927  *          the existing data it also increments the buffer length. In that
1928  *          case its behavior is just like net_pkt_write() or
1929  *          net_pkt_memset(), the difference being that it does not touch the
1930  *          buffer content itself (which may then be just garbage).
1931  *
1932  * @param pkt    The net_pkt whose cursor will be updated to skip given
1933  *               amount of data from the buffer.
1934  * @param length Amount of data to skip in the buffer
1935  *
1936  * @return 0 on success, negative errno code otherwise.
1937  */
1938 int net_pkt_skip(struct net_pkt *pkt, size_t length);
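
/* Example (illustrative sketch): positioning the cursor past an already
 * written header of hdr_len bytes (a hypothetical variable) before handling
 * the payload. The packet is put in overwrite mode first, using the
 * net_pkt_set_overwrite() helper declared earlier in this file, so that
 * skipping only moves the cursor and does not grow the buffer.
 *
 *	net_pkt_cursor_init(pkt);
 *	net_pkt_set_overwrite(pkt, true);
 *
 *	if (net_pkt_skip(pkt, hdr_len) < 0) {
 *		return -ENOBUFS;
 *	}
 */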
1939 
1940 /**
1941  * @brief Memset some data in a net_pkt
1942  *
1943  * @details net_pkt's cursor should be properly initialized and,
1944  *          if needed, positioned using net_pkt_skip.
1945  *          Cursor position will be updated after the operation.
1946  *
1947  * @param pkt    The net_pkt whose buffer to fill starting at the current
1948  *               cursor position.
1949  * @param byte   The byte to write in memory
1950  * @param length Amount of data to memset with given byte
1951  *
1952  * @return 0 on success, negative errno code otherwise.
1953  */
1954 int net_pkt_memset(struct net_pkt *pkt, int byte, size_t length);
1955 
1956 /**
1957  * @brief Copy data from a packet into another one.
1958  *
1959  * @details Both net_pkt cursors should be properly initialized and,
1960  *          if needed, positioned using net_pkt_skip.
1961  *          The cursors will be updated after the operation.
1962  *
1963  * @param pkt_dst Destination network packet.
1964  * @param pkt_src Source network packet.
1965  * @param length  Length of data to be copied.
1966  *
1967  * @return 0 on success, negative errno code otherwise.
1968  */
1969 int net_pkt_copy(struct net_pkt *pkt_dst,
1970 		 struct net_pkt *pkt_src,
1971 		 size_t length);
1972 
1973 /**
1974  * @brief Clone pkt and its buffer. The cloned packet will be allocated from
1975  *        the same pool as the original one.
1976  *
1977  * @param pkt Original pkt to be cloned
1978  * @param timeout Timeout to wait for free buffer
1979  *
1980  * @return NULL if error, cloned packet otherwise.
1981  */
1982 struct net_pkt *net_pkt_clone(struct net_pkt *pkt, k_timeout_t timeout);
1983 
1984 /**
1985  * @brief Clone pkt and its buffer. The cloned packet will be allocated from
1986  *        the RX packet pool.
1987  *
1988  * @param pkt Original pkt to be cloned
1989  * @param timeout Timeout to wait for free buffer
1990  *
1991  * @return NULL if error, cloned packet otherwise.
1992  */
1993 struct net_pkt *net_pkt_rx_clone(struct net_pkt *pkt, k_timeout_t timeout);
1994 
1995 /**
1996  * @brief Clone pkt and increase the refcount of its buffer.
1997  *
1998  * @param pkt Original pkt to be shallow cloned
1999  * @param timeout Timeout to wait for free packet
2000  *
2001  * @return NULL if error, cloned packet otherwise.
2002  */
2003 struct net_pkt *net_pkt_shallow_clone(struct net_pkt *pkt,
2004 				      k_timeout_t timeout);
2005 
2006 /**
2007  * @brief Read some data from a net_pkt
2008  *
2009  * @details net_pkt's cursor should be properly initialized and,
2010  *          if needed, positioned using net_pkt_skip.
2011  *          Cursor position will be updated after the operation.
2012  *
2013  * @param pkt    The network packet from where to read some data
2014  * @param data   The destination buffer where to copy the data
2015  * @param length The amount of data to copy
2016  *
2017  * @return 0 on success, negative errno code otherwise.
2018  */
2019 int net_pkt_read(struct net_pkt *pkt, void *data, size_t length);
2020 
2021 /* Read uint8_t data from a net_pkt */
2022 static inline int net_pkt_read_u8(struct net_pkt *pkt, uint8_t *data)
2023 {
2024 	return net_pkt_read(pkt, data, 1);
2025 }
2026 
2027 /**
2028  * @brief Read uint16_t big endian data from a net_pkt
2029  *
2030  * @details net_pkt's cursor should be properly initialized and,
2031  *          if needed, positioned using net_pkt_skip.
2032  *          Cursor position will be updated after the operation.
2033  *
2034  * @param pkt  The network packet from where to read
2035  * @param data The destination uint16_t where to copy the data
2036  *
2037  * @return 0 on success, negative errno code otherwise.
2038  */
2039 int net_pkt_read_be16(struct net_pkt *pkt, uint16_t *data);
2040 
2041 /**
2042  * @brief Read uint16_t little endian data from a net_pkt
2043  *
2044  * @details net_pkt's cursor should be properly initialized and,
2045  *          if needed, positioned using net_pkt_skip.
2046  *          Cursor position will be updated after the operation.
2047  *
2048  * @param pkt  The network packet from where to read
2049  * @param data The destination uint16_t where to copy the data
2050  *
2051  * @return 0 on success, negative errno code otherwise.
2052  */
2053 int net_pkt_read_le16(struct net_pkt *pkt, uint16_t *data);
2054 
2055 /**
2056  * @brief Read uint32_t big endian data from a net_pkt
2057  *
2058  * @details net_pkt's cursor should be properly initialized and,
2059  *          if needed, positioned using net_pkt_skip.
2060  *          Cursor position will be updated after the operation.
2061  *
2062  * @param pkt  The network packet from where to read
2063  * @param data The destination uint32_t where to copy the data
2064  *
2065  * @return 0 on success, negative errno code otherwise.
2066  */
2067 int net_pkt_read_be32(struct net_pkt *pkt, uint32_t *data);
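
/* Example (illustrative sketch): reading two 16-bit big endian fields (for
 * instance source and destination ports) from the current cursor position.
 *
 *	uint16_t src_port;
 *	uint16_t dst_port;
 *
 *	if (net_pkt_read_be16(pkt, &src_port) < 0 ||
 *	    net_pkt_read_be16(pkt, &dst_port) < 0) {
 *		return -ENOBUFS;
 *	}
 */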
2068 
2069 /**
2070  * @brief Write data into a net_pkt
2071  *
2072  * @details net_pkt's cursor should be properly initialized and,
2073  *          if needed, positioned using net_pkt_skip.
2074  *          Cursor position will be updated after the operation.
2075  *
2076  * @param pkt    The network packet where to write
2077  * @param data   Data to be written
2078  * @param length Length of the data to be written
2079  *
2080  * @return 0 on success, negative errno code otherwise.
2081  */
2082 int net_pkt_write(struct net_pkt *pkt, const void *data, size_t length);
2083 
2084 /* Write uint8_t data into a net_pkt. */
2085 static inline int net_pkt_write_u8(struct net_pkt *pkt, uint8_t data)
2086 {
2087 	return net_pkt_write(pkt, &data, sizeof(uint8_t));
2088 }
2089 
2090 /* Write uint16_t big endian data into a net_pkt. */
2091 static inline int net_pkt_write_be16(struct net_pkt *pkt, uint16_t data)
2092 {
2093 	uint16_t data_be16 = htons(data);
2094 
2095 	return net_pkt_write(pkt, &data_be16, sizeof(uint16_t));
2096 }
2097 
2098 /* Write uint32_t big endian data into a net_pkt. */
2099 static inline int net_pkt_write_be32(struct net_pkt *pkt, uint32_t data)
2100 {
2101 	uint32_t data_be32 = htonl(data);
2102 
2103 	return net_pkt_write(pkt, &data_be32, sizeof(uint32_t));
2104 }
2105 
2106 /* Write uint32_t little endian data into a net_pkt. */
2107 static inline int net_pkt_write_le32(struct net_pkt *pkt, uint32_t data)
2108 {
2109 	uint32_t data_le32 = sys_cpu_to_le32(data);
2110 
2111 	return net_pkt_write(pkt, &data_le32, sizeof(uint32_t));
2112 }
2113 
2114 /* Write uint16_t little endian data into a net_pkt. */
2115 static inline int net_pkt_write_le16(struct net_pkt *pkt, uint16_t data)
2116 {
2117 	uint16_t data_le16 = sys_cpu_to_le16(data);
2118 
2119 	return net_pkt_write(pkt, &data_le16, sizeof(uint16_t));
2120 }
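
/* Example (illustrative sketch): serializing a small header-like structure
 * field by field with the endianness-aware write helpers. The type, length
 * and sequence variables are hypothetical.
 *
 *	if (net_pkt_write_u8(pkt, type) < 0 ||
 *	    net_pkt_write_be16(pkt, length) < 0 ||
 *	    net_pkt_write_be32(pkt, sequence) < 0) {
 *		return -ENOBUFS;
 *	}
 */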
2121 
2122 /**
2123  * @brief Get the amount of data which can be read from current cursor position
2124  *
2125  * @param pkt Network packet
2126  *
2127  * @return Amount of data which can be read from current pkt cursor
2128  */
2129 size_t net_pkt_remaining_data(struct net_pkt *pkt);
2130 
2131 /**
2132  * @brief Update the overall length of a packet
2133  *
2134  * @details Unlike net_pkt_pull() below, this does not take the packet cursor
2135  *          into account. It is mainly a helper dedicated to the IPv4 and IPv6
2136  *          input functions. It shrinks the overall length down to the given value.
2137  *
2138  * @param pkt    Network packet
2139  * @param length The new length of the packet
2140  *
2141  * @return 0 on success, negative errno code otherwise.
2142  */
2143 int net_pkt_update_length(struct net_pkt *pkt, size_t length);
2144 
2145 /**
2146  * @brief Remove data from the packet at current location
2147  *
2148  * @details net_pkt's cursor should be properly initialized and,
2149  *          if needed, properly positioned using net_pkt_skip()/read()/write().
2150  *          Note that net_pkt's cursor is reset by this function.
2151  *
2152  * @param pkt    Network packet
2153  * @param length Number of bytes to be removed
2154  *
2155  * @return 0 on success, negative errno code otherwise.
2156  */
2157 int net_pkt_pull(struct net_pkt *pkt, size_t length);
2158 
2159 /**
2160  * @brief Get the actual offset in the packet from its cursor
2161  *
2162  * @param pkt Network packet.
2163  *
2164  * @return a valid offset on success, 0 otherwise as there is nothing that
2165  *         can be done to evaluate the offset.
2166  */
2167 uint16_t net_pkt_get_current_offset(struct net_pkt *pkt);
2168 
2169 /**
2170  * @brief Check if a data size could fit contiguously
2171  *
2172  * @details net_pkt's cursor should be properly initialized and,
2173  *          if needed, positioned using net_pkt_skip.
2174  *
2175  * @param pkt  Network packet.
2176  * @param size The size to check for contiguity
2177  *
2178  * @return true if that is the case, false otherwise.
2179  */
2180 bool net_pkt_is_contiguous(struct net_pkt *pkt, size_t size);
2181 
2182 /**
2183  * Get the contiguous buffer space
2184  *
2185  * @param pkt Network packet
2186  *
2187  * @return The available contiguous buffer space in bytes starting from the
2188  *         current cursor position. 0 in case of an error.
2189  */
2190 size_t net_pkt_get_contiguous_len(struct net_pkt *pkt);
2191 
2192 struct net_pkt_data_access {
2193 #if !defined(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)
2194 	void *data;
2195 #endif
2196 	const size_t size;
2197 };
2198 
2199 #if defined(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)
2200 #define NET_PKT_DATA_ACCESS_DEFINE(_name, _type)		\
2201 	struct net_pkt_data_access _name = {			\
2202 		.size = sizeof(_type),				\
2203 	}
2204 
2205 #define NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(_name, _type)	\
2206 	NET_PKT_DATA_ACCESS_DEFINE(_name, _type)
2207 
2208 #else
2209 #define NET_PKT_DATA_ACCESS_DEFINE(_name, _type)		\
2210 	_type _hdr_##_name;					\
2211 	struct net_pkt_data_access _name = {			\
2212 		.data = &_hdr_##_name,				\
2213 		.size = sizeof(_type),				\
2214 	}
2215 
2216 #define NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(_name, _type)	\
2217 	struct net_pkt_data_access _name = {			\
2218 		.data = NULL,					\
2219 		.size = sizeof(_type),				\
2220 	}
2221 
2222 #endif /* CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS */
2223 
2224 /**
2225  * @brief Get data from a network packet in a contiguous way
2226  *
2227  * @details net_pkt's cursor should be properly initialized and,
2228  *          if needed, positioned using net_pkt_skip. Unlike other functions,
2229  *          cursor position will not be updated after the operation.
2230  *
2231  * @param pkt    The network packet from where to get the data.
2232  * @param access A pointer to a valid net_pkt_data_access describing the
2233  *        data to get in a contiguous way.
2234  *
2235  * @return a pointer to the requested contiguous data, NULL otherwise.
2236  */
2237 void *net_pkt_get_data(struct net_pkt *pkt,
2238 		       struct net_pkt_data_access *access);
2239 
2240 /**
2241  * @brief Set contiguous data into a network packet
2242  *
2243  * @details net_pkt's cursor should be properly initialized and,
2244  *          if needed, positioned using net_pkt_skip.
2245  *          Cursor position will be updated after the operation.
2246  *
2247  * @param pkt    The network packet to where the data should be set.
2248  * @param access A pointer to a valid net_pkt_data_access describing the
2249  *        data to set.
2250  *
2251  * @return 0 on success, a negative errno otherwise.
2252  */
2253 int net_pkt_set_data(struct net_pkt *pkt,
2254 		     struct net_pkt_data_access *access);
2255 
2256 /**
2257  * Acknowledge previously contiguous data taken from a network packet.
2258  * The packet needs to be set to overwrite mode.
2259  */
2260 static inline int net_pkt_acknowledge_data(struct net_pkt *pkt,
2261 					   struct net_pkt_data_access *access)
2262 {
2263 	return net_pkt_skip(pkt, access->size);
2264 }
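
/* Example (illustrative sketch): accessing a protocol header contiguously via
 * the data access helpers above. The header structure below is hypothetical;
 * the same pattern is used with real protocol header types.
 *
 *	struct my_proto_hdr {
 *		uint8_t type;
 *		uint8_t flags;
 *		uint16_t len;
 *	} __packed;
 *
 *	NET_PKT_DATA_ACCESS_DEFINE(my_access, struct my_proto_hdr);
 *	struct my_proto_hdr *hdr;
 *
 *	hdr = (struct my_proto_hdr *)net_pkt_get_data(pkt, &my_access);
 *	if (hdr == NULL) {
 *		return -ENOBUFS;
 *	}
 *
 *	hdr->type = 1U;
 *	hdr->flags = 0U;
 *	hdr->len = htons(sizeof(struct my_proto_hdr));
 *
 *	if (net_pkt_set_data(pkt, &my_access) < 0) {
 *		return -ENOBUFS;
 *	}
 */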
2265 
2266 /**
2267  * @}
2268  */
2269 
2270 #ifdef __cplusplus
2271 }
2272 #endif
2273 
2274 #endif /* ZEPHYR_INCLUDE_NET_NET_PKT_H_ */
2275