1 /** @file
2  * @brief Network packet buffer descriptor API
3  *
4  * Network data is passed between different parts of the stack via
5  * net_buf struct.
6  */
7 
8 /*
9  * Copyright (c) 2016 Intel Corporation
10  *
11  * SPDX-License-Identifier: Apache-2.0
12  */
13 
14 /* Data buffer API - used for all data to/from net */
15 
16 #ifndef ZEPHYR_INCLUDE_NET_NET_PKT_H_
17 #define ZEPHYR_INCLUDE_NET_NET_PKT_H_
18 
19 #include <zephyr/types.h>
20 #include <stdbool.h>
21 
22 #include <zephyr/net/buf.h>
23 
24 #if defined(CONFIG_IEEE802154)
25 #include <zephyr/net/ieee802154_pkt.h>
26 #endif
27 #include <zephyr/net/net_core.h>
28 #include <zephyr/net/net_linkaddr.h>
29 #include <zephyr/net/net_ip.h>
30 #include <zephyr/net/net_if.h>
31 #include <zephyr/net/net_context.h>
32 #include <zephyr/net/net_time.h>
33 #include <zephyr/net/ethernet_vlan.h>
34 #include <zephyr/net/ptp_time.h>
35 
36 #ifdef __cplusplus
37 extern "C" {
38 #endif
39 
40 /**
41  * @brief Network packet management library
42  * @defgroup net_pkt Network Packet Library
43  * @since 1.5
44  * @version 0.8.0
45  * @ingroup networking
46  * @{
47  */
48 
49 struct net_context;
50 
51 /** @cond INTERNAL_HIDDEN */
52 
53 /* buffer cursor used in net_pkt */
54 struct net_pkt_cursor {
55 	/** Current net_buf pointer by the cursor */
56 	struct net_buf *buf;
57 	/** Current position in the data buffer of the net_buf */
58 	uint8_t *pos;
59 };
60 
61 /** @endcond */
62 
63 /**
64  * @brief Network packet.
65  *
66  * Note that if you add new fields into net_pkt, remember to update
67  * net_pkt_clone() function.
68  */
69 struct net_pkt {
70 	/**
71 	 * The fifo is used by RX/TX threads and by socket layer. The net_pkt
72 	 * is queued via fifo to the processing thread.
73 	 */
74 	intptr_t fifo;
75 
76 	/** Slab pointer from where it belongs to */
77 	struct k_mem_slab *slab;
78 
79 	/** buffer holding the packet */
80 	union {
81 		struct net_buf *frags;   /**< buffer fragment */
82 		struct net_buf *buffer;  /**< alias to a buffer fragment */
83 	};
84 
85 	/** Internal buffer iterator used for reading/writing */
86 	struct net_pkt_cursor cursor;
87 
88 	/** Network connection context */
89 	struct net_context *context;
90 
91 	/** Network interface */
92 	struct net_if *iface;
93 
94 	/** @cond ignore */
95 
96 #if defined(CONFIG_NET_TCP)
97 	/** Allow placing the packet into sys_slist_t */
98 	sys_snode_t next;
99 #endif
100 #if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
101 	struct net_if *orig_iface; /* Original network interface */
102 #endif
103 
104 #if defined(CONFIG_NET_PKT_TIMESTAMP) || defined(CONFIG_NET_PKT_TXTIME)
105 	/**
106 	 * TX or RX timestamp if available
107 	 *
108 	 * For packets that have been sent over the medium, the timestamp refers
109 	 * to the time the message timestamp point was encountered at the
110 	 * reference plane.
111 	 *
112 	 * Unsent packages can be scheduled by setting the timestamp to a future
113 	 * point in time.
114 	 *
115 	 * All timestamps refer to the network subsystem's local clock.
116 	 *
117 	 * See @ref net_ptp_time for definitions of local clock, message
118 	 * timestamp point and reference plane. See @ref net_time_t for
119 	 * semantics of the network reference clock.
120 	 *
121 	 * TODO: Replace with net_time_t to decouple from PTP.
122 	 */
123 	struct net_ptp_time timestamp;
124 #endif
125 
126 #if defined(CONFIG_NET_PKT_RXTIME_STATS) || defined(CONFIG_NET_PKT_TXTIME_STATS)
127 	struct {
128 		/** Create time in cycles */
129 		uint32_t create_time;
130 
131 #if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL) || \
132 	defined(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)
133 		/** Collect extra statistics for net_pkt processing
134 		 * from various points in the IP stack. See networking
135 		 * documentation where these points are located and how
136 		 * to interpret the results.
137 		 */
138 		struct {
139 			uint32_t stat[NET_PKT_DETAIL_STATS_COUNT];
140 			int count;
141 		} detail;
142 #endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL ||
143 	  CONFIG_NET_PKT_RXTIME_STATS_DETAIL */
144 	};
145 #endif /* CONFIG_NET_PKT_RXTIME_STATS || CONFIG_NET_PKT_TXTIME_STATS */
146 
147 	/** Reference counter */
148 	atomic_t atomic_ref;
149 
150 	/* Filled by layer 2 when network packet is received. */
151 	struct net_linkaddr lladdr_src;
152 	struct net_linkaddr lladdr_dst;
153 	uint16_t ll_proto_type;
154 
155 #if defined(CONFIG_NET_IP)
156 	uint8_t ip_hdr_len;	/* pre-filled in order to avoid func call */
157 #endif
158 
159 	uint8_t overwrite : 1;	 /* Is packet content being overwritten? */
160 	uint8_t eof : 1;	 /* Last packet before EOF */
161 	uint8_t ptp_pkt : 1;	 /* For outgoing packet: is this packet
162 				  * a L2 PTP packet.
163 				  * Used only if defined (CONFIG_NET_L2_PTP)
164 				  */
165 	uint8_t forwarding : 1;	 /* Are we forwarding this pkt
166 				  * Used only if defined(CONFIG_NET_ROUTE)
167 				  */
168 	uint8_t family : 3;	 /* Address family, see net_ip.h */
169 
170 	/* bitfield byte alignment boundary */
171 
172 #if defined(CONFIG_NET_IPV4_ACD)
173 	uint8_t ipv4_acd_arp_msg : 1;  /* Is this pkt IPv4 conflict detection ARP
174 					* message.
175 					* Note: family needs to be
176 					* AF_INET.
177 					*/
178 #endif
179 #if defined(CONFIG_NET_LLDP)
180 	uint8_t lldp_pkt : 1; /* Is this pkt an LLDP message.
181 			       * Note: family needs to be
182 			       * AF_UNSPEC.
183 			       */
184 #endif
185 	uint8_t ppp_msg : 1; /* This is a PPP message */
186 	uint8_t captured : 1;	  /* Set to 1 if this packet is already being
187 				   * captured
188 				   */
189 	uint8_t l2_bridged : 1;	  /* set to 1 if this packet comes from a bridge
190 				   * and already contains its L2 header to be
191 				   * preserved. Useful only if
192 				   * defined(CONFIG_NET_ETHERNET_BRIDGE).
193 				   */
194 	uint8_t l2_processed : 1; /* Set to 1 if this packet has already been
195 				   * processed by the L2
196 				   */
197 	uint8_t chksum_done : 1; /* Checksum has already been computed for
198 				  * the packet.
199 				  */
200 #if defined(CONFIG_NET_IP_FRAGMENT)
201 	uint8_t ip_reassembled : 1; /* Packet is a reassembled IP packet. */
202 #endif
203 #if defined(CONFIG_NET_PKT_TIMESTAMP)
204 	uint8_t tx_timestamping : 1; /** Timestamp transmitted packet */
205 	uint8_t rx_timestamping : 1; /** Timestamp received packet */
206 #endif
207 	/* bitfield byte alignment boundary */
208 
209 #if defined(CONFIG_NET_IP)
210 	union {
211 		/* IPv6 hop limit or IPv4 ttl for this network packet.
212 		 * The value is shared between IPv6 and IPv4.
213 		 */
214 #if defined(CONFIG_NET_IPV6)
215 		uint8_t ipv6_hop_limit;
216 #endif
217 #if defined(CONFIG_NET_IPV4)
218 		uint8_t ipv4_ttl;
219 #endif
220 	};
221 
222 	union {
223 #if defined(CONFIG_NET_IPV4)
224 		uint8_t ipv4_opts_len; /* length of IPv4 header options */
225 #endif
226 #if defined(CONFIG_NET_IPV6)
227 		uint16_t ipv6_ext_len; /* length of extension headers */
228 #endif
229 	};
230 
231 #if defined(CONFIG_NET_IP_FRAGMENT)
232 	union {
233 #if defined(CONFIG_NET_IPV4_FRAGMENT)
234 		struct {
235 			uint16_t flags;		/* Fragment offset and M (More Fragment) flag */
236 			uint16_t id;		/* Fragment ID */
237 		} ipv4_fragment;
238 #endif /* CONFIG_NET_IPV4_FRAGMENT */
239 #if defined(CONFIG_NET_IPV6_FRAGMENT)
240 		struct {
241 			uint16_t flags;		/* Fragment offset and M (More Fragment) flag */
242 			uint32_t id;		/* Fragment id */
243 			uint16_t hdr_start;	/* Where starts the fragment header */
244 		} ipv6_fragment;
245 #endif /* CONFIG_NET_IPV6_FRAGMENT */
246 	};
247 #endif /* CONFIG_NET_IP_FRAGMENT */
248 
249 #if defined(CONFIG_NET_IPV6)
250 	/* Where is the start of the last header before payload data
251 	 * in IPv6 packet. This is offset value from start of the IPv6
252 	 * packet. Note that this value should be updated by who ever
253 	 * adds IPv6 extension headers to the network packet.
254 	 */
255 	uint16_t ipv6_prev_hdr_start;
256 
257 	uint8_t ipv6_ext_opt_len; /* IPv6 ND option length */
258 	uint8_t ipv6_next_hdr;	/* What is the very first next header */
259 #endif /* CONFIG_NET_IPV6 */
260 
261 #if defined(CONFIG_NET_IP_DSCP_ECN)
262 	/** IPv4/IPv6 Differentiated Services Code Point value. */
263 	uint8_t ip_dscp : 6;
264 
265 	/** IPv4/IPv6 Explicit Congestion Notification value. */
266 	uint8_t ip_ecn : 2;
267 #endif /* CONFIG_NET_IP_DSCP_ECN */
268 #endif /* CONFIG_NET_IP */
269 
270 #if defined(CONFIG_NET_VLAN)
271 	/* VLAN TCI (Tag Control Information). This contains the Priority
272 	 * Code Point (PCP), Drop Eligible Indicator (DEI) and VLAN
273 	 * Identifier (VID, called more commonly VLAN tag). This value is
274 	 * kept in host byte order.
275 	 */
276 	uint16_t vlan_tci;
277 #endif /* CONFIG_NET_VLAN */
278 
279 #if defined(NET_PKT_HAS_CONTROL_BLOCK)
280 	/* TODO: Evolve this into a union of orthogonal
281 	 *       control block declarations if further L2
282 	 *       stacks require L2-specific attributes.
283 	 */
284 #if defined(CONFIG_IEEE802154)
285 	/* The following structure requires a 4-byte alignment
286 	 * boundary to avoid padding.
287 	 */
288 	struct net_pkt_cb_ieee802154 cb;
289 #endif /* CONFIG_IEEE802154 */
290 #endif /* NET_PKT_HAS_CONTROL_BLOCK */
291 
292 	/** Network packet priority, can be left out in which case packet
293 	 * is not prioritised.
294 	 */
295 	uint8_t priority;
296 
297 #if defined(CONFIG_NET_OFFLOAD) || defined(CONFIG_NET_L2_IPIP)
298 	/* Remote address of the received packet. This is only used by
299 	 * network interfaces with an offloaded TCP/IP stack, or if we
300 	 * have network tunneling in use.
301 	 */
302 	union {
303 		struct sockaddr remote;
304 
305 		/* This will make sure that there is enough storage to store
306 		 * the address struct. The access to value is via remote
307 		 * address.
308 		 */
309 		struct sockaddr_storage remote_storage;
310 	};
311 #endif /* CONFIG_NET_OFFLOAD */
312 
313 #if defined(CONFIG_NET_CAPTURE_COOKED_MODE)
314 	/* Tell the capture api that this is a captured packet */
315 	uint8_t cooked_mode_pkt : 1;
316 #endif /* CONFIG_NET_CAPTURE_COOKED_MODE */
317 
318 	/* @endcond */
319 };
320 
321 /** @cond ignore */
322 
323 /* The interface real ll address */
net_pkt_lladdr_if(struct net_pkt * pkt)324 static inline struct net_linkaddr *net_pkt_lladdr_if(struct net_pkt *pkt)
325 {
326 	return net_if_get_link_addr(pkt->iface);
327 }
328 
net_pkt_context(struct net_pkt * pkt)329 static inline struct net_context *net_pkt_context(struct net_pkt *pkt)
330 {
331 	return pkt->context;
332 }
333 
net_pkt_set_context(struct net_pkt * pkt,struct net_context * ctx)334 static inline void net_pkt_set_context(struct net_pkt *pkt,
335 				       struct net_context *ctx)
336 {
337 	pkt->context = ctx;
338 }
339 
net_pkt_iface(struct net_pkt * pkt)340 static inline struct net_if *net_pkt_iface(struct net_pkt *pkt)
341 {
342 	return pkt->iface;
343 }
344 
net_pkt_set_iface(struct net_pkt * pkt,struct net_if * iface)345 static inline void net_pkt_set_iface(struct net_pkt *pkt, struct net_if *iface)
346 {
347 	pkt->iface = iface;
348 
349 	/* If the network interface is set in pkt, then also set the type of
350 	 * the network address that is stored in pkt. This is done here so
351 	 * that the address type is properly set and is not forgotten.
352 	 */
353 	if (iface) {
354 		uint8_t type = net_if_get_link_addr(iface)->type;
355 
356 		pkt->lladdr_src.type = type;
357 		pkt->lladdr_dst.type = type;
358 	}
359 }
360 
net_pkt_orig_iface(struct net_pkt * pkt)361 static inline struct net_if *net_pkt_orig_iface(struct net_pkt *pkt)
362 {
363 #if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
364 	return pkt->orig_iface;
365 #else
366 	return pkt->iface;
367 #endif
368 }
369 
net_pkt_set_orig_iface(struct net_pkt * pkt,struct net_if * iface)370 static inline void net_pkt_set_orig_iface(struct net_pkt *pkt,
371 					  struct net_if *iface)
372 {
373 #if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
374 	pkt->orig_iface = iface;
375 #else
376 	ARG_UNUSED(pkt);
377 	ARG_UNUSED(iface);
378 #endif
379 }
380 
net_pkt_family(struct net_pkt * pkt)381 static inline uint8_t net_pkt_family(struct net_pkt *pkt)
382 {
383 	return pkt->family;
384 }
385 
net_pkt_set_family(struct net_pkt * pkt,uint8_t family)386 static inline void net_pkt_set_family(struct net_pkt *pkt, uint8_t family)
387 {
388 	pkt->family = family;
389 }
390 
net_pkt_is_ptp(struct net_pkt * pkt)391 static inline bool net_pkt_is_ptp(struct net_pkt *pkt)
392 {
393 	return !!(pkt->ptp_pkt);
394 }
395 
net_pkt_set_ptp(struct net_pkt * pkt,bool is_ptp)396 static inline void net_pkt_set_ptp(struct net_pkt *pkt, bool is_ptp)
397 {
398 	pkt->ptp_pkt = is_ptp;
399 }
400 
net_pkt_is_tx_timestamping(struct net_pkt * pkt)401 static inline bool net_pkt_is_tx_timestamping(struct net_pkt *pkt)
402 {
403 #if defined(CONFIG_NET_PKT_TIMESTAMP)
404 	return !!(pkt->tx_timestamping);
405 #else
406 	ARG_UNUSED(pkt);
407 
408 	return false;
409 #endif
410 }
411 
net_pkt_set_tx_timestamping(struct net_pkt * pkt,bool is_timestamping)412 static inline void net_pkt_set_tx_timestamping(struct net_pkt *pkt, bool is_timestamping)
413 {
414 #if defined(CONFIG_NET_PKT_TIMESTAMP)
415 	pkt->tx_timestamping = is_timestamping;
416 #else
417 	ARG_UNUSED(pkt);
418 	ARG_UNUSED(is_timestamping);
419 #endif
420 }
421 
net_pkt_is_rx_timestamping(struct net_pkt * pkt)422 static inline bool net_pkt_is_rx_timestamping(struct net_pkt *pkt)
423 {
424 #if defined(CONFIG_NET_PKT_TIMESTAMP)
425 	return !!(pkt->rx_timestamping);
426 #else
427 	ARG_UNUSED(pkt);
428 
429 	return false;
430 #endif
431 }
432 
net_pkt_set_rx_timestamping(struct net_pkt * pkt,bool is_timestamping)433 static inline void net_pkt_set_rx_timestamping(struct net_pkt *pkt, bool is_timestamping)
434 {
435 #if defined(CONFIG_NET_PKT_TIMESTAMP)
436 	pkt->rx_timestamping = is_timestamping;
437 #else
438 	ARG_UNUSED(pkt);
439 	ARG_UNUSED(is_timestamping);
440 #endif
441 }
442 
net_pkt_is_captured(struct net_pkt * pkt)443 static inline bool net_pkt_is_captured(struct net_pkt *pkt)
444 {
445 	return !!(pkt->captured);
446 }
447 
net_pkt_set_captured(struct net_pkt * pkt,bool is_captured)448 static inline void net_pkt_set_captured(struct net_pkt *pkt, bool is_captured)
449 {
450 	pkt->captured = is_captured;
451 }
452 
net_pkt_is_l2_bridged(struct net_pkt * pkt)453 static inline bool net_pkt_is_l2_bridged(struct net_pkt *pkt)
454 {
455 	return IS_ENABLED(CONFIG_NET_ETHERNET_BRIDGE) ? !!(pkt->l2_bridged) : 0;
456 }
457 
net_pkt_set_l2_bridged(struct net_pkt * pkt,bool is_l2_bridged)458 static inline void net_pkt_set_l2_bridged(struct net_pkt *pkt, bool is_l2_bridged)
459 {
460 	if (IS_ENABLED(CONFIG_NET_ETHERNET_BRIDGE)) {
461 		pkt->l2_bridged = is_l2_bridged;
462 	}
463 }
464 
net_pkt_is_l2_processed(struct net_pkt * pkt)465 static inline bool net_pkt_is_l2_processed(struct net_pkt *pkt)
466 {
467 	return !!(pkt->l2_processed);
468 }
469 
net_pkt_set_l2_processed(struct net_pkt * pkt,bool is_l2_processed)470 static inline void net_pkt_set_l2_processed(struct net_pkt *pkt,
471 					    bool is_l2_processed)
472 {
473 	pkt->l2_processed = is_l2_processed;
474 }
475 
net_pkt_is_chksum_done(struct net_pkt * pkt)476 static inline bool net_pkt_is_chksum_done(struct net_pkt *pkt)
477 {
478 	return !!(pkt->chksum_done);
479 }
480 
net_pkt_set_chksum_done(struct net_pkt * pkt,bool is_chksum_done)481 static inline void net_pkt_set_chksum_done(struct net_pkt *pkt,
482 					   bool is_chksum_done)
483 {
484 	pkt->chksum_done = is_chksum_done;
485 }
486 
net_pkt_ip_hdr_len(struct net_pkt * pkt)487 static inline uint8_t net_pkt_ip_hdr_len(struct net_pkt *pkt)
488 {
489 #if defined(CONFIG_NET_IP)
490 	return pkt->ip_hdr_len;
491 #else
492 	ARG_UNUSED(pkt);
493 
494 	return 0;
495 #endif
496 }
497 
net_pkt_set_ip_hdr_len(struct net_pkt * pkt,uint8_t len)498 static inline void net_pkt_set_ip_hdr_len(struct net_pkt *pkt, uint8_t len)
499 {
500 #if defined(CONFIG_NET_IP)
501 	pkt->ip_hdr_len = len;
502 #else
503 	ARG_UNUSED(pkt);
504 	ARG_UNUSED(len);
505 #endif
506 }
507 
net_pkt_ip_dscp(struct net_pkt * pkt)508 static inline uint8_t net_pkt_ip_dscp(struct net_pkt *pkt)
509 {
510 #if defined(CONFIG_NET_IP_DSCP_ECN)
511 	return pkt->ip_dscp;
512 #else
513 	ARG_UNUSED(pkt);
514 
515 	return 0;
516 #endif
517 }
518 
net_pkt_set_ip_dscp(struct net_pkt * pkt,uint8_t dscp)519 static inline void net_pkt_set_ip_dscp(struct net_pkt *pkt, uint8_t dscp)
520 {
521 #if defined(CONFIG_NET_IP_DSCP_ECN)
522 	pkt->ip_dscp = dscp;
523 #else
524 	ARG_UNUSED(pkt);
525 	ARG_UNUSED(dscp);
526 #endif
527 }
528 
net_pkt_ip_ecn(struct net_pkt * pkt)529 static inline uint8_t net_pkt_ip_ecn(struct net_pkt *pkt)
530 {
531 #if defined(CONFIG_NET_IP_DSCP_ECN)
532 	return pkt->ip_ecn;
533 #else
534 	ARG_UNUSED(pkt);
535 
536 	return 0;
537 #endif
538 }
539 
net_pkt_set_ip_ecn(struct net_pkt * pkt,uint8_t ecn)540 static inline void net_pkt_set_ip_ecn(struct net_pkt *pkt, uint8_t ecn)
541 {
542 #if defined(CONFIG_NET_IP_DSCP_ECN)
543 	pkt->ip_ecn = ecn;
544 #else
545 	ARG_UNUSED(pkt);
546 	ARG_UNUSED(ecn);
547 #endif
548 }
549 
net_pkt_eof(struct net_pkt * pkt)550 static inline uint8_t net_pkt_eof(struct net_pkt *pkt)
551 {
552 	return pkt->eof;
553 }
554 
net_pkt_set_eof(struct net_pkt * pkt,bool eof)555 static inline void net_pkt_set_eof(struct net_pkt *pkt, bool eof)
556 {
557 	pkt->eof = eof;
558 }
559 
net_pkt_forwarding(struct net_pkt * pkt)560 static inline bool net_pkt_forwarding(struct net_pkt *pkt)
561 {
562 	return !!(pkt->forwarding);
563 }
564 
net_pkt_set_forwarding(struct net_pkt * pkt,bool forward)565 static inline void net_pkt_set_forwarding(struct net_pkt *pkt, bool forward)
566 {
567 	pkt->forwarding = forward;
568 }
569 
570 #if defined(CONFIG_NET_IPV4)
net_pkt_ipv4_ttl(struct net_pkt * pkt)571 static inline uint8_t net_pkt_ipv4_ttl(struct net_pkt *pkt)
572 {
573 	return pkt->ipv4_ttl;
574 }
575 
net_pkt_set_ipv4_ttl(struct net_pkt * pkt,uint8_t ttl)576 static inline void net_pkt_set_ipv4_ttl(struct net_pkt *pkt,
577 					uint8_t ttl)
578 {
579 	pkt->ipv4_ttl = ttl;
580 }
581 
net_pkt_ipv4_opts_len(struct net_pkt * pkt)582 static inline uint8_t net_pkt_ipv4_opts_len(struct net_pkt *pkt)
583 {
584 	return pkt->ipv4_opts_len;
585 }
586 
net_pkt_set_ipv4_opts_len(struct net_pkt * pkt,uint8_t opts_len)587 static inline void net_pkt_set_ipv4_opts_len(struct net_pkt *pkt,
588 					     uint8_t opts_len)
589 {
590 	pkt->ipv4_opts_len = opts_len;
591 }
592 #else
net_pkt_ipv4_ttl(struct net_pkt * pkt)593 static inline uint8_t net_pkt_ipv4_ttl(struct net_pkt *pkt)
594 {
595 	ARG_UNUSED(pkt);
596 
597 	return 0;
598 }
599 
net_pkt_set_ipv4_ttl(struct net_pkt * pkt,uint8_t ttl)600 static inline void net_pkt_set_ipv4_ttl(struct net_pkt *pkt,
601 					uint8_t ttl)
602 {
603 	ARG_UNUSED(pkt);
604 	ARG_UNUSED(ttl);
605 }
606 
net_pkt_ipv4_opts_len(struct net_pkt * pkt)607 static inline uint8_t net_pkt_ipv4_opts_len(struct net_pkt *pkt)
608 {
609 	ARG_UNUSED(pkt);
610 	return 0;
611 }
612 
net_pkt_set_ipv4_opts_len(struct net_pkt * pkt,uint8_t opts_len)613 static inline void net_pkt_set_ipv4_opts_len(struct net_pkt *pkt,
614 					     uint8_t opts_len)
615 {
616 	ARG_UNUSED(pkt);
617 	ARG_UNUSED(opts_len);
618 }
619 #endif
620 
621 #if defined(CONFIG_NET_IPV6)
net_pkt_ipv6_ext_opt_len(struct net_pkt * pkt)622 static inline uint8_t net_pkt_ipv6_ext_opt_len(struct net_pkt *pkt)
623 {
624 	return pkt->ipv6_ext_opt_len;
625 }
626 
net_pkt_set_ipv6_ext_opt_len(struct net_pkt * pkt,uint8_t len)627 static inline void net_pkt_set_ipv6_ext_opt_len(struct net_pkt *pkt,
628 						uint8_t len)
629 {
630 	pkt->ipv6_ext_opt_len = len;
631 }
632 
net_pkt_ipv6_next_hdr(struct net_pkt * pkt)633 static inline uint8_t net_pkt_ipv6_next_hdr(struct net_pkt *pkt)
634 {
635 	return pkt->ipv6_next_hdr;
636 }
637 
net_pkt_set_ipv6_next_hdr(struct net_pkt * pkt,uint8_t next_hdr)638 static inline void net_pkt_set_ipv6_next_hdr(struct net_pkt *pkt,
639 					     uint8_t next_hdr)
640 {
641 	pkt->ipv6_next_hdr = next_hdr;
642 }
643 
net_pkt_ipv6_ext_len(struct net_pkt * pkt)644 static inline uint16_t net_pkt_ipv6_ext_len(struct net_pkt *pkt)
645 {
646 	return pkt->ipv6_ext_len;
647 }
648 
net_pkt_set_ipv6_ext_len(struct net_pkt * pkt,uint16_t len)649 static inline void net_pkt_set_ipv6_ext_len(struct net_pkt *pkt, uint16_t len)
650 {
651 	pkt->ipv6_ext_len = len;
652 }
653 
net_pkt_ipv6_hdr_prev(struct net_pkt * pkt)654 static inline uint16_t net_pkt_ipv6_hdr_prev(struct net_pkt *pkt)
655 {
656 	return pkt->ipv6_prev_hdr_start;
657 }
658 
net_pkt_set_ipv6_hdr_prev(struct net_pkt * pkt,uint16_t offset)659 static inline void net_pkt_set_ipv6_hdr_prev(struct net_pkt *pkt,
660 					     uint16_t offset)
661 {
662 	pkt->ipv6_prev_hdr_start = offset;
663 }
664 
net_pkt_ipv6_hop_limit(struct net_pkt * pkt)665 static inline uint8_t net_pkt_ipv6_hop_limit(struct net_pkt *pkt)
666 {
667 	return pkt->ipv6_hop_limit;
668 }
669 
net_pkt_set_ipv6_hop_limit(struct net_pkt * pkt,uint8_t hop_limit)670 static inline void net_pkt_set_ipv6_hop_limit(struct net_pkt *pkt,
671 					      uint8_t hop_limit)
672 {
673 	pkt->ipv6_hop_limit = hop_limit;
674 }
675 #else /* CONFIG_NET_IPV6 */
net_pkt_ipv6_ext_opt_len(struct net_pkt * pkt)676 static inline uint8_t net_pkt_ipv6_ext_opt_len(struct net_pkt *pkt)
677 {
678 	ARG_UNUSED(pkt);
679 
680 	return 0;
681 }
682 
net_pkt_set_ipv6_ext_opt_len(struct net_pkt * pkt,uint8_t len)683 static inline void net_pkt_set_ipv6_ext_opt_len(struct net_pkt *pkt,
684 						uint8_t len)
685 {
686 	ARG_UNUSED(pkt);
687 	ARG_UNUSED(len);
688 }
689 
net_pkt_ipv6_next_hdr(struct net_pkt * pkt)690 static inline uint8_t net_pkt_ipv6_next_hdr(struct net_pkt *pkt)
691 {
692 	ARG_UNUSED(pkt);
693 
694 	return 0;
695 }
696 
net_pkt_set_ipv6_next_hdr(struct net_pkt * pkt,uint8_t next_hdr)697 static inline void net_pkt_set_ipv6_next_hdr(struct net_pkt *pkt,
698 					     uint8_t next_hdr)
699 {
700 	ARG_UNUSED(pkt);
701 	ARG_UNUSED(next_hdr);
702 }
703 
net_pkt_ipv6_ext_len(struct net_pkt * pkt)704 static inline uint16_t net_pkt_ipv6_ext_len(struct net_pkt *pkt)
705 {
706 	ARG_UNUSED(pkt);
707 
708 	return 0;
709 }
710 
net_pkt_set_ipv6_ext_len(struct net_pkt * pkt,uint16_t len)711 static inline void net_pkt_set_ipv6_ext_len(struct net_pkt *pkt, uint16_t len)
712 {
713 	ARG_UNUSED(pkt);
714 	ARG_UNUSED(len);
715 }
716 
net_pkt_ipv6_hdr_prev(struct net_pkt * pkt)717 static inline uint16_t net_pkt_ipv6_hdr_prev(struct net_pkt *pkt)
718 {
719 	ARG_UNUSED(pkt);
720 
721 	return 0;
722 }
723 
net_pkt_set_ipv6_hdr_prev(struct net_pkt * pkt,uint16_t offset)724 static inline void net_pkt_set_ipv6_hdr_prev(struct net_pkt *pkt,
725 					     uint16_t offset)
726 {
727 	ARG_UNUSED(pkt);
728 	ARG_UNUSED(offset);
729 }
730 
net_pkt_ipv6_hop_limit(struct net_pkt * pkt)731 static inline uint8_t net_pkt_ipv6_hop_limit(struct net_pkt *pkt)
732 {
733 	ARG_UNUSED(pkt);
734 
735 	return 0;
736 }
737 
net_pkt_set_ipv6_hop_limit(struct net_pkt * pkt,uint8_t hop_limit)738 static inline void net_pkt_set_ipv6_hop_limit(struct net_pkt *pkt,
739 					      uint8_t hop_limit)
740 {
741 	ARG_UNUSED(pkt);
742 	ARG_UNUSED(hop_limit);
743 }
744 #endif /* CONFIG_NET_IPV6 */
745 
net_pkt_ip_opts_len(struct net_pkt * pkt)746 static inline uint16_t net_pkt_ip_opts_len(struct net_pkt *pkt)
747 {
748 #if defined(CONFIG_NET_IPV6)
749 	return pkt->ipv6_ext_len;
750 #elif defined(CONFIG_NET_IPV4)
751 	return pkt->ipv4_opts_len;
752 #else
753 	ARG_UNUSED(pkt);
754 
755 	return 0;
756 #endif
757 }
758 
759 #if defined(CONFIG_NET_IPV4_FRAGMENT)
net_pkt_ipv4_fragment_offset(struct net_pkt * pkt)760 static inline uint16_t net_pkt_ipv4_fragment_offset(struct net_pkt *pkt)
761 {
762 	return (pkt->ipv4_fragment.flags & NET_IPV4_FRAGH_OFFSET_MASK) * 8;
763 }
764 
net_pkt_ipv4_fragment_more(struct net_pkt * pkt)765 static inline bool net_pkt_ipv4_fragment_more(struct net_pkt *pkt)
766 {
767 	return (pkt->ipv4_fragment.flags & NET_IPV4_MORE_FRAG_MASK) != 0;
768 }
769 
net_pkt_set_ipv4_fragment_flags(struct net_pkt * pkt,uint16_t flags)770 static inline void net_pkt_set_ipv4_fragment_flags(struct net_pkt *pkt, uint16_t flags)
771 {
772 	pkt->ipv4_fragment.flags = flags;
773 }
774 
net_pkt_ipv4_fragment_id(struct net_pkt * pkt)775 static inline uint32_t net_pkt_ipv4_fragment_id(struct net_pkt *pkt)
776 {
777 	return pkt->ipv4_fragment.id;
778 }
779 
net_pkt_set_ipv4_fragment_id(struct net_pkt * pkt,uint32_t id)780 static inline void net_pkt_set_ipv4_fragment_id(struct net_pkt *pkt, uint32_t id)
781 {
782 	pkt->ipv4_fragment.id = id;
783 }
784 #else /* CONFIG_NET_IPV4_FRAGMENT */
net_pkt_ipv4_fragment_offset(struct net_pkt * pkt)785 static inline uint16_t net_pkt_ipv4_fragment_offset(struct net_pkt *pkt)
786 {
787 	ARG_UNUSED(pkt);
788 
789 	return 0;
790 }
791 
net_pkt_ipv4_fragment_more(struct net_pkt * pkt)792 static inline bool net_pkt_ipv4_fragment_more(struct net_pkt *pkt)
793 {
794 	ARG_UNUSED(pkt);
795 
796 	return 0;
797 }
798 
net_pkt_set_ipv4_fragment_flags(struct net_pkt * pkt,uint16_t flags)799 static inline void net_pkt_set_ipv4_fragment_flags(struct net_pkt *pkt, uint16_t flags)
800 {
801 	ARG_UNUSED(pkt);
802 	ARG_UNUSED(flags);
803 }
804 
net_pkt_ipv4_fragment_id(struct net_pkt * pkt)805 static inline uint32_t net_pkt_ipv4_fragment_id(struct net_pkt *pkt)
806 {
807 	ARG_UNUSED(pkt);
808 
809 	return 0;
810 }
811 
net_pkt_set_ipv4_fragment_id(struct net_pkt * pkt,uint32_t id)812 static inline void net_pkt_set_ipv4_fragment_id(struct net_pkt *pkt, uint32_t id)
813 {
814 	ARG_UNUSED(pkt);
815 	ARG_UNUSED(id);
816 }
817 #endif /* CONFIG_NET_IPV4_FRAGMENT */
818 
819 #if defined(CONFIG_NET_IPV6_FRAGMENT)
net_pkt_ipv6_fragment_start(struct net_pkt * pkt)820 static inline uint16_t net_pkt_ipv6_fragment_start(struct net_pkt *pkt)
821 {
822 	return pkt->ipv6_fragment.hdr_start;
823 }
824 
net_pkt_set_ipv6_fragment_start(struct net_pkt * pkt,uint16_t start)825 static inline void net_pkt_set_ipv6_fragment_start(struct net_pkt *pkt,
826 						   uint16_t start)
827 {
828 	pkt->ipv6_fragment.hdr_start = start;
829 }
830 
net_pkt_ipv6_fragment_offset(struct net_pkt * pkt)831 static inline uint16_t net_pkt_ipv6_fragment_offset(struct net_pkt *pkt)
832 {
833 	return pkt->ipv6_fragment.flags & NET_IPV6_FRAGH_OFFSET_MASK;
834 }
net_pkt_ipv6_fragment_more(struct net_pkt * pkt)835 static inline bool net_pkt_ipv6_fragment_more(struct net_pkt *pkt)
836 {
837 	return (pkt->ipv6_fragment.flags & 0x01) != 0;
838 }
839 
net_pkt_set_ipv6_fragment_flags(struct net_pkt * pkt,uint16_t flags)840 static inline void net_pkt_set_ipv6_fragment_flags(struct net_pkt *pkt,
841 						   uint16_t flags)
842 {
843 	pkt->ipv6_fragment.flags = flags;
844 }
845 
net_pkt_ipv6_fragment_id(struct net_pkt * pkt)846 static inline uint32_t net_pkt_ipv6_fragment_id(struct net_pkt *pkt)
847 {
848 	return pkt->ipv6_fragment.id;
849 }
850 
net_pkt_set_ipv6_fragment_id(struct net_pkt * pkt,uint32_t id)851 static inline void net_pkt_set_ipv6_fragment_id(struct net_pkt *pkt,
852 						uint32_t id)
853 {
854 	pkt->ipv6_fragment.id = id;
855 }
856 #else /* CONFIG_NET_IPV6_FRAGMENT */
net_pkt_ipv6_fragment_start(struct net_pkt * pkt)857 static inline uint16_t net_pkt_ipv6_fragment_start(struct net_pkt *pkt)
858 {
859 	ARG_UNUSED(pkt);
860 
861 	return 0;
862 }
863 
net_pkt_set_ipv6_fragment_start(struct net_pkt * pkt,uint16_t start)864 static inline void net_pkt_set_ipv6_fragment_start(struct net_pkt *pkt,
865 						   uint16_t start)
866 {
867 	ARG_UNUSED(pkt);
868 	ARG_UNUSED(start);
869 }
870 
net_pkt_ipv6_fragment_offset(struct net_pkt * pkt)871 static inline uint16_t net_pkt_ipv6_fragment_offset(struct net_pkt *pkt)
872 {
873 	ARG_UNUSED(pkt);
874 
875 	return 0;
876 }
877 
net_pkt_ipv6_fragment_more(struct net_pkt * pkt)878 static inline bool net_pkt_ipv6_fragment_more(struct net_pkt *pkt)
879 {
880 	ARG_UNUSED(pkt);
881 
882 	return 0;
883 }
884 
net_pkt_set_ipv6_fragment_flags(struct net_pkt * pkt,uint16_t flags)885 static inline void net_pkt_set_ipv6_fragment_flags(struct net_pkt *pkt,
886 						   uint16_t flags)
887 {
888 	ARG_UNUSED(pkt);
889 	ARG_UNUSED(flags);
890 }
891 
net_pkt_ipv6_fragment_id(struct net_pkt * pkt)892 static inline uint32_t net_pkt_ipv6_fragment_id(struct net_pkt *pkt)
893 {
894 	ARG_UNUSED(pkt);
895 
896 	return 0;
897 }
898 
net_pkt_set_ipv6_fragment_id(struct net_pkt * pkt,uint32_t id)899 static inline void net_pkt_set_ipv6_fragment_id(struct net_pkt *pkt,
900 						uint32_t id)
901 {
902 	ARG_UNUSED(pkt);
903 	ARG_UNUSED(id);
904 }
905 #endif /* CONFIG_NET_IPV6_FRAGMENT */
906 
907 #if defined(CONFIG_NET_IP_FRAGMENT)
net_pkt_is_ip_reassembled(struct net_pkt * pkt)908 static inline bool net_pkt_is_ip_reassembled(struct net_pkt *pkt)
909 {
910 	return !!(pkt->ip_reassembled);
911 }
912 
net_pkt_set_ip_reassembled(struct net_pkt * pkt,bool reassembled)913 static inline void net_pkt_set_ip_reassembled(struct net_pkt *pkt,
914 					      bool reassembled)
915 {
916 	pkt->ip_reassembled = reassembled;
917 }
918 #else /* CONFIG_NET_IP_FRAGMENT */
net_pkt_is_ip_reassembled(struct net_pkt * pkt)919 static inline bool net_pkt_is_ip_reassembled(struct net_pkt *pkt)
920 {
921 	ARG_UNUSED(pkt);
922 
923 	return false;
924 }
925 
net_pkt_set_ip_reassembled(struct net_pkt * pkt,bool reassembled)926 static inline void net_pkt_set_ip_reassembled(struct net_pkt *pkt,
927 					      bool reassembled)
928 {
929 	ARG_UNUSED(pkt);
930 	ARG_UNUSED(reassembled);
931 }
932 #endif /* CONFIG_NET_IP_FRAGMENT */
933 
net_pkt_priority(struct net_pkt * pkt)934 static inline uint8_t net_pkt_priority(struct net_pkt *pkt)
935 {
936 	return pkt->priority;
937 }
938 
net_pkt_set_priority(struct net_pkt * pkt,uint8_t priority)939 static inline void net_pkt_set_priority(struct net_pkt *pkt,
940 					uint8_t priority)
941 {
942 	pkt->priority = priority;
943 }
944 
945 #if defined(CONFIG_NET_CAPTURE_COOKED_MODE)
net_pkt_is_cooked_mode(struct net_pkt * pkt)946 static inline bool net_pkt_is_cooked_mode(struct net_pkt *pkt)
947 {
948 	return pkt->cooked_mode_pkt;
949 }
950 
net_pkt_set_cooked_mode(struct net_pkt * pkt,bool value)951 static inline void net_pkt_set_cooked_mode(struct net_pkt *pkt, bool value)
952 {
953 	pkt->cooked_mode_pkt = value;
954 }
955 #else
net_pkt_is_cooked_mode(struct net_pkt * pkt)956 static inline bool net_pkt_is_cooked_mode(struct net_pkt *pkt)
957 {
958 	ARG_UNUSED(pkt);
959 
960 	return false;
961 }
962 
net_pkt_set_cooked_mode(struct net_pkt * pkt,bool value)963 static inline void net_pkt_set_cooked_mode(struct net_pkt *pkt, bool value)
964 {
965 	ARG_UNUSED(pkt);
966 	ARG_UNUSED(value);
967 }
968 #endif /* CONFIG_NET_CAPTURE_COOKED_MODE */
969 
970 #if defined(CONFIG_NET_VLAN)
net_pkt_vlan_tag(struct net_pkt * pkt)971 static inline uint16_t net_pkt_vlan_tag(struct net_pkt *pkt)
972 {
973 	return net_eth_vlan_get_vid(pkt->vlan_tci);
974 }
975 
net_pkt_set_vlan_tag(struct net_pkt * pkt,uint16_t tag)976 static inline void net_pkt_set_vlan_tag(struct net_pkt *pkt, uint16_t tag)
977 {
978 	pkt->vlan_tci = net_eth_vlan_set_vid(pkt->vlan_tci, tag);
979 }
980 
net_pkt_vlan_priority(struct net_pkt * pkt)981 static inline uint8_t net_pkt_vlan_priority(struct net_pkt *pkt)
982 {
983 	return net_eth_vlan_get_pcp(pkt->vlan_tci);
984 }
985 
net_pkt_set_vlan_priority(struct net_pkt * pkt,uint8_t priority)986 static inline void net_pkt_set_vlan_priority(struct net_pkt *pkt,
987 					     uint8_t priority)
988 {
989 	pkt->vlan_tci = net_eth_vlan_set_pcp(pkt->vlan_tci, priority);
990 }
991 
net_pkt_vlan_dei(struct net_pkt * pkt)992 static inline bool net_pkt_vlan_dei(struct net_pkt *pkt)
993 {
994 	return net_eth_vlan_get_dei(pkt->vlan_tci);
995 }
996 
net_pkt_set_vlan_dei(struct net_pkt * pkt,bool dei)997 static inline void net_pkt_set_vlan_dei(struct net_pkt *pkt, bool dei)
998 {
999 	pkt->vlan_tci = net_eth_vlan_set_dei(pkt->vlan_tci, dei);
1000 }
1001 
net_pkt_set_vlan_tci(struct net_pkt * pkt,uint16_t tci)1002 static inline void net_pkt_set_vlan_tci(struct net_pkt *pkt, uint16_t tci)
1003 {
1004 	pkt->vlan_tci = tci;
1005 }
1006 
net_pkt_vlan_tci(struct net_pkt * pkt)1007 static inline uint16_t net_pkt_vlan_tci(struct net_pkt *pkt)
1008 {
1009 	return pkt->vlan_tci;
1010 }
1011 #else
net_pkt_vlan_tag(struct net_pkt * pkt)1012 static inline uint16_t net_pkt_vlan_tag(struct net_pkt *pkt)
1013 {
1014 	ARG_UNUSED(pkt);
1015 
1016 	return NET_VLAN_TAG_UNSPEC;
1017 }
1018 
net_pkt_set_vlan_tag(struct net_pkt * pkt,uint16_t tag)1019 static inline void net_pkt_set_vlan_tag(struct net_pkt *pkt, uint16_t tag)
1020 {
1021 	ARG_UNUSED(pkt);
1022 	ARG_UNUSED(tag);
1023 }
1024 
net_pkt_vlan_priority(struct net_pkt * pkt)1025 static inline uint8_t net_pkt_vlan_priority(struct net_pkt *pkt)
1026 {
1027 	ARG_UNUSED(pkt);
1028 
1029 	return 0;
1030 }
1031 
net_pkt_vlan_dei(struct net_pkt * pkt)1032 static inline bool net_pkt_vlan_dei(struct net_pkt *pkt)
1033 {
1034 	ARG_UNUSED(pkt);
1035 
1036 	return false;
1037 }
1038 
net_pkt_set_vlan_dei(struct net_pkt * pkt,bool dei)1039 static inline void net_pkt_set_vlan_dei(struct net_pkt *pkt, bool dei)
1040 {
1041 	ARG_UNUSED(pkt);
1042 	ARG_UNUSED(dei);
1043 }
1044 
net_pkt_vlan_tci(struct net_pkt * pkt)1045 static inline uint16_t net_pkt_vlan_tci(struct net_pkt *pkt)
1046 {
1047 	ARG_UNUSED(pkt);
1048 
1049 	return NET_VLAN_TAG_UNSPEC; /* assumes priority is 0 */
1050 }
1051 
net_pkt_set_vlan_tci(struct net_pkt * pkt,uint16_t tci)1052 static inline void net_pkt_set_vlan_tci(struct net_pkt *pkt, uint16_t tci)
1053 {
1054 	ARG_UNUSED(pkt);
1055 	ARG_UNUSED(tci);
1056 }
1057 #endif
1058 
1059 #if defined(CONFIG_NET_PKT_TIMESTAMP) || defined(CONFIG_NET_PKT_TXTIME)
net_pkt_timestamp(struct net_pkt * pkt)1060 static inline struct net_ptp_time *net_pkt_timestamp(struct net_pkt *pkt)
1061 {
1062 	return &pkt->timestamp;
1063 }
1064 
net_pkt_set_timestamp(struct net_pkt * pkt,struct net_ptp_time * timestamp)1065 static inline void net_pkt_set_timestamp(struct net_pkt *pkt,
1066 					 struct net_ptp_time *timestamp)
1067 {
1068 	pkt->timestamp.second = timestamp->second;
1069 	pkt->timestamp.nanosecond = timestamp->nanosecond;
1070 }
1071 
net_pkt_timestamp_ns(struct net_pkt * pkt)1072 static inline net_time_t net_pkt_timestamp_ns(struct net_pkt *pkt)
1073 {
1074 	return net_ptp_time_to_ns(&pkt->timestamp);
1075 }
1076 
net_pkt_set_timestamp_ns(struct net_pkt * pkt,net_time_t timestamp)1077 static inline void net_pkt_set_timestamp_ns(struct net_pkt *pkt, net_time_t timestamp)
1078 {
1079 	pkt->timestamp = ns_to_net_ptp_time(timestamp);
1080 }
1081 #else
net_pkt_timestamp(struct net_pkt * pkt)1082 static inline struct net_ptp_time *net_pkt_timestamp(struct net_pkt *pkt)
1083 {
1084 	ARG_UNUSED(pkt);
1085 
1086 	return NULL;
1087 }
1088 
net_pkt_set_timestamp(struct net_pkt * pkt,struct net_ptp_time * timestamp)1089 static inline void net_pkt_set_timestamp(struct net_pkt *pkt,
1090 					 struct net_ptp_time *timestamp)
1091 {
1092 	ARG_UNUSED(pkt);
1093 	ARG_UNUSED(timestamp);
1094 }
1095 
net_pkt_timestamp_ns(struct net_pkt * pkt)1096 static inline net_time_t net_pkt_timestamp_ns(struct net_pkt *pkt)
1097 {
1098 	ARG_UNUSED(pkt);
1099 
1100 	return 0;
1101 }
1102 
net_pkt_set_timestamp_ns(struct net_pkt * pkt,net_time_t timestamp)1103 static inline void net_pkt_set_timestamp_ns(struct net_pkt *pkt, net_time_t timestamp)
1104 {
1105 	ARG_UNUSED(pkt);
1106 	ARG_UNUSED(timestamp);
1107 }
1108 #endif /* CONFIG_NET_PKT_TIMESTAMP || CONFIG_NET_PKT_TXTIME */
1109 
1110 #if defined(CONFIG_NET_PKT_RXTIME_STATS) || defined(CONFIG_NET_PKT_TXTIME_STATS)
net_pkt_create_time(struct net_pkt * pkt)1111 static inline uint32_t net_pkt_create_time(struct net_pkt *pkt)
1112 {
1113 	return pkt->create_time;
1114 }
1115 
net_pkt_set_create_time(struct net_pkt * pkt,uint32_t create_time)1116 static inline void net_pkt_set_create_time(struct net_pkt *pkt,
1117 					   uint32_t create_time)
1118 {
1119 	pkt->create_time = create_time;
1120 }
1121 #else
net_pkt_create_time(struct net_pkt * pkt)1122 static inline uint32_t net_pkt_create_time(struct net_pkt *pkt)
1123 {
1124 	ARG_UNUSED(pkt);
1125 
1126 	return 0U;
1127 }
1128 
net_pkt_set_create_time(struct net_pkt * pkt,uint32_t create_time)1129 static inline void net_pkt_set_create_time(struct net_pkt *pkt,
1130 					   uint32_t create_time)
1131 {
1132 	ARG_UNUSED(pkt);
1133 	ARG_UNUSED(create_time);
1134 }
1135 #endif /* CONFIG_NET_PKT_RXTIME_STATS || CONFIG_NET_PKT_TXTIME_STATS */
1136 
1137 /**
1138  * @deprecated Use @ref net_pkt_timestamp or @ref net_pkt_timestamp_ns instead.
1139  */
net_pkt_txtime(struct net_pkt * pkt)1140 static inline uint64_t net_pkt_txtime(struct net_pkt *pkt)
1141 {
1142 #if defined(CONFIG_NET_PKT_TXTIME)
1143 	return pkt->timestamp.second * NSEC_PER_SEC + pkt->timestamp.nanosecond;
1144 #else
1145 	ARG_UNUSED(pkt);
1146 
1147 	return 0;
1148 #endif /* CONFIG_NET_PKT_TXTIME */
1149 }
1150 
1151 /**
1152  * @deprecated Use @ref net_pkt_set_timestamp or @ref net_pkt_set_timestamp_ns
1153  * instead.
1154  */
net_pkt_set_txtime(struct net_pkt * pkt,uint64_t txtime)1155 static inline void net_pkt_set_txtime(struct net_pkt *pkt, uint64_t txtime)
1156 {
1157 #if defined(CONFIG_NET_PKT_TXTIME)
1158 	pkt->timestamp.second = txtime / NSEC_PER_SEC;
1159 	pkt->timestamp.nanosecond = txtime % NSEC_PER_SEC;
1160 #else
1161 	ARG_UNUSED(pkt);
1162 	ARG_UNUSED(txtime);
1163 #endif /* CONFIG_NET_PKT_TXTIME */
1164 }
1165 
1166 #if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL) || \
1167 	defined(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)
net_pkt_stats_tick(struct net_pkt * pkt)1168 static inline uint32_t *net_pkt_stats_tick(struct net_pkt *pkt)
1169 {
1170 	return pkt->detail.stat;
1171 }
1172 
net_pkt_stats_tick_count(struct net_pkt * pkt)1173 static inline int net_pkt_stats_tick_count(struct net_pkt *pkt)
1174 {
1175 	return pkt->detail.count;
1176 }
1177 
net_pkt_stats_tick_reset(struct net_pkt * pkt)1178 static inline void net_pkt_stats_tick_reset(struct net_pkt *pkt)
1179 {
1180 	memset(&pkt->detail, 0, sizeof(pkt->detail));
1181 }
1182 
net_pkt_set_stats_tick(struct net_pkt * pkt,uint32_t tick)1183 static ALWAYS_INLINE void net_pkt_set_stats_tick(struct net_pkt *pkt,
1184 						 uint32_t tick)
1185 {
1186 	if (pkt->detail.count >= NET_PKT_DETAIL_STATS_COUNT) {
1187 		NET_ERR("Detail stats count overflow (%d >= %d)",
1188 			pkt->detail.count, NET_PKT_DETAIL_STATS_COUNT);
1189 		return;
1190 	}
1191 
1192 	pkt->detail.stat[pkt->detail.count++] = tick;
1193 }
1194 
1195 #define net_pkt_set_tx_stats_tick(pkt, tick) net_pkt_set_stats_tick(pkt, tick)
1196 #define net_pkt_set_rx_stats_tick(pkt, tick) net_pkt_set_stats_tick(pkt, tick)
1197 #else
net_pkt_stats_tick(struct net_pkt * pkt)1198 static inline uint32_t *net_pkt_stats_tick(struct net_pkt *pkt)
1199 {
1200 	ARG_UNUSED(pkt);
1201 
1202 	return NULL;
1203 }
1204 
net_pkt_stats_tick_count(struct net_pkt * pkt)1205 static inline int net_pkt_stats_tick_count(struct net_pkt *pkt)
1206 {
1207 	ARG_UNUSED(pkt);
1208 
1209 	return 0;
1210 }
1211 
net_pkt_stats_tick_reset(struct net_pkt * pkt)1212 static inline void net_pkt_stats_tick_reset(struct net_pkt *pkt)
1213 {
1214 	ARG_UNUSED(pkt);
1215 }
1216 
net_pkt_set_stats_tick(struct net_pkt * pkt,uint32_t tick)1217 static inline void net_pkt_set_stats_tick(struct net_pkt *pkt, uint32_t tick)
1218 {
1219 	ARG_UNUSED(pkt);
1220 	ARG_UNUSED(tick);
1221 }
1222 
1223 #define net_pkt_set_tx_stats_tick(pkt, tick)
1224 #define net_pkt_set_rx_stats_tick(pkt, tick)
1225 #endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL ||
1226 	  CONFIG_NET_PKT_RXTIME_STATS_DETAIL */
1227 
net_pkt_get_len(struct net_pkt * pkt)1228 static inline size_t net_pkt_get_len(struct net_pkt *pkt)
1229 {
1230 	return net_buf_frags_len(pkt->frags);
1231 }
1232 
net_pkt_data(struct net_pkt * pkt)1233 static inline uint8_t *net_pkt_data(struct net_pkt *pkt)
1234 {
1235 	return pkt->frags->data;
1236 }
1237 
net_pkt_ip_data(struct net_pkt * pkt)1238 static inline uint8_t *net_pkt_ip_data(struct net_pkt *pkt)
1239 {
1240 	return pkt->frags->data;
1241 }
1242 
net_pkt_is_empty(struct net_pkt * pkt)1243 static inline bool net_pkt_is_empty(struct net_pkt *pkt)
1244 {
1245 	return !pkt->buffer || !net_pkt_data(pkt) || pkt->buffer->len == 0;
1246 }
1247 
net_pkt_lladdr_src(struct net_pkt * pkt)1248 static inline struct net_linkaddr *net_pkt_lladdr_src(struct net_pkt *pkt)
1249 {
1250 	return &pkt->lladdr_src;
1251 }
1252 
net_pkt_lladdr_dst(struct net_pkt * pkt)1253 static inline struct net_linkaddr *net_pkt_lladdr_dst(struct net_pkt *pkt)
1254 {
1255 	return &pkt->lladdr_dst;
1256 }
1257 
net_pkt_lladdr_swap(struct net_pkt * pkt)1258 static inline void net_pkt_lladdr_swap(struct net_pkt *pkt)
1259 {
1260 	uint8_t *addr = net_pkt_lladdr_src(pkt)->addr;
1261 
1262 	net_pkt_lladdr_src(pkt)->addr = net_pkt_lladdr_dst(pkt)->addr;
1263 	net_pkt_lladdr_dst(pkt)->addr = addr;
1264 }
1265 
net_pkt_lladdr_clear(struct net_pkt * pkt)1266 static inline void net_pkt_lladdr_clear(struct net_pkt *pkt)
1267 {
1268 	net_pkt_lladdr_src(pkt)->addr = NULL;
1269 	net_pkt_lladdr_src(pkt)->len = 0U;
1270 }
1271 
net_pkt_ll_proto_type(struct net_pkt * pkt)1272 static inline uint16_t net_pkt_ll_proto_type(struct net_pkt *pkt)
1273 {
1274 	return pkt->ll_proto_type;
1275 }
1276 
net_pkt_set_ll_proto_type(struct net_pkt * pkt,uint16_t type)1277 static inline void net_pkt_set_ll_proto_type(struct net_pkt *pkt, uint16_t type)
1278 {
1279 	pkt->ll_proto_type = type;
1280 }
1281 
1282 #if defined(CONFIG_NET_IPV4_ACD)
net_pkt_ipv4_acd(struct net_pkt * pkt)1283 static inline bool net_pkt_ipv4_acd(struct net_pkt *pkt)
1284 {
1285 	return !!(pkt->ipv4_acd_arp_msg);
1286 }
1287 
net_pkt_set_ipv4_acd(struct net_pkt * pkt,bool is_acd_arp_msg)1288 static inline void net_pkt_set_ipv4_acd(struct net_pkt *pkt,
1289 					bool is_acd_arp_msg)
1290 {
1291 	pkt->ipv4_acd_arp_msg = is_acd_arp_msg;
1292 }
1293 #else /* CONFIG_NET_IPV4_ACD */
net_pkt_ipv4_acd(struct net_pkt * pkt)1294 static inline bool net_pkt_ipv4_acd(struct net_pkt *pkt)
1295 {
1296 	ARG_UNUSED(pkt);
1297 
1298 	return false;
1299 }
1300 
net_pkt_set_ipv4_acd(struct net_pkt * pkt,bool is_acd_arp_msg)1301 static inline void net_pkt_set_ipv4_acd(struct net_pkt *pkt,
1302 					bool is_acd_arp_msg)
1303 {
1304 	ARG_UNUSED(pkt);
1305 	ARG_UNUSED(is_acd_arp_msg);
1306 }
1307 #endif /* CONFIG_NET_IPV4_ACD */
1308 
1309 #if defined(CONFIG_NET_LLDP)
net_pkt_is_lldp(struct net_pkt * pkt)1310 static inline bool net_pkt_is_lldp(struct net_pkt *pkt)
1311 {
1312 	return !!(pkt->lldp_pkt);
1313 }
1314 
net_pkt_set_lldp(struct net_pkt * pkt,bool is_lldp)1315 static inline void net_pkt_set_lldp(struct net_pkt *pkt, bool is_lldp)
1316 {
1317 	pkt->lldp_pkt = is_lldp;
1318 }
1319 #else
net_pkt_is_lldp(struct net_pkt * pkt)1320 static inline bool net_pkt_is_lldp(struct net_pkt *pkt)
1321 {
1322 	ARG_UNUSED(pkt);
1323 
1324 	return false;
1325 }
1326 
net_pkt_set_lldp(struct net_pkt * pkt,bool is_lldp)1327 static inline void net_pkt_set_lldp(struct net_pkt *pkt, bool is_lldp)
1328 {
1329 	ARG_UNUSED(pkt);
1330 	ARG_UNUSED(is_lldp);
1331 }
1332 #endif /* CONFIG_NET_LLDP */
1333 
1334 #if defined(CONFIG_NET_L2_PPP)
net_pkt_is_ppp(struct net_pkt * pkt)1335 static inline bool net_pkt_is_ppp(struct net_pkt *pkt)
1336 {
1337 	return !!(pkt->ppp_msg);
1338 }
1339 
net_pkt_set_ppp(struct net_pkt * pkt,bool is_ppp_msg)1340 static inline void net_pkt_set_ppp(struct net_pkt *pkt,
1341 				   bool is_ppp_msg)
1342 {
1343 	pkt->ppp_msg = is_ppp_msg;
1344 }
1345 #else /* CONFIG_NET_L2_PPP */
net_pkt_is_ppp(struct net_pkt * pkt)1346 static inline bool net_pkt_is_ppp(struct net_pkt *pkt)
1347 {
1348 	ARG_UNUSED(pkt);
1349 
1350 	return false;
1351 }
1352 
net_pkt_set_ppp(struct net_pkt * pkt,bool is_ppp_msg)1353 static inline void net_pkt_set_ppp(struct net_pkt *pkt,
1354 				   bool is_ppp_msg)
1355 {
1356 	ARG_UNUSED(pkt);
1357 	ARG_UNUSED(is_ppp_msg);
1358 }
1359 #endif /* CONFIG_NET_L2_PPP */
1360 
1361 #if defined(NET_PKT_HAS_CONTROL_BLOCK)
net_pkt_cb(struct net_pkt * pkt)1362 static inline void *net_pkt_cb(struct net_pkt *pkt)
1363 {
1364 	return &pkt->cb;
1365 }
1366 #else
net_pkt_cb(struct net_pkt * pkt)1367 static inline void *net_pkt_cb(struct net_pkt *pkt)
1368 {
1369 	ARG_UNUSED(pkt);
1370 
1371 	return NULL;
1372 }
1373 #endif
1374 
1375 #define NET_IPV6_HDR(pkt) ((struct net_ipv6_hdr *)net_pkt_ip_data(pkt))
1376 #define NET_IPV4_HDR(pkt) ((struct net_ipv4_hdr *)net_pkt_ip_data(pkt))
1377 
net_pkt_set_src_ipv6_addr(struct net_pkt * pkt)1378 static inline void net_pkt_set_src_ipv6_addr(struct net_pkt *pkt)
1379 {
1380 	net_if_ipv6_select_src_addr(net_context_get_iface(
1381 					    net_pkt_context(pkt)),
1382 				    (struct in6_addr *)NET_IPV6_HDR(pkt)->src);
1383 }
1384 
net_pkt_set_overwrite(struct net_pkt * pkt,bool overwrite)1385 static inline void net_pkt_set_overwrite(struct net_pkt *pkt, bool overwrite)
1386 {
1387 	pkt->overwrite = overwrite;
1388 }
1389 
net_pkt_is_being_overwritten(struct net_pkt * pkt)1390 static inline bool net_pkt_is_being_overwritten(struct net_pkt *pkt)
1391 {
1392 	return !!(pkt->overwrite);
1393 }
1394 
1395 #ifdef CONFIG_NET_PKT_FILTER
1396 
1397 bool net_pkt_filter_send_ok(struct net_pkt *pkt);
1398 bool net_pkt_filter_recv_ok(struct net_pkt *pkt);
1399 
1400 #else
1401 
net_pkt_filter_send_ok(struct net_pkt * pkt)1402 static inline bool net_pkt_filter_send_ok(struct net_pkt *pkt)
1403 {
1404 	ARG_UNUSED(pkt);
1405 
1406 	return true;
1407 }
1408 
net_pkt_filter_recv_ok(struct net_pkt * pkt)1409 static inline bool net_pkt_filter_recv_ok(struct net_pkt *pkt)
1410 {
1411 	ARG_UNUSED(pkt);
1412 
1413 	return true;
1414 }
1415 
1416 #endif /* CONFIG_NET_PKT_FILTER */
1417 
1418 #if defined(CONFIG_NET_PKT_FILTER) && \
1419 	(defined(CONFIG_NET_PKT_FILTER_IPV4_HOOK) || defined(CONFIG_NET_PKT_FILTER_IPV6_HOOK))
1420 
1421 bool net_pkt_filter_ip_recv_ok(struct net_pkt *pkt);
1422 
1423 #else
1424 
net_pkt_filter_ip_recv_ok(struct net_pkt * pkt)1425 static inline bool net_pkt_filter_ip_recv_ok(struct net_pkt *pkt)
1426 {
1427 	ARG_UNUSED(pkt);
1428 
1429 	return true;
1430 }
1431 
1432 #endif /* CONFIG_NET_PKT_FILTER_IPV4_HOOK || CONFIG_NET_PKT_FILTER_IPV6_HOOK */
1433 
1434 #if defined(CONFIG_NET_PKT_FILTER) && defined(CONFIG_NET_PKT_FILTER_LOCAL_IN_HOOK)
1435 
1436 bool net_pkt_filter_local_in_recv_ok(struct net_pkt *pkt);
1437 
1438 #else
1439 
net_pkt_filter_local_in_recv_ok(struct net_pkt * pkt)1440 static inline bool net_pkt_filter_local_in_recv_ok(struct net_pkt *pkt)
1441 {
1442 	ARG_UNUSED(pkt);
1443 
1444 	return true;
1445 }
1446 
1447 #endif /* CONFIG_NET_PKT_FILTER && CONFIG_NET_PKT_FILTER_LOCAL_IN_HOOK */
1448 
1449 #if defined(CONFIG_NET_OFFLOAD) || defined(CONFIG_NET_L2_IPIP)
net_pkt_remote_address(struct net_pkt * pkt)1450 static inline struct sockaddr *net_pkt_remote_address(struct net_pkt *pkt)
1451 {
1452 	return &pkt->remote;
1453 }
1454 
net_pkt_set_remote_address(struct net_pkt * pkt,struct sockaddr * address,socklen_t len)1455 static inline void net_pkt_set_remote_address(struct net_pkt *pkt,
1456 					      struct sockaddr *address,
1457 					      socklen_t len)
1458 {
1459 	memcpy(&pkt->remote, address, len);
1460 }
1461 #endif /* CONFIG_NET_OFFLOAD || CONFIG_NET_L2_IPIP */
1462 
1463 /* @endcond */
1464 
1465 /**
1466  * @brief Create a net_pkt slab
1467  *
1468  * A net_pkt slab is used to store meta-information about
1469  * network packets. It must be coupled with a data fragment pool
1470  * (@ref NET_PKT_DATA_POOL_DEFINE) used to store the actual
1471  * packet data. The macro can be used by an application to define
1472  * additional custom per-context TX packet slabs (see
1473  * net_context_setup_pools()).
1474  *
1475  * @param name Name of the slab.
1476  * @param count Number of net_pkt in this slab.
1477  */
1478 #define NET_PKT_SLAB_DEFINE(name, count)				\
1479 	K_MEM_SLAB_DEFINE(name, sizeof(struct net_pkt), count, 4)
1480 
1481 /** @cond INTERNAL_HIDDEN */
1482 
1483 /* Backward compatibility macro */
1484 #define NET_PKT_TX_SLAB_DEFINE(name, count) NET_PKT_SLAB_DEFINE(name, count)
1485 
1486 /** @endcond */
1487 
1488 /**
1489  * @brief Create a data fragment net_buf pool
1490  *
1491  * A net_buf pool is used to store actual data for
1492  * network packets. It must be coupled with a net_pkt slab
1493  * (@ref NET_PKT_SLAB_DEFINE) used to store the packet
1494  * meta-information. The macro can be used by an application to
1495  * define additional custom per-context TX packet pools (see
1496  * net_context_setup_pools()).
1497  *
1498  * @param name Name of the pool.
1499  * @param count Number of net_buf in this pool.
1500  */
1501 #define NET_PKT_DATA_POOL_DEFINE(name, count)				\
1502 	NET_BUF_POOL_DEFINE(name, count, CONFIG_NET_BUF_DATA_SIZE,	\
1503 			    0, NULL)
1504 
1505 /** @cond INTERNAL_HIDDEN */
1506 
1507 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC) || \
1508 	(CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG)
1509 #define NET_PKT_DEBUG_ENABLED
1510 #endif
1511 
1512 #if defined(NET_PKT_DEBUG_ENABLED)
1513 
1514 /* Debug versions of the net_pkt functions that are used when tracking
1515  * buffer usage.
1516  */
1517 
1518 struct net_buf *net_pkt_get_reserve_data_debug(struct net_buf_pool *pool,
1519 					       size_t min_len,
1520 					       k_timeout_t timeout,
1521 					       const char *caller,
1522 					       int line);
1523 
1524 #define net_pkt_get_reserve_data(pool, min_len, timeout)				\
1525 	net_pkt_get_reserve_data_debug(pool, min_len, timeout, __func__, __LINE__)
1526 
1527 struct net_buf *net_pkt_get_reserve_rx_data_debug(size_t min_len,
1528 						  k_timeout_t timeout,
1529 						  const char *caller,
1530 						  int line);
1531 #define net_pkt_get_reserve_rx_data(min_len, timeout)				\
1532 	net_pkt_get_reserve_rx_data_debug(min_len, timeout, __func__, __LINE__)
1533 
1534 struct net_buf *net_pkt_get_reserve_tx_data_debug(size_t min_len,
1535 						  k_timeout_t timeout,
1536 						  const char *caller,
1537 						  int line);
1538 #define net_pkt_get_reserve_tx_data(min_len, timeout)				\
1539 	net_pkt_get_reserve_tx_data_debug(min_len, timeout, __func__, __LINE__)
1540 
1541 struct net_buf *net_pkt_get_frag_debug(struct net_pkt *pkt, size_t min_len,
1542 				       k_timeout_t timeout,
1543 				       const char *caller, int line);
1544 #define net_pkt_get_frag(pkt, min_len, timeout)					\
1545 	net_pkt_get_frag_debug(pkt, min_len, timeout, __func__, __LINE__)
1546 
1547 void net_pkt_unref_debug(struct net_pkt *pkt, const char *caller, int line);
1548 #define net_pkt_unref(pkt) net_pkt_unref_debug(pkt, __func__, __LINE__)
1549 
1550 struct net_pkt *net_pkt_ref_debug(struct net_pkt *pkt, const char *caller,
1551 				  int line);
1552 #define net_pkt_ref(pkt) net_pkt_ref_debug(pkt, __func__, __LINE__)
1553 
1554 struct net_buf *net_pkt_frag_ref_debug(struct net_buf *frag,
1555 				       const char *caller, int line);
1556 #define net_pkt_frag_ref(frag) net_pkt_frag_ref_debug(frag, __func__, __LINE__)
1557 
1558 void net_pkt_frag_unref_debug(struct net_buf *frag,
1559 			      const char *caller, int line);
1560 #define net_pkt_frag_unref(frag)				\
1561 	net_pkt_frag_unref_debug(frag, __func__, __LINE__)
1562 
1563 struct net_buf *net_pkt_frag_del_debug(struct net_pkt *pkt,
1564 				       struct net_buf *parent,
1565 				       struct net_buf *frag,
1566 				       const char *caller, int line);
1567 #define net_pkt_frag_del(pkt, parent, frag)				\
1568 	net_pkt_frag_del_debug(pkt, parent, frag, __func__, __LINE__)
1569 
1570 void net_pkt_frag_add_debug(struct net_pkt *pkt, struct net_buf *frag,
1571 			    const char *caller, int line);
1572 #define net_pkt_frag_add(pkt, frag)				\
1573 	net_pkt_frag_add_debug(pkt, frag, __func__, __LINE__)
1574 
1575 void net_pkt_frag_insert_debug(struct net_pkt *pkt, struct net_buf *frag,
1576 			       const char *caller, int line);
1577 #define net_pkt_frag_insert(pkt, frag)					\
1578 	net_pkt_frag_insert_debug(pkt, frag, __func__, __LINE__)
1579 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC ||
1580 	* CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
1581 	*/
1582 /** @endcond */
1583 
1584 /**
1585  * @brief Print fragment list and the fragment sizes
1586  *
1587  * @details Only available if debugging is activated.
1588  *
1589  * @param pkt Network pkt.
1590  */
1591 #if defined(NET_PKT_DEBUG_ENABLED)
1592 void net_pkt_print_frags(struct net_pkt *pkt);
1593 #else
1594 #define net_pkt_print_frags(pkt)
1595 #endif
1596 
1597 /**
1598  * @brief Get a data buffer from a given pool.
1599  *
1600  * @details Normally this version is not useful for applications
1601  * but is mainly used by network fragmentation code.
1602  *
1603  * @param pool The net_buf pool to use.
1604  * @param min_len Minimum length of the requested fragment.
1605  * @param timeout Affects the action taken should the net buf pool be empty.
1606  *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
1607  *        wait as long as necessary. Otherwise, wait up to the specified time.
1608  *
1609  * @return Network buffer if successful, NULL otherwise.
1610  */
1611 #if !defined(NET_PKT_DEBUG_ENABLED)
1612 struct net_buf *net_pkt_get_reserve_data(struct net_buf_pool *pool,
1613 					 size_t min_len, k_timeout_t timeout);
1614 #endif
1615 
1616 /**
1617  * @brief Get RX DATA buffer from pool.
1618  * Normally you should use net_pkt_get_frag() instead.
1619  *
1620  * @details Normally this version is not useful for applications
1621  * but is mainly used by network fragmentation code.
1622  *
1623  * @param min_len Minimum length of the requested fragment.
1624  * @param timeout Affects the action taken should the net buf pool be empty.
1625  *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
1626  *        wait as long as necessary. Otherwise, wait up to the specified time.
1627  *
1628  * @return Network buffer if successful, NULL otherwise.
1629  */
1630 #if !defined(NET_PKT_DEBUG_ENABLED)
1631 struct net_buf *net_pkt_get_reserve_rx_data(size_t min_len, k_timeout_t timeout);
1632 #endif
1633 
1634 /**
1635  * @brief Get TX DATA buffer from pool.
1636  * Normally you should use net_pkt_get_frag() instead.
1637  *
1638  * @details Normally this version is not useful for applications
1639  * but is mainly used by network fragmentation code.
1640  *
1641  * @param min_len Minimum length of the requested fragment.
1642  * @param timeout Affects the action taken should the net buf pool be empty.
1643  *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
1644  *        wait as long as necessary. Otherwise, wait up to the specified time.
1645  *
1646  * @return Network buffer if successful, NULL otherwise.
1647  */
1648 #if !defined(NET_PKT_DEBUG_ENABLED)
1649 struct net_buf *net_pkt_get_reserve_tx_data(size_t min_len, k_timeout_t timeout);
1650 #endif
1651 
1652 /**
1653  * @brief Get a data fragment that might be from user specific
1654  * buffer pool or from global DATA pool.
1655  *
1656  * @param pkt Network packet.
1657  * @param min_len Minimum length of the requested fragment.
1658  * @param timeout Affects the action taken should the net buf pool be empty.
1659  *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
1660  *        wait as long as necessary. Otherwise, wait up to the specified time.
1661  *
1662  * @return Network buffer if successful, NULL otherwise.
1663  */
1664 #if !defined(NET_PKT_DEBUG_ENABLED)
1665 struct net_buf *net_pkt_get_frag(struct net_pkt *pkt, size_t min_len,
1666 				 k_timeout_t timeout);
1667 #endif
1668 
1669 /**
1670  * @brief Place packet back into the available packets slab
1671  *
1672  * @details Releases the packet to other use. This needs to be
1673  * called by application after it has finished with the packet.
1674  *
1675  * @param pkt Network packet to release.
1676  *
1677  */
1678 #if !defined(NET_PKT_DEBUG_ENABLED)
1679 void net_pkt_unref(struct net_pkt *pkt);
1680 #endif
1681 
1682 /**
1683  * @brief Increase the packet ref count
1684  *
1685  * @details Mark the packet to be used still.
1686  *
1687  * @param pkt Network packet to ref.
1688  *
1689  * @return Network packet if successful, NULL otherwise.
1690  */
1691 #if !defined(NET_PKT_DEBUG_ENABLED)
1692 struct net_pkt *net_pkt_ref(struct net_pkt *pkt);
1693 #endif
1694 
1695 /**
1696  * @brief Increase the packet fragment ref count
1697  *
1698  * @details Mark the fragment to be used still.
1699  *
1700  * @param frag Network fragment to ref.
1701  *
1702  * @return a pointer on the referenced Network fragment.
1703  */
1704 #if !defined(NET_PKT_DEBUG_ENABLED)
1705 struct net_buf *net_pkt_frag_ref(struct net_buf *frag);
1706 #endif
1707 
1708 /**
1709  * @brief Decrease the packet fragment ref count
1710  *
1711  * @param frag Network fragment to unref.
1712  */
1713 #if !defined(NET_PKT_DEBUG_ENABLED)
1714 void net_pkt_frag_unref(struct net_buf *frag);
1715 #endif
1716 
1717 /**
1718  * @brief Delete existing fragment from a packet
1719  *
1720  * @param pkt Network packet to which the fragment belongs.
1721  * @param parent Parent fragment of frag, or NULL if none.
1722  * @param frag Fragment to delete.
1723  *
1724  * @return Pointer to the following fragment, or NULL if it had no
1725  *         further fragments.
1726  */
1727 #if !defined(NET_PKT_DEBUG_ENABLED)
1728 struct net_buf *net_pkt_frag_del(struct net_pkt *pkt,
1729 				 struct net_buf *parent,
1730 				 struct net_buf *frag);
1731 #endif
1732 
1733 /**
1734  * @brief Add a fragment to a packet at the end of its fragment list
1735  *
1736  * @param pkt Network packet to which the fragment is added
1737  * @param frag Fragment to add
1738  */
1739 #if !defined(NET_PKT_DEBUG_ENABLED)
1740 void net_pkt_frag_add(struct net_pkt *pkt, struct net_buf *frag);
1741 #endif
1742 
1743 /**
1744  * @brief Insert a fragment to a packet at the beginning of its fragment list
1745  *
1746  * @param pkt Network packet into which the fragment is inserted
1747  * @param frag Fragment to insert
1748  */
1749 #if !defined(NET_PKT_DEBUG_ENABLED)
1750 void net_pkt_frag_insert(struct net_pkt *pkt, struct net_buf *frag);
1751 #endif
1752 
1753 /**
1754  * @brief Compact the fragment list of a packet.
1755  *
1756  * @details After this operation there is no free space left in the individual fragments.
1757  * @param pkt Network packet.
1758  */
1759 void net_pkt_compact(struct net_pkt *pkt);
1760 
1761 /**
1762  * @brief Get information about predefined RX, TX and DATA pools.
1763  *
1764  * @param rx Pointer to the RX packet slab is returned here.
1765  * @param tx Pointer to the TX packet slab is returned here.
1766  * @param rx_data Pointer to the RX DATA buffer pool is returned here.
1767  * @param tx_data Pointer to the TX DATA buffer pool is returned here.
1768  */
1769 void net_pkt_get_info(struct k_mem_slab **rx,
1770 		      struct k_mem_slab **tx,
1771 		      struct net_buf_pool **rx_data,
1772 		      struct net_buf_pool **tx_data);
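
/* Illustrative sketch: dumping pool occupancy for diagnostics. The use of
 * k_mem_slab_num_free_get() and printk() is an assumption of this example,
 * not something mandated by this API.
 *
 *   struct k_mem_slab *rx, *tx;
 *   struct net_buf_pool *rx_data, *tx_data;
 *
 *   net_pkt_get_info(&rx, &tx, &rx_data, &tx_data);
 *   printk("free RX pkts: %u, free TX pkts: %u\n",
 *          k_mem_slab_num_free_get(rx), k_mem_slab_num_free_get(tx));
 */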
1773 
1774 /** @cond INTERNAL_HIDDEN */
1775 
1776 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
1777 /**
1778  * @brief Debug helper to print out the buffer allocations
1779  */
1780 void net_pkt_print(void);
1781 
1782 typedef void (*net_pkt_allocs_cb_t)(struct net_pkt *pkt,
1783 				    struct net_buf *buf,
1784 				    const char *func_alloc,
1785 				    int line_alloc,
1786 				    const char *func_free,
1787 				    int line_free,
1788 				    bool in_use,
1789 				    void *user_data);
1790 
1791 void net_pkt_allocs_foreach(net_pkt_allocs_cb_t cb, void *user_data);
1792 
1793 const char *net_pkt_slab2str(struct k_mem_slab *slab);
1794 const char *net_pkt_pool2str(struct net_buf_pool *pool);
1795 
1796 #else
1797 #define net_pkt_print(...)
1798 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC */
1799 
1800 /* The new allocator and API are defined below.
1801  * This will become simpler once the former API above is removed.
1802  */
1803 #if defined(NET_PKT_DEBUG_ENABLED)
1804 
1805 struct net_pkt *net_pkt_alloc_debug(k_timeout_t timeout,
1806 				    const char *caller, int line);
1807 #define net_pkt_alloc(_timeout)					\
1808 	net_pkt_alloc_debug(_timeout, __func__, __LINE__)
1809 
1810 struct net_pkt *net_pkt_alloc_from_slab_debug(struct k_mem_slab *slab,
1811 					      k_timeout_t timeout,
1812 					      const char *caller, int line);
1813 #define net_pkt_alloc_from_slab(_slab, _timeout)			\
1814 	net_pkt_alloc_from_slab_debug(_slab, _timeout, __func__, __LINE__)
1815 
1816 struct net_pkt *net_pkt_rx_alloc_debug(k_timeout_t timeout,
1817 				       const char *caller, int line);
1818 #define net_pkt_rx_alloc(_timeout)				\
1819 	net_pkt_rx_alloc_debug(_timeout, __func__, __LINE__)
1820 
1821 struct net_pkt *net_pkt_alloc_on_iface_debug(struct net_if *iface,
1822 					     k_timeout_t timeout,
1823 					     const char *caller,
1824 					     int line);
1825 #define net_pkt_alloc_on_iface(_iface, _timeout)			\
1826 	net_pkt_alloc_on_iface_debug(_iface, _timeout, __func__, __LINE__)
1827 
1828 struct net_pkt *net_pkt_rx_alloc_on_iface_debug(struct net_if *iface,
1829 						k_timeout_t timeout,
1830 						const char *caller,
1831 						int line);
1832 #define net_pkt_rx_alloc_on_iface(_iface, _timeout)			\
1833 	net_pkt_rx_alloc_on_iface_debug(_iface, _timeout,		\
1834 					__func__, __LINE__)
1835 
1836 int net_pkt_alloc_buffer_debug(struct net_pkt *pkt,
1837 			       size_t size,
1838 			       enum net_ip_protocol proto,
1839 			       k_timeout_t timeout,
1840 			       const char *caller, int line);
1841 #define net_pkt_alloc_buffer(_pkt, _size, _proto, _timeout)		\
1842 	net_pkt_alloc_buffer_debug(_pkt, _size, _proto, _timeout,	\
1843 				   __func__, __LINE__)
1844 
1845 int net_pkt_alloc_buffer_raw_debug(struct net_pkt *pkt, size_t size,
1846 				   k_timeout_t timeout,
1847 				   const char *caller, int line);
1848 #define net_pkt_alloc_buffer_raw(_pkt, _size, _timeout)	\
1849 	net_pkt_alloc_buffer_raw_debug(_pkt, _size, _timeout,	\
1850 				       __func__, __LINE__)
1851 
1852 struct net_pkt *net_pkt_alloc_with_buffer_debug(struct net_if *iface,
1853 						size_t size,
1854 						sa_family_t family,
1855 						enum net_ip_protocol proto,
1856 						k_timeout_t timeout,
1857 						const char *caller,
1858 						int line);
1859 #define net_pkt_alloc_with_buffer(_iface, _size, _family,		\
1860 				  _proto, _timeout)			\
1861 	net_pkt_alloc_with_buffer_debug(_iface, _size, _family,		\
1862 					_proto, _timeout,		\
1863 					__func__, __LINE__)
1864 
1865 struct net_pkt *net_pkt_rx_alloc_with_buffer_debug(struct net_if *iface,
1866 						   size_t size,
1867 						   sa_family_t family,
1868 						   enum net_ip_protocol proto,
1869 						   k_timeout_t timeout,
1870 						   const char *caller,
1871 						   int line);
1872 #define net_pkt_rx_alloc_with_buffer(_iface, _size, _family,		\
1873 				     _proto, _timeout)			\
1874 	net_pkt_rx_alloc_with_buffer_debug(_iface, _size, _family,	\
1875 					   _proto, _timeout,		\
1876 					   __func__, __LINE__)
1877 #endif /* NET_PKT_DEBUG_ENABLED */
1878 /** @endcond */
1879 
1880 /**
1881  * @brief Allocate an initialized net_pkt
1882  *
1883  * @details For the time being, two pools are used: one for TX and one for RX.
1884  *          This allocator has to be used for TX.
1885  *
1886  * @param timeout Maximum time to wait for an allocation.
1887  *
1888  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1889  */
1890 #if !defined(NET_PKT_DEBUG_ENABLED)
1891 struct net_pkt *net_pkt_alloc(k_timeout_t timeout);
1892 #endif
1893 
1894 /**
1895  * @brief Allocate an initialized net_pkt from a specific slab
1896  *
1897  * @details Unlike net_pkt_alloc(), which uses the core slabs, this one uses
1898  *          an external slab (see NET_PKT_SLAB_DEFINE()).
1899  *          Do _not_ use it unless you know what you are doing. Basically, only
1900  *          net_context should be using this, in order to allocate a packet and
1901  *          then a buffer from its local slab/pool (if any).
1902  *
1903  * @param slab    The slab to use for allocating the packet
1904  * @param timeout Maximum time to wait for an allocation.
1905  *
1906  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1907  */
1908 #if !defined(NET_PKT_DEBUG_ENABLED)
1909 struct net_pkt *net_pkt_alloc_from_slab(struct k_mem_slab *slab,
1910 					k_timeout_t timeout);
1911 #endif
1912 
1913 /**
1914  * @brief Allocate an initialized net_pkt for RX
1915  *
1916  * @details For the time being, two pools are used: one for TX and one for RX.
1917  *          This allocator has to be used for RX.
1918  *
1919  * @param timeout Maximum time to wait for an allocation.
1920  *
1921  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1922  */
1923 #if !defined(NET_PKT_DEBUG_ENABLED)
1924 struct net_pkt *net_pkt_rx_alloc(k_timeout_t timeout);
1925 #endif
1926 
1927 /**
1928  * @brief Allocate a network packet for a specific network interface.
1929  *
1930  * @param iface The network interface the packet is supposed to go through.
1931  * @param timeout Maximum time to wait for an allocation.
1932  *
1933  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1934  */
1935 #if !defined(NET_PKT_DEBUG_ENABLED)
1936 struct net_pkt *net_pkt_alloc_on_iface(struct net_if *iface,
1937 				       k_timeout_t timeout);
1938 
1939 /** @cond INTERNAL_HIDDEN */
1940 
1941 /* Same as above but specifically for RX packet */
1942 struct net_pkt *net_pkt_rx_alloc_on_iface(struct net_if *iface,
1943 					  k_timeout_t timeout);
1944 /** @endcond */
1945 
1946 #endif
1947 
1948 /**
1949  * @brief Allocate buffer for a net_pkt
1950  *
1951  * @details This allocator takes into account the space necessary for headers,
1952  *          the MTU, and any existing buffer. Beware that, due to all these
1953  *          criteria, the allocated size might be smaller or bigger than the
1954  *          requested one.
1955  *
1956  * @param pkt     The network packet requiring buffer to be allocated.
1957  * @param size    The size of buffer being requested.
1958  * @param proto   The IP protocol type (can be 0 for none).
1959  * @param timeout Maximum time to wait for an allocation.
1960  *
1961  * @return 0 on success, negative errno code otherwise.
1962  */
1963 #if !defined(NET_PKT_DEBUG_ENABLED)
1964 int net_pkt_alloc_buffer(struct net_pkt *pkt,
1965 			 size_t size,
1966 			 enum net_ip_protocol proto,
1967 			 k_timeout_t timeout);
1968 #endif
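
/* Illustrative sketch of the two-step TX allocation: packet first, buffer
 * second. "iface", the payload size and the timeouts are assumptions;
 * net_pkt_set_family() is assumed to be available from this header.
 *
 *   struct net_pkt *pkt;
 *
 *   pkt = net_pkt_alloc_on_iface(iface, K_MSEC(100));
 *   if (pkt == NULL) {
 *       return -ENOMEM;
 *   }
 *
 *   net_pkt_set_family(pkt, AF_INET);   // so header space can be accounted for
 *   if (net_pkt_alloc_buffer(pkt, 32, IPPROTO_UDP, K_MSEC(100)) < 0) {
 *       net_pkt_unref(pkt);
 *       return -ENOMEM;
 *   }
 */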
1969 
1970 /**
1971  * @brief Allocate buffer for a net_pkt, of specified size, w/o any additional
1972  *        preconditions
1973  *
1974  * @details The actual buffer size may be larger than the requested one if
1975  *          fixed-size buffers are in use.
1976  *
1977  * @param pkt     The network packet requiring buffer to be allocated.
1978  * @param size    The size of buffer being requested.
1979  * @param timeout Maximum time to wait for an allocation.
1980  *
1981  * @return 0 on success, negative errno code otherwise.
1982  */
1983 #if !defined(NET_PKT_DEBUG_ENABLED)
1984 int net_pkt_alloc_buffer_raw(struct net_pkt *pkt, size_t size,
1985 			     k_timeout_t timeout);
1986 #endif
1987 
1988 /**
1989  * @brief Allocate a network packet and buffer at once
1990  *
1991  * @param iface   The network interface the packet is supposed to go through.
1992  * @param size    The size of buffer.
1993  * @param family  The family to which the packet belongs.
1994  * @param proto   The IP protocol type (can be 0 for none).
1995  * @param timeout Maximum time to wait for an allocation.
1996  *
1997  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1998  */
1999 #if !defined(NET_PKT_DEBUG_ENABLED)
2000 struct net_pkt *net_pkt_alloc_with_buffer(struct net_if *iface,
2001 					  size_t size,
2002 					  sa_family_t family,
2003 					  enum net_ip_protocol proto,
2004 					  k_timeout_t timeout);
2005 
2006 /** @cond INTERNAL_HIDDEN */
2007 
2008 /* Same as above but specifically for RX packet */
2009 struct net_pkt *net_pkt_rx_alloc_with_buffer(struct net_if *iface,
2010 					     size_t size,
2011 					     sa_family_t family,
2012 					     enum net_ip_protocol proto,
2013 					     k_timeout_t timeout);
2014 
2015 /** @endcond */
2016 
2017 #endif
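
/* Illustrative sketch of the one-shot allocator: packet and buffer in a
 * single call. "iface", "payload" and the sizes/timeouts are assumptions.
 *
 *   struct net_pkt *pkt;
 *
 *   pkt = net_pkt_alloc_with_buffer(iface, sizeof(payload), AF_INET,
 *                                   IPPROTO_UDP, K_MSEC(100));
 *   if (pkt == NULL) {
 *       return -ENOMEM;
 *   }
 *
 *   net_pkt_write(pkt, payload, sizeof(payload));
 *   // ... hand the packet to the stack, or net_pkt_unref() it on error ...
 */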
2018 
2019 /**
2020  * @brief Append a buffer to a packet
2021  *
2022  * @param pkt    Network packet where to append the buffer
2023  * @param buffer Buffer to append
2024  */
2025 void net_pkt_append_buffer(struct net_pkt *pkt, struct net_buf *buffer);
2026 
2027 /**
2028  * @brief Get available buffer space from a pkt
2029  *
2030  * @note Reserved bytes (headroom) in any of the fragments are not considered to
2031  *       be available.
2032  *
2033  * @param pkt The net_pkt whose buffer availability should be evaluated
2034  *
2035  * @return the amount of buffer available
2036  */
2037 size_t net_pkt_available_buffer(struct net_pkt *pkt);
2038 
2039 /**
2040  * @brief Get available buffer space for payload from a pkt
2041  *
2042  * @note Reserved bytes (headroom) in any of the fragments are not considered to
2043  *       be available.
2044  *
2045  * @details Unlike net_pkt_available_buffer(), this takes into account
2046  *          the space required for protocol headers.
2047  *
2048  * @param pkt   The net_pkt whose payload buffer availability should
2049  *              be evaluated
2050  * @param proto The IP protocol type (can be 0 for none).
2051  *
2052  * @return the amount of buffer available for payload
2053  */
2054 size_t net_pkt_available_payload_buffer(struct net_pkt *pkt,
2055 					enum net_ip_protocol proto);
2056 
2057 /**
2058  * @brief Trim net_pkt buffer
2059  *
2060  * @details This checks for unused buffers and deallocates
2061  *          them accordingly.
2062  *
2063  * @param pkt The net_pkt which buffer will be trimmed
2064  */
2065 void net_pkt_trim_buffer(struct net_pkt *pkt);
2066 
2067 /**
2068  * @brief Remove @a length bytes from tail of packet
2069  *
2070  * @details This function does not take packet cursor into account. It is a
2071  *          helper to remove unneeded bytes from tail of packet (like appended
2072  *          CRC). It takes care of buffer deallocation if removed bytes span
2073  *          whole buffer(s).
2074  *
2075  * @param pkt    Network packet
2076  * @param length Number of bytes to be removed
2077  *
2078  * @retval 0       On success.
2079  * @retval -EINVAL If packet length is shorter than @a length.
2080  */
2081 int net_pkt_remove_tail(struct net_pkt *pkt, size_t length);
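
/* Illustrative sketch: stripping a 4-byte frame check sequence that a driver
 * left at the end of the packet. The trailer length is an assumption.
 *
 *   if (net_pkt_remove_tail(pkt, sizeof(uint32_t)) < 0) {
 *       // packet shorter than the trailer: treat it as malformed
 *   }
 */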
2082 
2083 /**
2084  * @brief Initialize net_pkt cursor
2085  *
2086  * @details This will initialize the net_pkt cursor from its buffer.
2087  *
2088  * @param pkt The net_pkt whose cursor is going to be initialized
2089  */
2090 void net_pkt_cursor_init(struct net_pkt *pkt);
2091 
2092 /**
2093  * @brief Backup net_pkt cursor
2094  *
2095  * @param pkt    The net_pkt whose cursor is going to be backed up
2096  * @param backup The cursor where to backup net_pkt cursor
2097  */
2098 static inline void net_pkt_cursor_backup(struct net_pkt *pkt,
2099 					 struct net_pkt_cursor *backup)
2100 {
2101 	backup->buf = pkt->cursor.buf;
2102 	backup->pos = pkt->cursor.pos;
2103 }
2104 
2105 /**
2106  * @brief Restore net_pkt cursor from a backup
2107  *
2108  * @param pkt    The net_pkt whose cursor is going to be restored
2109  * @param backup The cursor from where to restore net_pkt cursor
2110  */
2111 static inline void net_pkt_cursor_restore(struct net_pkt *pkt,
2112 					  struct net_pkt_cursor *backup)
2113 {
2114 	pkt->cursor.buf = backup->buf;
2115 	pkt->cursor.pos = backup->pos;
2116 }
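
/* Illustrative sketch: peeking at the next byte without disturbing the
 * cursor by saving and restoring it around the read (net_pkt_read_u8() is
 * declared further below).
 *
 *   struct net_pkt_cursor backup;
 *   uint8_t next_byte;
 *
 *   net_pkt_cursor_backup(pkt, &backup);
 *   net_pkt_read_u8(pkt, &next_byte);    // advances the cursor by one byte
 *   net_pkt_cursor_restore(pkt, &backup);
 */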
2117 
2118 /**
2119  * @brief Returns current position of the cursor
2120  *
2121  * @param pkt The net_pkt whose cursor position is going to be returned
2122  *
2123  * @return cursor's position
2124  */
2125 static inline void *net_pkt_cursor_get_pos(struct net_pkt *pkt)
2126 {
2127 	return pkt->cursor.pos;
2128 }
2129 
2130 /**
2131  * @brief Skip some data from a net_pkt
2132  *
2133  * @details net_pkt's cursor should be properly initialized.
2134  *          Cursor position will be updated after the operation.
2135  *          Depending on the value of the pkt->overwrite bit, this function
2136  *          will or will not affect the buffer length. If overwrite is true,
2137  *          it only advances the cursor by the requested length. If it is
2138  *          false, it does the same, but once the cursor reaches the end of
2139  *          the existing data it also increments the buffer length. In that
2140  *          case its behavior is just like net_pkt_write() or
2141  *          net_pkt_memset(), the difference being that it does not affect
2142  *          the buffer content itself (which may then be just garbage).
2143  *
2144  * @param pkt    The net_pkt whose cursor will be updated to skip given
2145  *               amount of data from the buffer.
2146  * @param length Amount of data to skip in the buffer
2147  *
2148  * @return 0 on success, negative errno code otherwise.
2149  */
2150 int net_pkt_skip(struct net_pkt *pkt, size_t length);
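
/* Illustrative sketch: skipping over a 20-byte header before reading the
 * payload. The header length and the payload buffer are assumptions;
 * net_pkt_set_overwrite() is assumed to be available from this header.
 *
 *   uint8_t payload[32];
 *
 *   net_pkt_cursor_init(pkt);
 *   net_pkt_set_overwrite(pkt, true);    // only move over existing data
 *   if (net_pkt_skip(pkt, 20) < 0 ||
 *       net_pkt_read(pkt, payload, sizeof(payload)) < 0) {
 *       return -EMSGSIZE;
 *   }
 */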
2151 
2152 /**
2153  * @brief Memset some data in a net_pkt
2154  *
2155  * @details net_pkt's cursor should be properly initialized and,
2156  *          if needed, positioned using net_pkt_skip.
2157  *          Cursor position will be updated after the operation.
2158  *
2159  * @param pkt    The net_pkt whose buffer to fill starting at the current
2160  *               cursor position.
2161  * @param byte   The byte to write in memory
2162  * @param length Amount of data to memset with given byte
2163  *
2164  * @return 0 on success, negative errno code otherwise.
2165  */
2166 int net_pkt_memset(struct net_pkt *pkt, int byte, size_t length);
2167 
2168 /**
2169  * @brief Copy data from a packet into another one.
2170  *
2171  * @details Both net_pkt cursors should be properly initialized and,
2172  *          if needed, positioned using net_pkt_skip.
2173  *          The cursors will be updated after the operation.
2174  *
2175  * @param pkt_dst Destination network packet.
2176  * @param pkt_src Source network packet.
2177  * @param length  Length of data to be copied.
2178  *
2179  * @return 0 on success, negative errno code otherwise.
2180  */
2181 int net_pkt_copy(struct net_pkt *pkt_dst,
2182 		 struct net_pkt *pkt_src,
2183 		 size_t length);
2184 
2185 /**
2186  * @brief Clone pkt and its buffer. The cloned packet will be allocated from
2187  *        the same pool as the original one.
2188  *
2189  * @param pkt Original pkt to be cloned
2190  * @param timeout Timeout to wait for free buffer
2191  *
2192  * @return NULL if error, cloned packet otherwise.
2193  */
2194 struct net_pkt *net_pkt_clone(struct net_pkt *pkt, k_timeout_t timeout);
2195 
2196 /**
2197  * @brief Clone pkt and its buffer. The cloned packet will be allocated from
2198  *        the RX packet pool.
2199  *
2200  * @param pkt Original pkt to be cloned
2201  * @param timeout Timeout to wait for free buffer
2202  *
2203  * @return NULL if error, cloned packet otherwise.
2204  */
2205 struct net_pkt *net_pkt_rx_clone(struct net_pkt *pkt, k_timeout_t timeout);
2206 
2207 /**
2208  * @brief Clone pkt and increase the refcount of its buffer.
2209  *
2210  * @param pkt Original pkt to be shallow cloned
2211  * @param timeout Timeout to wait for free packet
2212  *
2213  * @return NULL if error, cloned packet otherwise.
2214  */
2215 struct net_pkt *net_pkt_shallow_clone(struct net_pkt *pkt,
2216 				      k_timeout_t timeout);
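
/* Illustrative sketch: keeping a private copy of an incoming packet.
 * net_pkt_clone() duplicates the buffer contents, whereas
 * net_pkt_shallow_clone() only takes a new reference on them; the timeout
 * is an assumption.
 *
 *   struct net_pkt *copy = net_pkt_clone(pkt, K_MSEC(100));
 *
 *   if (copy == NULL) {
 *       return -ENOMEM;
 *   }
 */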
2217 
2218 /**
2219  * @brief Read some data from a net_pkt
2220  *
2221  * @details net_pkt's cursor should be properly initialized and,
2222  *          if needed, positioned using net_pkt_skip.
2223  *          Cursor position will be updated after the operation.
2224  *
2225  * @param pkt    The network packet from where to read some data
2226  * @param data   The destination buffer where to copy the data
2227  * @param length The amount of data to copy
2228  *
2229  * @return 0 on success, negative errno code otherwise.
2230  */
2231 int net_pkt_read(struct net_pkt *pkt, void *data, size_t length);
2232 
2233 /**
2234  * @brief Read a byte (uint8_t) from a net_pkt
2235  *
2236  * @details net_pkt's cursor should be properly initialized and,
2237  *          if needed, positioned using net_pkt_skip.
2238  *          Cursor position will be updated after the operation.
2239  *
2240  * @param pkt  The network packet from where to read
2241  * @param data The destination uint8_t where to copy the data
2242  *
2243  * @return 0 on success, negative errno code otherwise.
2244  */
2245 static inline int net_pkt_read_u8(struct net_pkt *pkt, uint8_t *data)
2246 {
2247 	return net_pkt_read(pkt, data, 1);
2248 }
2249 
2250 /**
2251  * @brief Read uint16_t big endian data from a net_pkt
2252  *
2253  * @details net_pkt's cursor should be properly initialized and,
2254  *          if needed, positioned using net_pkt_skip.
2255  *          Cursor position will be updated after the operation.
2256  *
2257  * @param pkt  The network packet from where to read
2258  * @param data The destination uint16_t where to copy the data
2259  *
2260  * @return 0 on success, negative errno code otherwise.
2261  */
2262 int net_pkt_read_be16(struct net_pkt *pkt, uint16_t *data);
2263 
2264 /**
2265  * @brief Read uint16_t little endian data from a net_pkt
2266  *
2267  * @details net_pkt's cursor should be properly initialized and,
2268  *          if needed, positioned using net_pkt_skip.
2269  *          Cursor position will be updated after the operation.
2270  *
2271  * @param pkt  The network packet from where to read
2272  * @param data The destination uint16_t where to copy the data
2273  *
2274  * @return 0 on success, negative errno code otherwise.
2275  */
2276 int net_pkt_read_le16(struct net_pkt *pkt, uint16_t *data);
2277 
2278 /**
2279  * @brief Read uint32_t big endian data from a net_pkt
2280  *
2281  * @details net_pkt's cursor should be properly initialized and,
2282  *          if needed, positioned using net_pkt_skip.
2283  *          Cursor position will be updated after the operation.
2284  *
2285  * @param pkt  The network packet from where to read
2286  * @param data The destination uint32_t where to copy the data
2287  *
2288  * @return 0 on success, negative errno code otherwise.
2289  */
2290 int net_pkt_read_be32(struct net_pkt *pkt, uint32_t *data);
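
/* Illustrative sketch: parsing a hypothetical type/length/value record (one
 * byte of type, big endian 16-bit length, 32-bit value). The record layout
 * is an assumption used only to demonstrate the readers.
 *
 *   uint8_t type;
 *   uint16_t len;
 *   uint32_t value;
 *
 *   if (net_pkt_read_u8(pkt, &type) < 0 ||
 *       net_pkt_read_be16(pkt, &len) < 0 ||
 *       net_pkt_read_be32(pkt, &value) < 0) {
 *       return -EMSGSIZE;
 *   }
 */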
2291 
2292 /**
2293  * @brief Write data into a net_pkt
2294  *
2295  * @details net_pkt's cursor should be properly initialized and,
2296  *          if needed, positioned using net_pkt_skip.
2297  *          Cursor position will be updated after the operation.
2298  *
2299  * @param pkt    The network packet where to write
2300  * @param data   Data to be written
2301  * @param length Length of the data to be written
2302  *
2303  * @return 0 on success, negative errno code otherwise.
2304  */
2305 int net_pkt_write(struct net_pkt *pkt, const void *data, size_t length);
2306 
2307 /**
2308  * @brief Write a byte (uint8_t) to a net_pkt
2309  *
2310  * @details net_pkt's cursor should be properly initialized and,
2311  *          if needed, positioned using net_pkt_skip.
2312  *          Cursor position will be updated after the operation.
2313  *
2314  * @param pkt  The network packet where to write
2315  * @param data The uint8_t value to write
2316  *
2317  * @return 0 on success, negative errno code otherwise.
2318  */
2319 static inline int net_pkt_write_u8(struct net_pkt *pkt, uint8_t data)
2320 {
2321 	return net_pkt_write(pkt, &data, sizeof(uint8_t));
2322 }
2323 
2324 /**
2325  * @brief Write uint16_t big endian data to a net_pkt
2326  *
2327  * @details net_pkt's cursor should be properly initialized and,
2328  *          if needed, positioned using net_pkt_skip.
2329  *          Cursor position will be updated after the operation.
2330  *
2331  * @param pkt  The network packet where to write
2332  * @param data The uint16_t value in host byte order to write
2333  *
2334  * @return 0 on success, negative errno code otherwise.
2335  */
2336 static inline int net_pkt_write_be16(struct net_pkt *pkt, uint16_t data)
2337 {
2338 	uint16_t data_be16 = htons(data);
2339 
2340 	return net_pkt_write(pkt, &data_be16, sizeof(uint16_t));
2341 }
2342 
2343 /**
2344  * @brief Write uint32_t big endian data to a net_pkt
2345  *
2346  * @details net_pkt's cursor should be properly initialized and,
2347  *          if needed, positioned using net_pkt_skip.
2348  *          Cursor position will be updated after the operation.
2349  *
2350  * @param pkt  The network packet where to write
2351  * @param data The uint32_t value in host byte order to write
2352  *
2353  * @return 0 on success, negative errno code otherwise.
2354  */
2355 static inline int net_pkt_write_be32(struct net_pkt *pkt, uint32_t data)
2356 {
2357 	uint32_t data_be32 = htonl(data);
2358 
2359 	return net_pkt_write(pkt, &data_be32, sizeof(uint32_t));
2360 }
2361 
2362 /**
2363  * @brief Write uint32_t little endian data to a net_pkt
2364  *
2365  * @details net_pkt's cursor should be properly initialized and,
2366  *          if needed, positioned using net_pkt_skip.
2367  *          Cursor position will be updated after the operation.
2368  *
2369  * @param pkt  The network packet where to write
2370  * @param data The uint32_t value in host byte order to write
2371  *
2372  * @return 0 on success, negative errno code otherwise.
2373  */
2374 static inline int net_pkt_write_le32(struct net_pkt *pkt, uint32_t data)
2375 {
2376 	uint32_t data_le32 = sys_cpu_to_le32(data);
2377 
2378 	return net_pkt_write(pkt, &data_le32, sizeof(uint32_t));
2379 }
2380 
2381 /**
2382  * @brief Write uint16_t little endian data to a net_pkt
2383  *
2384  * @details net_pkt's cursor should be properly initialized and,
2385  *          if needed, positioned using net_pkt_skip.
2386  *          Cursor position will be updated after the operation.
2387  *
2388  * @param pkt  The network packet where to write
2389  * @param data The uint16_t value in host byte order to write
2390  *
2391  * @return 0 on success, negative errno code otherwise.
2392  */
2393 static inline int net_pkt_write_le16(struct net_pkt *pkt, uint16_t data)
2394 {
2395 	uint16_t data_le16 = sys_cpu_to_le16(data);
2396 
2397 	return net_pkt_write(pkt, &data_le16, sizeof(uint16_t));
2398 }
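
/* Illustrative sketch mirroring the readers above: serializing the same
 * hypothetical type/length/value record in network byte order.
 *
 *   if (net_pkt_write_u8(pkt, type) < 0 ||
 *       net_pkt_write_be16(pkt, len) < 0 ||
 *       net_pkt_write_be32(pkt, value) < 0) {
 *       return -ENOBUFS;
 *   }
 */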
2399 
2400 /**
2401  * @brief Get the amount of data which can be read from current cursor position
2402  *
2403  * @param pkt Network packet
2404  *
2405  * @return Amount of data which can be read from current pkt cursor
2406  */
2407 size_t net_pkt_remaining_data(struct net_pkt *pkt);
2408 
2409 /**
2410  * @brief Update the overall length of a packet
2411  *
2412  * @details Unlike net_pkt_pull() below, this does not take the packet cursor
2413  *          into account. It is mainly a helper dedicated to the IPv4 and IPv6
2414  *          input functions. It shrinks the overall length to the given value.
2415  *
2416  * @param pkt    Network packet
2417  * @param length The new length of the packet
2418  *
2419  * @return 0 on success, negative errno code otherwise.
2420  */
2421 int net_pkt_update_length(struct net_pkt *pkt, size_t length);
2422 
2423 /**
2424  * @brief Remove data from the packet at current location
2425  *
2426  * @details net_pkt's cursor should be properly initialized and,
2427  *          if needed, properly positioned using net_pkt_skip/read/write.
2428  *          Note that net_pkt's cursor is reset by this function.
2429  *
2430  * @param pkt    Network packet
2431  * @param length Number of bytes to be removed
2432  *
2433  * @return 0 on success, negative errno code otherwise.
2434  */
2435 int net_pkt_pull(struct net_pkt *pkt, size_t length);
2436 
2437 /**
2438  * @brief Get the actual offset in the packet from its cursor
2439  *
2440  * @param pkt Network packet.
2441  *
2442  * @return a valid offset on success, 0 otherwise as the offset cannot
2443  *         be evaluated in that case.
2444  */
2445 uint16_t net_pkt_get_current_offset(struct net_pkt *pkt);
2446 
2447 /**
2448  * @brief Check if a data size could fit contiguously
2449  *
2450  * @details net_pkt's cursor should be properly initialized and,
2451  *          if needed, positioned using net_pkt_skip.
2452  *
2453  * @param pkt  Network packet.
2454  * @param size The size to check for contiguity
2455  *
2456  * @return true if that is the case, false otherwise.
2457  */
2458 bool net_pkt_is_contiguous(struct net_pkt *pkt, size_t size);
2459 
2460 /**
2461  * @brief Get the contiguous buffer space
2462  *
2463  * @param pkt Network packet
2464  *
2465  * @return The available contiguous buffer space in bytes starting from the
2466  *         current cursor position. 0 in case of an error.
2467  */
2468 size_t net_pkt_get_contiguous_len(struct net_pkt *pkt);
2469 
2470 /** @cond INTERNAL_HIDDEN */
2471 
2472 struct net_pkt_data_access {
2473 #if !defined(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)
2474 	void *data;
2475 #endif
2476 	const size_t size;
2477 };
2478 
2479 #if defined(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)
2480 #define NET_PKT_DATA_ACCESS_DEFINE(_name, _type)		\
2481 	struct net_pkt_data_access _name = {			\
2482 		.size = sizeof(_type),				\
2483 	}
2484 
2485 #define NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(_name, _type)	\
2486 	NET_PKT_DATA_ACCESS_DEFINE(_name, _type)
2487 
2488 #else
2489 #define NET_PKT_DATA_ACCESS_DEFINE(_name, _type)		\
2490 	_type _hdr_##_name;					\
2491 	struct net_pkt_data_access _name = {			\
2492 		.data = &_hdr_##_name,				\
2493 		.size = sizeof(_type),				\
2494 	}
2495 
2496 #define NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(_name, _type)	\
2497 	struct net_pkt_data_access _name = {			\
2498 		.data = NULL,					\
2499 		.size = sizeof(_type),				\
2500 	}
2501 
2502 #endif /* CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS */
2503 
2504 /** @endcond */
2505 
2506 /**
2507  * @brief Get data from a network packet in a contiguous way
2508  *
2509  * @details net_pkt's cursor should be properly initialized and,
2510  *          if needed, positioned using net_pkt_skip. Unlike other functions,
2511  *          cursor position will not be updated after the operation.
2512  *
2513  * @param pkt    The network packet from where to get the data.
2514  * @param access A pointer to a valid net_pkt_data_access describing the
2515  *        data to get in a contiguous way.
2516  *
2517  * @return a pointer to the requested contiguous data, NULL otherwise.
2518  */
2519 void *net_pkt_get_data(struct net_pkt *pkt,
2520 		       struct net_pkt_data_access *access);
2521 
2522 /**
2523  * @brief Set contiguous data into a network packet
2524  *
2525  * @details net_pkt's cursor should be properly initialized and,
2526  *          if needed, positioned using net_pkt_skip.
2527  *          Cursor position will be updated after the operation.
2528  *
2529  * @param pkt    The network packet to where the data should be set.
2530  * @param access A pointer to a valid net_pkt_data_access describing the
2531  *        data to set.
2532  *
2533  * @return 0 on success, a negative errno otherwise.
2534  */
2535 int net_pkt_set_data(struct net_pkt *pkt,
2536 		     struct net_pkt_data_access *access);
2537 
2538 /**
2539  * @brief Acknowledge the contiguous data previously taken from a network packet.
2540  *        The packet needs to be set to overwrite mode.
2541  */
2542 static inline int net_pkt_acknowledge_data(struct net_pkt *pkt,
2543 					   struct net_pkt_data_access *access)
2544 {
2545 	return net_pkt_skip(pkt, access->size);
2546 }
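
/* Illustrative sketch of the contiguous data access helpers with a
 * hypothetical header type; struct my_proto_hdr and its "flags" field are
 * assumptions, not part of this API.
 *
 *   NET_PKT_DATA_ACCESS_DEFINE(my_access, struct my_proto_hdr);
 *   struct my_proto_hdr *hdr;
 *
 *   hdr = (struct my_proto_hdr *)net_pkt_get_data(pkt, &my_access);
 *   if (hdr == NULL) {
 *       return -ENOBUFS;
 *   }
 *
 *   hdr->flags |= 0x01;                  // modify the header in place
 *   net_pkt_set_data(pkt, &my_access);   // write the change back
 */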
2547 
2548 /**
2549  * @}
2550  */
2551 
2552 #ifdef __cplusplus
2553 }
2554 #endif
2555 
2556 #endif /* ZEPHYR_INCLUDE_NET_NET_PKT_H_ */
2557