1 /** @file
2  * @brief Network packet buffer descriptor API
3  *
4  * Network data is passed between different parts of the stack via the
5  * net_pkt struct, which carries its payload in net_buf fragments.
6  */
7 
8 /*
9  * Copyright (c) 2016 Intel Corporation
10  *
11  * SPDX-License-Identifier: Apache-2.0
12  */
13 
14 /* Data buffer API - used for all data to/from net */
15 
16 #ifndef ZEPHYR_INCLUDE_NET_NET_PKT_H_
17 #define ZEPHYR_INCLUDE_NET_NET_PKT_H_
18 
19 #include <zephyr/types.h>
20 #include <stdbool.h>
21 
22 #include <zephyr/net_buf.h>
23 
24 #if defined(CONFIG_IEEE802154)
25 #include <zephyr/net/ieee802154_pkt.h>
26 #endif
27 #include <zephyr/net/net_core.h>
28 #include <zephyr/net/net_linkaddr.h>
29 #include <zephyr/net/net_ip.h>
30 #include <zephyr/net/net_if.h>
31 #include <zephyr/net/net_context.h>
32 #include <zephyr/net/net_time.h>
33 #include <zephyr/net/ethernet_vlan.h>
34 #include <zephyr/net/ptp_time.h>
35 
36 #ifdef __cplusplus
37 extern "C" {
38 #endif
39 
40 /**
41  * @brief Network packet management library
42  * @defgroup net_pkt Network Packet Library
43  * @since 1.5
44  * @version 0.8.0
45  * @ingroup networking
46  * @{
47  */
48 
49 struct net_context;
50 
51 /** @cond INTERNAL_HIDDEN */
52 
53 #if defined(CONFIG_NET_PKT_ALLOC_STATS)
54 struct net_pkt_alloc_stats {
55 	uint64_t alloc_sum;
56 	uint64_t time_sum;
57 	uint32_t count;
58 };
59 
60 struct net_pkt_alloc_stats_slab {
61 	struct net_pkt_alloc_stats ok;
62 	struct net_pkt_alloc_stats fail;
63 	struct k_mem_slab *slab;
64 };
65 
66 #define NET_PKT_ALLOC_STATS_DEFINE(alloc_name, slab_name)		  \
67 	STRUCT_SECTION_ITERABLE(net_pkt_alloc_stats_slab, alloc_name) = { \
68 		.slab = &slab_name,					  \
69 	}
70 
71 #else
72 #define NET_PKT_ALLOC_STATS_DEFINE(name, slab)
73 #endif /* CONFIG_NET_PKT_ALLOC_STATS */
74 
75 /* buffer cursor used in net_pkt */
76 struct net_pkt_cursor {
77 	/** net_buf currently pointed to by the cursor */
78 	struct net_buf *buf;
79 	/** Current position in the data buffer of the net_buf */
80 	uint8_t *pos;
81 };
82 
83 /** @endcond */
84 
85 /**
86  * @brief Network packet.
87  *
88  * Note that if you add new fields into net_pkt, remember to update
89  * the net_pkt_clone() function.
90  */
91 struct net_pkt {
92 	/**
93 	 * The FIFO is used by the RX/TX threads and by the socket layer.
94 	 * The net_pkt is queued via the FIFO to the processing thread.
95 	 */
96 	intptr_t fifo;
97 
98 	/** Slab from which this net_pkt was allocated */
99 	struct k_mem_slab *slab;
100 
101 	/** buffer holding the packet */
102 	union {
103 		struct net_buf *frags;   /**< buffer fragment */
104 		struct net_buf *buffer;  /**< alias to a buffer fragment */
105 	};
106 
107 	/** Internal buffer iterator used for reading/writing */
108 	struct net_pkt_cursor cursor;
109 
110 	/** Network connection context */
111 	struct net_context *context;
112 
113 	/** Network interface */
114 	struct net_if *iface;
115 
116 	/** @cond ignore */
117 
118 #if defined(CONFIG_NET_TCP)
119 	/** Allow placing the packet into sys_slist_t */
120 	sys_snode_t next;
121 #endif
122 #if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
123 	struct net_if *orig_iface; /* Original network interface */
124 #endif
125 
126 #if defined(CONFIG_NET_PKT_TIMESTAMP) || defined(CONFIG_NET_PKT_TXTIME)
127 	/**
128 	 * TX or RX timestamp if available
129 	 *
130 	 * For packets that have been sent over the medium, the timestamp refers
131 	 * to the time the message timestamp point was encountered at the
132 	 * reference plane.
133 	 *
134 	 * Unsent packets can be scheduled by setting the timestamp to a future
135 	 * point in time.
136 	 *
137 	 * All timestamps refer to the network subsystem's local clock.
138 	 *
139 	 * See @ref net_ptp_time for definitions of local clock, message
140 	 * timestamp point and reference plane. See @ref net_time_t for
141 	 * semantics of the network reference clock.
142 	 *
143 	 * TODO: Replace with net_time_t to decouple from PTP.
144 	 */
145 	struct net_ptp_time timestamp;
146 #endif
147 
148 #if defined(CONFIG_NET_PKT_RXTIME_STATS) || defined(CONFIG_NET_PKT_TXTIME_STATS) || \
149 	defined(CONFIG_TRACING_NET_CORE)
150 	struct {
151 		/** Create time in cycles */
152 		uint32_t create_time;
153 
154 #if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL) || \
155 	defined(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)
156 		/** Collect extra statistics for net_pkt processing
157 		 * from various points in the IP stack. See networking
158 		 * documentation where these points are located and how
159 		 * to interpret the results.
160 		 */
161 		struct {
162 			uint32_t stat[NET_PKT_DETAIL_STATS_COUNT];
163 			int count;
164 		} detail;
165 #endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL ||
166 	  CONFIG_NET_PKT_RXTIME_STATS_DETAIL */
167 	};
168 #endif /* CONFIG_NET_PKT_RXTIME_STATS || CONFIG_NET_PKT_TXTIME_STATS */
169 
170 #if defined(CONFIG_NET_PKT_ALLOC_STATS)
171 	struct net_pkt_alloc_stats_slab *alloc_stats;
172 #endif /* CONFIG_NET_PKT_ALLOC_STATS */
173 
174 	/** Reference counter */
175 	atomic_t atomic_ref;
176 
177 	/* Filled by layer 2 when network packet is received. */
178 	struct net_linkaddr lladdr_src;
179 	struct net_linkaddr lladdr_dst;
180 	uint16_t ll_proto_type;
181 
182 #if defined(CONFIG_NET_IP)
183 	uint8_t ip_hdr_len;	/* pre-filled in order to avoid func call */
184 #endif
185 
186 	uint8_t overwrite : 1;	 /* Is packet content being overwritten? */
187 	uint8_t eof : 1;	 /* Last packet before EOF */
188 	uint8_t ptp_pkt : 1;	 /* For outgoing packet: is this packet
189 				  * an L2 PTP packet.
190 				  * Used only if defined (CONFIG_NET_L2_PTP)
191 				  */
192 	uint8_t forwarding : 1;	 /* Are we forwarding this pkt
193 				  * Used only if defined(CONFIG_NET_ROUTE)
194 				  */
195 	uint8_t family : 3;	 /* Address family, see net_ip.h */
196 
197 	/* bitfield byte alignment boundary */
198 
199 #if defined(CONFIG_NET_IPV4_ACD)
200 	uint8_t ipv4_acd_arp_msg : 1;  /* Is this pkt an IPv4 conflict detection ARP
201 					* message.
202 					* Note: family needs to be
203 					* AF_INET.
204 					*/
205 #endif
206 #if defined(CONFIG_NET_LLDP)
207 	uint8_t lldp_pkt : 1; /* Is this pkt an LLDP message.
208 			       * Note: family needs to be
209 			       * AF_UNSPEC.
210 			       */
211 #endif
212 	uint8_t ppp_msg : 1; /* This is a PPP message */
213 	uint8_t captured : 1;	  /* Set to 1 if this packet is already being
214 				   * captured
215 				   */
216 	uint8_t l2_bridged : 1;	  /* set to 1 if this packet comes from a bridge
217 				   * and already contains its L2 header to be
218 				   * preserved. Useful only if
219 				   * defined(CONFIG_NET_ETHERNET_BRIDGE).
220 				   */
221 	uint8_t l2_processed : 1; /* Set to 1 if this packet has already been
222 				   * processed by the L2
223 				   */
224 	uint8_t chksum_done : 1; /* Checksum has already been computed for
225 				  * the packet.
226 				  */
227 #if defined(CONFIG_NET_IP_FRAGMENT)
228 	uint8_t ip_reassembled : 1; /* Packet is a reassembled IP packet. */
229 #endif
230 #if defined(CONFIG_NET_PKT_TIMESTAMP)
231 	uint8_t tx_timestamping : 1; /** Timestamp transmitted packet */
232 	uint8_t rx_timestamping : 1; /** Timestamp received packet */
233 #endif
234 	/* bitfield byte alignment boundary */
235 
236 #if defined(CONFIG_NET_IP)
237 	union {
238 		/* IPv6 hop limit or IPv4 ttl for this network packet.
239 		 * The value is shared between IPv6 and IPv4.
240 		 */
241 #if defined(CONFIG_NET_IPV6)
242 		uint8_t ipv6_hop_limit;
243 #endif
244 #if defined(CONFIG_NET_IPV4)
245 		uint8_t ipv4_ttl;
246 #endif
247 	};
248 
249 	union {
250 #if defined(CONFIG_NET_IPV4)
251 		uint8_t ipv4_opts_len; /* length of IPv4 header options */
252 #endif
253 #if defined(CONFIG_NET_IPV6)
254 		uint16_t ipv6_ext_len; /* length of extension headers */
255 #endif
256 	};
257 
258 #if defined(CONFIG_NET_IP_FRAGMENT)
259 	union {
260 #if defined(CONFIG_NET_IPV4_FRAGMENT)
261 		struct {
262 			uint16_t flags;		/* Fragment offset and M (More Fragment) flag */
263 			uint16_t id;		/* Fragment ID */
264 		} ipv4_fragment;
265 #endif /* CONFIG_NET_IPV4_FRAGMENT */
266 #if defined(CONFIG_NET_IPV6_FRAGMENT)
267 		struct {
268 			uint16_t flags;		/* Fragment offset and M (More Fragment) flag */
269 			uint32_t id;		/* Fragment id */
270 			uint16_t hdr_start;	/* Offset where the fragment header starts */
271 		} ipv6_fragment;
272 #endif /* CONFIG_NET_IPV6_FRAGMENT */
273 	};
274 #endif /* CONFIG_NET_IP_FRAGMENT */
275 
276 #if defined(CONFIG_NET_IPV6)
277 	/* Offset, from the start of the IPv6 packet, of the last header
278 	 * before the payload data. Note that this value should be
279 	 * updated by whoever adds IPv6 extension headers to the
280 	 * network packet.
281 	 */
282 	uint16_t ipv6_prev_hdr_start;
283 
284 	uint8_t ipv6_ext_opt_len; /* IPv6 ND option length */
285 	uint8_t ipv6_next_hdr;	/* What is the very first next header */
286 #endif /* CONFIG_NET_IPV6 */
287 
288 #if defined(CONFIG_NET_IP_DSCP_ECN)
289 	/** IPv4/IPv6 Differentiated Services Code Point value. */
290 	uint8_t ip_dscp : 6;
291 
292 	/** IPv4/IPv6 Explicit Congestion Notification value. */
293 	uint8_t ip_ecn : 2;
294 #endif /* CONFIG_NET_IP_DSCP_ECN */
295 #endif /* CONFIG_NET_IP */
296 
297 #if defined(CONFIG_NET_VLAN)
298 	/* VLAN TCI (Tag Control Information). This contains the Priority
299 	 * Code Point (PCP), Drop Eligible Indicator (DEI) and VLAN
300 	 * Identifier (VID, called more commonly VLAN tag). This value is
301 	 * kept in host byte order.
302 	 */
303 	uint16_t vlan_tci;
304 #endif /* CONFIG_NET_VLAN */
305 
306 #if defined(NET_PKT_HAS_CONTROL_BLOCK)
307 	/* TODO: Evolve this into a union of orthogonal
308 	 *       control block declarations if further L2
309 	 *       stacks require L2-specific attributes.
310 	 */
311 #if defined(CONFIG_IEEE802154)
312 	/* The following structure requires a 4-byte alignment
313 	 * boundary to avoid padding.
314 	 */
315 	struct net_pkt_cb_ieee802154 cb;
316 #endif /* CONFIG_IEEE802154 */
317 #endif /* NET_PKT_HAS_CONTROL_BLOCK */
318 
319 	/** Network packet priority; can be left unset, in which case the
320 	 * packet is not prioritised.
321 	 */
322 	uint8_t priority;
323 
324 #if defined(CONFIG_NET_OFFLOAD) || defined(CONFIG_NET_L2_IPIP)
325 	/* Remote address of the received packet. This is only used by
326 	 * network interfaces with an offloaded TCP/IP stack, or if we
327 	 * have network tunneling in use.
328 	 */
329 	union {
330 		struct sockaddr remote;
331 
332 		/* This makes sure that there is enough storage to hold
333 		 * the address struct. The value is accessed via the
334 		 * remote member.
335 		 */
336 		struct sockaddr_storage remote_storage;
337 	};
338 #endif /* CONFIG_NET_OFFLOAD */
339 
340 #if defined(CONFIG_NET_CAPTURE_COOKED_MODE)
341 	/* Tell the capture API that this is a captured packet */
342 	uint8_t cooked_mode_pkt : 1;
343 #endif /* CONFIG_NET_CAPTURE_COOKED_MODE */
344 
345 #if defined(CONFIG_NET_IPV4_PMTU)
346 	/* Path MTU needed for this destination address */
347 	uint8_t ipv4_pmtu : 1;
348 #endif /* CONFIG_NET_IPV4_PMTU */
349 
350 	/** @endcond */
351 };
352 
353 /** @cond ignore */
354 
355 /* The real link-layer address of the interface */
356 static inline struct net_linkaddr *net_pkt_lladdr_if(struct net_pkt *pkt)
357 {
358 	return net_if_get_link_addr(pkt->iface);
359 }
360 
361 static inline struct net_context *net_pkt_context(struct net_pkt *pkt)
362 {
363 	return pkt->context;
364 }
365 
366 static inline void net_pkt_set_context(struct net_pkt *pkt,
367 				       struct net_context *ctx)
368 {
369 	pkt->context = ctx;
370 }
371 
372 static inline struct net_if *net_pkt_iface(struct net_pkt *pkt)
373 {
374 	return pkt->iface;
375 }
376 
377 static inline void net_pkt_set_iface(struct net_pkt *pkt, struct net_if *iface)
378 {
379 	pkt->iface = iface;
380 
381 	/* If the network interface is set in pkt, then also set the type of
382 	 * the network address that is stored in pkt. This is done here so
383 	 * that the address type is properly set and is not forgotten.
384 	 */
385 	if (iface) {
386 		uint8_t type = net_if_get_link_addr(iface)->type;
387 
388 		pkt->lladdr_src.type = type;
389 		pkt->lladdr_dst.type = type;
390 	}
391 }
392 
393 static inline struct net_if *net_pkt_orig_iface(struct net_pkt *pkt)
394 {
395 #if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
396 	return pkt->orig_iface;
397 #else
398 	return pkt->iface;
399 #endif
400 }
401 
402 static inline void net_pkt_set_orig_iface(struct net_pkt *pkt,
403 					  struct net_if *iface)
404 {
405 #if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
406 	pkt->orig_iface = iface;
407 #else
408 	ARG_UNUSED(pkt);
409 	ARG_UNUSED(iface);
410 #endif
411 }
412 
413 static inline uint8_t net_pkt_family(struct net_pkt *pkt)
414 {
415 	return pkt->family;
416 }
417 
418 static inline void net_pkt_set_family(struct net_pkt *pkt, uint8_t family)
419 {
420 	pkt->family = family;
421 }
422 
423 static inline bool net_pkt_is_ptp(struct net_pkt *pkt)
424 {
425 	return !!(pkt->ptp_pkt);
426 }
427 
428 static inline void net_pkt_set_ptp(struct net_pkt *pkt, bool is_ptp)
429 {
430 	pkt->ptp_pkt = is_ptp;
431 }
432 
433 static inline bool net_pkt_is_tx_timestamping(struct net_pkt *pkt)
434 {
435 #if defined(CONFIG_NET_PKT_TIMESTAMP)
436 	return !!(pkt->tx_timestamping);
437 #else
438 	ARG_UNUSED(pkt);
439 
440 	return false;
441 #endif
442 }
443 
444 static inline void net_pkt_set_tx_timestamping(struct net_pkt *pkt, bool is_timestamping)
445 {
446 #if defined(CONFIG_NET_PKT_TIMESTAMP)
447 	pkt->tx_timestamping = is_timestamping;
448 #else
449 	ARG_UNUSED(pkt);
450 	ARG_UNUSED(is_timestamping);
451 #endif
452 }
453 
454 static inline bool net_pkt_is_rx_timestamping(struct net_pkt *pkt)
455 {
456 #if defined(CONFIG_NET_PKT_TIMESTAMP)
457 	return !!(pkt->rx_timestamping);
458 #else
459 	ARG_UNUSED(pkt);
460 
461 	return false;
462 #endif
463 }
464 
465 static inline void net_pkt_set_rx_timestamping(struct net_pkt *pkt, bool is_timestamping)
466 {
467 #if defined(CONFIG_NET_PKT_TIMESTAMP)
468 	pkt->rx_timestamping = is_timestamping;
469 #else
470 	ARG_UNUSED(pkt);
471 	ARG_UNUSED(is_timestamping);
472 #endif
473 }
474 
475 static inline bool net_pkt_is_captured(struct net_pkt *pkt)
476 {
477 	return !!(pkt->captured);
478 }
479 
480 static inline void net_pkt_set_captured(struct net_pkt *pkt, bool is_captured)
481 {
482 	pkt->captured = is_captured;
483 }
484 
485 static inline bool net_pkt_is_l2_bridged(struct net_pkt *pkt)
486 {
487 	return IS_ENABLED(CONFIG_NET_ETHERNET_BRIDGE) ? !!(pkt->l2_bridged) : 0;
488 }
489 
490 static inline void net_pkt_set_l2_bridged(struct net_pkt *pkt, bool is_l2_bridged)
491 {
492 	if (IS_ENABLED(CONFIG_NET_ETHERNET_BRIDGE)) {
493 		pkt->l2_bridged = is_l2_bridged;
494 	}
495 }
496 
497 static inline bool net_pkt_is_l2_processed(struct net_pkt *pkt)
498 {
499 	return !!(pkt->l2_processed);
500 }
501 
502 static inline void net_pkt_set_l2_processed(struct net_pkt *pkt,
503 					    bool is_l2_processed)
504 {
505 	pkt->l2_processed = is_l2_processed;
506 }
507 
508 static inline bool net_pkt_is_chksum_done(struct net_pkt *pkt)
509 {
510 	return !!(pkt->chksum_done);
511 }
512 
513 static inline void net_pkt_set_chksum_done(struct net_pkt *pkt,
514 					   bool is_chksum_done)
515 {
516 	pkt->chksum_done = is_chksum_done;
517 }
518 
519 static inline uint8_t net_pkt_ip_hdr_len(struct net_pkt *pkt)
520 {
521 #if defined(CONFIG_NET_IP)
522 	return pkt->ip_hdr_len;
523 #else
524 	ARG_UNUSED(pkt);
525 
526 	return 0;
527 #endif
528 }
529 
530 static inline void net_pkt_set_ip_hdr_len(struct net_pkt *pkt, uint8_t len)
531 {
532 #if defined(CONFIG_NET_IP)
533 	pkt->ip_hdr_len = len;
534 #else
535 	ARG_UNUSED(pkt);
536 	ARG_UNUSED(len);
537 #endif
538 }
539 
540 static inline uint8_t net_pkt_ip_dscp(struct net_pkt *pkt)
541 {
542 #if defined(CONFIG_NET_IP_DSCP_ECN)
543 	return pkt->ip_dscp;
544 #else
545 	ARG_UNUSED(pkt);
546 
547 	return 0;
548 #endif
549 }
550 
551 static inline void net_pkt_set_ip_dscp(struct net_pkt *pkt, uint8_t dscp)
552 {
553 #if defined(CONFIG_NET_IP_DSCP_ECN)
554 	pkt->ip_dscp = dscp;
555 #else
556 	ARG_UNUSED(pkt);
557 	ARG_UNUSED(dscp);
558 #endif
559 }
560 
561 static inline uint8_t net_pkt_ip_ecn(struct net_pkt *pkt)
562 {
563 #if defined(CONFIG_NET_IP_DSCP_ECN)
564 	return pkt->ip_ecn;
565 #else
566 	ARG_UNUSED(pkt);
567 
568 	return 0;
569 #endif
570 }
571 
572 static inline void net_pkt_set_ip_ecn(struct net_pkt *pkt, uint8_t ecn)
573 {
574 #if defined(CONFIG_NET_IP_DSCP_ECN)
575 	pkt->ip_ecn = ecn;
576 #else
577 	ARG_UNUSED(pkt);
578 	ARG_UNUSED(ecn);
579 #endif
580 }
581 
582 static inline uint8_t net_pkt_eof(struct net_pkt *pkt)
583 {
584 	return pkt->eof;
585 }
586 
587 static inline void net_pkt_set_eof(struct net_pkt *pkt, bool eof)
588 {
589 	pkt->eof = eof;
590 }
591 
592 static inline bool net_pkt_forwarding(struct net_pkt *pkt)
593 {
594 	return !!(pkt->forwarding);
595 }
596 
597 static inline void net_pkt_set_forwarding(struct net_pkt *pkt, bool forward)
598 {
599 	pkt->forwarding = forward;
600 }
601 
602 #if defined(CONFIG_NET_IPV4)
603 static inline uint8_t net_pkt_ipv4_ttl(struct net_pkt *pkt)
604 {
605 	return pkt->ipv4_ttl;
606 }
607 
608 static inline void net_pkt_set_ipv4_ttl(struct net_pkt *pkt,
609 					uint8_t ttl)
610 {
611 	pkt->ipv4_ttl = ttl;
612 }
613 
614 static inline uint8_t net_pkt_ipv4_opts_len(struct net_pkt *pkt)
615 {
616 	return pkt->ipv4_opts_len;
617 }
618 
619 static inline void net_pkt_set_ipv4_opts_len(struct net_pkt *pkt,
620 					     uint8_t opts_len)
621 {
622 	pkt->ipv4_opts_len = opts_len;
623 }
624 #else
625 static inline uint8_t net_pkt_ipv4_ttl(struct net_pkt *pkt)
626 {
627 	ARG_UNUSED(pkt);
628 
629 	return 0;
630 }
631 
632 static inline void net_pkt_set_ipv4_ttl(struct net_pkt *pkt,
633 					uint8_t ttl)
634 {
635 	ARG_UNUSED(pkt);
636 	ARG_UNUSED(ttl);
637 }
638 
639 static inline uint8_t net_pkt_ipv4_opts_len(struct net_pkt *pkt)
640 {
641 	ARG_UNUSED(pkt);
642 	return 0;
643 }
644 
645 static inline void net_pkt_set_ipv4_opts_len(struct net_pkt *pkt,
646 					     uint8_t opts_len)
647 {
648 	ARG_UNUSED(pkt);
649 	ARG_UNUSED(opts_len);
650 }
651 #endif
652 
653 #if defined(CONFIG_NET_IPV6)
654 static inline uint8_t net_pkt_ipv6_ext_opt_len(struct net_pkt *pkt)
655 {
656 	return pkt->ipv6_ext_opt_len;
657 }
658 
659 static inline void net_pkt_set_ipv6_ext_opt_len(struct net_pkt *pkt,
660 						uint8_t len)
661 {
662 	pkt->ipv6_ext_opt_len = len;
663 }
664 
665 static inline uint8_t net_pkt_ipv6_next_hdr(struct net_pkt *pkt)
666 {
667 	return pkt->ipv6_next_hdr;
668 }
669 
670 static inline void net_pkt_set_ipv6_next_hdr(struct net_pkt *pkt,
671 					     uint8_t next_hdr)
672 {
673 	pkt->ipv6_next_hdr = next_hdr;
674 }
675 
676 static inline uint16_t net_pkt_ipv6_ext_len(struct net_pkt *pkt)
677 {
678 	return pkt->ipv6_ext_len;
679 }
680 
681 static inline void net_pkt_set_ipv6_ext_len(struct net_pkt *pkt, uint16_t len)
682 {
683 	pkt->ipv6_ext_len = len;
684 }
685 
686 static inline uint16_t net_pkt_ipv6_hdr_prev(struct net_pkt *pkt)
687 {
688 	return pkt->ipv6_prev_hdr_start;
689 }
690 
691 static inline void net_pkt_set_ipv6_hdr_prev(struct net_pkt *pkt,
692 					     uint16_t offset)
693 {
694 	pkt->ipv6_prev_hdr_start = offset;
695 }
696 
697 static inline uint8_t net_pkt_ipv6_hop_limit(struct net_pkt *pkt)
698 {
699 	return pkt->ipv6_hop_limit;
700 }
701 
702 static inline void net_pkt_set_ipv6_hop_limit(struct net_pkt *pkt,
703 					      uint8_t hop_limit)
704 {
705 	pkt->ipv6_hop_limit = hop_limit;
706 }
707 #else /* CONFIG_NET_IPV6 */
708 static inline uint8_t net_pkt_ipv6_ext_opt_len(struct net_pkt *pkt)
709 {
710 	ARG_UNUSED(pkt);
711 
712 	return 0;
713 }
714 
715 static inline void net_pkt_set_ipv6_ext_opt_len(struct net_pkt *pkt,
716 						uint8_t len)
717 {
718 	ARG_UNUSED(pkt);
719 	ARG_UNUSED(len);
720 }
721 
722 static inline uint8_t net_pkt_ipv6_next_hdr(struct net_pkt *pkt)
723 {
724 	ARG_UNUSED(pkt);
725 
726 	return 0;
727 }
728 
729 static inline void net_pkt_set_ipv6_next_hdr(struct net_pkt *pkt,
730 					     uint8_t next_hdr)
731 {
732 	ARG_UNUSED(pkt);
733 	ARG_UNUSED(next_hdr);
734 }
735 
736 static inline uint16_t net_pkt_ipv6_ext_len(struct net_pkt *pkt)
737 {
738 	ARG_UNUSED(pkt);
739 
740 	return 0;
741 }
742 
743 static inline void net_pkt_set_ipv6_ext_len(struct net_pkt *pkt, uint16_t len)
744 {
745 	ARG_UNUSED(pkt);
746 	ARG_UNUSED(len);
747 }
748 
749 static inline uint16_t net_pkt_ipv6_hdr_prev(struct net_pkt *pkt)
750 {
751 	ARG_UNUSED(pkt);
752 
753 	return 0;
754 }
755 
756 static inline void net_pkt_set_ipv6_hdr_prev(struct net_pkt *pkt,
757 					     uint16_t offset)
758 {
759 	ARG_UNUSED(pkt);
760 	ARG_UNUSED(offset);
761 }
762 
763 static inline uint8_t net_pkt_ipv6_hop_limit(struct net_pkt *pkt)
764 {
765 	ARG_UNUSED(pkt);
766 
767 	return 0;
768 }
769 
770 static inline void net_pkt_set_ipv6_hop_limit(struct net_pkt *pkt,
771 					      uint8_t hop_limit)
772 {
773 	ARG_UNUSED(pkt);
774 	ARG_UNUSED(hop_limit);
775 }
776 #endif /* CONFIG_NET_IPV6 */
777 
778 static inline uint16_t net_pkt_ip_opts_len(struct net_pkt *pkt)
779 {
780 #if defined(CONFIG_NET_IPV6)
781 	return pkt->ipv6_ext_len;
782 #elif defined(CONFIG_NET_IPV4)
783 	return pkt->ipv4_opts_len;
784 #else
785 	ARG_UNUSED(pkt);
786 
787 	return 0;
788 #endif
789 }
790 
791 #if defined(CONFIG_NET_IPV4_PMTU)
792 static inline bool net_pkt_ipv4_pmtu(struct net_pkt *pkt)
793 {
794 	return !!pkt->ipv4_pmtu;
795 }
796 
797 static inline void net_pkt_set_ipv4_pmtu(struct net_pkt *pkt, bool value)
798 {
799 	pkt->ipv4_pmtu = value;
800 }
801 #else
802 static inline bool net_pkt_ipv4_pmtu(struct net_pkt *pkt)
803 {
804 	ARG_UNUSED(pkt);
805 
806 	return false;
807 }
808 
809 static inline void net_pkt_set_ipv4_pmtu(struct net_pkt *pkt, bool value)
810 {
811 	ARG_UNUSED(pkt);
812 	ARG_UNUSED(value);
813 }
814 #endif /* CONFIG_NET_IPV4_PMTU */
815 
816 #if defined(CONFIG_NET_IPV4_FRAGMENT)
817 static inline uint16_t net_pkt_ipv4_fragment_offset(struct net_pkt *pkt)
818 {
819 	return (pkt->ipv4_fragment.flags & NET_IPV4_FRAGH_OFFSET_MASK) * 8;
820 }
821 
822 static inline bool net_pkt_ipv4_fragment_more(struct net_pkt *pkt)
823 {
824 	return (pkt->ipv4_fragment.flags & NET_IPV4_MORE_FRAG_MASK) != 0;
825 }
826 
827 static inline void net_pkt_set_ipv4_fragment_flags(struct net_pkt *pkt, uint16_t flags)
828 {
829 	pkt->ipv4_fragment.flags = flags;
830 }
831 
832 static inline uint32_t net_pkt_ipv4_fragment_id(struct net_pkt *pkt)
833 {
834 	return pkt->ipv4_fragment.id;
835 }
836 
837 static inline void net_pkt_set_ipv4_fragment_id(struct net_pkt *pkt, uint32_t id)
838 {
839 	pkt->ipv4_fragment.id = id;
840 }
841 #else /* CONFIG_NET_IPV4_FRAGMENT */
842 static inline uint16_t net_pkt_ipv4_fragment_offset(struct net_pkt *pkt)
843 {
844 	ARG_UNUSED(pkt);
845 
846 	return 0;
847 }
848 
849 static inline bool net_pkt_ipv4_fragment_more(struct net_pkt *pkt)
850 {
851 	ARG_UNUSED(pkt);
852 
853 	return 0;
854 }
855 
856 static inline void net_pkt_set_ipv4_fragment_flags(struct net_pkt *pkt, uint16_t flags)
857 {
858 	ARG_UNUSED(pkt);
859 	ARG_UNUSED(flags);
860 }
861 
862 static inline uint32_t net_pkt_ipv4_fragment_id(struct net_pkt *pkt)
863 {
864 	ARG_UNUSED(pkt);
865 
866 	return 0;
867 }
868 
869 static inline void net_pkt_set_ipv4_fragment_id(struct net_pkt *pkt, uint32_t id)
870 {
871 	ARG_UNUSED(pkt);
872 	ARG_UNUSED(id);
873 }
874 #endif /* CONFIG_NET_IPV4_FRAGMENT */
875 
876 #if defined(CONFIG_NET_IPV6_FRAGMENT)
877 static inline uint16_t net_pkt_ipv6_fragment_start(struct net_pkt *pkt)
878 {
879 	return pkt->ipv6_fragment.hdr_start;
880 }
881 
882 static inline void net_pkt_set_ipv6_fragment_start(struct net_pkt *pkt,
883 						   uint16_t start)
884 {
885 	pkt->ipv6_fragment.hdr_start = start;
886 }
887 
888 static inline uint16_t net_pkt_ipv6_fragment_offset(struct net_pkt *pkt)
889 {
890 	return pkt->ipv6_fragment.flags & NET_IPV6_FRAGH_OFFSET_MASK;
891 }
892 static inline bool net_pkt_ipv6_fragment_more(struct net_pkt *pkt)
893 {
894 	return (pkt->ipv6_fragment.flags & 0x01) != 0;
895 }
896 
897 static inline void net_pkt_set_ipv6_fragment_flags(struct net_pkt *pkt,
898 						   uint16_t flags)
899 {
900 	pkt->ipv6_fragment.flags = flags;
901 }
902 
903 static inline uint32_t net_pkt_ipv6_fragment_id(struct net_pkt *pkt)
904 {
905 	return pkt->ipv6_fragment.id;
906 }
907 
908 static inline void net_pkt_set_ipv6_fragment_id(struct net_pkt *pkt,
909 						uint32_t id)
910 {
911 	pkt->ipv6_fragment.id = id;
912 }
913 #else /* CONFIG_NET_IPV6_FRAGMENT */
914 static inline uint16_t net_pkt_ipv6_fragment_start(struct net_pkt *pkt)
915 {
916 	ARG_UNUSED(pkt);
917 
918 	return 0;
919 }
920 
921 static inline void net_pkt_set_ipv6_fragment_start(struct net_pkt *pkt,
922 						   uint16_t start)
923 {
924 	ARG_UNUSED(pkt);
925 	ARG_UNUSED(start);
926 }
927 
928 static inline uint16_t net_pkt_ipv6_fragment_offset(struct net_pkt *pkt)
929 {
930 	ARG_UNUSED(pkt);
931 
932 	return 0;
933 }
934 
935 static inline bool net_pkt_ipv6_fragment_more(struct net_pkt *pkt)
936 {
937 	ARG_UNUSED(pkt);
938 
939 	return 0;
940 }
941 
942 static inline void net_pkt_set_ipv6_fragment_flags(struct net_pkt *pkt,
943 						   uint16_t flags)
944 {
945 	ARG_UNUSED(pkt);
946 	ARG_UNUSED(flags);
947 }
948 
949 static inline uint32_t net_pkt_ipv6_fragment_id(struct net_pkt *pkt)
950 {
951 	ARG_UNUSED(pkt);
952 
953 	return 0;
954 }
955 
956 static inline void net_pkt_set_ipv6_fragment_id(struct net_pkt *pkt,
957 						uint32_t id)
958 {
959 	ARG_UNUSED(pkt);
960 	ARG_UNUSED(id);
961 }
962 #endif /* CONFIG_NET_IPV6_FRAGMENT */
963 
964 #if defined(CONFIG_NET_IP_FRAGMENT)
965 static inline bool net_pkt_is_ip_reassembled(struct net_pkt *pkt)
966 {
967 	return !!(pkt->ip_reassembled);
968 }
969 
970 static inline void net_pkt_set_ip_reassembled(struct net_pkt *pkt,
971 					      bool reassembled)
972 {
973 	pkt->ip_reassembled = reassembled;
974 }
975 #else /* CONFIG_NET_IP_FRAGMENT */
976 static inline bool net_pkt_is_ip_reassembled(struct net_pkt *pkt)
977 {
978 	ARG_UNUSED(pkt);
979 
980 	return false;
981 }
982 
983 static inline void net_pkt_set_ip_reassembled(struct net_pkt *pkt,
984 					      bool reassembled)
985 {
986 	ARG_UNUSED(pkt);
987 	ARG_UNUSED(reassembled);
988 }
989 #endif /* CONFIG_NET_IP_FRAGMENT */
990 
991 static inline uint8_t net_pkt_priority(struct net_pkt *pkt)
992 {
993 	return pkt->priority;
994 }
995 
996 static inline void net_pkt_set_priority(struct net_pkt *pkt,
997 					uint8_t priority)
998 {
999 	pkt->priority = priority;
1000 }
1001 
1002 #if defined(CONFIG_NET_CAPTURE_COOKED_MODE)
1003 static inline bool net_pkt_is_cooked_mode(struct net_pkt *pkt)
1004 {
1005 	return pkt->cooked_mode_pkt;
1006 }
1007 
1008 static inline void net_pkt_set_cooked_mode(struct net_pkt *pkt, bool value)
1009 {
1010 	pkt->cooked_mode_pkt = value;
1011 }
1012 #else
1013 static inline bool net_pkt_is_cooked_mode(struct net_pkt *pkt)
1014 {
1015 	ARG_UNUSED(pkt);
1016 
1017 	return false;
1018 }
1019 
1020 static inline void net_pkt_set_cooked_mode(struct net_pkt *pkt, bool value)
1021 {
1022 	ARG_UNUSED(pkt);
1023 	ARG_UNUSED(value);
1024 }
1025 #endif /* CONFIG_NET_CAPTURE_COOKED_MODE */
1026 
1027 #if defined(CONFIG_NET_VLAN)
1028 static inline uint16_t net_pkt_vlan_tag(struct net_pkt *pkt)
1029 {
1030 	return net_eth_vlan_get_vid(pkt->vlan_tci);
1031 }
1032 
1033 static inline void net_pkt_set_vlan_tag(struct net_pkt *pkt, uint16_t tag)
1034 {
1035 	pkt->vlan_tci = net_eth_vlan_set_vid(pkt->vlan_tci, tag);
1036 }
1037 
1038 static inline uint8_t net_pkt_vlan_priority(struct net_pkt *pkt)
1039 {
1040 	return net_eth_vlan_get_pcp(pkt->vlan_tci);
1041 }
1042 
1043 static inline void net_pkt_set_vlan_priority(struct net_pkt *pkt,
1044 					     uint8_t priority)
1045 {
1046 	pkt->vlan_tci = net_eth_vlan_set_pcp(pkt->vlan_tci, priority);
1047 }
1048 
1049 static inline bool net_pkt_vlan_dei(struct net_pkt *pkt)
1050 {
1051 	return net_eth_vlan_get_dei(pkt->vlan_tci);
1052 }
1053 
1054 static inline void net_pkt_set_vlan_dei(struct net_pkt *pkt, bool dei)
1055 {
1056 	pkt->vlan_tci = net_eth_vlan_set_dei(pkt->vlan_tci, dei);
1057 }
1058 
1059 static inline void net_pkt_set_vlan_tci(struct net_pkt *pkt, uint16_t tci)
1060 {
1061 	pkt->vlan_tci = tci;
1062 }
1063 
1064 static inline uint16_t net_pkt_vlan_tci(struct net_pkt *pkt)
1065 {
1066 	return pkt->vlan_tci;
1067 }
1068 #else
1069 static inline uint16_t net_pkt_vlan_tag(struct net_pkt *pkt)
1070 {
1071 	ARG_UNUSED(pkt);
1072 
1073 	return NET_VLAN_TAG_UNSPEC;
1074 }
1075 
1076 static inline void net_pkt_set_vlan_tag(struct net_pkt *pkt, uint16_t tag)
1077 {
1078 	ARG_UNUSED(pkt);
1079 	ARG_UNUSED(tag);
1080 }
1081 
1082 static inline uint8_t net_pkt_vlan_priority(struct net_pkt *pkt)
1083 {
1084 	ARG_UNUSED(pkt);
1085 
1086 	return 0;
1087 }
1088 
1089 static inline bool net_pkt_vlan_dei(struct net_pkt *pkt)
1090 {
1091 	ARG_UNUSED(pkt);
1092 
1093 	return false;
1094 }
1095 
1096 static inline void net_pkt_set_vlan_dei(struct net_pkt *pkt, bool dei)
1097 {
1098 	ARG_UNUSED(pkt);
1099 	ARG_UNUSED(dei);
1100 }
1101 
1102 static inline uint16_t net_pkt_vlan_tci(struct net_pkt *pkt)
1103 {
1104 	ARG_UNUSED(pkt);
1105 
1106 	return NET_VLAN_TAG_UNSPEC; /* assumes priority is 0 */
1107 }
1108 
1109 static inline void net_pkt_set_vlan_tci(struct net_pkt *pkt, uint16_t tci)
1110 {
1111 	ARG_UNUSED(pkt);
1112 	ARG_UNUSED(tci);
1113 }
1114 #endif
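
/* Usage sketch (illustrative only, assumes CONFIG_NET_VLAN=y): tag an
 * outgoing packet with VLAN ID 100 and priority 5 before it is handed
 * to L2. The variable name "pkt" is a placeholder.
 *
 *	net_pkt_set_vlan_tag(pkt, 100);
 *	net_pkt_set_vlan_priority(pkt, 5);
 *	net_pkt_set_vlan_dei(pkt, false);
 */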
1115 
1116 #if defined(CONFIG_NET_PKT_TIMESTAMP) || defined(CONFIG_NET_PKT_TXTIME)
1117 static inline struct net_ptp_time *net_pkt_timestamp(struct net_pkt *pkt)
1118 {
1119 	return &pkt->timestamp;
1120 }
1121 
1122 static inline void net_pkt_set_timestamp(struct net_pkt *pkt,
1123 					 struct net_ptp_time *timestamp)
1124 {
1125 	pkt->timestamp.second = timestamp->second;
1126 	pkt->timestamp.nanosecond = timestamp->nanosecond;
1127 }
1128 
1129 static inline net_time_t net_pkt_timestamp_ns(struct net_pkt *pkt)
1130 {
1131 	return net_ptp_time_to_ns(&pkt->timestamp);
1132 }
1133 
1134 static inline void net_pkt_set_timestamp_ns(struct net_pkt *pkt, net_time_t timestamp)
1135 {
1136 	pkt->timestamp = ns_to_net_ptp_time(timestamp);
1137 }
1138 #else
1139 static inline struct net_ptp_time *net_pkt_timestamp(struct net_pkt *pkt)
1140 {
1141 	ARG_UNUSED(pkt);
1142 
1143 	return NULL;
1144 }
1145 
1146 static inline void net_pkt_set_timestamp(struct net_pkt *pkt,
1147 					 struct net_ptp_time *timestamp)
1148 {
1149 	ARG_UNUSED(pkt);
1150 	ARG_UNUSED(timestamp);
1151 }
1152 
1153 static inline net_time_t net_pkt_timestamp_ns(struct net_pkt *pkt)
1154 {
1155 	ARG_UNUSED(pkt);
1156 
1157 	return 0;
1158 }
1159 
1160 static inline void net_pkt_set_timestamp_ns(struct net_pkt *pkt, net_time_t timestamp)
1161 {
1162 	ARG_UNUSED(pkt);
1163 	ARG_UNUSED(timestamp);
1164 }
1165 #endif /* CONFIG_NET_PKT_TIMESTAMP || CONFIG_NET_PKT_TXTIME */
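
/* Usage sketch (illustrative only, assumes CONFIG_NET_PKT_TXTIME=y and a
 * driver that honours scheduled transmission): as described in the struct
 * documentation above, an unsent packet can be scheduled by setting its
 * timestamp to a future point on the network subsystem's local clock.
 * "pkt" and "txtime_ns" are placeholders computed by the caller.
 *
 *	net_pkt_set_timestamp_ns(pkt, txtime_ns);
 */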
1166 
1167 #if defined(CONFIG_NET_PKT_RXTIME_STATS) || defined(CONFIG_NET_PKT_TXTIME_STATS) || \
1168 	defined(CONFIG_TRACING_NET_CORE)
1169 
1170 static inline uint32_t net_pkt_create_time(struct net_pkt *pkt)
1171 {
1172 	return pkt->create_time;
1173 }
1174 
1175 static inline void net_pkt_set_create_time(struct net_pkt *pkt,
1176 					   uint32_t create_time)
1177 {
1178 	pkt->create_time = create_time;
1179 }
1180 #else
1181 static inline uint32_t net_pkt_create_time(struct net_pkt *pkt)
1182 {
1183 	ARG_UNUSED(pkt);
1184 
1185 	return 0U;
1186 }
1187 
1188 static inline void net_pkt_set_create_time(struct net_pkt *pkt,
1189 					   uint32_t create_time)
1190 {
1191 	ARG_UNUSED(pkt);
1192 	ARG_UNUSED(create_time);
1193 }
1194 #endif /* CONFIG_NET_PKT_RXTIME_STATS || CONFIG_NET_PKT_TXTIME_STATS ||
1195 	* CONFIG_TRACING_NET_CORE
1196 	*/
1197 
1198 #if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL) || \
1199 	defined(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)
1200 static inline uint32_t *net_pkt_stats_tick(struct net_pkt *pkt)
1201 {
1202 	return pkt->detail.stat;
1203 }
1204 
1205 static inline int net_pkt_stats_tick_count(struct net_pkt *pkt)
1206 {
1207 	return pkt->detail.count;
1208 }
1209 
1210 static inline void net_pkt_stats_tick_reset(struct net_pkt *pkt)
1211 {
1212 	memset(&pkt->detail, 0, sizeof(pkt->detail));
1213 }
1214 
1215 static ALWAYS_INLINE void net_pkt_set_stats_tick(struct net_pkt *pkt,
1216 						 uint32_t tick)
1217 {
1218 	if (pkt->detail.count >= NET_PKT_DETAIL_STATS_COUNT) {
1219 		NET_ERR("Detail stats count overflow (%d >= %d)",
1220 			pkt->detail.count, NET_PKT_DETAIL_STATS_COUNT);
1221 		return;
1222 	}
1223 
1224 	pkt->detail.stat[pkt->detail.count++] = tick;
1225 }
1226 
1227 #define net_pkt_set_tx_stats_tick(pkt, tick) net_pkt_set_stats_tick(pkt, tick)
1228 #define net_pkt_set_rx_stats_tick(pkt, tick) net_pkt_set_stats_tick(pkt, tick)
1229 #else
1230 static inline uint32_t *net_pkt_stats_tick(struct net_pkt *pkt)
1231 {
1232 	ARG_UNUSED(pkt);
1233 
1234 	return NULL;
1235 }
1236 
1237 static inline int net_pkt_stats_tick_count(struct net_pkt *pkt)
1238 {
1239 	ARG_UNUSED(pkt);
1240 
1241 	return 0;
1242 }
1243 
1244 static inline void net_pkt_stats_tick_reset(struct net_pkt *pkt)
1245 {
1246 	ARG_UNUSED(pkt);
1247 }
1248 
1249 static inline void net_pkt_set_stats_tick(struct net_pkt *pkt, uint32_t tick)
1250 {
1251 	ARG_UNUSED(pkt);
1252 	ARG_UNUSED(tick);
1253 }
1254 
1255 #define net_pkt_set_tx_stats_tick(pkt, tick)
1256 #define net_pkt_set_rx_stats_tick(pkt, tick)
1257 #endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL ||
1258 	  CONFIG_NET_PKT_RXTIME_STATS_DETAIL */
1259 
1260 static inline size_t net_pkt_get_len(struct net_pkt *pkt)
1261 {
1262 	return net_buf_frags_len(pkt->frags);
1263 }
1264 
1265 static inline uint8_t *net_pkt_data(struct net_pkt *pkt)
1266 {
1267 	return pkt->frags->data;
1268 }
1269 
1270 static inline uint8_t *net_pkt_ip_data(struct net_pkt *pkt)
1271 {
1272 	return pkt->frags->data;
1273 }
1274 
1275 static inline bool net_pkt_is_empty(struct net_pkt *pkt)
1276 {
1277 	return !pkt->buffer || !net_pkt_data(pkt) || pkt->buffer->len == 0;
1278 }
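
/* Usage sketch (illustrative only): net_pkt_get_len() returns the total
 * length over all buffer fragments, while net_pkt_data() only exposes the
 * data of the first fragment. "pkt" is a placeholder.
 *
 *	if (!net_pkt_is_empty(pkt)) {
 *		size_t total = net_pkt_get_len(pkt);
 *		uint8_t *first = net_pkt_data(pkt);
 *	}
 */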
1279 
1280 static inline struct net_linkaddr *net_pkt_lladdr_src(struct net_pkt *pkt)
1281 {
1282 	return &pkt->lladdr_src;
1283 }
1284 
1285 static inline struct net_linkaddr *net_pkt_lladdr_dst(struct net_pkt *pkt)
1286 {
1287 	return &pkt->lladdr_dst;
1288 }
1289 
1290 static inline void net_pkt_lladdr_swap(struct net_pkt *pkt)
1291 {
1292 	uint8_t *addr = net_pkt_lladdr_src(pkt)->addr;
1293 
1294 	net_pkt_lladdr_src(pkt)->addr = net_pkt_lladdr_dst(pkt)->addr;
1295 	net_pkt_lladdr_dst(pkt)->addr = addr;
1296 }
1297 
1298 static inline void net_pkt_lladdr_clear(struct net_pkt *pkt)
1299 {
1300 	net_pkt_lladdr_src(pkt)->addr = NULL;
1301 	net_pkt_lladdr_src(pkt)->len = 0U;
1302 }
1303 
1304 static inline uint16_t net_pkt_ll_proto_type(struct net_pkt *pkt)
1305 {
1306 	return pkt->ll_proto_type;
1307 }
1308 
1309 static inline void net_pkt_set_ll_proto_type(struct net_pkt *pkt, uint16_t type)
1310 {
1311 	pkt->ll_proto_type = type;
1312 }
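
/* Usage sketch (illustrative only): inspect the link-layer addresses and
 * protocol type that L2 filled in for a received packet. "pkt" is a
 * placeholder.
 *
 *	struct net_linkaddr *src = net_pkt_lladdr_src(pkt);
 *	uint16_t proto = net_pkt_ll_proto_type(pkt);
 *
 *	if (src->len > 0) {
 *		... src->addr points to the source link-layer address ...
 *	}
 */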
1313 
1314 #if defined(CONFIG_NET_IPV4_ACD)
1315 static inline bool net_pkt_ipv4_acd(struct net_pkt *pkt)
1316 {
1317 	return !!(pkt->ipv4_acd_arp_msg);
1318 }
1319 
1320 static inline void net_pkt_set_ipv4_acd(struct net_pkt *pkt,
1321 					bool is_acd_arp_msg)
1322 {
1323 	pkt->ipv4_acd_arp_msg = is_acd_arp_msg;
1324 }
1325 #else /* CONFIG_NET_IPV4_ACD */
1326 static inline bool net_pkt_ipv4_acd(struct net_pkt *pkt)
1327 {
1328 	ARG_UNUSED(pkt);
1329 
1330 	return false;
1331 }
1332 
1333 static inline void net_pkt_set_ipv4_acd(struct net_pkt *pkt,
1334 					bool is_acd_arp_msg)
1335 {
1336 	ARG_UNUSED(pkt);
1337 	ARG_UNUSED(is_acd_arp_msg);
1338 }
1339 #endif /* CONFIG_NET_IPV4_ACD */
1340 
1341 #if defined(CONFIG_NET_LLDP)
1342 static inline bool net_pkt_is_lldp(struct net_pkt *pkt)
1343 {
1344 	return !!(pkt->lldp_pkt);
1345 }
1346 
1347 static inline void net_pkt_set_lldp(struct net_pkt *pkt, bool is_lldp)
1348 {
1349 	pkt->lldp_pkt = is_lldp;
1350 }
1351 #else
1352 static inline bool net_pkt_is_lldp(struct net_pkt *pkt)
1353 {
1354 	ARG_UNUSED(pkt);
1355 
1356 	return false;
1357 }
1358 
1359 static inline void net_pkt_set_lldp(struct net_pkt *pkt, bool is_lldp)
1360 {
1361 	ARG_UNUSED(pkt);
1362 	ARG_UNUSED(is_lldp);
1363 }
1364 #endif /* CONFIG_NET_LLDP */
1365 
1366 #if defined(CONFIG_NET_L2_PPP)
1367 static inline bool net_pkt_is_ppp(struct net_pkt *pkt)
1368 {
1369 	return !!(pkt->ppp_msg);
1370 }
1371 
1372 static inline void net_pkt_set_ppp(struct net_pkt *pkt,
1373 				   bool is_ppp_msg)
1374 {
1375 	pkt->ppp_msg = is_ppp_msg;
1376 }
1377 #else /* CONFIG_NET_L2_PPP */
1378 static inline bool net_pkt_is_ppp(struct net_pkt *pkt)
1379 {
1380 	ARG_UNUSED(pkt);
1381 
1382 	return false;
1383 }
1384 
1385 static inline void net_pkt_set_ppp(struct net_pkt *pkt,
1386 				   bool is_ppp_msg)
1387 {
1388 	ARG_UNUSED(pkt);
1389 	ARG_UNUSED(is_ppp_msg);
1390 }
1391 #endif /* CONFIG_NET_L2_PPP */
1392 
1393 #if defined(NET_PKT_HAS_CONTROL_BLOCK)
1394 static inline void *net_pkt_cb(struct net_pkt *pkt)
1395 {
1396 	return &pkt->cb;
1397 }
1398 #else
1399 static inline void *net_pkt_cb(struct net_pkt *pkt)
1400 {
1401 	ARG_UNUSED(pkt);
1402 
1403 	return NULL;
1404 }
1405 #endif
1406 
1407 #define NET_IPV6_HDR(pkt) ((struct net_ipv6_hdr *)net_pkt_ip_data(pkt))
1408 #define NET_IPV4_HDR(pkt) ((struct net_ipv4_hdr *)net_pkt_ip_data(pkt))
1409 
1410 static inline void net_pkt_set_src_ipv6_addr(struct net_pkt *pkt)
1411 {
1412 	net_if_ipv6_select_src_addr(net_context_get_iface(
1413 					    net_pkt_context(pkt)),
1414 				    (struct in6_addr *)NET_IPV6_HDR(pkt)->src);
1415 }
1416 
1417 static inline void net_pkt_set_overwrite(struct net_pkt *pkt, bool overwrite)
1418 {
1419 	pkt->overwrite = overwrite;
1420 }
1421 
1422 static inline bool net_pkt_is_being_overwritten(struct net_pkt *pkt)
1423 {
1424 	return !!(pkt->overwrite);
1425 }
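
/* Usage sketch (illustrative only): the overwrite flag controls whether the
 * cursor based read/write helpers declared later in this header (for
 * example net_pkt_cursor_init() and net_pkt_read()) walk over existing
 * data instead of appending new data. "pkt" and "buf" are placeholders.
 *
 *	uint8_t buf[16];
 *
 *	net_pkt_cursor_init(pkt);
 *	net_pkt_set_overwrite(pkt, true);
 *	if (net_pkt_read(pkt, buf, sizeof(buf)) < 0) {
 *		... not enough data in the packet ...
 *	}
 */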
1426 
1427 #ifdef CONFIG_NET_PKT_FILTER
1428 
1429 bool net_pkt_filter_send_ok(struct net_pkt *pkt);
1430 bool net_pkt_filter_recv_ok(struct net_pkt *pkt);
1431 
1432 #else
1433 
1434 static inline bool net_pkt_filter_send_ok(struct net_pkt *pkt)
1435 {
1436 	ARG_UNUSED(pkt);
1437 
1438 	return true;
1439 }
1440 
1441 static inline bool net_pkt_filter_recv_ok(struct net_pkt *pkt)
1442 {
1443 	ARG_UNUSED(pkt);
1444 
1445 	return true;
1446 }
1447 
1448 #endif /* CONFIG_NET_PKT_FILTER */
1449 
1450 #if defined(CONFIG_NET_PKT_FILTER) && \
1451 	(defined(CONFIG_NET_PKT_FILTER_IPV4_HOOK) || defined(CONFIG_NET_PKT_FILTER_IPV6_HOOK))
1452 
1453 bool net_pkt_filter_ip_recv_ok(struct net_pkt *pkt);
1454 
1455 #else
1456 
1457 static inline bool net_pkt_filter_ip_recv_ok(struct net_pkt *pkt)
1458 {
1459 	ARG_UNUSED(pkt);
1460 
1461 	return true;
1462 }
1463 
1464 #endif /* CONFIG_NET_PKT_FILTER_IPV4_HOOK || CONFIG_NET_PKT_FILTER_IPV6_HOOK */
1465 
1466 #if defined(CONFIG_NET_PKT_FILTER) && defined(CONFIG_NET_PKT_FILTER_LOCAL_IN_HOOK)
1467 
1468 bool net_pkt_filter_local_in_recv_ok(struct net_pkt *pkt);
1469 
1470 #else
1471 
1472 static inline bool net_pkt_filter_local_in_recv_ok(struct net_pkt *pkt)
1473 {
1474 	ARG_UNUSED(pkt);
1475 
1476 	return true;
1477 }
1478 
1479 #endif /* CONFIG_NET_PKT_FILTER && CONFIG_NET_PKT_FILTER_LOCAL_IN_HOOK */
1480 
1481 #if defined(CONFIG_NET_OFFLOAD) || defined(CONFIG_NET_L2_IPIP)
1482 static inline struct sockaddr *net_pkt_remote_address(struct net_pkt *pkt)
1483 {
1484 	return &pkt->remote;
1485 }
1486 
1487 static inline void net_pkt_set_remote_address(struct net_pkt *pkt,
1488 					      struct sockaddr *address,
1489 					      socklen_t len)
1490 {
1491 	memcpy(&pkt->remote, address, len);
1492 }
1493 #endif /* CONFIG_NET_OFFLOAD || CONFIG_NET_L2_IPIP */
1494 
1495 /** @endcond */
1496 
1497 /**
1498  * @brief Create a net_pkt slab
1499  *
1500  * A net_pkt slab is used to store meta-information about
1501  * network packets. It must be coupled with a data fragment pool
1502  * (@ref NET_PKT_DATA_POOL_DEFINE) used to store the actual
1503  * packet data. The macro can be used by an application to define
1504  * additional custom per-context TX packet slabs (see
1505  * net_context_setup_pools()).
1506  *
1507  * @param name Name of the slab.
1508  * @param count Number of net_pkt in this slab.
1509  */
1510 #define NET_PKT_SLAB_DEFINE(name, count)				\
1511 	K_MEM_SLAB_DEFINE(name, sizeof(struct net_pkt), count, 4);      \
1512 	NET_PKT_ALLOC_STATS_DEFINE(pkt_alloc_stats_##name, name)
1513 
1514 /** @cond INTERNAL_HIDDEN */
1515 
1516 /* Backward compatibility macro */
1517 #define NET_PKT_TX_SLAB_DEFINE(name, count) NET_PKT_SLAB_DEFINE(name, count)
1518 
1519 /** @endcond */
1520 
1521 /**
1522  * @brief Create a data fragment net_buf pool
1523  *
1524  * A net_buf pool is used to store actual data for
1525  * network packets. It must be coupled with a net_pkt slab
1526  * (@ref NET_PKT_SLAB_DEFINE) used to store the packet
1527  * meta-information. The macro can be used by an application to
1528  * define additional custom per-context TX packet pools (see
1529  * net_context_setup_pools()).
1530  *
1531  * @param name Name of the pool.
1532  * @param count Number of net_buf in this pool.
1533  */
1534 #define NET_PKT_DATA_POOL_DEFINE(name, count)				\
1535 	NET_BUF_POOL_DEFINE(name, count, CONFIG_NET_BUF_DATA_SIZE,	\
1536 			    0, NULL)
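
/* Usage sketch (illustrative only): define a custom TX packet slab and a
 * matching data pool pair; the names "my_tx_pkts" and "my_tx_data" are
 * placeholders. Such a pair can then be supplied to the application's
 * net_context_setup_pools() callbacks as described above.
 *
 *	NET_PKT_SLAB_DEFINE(my_tx_pkts, 16);
 *	NET_PKT_DATA_POOL_DEFINE(my_tx_data, 32);
 */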
1537 
1538 /** @cond INTERNAL_HIDDEN */
1539 
1540 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC) || \
1541 	(CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG)
1542 #define NET_PKT_DEBUG_ENABLED
1543 #endif
1544 
1545 #if defined(NET_PKT_DEBUG_ENABLED)
1546 
1547 /* Debug versions of the net_pkt functions that are used when tracking
1548  * buffer usage.
1549  */
1550 
1551 struct net_buf *net_pkt_get_reserve_data_debug(struct net_buf_pool *pool,
1552 					       size_t min_len,
1553 					       k_timeout_t timeout,
1554 					       const char *caller,
1555 					       int line);
1556 
1557 #define net_pkt_get_reserve_data(pool, min_len, timeout)				\
1558 	net_pkt_get_reserve_data_debug(pool, min_len, timeout, __func__, __LINE__)
1559 
1560 struct net_buf *net_pkt_get_reserve_rx_data_debug(size_t min_len,
1561 						  k_timeout_t timeout,
1562 						  const char *caller,
1563 						  int line);
1564 #define net_pkt_get_reserve_rx_data(min_len, timeout)				\
1565 	net_pkt_get_reserve_rx_data_debug(min_len, timeout, __func__, __LINE__)
1566 
1567 struct net_buf *net_pkt_get_reserve_tx_data_debug(size_t min_len,
1568 						  k_timeout_t timeout,
1569 						  const char *caller,
1570 						  int line);
1571 #define net_pkt_get_reserve_tx_data(min_len, timeout)				\
1572 	net_pkt_get_reserve_tx_data_debug(min_len, timeout, __func__, __LINE__)
1573 
1574 struct net_buf *net_pkt_get_frag_debug(struct net_pkt *pkt, size_t min_len,
1575 				       k_timeout_t timeout,
1576 				       const char *caller, int line);
1577 #define net_pkt_get_frag(pkt, min_len, timeout)					\
1578 	net_pkt_get_frag_debug(pkt, min_len, timeout, __func__, __LINE__)
1579 
1580 void net_pkt_unref_debug(struct net_pkt *pkt, const char *caller, int line);
1581 #define net_pkt_unref(pkt) net_pkt_unref_debug(pkt, __func__, __LINE__)
1582 
1583 struct net_pkt *net_pkt_ref_debug(struct net_pkt *pkt, const char *caller,
1584 				  int line);
1585 #define net_pkt_ref(pkt) net_pkt_ref_debug(pkt, __func__, __LINE__)
1586 
1587 struct net_buf *net_pkt_frag_ref_debug(struct net_buf *frag,
1588 				       const char *caller, int line);
1589 #define net_pkt_frag_ref(frag) net_pkt_frag_ref_debug(frag, __func__, __LINE__)
1590 
1591 void net_pkt_frag_unref_debug(struct net_buf *frag,
1592 			      const char *caller, int line);
1593 #define net_pkt_frag_unref(frag)				\
1594 	net_pkt_frag_unref_debug(frag, __func__, __LINE__)
1595 
1596 struct net_buf *net_pkt_frag_del_debug(struct net_pkt *pkt,
1597 				       struct net_buf *parent,
1598 				       struct net_buf *frag,
1599 				       const char *caller, int line);
1600 #define net_pkt_frag_del(pkt, parent, frag)				\
1601 	net_pkt_frag_del_debug(pkt, parent, frag, __func__, __LINE__)
1602 
1603 void net_pkt_frag_add_debug(struct net_pkt *pkt, struct net_buf *frag,
1604 			    const char *caller, int line);
1605 #define net_pkt_frag_add(pkt, frag)				\
1606 	net_pkt_frag_add_debug(pkt, frag, __func__, __LINE__)
1607 
1608 void net_pkt_frag_insert_debug(struct net_pkt *pkt, struct net_buf *frag,
1609 			       const char *caller, int line);
1610 #define net_pkt_frag_insert(pkt, frag)					\
1611 	net_pkt_frag_insert_debug(pkt, frag, __func__, __LINE__)
1612 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC ||
1613 	* CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
1614 	*/
1615 /** @endcond */
1616 
1617 #if defined(NET_PKT_DEBUG_ENABLED)
1618 /**
1619  * @brief Print fragment list and the fragment sizes
1620  *
1621  * @details Only available if debugging is activated.
1622  *
1623  * @param pkt Network pkt.
1624  */
1625 void net_pkt_print_frags(struct net_pkt *pkt);
1626 #else
1627 #define net_pkt_print_frags(pkt)
1628 #endif
1629 
1630 #if !defined(NET_PKT_DEBUG_ENABLED)
1631 /**
1632  * @brief Get a data buffer from a given pool.
1633  *
1634  * @details Normally this version is not useful for applications
1635  * but is mainly used by network fragmentation code.
1636  *
1637  * @param pool The net_buf pool to use.
1638  * @param min_len Minimum length of the requested fragment.
1639  * @param timeout Affects the action taken should the net buf pool be empty.
1640  *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
1641  *        wait as long as necessary. Otherwise, wait up to the specified time.
1642  *
1643  * @return Network buffer if successful, NULL otherwise.
1644  */
1645 struct net_buf *net_pkt_get_reserve_data(struct net_buf_pool *pool,
1646 					 size_t min_len, k_timeout_t timeout);
1647 #endif
1648 
1649 #if !defined(NET_PKT_DEBUG_ENABLED)
1650 /**
1651  * @brief Get RX DATA buffer from pool.
1652  * Normally you should use net_pkt_get_frag() instead.
1653  *
1654  * @details Normally this version is not useful for applications
1655  * but is mainly used by network fragmentation code.
1656  *
1657  * @param min_len Minimum length of the requested fragment.
1658  * @param timeout Affects the action taken should the net buf pool be empty.
1659  *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
1660  *        wait as long as necessary. Otherwise, wait up to the specified time.
1661  *
1662  * @return Network buffer if successful, NULL otherwise.
1663  */
1664 struct net_buf *net_pkt_get_reserve_rx_data(size_t min_len, k_timeout_t timeout);
1665 #endif
1666 
1667 #if !defined(NET_PKT_DEBUG_ENABLED)
1668 /**
1669  * @brief Get TX DATA buffer from pool.
1670  * Normally you should use net_pkt_get_frag() instead.
1671  *
1672  * @details Normally this version is not useful for applications
1673  * but is mainly used by network fragmentation code.
1674  *
1675  * @param min_len Minimum length of the requested fragment.
1676  * @param timeout Affects the action taken should the net buf pool be empty.
1677  *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
1678  *        wait as long as necessary. Otherwise, wait up to the specified time.
1679  *
1680  * @return Network buffer if successful, NULL otherwise.
1681  */
1682 struct net_buf *net_pkt_get_reserve_tx_data(size_t min_len, k_timeout_t timeout);
1683 #endif
1684 
1685 #if !defined(NET_PKT_DEBUG_ENABLED)
1686 /**
1687  * @brief Get a data fragment that might come from a user-specific
1688  * buffer pool or from the global DATA pool.
1689  *
1690  * @param pkt Network packet.
1691  * @param min_len Minimum length of the requested fragment.
1692  * @param timeout Affects the action taken should the net buf pool be empty.
1693  *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
1694  *        wait as long as necessary. Otherwise, wait up to the specified time.
1695  *
1696  * @return Network buffer if successful, NULL otherwise.
1697  */
1698 struct net_buf *net_pkt_get_frag(struct net_pkt *pkt, size_t min_len,
1699 				 k_timeout_t timeout);
1700 #endif
1701 
1702 #if !defined(NET_PKT_DEBUG_ENABLED)
1703 /**
1704  * @brief Place packet back into the available packets slab
1705  *
1706  * @details Releases the packet for other use. This needs to be
1707  * called by the application after it has finished with the packet.
1708  *
1709  * @param pkt Network packet to release.
1710  *
1711  */
1712 void net_pkt_unref(struct net_pkt *pkt);
1713 #endif
1714 
1715 #if !defined(NET_PKT_DEBUG_ENABLED)
1716 /**
1717  * @brief Increase the packet ref count
1718  *
1719  * @details Mark the packet to be used still.
1720  *
1721  * @param pkt Network packet to ref.
1722  *
1723  * @return Network packet if successful, NULL otherwise.
1724  */
1725 struct net_pkt *net_pkt_ref(struct net_pkt *pkt);
1726 #endif
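
/* Illustrative sketch (not part of the original header): keeping a packet
 * alive across deferred processing by taking an extra reference and dropping
 * it once done. submit_work() stands for a hypothetical deferred consumer of
 * the packet; `pkt` is assumed to come from the caller.
 *
 *	net_pkt_ref(pkt);
 *	submit_work(pkt);
 *	...
 *	net_pkt_unref(pkt);
 */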
1727 
1728 #if !defined(NET_PKT_DEBUG_ENABLED)
1729 /**
1730  * @brief Increase the packet fragment ref count
1731  *
1732  * @details Mark the fragment to be used still.
1733  *
1734  * @param frag Network fragment to ref.
1735  *
1736  * @return a pointer on the referenced Network fragment.
1737  */
1738 struct net_buf *net_pkt_frag_ref(struct net_buf *frag);
1739 #endif
1740 
1741 #if !defined(NET_PKT_DEBUG_ENABLED)
1742 /**
1743  * @brief Decrease the packet fragment ref count
1744  *
1745  * @param frag Network fragment to unref.
1746  */
1747 void net_pkt_frag_unref(struct net_buf *frag);
1748 #endif
1749 
1750 #if !defined(NET_PKT_DEBUG_ENABLED)
1751 /**
1752  * @brief Delete existing fragment from a packet
1753  *
1754  * @param pkt Network packet to which the fragment belongs.
1755  * @param parent parent fragment of frag, or NULL if none.
1756  * @param frag Fragment to delete.
1757  *
1758  * @return Pointer to the following fragment, or NULL if it had no
1759  *         further fragments.
1760  */
1761 struct net_buf *net_pkt_frag_del(struct net_pkt *pkt,
1762 				 struct net_buf *parent,
1763 				 struct net_buf *frag);
1764 #endif
1765 
1766 #if !defined(NET_PKT_DEBUG_ENABLED)
1767 /**
1768  * @brief Add a fragment to a packet at the end of its fragment list
1769  *
1770  * @param pkt Network packet where to add the fragment
1771  * @param frag Fragment to add
1772  */
1773 void net_pkt_frag_add(struct net_pkt *pkt, struct net_buf *frag);
1774 #endif
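
/* Illustrative sketch (not part of the original header): requesting an extra
 * data fragment for an existing packet and appending it to the fragment list.
 * The packet `pkt`, the 128-byte minimum length and the 100 ms timeout are
 * assumptions made for the example.
 *
 *	struct net_buf *frag;
 *
 *	frag = net_pkt_get_frag(pkt, 128, K_MSEC(100));
 *	if (frag == NULL) {
 *		return -ENOMEM;
 *	}
 *
 *	net_pkt_frag_add(pkt, frag);
 */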
1775 
1776 #if !defined(NET_PKT_DEBUG_ENABLED)
1777 /**
1778  * @brief Insert a fragment to a packet at the beginning of its fragment list
1779  *
1780  * @param pkt Network packet where to insert the fragment
1781  * @param frag Fragment to insert
1782  */
1783 void net_pkt_frag_insert(struct net_pkt *pkt, struct net_buf *frag);
1784 #endif
1785 
1786 /**
1787  * @brief Compact the fragment list of a packet.
1788  *
1789  * @details After this, there is no free space left in the individual fragments.
1790  * @param pkt Network packet.
1791  */
1792 void net_pkt_compact(struct net_pkt *pkt);
1793 
1794 /**
1795  * @brief Get information about predefined RX, TX and DATA pools.
1796  *
1797  * @param rx Pointer to RX pool is returned.
1798  * @param tx Pointer to TX pool is returned.
1799  * @param rx_data Pointer to RX DATA pool is returned.
1800  * @param tx_data Pointer to TX DATA pool is returned.
1801  */
1802 void net_pkt_get_info(struct k_mem_slab **rx,
1803 		      struct k_mem_slab **tx,
1804 		      struct net_buf_pool **rx_data,
1805 		      struct net_buf_pool **tx_data);
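
/* Illustrative sketch (not part of the original header): querying the
 * predefined slabs/pools, for instance to log how many RX packets are still
 * free using the kernel accessor k_mem_slab_num_free_get().
 *
 *	struct k_mem_slab *rx, *tx;
 *	struct net_buf_pool *rx_data, *tx_data;
 *
 *	net_pkt_get_info(&rx, &tx, &rx_data, &tx_data);
 *	printk("free RX pkts: %u\n", k_mem_slab_num_free_get(rx));
 */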
1806 
1807 /** @cond INTERNAL_HIDDEN */
1808 
1809 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
1810 /**
1811  * @brief Debug helper to print out the buffer allocations
1812  */
1813 void net_pkt_print(void);
1814 
1815 typedef void (*net_pkt_allocs_cb_t)(struct net_pkt *pkt,
1816 				    struct net_buf *buf,
1817 				    const char *func_alloc,
1818 				    int line_alloc,
1819 				    const char *func_free,
1820 				    int line_free,
1821 				    bool in_use,
1822 				    void *user_data);
1823 
1824 void net_pkt_allocs_foreach(net_pkt_allocs_cb_t cb, void *user_data);
1825 
1826 const char *net_pkt_slab2str(struct k_mem_slab *slab);
1827 const char *net_pkt_pool2str(struct net_buf_pool *pool);
1828 
1829 #else
1830 #define net_pkt_print(...)
1831 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC */
1832 
1833 /* The new allocator and API are defined below.
1834  * This will become simpler once the former API above is removed.
1835  */
1836 #if defined(NET_PKT_DEBUG_ENABLED)
1837 
1838 struct net_pkt *net_pkt_alloc_debug(k_timeout_t timeout,
1839 				    const char *caller, int line);
1840 #define net_pkt_alloc(_timeout)					\
1841 	net_pkt_alloc_debug(_timeout, __func__, __LINE__)
1842 
1843 struct net_pkt *net_pkt_alloc_from_slab_debug(struct k_mem_slab *slab,
1844 					      k_timeout_t timeout,
1845 					      const char *caller, int line);
1846 #define net_pkt_alloc_from_slab(_slab, _timeout)			\
1847 	net_pkt_alloc_from_slab_debug(_slab, _timeout, __func__, __LINE__)
1848 
1849 struct net_pkt *net_pkt_rx_alloc_debug(k_timeout_t timeout,
1850 				       const char *caller, int line);
1851 #define net_pkt_rx_alloc(_timeout)				\
1852 	net_pkt_rx_alloc_debug(_timeout, __func__, __LINE__)
1853 
1854 struct net_pkt *net_pkt_alloc_on_iface_debug(struct net_if *iface,
1855 					     k_timeout_t timeout,
1856 					     const char *caller,
1857 					     int line);
1858 #define net_pkt_alloc_on_iface(_iface, _timeout)			\
1859 	net_pkt_alloc_on_iface_debug(_iface, _timeout, __func__, __LINE__)
1860 
1861 struct net_pkt *net_pkt_rx_alloc_on_iface_debug(struct net_if *iface,
1862 						k_timeout_t timeout,
1863 						const char *caller,
1864 						int line);
1865 #define net_pkt_rx_alloc_on_iface(_iface, _timeout)			\
1866 	net_pkt_rx_alloc_on_iface_debug(_iface, _timeout,		\
1867 					__func__, __LINE__)
1868 
1869 int net_pkt_alloc_buffer_debug(struct net_pkt *pkt,
1870 			       size_t size,
1871 			       enum net_ip_protocol proto,
1872 			       k_timeout_t timeout,
1873 			       const char *caller, int line);
1874 #define net_pkt_alloc_buffer(_pkt, _size, _proto, _timeout)		\
1875 	net_pkt_alloc_buffer_debug(_pkt, _size, _proto, _timeout,	\
1876 				   __func__, __LINE__)
1877 
1878 int net_pkt_alloc_buffer_raw_debug(struct net_pkt *pkt, size_t size,
1879 				   k_timeout_t timeout,
1880 				   const char *caller, int line);
1881 #define net_pkt_alloc_buffer_raw(_pkt, _size, _timeout)	\
1882 	net_pkt_alloc_buffer_raw_debug(_pkt, _size, _timeout,	\
1883 				       __func__, __LINE__)
1884 
1885 struct net_pkt *net_pkt_alloc_with_buffer_debug(struct net_if *iface,
1886 						size_t size,
1887 						sa_family_t family,
1888 						enum net_ip_protocol proto,
1889 						k_timeout_t timeout,
1890 						const char *caller,
1891 						int line);
1892 #define net_pkt_alloc_with_buffer(_iface, _size, _family,		\
1893 				  _proto, _timeout)			\
1894 	net_pkt_alloc_with_buffer_debug(_iface, _size, _family,		\
1895 					_proto, _timeout,		\
1896 					__func__, __LINE__)
1897 
1898 struct net_pkt *net_pkt_rx_alloc_with_buffer_debug(struct net_if *iface,
1899 						   size_t size,
1900 						   sa_family_t family,
1901 						   enum net_ip_protocol proto,
1902 						   k_timeout_t timeout,
1903 						   const char *caller,
1904 						   int line);
1905 #define net_pkt_rx_alloc_with_buffer(_iface, _size, _family,		\
1906 				     _proto, _timeout)			\
1907 	net_pkt_rx_alloc_with_buffer_debug(_iface, _size, _family,	\
1908 					   _proto, _timeout,		\
1909 					   __func__, __LINE__)
1910 
1911 int net_pkt_alloc_buffer_with_reserve_debug(struct net_pkt *pkt,
1912 					    size_t size,
1913 					    size_t reserve,
1914 					    enum net_ip_protocol proto,
1915 					    k_timeout_t timeout,
1916 					    const char *caller,
1917 					    int line);
1918 #define net_pkt_alloc_buffer_with_reserve(_pkt, _size, _reserve, _proto, _timeout) \
1919 	net_pkt_alloc_buffer_with_reserve_debug(_pkt, _size, _reserve, _proto, \
1920 						_timeout, __func__, __LINE__)
1921 
1922 #endif /* NET_PKT_DEBUG_ENABLED */
1923 /** @endcond */
1924 
1925 #if !defined(NET_PKT_DEBUG_ENABLED)
1926 /**
1927  * @brief Allocate an initialized net_pkt
1928  *
1929  * @details For the time being, two pools are used: one for TX and one for RX.
1930  *          This allocator must be used for TX.
1931  *
1932  * @param timeout Maximum time to wait for an allocation.
1933  *
1934  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1935  */
1936 struct net_pkt *net_pkt_alloc(k_timeout_t timeout);
1937 #endif
1938 
1939 #if !defined(NET_PKT_DEBUG_ENABLED)
1940 /**
1941  * @brief Allocate an initialized net_pkt from a specific slab
1942  *
1943  * @details Unlike net_pkt_alloc(), which uses the core slabs, this one uses
1944  *          an external slab (see NET_PKT_SLAB_DEFINE()).
1945  *          Do _not_ use it unless you know what you are doing. Basically, only
1946  *          net_context should be using this, in order to allocate a packet and
1947  *          then a buffer on its local slab/pool (if any).
1948  *
1949  * @param slab    The slab to use for allocating the packet
1950  * @param timeout Maximum time to wait for an allocation.
1951  *
1952  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1953  */
1954 struct net_pkt *net_pkt_alloc_from_slab(struct k_mem_slab *slab,
1955 					k_timeout_t timeout);
1956 #endif
1957 
1958 #if !defined(NET_PKT_DEBUG_ENABLED)
1959 /**
1960  * @brief Allocate an initialized net_pkt for RX
1961  *
1962  * @details For the time being, two pools are used: one for TX and one for RX.
1963  *          This allocator must be used for RX.
1964  *
1965  * @param timeout Maximum time to wait for an allocation.
1966  *
1967  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1968  */
1969 struct net_pkt *net_pkt_rx_alloc(k_timeout_t timeout);
1970 #endif
1971 
1972 #if !defined(NET_PKT_DEBUG_ENABLED)
1973 /**
1974  * @brief Allocate a network packet for a specific network interface.
1975  *
1976  * @param iface The network interface the packet is supposed to go through.
1977  * @param timeout Maximum time to wait for an allocation.
1978  *
1979  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1980  */
1981 struct net_pkt *net_pkt_alloc_on_iface(struct net_if *iface,
1982 				       k_timeout_t timeout);
1983 
1984 /** @cond INTERNAL_HIDDEN */
1985 
1986 /* Same as above but specifically for RX packet */
1987 struct net_pkt *net_pkt_rx_alloc_on_iface(struct net_if *iface,
1988 					  k_timeout_t timeout);
1989 /** @endcond */
1990 
1991 #endif
1992 
1993 #if !defined(NET_PKT_DEBUG_ENABLED)
1994 /**
1995  * @brief Allocate buffer for a net_pkt
1996  *
1997  * @details This allocator takes into account the space necessary for
1998  *          protocol headers, the MTU, and any existing buffer. Beware that,
1999  *          due to these criteria, the allocated size might be smaller or
2000  *          bigger than the requested one.
2001  *
2002  * @param pkt     The network packet requiring buffer to be allocated.
2003  * @param size    The size of buffer being requested.
2004  * @param proto   The IP protocol type (can be 0 for none).
2005  * @param timeout Maximum time to wait for an allocation.
2006  *
2007  * @return 0 on success, negative errno code otherwise.
2008  */
2009 int net_pkt_alloc_buffer(struct net_pkt *pkt,
2010 			 size_t size,
2011 			 enum net_ip_protocol proto,
2012 			 k_timeout_t timeout);
2013 #endif
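
/* Illustrative sketch (not part of the original header): the two-step TX
 * allocation path, first the packet descriptor bound to an interface, then
 * its buffer. net_pkt_set_family() is the setter defined earlier in this
 * header; the 64-byte size, AF_INET and IPPROTO_UDP values are arbitrary.
 *
 *	struct net_pkt *pkt = net_pkt_alloc_on_iface(iface, K_MSEC(100));
 *
 *	if (pkt == NULL) {
 *		return -ENOMEM;
 *	}
 *
 *	net_pkt_set_family(pkt, AF_INET);
 *
 *	if (net_pkt_alloc_buffer(pkt, 64, IPPROTO_UDP, K_MSEC(100)) < 0) {
 *		net_pkt_unref(pkt);
 *		return -ENOMEM;
 *	}
 */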
2014 
2015 #if !defined(NET_PKT_DEBUG_ENABLED)
2016 /**
2017  * @brief Allocate buffer for a net_pkt and reserve some space in the first net_buf.
2018  *
2019  * @details This allocator takes into account the space necessary for
2020  *          protocol headers, the MTU, and any existing buffer. Beware that,
2021  *          due to these criteria, the allocated size might be smaller or
2022  *          bigger than the requested one.
2023  *
2024  * @param pkt     The network packet requiring buffer to be allocated.
2025  * @param size    The size of buffer being requested.
2026  * @param reserve The L2 header size to reserve. This can be 0, in which case
2027  *                the L2 header is placed into a separate net_buf.
2028  * @param proto   The IP protocol type (can be 0 for none).
2029  * @param timeout Maximum time to wait for an allocation.
2030  *
2031  * @return 0 on success, negative errno code otherwise.
2032  */
2033 #if !defined(NET_PKT_DEBUG_ENABLED)
2034 int net_pkt_alloc_buffer_with_reserve(struct net_pkt *pkt,
2035 				      size_t size,
2036 				      size_t reserve,
2037 				      enum net_ip_protocol proto,
2038 				      k_timeout_t timeout);
2039 #endif
2040 
2041 /**
2042  * @brief Allocate buffer for a net_pkt, of a specified size, without any
2043  *        additional preconditions
2044  *
2045  * @details The actual buffer size may be larger than the requested one if
2046  *          fixed-size buffers are in use.
2047  *
2048  * @param pkt     The network packet requiring buffer to be allocated.
2049  * @param size    The size of buffer being requested.
2050  * @param timeout Maximum time to wait for an allocation.
2051  *
2052  * @return 0 on success, negative errno code otherwise.
2053  */
2054 int net_pkt_alloc_buffer_raw(struct net_pkt *pkt, size_t size,
2055 			     k_timeout_t timeout);
2056 #endif
2057 
2058 #if !defined(NET_PKT_DEBUG_ENABLED)
2059 /**
2060  * @brief Allocate a network packet and buffer at once
2061  *
2062  * @param iface   The network interface the packet is supposed to go through.
2063  * @param size    The size of buffer.
2064  * @param family  The family to which the packet belongs.
2065  * @param proto   The IP protocol type (can be 0 for none).
2066  * @param timeout Maximum time to wait for an allocation.
2067  *
2068  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
2069  */
2070 struct net_pkt *net_pkt_alloc_with_buffer(struct net_if *iface,
2071 					  size_t size,
2072 					  sa_family_t family,
2073 					  enum net_ip_protocol proto,
2074 					  k_timeout_t timeout);
2075 
2076 /** @cond INTERNAL_HIDDEN */
2077 
2078 /* Same as above but specifically for RX packet */
2079 struct net_pkt *net_pkt_rx_alloc_with_buffer(struct net_if *iface,
2080 					     size_t size,
2081 					     sa_family_t family,
2082 					     enum net_ip_protocol proto,
2083 					     k_timeout_t timeout);
2084 
2085 /** @endcond */
2086 
2087 #endif
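
/* Illustrative sketch (not part of the original header): the usual one-shot
 * allocation of a packet together with enough buffer for a small UDP/IPv4
 * payload, followed by writing that payload. `iface` and the `payload` array
 * are assumed to be provided by the caller.
 *
 *	struct net_pkt *pkt;
 *
 *	pkt = net_pkt_alloc_with_buffer(iface, sizeof(payload), AF_INET,
 *					IPPROTO_UDP, K_MSEC(100));
 *	if (pkt == NULL) {
 *		return -ENOMEM;
 *	}
 *
 *	if (net_pkt_write(pkt, payload, sizeof(payload)) < 0) {
 *		net_pkt_unref(pkt);
 *		return -EIO;
 *	}
 */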
2088 
2089 /**
2090  * @brief Append a buffer to a packet
2091  *
2092  * @param pkt    Network packet where to append the buffer
2093  * @param buffer Buffer to append
2094  */
2095 void net_pkt_append_buffer(struct net_pkt *pkt, struct net_buf *buffer);
2096 
2097 /**
2098  * @brief Get available buffer space from a pkt
2099  *
2100  * @note Reserved bytes (headroom) in any of the fragments are not considered to
2101  *       be available.
2102  *
2103  * @param pkt The net_pkt whose buffer availability should be evaluated
2104  *
2105  * @return the amount of buffer available
2106  */
2107 size_t net_pkt_available_buffer(struct net_pkt *pkt);
2108 
2109 /**
2110  * @brief Get available buffer space for payload from a pkt
2111  *
2112  * @note Reserved bytes (headroom) in any of the fragments are not considered to
2113  *       be available.
2114  *
2115  * @details Unlike net_pkt_available_buffer(), this takes into account the
2116  *          space required for protocol headers.
2117  *
2118  * @param pkt   The net_pkt whose payload buffer availability should
2119  *              be evaluated
2120  * @param proto The IP protocol type (can be 0 for none).
2121  *
2122  * @return the amount of buffer available for payload
2123  */
2124 size_t net_pkt_available_payload_buffer(struct net_pkt *pkt,
2125 					enum net_ip_protocol proto);
2126 
2127 /**
2128  * @brief Trim net_pkt buffer
2129  *
2130  * @details This checks for unused buffers and deallocates them
2131  *          accordingly.
2132  *
2133  * @param pkt The net_pkt which buffer will be trimmed
2134  */
2135 void net_pkt_trim_buffer(struct net_pkt *pkt);
2136 
2137 /**
2138  * @brief Remove @a length bytes from tail of packet
2139  *
2140  * @details This function does not take packet cursor into account. It is a
2141  *          helper to remove unneeded bytes from tail of packet (like appended
2142  *          CRC). It takes care of buffer deallocation if removed bytes span
2143  *          whole buffer(s).
2144  *
2145  * @param pkt    Network packet
2146  * @param length Number of bytes to be removed
2147  *
2148  * @retval 0       On success.
2149  * @retval -EINVAL If packet length is shorter than @a length.
2150  */
2151 int net_pkt_remove_tail(struct net_pkt *pkt, size_t length);
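
/* Illustrative sketch (not part of the original header): stripping a 4-byte
 * frame check sequence that a driver left appended to a received packet.
 *
 *	if (net_pkt_remove_tail(pkt, 4) < 0) {
 *		return -EINVAL; // packet was shorter than the trailer
 *	}
 */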
2152 
2153 /**
2154  * @brief Initialize net_pkt cursor
2155  *
2156  * @details This will initialize the net_pkt cursor from its buffer.
2157  *
2158  * @param pkt The net_pkt whose cursor is going to be initialized
2159  */
2160 void net_pkt_cursor_init(struct net_pkt *pkt);
2161 
2162 /**
2163  * @brief Backup net_pkt cursor
2164  *
2165  * @param pkt    The net_pkt whose cursor is going to be backed up
2166  * @param backup The cursor where to backup net_pkt cursor
2167  */
2168 static inline void net_pkt_cursor_backup(struct net_pkt *pkt,
2169 					 struct net_pkt_cursor *backup)
2170 {
2171 	backup->buf = pkt->cursor.buf;
2172 	backup->pos = pkt->cursor.pos;
2173 }
2174 
2175 /**
2176  * @brief Restore net_pkt cursor from a backup
2177  *
2178  * @param pkt    The net_pkt whose cursor is going to be restored
2179  * @param backup The cursor from where to restore net_pkt cursor
2180  */
2181 static inline void net_pkt_cursor_restore(struct net_pkt *pkt,
2182 					  struct net_pkt_cursor *backup)
2183 {
2184 	pkt->cursor.buf = backup->buf;
2185 	pkt->cursor.pos = backup->pos;
2186 }
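
/* Illustrative sketch (not part of the original header): peeking at data
 * without losing the current read position by saving and restoring the
 * cursor around the access. `pkt` is assumed to come from the caller.
 *
 *	struct net_pkt_cursor backup;
 *	uint16_t field;
 *
 *	net_pkt_cursor_backup(pkt, &backup);
 *	(void)net_pkt_read_be16(pkt, &field);
 *	net_pkt_cursor_restore(pkt, &backup);
 */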
2187 
2188 /**
2189  * @brief Returns current position of the cursor
2190  *
2191  * @param pkt The net_pkt whose cursor position is going to be returned
2192  *
2193  * @return cursor's position
2194  */
2195 static inline void *net_pkt_cursor_get_pos(struct net_pkt *pkt)
2196 {
2197 	return pkt->cursor.pos;
2198 }
2199 
2200 /**
2201  * @brief Skip some data from a net_pkt
2202  *
2203  * @details net_pkt's cursor should be properly initialized.
2204  *          Cursor position will be updated after the operation.
2205  *          The behavior depends on the pkt->overwrite bit: if it is set,
2206  *          this function only advances the cursor by the requested length.
2207  *          If it is not set, the cursor is advanced the same way, but
2208  *          whenever the cursor is already at the end of the existing data
2209  *          the buffer length is incremented as well. In that case the
2210  *          behavior is just like net_pkt_write() or net_pkt_memset(), the
2211  *          difference being that the buffer content itself is not touched
2212  *          (and may therefore be garbage).
2213  *
2214  * @param pkt    The net_pkt whose cursor will be updated to skip given
2215  *               amount of data from the buffer.
2216  * @param length Amount of data to skip in the buffer
2217  *
2218  * @return 0 in success, negative errno code otherwise.
2219  */
2220 int net_pkt_skip(struct net_pkt *pkt, size_t length);
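
/* Illustrative sketch (not part of the original header): positioning the
 * cursor past a 20-byte header before reading the payload into a local
 * buffer. net_pkt_set_overwrite() is the setter defined earlier in this
 * header; `buf` is assumed to be a caller-provided array.
 *
 *	net_pkt_cursor_init(pkt);
 *	net_pkt_set_overwrite(pkt, true);
 *
 *	if (net_pkt_skip(pkt, 20) < 0 ||
 *	    net_pkt_read(pkt, buf, sizeof(buf)) < 0) {
 *		return -EMSGSIZE;
 *	}
 */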
2221 
2222 /**
2223  * @brief Memset some data in a net_pkt
2224  *
2225  * @details net_pkt's cursor should be properly initialized and,
2226  *          if needed, positioned using net_pkt_skip.
2227  *          Cursor position will be updated after the operation.
2228  *
2229  * @param pkt    The net_pkt whose buffer to fill starting at the current
2230  *               cursor position.
2231  * @param byte   The byte to write in memory
2232  * @param length Amount of data to memset with given byte
2233  *
2234  * @return 0 in success, negative errno code otherwise.
2235  */
2236 int net_pkt_memset(struct net_pkt *pkt, int byte, size_t length);
2237 
2238 /**
2239  * @brief Copy data from a packet into another one.
2240  *
2241  * @details Both net_pkt cursors should be properly initialized and,
2242  *          if needed, positioned using net_pkt_skip.
2243  *          The cursors will be updated after the operation.
2244  *
2245  * @param pkt_dst Destination network packet.
2246  * @param pkt_src Source network packet.
2247  * @param length  Length of data to be copied.
2248  *
2249  * @return 0 on success, negative errno code otherwise.
2250  */
2251 int net_pkt_copy(struct net_pkt *pkt_dst,
2252 		 struct net_pkt *pkt_src,
2253 		 size_t length);
2254 
2255 /**
2256  * @brief Clone pkt and its buffer. The cloned packet will be allocated on
2257  *        the same pool as the original one.
2258  *
2259  * @param pkt Original pkt to be cloned
2260  * @param timeout Timeout to wait for free buffer
2261  *
2262  * @return NULL if error, cloned packet otherwise.
2263  */
2264 struct net_pkt *net_pkt_clone(struct net_pkt *pkt, k_timeout_t timeout);
2265 
2266 /**
2267  * @brief Clone pkt and its buffer. The cloned packet will be allocated on
2268  *        the RX packet pool.
2269  *
2270  * @param pkt Original pkt to be cloned
2271  * @param timeout Timeout to wait for free buffer
2272  *
2273  * @return NULL if error, cloned packet otherwise.
2274  */
2275 struct net_pkt *net_pkt_rx_clone(struct net_pkt *pkt, k_timeout_t timeout);
2276 
2277 /**
2278  * @brief Clone pkt and increase the refcount of its buffer.
2279  *
2280  * @param pkt Original pkt to be shallow cloned
2281  * @param timeout Timeout to wait for free packet
2282  *
2283  * @return NULL if error, cloned packet otherwise.
2284  */
2285 struct net_pkt *net_pkt_shallow_clone(struct net_pkt *pkt,
2286 				      k_timeout_t timeout);
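
/* Illustrative sketch (not part of the original header): duplicating a packet
 * before handing the original to another consumer. net_pkt_clone() copies the
 * buffer as well, while net_pkt_shallow_clone() only takes a reference on it,
 * which is cheaper when neither side will modify the data.
 *
 *	struct net_pkt *copy = net_pkt_clone(pkt, K_MSEC(100));
 *
 *	if (copy == NULL) {
 *		return -ENOMEM;
 *	}
 */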
2287 
2288 /**
2289  * @brief Read some data from a net_pkt
2290  *
2291  * @details net_pkt's cursor should be properly initialized and,
2292  *          if needed, positioned using net_pkt_skip.
2293  *          Cursor position will be updated after the operation.
2294  *
2295  * @param pkt    The network packet from where to read some data
2296  * @param data   The destination buffer where to copy the data
2297  * @param length The amount of data to copy
2298  *
2299  * @return 0 on success, negative errno code otherwise.
2300  */
2301 int net_pkt_read(struct net_pkt *pkt, void *data, size_t length);
2302 
2303 /**
2304  * @brief Read a byte (uint8_t) from a net_pkt
2305  *
2306  * @details net_pkt's cursor should be properly initialized and,
2307  *          if needed, positioned using net_pkt_skip.
2308  *          Cursor position will be updated after the operation.
2309  *
2310  * @param pkt  The network packet from where to read
2311  * @param data The destination uint8_t where to copy the data
2312  *
2313  * @return 0 on success, negative errno code otherwise.
2314  */
2315 static inline int net_pkt_read_u8(struct net_pkt *pkt, uint8_t *data)
2316 {
2317 	return net_pkt_read(pkt, data, 1);
2318 }
2319 
2320 /**
2321  * @brief Read uint16_t big endian data from a net_pkt
2322  *
2323  * @details net_pkt's cursor should be properly initialized and,
2324  *          if needed, positioned using net_pkt_skip.
2325  *          Cursor position will be updated after the operation.
2326  *
2327  * @param pkt  The network packet from where to read
2328  * @param data The destination uint16_t where to copy the data
2329  *
2330  * @return 0 on success, negative errno code otherwise.
2331  */
2332 int net_pkt_read_be16(struct net_pkt *pkt, uint16_t *data);
2333 
2334 /**
2335  * @brief Read uint16_t little endian data from a net_pkt
2336  *
2337  * @details net_pkt's cursor should be properly initialized and,
2338  *          if needed, positioned using net_pkt_skip.
2339  *          Cursor position will be updated after the operation.
2340  *
2341  * @param pkt  The network packet from where to read
2342  * @param data The destination uint16_t where to copy the data
2343  *
2344  * @return 0 on success, negative errno code otherwise.
2345  */
2346 int net_pkt_read_le16(struct net_pkt *pkt, uint16_t *data);
2347 
2348 /**
2349  * @brief Read uint32_t big endian data from a net_pkt
2350  *
2351  * @details net_pkt's cursor should be properly initialized and,
2352  *          if needed, positioned using net_pkt_skip.
2353  *          Cursor position will be updated after the operation.
2354  *
2355  * @param pkt  The network packet from where to read
2356  * @param data The destination uint32_t where to copy the data
2357  *
2358  * @return 0 on success, negative errno code otherwise.
2359  */
2360 int net_pkt_read_be32(struct net_pkt *pkt, uint32_t *data);
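
/* Illustrative sketch (not part of the original header): pulling a few
 * big-endian fields out of a received packet, here a hypothetical
 * type/length/value style header.
 *
 *	uint8_t type;
 *	uint16_t len;
 *	uint32_t value;
 *
 *	if (net_pkt_read_u8(pkt, &type) < 0 ||
 *	    net_pkt_read_be16(pkt, &len) < 0 ||
 *	    net_pkt_read_be32(pkt, &value) < 0) {
 *		return -EMSGSIZE;
 *	}
 */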
2361 
2362 /**
2363  * @brief Write data into a net_pkt
2364  *
2365  * @details net_pkt's cursor should be properly initialized and,
2366  *          if needed, positioned using net_pkt_skip.
2367  *          Cursor position will be updated after the operation.
2368  *
2369  * @param pkt    The network packet where to write
2370  * @param data   Data to be written
2371  * @param length Length of the data to be written
2372  *
2373  * @return 0 on success, negative errno code otherwise.
2374  */
2375 int net_pkt_write(struct net_pkt *pkt, const void *data, size_t length);
2376 
2377 /**
2378  * @brief Write a byte (uint8_t) data to a net_pkt
2379  *
2380  * @details net_pkt's cursor should be properly initialized and,
2381  *          if needed, positioned using net_pkt_skip.
2382  *          Cursor position will be updated after the operation.
2383  *
2384  * @param pkt  The network packet where to write
2385  * @param data The uint8_t value to write
2386  *
2387  * @return 0 on success, negative errno code otherwise.
2388  */
2389 static inline int net_pkt_write_u8(struct net_pkt *pkt, uint8_t data)
2390 {
2391 	return net_pkt_write(pkt, &data, sizeof(uint8_t));
2392 }
2393 
2394 /**
2395  * @brief Write a uint16_t big endian data to a net_pkt
2396  *
2397  * @details net_pkt's cursor should be properly initialized and,
2398  *          if needed, positioned using net_pkt_skip.
2399  *          Cursor position will be updated after the operation.
2400  *
2401  * @param pkt  The network packet where to write
2402  * @param data The uint16_t value in host byte order to write
2403  *
2404  * @return 0 on success, negative errno code otherwise.
2405  */
2406 static inline int net_pkt_write_be16(struct net_pkt *pkt, uint16_t data)
2407 {
2408 	uint16_t data_be16 = htons(data);
2409 
2410 	return net_pkt_write(pkt, &data_be16, sizeof(uint16_t));
2411 }
2412 
2413 /**
2414  * @brief Write a uint32_t big endian data to a net_pkt
2415  *
2416  * @details net_pkt's cursor should be properly initialized and,
2417  *          if needed, positioned using net_pkt_skip.
2418  *          Cursor position will be updated after the operation.
2419  *
2420  * @param pkt  The network packet where to write
2421  * @param data The uint32_t value in host byte order to write
2422  *
2423  * @return 0 on success, negative errno code otherwise.
2424  */
2425 static inline int net_pkt_write_be32(struct net_pkt *pkt, uint32_t data)
2426 {
2427 	uint32_t data_be32 = htonl(data);
2428 
2429 	return net_pkt_write(pkt, &data_be32, sizeof(uint32_t));
2430 }
2431 
2432 /**
2433  * @brief Write a uint32_t little endian data to a net_pkt
2434  *
2435  * @details net_pkt's cursor should be properly initialized and,
2436  *          if needed, positioned using net_pkt_skip.
2437  *          Cursor position will be updated after the operation.
2438  *
2439  * @param pkt  The network packet where to write
2440  * @param data The uint32_t value in host byte order to write
2441  *
2442  * @return 0 on success, negative errno code otherwise.
2443  */
2444 static inline int net_pkt_write_le32(struct net_pkt *pkt, uint32_t data)
2445 {
2446 	uint32_t data_le32 = sys_cpu_to_le32(data);
2447 
2448 	return net_pkt_write(pkt, &data_le32, sizeof(uint32_t));
2449 }
2450 
2451 /**
2452  * @brief Write a uint16_t little endian data to a net_pkt
2453  *
2454  * @details net_pkt's cursor should be properly initialized and,
2455  *          if needed, positioned using net_pkt_skip.
2456  *          Cursor position will be updated after the operation.
2457  *
2458  * @param pkt  The network packet where to write
2459  * @param data The uint16_t value in host byte order to write
2460  *
2461  * @return 0 on success, negative errno code otherwise.
2462  */
2463 static inline int net_pkt_write_le16(struct net_pkt *pkt, uint16_t data)
2464 {
2465 	uint16_t data_le16 = sys_cpu_to_le16(data);
2466 
2467 	return net_pkt_write(pkt, &data_le16, sizeof(uint16_t));
2468 }
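
/* Illustrative sketch (not part of the original header): serializing the same
 * hypothetical type/length/value header into a packet in network byte order.
 *
 *	if (net_pkt_write_u8(pkt, type) < 0 ||
 *	    net_pkt_write_be16(pkt, len) < 0 ||
 *	    net_pkt_write_be32(pkt, value) < 0) {
 *		return -ENOMEM;
 *	}
 */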
2469 
2470 /**
2471  * @brief Get the amount of data which can be read from current cursor position
2472  *
2473  * @param pkt Network packet
2474  *
2475  * @return Amount of data which can be read from current pkt cursor
2476  */
2477 size_t net_pkt_remaining_data(struct net_pkt *pkt);
2478 
2479 /**
2480  * @brief Update the overall length of a packet
2481  *
2482  * @details Unlike net_pkt_pull() below, this does not take the packet cursor
2483  *          into account. It is mainly a helper dedicated to the IPv4 and IPv6
2484  *          input functions. It shrinks the overall length to the given value.
2485  *
2486  * @param pkt    Network packet
2487  * @param length The new length of the packet
2488  *
2489  * @return 0 on success, negative errno code otherwise.
2490  */
2491 int net_pkt_update_length(struct net_pkt *pkt, size_t length);
2492 
2493 /**
2494  * @brief Remove data from the packet at current location
2495  *
2496  * @details net_pkt's cursor should be properly initialized and,
2497  *          eventually, properly positioned using net_pkt_skip/read/write.
2498  *          Note that net_pkt's cursor is reset by this function.
2499  *
2500  * @param pkt    Network packet
2501  * @param length Number of bytes to be removed
2502  *
2503  * @return 0 on success, negative errno code otherwise.
2504  */
2505 int net_pkt_pull(struct net_pkt *pkt, size_t length);
2506 
2507 /**
2508  * @brief Get the actual offset in the packet from its cursor
2509  *
2510  * @param pkt Network packet.
2511  *
2512  * @return a valid offset on success, 0 otherwise as there is nothing that
2513  *         can be done to evaluate the offset.
2514  */
2515 uint16_t net_pkt_get_current_offset(struct net_pkt *pkt);
2516 
2517 /**
2518  * @brief Check if a data size could fit contiguously
2519  *
2520  * @details net_pkt's cursor should be properly initialized and,
2521  *          if needed, positioned using net_pkt_skip.
2522  *
2523  * @param pkt  Network packet.
2524  * @param size The size to check for contiguity
2525  *
2526  * @return true if that is the case, false otherwise.
2527  */
2528 bool net_pkt_is_contiguous(struct net_pkt *pkt, size_t size);
2529 
2530 /**
2531  * Get the contiguous buffer space
2532  *
2533  * @param pkt Network packet
2534  *
2535  * @return The available contiguous buffer space in bytes starting from the
2536  *         current cursor position. 0 in case of an error.
2537  */
2538 size_t net_pkt_get_contiguous_len(struct net_pkt *pkt);
2539 
2540 /** @cond INTERNAL_HIDDEN */
2541 
2542 struct net_pkt_data_access {
2543 #if !defined(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)
2544 	void *data;
2545 #endif
2546 	const size_t size;
2547 };
2548 
2549 #if defined(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)
2550 #define NET_PKT_DATA_ACCESS_DEFINE(_name, _type)		\
2551 	struct net_pkt_data_access _name = {			\
2552 		.size = sizeof(_type),				\
2553 	}
2554 
2555 #define NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(_name, _type)	\
2556 	NET_PKT_DATA_ACCESS_DEFINE(_name, _type)
2557 
2558 #else
2559 #define NET_PKT_DATA_ACCESS_DEFINE(_name, _type)		\
2560 	_type _hdr_##_name;					\
2561 	struct net_pkt_data_access _name = {			\
2562 		.data = &_hdr_##_name,				\
2563 		.size = sizeof(_type),				\
2564 	}
2565 
2566 #define NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(_name, _type)	\
2567 	struct net_pkt_data_access _name = {			\
2568 		.data = NULL,					\
2569 		.size = sizeof(_type),				\
2570 	}
2571 
2572 #endif /* CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS */
2573 
2574 /** @endcond */
2575 
2576 /**
2577  * @brief Get data from a network packet in a contiguous way
2578  *
2579  * @details net_pkt's cursor should be properly initialized and,
2580  *          if needed, positioned using net_pkt_skip. Unlike other functions,
2581  *          cursor position will not be updated after the operation.
2582  *
2583  * @param pkt    The network packet from where to get the data.
2584  * @param access A pointer to a valid net_pkt_data_access describing the
2585  *        data to get in a contiguous way.
2586  *
2587  * @return a pointer to the requested contiguous data, NULL otherwise.
2588  */
2589 void *net_pkt_get_data(struct net_pkt *pkt,
2590 		       struct net_pkt_data_access *access);
2591 
2592 /**
2593  * @brief Set contiguous data into a network packet
2594  *
2595  * @details net_pkt's cursor should be properly initialized and,
2596  *          if needed, positioned using net_pkt_skip.
2597  *          Cursor position will be updated after the operation.
2598  *
2599  * @param pkt    The network packet to where the data should be set.
2600  * @param access A pointer to a valid net_pkt_data_access describing the
2601  *        data to set.
2602  *
2603  * @return 0 on success, a negative errno otherwise.
2604  */
2605 int net_pkt_set_data(struct net_pkt *pkt,
2606 		     struct net_pkt_data_access *access);
2607 
2608 /**
2609  * Acknowledge previously contiguous data taken from a network packet.
2610  * The packet needs to be set to overwrite mode.
2611  */
2612 static inline int net_pkt_acknowledge_data(struct net_pkt *pkt,
2613 					   struct net_pkt_data_access *access)
2614 {
2615 	return net_pkt_skip(pkt, access->size);
2616 }
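
/* Illustrative sketch (not part of the original header): accessing an IPv4
 * header in place whether or not it is contiguous in the first fragment.
 * struct net_ipv4_hdr comes from net_ip.h, which this header includes.
 *
 *	NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access, struct net_ipv4_hdr);
 *	struct net_ipv4_hdr *hdr;
 *
 *	net_pkt_cursor_init(pkt);
 *
 *	hdr = (struct net_ipv4_hdr *)net_pkt_get_data(pkt, &ipv4_access);
 *	if (hdr == NULL) {
 *		return -ENOBUFS;
 *	}
 *
 *	// ... inspect or modify *hdr ...
 *
 *	(void)net_pkt_set_data(pkt, &ipv4_access);
 */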
2617 
2618 /**
2619  * @}
2620  */
2621 
2622 #ifdef __cplusplus
2623 }
2624 #endif
2625 
2626 #endif /* ZEPHYR_INCLUDE_NET_NET_PKT_H_ */
2627