1 /** @file
2  * @brief Network packet buffer descriptor API
3  *
4  * Network data is passed between different parts of the stack via
5  * the net_buf struct.
6  */
7 
8 /*
9  * Copyright (c) 2016 Intel Corporation
10  *
11  * SPDX-License-Identifier: Apache-2.0
12  */
13 
14 /* Data buffer API - used for all data to/from net */
15 
16 #ifndef ZEPHYR_INCLUDE_NET_NET_PKT_H_
17 #define ZEPHYR_INCLUDE_NET_NET_PKT_H_
18 
19 #include <zephyr/types.h>
20 #include <stdbool.h>
21 
22 #include <zephyr/net_buf.h>
23 
24 #if defined(CONFIG_IEEE802154)
25 #include <zephyr/net/ieee802154_pkt.h>
26 #endif
27 #include <zephyr/net/net_core.h>
28 #include <zephyr/net/net_linkaddr.h>
29 #include <zephyr/net/net_ip.h>
30 #include <zephyr/net/net_if.h>
31 #include <zephyr/net/net_context.h>
32 #include <zephyr/net/net_time.h>
33 #include <zephyr/net/ethernet_vlan.h>
34 #include <zephyr/net/ptp_time.h>
35 
36 #ifdef __cplusplus
37 extern "C" {
38 #endif
39 
40 /**
41  * @brief Network packet management library
42  * @defgroup net_pkt Network Packet Library
43  * @since 1.5
44  * @version 0.8.0
45  * @ingroup networking
46  * @{
47  */
48 
49 struct net_context;
50 
51 /** @cond INTERNAL_HIDDEN */
52 
53 #if defined(CONFIG_NET_PKT_ALLOC_STATS)
54 struct net_pkt_alloc_stats {
55 	uint64_t alloc_sum;
56 	uint64_t time_sum;
57 	uint32_t count;
58 };
59 
60 struct net_pkt_alloc_stats_slab {
61 	struct net_pkt_alloc_stats ok;
62 	struct net_pkt_alloc_stats fail;
63 	struct k_mem_slab *slab;
64 };
65 
66 #define NET_PKT_ALLOC_STATS_DEFINE(alloc_name, slab_name)		  \
67 	STRUCT_SECTION_ITERABLE(net_pkt_alloc_stats_slab, alloc_name) = { \
68 		.slab = &slab_name,					  \
69 	}
70 
71 #else
72 #define NET_PKT_ALLOC_STATS_DEFINE(name, slab)
73 #endif /* CONFIG_NET_PKT_ALLOC_STATS */
74 
75 /* buffer cursor used in net_pkt */
76 struct net_pkt_cursor {
77 	/** Current net_buf pointed to by the cursor */
78 	struct net_buf *buf;
79 	/** Current position in the data buffer of the net_buf */
80 	uint8_t *pos;
81 };
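/* Illustrative sketch of how the cursor is typically used together with the
 * read helpers declared elsewhere in this API; the exact call sequence below
 * is an assumption for demonstration purposes only.
 *
 * @code{.c}
 * uint8_t byte;
 *
 * net_pkt_cursor_init(pkt);          // rewind cursor to start of the buffer
 * net_pkt_set_overwrite(pkt, true);  // read existing data, do not append
 * net_pkt_read_u8(pkt, &byte);       // advances the cursor by one byte
 * @endcode
 */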
82 
83 /** @endcond */
84 
85 /**
86  * @brief Network packet.
87  *
88  * Note that if you add new fields into net_pkt, remember to update
89  * the net_pkt_clone() function accordingly.
90  */
91 struct net_pkt {
92 	/**
93 	 * The fifo is used by the RX/TX threads and by the socket layer. The
94 	 * net_pkt is queued via the fifo to the processing thread.
95 	 */
96 	intptr_t fifo;
97 
98 	/** Slab from which this net_pkt was allocated */
99 	struct k_mem_slab *slab;
100 
101 	/** buffer holding the packet */
102 	union {
103 		struct net_buf *frags;   /**< buffer fragment */
104 		struct net_buf *buffer;  /**< alias to a buffer fragment */
105 	};
106 
107 	/** Internal buffer iterator used for reading/writing */
108 	struct net_pkt_cursor cursor;
109 
110 	/** Network connection context */
111 	struct net_context *context;
112 
113 	/** Network interface */
114 	struct net_if *iface;
115 
116 	/** @cond ignore */
117 
118 #if defined(CONFIG_NET_TCP)
119 	/** Allow placing the packet into sys_slist_t */
120 	sys_snode_t next;
121 #endif
122 #if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
123 	struct net_if *orig_iface; /* Original network interface */
124 #endif
125 
126 #if defined(CONFIG_NET_PKT_TIMESTAMP) || defined(CONFIG_NET_PKT_TXTIME)
127 	/**
128 	 * TX or RX timestamp if available
129 	 *
130 	 * For packets that have been sent over the medium, the timestamp refers
131 	 * to the time the message timestamp point was encountered at the
132 	 * reference plane.
133 	 *
134 	 * Unsent packets can be scheduled by setting the timestamp to a future
135 	 * point in time.
136 	 *
137 	 * All timestamps refer to the network subsystem's local clock.
138 	 *
139 	 * See @ref net_ptp_time for definitions of local clock, message
140 	 * timestamp point and reference plane. See @ref net_time_t for
141 	 * semantics of the network reference clock.
142 	 *
143 	 * TODO: Replace with net_time_t to decouple from PTP.
144 	 */
145 	struct net_ptp_time timestamp;
146 #endif
147 
148 #if defined(CONFIG_NET_PKT_RXTIME_STATS) || defined(CONFIG_NET_PKT_TXTIME_STATS) || \
149 	defined(CONFIG_TRACING_NET_CORE)
150 	struct {
151 		/** Create time in cycles */
152 		uint32_t create_time;
153 
154 #if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL) || \
155 	defined(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)
156 		/** Collect extra statistics for net_pkt processing
157 		 * from various points in the IP stack. See the networking
158 		 * documentation for where these points are located and how
159 		 * to interpret the results.
160 		 */
161 		struct {
162 			uint32_t stat[NET_PKT_DETAIL_STATS_COUNT];
163 			int count;
164 		} detail;
165 #endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL ||
166 	  CONFIG_NET_PKT_RXTIME_STATS_DETAIL */
167 	};
168 #endif /* CONFIG_NET_PKT_RXTIME_STATS || CONFIG_NET_PKT_TXTIME_STATS || CONFIG_TRACING_NET_CORE */
169 
170 #if defined(CONFIG_NET_PKT_ALLOC_STATS)
171 	struct net_pkt_alloc_stats_slab *alloc_stats;
172 #endif /* CONFIG_NET_PKT_ALLOC_STATS */
173 
174 	/** Reference counter */
175 	atomic_t atomic_ref;
176 
177 	/* Filled by layer 2 when a network packet is received. */
178 	struct net_linkaddr lladdr_src;
179 	struct net_linkaddr lladdr_dst;
180 	uint16_t ll_proto_type;
181 
182 #if defined(CONFIG_NET_IP)
183 	uint8_t ip_hdr_len;	/* pre-filled in order to avoid func call */
184 #endif
185 
186 	uint8_t overwrite : 1;	 /* Is packet content being overwritten? */
187 	uint8_t eof : 1;	 /* Last packet before EOF */
188 	uint8_t ptp_pkt : 1;	 /* For outgoing packet: is this packet
189 				  * an L2 PTP packet.
190 				  * Used only if defined(CONFIG_NET_L2_PTP)
191 				  */
192 	uint8_t forwarding : 1;	 /* Are we forwarding this pkt?
193 				  * Used only if defined(CONFIG_NET_ROUTE)
194 				  */
195 	uint8_t family : 3;	 /* Address family, see net_ip.h */
196 
197 	/* bitfield byte alignment boundary */
198 
199 #if defined(CONFIG_NET_IPV4_ACD)
200 	uint8_t ipv4_acd_arp_msg : 1;  /* Is this pkt an IPv4 conflict detection ARP
201 					* message.
202 					* Note: family needs to be
203 					* AF_INET.
204 					*/
205 #endif
206 #if defined(CONFIG_NET_LLDP)
207 	uint8_t lldp_pkt : 1; /* Is this pkt an LLDP message.
208 			       * Note: family needs to be
209 			       * AF_UNSPEC.
210 			       */
211 #endif
212 	uint8_t ppp_msg : 1; /* This is a PPP message */
213 	uint8_t captured : 1;	  /* Set to 1 if this packet is already being
214 				   * captured
215 				   */
216 	uint8_t l2_bridged : 1;	  /* set to 1 if this packet comes from a bridge
217 				   * and already contains its L2 header to be
218 				   * preserved. Useful only if
219 				   * defined(CONFIG_NET_ETHERNET_BRIDGE).
220 				   */
221 	uint8_t l2_processed : 1; /* Set to 1 if this packet has already been
222 				   * processed by the L2
223 				   */
224 	uint8_t chksum_done : 1; /* Checksum has already been computed for
225 				  * the packet.
226 				  */
227 #if defined(CONFIG_NET_IP_FRAGMENT)
228 	uint8_t ip_reassembled : 1; /* Packet is a reassembled IP packet. */
229 #endif
230 #if defined(CONFIG_NET_PKT_TIMESTAMP)
231 	uint8_t tx_timestamping : 1; /**< Timestamp transmitted packet */
232 	uint8_t rx_timestamping : 1; /**< Timestamp received packet */
233 #endif
234 	/* bitfield byte alignment boundary */
235 
236 #if defined(CONFIG_NET_IP)
237 	union {
238 		/* IPv6 hop limit or IPv4 ttl for this network packet.
239 		 * The value is shared between IPv6 and IPv4.
240 		 */
241 #if defined(CONFIG_NET_IPV6)
242 		uint8_t ipv6_hop_limit;
243 #endif
244 #if defined(CONFIG_NET_IPV4)
245 		uint8_t ipv4_ttl;
246 #endif
247 	};
248 
249 	union {
250 #if defined(CONFIG_NET_IPV4)
251 		uint8_t ipv4_opts_len; /* length of IPv4 header options */
252 #endif
253 #if defined(CONFIG_NET_IPV6)
254 		uint16_t ipv6_ext_len; /* length of extension headers */
255 #endif
256 	};
257 
258 #if defined(CONFIG_NET_IP_FRAGMENT)
259 	union {
260 #if defined(CONFIG_NET_IPV4_FRAGMENT)
261 		struct {
262 			uint16_t flags;		/* Fragment offset and M (More Fragment) flag */
263 			uint16_t id;		/* Fragment ID */
264 		} ipv4_fragment;
265 #endif /* CONFIG_NET_IPV4_FRAGMENT */
266 #if defined(CONFIG_NET_IPV6_FRAGMENT)
267 		struct {
268 			uint16_t flags;		/* Fragment offset and M (More Fragment) flag */
269 			uint32_t id;		/* Fragment id */
270 			uint16_t hdr_start;	/* Offset where the fragment header starts */
271 		} ipv6_fragment;
272 #endif /* CONFIG_NET_IPV6_FRAGMENT */
273 	};
274 #endif /* CONFIG_NET_IP_FRAGMENT */
275 
276 #if defined(CONFIG_NET_IPV6)
277 	/* Offset of the start of the last header before the payload data
278 	 * in an IPv6 packet, counted from the start of the IPv6 packet.
279 	 * Note that this value must be updated by whoever adds IPv6
280 	 * extension headers to the network packet.
281 	 */
282 	uint16_t ipv6_prev_hdr_start;
283 
284 	uint8_t ipv6_ext_opt_len; /* IPv6 ND option length */
285 	uint8_t ipv6_next_hdr;	/* What is the very first next header */
286 #endif /* CONFIG_NET_IPV6 */
287 
288 #if defined(CONFIG_NET_IP_DSCP_ECN)
289 	/** IPv4/IPv6 Differentiated Services Code Point value. */
290 	uint8_t ip_dscp : 6;
291 
292 	/** IPv4/IPv6 Explicit Congestion Notification value. */
293 	uint8_t ip_ecn : 2;
294 #endif /* CONFIG_NET_IP_DSCP_ECN */
295 #endif /* CONFIG_NET_IP */
296 
297 #if defined(CONFIG_NET_VLAN)
298 	/* VLAN TCI (Tag Control Information). This contains the Priority
299 	 * Code Point (PCP), Drop Eligible Indicator (DEI) and VLAN
300 	 * Identifier (VID, more commonly called the VLAN tag). This value is
301 	 * kept in host byte order.
302 	 */
303 	uint16_t vlan_tci;
304 #endif /* CONFIG_NET_VLAN */
305 
306 #if defined(NET_PKT_HAS_CONTROL_BLOCK)
307 	/* TODO: Evolve this into a union of orthogonal
308 	 *       control block declarations if further L2
309 	 *       stacks require L2-specific attributes.
310 	 */
311 #if defined(CONFIG_IEEE802154)
312 	/* The following structure requires a 4-byte alignment
313 	 * boundary to avoid padding.
314 	 */
315 	struct net_pkt_cb_ieee802154 cb;
316 #endif /* CONFIG_IEEE802154 */
317 #endif /* NET_PKT_HAS_CONTROL_BLOCK */
318 
319 	/** Network packet priority; may be left unset, in which case the
320 	 * packet is not prioritised.
321 	 */
322 	uint8_t priority;
323 
324 #if defined(CONFIG_NET_OFFLOAD) || defined(CONFIG_NET_L2_IPIP)
325 	/* Remote address of the received packet. This is only used by
326 	 * network interfaces with an offloaded TCP/IP stack, or if we
327 	 * have network tunneling in use.
328 	 */
329 	union {
330 		struct sockaddr remote;
331 
332 		/* This makes sure that there is enough storage to hold
333 		 * the address struct. The value is accessed via the remote
334 		 * address field.
335 		 */
336 		struct sockaddr_storage remote_storage;
337 	};
338 #endif /* CONFIG_NET_OFFLOAD || CONFIG_NET_L2_IPIP */
339 
340 #if defined(CONFIG_NET_CAPTURE_COOKED_MODE)
341 	/* Tell the capture API that this is a captured packet */
342 	uint8_t cooked_mode_pkt : 1;
343 #endif /* CONFIG_NET_CAPTURE_COOKED_MODE */
344 
345 #if defined(CONFIG_NET_IPV4_PMTU)
346 	/* Path MTU needed for this destination address */
347 	uint8_t ipv4_pmtu : 1;
348 #endif /* CONFIG_NET_IPV4_PMTU */
349 
350 	/** @endcond */
351 };
352 
353 /** @cond ignore */
354 
355 /* The real link layer address of the interface */
356 static inline struct net_linkaddr *net_pkt_lladdr_if(struct net_pkt *pkt)
357 {
358 	return net_if_get_link_addr(pkt->iface);
359 }
360 
361 static inline struct net_context *net_pkt_context(struct net_pkt *pkt)
362 {
363 	return pkt->context;
364 }
365 
366 static inline void net_pkt_set_context(struct net_pkt *pkt,
367 				       struct net_context *ctx)
368 {
369 	pkt->context = ctx;
370 }
371 
372 static inline struct net_if *net_pkt_iface(struct net_pkt *pkt)
373 {
374 	return pkt->iface;
375 }
376 
377 static inline void net_pkt_set_iface(struct net_pkt *pkt, struct net_if *iface)
378 {
379 	pkt->iface = iface;
380 
381 	/* If the network interface is set in pkt, then also set the type of
382 	 * the network address that is stored in pkt. This is done here so
383 	 * that the address type is properly set and is not forgotten.
384 	 */
385 	if (iface) {
386 		uint8_t type = net_if_get_link_addr(iface)->type;
387 
388 		pkt->lladdr_src.type = type;
389 		pkt->lladdr_dst.type = type;
390 	}
391 }
392 
393 static inline struct net_if *net_pkt_orig_iface(struct net_pkt *pkt)
394 {
395 #if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
396 	return pkt->orig_iface;
397 #else
398 	return pkt->iface;
399 #endif
400 }
401 
402 static inline void net_pkt_set_orig_iface(struct net_pkt *pkt,
403 					  struct net_if *iface)
404 {
405 #if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
406 	pkt->orig_iface = iface;
407 #else
408 	ARG_UNUSED(pkt);
409 	ARG_UNUSED(iface);
410 #endif
411 }
412 
413 static inline uint8_t net_pkt_family(struct net_pkt *pkt)
414 {
415 	return pkt->family;
416 }
417 
418 static inline void net_pkt_set_family(struct net_pkt *pkt, uint8_t family)
419 {
420 	pkt->family = family;
421 }
422 
423 static inline bool net_pkt_is_ptp(struct net_pkt *pkt)
424 {
425 	return !!(pkt->ptp_pkt);
426 }
427 
428 static inline void net_pkt_set_ptp(struct net_pkt *pkt, bool is_ptp)
429 {
430 	pkt->ptp_pkt = is_ptp;
431 }
432 
433 static inline bool net_pkt_is_tx_timestamping(struct net_pkt *pkt)
434 {
435 #if defined(CONFIG_NET_PKT_TIMESTAMP)
436 	return !!(pkt->tx_timestamping);
437 #else
438 	ARG_UNUSED(pkt);
439 
440 	return false;
441 #endif
442 }
443 
444 static inline void net_pkt_set_tx_timestamping(struct net_pkt *pkt, bool is_timestamping)
445 {
446 #if defined(CONFIG_NET_PKT_TIMESTAMP)
447 	pkt->tx_timestamping = is_timestamping;
448 #else
449 	ARG_UNUSED(pkt);
450 	ARG_UNUSED(is_timestamping);
451 #endif
452 }
453 
454 static inline bool net_pkt_is_rx_timestamping(struct net_pkt *pkt)
455 {
456 #if defined(CONFIG_NET_PKT_TIMESTAMP)
457 	return !!(pkt->rx_timestamping);
458 #else
459 	ARG_UNUSED(pkt);
460 
461 	return false;
462 #endif
463 }
464 
465 static inline void net_pkt_set_rx_timestamping(struct net_pkt *pkt, bool is_timestamping)
466 {
467 #if defined(CONFIG_NET_PKT_TIMESTAMP)
468 	pkt->rx_timestamping = is_timestamping;
469 #else
470 	ARG_UNUSED(pkt);
471 	ARG_UNUSED(is_timestamping);
472 #endif
473 }
474 
475 static inline bool net_pkt_is_captured(struct net_pkt *pkt)
476 {
477 	return !!(pkt->captured);
478 }
479 
480 static inline void net_pkt_set_captured(struct net_pkt *pkt, bool is_captured)
481 {
482 	pkt->captured = is_captured;
483 }
484 
485 static inline bool net_pkt_is_l2_bridged(struct net_pkt *pkt)
486 {
487 	return IS_ENABLED(CONFIG_NET_ETHERNET_BRIDGE) ? !!(pkt->l2_bridged) : 0;
488 }
489 
490 static inline void net_pkt_set_l2_bridged(struct net_pkt *pkt, bool is_l2_bridged)
491 {
492 	if (IS_ENABLED(CONFIG_NET_ETHERNET_BRIDGE)) {
493 		pkt->l2_bridged = is_l2_bridged;
494 	}
495 }
496 
497 static inline bool net_pkt_is_l2_processed(struct net_pkt *pkt)
498 {
499 	return !!(pkt->l2_processed);
500 }
501 
502 static inline void net_pkt_set_l2_processed(struct net_pkt *pkt,
503 					    bool is_l2_processed)
504 {
505 	pkt->l2_processed = is_l2_processed;
506 }
507 
508 static inline bool net_pkt_is_chksum_done(struct net_pkt *pkt)
509 {
510 	return !!(pkt->chksum_done);
511 }
512 
513 static inline void net_pkt_set_chksum_done(struct net_pkt *pkt,
514 					   bool is_chksum_done)
515 {
516 	pkt->chksum_done = is_chksum_done;
517 }
518 
519 static inline uint8_t net_pkt_ip_hdr_len(struct net_pkt *pkt)
520 {
521 #if defined(CONFIG_NET_IP)
522 	return pkt->ip_hdr_len;
523 #else
524 	ARG_UNUSED(pkt);
525 
526 	return 0;
527 #endif
528 }
529 
530 static inline void net_pkt_set_ip_hdr_len(struct net_pkt *pkt, uint8_t len)
531 {
532 #if defined(CONFIG_NET_IP)
533 	pkt->ip_hdr_len = len;
534 #else
535 	ARG_UNUSED(pkt);
536 	ARG_UNUSED(len);
537 #endif
538 }
539 
540 static inline uint8_t net_pkt_ip_dscp(struct net_pkt *pkt)
541 {
542 #if defined(CONFIG_NET_IP_DSCP_ECN)
543 	return pkt->ip_dscp;
544 #else
545 	ARG_UNUSED(pkt);
546 
547 	return 0;
548 #endif
549 }
550 
551 static inline void net_pkt_set_ip_dscp(struct net_pkt *pkt, uint8_t dscp)
552 {
553 #if defined(CONFIG_NET_IP_DSCP_ECN)
554 	pkt->ip_dscp = dscp;
555 #else
556 	ARG_UNUSED(pkt);
557 	ARG_UNUSED(dscp);
558 #endif
559 }
560 
561 static inline uint8_t net_pkt_ip_ecn(struct net_pkt *pkt)
562 {
563 #if defined(CONFIG_NET_IP_DSCP_ECN)
564 	return pkt->ip_ecn;
565 #else
566 	ARG_UNUSED(pkt);
567 
568 	return 0;
569 #endif
570 }
571 
572 static inline void net_pkt_set_ip_ecn(struct net_pkt *pkt, uint8_t ecn)
573 {
574 #if defined(CONFIG_NET_IP_DSCP_ECN)
575 	pkt->ip_ecn = ecn;
576 #else
577 	ARG_UNUSED(pkt);
578 	ARG_UNUSED(ecn);
579 #endif
580 }
581 
582 static inline uint8_t net_pkt_eof(struct net_pkt *pkt)
583 {
584 	return pkt->eof;
585 }
586 
587 static inline void net_pkt_set_eof(struct net_pkt *pkt, bool eof)
588 {
589 	pkt->eof = eof;
590 }
591 
592 static inline bool net_pkt_forwarding(struct net_pkt *pkt)
593 {
594 	return !!(pkt->forwarding);
595 }
596 
597 static inline void net_pkt_set_forwarding(struct net_pkt *pkt, bool forward)
598 {
599 	pkt->forwarding = forward;
600 }
601 
602 #if defined(CONFIG_NET_IPV4)
603 static inline uint8_t net_pkt_ipv4_ttl(struct net_pkt *pkt)
604 {
605 	return pkt->ipv4_ttl;
606 }
607 
608 static inline void net_pkt_set_ipv4_ttl(struct net_pkt *pkt,
609 					uint8_t ttl)
610 {
611 	pkt->ipv4_ttl = ttl;
612 }
613 
614 static inline uint8_t net_pkt_ipv4_opts_len(struct net_pkt *pkt)
615 {
616 	return pkt->ipv4_opts_len;
617 }
618 
619 static inline void net_pkt_set_ipv4_opts_len(struct net_pkt *pkt,
620 					     uint8_t opts_len)
621 {
622 	pkt->ipv4_opts_len = opts_len;
623 }
624 #else
625 static inline uint8_t net_pkt_ipv4_ttl(struct net_pkt *pkt)
626 {
627 	ARG_UNUSED(pkt);
628 
629 	return 0;
630 }
631 
632 static inline void net_pkt_set_ipv4_ttl(struct net_pkt *pkt,
633 					uint8_t ttl)
634 {
635 	ARG_UNUSED(pkt);
636 	ARG_UNUSED(ttl);
637 }
638 
639 static inline uint8_t net_pkt_ipv4_opts_len(struct net_pkt *pkt)
640 {
641 	ARG_UNUSED(pkt);
642 	return 0;
643 }
644 
645 static inline void net_pkt_set_ipv4_opts_len(struct net_pkt *pkt,
646 					     uint8_t opts_len)
647 {
648 	ARG_UNUSED(pkt);
649 	ARG_UNUSED(opts_len);
650 }
651 #endif
652 
653 #if defined(CONFIG_NET_IPV6)
654 static inline uint8_t net_pkt_ipv6_ext_opt_len(struct net_pkt *pkt)
655 {
656 	return pkt->ipv6_ext_opt_len;
657 }
658 
659 static inline void net_pkt_set_ipv6_ext_opt_len(struct net_pkt *pkt,
660 						uint8_t len)
661 {
662 	pkt->ipv6_ext_opt_len = len;
663 }
664 
665 static inline uint8_t net_pkt_ipv6_next_hdr(struct net_pkt *pkt)
666 {
667 	return pkt->ipv6_next_hdr;
668 }
669 
670 static inline void net_pkt_set_ipv6_next_hdr(struct net_pkt *pkt,
671 					     uint8_t next_hdr)
672 {
673 	pkt->ipv6_next_hdr = next_hdr;
674 }
675 
676 static inline uint16_t net_pkt_ipv6_ext_len(struct net_pkt *pkt)
677 {
678 	return pkt->ipv6_ext_len;
679 }
680 
681 static inline void net_pkt_set_ipv6_ext_len(struct net_pkt *pkt, uint16_t len)
682 {
683 	pkt->ipv6_ext_len = len;
684 }
685 
686 static inline uint16_t net_pkt_ipv6_hdr_prev(struct net_pkt *pkt)
687 {
688 	return pkt->ipv6_prev_hdr_start;
689 }
690 
691 static inline void net_pkt_set_ipv6_hdr_prev(struct net_pkt *pkt,
692 					     uint16_t offset)
693 {
694 	pkt->ipv6_prev_hdr_start = offset;
695 }
696 
697 static inline uint8_t net_pkt_ipv6_hop_limit(struct net_pkt *pkt)
698 {
699 	return pkt->ipv6_hop_limit;
700 }
701 
702 static inline void net_pkt_set_ipv6_hop_limit(struct net_pkt *pkt,
703 					      uint8_t hop_limit)
704 {
705 	pkt->ipv6_hop_limit = hop_limit;
706 }
707 #else /* CONFIG_NET_IPV6 */
708 static inline uint8_t net_pkt_ipv6_ext_opt_len(struct net_pkt *pkt)
709 {
710 	ARG_UNUSED(pkt);
711 
712 	return 0;
713 }
714 
715 static inline void net_pkt_set_ipv6_ext_opt_len(struct net_pkt *pkt,
716 						uint8_t len)
717 {
718 	ARG_UNUSED(pkt);
719 	ARG_UNUSED(len);
720 }
721 
722 static inline uint8_t net_pkt_ipv6_next_hdr(struct net_pkt *pkt)
723 {
724 	ARG_UNUSED(pkt);
725 
726 	return 0;
727 }
728 
729 static inline void net_pkt_set_ipv6_next_hdr(struct net_pkt *pkt,
730 					     uint8_t next_hdr)
731 {
732 	ARG_UNUSED(pkt);
733 	ARG_UNUSED(next_hdr);
734 }
735 
736 static inline uint16_t net_pkt_ipv6_ext_len(struct net_pkt *pkt)
737 {
738 	ARG_UNUSED(pkt);
739 
740 	return 0;
741 }
742 
743 static inline void net_pkt_set_ipv6_ext_len(struct net_pkt *pkt, uint16_t len)
744 {
745 	ARG_UNUSED(pkt);
746 	ARG_UNUSED(len);
747 }
748 
749 static inline uint16_t net_pkt_ipv6_hdr_prev(struct net_pkt *pkt)
750 {
751 	ARG_UNUSED(pkt);
752 
753 	return 0;
754 }
755 
756 static inline void net_pkt_set_ipv6_hdr_prev(struct net_pkt *pkt,
757 					     uint16_t offset)
758 {
759 	ARG_UNUSED(pkt);
760 	ARG_UNUSED(offset);
761 }
762 
763 static inline uint8_t net_pkt_ipv6_hop_limit(struct net_pkt *pkt)
764 {
765 	ARG_UNUSED(pkt);
766 
767 	return 0;
768 }
769 
770 static inline void net_pkt_set_ipv6_hop_limit(struct net_pkt *pkt,
771 					      uint8_t hop_limit)
772 {
773 	ARG_UNUSED(pkt);
774 	ARG_UNUSED(hop_limit);
775 }
776 #endif /* CONFIG_NET_IPV6 */
777 
778 static inline uint16_t net_pkt_ip_opts_len(struct net_pkt *pkt)
779 {
780 #if defined(CONFIG_NET_IPV6)
781 	return pkt->ipv6_ext_len;
782 #elif defined(CONFIG_NET_IPV4)
783 	return pkt->ipv4_opts_len;
784 #else
785 	ARG_UNUSED(pkt);
786 
787 	return 0;
788 #endif
789 }
790 
791 #if defined(CONFIG_NET_IPV4_PMTU)
792 static inline bool net_pkt_ipv4_pmtu(struct net_pkt *pkt)
793 {
794 	return !!pkt->ipv4_pmtu;
795 }
796 
797 static inline void net_pkt_set_ipv4_pmtu(struct net_pkt *pkt, bool value)
798 {
799 	pkt->ipv4_pmtu = value;
800 }
801 #else
802 static inline bool net_pkt_ipv4_pmtu(struct net_pkt *pkt)
803 {
804 	ARG_UNUSED(pkt);
805 
806 	return false;
807 }
808 
809 static inline void net_pkt_set_ipv4_pmtu(struct net_pkt *pkt, bool value)
810 {
811 	ARG_UNUSED(pkt);
812 	ARG_UNUSED(value);
813 }
814 #endif /* CONFIG_NET_IPV4_PMTU */
815 
816 #if defined(CONFIG_NET_IPV4_FRAGMENT)
817 static inline uint16_t net_pkt_ipv4_fragment_offset(struct net_pkt *pkt)
818 {
819 	return (pkt->ipv4_fragment.flags & NET_IPV4_FRAGH_OFFSET_MASK) * 8;
820 }
821 
822 static inline bool net_pkt_ipv4_fragment_more(struct net_pkt *pkt)
823 {
824 	return (pkt->ipv4_fragment.flags & NET_IPV4_MORE_FRAG_MASK) != 0;
825 }
826 
827 static inline void net_pkt_set_ipv4_fragment_flags(struct net_pkt *pkt, uint16_t flags)
828 {
829 	pkt->ipv4_fragment.flags = flags;
830 }
831 
832 static inline uint32_t net_pkt_ipv4_fragment_id(struct net_pkt *pkt)
833 {
834 	return pkt->ipv4_fragment.id;
835 }
836 
837 static inline void net_pkt_set_ipv4_fragment_id(struct net_pkt *pkt, uint32_t id)
838 {
839 	pkt->ipv4_fragment.id = id;
840 }
841 #else /* CONFIG_NET_IPV4_FRAGMENT */
842 static inline uint16_t net_pkt_ipv4_fragment_offset(struct net_pkt *pkt)
843 {
844 	ARG_UNUSED(pkt);
845 
846 	return 0;
847 }
848 
849 static inline bool net_pkt_ipv4_fragment_more(struct net_pkt *pkt)
850 {
851 	ARG_UNUSED(pkt);
852 
853 	return 0;
854 }
855 
856 static inline void net_pkt_set_ipv4_fragment_flags(struct net_pkt *pkt, uint16_t flags)
857 {
858 	ARG_UNUSED(pkt);
859 	ARG_UNUSED(flags);
860 }
861 
862 static inline uint32_t net_pkt_ipv4_fragment_id(struct net_pkt *pkt)
863 {
864 	ARG_UNUSED(pkt);
865 
866 	return 0;
867 }
868 
869 static inline void net_pkt_set_ipv4_fragment_id(struct net_pkt *pkt, uint32_t id)
870 {
871 	ARG_UNUSED(pkt);
872 	ARG_UNUSED(id);
873 }
874 #endif /* CONFIG_NET_IPV4_FRAGMENT */
875 
876 #if defined(CONFIG_NET_IPV6_FRAGMENT)
877 static inline uint16_t net_pkt_ipv6_fragment_start(struct net_pkt *pkt)
878 {
879 	return pkt->ipv6_fragment.hdr_start;
880 }
881 
882 static inline void net_pkt_set_ipv6_fragment_start(struct net_pkt *pkt,
883 						   uint16_t start)
884 {
885 	pkt->ipv6_fragment.hdr_start = start;
886 }
887 
888 static inline uint16_t net_pkt_ipv6_fragment_offset(struct net_pkt *pkt)
889 {
890 	return pkt->ipv6_fragment.flags & NET_IPV6_FRAGH_OFFSET_MASK;
891 }
892 static inline bool net_pkt_ipv6_fragment_more(struct net_pkt *pkt)
893 {
894 	return (pkt->ipv6_fragment.flags & 0x01) != 0;
895 }
896 
897 static inline void net_pkt_set_ipv6_fragment_flags(struct net_pkt *pkt,
898 						   uint16_t flags)
899 {
900 	pkt->ipv6_fragment.flags = flags;
901 }
902 
903 static inline uint32_t net_pkt_ipv6_fragment_id(struct net_pkt *pkt)
904 {
905 	return pkt->ipv6_fragment.id;
906 }
907 
908 static inline void net_pkt_set_ipv6_fragment_id(struct net_pkt *pkt,
909 						uint32_t id)
910 {
911 	pkt->ipv6_fragment.id = id;
912 }
913 #else /* CONFIG_NET_IPV6_FRAGMENT */
914 static inline uint16_t net_pkt_ipv6_fragment_start(struct net_pkt *pkt)
915 {
916 	ARG_UNUSED(pkt);
917 
918 	return 0;
919 }
920 
921 static inline void net_pkt_set_ipv6_fragment_start(struct net_pkt *pkt,
922 						   uint16_t start)
923 {
924 	ARG_UNUSED(pkt);
925 	ARG_UNUSED(start);
926 }
927 
928 static inline uint16_t net_pkt_ipv6_fragment_offset(struct net_pkt *pkt)
929 {
930 	ARG_UNUSED(pkt);
931 
932 	return 0;
933 }
934 
935 static inline bool net_pkt_ipv6_fragment_more(struct net_pkt *pkt)
936 {
937 	ARG_UNUSED(pkt);
938 
939 	return 0;
940 }
941 
942 static inline void net_pkt_set_ipv6_fragment_flags(struct net_pkt *pkt,
943 						   uint16_t flags)
944 {
945 	ARG_UNUSED(pkt);
946 	ARG_UNUSED(flags);
947 }
948 
949 static inline uint32_t net_pkt_ipv6_fragment_id(struct net_pkt *pkt)
950 {
951 	ARG_UNUSED(pkt);
952 
953 	return 0;
954 }
955 
956 static inline void net_pkt_set_ipv6_fragment_id(struct net_pkt *pkt,
957 						uint32_t id)
958 {
959 	ARG_UNUSED(pkt);
960 	ARG_UNUSED(id);
961 }
962 #endif /* CONFIG_NET_IPV6_FRAGMENT */
963 
964 #if defined(CONFIG_NET_IP_FRAGMENT)
965 static inline bool net_pkt_is_ip_reassembled(struct net_pkt *pkt)
966 {
967 	return !!(pkt->ip_reassembled);
968 }
969 
970 static inline void net_pkt_set_ip_reassembled(struct net_pkt *pkt,
971 					      bool reassembled)
972 {
973 	pkt->ip_reassembled = reassembled;
974 }
975 #else /* CONFIG_NET_IP_FRAGMENT */
976 static inline bool net_pkt_is_ip_reassembled(struct net_pkt *pkt)
977 {
978 	ARG_UNUSED(pkt);
979 
980 	return false;
981 }
982 
983 static inline void net_pkt_set_ip_reassembled(struct net_pkt *pkt,
984 					      bool reassembled)
985 {
986 	ARG_UNUSED(pkt);
987 	ARG_UNUSED(reassembled);
988 }
989 #endif /* CONFIG_NET_IP_FRAGMENT */
990 
991 static inline uint8_t net_pkt_priority(struct net_pkt *pkt)
992 {
993 	return pkt->priority;
994 }
995 
996 static inline void net_pkt_set_priority(struct net_pkt *pkt,
997 					uint8_t priority)
998 {
999 	pkt->priority = priority;
1000 }
1001 
1002 #if defined(CONFIG_NET_CAPTURE_COOKED_MODE)
1003 static inline bool net_pkt_is_cooked_mode(struct net_pkt *pkt)
1004 {
1005 	return pkt->cooked_mode_pkt;
1006 }
1007 
1008 static inline void net_pkt_set_cooked_mode(struct net_pkt *pkt, bool value)
1009 {
1010 	pkt->cooked_mode_pkt = value;
1011 }
1012 #else
1013 static inline bool net_pkt_is_cooked_mode(struct net_pkt *pkt)
1014 {
1015 	ARG_UNUSED(pkt);
1016 
1017 	return false;
1018 }
1019 
1020 static inline void net_pkt_set_cooked_mode(struct net_pkt *pkt, bool value)
1021 {
1022 	ARG_UNUSED(pkt);
1023 	ARG_UNUSED(value);
1024 }
1025 #endif /* CONFIG_NET_CAPTURE_COOKED_MODE */
1026 
1027 #if defined(CONFIG_NET_VLAN)
1028 static inline uint16_t net_pkt_vlan_tag(struct net_pkt *pkt)
1029 {
1030 	return net_eth_vlan_get_vid(pkt->vlan_tci);
1031 }
1032 
1033 static inline void net_pkt_set_vlan_tag(struct net_pkt *pkt, uint16_t tag)
1034 {
1035 	pkt->vlan_tci = net_eth_vlan_set_vid(pkt->vlan_tci, tag);
1036 }
1037 
1038 static inline uint8_t net_pkt_vlan_priority(struct net_pkt *pkt)
1039 {
1040 	return net_eth_vlan_get_pcp(pkt->vlan_tci);
1041 }
1042 
1043 static inline void net_pkt_set_vlan_priority(struct net_pkt *pkt,
1044 					     uint8_t priority)
1045 {
1046 	pkt->vlan_tci = net_eth_vlan_set_pcp(pkt->vlan_tci, priority);
1047 }
1048 
1049 static inline bool net_pkt_vlan_dei(struct net_pkt *pkt)
1050 {
1051 	return net_eth_vlan_get_dei(pkt->vlan_tci);
1052 }
1053 
1054 static inline void net_pkt_set_vlan_dei(struct net_pkt *pkt, bool dei)
1055 {
1056 	pkt->vlan_tci = net_eth_vlan_set_dei(pkt->vlan_tci, dei);
1057 }
1058 
1059 static inline void net_pkt_set_vlan_tci(struct net_pkt *pkt, uint16_t tci)
1060 {
1061 	pkt->vlan_tci = tci;
1062 }
1063 
1064 static inline uint16_t net_pkt_vlan_tci(struct net_pkt *pkt)
1065 {
1066 	return pkt->vlan_tci;
1067 }
1068 #else
1069 static inline uint16_t net_pkt_vlan_tag(struct net_pkt *pkt)
1070 {
1071 	ARG_UNUSED(pkt);
1072 
1073 	return NET_VLAN_TAG_UNSPEC;
1074 }
1075 
1076 static inline void net_pkt_set_vlan_tag(struct net_pkt *pkt, uint16_t tag)
1077 {
1078 	ARG_UNUSED(pkt);
1079 	ARG_UNUSED(tag);
1080 }
1081 
1082 static inline uint8_t net_pkt_vlan_priority(struct net_pkt *pkt)
1083 {
1084 	ARG_UNUSED(pkt);
1085 
1086 	return 0;
1087 }
1088 
1089 static inline bool net_pkt_vlan_dei(struct net_pkt *pkt)
1090 {
1091 	ARG_UNUSED(pkt);
1092 
1093 	return false;
1094 }
1095 
1096 static inline void net_pkt_set_vlan_dei(struct net_pkt *pkt, bool dei)
1097 {
1098 	ARG_UNUSED(pkt);
1099 	ARG_UNUSED(dei);
1100 }
1101 
1102 static inline uint16_t net_pkt_vlan_tci(struct net_pkt *pkt)
1103 {
1104 	ARG_UNUSED(pkt);
1105 
1106 	return NET_VLAN_TAG_UNSPEC; /* assumes priority is 0 */
1107 }
1108 
1109 static inline void net_pkt_set_vlan_tci(struct net_pkt *pkt, uint16_t tci)
1110 {
1111 	ARG_UNUSED(pkt);
1112 	ARG_UNUSED(tci);
1113 }
1114 #endif
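/* Illustrative sketch: tagging an outgoing packet for a hypothetical VLAN 100
 * with priority 5 before handing it to the Ethernet L2. The VID and PCP
 * values are assumptions chosen for demonstration only.
 *
 * @code{.c}
 * net_pkt_set_vlan_tag(pkt, 100);    // VID
 * net_pkt_set_vlan_priority(pkt, 5); // PCP
 * net_pkt_set_vlan_dei(pkt, false);  // not drop eligible
 * @endcode
 */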
1115 
1116 #if defined(CONFIG_NET_PKT_TIMESTAMP) || defined(CONFIG_NET_PKT_TXTIME)
1117 static inline struct net_ptp_time *net_pkt_timestamp(struct net_pkt *pkt)
1118 {
1119 	return &pkt->timestamp;
1120 }
1121 
1122 static inline void net_pkt_set_timestamp(struct net_pkt *pkt,
1123 					 struct net_ptp_time *timestamp)
1124 {
1125 	pkt->timestamp.second = timestamp->second;
1126 	pkt->timestamp.nanosecond = timestamp->nanosecond;
1127 }
1128 
1129 static inline net_time_t net_pkt_timestamp_ns(struct net_pkt *pkt)
1130 {
1131 	return net_ptp_time_to_ns(&pkt->timestamp);
1132 }
1133 
1134 static inline void net_pkt_set_timestamp_ns(struct net_pkt *pkt, net_time_t timestamp)
1135 {
1136 	pkt->timestamp = ns_to_net_ptp_time(timestamp);
1137 }
1138 #else
1139 static inline struct net_ptp_time *net_pkt_timestamp(struct net_pkt *pkt)
1140 {
1141 	ARG_UNUSED(pkt);
1142 
1143 	return NULL;
1144 }
1145 
1146 static inline void net_pkt_set_timestamp(struct net_pkt *pkt,
1147 					 struct net_ptp_time *timestamp)
1148 {
1149 	ARG_UNUSED(pkt);
1150 	ARG_UNUSED(timestamp);
1151 }
1152 
1153 static inline net_time_t net_pkt_timestamp_ns(struct net_pkt *pkt)
1154 {
1155 	ARG_UNUSED(pkt);
1156 
1157 	return 0;
1158 }
1159 
1160 static inline void net_pkt_set_timestamp_ns(struct net_pkt *pkt, net_time_t timestamp)
1161 {
1162 	ARG_UNUSED(pkt);
1163 	ARG_UNUSED(timestamp);
1164 }
1165 #endif /* CONFIG_NET_PKT_TIMESTAMP || CONFIG_NET_PKT_TXTIME */
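/* Illustrative sketch: scheduling a packet for transmission roughly 1 ms in
 * the future. Assumes CONFIG_NET_PKT_TXTIME is enabled, the driver supports
 * scheduled TX, and "now_ns" has already been obtained by the caller from
 * the network subsystem's local clock (not shown here).
 *
 * @code{.c}
 * net_time_t now_ns = ...; // hypothetical: current reference clock time
 *
 * net_pkt_set_timestamp_ns(pkt, now_ns + 1000000); // transmit in ~1 ms
 * @endcode
 */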
1166 
1167 #if defined(CONFIG_NET_PKT_RXTIME_STATS) || defined(CONFIG_NET_PKT_TXTIME_STATS) || \
1168 	defined(CONFIG_TRACING_NET_CORE)
1169 
1170 static inline uint32_t net_pkt_create_time(struct net_pkt *pkt)
1171 {
1172 	return pkt->create_time;
1173 }
1174 
1175 static inline void net_pkt_set_create_time(struct net_pkt *pkt,
1176 					   uint32_t create_time)
1177 {
1178 	pkt->create_time = create_time;
1179 }
1180 #else
1181 static inline uint32_t net_pkt_create_time(struct net_pkt *pkt)
1182 {
1183 	ARG_UNUSED(pkt);
1184 
1185 	return 0U;
1186 }
1187 
1188 static inline void net_pkt_set_create_time(struct net_pkt *pkt,
1189 					   uint32_t create_time)
1190 {
1191 	ARG_UNUSED(pkt);
1192 	ARG_UNUSED(create_time);
1193 }
1194 #endif /* CONFIG_NET_PKT_RXTIME_STATS || CONFIG_NET_PKT_TXTIME_STATS ||
1195 	* CONFIG_TRACING_NET_CORE
1196 	*/
1197 
1198 #if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL) || \
1199 	defined(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)
1200 static inline uint32_t *net_pkt_stats_tick(struct net_pkt *pkt)
1201 {
1202 	return pkt->detail.stat;
1203 }
1204 
1205 static inline int net_pkt_stats_tick_count(struct net_pkt *pkt)
1206 {
1207 	return pkt->detail.count;
1208 }
1209 
1210 static inline void net_pkt_stats_tick_reset(struct net_pkt *pkt)
1211 {
1212 	memset(&pkt->detail, 0, sizeof(pkt->detail));
1213 }
1214 
1215 static ALWAYS_INLINE void net_pkt_set_stats_tick(struct net_pkt *pkt,
1216 						 uint32_t tick)
1217 {
1218 	if (pkt->detail.count >= NET_PKT_DETAIL_STATS_COUNT) {
1219 		NET_ERR("Detail stats count overflow (%d >= %d)",
1220 			pkt->detail.count, NET_PKT_DETAIL_STATS_COUNT);
1221 		return;
1222 	}
1223 
1224 	pkt->detail.stat[pkt->detail.count++] = tick;
1225 }
1226 
1227 #define net_pkt_set_tx_stats_tick(pkt, tick) net_pkt_set_stats_tick(pkt, tick)
1228 #define net_pkt_set_rx_stats_tick(pkt, tick) net_pkt_set_stats_tick(pkt, tick)
1229 #else
1230 static inline uint32_t *net_pkt_stats_tick(struct net_pkt *pkt)
1231 {
1232 	ARG_UNUSED(pkt);
1233 
1234 	return NULL;
1235 }
1236 
1237 static inline int net_pkt_stats_tick_count(struct net_pkt *pkt)
1238 {
1239 	ARG_UNUSED(pkt);
1240 
1241 	return 0;
1242 }
1243 
1244 static inline void net_pkt_stats_tick_reset(struct net_pkt *pkt)
1245 {
1246 	ARG_UNUSED(pkt);
1247 }
1248 
1249 static inline void net_pkt_set_stats_tick(struct net_pkt *pkt, uint32_t tick)
1250 {
1251 	ARG_UNUSED(pkt);
1252 	ARG_UNUSED(tick);
1253 }
1254 
1255 #define net_pkt_set_tx_stats_tick(pkt, tick)
1256 #define net_pkt_set_rx_stats_tick(pkt, tick)
1257 #endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL ||
1258 	  CONFIG_NET_PKT_RXTIME_STATS_DETAIL */
1259 
1260 static inline uint8_t *net_pkt_data(struct net_pkt *pkt)
1261 {
1262 	return pkt->frags->data;
1263 }
1264 
1265 static inline uint8_t *net_pkt_ip_data(struct net_pkt *pkt)
1266 {
1267 	return pkt->frags->data;
1268 }
1269 
1270 static inline bool net_pkt_is_empty(struct net_pkt *pkt)
1271 {
1272 	return !pkt->buffer || !net_pkt_data(pkt) || pkt->buffer->len == 0;
1273 }
1274 
1275 static inline struct net_linkaddr *net_pkt_lladdr_src(struct net_pkt *pkt)
1276 {
1277 	return &pkt->lladdr_src;
1278 }
1279 
1280 static inline struct net_linkaddr *net_pkt_lladdr_dst(struct net_pkt *pkt)
1281 {
1282 	return &pkt->lladdr_dst;
1283 }
1284 
1285 static inline void net_pkt_lladdr_swap(struct net_pkt *pkt)
1286 {
1287 	struct net_linkaddr tmp;
1288 
1289 	memcpy(tmp.addr,
1290 	       net_pkt_lladdr_src(pkt)->addr,
1291 	       net_pkt_lladdr_src(pkt)->len);
1292 	memcpy(net_pkt_lladdr_src(pkt)->addr,
1293 	       net_pkt_lladdr_dst(pkt)->addr,
1294 	       net_pkt_lladdr_dst(pkt)->len);
1295 	memcpy(net_pkt_lladdr_dst(pkt)->addr,
1296 	       tmp.addr,
1297 	       net_pkt_lladdr_src(pkt)->len);
1298 }
1299 
1300 static inline void net_pkt_lladdr_clear(struct net_pkt *pkt)
1301 {
1302 	(void)net_linkaddr_clear(net_pkt_lladdr_src(pkt));
1303 	(void)net_linkaddr_clear(net_pkt_lladdr_dst(pkt));
1304 }
1305 
1306 static inline uint16_t net_pkt_ll_proto_type(struct net_pkt *pkt)
1307 {
1308 	return pkt->ll_proto_type;
1309 }
1310 
1311 static inline void net_pkt_set_ll_proto_type(struct net_pkt *pkt, uint16_t type)
1312 {
1313 	pkt->ll_proto_type = type;
1314 }
1315 
1316 #if defined(CONFIG_NET_IPV4_ACD)
1317 static inline bool net_pkt_ipv4_acd(struct net_pkt *pkt)
1318 {
1319 	return !!(pkt->ipv4_acd_arp_msg);
1320 }
1321 
1322 static inline void net_pkt_set_ipv4_acd(struct net_pkt *pkt,
1323 					bool is_acd_arp_msg)
1324 {
1325 	pkt->ipv4_acd_arp_msg = is_acd_arp_msg;
1326 }
1327 #else /* CONFIG_NET_IPV4_ACD */
1328 static inline bool net_pkt_ipv4_acd(struct net_pkt *pkt)
1329 {
1330 	ARG_UNUSED(pkt);
1331 
1332 	return false;
1333 }
1334 
1335 static inline void net_pkt_set_ipv4_acd(struct net_pkt *pkt,
1336 					bool is_acd_arp_msg)
1337 {
1338 	ARG_UNUSED(pkt);
1339 	ARG_UNUSED(is_acd_arp_msg);
1340 }
1341 #endif /* CONFIG_NET_IPV4_ACD */
1342 
1343 #if defined(CONFIG_NET_LLDP)
1344 static inline bool net_pkt_is_lldp(struct net_pkt *pkt)
1345 {
1346 	return !!(pkt->lldp_pkt);
1347 }
1348 
1349 static inline void net_pkt_set_lldp(struct net_pkt *pkt, bool is_lldp)
1350 {
1351 	pkt->lldp_pkt = is_lldp;
1352 }
1353 #else
1354 static inline bool net_pkt_is_lldp(struct net_pkt *pkt)
1355 {
1356 	ARG_UNUSED(pkt);
1357 
1358 	return false;
1359 }
1360 
1361 static inline void net_pkt_set_lldp(struct net_pkt *pkt, bool is_lldp)
1362 {
1363 	ARG_UNUSED(pkt);
1364 	ARG_UNUSED(is_lldp);
1365 }
1366 #endif /* CONFIG_NET_LLDP */
1367 
1368 #if defined(CONFIG_NET_L2_PPP)
1369 static inline bool net_pkt_is_ppp(struct net_pkt *pkt)
1370 {
1371 	return !!(pkt->ppp_msg);
1372 }
1373 
1374 static inline void net_pkt_set_ppp(struct net_pkt *pkt,
1375 				   bool is_ppp_msg)
1376 {
1377 	pkt->ppp_msg = is_ppp_msg;
1378 }
1379 #else /* CONFIG_NET_L2_PPP */
1380 static inline bool net_pkt_is_ppp(struct net_pkt *pkt)
1381 {
1382 	ARG_UNUSED(pkt);
1383 
1384 	return false;
1385 }
1386 
1387 static inline void net_pkt_set_ppp(struct net_pkt *pkt,
1388 				   bool is_ppp_msg)
1389 {
1390 	ARG_UNUSED(pkt);
1391 	ARG_UNUSED(is_ppp_msg);
1392 }
1393 #endif /* CONFIG_NET_L2_PPP */
1394 
1395 #if defined(NET_PKT_HAS_CONTROL_BLOCK)
1396 static inline void *net_pkt_cb(struct net_pkt *pkt)
1397 {
1398 	return &pkt->cb;
1399 }
1400 #else
1401 static inline void *net_pkt_cb(struct net_pkt *pkt)
1402 {
1403 	ARG_UNUSED(pkt);
1404 
1405 	return NULL;
1406 }
1407 #endif
1408 
1409 #define NET_IPV6_HDR(pkt) ((struct net_ipv6_hdr *)net_pkt_ip_data(pkt))
1410 #define NET_IPV4_HDR(pkt) ((struct net_ipv4_hdr *)net_pkt_ip_data(pkt))
1411 
1412 static inline void net_pkt_set_src_ipv6_addr(struct net_pkt *pkt)
1413 {
1414 	net_if_ipv6_select_src_addr(net_context_get_iface(
1415 					    net_pkt_context(pkt)),
1416 				    (struct in6_addr *)NET_IPV6_HDR(pkt)->src);
1417 }
1418 
1419 static inline void net_pkt_set_overwrite(struct net_pkt *pkt, bool overwrite)
1420 {
1421 	pkt->overwrite = overwrite;
1422 }
1423 
1424 static inline bool net_pkt_is_being_overwritten(struct net_pkt *pkt)
1425 {
1426 	return !!(pkt->overwrite);
1427 }
1428 
1429 #ifdef CONFIG_NET_PKT_FILTER
1430 
1431 bool net_pkt_filter_send_ok(struct net_pkt *pkt);
1432 bool net_pkt_filter_recv_ok(struct net_pkt *pkt);
1433 
1434 #else
1435 
1436 static inline bool net_pkt_filter_send_ok(struct net_pkt *pkt)
1437 {
1438 	ARG_UNUSED(pkt);
1439 
1440 	return true;
1441 }
1442 
1443 static inline bool net_pkt_filter_recv_ok(struct net_pkt *pkt)
1444 {
1445 	ARG_UNUSED(pkt);
1446 
1447 	return true;
1448 }
1449 
1450 #endif /* CONFIG_NET_PKT_FILTER */
1451 
1452 #if defined(CONFIG_NET_PKT_FILTER) && \
1453 	(defined(CONFIG_NET_PKT_FILTER_IPV4_HOOK) || defined(CONFIG_NET_PKT_FILTER_IPV6_HOOK))
1454 
1455 bool net_pkt_filter_ip_recv_ok(struct net_pkt *pkt);
1456 
1457 #else
1458 
1459 static inline bool net_pkt_filter_ip_recv_ok(struct net_pkt *pkt)
1460 {
1461 	ARG_UNUSED(pkt);
1462 
1463 	return true;
1464 }
1465 
1466 #endif /* CONFIG_NET_PKT_FILTER_IPV4_HOOK || CONFIG_NET_PKT_FILTER_IPV6_HOOK */
1467 
1468 #if defined(CONFIG_NET_PKT_FILTER) && defined(CONFIG_NET_PKT_FILTER_LOCAL_IN_HOOK)
1469 
1470 bool net_pkt_filter_local_in_recv_ok(struct net_pkt *pkt);
1471 
1472 #else
1473 
1474 static inline bool net_pkt_filter_local_in_recv_ok(struct net_pkt *pkt)
1475 {
1476 	ARG_UNUSED(pkt);
1477 
1478 	return true;
1479 }
1480 
1481 #endif /* CONFIG_NET_PKT_FILTER && CONFIG_NET_PKT_FILTER_LOCAL_IN_HOOK */
1482 
1483 #if defined(CONFIG_NET_OFFLOAD) || defined(CONFIG_NET_L2_IPIP)
1484 static inline struct sockaddr *net_pkt_remote_address(struct net_pkt *pkt)
1485 {
1486 	return &pkt->remote;
1487 }
1488 
1489 static inline void net_pkt_set_remote_address(struct net_pkt *pkt,
1490 					      struct sockaddr *address,
1491 					      socklen_t len)
1492 {
1493 	memcpy(&pkt->remote, address, len);
1494 }
1495 #endif /* CONFIG_NET_OFFLOAD || CONFIG_NET_L2_IPIP */
1496 
1497 /** @endcond */
1498 
1499 /**
1500  * @brief Create a net_pkt slab
1501  *
1502  * A net_pkt slab is used to store meta-information about
1503  * network packets. It must be coupled with a data fragment pool
1504  * (@ref NET_PKT_DATA_POOL_DEFINE) used to store the actual
1505  * packet data. The macro can be used by an application to define
1506  * additional custom per-context TX packet slabs (see
1507  * net_context_setup_pools()).
1508  *
1509  * @param name Name of the slab.
1510  * @param count Number of net_pkt in this slab.
1511  */
1512 #define NET_PKT_SLAB_DEFINE(name, count)				\
1513 	K_MEM_SLAB_DEFINE(name, sizeof(struct net_pkt), count, 4);      \
1514 	NET_PKT_ALLOC_STATS_DEFINE(pkt_alloc_stats_##name, name)
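
/* Example (hypothetical names): a custom TX packet slab holding 16 net_pkt
 * descriptors. It is meant to be paired with a data fragment pool, see the
 * NET_PKT_DATA_POOL_DEFINE example further below.
 *
 * @code{.c}
 * NET_PKT_SLAB_DEFINE(my_tx_pkts, 16);
 * @endcode
 */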
1515 
1516 /** @cond INTERNAL_HIDDEN */
1517 
1518 /* Backward compatibility macro */
1519 #define NET_PKT_TX_SLAB_DEFINE(name, count) NET_PKT_SLAB_DEFINE(name, count)
1520 
1521 /** @endcond */
1522 
1523 /**
1524  * @brief Create a data fragment net_buf pool
1525  *
1526  * A net_buf pool is used to store actual data for
1527  * network packets. It must be coupled with a net_pkt slab
1528  * (@ref NET_PKT_SLAB_DEFINE) used to store the packet
1529  * meta-information. The macro can be used by an application to
1530  * define additional custom per-context TX packet pools (see
1531  * net_context_setup_pools()).
1532  *
1533  * @param name Name of the pool.
1534  * @param count Number of net_buf in this pool.
1535  */
1536 #define NET_PKT_DATA_POOL_DEFINE(name, count)				\
1537 	NET_BUF_POOL_DEFINE(name, count, CONFIG_NET_BUF_DATA_SIZE,	\
1538 			    0, NULL)
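
/* Example (hypothetical names): the data pool coupled with the slab defined
 * in the previous example; together they can be handed to
 * net_context_setup_pools() for per-context TX allocations, as described in
 * the comments above. The count of 32 buffers is an arbitrary assumption.
 *
 * @code{.c}
 * NET_PKT_DATA_POOL_DEFINE(my_tx_bufs, 32);
 * @endcode
 */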
1539 
1540 /** @cond INTERNAL_HIDDEN */
1541 
1542 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC) || \
1543 	(CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG)
1544 #define NET_PKT_DEBUG_ENABLED
1545 #endif
1546 
1547 #if defined(NET_PKT_DEBUG_ENABLED)
1548 
1549 /* Debug versions of the net_pkt functions that are used when tracking
1550  * buffer usage.
1551  */
1552 
1553 struct net_buf *net_pkt_get_reserve_data_debug(struct net_buf_pool *pool,
1554 					       size_t min_len,
1555 					       k_timeout_t timeout,
1556 					       const char *caller,
1557 					       int line);
1558 
1559 #define net_pkt_get_reserve_data(pool, min_len, timeout)				\
1560 	net_pkt_get_reserve_data_debug(pool, min_len, timeout, __func__, __LINE__)
1561 
1562 struct net_buf *net_pkt_get_reserve_rx_data_debug(size_t min_len,
1563 						  k_timeout_t timeout,
1564 						  const char *caller,
1565 						  int line);
1566 #define net_pkt_get_reserve_rx_data(min_len, timeout)				\
1567 	net_pkt_get_reserve_rx_data_debug(min_len, timeout, __func__, __LINE__)
1568 
1569 struct net_buf *net_pkt_get_reserve_tx_data_debug(size_t min_len,
1570 						  k_timeout_t timeout,
1571 						  const char *caller,
1572 						  int line);
1573 #define net_pkt_get_reserve_tx_data(min_len, timeout)				\
1574 	net_pkt_get_reserve_tx_data_debug(min_len, timeout, __func__, __LINE__)
1575 
1576 struct net_buf *net_pkt_get_frag_debug(struct net_pkt *pkt, size_t min_len,
1577 				       k_timeout_t timeout,
1578 				       const char *caller, int line);
1579 #define net_pkt_get_frag(pkt, min_len, timeout)					\
1580 	net_pkt_get_frag_debug(pkt, min_len, timeout, __func__, __LINE__)
1581 
1582 void net_pkt_unref_debug(struct net_pkt *pkt, const char *caller, int line);
1583 #define net_pkt_unref(pkt) net_pkt_unref_debug(pkt, __func__, __LINE__)
1584 
1585 struct net_pkt *net_pkt_ref_debug(struct net_pkt *pkt, const char *caller,
1586 				  int line);
1587 #define net_pkt_ref(pkt) net_pkt_ref_debug(pkt, __func__, __LINE__)
1588 
1589 struct net_buf *net_pkt_frag_ref_debug(struct net_buf *frag,
1590 				       const char *caller, int line);
1591 #define net_pkt_frag_ref(frag) net_pkt_frag_ref_debug(frag, __func__, __LINE__)
1592 
1593 void net_pkt_frag_unref_debug(struct net_buf *frag,
1594 			      const char *caller, int line);
1595 #define net_pkt_frag_unref(frag)				\
1596 	net_pkt_frag_unref_debug(frag, __func__, __LINE__)
1597 
1598 struct net_buf *net_pkt_frag_del_debug(struct net_pkt *pkt,
1599 				       struct net_buf *parent,
1600 				       struct net_buf *frag,
1601 				       const char *caller, int line);
1602 #define net_pkt_frag_del(pkt, parent, frag)				\
1603 	net_pkt_frag_del_debug(pkt, parent, frag, __func__, __LINE__)
1604 
1605 void net_pkt_frag_add_debug(struct net_pkt *pkt, struct net_buf *frag,
1606 			    const char *caller, int line);
1607 #define net_pkt_frag_add(pkt, frag)				\
1608 	net_pkt_frag_add_debug(pkt, frag, __func__, __LINE__)
1609 
1610 void net_pkt_frag_insert_debug(struct net_pkt *pkt, struct net_buf *frag,
1611 			       const char *caller, int line);
1612 #define net_pkt_frag_insert(pkt, frag)					\
1613 	net_pkt_frag_insert_debug(pkt, frag, __func__, __LINE__)
1614 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC ||
1615 	* CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
1616 	*/
1617 /** @endcond */
1618 
1619 #if defined(NET_PKT_DEBUG_ENABLED)
1620 /**
1621  * @brief Print fragment list and the fragment sizes
1622  *
1623  * @details Only available if debugging is activated.
1624  *
1625  * @param pkt Network pkt.
1626  */
1627 void net_pkt_print_frags(struct net_pkt *pkt);
1628 #else
1629 #define net_pkt_print_frags(pkt)
1630 #endif
1631 
1632 #if !defined(NET_PKT_DEBUG_ENABLED)
1633 /**
1634  * @brief Get a data buffer from a given pool.
1635  *
1636  * @details Normally this version is not useful for applications
1637  * but is mainly used by network fragmentation code.
1638  *
1639  * @param pool The net_buf pool to use.
1640  * @param min_len Minimum length of the requested fragment.
1641  * @param timeout Affects the action taken should the net buf pool be empty.
1642  *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
1643  *        wait as long as necessary. Otherwise, wait up to the specified time.
1644  *
1645  * @return Network buffer if successful, NULL otherwise.
1646  */
1647 struct net_buf *net_pkt_get_reserve_data(struct net_buf_pool *pool,
1648 					 size_t min_len, k_timeout_t timeout);
1649 #endif
1650 
1651 #if !defined(NET_PKT_DEBUG_ENABLED)
1652 /**
1653  * @brief Get RX DATA buffer from pool.
1654  * Normally you should use net_pkt_get_frag() instead.
1655  *
1656  * @details Normally this version is not useful for applications
1657  * but is mainly used by network fragmentation code.
1658  *
1659  * @param min_len Minimum length of the requested fragment.
1660  * @param timeout Affects the action taken should the net buf pool be empty.
1661  *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
1662  *        wait as long as necessary. Otherwise, wait up to the specified time.
1663  *
1664  * @return Network buffer if successful, NULL otherwise.
1665  */
1666 struct net_buf *net_pkt_get_reserve_rx_data(size_t min_len, k_timeout_t timeout);
1667 #endif
1668 
1669 #if !defined(NET_PKT_DEBUG_ENABLED)
1670 /**
1671  * @brief Get TX DATA buffer from pool.
1672  * Normally you should use net_pkt_get_frag() instead.
1673  *
1674  * @details Normally this version is not useful for applications
1675  * but is mainly used by network fragmentation code.
1676  *
1677  * @param min_len Minimum length of the requested fragment.
1678  * @param timeout Affects the action taken should the net buf pool be empty.
1679  *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
1680  *        wait as long as necessary. Otherwise, wait up to the specified time.
1681  *
1682  * @return Network buffer if successful, NULL otherwise.
1683  */
1684 struct net_buf *net_pkt_get_reserve_tx_data(size_t min_len, k_timeout_t timeout);
1685 #endif
1686 
1687 #if !defined(NET_PKT_DEBUG_ENABLED)
1688 /**
1689  * @brief Get a data fragment that might come from a user-specific
1690  * buffer pool or from the global DATA pool.
1691  *
1692  * @param pkt Network packet.
1693  * @param min_len Minimum length of the requested fragment.
1694  * @param timeout Affects the action taken should the net buf pool be empty.
1695  *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
1696  *        wait as long as necessary. Otherwise, wait up to the specified time.
1697  *
1698  * @return Network buffer if successful, NULL otherwise.
1699  */
1700 struct net_buf *net_pkt_get_frag(struct net_pkt *pkt, size_t min_len,
1701 				 k_timeout_t timeout);
1702 #endif
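
/* Illustrative sketch: appending an extra data fragment to an existing
 * packet. The 64-byte minimum length and 100 ms timeout are assumptions
 * chosen for demonstration only.
 *
 * @code{.c}
 * struct net_buf *frag = net_pkt_get_frag(pkt, 64, K_MSEC(100));
 *
 * if (frag != NULL) {
 *         net_pkt_frag_add(pkt, frag);
 * }
 * @endcode
 */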
1703 
1704 #if !defined(NET_PKT_DEBUG_ENABLED)
1705 /**
1706  * @brief Place packet back into the available packets slab
1707  *
1708  * @details Releases the packet for other use. This needs to be
1709  * called by the application after it has finished with the packet.
1710  *
1711  * @param pkt Network packet to release.
1712  *
1713  */
1714 void net_pkt_unref(struct net_pkt *pkt);
1715 #endif
1716 
1717 #if !defined(NET_PKT_DEBUG_ENABLED)
1718 /**
1719  * @brief Increase the packet ref count
1720  *
1721  * @details Mark the packet as still being in use.
1722  *
1723  * @param pkt Network packet to ref.
1724  *
1725  * @return Network packet if successful, NULL otherwise.
1726  */
1727 struct net_pkt *net_pkt_ref(struct net_pkt *pkt);
1728 #endif
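
/* Illustrative sketch (not part of the original API documentation): keeping a
 * packet alive across a deferred processing step by taking an extra
 * reference. The callback and handler names below are hypothetical.
 *
 *	static struct net_pkt *pending_pkt;
 *
 *	void recv_cb(struct net_pkt *pkt)
 *	{
 *		pending_pkt = net_pkt_ref(pkt);    // keep the packet alive
 *	}
 *
 *	void deferred_handler(void)
 *	{
 *		// ... process pending_pkt ...
 *		net_pkt_unref(pending_pkt);        // release our reference
 *		pending_pkt = NULL;
 *	}
 */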
1729 
1730 #if !defined(NET_PKT_DEBUG_ENABLED)
1731 /**
1732  * @brief Increase the packet fragment ref count
1733  *
1734  * @details Mark the fragment as still being in use.
1735  *
1736  * @param frag Network fragment to ref.
1737  *
1738  * @return A pointer to the referenced network fragment.
1739  */
1740 struct net_buf *net_pkt_frag_ref(struct net_buf *frag);
1741 #endif
1742 
1743 #if !defined(NET_PKT_DEBUG_ENABLED)
1744 /**
1745  * @brief Decrease the packet fragment ref count
1746  *
1747  * @param frag Network fragment to unref.
1748  */
1749 void net_pkt_frag_unref(struct net_buf *frag);
1750 #endif
1751 
1752 #if !defined(NET_PKT_DEBUG_ENABLED)
1753 /**
1754  * @brief Delete existing fragment from a packet
1755  *
1756  * @param pkt Network packet to which the fragment belongs.
1757  * @param parent Parent fragment of frag, or NULL if none.
1758  * @param frag Fragment to delete.
1759  *
1760  * @return Pointer to the following fragment, or NULL if it had no
1761  *         further fragments.
1762  */
1763 struct net_buf *net_pkt_frag_del(struct net_pkt *pkt,
1764 				 struct net_buf *parent,
1765 				 struct net_buf *frag);
1766 #endif
1767 
1768 #if !defined(NET_PKT_DEBUG_ENABLED)
1769 /**
1770  * @brief Add a fragment to a packet at the end of its fragment list
1771  *
1772  * @param pkt Network packet where to add the fragment
1773  * @param frag Fragment to add
1774  */
1775 void net_pkt_frag_add(struct net_pkt *pkt, struct net_buf *frag);
1776 #endif
1777 
1778 #if !defined(NET_PKT_DEBUG_ENABLED)
1779 /**
1780  * @brief Insert a fragment to a packet at the beginning of its fragment list
1781  *
1782  * @param pkt Network packet where to insert the fragment
1783  * @param frag Fragment to insert
1784  */
1785 void net_pkt_frag_insert(struct net_pkt *pkt, struct net_buf *frag);
1786 #endif
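
/* Illustrative sketch (assumes pkt is an already allocated net_pkt): growing
 * a packet by appending one more data fragment. The minimum length and
 * timeout values are placeholders.
 *
 *	struct net_buf *frag;
 *
 *	frag = net_pkt_get_frag(pkt, 64, K_MSEC(100));
 *	if (frag == NULL) {
 *		return -ENOMEM;
 *	}
 *	net_pkt_frag_add(pkt, frag);	// the fragment is now owned by pkt
 */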
1787 
1788 /**
1789  * @brief Compact the fragment list of a packet.
1790  *
1791  * @details After this, there is no free space left in the individual fragments.
1792  * @param pkt Network packet.
1793  */
1794 void net_pkt_compact(struct net_pkt *pkt);
1795 
1796 /**
1797  * @brief Get information about predefined RX, TX and DATA pools.
1798  *
1799  * @param rx Pointer to RX pool is returned.
1800  * @param tx Pointer to TX pool is returned.
1801  * @param rx_data Pointer to RX DATA pool is returned.
1802  * @param tx_data Pointer to TX DATA pool is returned.
1803  */
1804 void net_pkt_get_info(struct k_mem_slab **rx,
1805 		      struct k_mem_slab **tx,
1806 		      struct net_buf_pool **rx_data,
1807 		      struct net_buf_pool **tx_data);
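
/* Illustrative sketch: inspecting the predefined slabs/pools, e.g. for a
 * diagnostics shell command. The printk() output format is only an example.
 *
 *	struct k_mem_slab *rx, *tx;
 *	struct net_buf_pool *rx_data, *tx_data;
 *
 *	net_pkt_get_info(&rx, &tx, &rx_data, &tx_data);
 *	printk("free RX pkts: %u, free TX pkts: %u\n",
 *	       k_mem_slab_num_free_get(rx), k_mem_slab_num_free_get(tx));
 */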
1808 
1809 /** @cond INTERNAL_HIDDEN */
1810 
1811 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
1812 /**
1813  * @brief Debug helper to print out the buffer allocations
1814  */
1815 void net_pkt_print(void);
1816 
1817 typedef void (*net_pkt_allocs_cb_t)(struct net_pkt *pkt,
1818 				    struct net_buf *buf,
1819 				    const char *func_alloc,
1820 				    int line_alloc,
1821 				    const char *func_free,
1822 				    int line_free,
1823 				    bool in_use,
1824 				    void *user_data);
1825 
1826 void net_pkt_allocs_foreach(net_pkt_allocs_cb_t cb, void *user_data);
1827 
1828 const char *net_pkt_slab2str(struct k_mem_slab *slab);
1829 const char *net_pkt_pool2str(struct net_buf_pool *pool);
1830 
1831 #else
1832 #define net_pkt_print(...)
1833 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC */
1834 
1835 /* The new allocator and API are defined below.
1836  * This will become simpler once the former API above is removed.
1837  */
1838 #if defined(NET_PKT_DEBUG_ENABLED)
1839 
1840 struct net_pkt *net_pkt_alloc_debug(k_timeout_t timeout,
1841 				    const char *caller, int line);
1842 #define net_pkt_alloc(_timeout)					\
1843 	net_pkt_alloc_debug(_timeout, __func__, __LINE__)
1844 
1845 struct net_pkt *net_pkt_alloc_from_slab_debug(struct k_mem_slab *slab,
1846 					      k_timeout_t timeout,
1847 					      const char *caller, int line);
1848 #define net_pkt_alloc_from_slab(_slab, _timeout)			\
1849 	net_pkt_alloc_from_slab_debug(_slab, _timeout, __func__, __LINE__)
1850 
1851 struct net_pkt *net_pkt_rx_alloc_debug(k_timeout_t timeout,
1852 				       const char *caller, int line);
1853 #define net_pkt_rx_alloc(_timeout)				\
1854 	net_pkt_rx_alloc_debug(_timeout, __func__, __LINE__)
1855 
1856 struct net_pkt *net_pkt_alloc_on_iface_debug(struct net_if *iface,
1857 					     k_timeout_t timeout,
1858 					     const char *caller,
1859 					     int line);
1860 #define net_pkt_alloc_on_iface(_iface, _timeout)			\
1861 	net_pkt_alloc_on_iface_debug(_iface, _timeout, __func__, __LINE__)
1862 
1863 struct net_pkt *net_pkt_rx_alloc_on_iface_debug(struct net_if *iface,
1864 						k_timeout_t timeout,
1865 						const char *caller,
1866 						int line);
1867 #define net_pkt_rx_alloc_on_iface(_iface, _timeout)			\
1868 	net_pkt_rx_alloc_on_iface_debug(_iface, _timeout,		\
1869 					__func__, __LINE__)
1870 
1871 int net_pkt_alloc_buffer_debug(struct net_pkt *pkt,
1872 			       size_t size,
1873 			       enum net_ip_protocol proto,
1874 			       k_timeout_t timeout,
1875 			       const char *caller, int line);
1876 #define net_pkt_alloc_buffer(_pkt, _size, _proto, _timeout)		\
1877 	net_pkt_alloc_buffer_debug(_pkt, _size, _proto, _timeout,	\
1878 				   __func__, __LINE__)
1879 
1880 int net_pkt_alloc_buffer_raw_debug(struct net_pkt *pkt, size_t size,
1881 				   k_timeout_t timeout,
1882 				   const char *caller, int line);
1883 #define net_pkt_alloc_buffer_raw(_pkt, _size, _timeout)	\
1884 	net_pkt_alloc_buffer_raw_debug(_pkt, _size, _timeout,	\
1885 				       __func__, __LINE__)
1886 
1887 struct net_pkt *net_pkt_alloc_with_buffer_debug(struct net_if *iface,
1888 						size_t size,
1889 						sa_family_t family,
1890 						enum net_ip_protocol proto,
1891 						k_timeout_t timeout,
1892 						const char *caller,
1893 						int line);
1894 #define net_pkt_alloc_with_buffer(_iface, _size, _family,		\
1895 				  _proto, _timeout)			\
1896 	net_pkt_alloc_with_buffer_debug(_iface, _size, _family,		\
1897 					_proto, _timeout,		\
1898 					__func__, __LINE__)
1899 
1900 struct net_pkt *net_pkt_rx_alloc_with_buffer_debug(struct net_if *iface,
1901 						   size_t size,
1902 						   sa_family_t family,
1903 						   enum net_ip_protocol proto,
1904 						   k_timeout_t timeout,
1905 						   const char *caller,
1906 						   int line);
1907 #define net_pkt_rx_alloc_with_buffer(_iface, _size, _family,		\
1908 				     _proto, _timeout)			\
1909 	net_pkt_rx_alloc_with_buffer_debug(_iface, _size, _family,	\
1910 					   _proto, _timeout,		\
1911 					   __func__, __LINE__)
1912 
1913 int net_pkt_alloc_buffer_with_reserve_debug(struct net_pkt *pkt,
1914 					    size_t size,
1915 					    size_t reserve,
1916 					    enum net_ip_protocol proto,
1917 					    k_timeout_t timeout,
1918 					    const char *caller,
1919 					    int line);
1920 #define net_pkt_alloc_buffer_with_reserve(_pkt, _size, _reserve, _proto, _timeout) \
1921 	net_pkt_alloc_buffer_with_reserve_debug(_pkt, _size, _reserve, _proto, \
1922 						_timeout, __func__, __LINE__)
1923 
1924 #endif /* NET_PKT_DEBUG_ENABLED */
1925 /** @endcond */
1926 
1927 #if !defined(NET_PKT_DEBUG_ENABLED)
1928 /**
1929  * @brief Allocate an initialized net_pkt
1930  *
1931  * @details For the time being, two pools are used: one for TX and one for RX.
1932  *          This allocator has to be used for TX.
1933  *
1934  * @param timeout Maximum time to wait for an allocation.
1935  *
1936  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1937  */
1938 struct net_pkt *net_pkt_alloc(k_timeout_t timeout);
1939 #endif
1940 
1941 #if !defined(NET_PKT_DEBUG_ENABLED)
1942 /**
1943  * @brief Allocate an initialized net_pkt from a specific slab
1944  *
1945  * @details Unlike net_pkt_alloc(), which uses the core slabs, this one uses
1946  *          an external slab (see NET_PKT_SLAB_DEFINE()).
1947  *          Do _not_ use it unless you know what you are doing. Basically, only
1948  *          net_context should be using this, in order to allocate the packet
1949  *          and then the buffer from its local slab/pool (if any).
1950  *
1951  * @param slab    The slab to use for allocating the packet
1952  * @param timeout Maximum time to wait for an allocation.
1953  *
1954  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1955  */
1956 struct net_pkt *net_pkt_alloc_from_slab(struct k_mem_slab *slab,
1957 					k_timeout_t timeout);
1958 #endif
1959 
1960 #if !defined(NET_PKT_DEBUG_ENABLED)
1961 /**
1962  * @brief Allocate an initialized net_pkt for RX
1963  *
1964  * @details For the time being, two pools are used: one for TX and one for RX.
1965  *          This allocator has to be used for RX.
1966  *
1967  * @param timeout Maximum time to wait for an allocation.
1968  *
1969  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1970  */
1971 struct net_pkt *net_pkt_rx_alloc(k_timeout_t timeout);
1972 #endif
1973 
1974 #if !defined(NET_PKT_DEBUG_ENABLED)
1975 /**
1976  * @brief Allocate a network packet for a specific network interface.
1977  *
1978  * @param iface The network interface the packet is supposed to go through.
1979  * @param timeout Maximum time to wait for an allocation.
1980  *
1981  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1982  */
1983 struct net_pkt *net_pkt_alloc_on_iface(struct net_if *iface,
1984 				       k_timeout_t timeout);
1985 
1986 /** @cond INTERNAL_HIDDEN */
1987 
1988 /* Same as above but specifically for RX packet */
1989 struct net_pkt *net_pkt_rx_alloc_on_iface(struct net_if *iface,
1990 					  k_timeout_t timeout);
1991 /** @endcond */
1992 
1993 #endif
1994 
1995 #if !defined(NET_PKT_DEBUG_ENABLED)
1996 /**
1997  * @brief Allocate buffer for a net_pkt
1998  *
1999  * @details This allocator takes into account the space necessary for protocol
2000  *           headers, the MTU, and any existing buffer. Beware that, due to
2001  *           these criteria, the allocated size might be smaller or larger than
2002  *           the requested one.
2003  *
2004  * @param pkt     The network packet requiring buffer to be allocated.
2005  * @param size    The size of buffer being requested.
2006  * @param proto   The IP protocol type (can be 0 for none).
2007  * @param timeout Maximum time to wait for an allocation.
2008  *
2009  * @return 0 on success, negative errno code otherwise.
2010  */
2011 int net_pkt_alloc_buffer(struct net_pkt *pkt,
2012 			 size_t size,
2013 			 enum net_ip_protocol proto,
2014 			 k_timeout_t timeout);
2015 #endif
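
/* Illustrative sketch: the two-step allocation path used mostly by the core
 * stack, i.e. allocating the net_pkt descriptor first and its buffer
 * afterwards. iface, the payload size and the timeouts are placeholders.
 *
 *	struct net_pkt *pkt;
 *
 *	pkt = net_pkt_alloc_on_iface(iface, K_MSEC(100));
 *	if (pkt == NULL) {
 *		return -ENOMEM;
 *	}
 *	if (net_pkt_alloc_buffer(pkt, 128, IPPROTO_UDP, K_MSEC(100)) < 0) {
 *		net_pkt_unref(pkt);
 *		return -ENOMEM;
 *	}
 */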
2016 
2017 #if !defined(NET_PKT_DEBUG_ENABLED)
2018 /**
2019  * @brief Allocate buffer for a net_pkt and reserve some space in the first net_buf.
2020  *
2021  * @details This allocator takes into account the space necessary for protocol
2022  *           headers, the MTU, and any existing buffer. Beware that, due to
2023  *           these criteria, the allocated size might be smaller or larger than
2024  *           the requested one.
2025  *
2026  * @param pkt     The network packet requiring buffer to be allocated.
2027  * @param size    The size of buffer being requested.
2028  * @param reserve The L2 header size to reserve. This can be 0, in which case
2029  *                the L2 header is placed into a separate net_buf.
2030  * @param proto   The IP protocol type (can be 0 for none).
2031  * @param timeout Maximum time to wait for an allocation.
2032  *
2033  * @return 0 on success, negative errno code otherwise.
2034  */
2036 int net_pkt_alloc_buffer_with_reserve(struct net_pkt *pkt,
2037 				      size_t size,
2038 				      size_t reserve,
2039 				      enum net_ip_protocol proto,
2040 				      k_timeout_t timeout);
2042 
2043 /**
2044  * @brief Allocate buffer for a net_pkt of the specified size, without any
2045  *        additional preconditions
2046  *
2047  * @details The actual buffer size may be larger than the requested one if
2048  *           fixed-size buffers are in use.
2049  *
2050  * @param pkt     The network packet requiring buffer to be allocated.
2051  * @param size    The size of buffer being requested.
2052  * @param timeout Maximum time to wait for an allocation.
2053  *
2054  * @return 0 on success, negative errno code otherwise.
2055  */
2056 int net_pkt_alloc_buffer_raw(struct net_pkt *pkt, size_t size,
2057 			     k_timeout_t timeout);
2058 #endif
2059 
2060 #if !defined(NET_PKT_DEBUG_ENABLED)
2061 /**
2062  * @brief Allocate a network packet and buffer at once
2063  *
2064  * @param iface   The network interface the packet is supposed to go through.
2065  * @param size    The size of buffer.
2066  * @param family  The family to which the packet belongs.
2067  * @param proto   The IP protocol type (can be 0 for none).
2068  * @param timeout Maximum time to wait for an allocation.
2069  *
2070  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
2071  */
2072 struct net_pkt *net_pkt_alloc_with_buffer(struct net_if *iface,
2073 					  size_t size,
2074 					  sa_family_t family,
2075 					  enum net_ip_protocol proto,
2076 					  k_timeout_t timeout);
2077 
2078 /** @cond INTERNAL_HIDDEN */
2079 
2080 /* Same as above but specifically for RX packet */
2081 struct net_pkt *net_pkt_rx_alloc_with_buffer(struct net_if *iface,
2082 					     size_t size,
2083 					     sa_family_t family,
2084 					     enum net_ip_protocol proto,
2085 					     k_timeout_t timeout);
2086 
2087 /** @endcond */
2088 
2089 #endif
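
/* Illustrative sketch: the usual application-level pattern, allocating the
 * packet and its buffer in one call, filling it and releasing it on error.
 * iface and payload are placeholders.
 *
 *	struct net_pkt *pkt;
 *
 *	pkt = net_pkt_alloc_with_buffer(iface, sizeof(payload),
 *					AF_INET, IPPROTO_UDP, K_MSEC(100));
 *	if (pkt == NULL) {
 *		return -ENOMEM;
 *	}
 *	if (net_pkt_write(pkt, payload, sizeof(payload)) < 0) {
 *		net_pkt_unref(pkt);
 *		return -EIO;
 *	}
 */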
2090 
2091 /**
2092  * @brief Append a buffer in packet
2093  *
2094  * @param pkt    Network packet where to append the buffer
2095  * @param buffer Buffer to append
2096  */
2097 void net_pkt_append_buffer(struct net_pkt *pkt, struct net_buf *buffer);
2098 
2099 /**
2100  * @brief Get available buffer space from a pkt
2101  *
2102  * @note Reserved bytes (headroom) in any of the fragments are not considered to
2103  *       be available.
2104  *
2105  * @param pkt The net_pkt which buffer availability should be evaluated
2106  *
2107  * @return the amount of buffer available
2108  */
2109 size_t net_pkt_available_buffer(struct net_pkt *pkt);
2110 
2111 /**
2112  * @brief Get available buffer space for payload from a pkt
2113  *
2114  * @note Reserved bytes (headroom) in any of the fragments are not considered to
2115  *       be available.
2116  *
2117  * @details Unlike net_pkt_available_buffer(), this takes into account the
2118  *          space required for protocol headers.
2119  *
2120  * @param pkt   The net_pkt which payload buffer availability should
2121  *              be evaluated
2122  * @param proto The IP protocol type (can be 0 for none).
2123  *
2124  * @return the amount of buffer available for payload
2125  */
2126 size_t net_pkt_available_payload_buffer(struct net_pkt *pkt,
2127 					enum net_ip_protocol proto);
2128 
2129 /**
2130  * @brief Trim net_pkt buffer
2131  *
2132  * @details This checks for unused buffers and deallocates them as
2133  *          appropriate.
2134  *
2135  * @param pkt The net_pkt which buffer will be trimmed
2136  */
2137 void net_pkt_trim_buffer(struct net_pkt *pkt);
2138 
2139 /**
2140  * @brief Remove @a length bytes from tail of packet
2141  *
2142  * @details This function does not take packet cursor into account. It is a
2143  *          helper to remove unneeded bytes from tail of packet (like appended
2144  *          CRC). It takes care of buffer deallocation if removed bytes span
2145  *          whole buffer(s).
2146  *
2147  * @param pkt    Network packet
2148  * @param length Number of bytes to be removed
2149  *
2150  * @retval 0       On success.
2151  * @retval -EINVAL If packet length is shorter than @a length.
2152  */
2153 int net_pkt_remove_tail(struct net_pkt *pkt, size_t length);
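
/* Illustrative sketch: dropping a trailing 4-byte FCS/CRC that a driver left
 * at the end of a received frame (the 4-byte trailer length is an
 * assumption).
 *
 *	if (net_pkt_remove_tail(pkt, 4) < 0) {
 *		// frame shorter than the trailer, drop it
 *		net_pkt_unref(pkt);
 *		return -EINVAL;
 *	}
 */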
2154 
2155 /**
2156  * @brief Initialize net_pkt cursor
2157  *
2158  * @details This will initialize the net_pkt cursor from its buffer.
2159  *
2160  * @param pkt The net_pkt whose cursor is going to be initialized
2161  */
2162 void net_pkt_cursor_init(struct net_pkt *pkt);
2163 
2164 /**
2165  * @brief Backup net_pkt cursor
2166  *
2167  * @param pkt    The net_pkt whose cursor is going to be backed up
2168  * @param backup The cursor where to backup net_pkt cursor
2169  */
2170 static inline void net_pkt_cursor_backup(struct net_pkt *pkt,
2171 					 struct net_pkt_cursor *backup)
2172 {
2173 	backup->buf = pkt->cursor.buf;
2174 	backup->pos = pkt->cursor.pos;
2175 }
2176 
2177 /**
2178  * @brief Restore net_pkt cursor from a backup
2179  *
2180  * @param pkt    The net_pkt whose cursor is going to be restored
2181  * @param backup The cursor from where to restore net_pkt cursor
2182  */
2183 static inline void net_pkt_cursor_restore(struct net_pkt *pkt,
2184 					  struct net_pkt_cursor *backup)
2185 {
2186 	pkt->cursor.buf = backup->buf;
2187 	pkt->cursor.pos = backup->pos;
2188 }
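
/* Illustrative sketch: peeking at upcoming data without disturbing the
 * parsing position by saving and restoring the cursor around the read.
 * net_pkt_read_u8() is declared further below in this header.
 *
 *	struct net_pkt_cursor backup;
 *	uint8_t next_hdr;
 *
 *	net_pkt_cursor_backup(pkt, &backup);
 *	(void)net_pkt_read_u8(pkt, &next_hdr);	// moves the cursor
 *	net_pkt_cursor_restore(pkt, &backup);	// back to the saved position
 */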
2189 
2190 /**
2191  * @brief Returns current position of the cursor
2192  *
2193  * @param pkt The net_pkt whose cursor position is going to be returned
2194  *
2195  * @return cursor's position
2196  */
2197 static inline void *net_pkt_cursor_get_pos(struct net_pkt *pkt)
2198 {
2199 	return pkt->cursor.pos;
2200 }
2201 
2202 /**
2203  * @brief Skip some data from a net_pkt
2204  *
2205  * @details net_pkt's cursor should be properly initialized.
2206  *          Cursor position will be updated after the operation.
2207  *          Depending on the pkt->overwrite bit, this function may or may not
2208  *          affect the buffer length. If overwrite is true, it only advances
2209  *          the cursor by the requested length. If it is false, it advances
2210  *          the cursor as well, and if the cursor was already at the end of
2211  *          the existing data it also increments the buffer length. In that
2212  *          case its behavior is just like net_pkt_write or net_pkt_memset,
2213  *          the difference being that it does not affect the buffer content
2214  *          itself (which may then be just garbage).
2215  *
2216  * @param pkt    The net_pkt whose cursor will be updated to skip given
2217  *               amount of data from the buffer.
2218  * @param length Amount of data to skip in the buffer
2219  *
2220  * @return 0 on success, negative errno code otherwise.
2221  */
2222 int net_pkt_skip(struct net_pkt *pkt, size_t length);
2223 
2224 /**
2225  * @brief Memset some data in a net_pkt
2226  *
2227  * @details net_pkt's cursor should be properly initialized and,
2228  *          if needed, positioned using net_pkt_skip.
2229  *          Cursor position will be updated after the operation.
2230  *
2231  * @param pkt    The net_pkt whose buffer to fill starting at the current
2232  *               cursor position.
2233  * @param byte   The byte to write in memory
2234  * @param length Amount of data to memset with given byte
2235  *
2236  * @return 0 on success, negative errno code otherwise.
2237  */
2238 int net_pkt_memset(struct net_pkt *pkt, int byte, size_t length);
2239 
2240 /**
2241  * @brief Copy data from a packet into another one.
2242  *
2243  * @details Both net_pkt cursors should be properly initialized and,
2244  *          if needed, positioned using net_pkt_skip.
2245  *          The cursors will be updated after the operation.
2246  *
2247  * @param pkt_dst Destination network packet.
2248  * @param pkt_src Source network packet.
2249  * @param length  Length of data to be copied.
2250  *
2251  * @return 0 on success, negative errno code otherwise.
2252  */
2253 int net_pkt_copy(struct net_pkt *pkt_dst,
2254 		 struct net_pkt *pkt_src,
2255 		 size_t length);
2256 
2257 /**
2258  * @brief Clone pkt and its buffer. The cloned packet will be allocated from
2259  *        the same pool as the original one.
2260  *
2261  * @param pkt Original pkt to be cloned
2262  * @param timeout Timeout to wait for free buffer
2263  *
2264  * @return NULL if error, cloned packet otherwise.
2265  */
2266 struct net_pkt *net_pkt_clone(struct net_pkt *pkt, k_timeout_t timeout);
2267 
2268 /**
2269  * @brief Clone pkt and its buffer. The cloned packet will be allocated from
2270  *        the RX packet pool.
2271  *
2272  * @param pkt Original pkt to be cloned
2273  * @param timeout Timeout to wait for free buffer
2274  *
2275  * @return NULL if error, cloned packet otherwise.
2276  */
2277 struct net_pkt *net_pkt_rx_clone(struct net_pkt *pkt, k_timeout_t timeout);
2278 
2279 /**
2280  * @brief Clone pkt and increase the refcount of its buffer.
2281  *
2282  * @param pkt Original pkt to be shallow cloned
2283  * @param timeout Timeout to wait for free packet
2284  *
2285  * @return NULL if error, cloned packet otherwise.
2286  */
2287 struct net_pkt *net_pkt_shallow_clone(struct net_pkt *pkt,
2288 				      k_timeout_t timeout);
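
/* Illustrative sketch: duplicating a packet before handing it to a second
 * consumer. net_pkt_clone() copies the buffer contents, whereas
 * net_pkt_shallow_clone() only takes extra references on the same buffers,
 * so the shallow variant is only safe while neither copy modifies the
 * shared data.
 *
 *	struct net_pkt *copy = net_pkt_clone(pkt, K_MSEC(100));
 *
 *	if (copy == NULL) {
 *		return -ENOMEM;
 *	}
 */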
2289 
2290 /**
2291  * @brief Read some data from a net_pkt
2292  *
2293  * @details net_pkt's cursor should be properly initialized and,
2294  *          if needed, positioned using net_pkt_skip.
2295  *          Cursor position will be updated after the operation.
2296  *
2297  * @param pkt    The network packet from where to read some data
2298  * @param data   The destination buffer where to copy the data
2299  * @param length The amount of data to copy
2300  *
2301  * @return 0 on success, negative errno code otherwise.
2302  */
2303 int net_pkt_read(struct net_pkt *pkt, void *data, size_t length);
2304 
2305 /**
2306  * @brief Read a byte (uint8_t) from a net_pkt
2307  *
2308  * @details net_pkt's cursor should be properly initialized and,
2309  *          if needed, positioned using net_pkt_skip.
2310  *          Cursor position will be updated after the operation.
2311  *
2312  * @param pkt  The network packet from where to read
2313  * @param data The destination uint8_t where to copy the data
2314  *
2315  * @return 0 on success, negative errno code otherwise.
2316  */
2317 static inline int net_pkt_read_u8(struct net_pkt *pkt, uint8_t *data)
2318 {
2319 	return net_pkt_read(pkt, data, 1);
2320 }
2321 
2322 /**
2323  * @brief Read uint16_t big endian data from a net_pkt
2324  *
2325  * @details net_pkt's cursor should be properly initialized and,
2326  *          if needed, positioned using net_pkt_skip.
2327  *          Cursor position will be updated after the operation.
2328  *
2329  * @param pkt  The network packet from where to read
2330  * @param data The destination uint16_t where to copy the data
2331  *
2332  * @return 0 on success, negative errno code otherwise.
2333  */
2334 int net_pkt_read_be16(struct net_pkt *pkt, uint16_t *data);
2335 
2336 /**
2337  * @brief Read uint16_t little endian data from a net_pkt
2338  *
2339  * @details net_pkt's cursor should be properly initialized and,
2340  *          if needed, positioned using net_pkt_skip.
2341  *          Cursor position will be updated after the operation.
2342  *
2343  * @param pkt  The network packet from where to read
2344  * @param data The destination uint16_t where to copy the data
2345  *
2346  * @return 0 on success, negative errno code otherwise.
2347  */
2348 int net_pkt_read_le16(struct net_pkt *pkt, uint16_t *data);
2349 
2350 /**
2351  * @brief Read uint32_t big endian data from a net_pkt
2352  *
2353  * @details net_pkt's cursor should be properly initialized and,
2354  *          if needed, positioned using net_pkt_skip.
2355  *          Cursor position will be updated after the operation.
2356  *
2357  * @param pkt  The network packet from where to read
2358  * @param data The destination uint32_t where to copy the data
2359  *
2360  * @return 0 on success, negative errno code otherwise.
2361  */
2362 int net_pkt_read_be32(struct net_pkt *pkt, uint32_t *data);
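
/* Illustrative sketch: parsing fixed-size big-endian fields in sequence. The
 * field layout (a 16-bit type followed by a 32-bit value) is purely an
 * example.
 *
 *	uint16_t type;
 *	uint32_t value;
 *
 *	net_pkt_cursor_init(pkt);
 *	if (net_pkt_read_be16(pkt, &type) < 0 ||
 *	    net_pkt_read_be32(pkt, &value) < 0) {
 *		return -EMSGSIZE;
 *	}
 */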
2363 
2364 /**
2365  * @brief Write data into a net_pkt
2366  *
2367  * @details net_pkt's cursor should be properly initialized and,
2368  *          if needed, positioned using net_pkt_skip.
2369  *          Cursor position will be updated after the operation.
2370  *
2371  * @param pkt    The network packet where to write
2372  * @param data   Data to be written
2373  * @param length Length of the data to be written
2374  *
2375  * @return 0 on success, negative errno code otherwise.
2376  */
2377 int net_pkt_write(struct net_pkt *pkt, const void *data, size_t length);
2378 
2379 /**
2380  * @brief Write a byte (uint8_t) data to a net_pkt
2381  *
2382  * @details net_pkt's cursor should be properly initialized and,
2383  *          if needed, positioned using net_pkt_skip.
2384  *          Cursor position will be updated after the operation.
2385  *
2386  * @param pkt  The network packet where to write
2387  * @param data The uint8_t value to write
2388  *
2389  * @return 0 on success, negative errno code otherwise.
2390  */
2391 static inline int net_pkt_write_u8(struct net_pkt *pkt, uint8_t data)
2392 {
2393 	return net_pkt_write(pkt, &data, sizeof(uint8_t));
2394 }
2395 
2396 /**
2397  * @brief Write a uint16_t big endian data to a net_pkt
2398  *
2399  * @details net_pkt's cursor should be properly initialized and,
2400  *          if needed, positioned using net_pkt_skip.
2401  *          Cursor position will be updated after the operation.
2402  *
2403  * @param pkt  The network packet where to write
2404  * @param data The uint16_t value in host byte order to write
2405  *
2406  * @return 0 on success, negative errno code otherwise.
2407  */
2408 static inline int net_pkt_write_be16(struct net_pkt *pkt, uint16_t data)
2409 {
2410 	uint16_t data_be16 = htons(data);
2411 
2412 	return net_pkt_write(pkt, &data_be16, sizeof(uint16_t));
2413 }
2414 
2415 /**
2416  * @brief Write a uint32_t big endian data to a net_pkt
2417  *
2418  * @details net_pkt's cursor should be properly initialized and,
2419  *          if needed, positioned using net_pkt_skip.
2420  *          Cursor position will be updated after the operation.
2421  *
2422  * @param pkt  The network packet where to write
2423  * @param data The uint32_t value in host byte order to write
2424  *
2425  * @return 0 on success, negative errno code otherwise.
2426  */
2427 static inline int net_pkt_write_be32(struct net_pkt *pkt, uint32_t data)
2428 {
2429 	uint32_t data_be32 = htonl(data);
2430 
2431 	return net_pkt_write(pkt, &data_be32, sizeof(uint32_t));
2432 }
2433 
2434 /**
2435  * @brief Write a uint32_t little endian data to a net_pkt
2436  *
2437  * @details net_pkt's cursor should be properly initialized and,
2438  *          if needed, positioned using net_pkt_skip.
2439  *          Cursor position will be updated after the operation.
2440  *
2441  * @param pkt  The network packet where to write
2442  * @param data The uint32_t value in host byte order to write
2443  *
2444  * @return 0 on success, negative errno code otherwise.
2445  */
2446 static inline int net_pkt_write_le32(struct net_pkt *pkt, uint32_t data)
2447 {
2448 	uint32_t data_le32 = sys_cpu_to_le32(data);
2449 
2450 	return net_pkt_write(pkt, &data_le32, sizeof(uint32_t));
2451 }
2452 
2453 /**
2454  * @brief Write a uint16_t little endian data to a net_pkt
2455  *
2456  * @details net_pkt's cursor should be properly initialized and,
2457  *          if needed, positioned using net_pkt_skip.
2458  *          Cursor position will be updated after the operation.
2459  *
2460  * @param pkt  The network packet where to write
2461  * @param data The uint16_t value in host byte order to write
2462  *
2463  * @return 0 on success, negative errno code otherwise.
2464  */
2465 static inline int net_pkt_write_le16(struct net_pkt *pkt, uint16_t data)
2466 {
2467 	uint16_t data_le16 = sys_cpu_to_le16(data);
2468 
2469 	return net_pkt_write(pkt, &data_le16, sizeof(uint16_t));
2470 }
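
/* Illustrative sketch: serializing a few header fields with the typed write
 * helpers; the values and the total_len variable are placeholders.
 *
 *	if (net_pkt_write_u8(pkt, 0x45) < 0 ||
 *	    net_pkt_write_be16(pkt, total_len) < 0 ||
 *	    net_pkt_write_be32(pkt, 0) < 0) {
 *		return -ENOBUFS;
 *	}
 */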
2471 
2472 /**
2473  * @brief Get the amount of data which can be read from current cursor position
2474  *
2475  * @param pkt Network packet
2476  *
2477  * @return Amount of data which can be read from current pkt cursor
2478  */
2479 size_t net_pkt_remaining_data(struct net_pkt *pkt);
2480 
2481 /**
2482  * @brief Get the total amount of bytes stored in a packet.
2483  *
2484  * @param pkt Network packet
2485  *
2486  * @return Total amount of bytes stored in a packet.
2487  */
2488 static inline size_t net_pkt_get_len(struct net_pkt *pkt)
2489 {
2490 	return net_buf_frags_len(pkt->frags);
2491 }
2492 
2493 /**
2494  * @brief Update the overall length of a packet
2495  *
2496  * @details Unlike net_pkt_pull() below, this does not take the packet cursor
2497  *          into account. It is mainly a helper dedicated to the IPv4 and IPv6
2498  *          input functions. It shrinks the overall packet length to the given value.
2499  *
2500  * @param pkt    Network packet
2501  * @param length The new length of the packet
2502  *
2503  * @return 0 on success, negative errno code otherwise.
2504  */
2505 int net_pkt_update_length(struct net_pkt *pkt, size_t length);
2506 
2507 /**
2508  * @brief Remove data from the packet at current location
2509  *
2510  * @details net_pkt's cursor should be properly initialized and,
2511  *          if needed, properly positioned using net_pkt_skip/read/write.
2512  *          Note that net_pkt's cursor is reset by this function.
2513  *
2514  * @param pkt    Network packet
2515  * @param length Number of bytes to be removed
2516  *
2517  * @return 0 on success, negative errno code otherwise.
2518  */
2519 int net_pkt_pull(struct net_pkt *pkt, size_t length);
2520 
2521 /**
2522  * @brief Get the actual offset in the packet from its cursor
2523  *
2524  * @param pkt Network packet.
2525  *
2526  * @return A valid offset on success, 0 otherwise when the offset cannot
2527  *         be evaluated.
2528  */
2529 uint16_t net_pkt_get_current_offset(struct net_pkt *pkt);
2530 
2531 /**
2532  * @brief Check if a data size could fit contiguously
2533  *
2534  * @details net_pkt's cursor should be properly initialized and,
2535  *          if needed, positioned using net_pkt_skip.
2536  *
2537  * @param pkt  Network packet.
2538  * @param size The size to check for contiguity
2539  *
2540  * @return true if that is the case, false otherwise.
2541  */
2542 bool net_pkt_is_contiguous(struct net_pkt *pkt, size_t size);
2543 
2544 /**
2545  * Get the contiguous buffer space
2546  *
2547  * @param pkt Network packet
2548  *
2549  * @return The available contiguous buffer space in bytes starting from the
2550  *         current cursor position. 0 in case of an error.
2551  */
2552 size_t net_pkt_get_contiguous_len(struct net_pkt *pkt);
2553 
2554 /** @cond INTERNAL_HIDDEN */
2555 
2556 struct net_pkt_data_access {
2557 #if !defined(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)
2558 	void *data;
2559 #endif
2560 	const size_t size;
2561 };
2562 
2563 #if defined(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)
2564 #define NET_PKT_DATA_ACCESS_DEFINE(_name, _type)		\
2565 	struct net_pkt_data_access _name = {			\
2566 		.size = sizeof(_type),				\
2567 	}
2568 
2569 #define NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(_name, _type)	\
2570 	NET_PKT_DATA_ACCESS_DEFINE(_name, _type)
2571 
2572 #else
2573 #define NET_PKT_DATA_ACCESS_DEFINE(_name, _type)		\
2574 	_type _hdr_##_name;					\
2575 	struct net_pkt_data_access _name = {			\
2576 		.data = &_hdr_##_name,				\
2577 		.size = sizeof(_type),				\
2578 	}
2579 
2580 #define NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(_name, _type)	\
2581 	struct net_pkt_data_access _name = {			\
2582 		.data = NULL,					\
2583 		.size = sizeof(_type),				\
2584 	}
2585 
2586 #endif /* CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS */
2587 
2588 /** @endcond */
2589 
2590 /**
2591  * @brief Get data from a network packet in a contiguous way
2592  *
2593  * @details net_pkt's cursor should be properly initialized and,
2594  *          if needed, positioned using net_pkt_skip. Unlike other functions,
2595  *          cursor position will not be updated after the operation.
2596  *
2597  * @param pkt    The network packet from where to get the data.
2598  * @param access A pointer to a valid net_pkt_data_access describing the
2599  *        data to get in a contiguous way.
2600  *
2601  * @return a pointer to the requested contiguous data, NULL otherwise.
2602  */
2603 void *net_pkt_get_data(struct net_pkt *pkt,
2604 		       struct net_pkt_data_access *access);
2605 
2606 /**
2607  * @brief Set contiguous data into a network packet
2608  *
2609  * @details net_pkt's cursor should be properly initialized and,
2610  *          if needed, positioned using net_pkt_skip.
2611  *          Cursor position will be updated after the operation.
2612  *
2613  * @param pkt    The network packet to where the data should be set.
2614  * @param access A pointer to a valid net_pkt_data_access describing the
2615  *        data to set.
2616  *
2617  * @return 0 on success, a negative errno otherwise.
2618  */
2619 int net_pkt_set_data(struct net_pkt *pkt,
2620 		     struct net_pkt_data_access *access);
2621 
2622 /**
2623  * Acknowledge previously contiguous data taken from a network packet.
2624  * The packet needs to be set to overwrite mode.
2625  */
2626 static inline int net_pkt_acknowledge_data(struct net_pkt *pkt,
2627 					   struct net_pkt_data_access *access)
2628 {
2629 	return net_pkt_skip(pkt, access->size);
2630 }
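
/* Illustrative sketch: accessing a protocol header in place with the data
 * access helpers. struct net_udp_hdr is assumed to come from
 * zephyr/net/udp.h; because the returned pointer may refer either to the
 * packet buffer or to a temporary copy (depending on
 * CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS), modifications are committed back
 * with net_pkt_set_data().
 *
 *	NET_PKT_DATA_ACCESS_DEFINE(udp_access, struct net_udp_hdr);
 *	struct net_udp_hdr *udp_hdr;
 *
 *	udp_hdr = (struct net_udp_hdr *)net_pkt_get_data(pkt, &udp_access);
 *	if (udp_hdr == NULL) {
 *		return -ENOBUFS;
 *	}
 *	udp_hdr->chksum = 0;
 *	(void)net_pkt_set_data(pkt, &udp_access);
 */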
2631 
2632 /**
2633  * @}
2634  */
2635 
2636 #ifdef __cplusplus
2637 }
2638 #endif
2639 
2640 #endif /* ZEPHYR_INCLUDE_NET_NET_PKT_H_ */
2641