1 /** @file
2  * @brief Network packet buffer descriptor API
3  *
4  * Network data is passed between different parts of the stack via
5  * net_buf struct.
6  */
7 
8 /*
9  * Copyright (c) 2016 Intel Corporation
10  *
11  * SPDX-License-Identifier: Apache-2.0
12  */
13 
14 /* Data buffer API - used for all data to/from net */
15 
16 #ifndef ZEPHYR_INCLUDE_NET_NET_PKT_H_
17 #define ZEPHYR_INCLUDE_NET_NET_PKT_H_
18 
19 #include <zephyr/types.h>
20 #include <stdbool.h>
21 
22 #include <zephyr/net/buf.h>
23 
24 #if defined(CONFIG_IEEE802154)
25 #include <zephyr/net/ieee802154_pkt.h>
26 #endif
27 #include <zephyr/net/net_core.h>
28 #include <zephyr/net/net_linkaddr.h>
29 #include <zephyr/net/net_ip.h>
30 #include <zephyr/net/net_if.h>
31 #include <zephyr/net/net_context.h>
32 #include <zephyr/net/ethernet_vlan.h>
33 #include <zephyr/net/ptp_time.h>
34 
35 #ifdef __cplusplus
36 extern "C" {
37 #endif
38 
39 /**
40  * @brief Network packet management library
41  * @defgroup net_pkt Network Packet Library
42  * @ingroup networking
43  * @{
44  */
45 
46 struct net_context;
47 
48 /* buffer cursor used in net_pkt */
49 struct net_pkt_cursor {
50 	/** Current net_buf pointer by the cursor */
51 	struct net_buf *buf;
52 	/** Current position in the data buffer of the net_buf */
53 	uint8_t *pos;
54 };
55 
56 /**
57  * @brief Network packet.
58  *
59  * Note that if you add new fields into net_pkt, remember to update
60  * net_pkt_clone() function.
61  */
62 struct net_pkt {
63 	/**
64 	 * The fifo is used by RX/TX threads and by socket layer. The net_pkt
65 	 * is queued via fifo to the processing thread.
66 	 */
67 	intptr_t fifo;
68 
69 	/** Slab pointer from where it belongs to */
70 	struct k_mem_slab *slab;
71 
72 	/** buffer holding the packet */
73 	union {
74 		struct net_buf *frags;
75 		struct net_buf *buffer;
76 	};
77 
78 	/** Internal buffer iterator used for reading/writing */
79 	struct net_pkt_cursor cursor;
80 
81 	/** Network connection context */
82 	struct net_context *context;
83 
84 	/** Network interface */
85 	struct net_if *iface;
86 
87 	/** @cond ignore */
88 
89 #if defined(CONFIG_NET_TCP)
90 	/** Allow placing the packet into sys_slist_t */
91 	sys_snode_t next;
92 #endif
93 #if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
94 	struct net_if *orig_iface; /* Original network interface */
95 #endif
96 
97 #if defined(CONFIG_NET_PKT_TIMESTAMP)
98 	/**
99 	 * Timestamp if available.
100 	 * For IEEE 802.15.4 packets this refers to the first symbol of the MAC Header.
101 	 */
102 	struct net_ptp_time timestamp;
103 #endif
104 
105 #if defined(CONFIG_NET_PKT_RXTIME_STATS) || defined(CONFIG_NET_PKT_TXTIME_STATS)
106 	struct {
107 		/** Create time in cycles */
108 		uint32_t create_time;
109 
110 #if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL) || \
111 	defined(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)
112 		/** Collect extra statistics for net_pkt processing
113 		 * from various points in the IP stack. See networking
114 		 * documentation where these points are located and how
115 		 * to interpret the results.
116 		 */
117 		struct {
118 			uint32_t stat[NET_PKT_DETAIL_STATS_COUNT];
119 			int count;
120 		} detail;
121 #endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL ||
122 	  CONFIG_NET_PKT_RXTIME_STATS_DETAIL */
123 	};
124 #endif /* CONFIG_NET_PKT_RXTIME_STATS || CONFIG_NET_PKT_TXTIME_STATS */
125 
126 #if defined(CONFIG_NET_PKT_TXTIME)
127 	/** Network packet TX time in the future (in nanoseconds) */
128 	uint64_t txtime;
129 #endif /* CONFIG_NET_PKT_TXTIME */
130 
131 	/** Reference counter */
132 	atomic_t atomic_ref;
133 
134 	/* Filled by layer 2 when network packet is received. */
135 	struct net_linkaddr lladdr_src;
136 	struct net_linkaddr lladdr_dst;
137 	uint16_t ll_proto_type;
138 
139 #if defined(CONFIG_NET_IP)
140 	uint8_t ip_hdr_len;	/* pre-filled in order to avoid func call */
141 #endif
142 
143 	uint8_t overwrite : 1;	 /* Is packet content being overwritten? */
144 	uint8_t sent_or_eof : 1; /* For outgoing packet: is this sent or not
145 				  * For incoming packet of a socket: last
146 				  * packet before EOF
147 				  * Used only if defined(CONFIG_NET_TCP)
148 				  */
149 	uint8_t pkt_queued : 1;	 /* For outgoing packet: is this packet
150 				  * queued to be sent but has not reached
151 				  * the driver yet.
152 				  * Used only if defined(CONFIG_NET_TCP)
153 				  */
154 	uint8_t ptp_pkt : 1;	 /* For outgoing packet: is this packet
155 				  * a L2 PTP packet.
156 				  * Used only if defined (CONFIG_NET_L2_PTP)
157 				  */
158 	uint8_t forwarding : 1;	 /* Are we forwarding this pkt
159 				  * Used only if defined(CONFIG_NET_ROUTE)
160 				  */
161 	uint8_t family : 3;	 /* Address family, see net_ip.h */
162 
163 	/* bitfield byte alignment boundary */
164 
165 #if defined(CONFIG_NET_IPV4_AUTO)
166 	uint8_t ipv4_auto_arp_msg : 1; /* Is this pkt IPv4 autoconf ARP
167 					* message.
168 					* Note: family needs to be
169 					* AF_INET.
170 					*/
171 #endif
172 #if defined(CONFIG_NET_LLDP)
173 	uint8_t lldp_pkt : 1; /* Is this pkt an LLDP message.
174 			       * Note: family needs to be
175 			       * AF_UNSPEC.
176 			       */
177 #endif
178 	uint8_t ppp_msg : 1; /* This is a PPP message */
179 #if defined(CONFIG_NET_TCP)
180 	uint8_t tcp_first_msg : 1; /* Is this the first time this pkt is
181 				    * sent, or is this a resend of a TCP
182 				    * segment.
183 				    */
184 #endif
185 	uint8_t captured : 1;	  /* Set to 1 if this packet is already being
186 				   * captured
187 				   */
188 	uint8_t l2_bridged : 1;	  /* set to 1 if this packet comes from a bridge
189 				   * and already contains its L2 header to be
190 				   * preserved. Useful only if
191 				   * defined(CONFIG_NET_ETHERNET_BRIDGE).
192 				   */
193 	uint8_t l2_processed : 1; /* Set to 1 if this packet has already been
194 				   * processed by the L2
195 				   */
196 
197 	/* bitfield byte alignment boundary */
198 
199 #if defined(CONFIG_NET_IP)
200 	union {
201 		/* IPv6 hop limit or IPv4 ttl for this network packet.
202 		 * The value is shared between IPv6 and IPv4.
203 		 */
204 #if defined(CONFIG_NET_IPV6)
205 		uint8_t ipv6_hop_limit;
206 #endif
207 #if defined(CONFIG_NET_IPV4)
208 		uint8_t ipv4_ttl;
209 #endif
210 	};
211 
212 	union {
213 #if defined(CONFIG_NET_IPV4)
214 		uint8_t ipv4_opts_len; /* length of IPv4 header options */
215 #endif
216 #if defined(CONFIG_NET_IPV6)
217 		uint16_t ipv6_ext_len; /* length of extension headers */
218 #endif
219 	};
220 
221 #if defined(CONFIG_NET_IPV4_FRAGMENT) || defined(CONFIG_NET_IPV6_FRAGMENT)
222 	union {
223 #if defined(CONFIG_NET_IPV4_FRAGMENT)
224 		struct {
225 			uint16_t flags;		/* Fragment offset and M (More Fragment) flag */
226 			uint16_t id;		/* Fragment ID */
227 		} ipv4_fragment;
228 #endif /* CONFIG_NET_IPV4_FRAGMENT */
229 #if defined(CONFIG_NET_IPV6_FRAGMENT)
230 		struct {
231 			uint16_t flags;		/* Fragment offset and M (More Fragment) flag */
232 			uint32_t id;		/* Fragment id */
233 			uint16_t hdr_start;	/* Where starts the fragment header */
234 		} ipv6_fragment;
235 #endif /* CONFIG_NET_IPV6_FRAGMENT */
236 	};
237 #endif /* CONFIG_NET_IPV4_FRAGMENT || CONFIG_NET_IPV6_FRAGMENT */
238 
239 #if defined(CONFIG_NET_IPV6)
240 	/* Where is the start of the last header before payload data
241 	 * in IPv6 packet. This is offset value from start of the IPv6
242 	 * packet. Note that this value should be updated by who ever
243 	 * adds IPv6 extension headers to the network packet.
244 	 */
245 	uint16_t ipv6_prev_hdr_start;
246 
247 	uint8_t ipv6_ext_opt_len; /* IPv6 ND option length */
248 	uint8_t ipv6_next_hdr;	/* What is the very first next header */
249 #endif /* CONFIG_NET_IPV6 */
250 
251 #if defined(CONFIG_NET_IP_DSCP_ECN)
252 	/** IPv4/IPv6 Differentiated Services Code Point value. */
253 	uint8_t ip_dscp : 6;
254 
255 	/** IPv4/IPv6 Explicit Congestion Notification value. */
256 	uint8_t ip_ecn : 2;
257 #endif /* CONFIG_NET_IP_DSCP_ECN */
258 #endif /* CONFIG_NET_IP */
259 
260 #if defined(CONFIG_NET_VLAN)
261 	/* VLAN TCI (Tag Control Information). This contains the Priority
262 	 * Code Point (PCP), Drop Eligible Indicator (DEI) and VLAN
263 	 * Identifier (VID, called more commonly VLAN tag). This value is
264 	 * kept in host byte order.
265 	 */
266 	uint16_t vlan_tci;
267 #endif /* CONFIG_NET_VLAN */
268 
269 #if defined(NET_PKT_HAS_CONTROL_BLOCK)
270 	/* TODO: Evolve this into a union of orthogonal
271 	 *       control block declarations if further L2
272 	 *       stacks require L2-specific attributes.
273 	 */
274 #if defined(CONFIG_IEEE802154)
275 	/* The following structure requires a 4-byte alignment
276 	 * boundary to avoid padding.
277 	 */
278 	struct net_pkt_cb_ieee802154 cb;
279 #endif /* CONFIG_IEEE802154 */
280 #endif /* NET_PKT_HAS_CONTROL_BLOCK */
281 
282 	/** Network packet priority, can be left out in which case packet
283 	 * is not prioritised.
284 	 */
285 	uint8_t priority;
286 
287 	/* @endcond */
288 };
289 
290 /** @cond ignore */
291 
292 /* The interface real ll address */
net_pkt_lladdr_if(struct net_pkt * pkt)293 static inline struct net_linkaddr *net_pkt_lladdr_if(struct net_pkt *pkt)
294 {
295 	return net_if_get_link_addr(pkt->iface);
296 }
297 
net_pkt_context(struct net_pkt * pkt)298 static inline struct net_context *net_pkt_context(struct net_pkt *pkt)
299 {
300 	return pkt->context;
301 }
302 
net_pkt_set_context(struct net_pkt * pkt,struct net_context * ctx)303 static inline void net_pkt_set_context(struct net_pkt *pkt,
304 				       struct net_context *ctx)
305 {
306 	pkt->context = ctx;
307 }
308 
net_pkt_iface(struct net_pkt * pkt)309 static inline struct net_if *net_pkt_iface(struct net_pkt *pkt)
310 {
311 	return pkt->iface;
312 }
313 
net_pkt_set_iface(struct net_pkt * pkt,struct net_if * iface)314 static inline void net_pkt_set_iface(struct net_pkt *pkt, struct net_if *iface)
315 {
316 	pkt->iface = iface;
317 
318 	/* If the network interface is set in pkt, then also set the type of
319 	 * the network address that is stored in pkt. This is done here so
320 	 * that the address type is properly set and is not forgotten.
321 	 */
322 	if (iface) {
323 		uint8_t type = net_if_get_link_addr(iface)->type;
324 
325 		pkt->lladdr_src.type = type;
326 		pkt->lladdr_dst.type = type;
327 	}
328 }
329 
net_pkt_orig_iface(struct net_pkt * pkt)330 static inline struct net_if *net_pkt_orig_iface(struct net_pkt *pkt)
331 {
332 #if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
333 	return pkt->orig_iface;
334 #else
335 	return pkt->iface;
336 #endif
337 }
338 
net_pkt_set_orig_iface(struct net_pkt * pkt,struct net_if * iface)339 static inline void net_pkt_set_orig_iface(struct net_pkt *pkt,
340 					  struct net_if *iface)
341 {
342 #if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
343 	pkt->orig_iface = iface;
344 #endif
345 }
346 
net_pkt_family(struct net_pkt * pkt)347 static inline uint8_t net_pkt_family(struct net_pkt *pkt)
348 {
349 	return pkt->family;
350 }
351 
net_pkt_set_family(struct net_pkt * pkt,uint8_t family)352 static inline void net_pkt_set_family(struct net_pkt *pkt, uint8_t family)
353 {
354 	pkt->family = family;
355 }
356 
net_pkt_is_ptp(struct net_pkt * pkt)357 static inline bool net_pkt_is_ptp(struct net_pkt *pkt)
358 {
359 	return !!(pkt->ptp_pkt);
360 }
361 
net_pkt_set_ptp(struct net_pkt * pkt,bool is_ptp)362 static inline void net_pkt_set_ptp(struct net_pkt *pkt, bool is_ptp)
363 {
364 	pkt->ptp_pkt = is_ptp;
365 }
366 
net_pkt_is_captured(struct net_pkt * pkt)367 static inline bool net_pkt_is_captured(struct net_pkt *pkt)
368 {
369 	return !!(pkt->captured);
370 }
371 
net_pkt_set_captured(struct net_pkt * pkt,bool is_captured)372 static inline void net_pkt_set_captured(struct net_pkt *pkt, bool is_captured)
373 {
374 	pkt->captured = is_captured;
375 }
376 
net_pkt_is_l2_bridged(struct net_pkt * pkt)377 static inline bool net_pkt_is_l2_bridged(struct net_pkt *pkt)
378 {
379 	return IS_ENABLED(CONFIG_NET_ETHERNET_BRIDGE) ? !!(pkt->l2_bridged) : 0;
380 }
381 
net_pkt_set_l2_bridged(struct net_pkt * pkt,bool is_l2_bridged)382 static inline void net_pkt_set_l2_bridged(struct net_pkt *pkt, bool is_l2_bridged)
383 {
384 	if (IS_ENABLED(CONFIG_NET_ETHERNET_BRIDGE)) {
385 		pkt->l2_bridged = is_l2_bridged;
386 	}
387 }
388 
net_pkt_is_l2_processed(struct net_pkt * pkt)389 static inline bool net_pkt_is_l2_processed(struct net_pkt *pkt)
390 {
391 	return !!(pkt->l2_processed);
392 }
393 
net_pkt_set_l2_processed(struct net_pkt * pkt,bool is_l2_processed)394 static inline void net_pkt_set_l2_processed(struct net_pkt *pkt,
395 					    bool is_l2_processed)
396 {
397 	pkt->l2_processed = is_l2_processed;
398 }
399 
net_pkt_ip_hdr_len(struct net_pkt * pkt)400 static inline uint8_t net_pkt_ip_hdr_len(struct net_pkt *pkt)
401 {
402 #if defined(CONFIG_NET_IP)
403 	return pkt->ip_hdr_len;
404 #else
405 	return 0;
406 #endif
407 }
408 
net_pkt_set_ip_hdr_len(struct net_pkt * pkt,uint8_t len)409 static inline void net_pkt_set_ip_hdr_len(struct net_pkt *pkt, uint8_t len)
410 {
411 #if defined(CONFIG_NET_IP)
412 	pkt->ip_hdr_len = len;
413 #endif
414 }
415 
net_pkt_ip_dscp(struct net_pkt * pkt)416 static inline uint8_t net_pkt_ip_dscp(struct net_pkt *pkt)
417 {
418 #if defined(CONFIG_NET_IP_DSCP_ECN)
419 	return pkt->ip_dscp;
420 #else
421 	return 0;
422 #endif
423 }
424 
net_pkt_set_ip_dscp(struct net_pkt * pkt,uint8_t dscp)425 static inline void net_pkt_set_ip_dscp(struct net_pkt *pkt, uint8_t dscp)
426 {
427 #if defined(CONFIG_NET_IP_DSCP_ECN)
428 	pkt->ip_dscp = dscp;
429 #endif
430 }
431 
net_pkt_ip_ecn(struct net_pkt * pkt)432 static inline uint8_t net_pkt_ip_ecn(struct net_pkt *pkt)
433 {
434 #if defined(CONFIG_NET_IP_DSCP_ECN)
435 	return pkt->ip_ecn;
436 #else
437 	return 0;
438 #endif
439 }
440 
net_pkt_set_ip_ecn(struct net_pkt * pkt,uint8_t ecn)441 static inline void net_pkt_set_ip_ecn(struct net_pkt *pkt, uint8_t ecn)
442 {
443 #if defined(CONFIG_NET_IP_DSCP_ECN)
444 	pkt->ip_ecn = ecn;
445 #endif
446 }
447 
net_pkt_sent(struct net_pkt * pkt)448 static inline uint8_t net_pkt_sent(struct net_pkt *pkt)
449 {
450 	return pkt->sent_or_eof;
451 }
452 
net_pkt_set_sent(struct net_pkt * pkt,bool sent)453 static inline void net_pkt_set_sent(struct net_pkt *pkt, bool sent)
454 {
455 	pkt->sent_or_eof = sent;
456 }
457 
net_pkt_queued(struct net_pkt * pkt)458 static inline uint8_t net_pkt_queued(struct net_pkt *pkt)
459 {
460 	return pkt->pkt_queued;
461 }
462 
net_pkt_set_queued(struct net_pkt * pkt,bool send)463 static inline void net_pkt_set_queued(struct net_pkt *pkt, bool send)
464 {
465 	pkt->pkt_queued = send;
466 }
467 
net_pkt_tcp_1st_msg(struct net_pkt * pkt)468 static inline uint8_t net_pkt_tcp_1st_msg(struct net_pkt *pkt)
469 {
470 #if defined(CONFIG_NET_TCP)
471 	return pkt->tcp_first_msg;
472 #else
473 	return true;
474 #endif
475 }
476 
net_pkt_set_tcp_1st_msg(struct net_pkt * pkt,bool is_1st)477 static inline void net_pkt_set_tcp_1st_msg(struct net_pkt *pkt, bool is_1st)
478 {
479 #if defined(CONFIG_NET_TCP)
480 	pkt->tcp_first_msg = is_1st;
481 #else
482 	ARG_UNUSED(pkt);
483 	ARG_UNUSED(is_1st);
484 #endif
485 }
486 
487 #if defined(CONFIG_NET_SOCKETS)
net_pkt_eof(struct net_pkt * pkt)488 static inline uint8_t net_pkt_eof(struct net_pkt *pkt)
489 {
490 	return pkt->sent_or_eof;
491 }
492 
net_pkt_set_eof(struct net_pkt * pkt,bool eof)493 static inline void net_pkt_set_eof(struct net_pkt *pkt, bool eof)
494 {
495 	pkt->sent_or_eof = eof;
496 }
497 #endif
498 
499 #if defined(CONFIG_NET_ROUTE)
net_pkt_forwarding(struct net_pkt * pkt)500 static inline bool net_pkt_forwarding(struct net_pkt *pkt)
501 {
502 	return pkt->forwarding;
503 }
504 
net_pkt_set_forwarding(struct net_pkt * pkt,bool forward)505 static inline void net_pkt_set_forwarding(struct net_pkt *pkt, bool forward)
506 {
507 	pkt->forwarding = forward;
508 }
509 #else
net_pkt_forwarding(struct net_pkt * pkt)510 static inline bool net_pkt_forwarding(struct net_pkt *pkt)
511 {
512 	return false;
513 }
514 #endif
515 
516 #if defined(CONFIG_NET_IPV4)
net_pkt_ipv4_ttl(struct net_pkt * pkt)517 static inline uint8_t net_pkt_ipv4_ttl(struct net_pkt *pkt)
518 {
519 	return pkt->ipv4_ttl;
520 }
521 
net_pkt_set_ipv4_ttl(struct net_pkt * pkt,uint8_t ttl)522 static inline void net_pkt_set_ipv4_ttl(struct net_pkt *pkt,
523 					uint8_t ttl)
524 {
525 	pkt->ipv4_ttl = ttl;
526 }
527 
net_pkt_ipv4_opts_len(struct net_pkt * pkt)528 static inline uint8_t net_pkt_ipv4_opts_len(struct net_pkt *pkt)
529 {
530 	return pkt->ipv4_opts_len;
531 }
532 
net_pkt_set_ipv4_opts_len(struct net_pkt * pkt,uint8_t opts_len)533 static inline void net_pkt_set_ipv4_opts_len(struct net_pkt *pkt,
534 					     uint8_t opts_len)
535 {
536 	pkt->ipv4_opts_len = opts_len;
537 }
538 #else
net_pkt_ipv4_ttl(struct net_pkt * pkt)539 static inline uint8_t net_pkt_ipv4_ttl(struct net_pkt *pkt)
540 {
541 	ARG_UNUSED(pkt);
542 
543 	return 0;
544 }
545 
net_pkt_set_ipv4_ttl(struct net_pkt * pkt,uint8_t ttl)546 static inline void net_pkt_set_ipv4_ttl(struct net_pkt *pkt,
547 					uint8_t ttl)
548 {
549 	ARG_UNUSED(pkt);
550 	ARG_UNUSED(ttl);
551 }
552 
net_pkt_ipv4_opts_len(struct net_pkt * pkt)553 static inline uint8_t net_pkt_ipv4_opts_len(struct net_pkt *pkt)
554 {
555 	ARG_UNUSED(pkt);
556 	return 0;
557 }
558 
net_pkt_set_ipv4_opts_len(struct net_pkt * pkt,uint8_t opts_len)559 static inline void net_pkt_set_ipv4_opts_len(struct net_pkt *pkt,
560 					     uint8_t opts_len)
561 {
562 	ARG_UNUSED(pkt);
563 	ARG_UNUSED(opts_len);
564 }
565 #endif
566 
567 #if defined(CONFIG_NET_IPV6)
net_pkt_ipv6_ext_opt_len(struct net_pkt * pkt)568 static inline uint8_t net_pkt_ipv6_ext_opt_len(struct net_pkt *pkt)
569 {
570 	return pkt->ipv6_ext_opt_len;
571 }
572 
net_pkt_set_ipv6_ext_opt_len(struct net_pkt * pkt,uint8_t len)573 static inline void net_pkt_set_ipv6_ext_opt_len(struct net_pkt *pkt,
574 						uint8_t len)
575 {
576 	pkt->ipv6_ext_opt_len = len;
577 }
578 
net_pkt_ipv6_next_hdr(struct net_pkt * pkt)579 static inline uint8_t net_pkt_ipv6_next_hdr(struct net_pkt *pkt)
580 {
581 	return pkt->ipv6_next_hdr;
582 }
583 
net_pkt_set_ipv6_next_hdr(struct net_pkt * pkt,uint8_t next_hdr)584 static inline void net_pkt_set_ipv6_next_hdr(struct net_pkt *pkt,
585 					     uint8_t next_hdr)
586 {
587 	pkt->ipv6_next_hdr = next_hdr;
588 }
589 
net_pkt_ipv6_ext_len(struct net_pkt * pkt)590 static inline uint16_t net_pkt_ipv6_ext_len(struct net_pkt *pkt)
591 {
592 	return pkt->ipv6_ext_len;
593 }
594 
net_pkt_set_ipv6_ext_len(struct net_pkt * pkt,uint16_t len)595 static inline void net_pkt_set_ipv6_ext_len(struct net_pkt *pkt, uint16_t len)
596 {
597 	pkt->ipv6_ext_len = len;
598 }
599 
net_pkt_ipv6_hdr_prev(struct net_pkt * pkt)600 static inline uint16_t net_pkt_ipv6_hdr_prev(struct net_pkt *pkt)
601 {
602 	return pkt->ipv6_prev_hdr_start;
603 }
604 
net_pkt_set_ipv6_hdr_prev(struct net_pkt * pkt,uint16_t offset)605 static inline void net_pkt_set_ipv6_hdr_prev(struct net_pkt *pkt,
606 					     uint16_t offset)
607 {
608 	pkt->ipv6_prev_hdr_start = offset;
609 }
610 
net_pkt_ipv6_hop_limit(struct net_pkt * pkt)611 static inline uint8_t net_pkt_ipv6_hop_limit(struct net_pkt *pkt)
612 {
613 	return pkt->ipv6_hop_limit;
614 }
615 
net_pkt_set_ipv6_hop_limit(struct net_pkt * pkt,uint8_t hop_limit)616 static inline void net_pkt_set_ipv6_hop_limit(struct net_pkt *pkt,
617 					      uint8_t hop_limit)
618 {
619 	pkt->ipv6_hop_limit = hop_limit;
620 }
621 #else /* CONFIG_NET_IPV6 */
net_pkt_ipv6_ext_opt_len(struct net_pkt * pkt)622 static inline uint8_t net_pkt_ipv6_ext_opt_len(struct net_pkt *pkt)
623 {
624 	ARG_UNUSED(pkt);
625 
626 	return 0;
627 }
628 
net_pkt_set_ipv6_ext_opt_len(struct net_pkt * pkt,uint8_t len)629 static inline void net_pkt_set_ipv6_ext_opt_len(struct net_pkt *pkt,
630 						uint8_t len)
631 {
632 	ARG_UNUSED(pkt);
633 	ARG_UNUSED(len);
634 }
635 
net_pkt_ipv6_next_hdr(struct net_pkt * pkt)636 static inline uint8_t net_pkt_ipv6_next_hdr(struct net_pkt *pkt)
637 {
638 	ARG_UNUSED(pkt);
639 
640 	return 0;
641 }
642 
net_pkt_set_ipv6_next_hdr(struct net_pkt * pkt,uint8_t next_hdr)643 static inline void net_pkt_set_ipv6_next_hdr(struct net_pkt *pkt,
644 					     uint8_t next_hdr)
645 {
646 	ARG_UNUSED(pkt);
647 	ARG_UNUSED(next_hdr);
648 }
649 
net_pkt_ipv6_ext_len(struct net_pkt * pkt)650 static inline uint16_t net_pkt_ipv6_ext_len(struct net_pkt *pkt)
651 {
652 	ARG_UNUSED(pkt);
653 
654 	return 0;
655 }
656 
net_pkt_set_ipv6_ext_len(struct net_pkt * pkt,uint16_t len)657 static inline void net_pkt_set_ipv6_ext_len(struct net_pkt *pkt, uint16_t len)
658 {
659 	ARG_UNUSED(pkt);
660 	ARG_UNUSED(len);
661 }
662 
net_pkt_ipv6_hdr_prev(struct net_pkt * pkt)663 static inline uint16_t net_pkt_ipv6_hdr_prev(struct net_pkt *pkt)
664 {
665 	ARG_UNUSED(pkt);
666 
667 	return 0;
668 }
669 
net_pkt_set_ipv6_hdr_prev(struct net_pkt * pkt,uint16_t offset)670 static inline void net_pkt_set_ipv6_hdr_prev(struct net_pkt *pkt,
671 					     uint16_t offset)
672 {
673 	ARG_UNUSED(pkt);
674 	ARG_UNUSED(offset);
675 }
676 
net_pkt_ipv6_hop_limit(struct net_pkt * pkt)677 static inline uint8_t net_pkt_ipv6_hop_limit(struct net_pkt *pkt)
678 {
679 	ARG_UNUSED(pkt);
680 
681 	return 0;
682 }
683 
net_pkt_set_ipv6_hop_limit(struct net_pkt * pkt,uint8_t hop_limit)684 static inline void net_pkt_set_ipv6_hop_limit(struct net_pkt *pkt,
685 					      uint8_t hop_limit)
686 {
687 	ARG_UNUSED(pkt);
688 	ARG_UNUSED(hop_limit);
689 }
690 #endif /* CONFIG_NET_IPV6 */
691 
net_pkt_ip_opts_len(struct net_pkt * pkt)692 static inline uint16_t net_pkt_ip_opts_len(struct net_pkt *pkt)
693 {
694 #if defined(CONFIG_NET_IPV6)
695 	return pkt->ipv6_ext_len;
696 #elif defined(CONFIG_NET_IPV4)
697 	return pkt->ipv4_opts_len;
698 #else
699 	ARG_UNUSED(pkt);
700 
701 	return 0;
702 #endif
703 }
704 
705 #if defined(CONFIG_NET_IPV4_FRAGMENT)
net_pkt_ipv4_fragment_offset(struct net_pkt * pkt)706 static inline uint16_t net_pkt_ipv4_fragment_offset(struct net_pkt *pkt)
707 {
708 	return (pkt->ipv4_fragment.flags & NET_IPV4_FRAGH_OFFSET_MASK) * 8;
709 }
710 
net_pkt_ipv4_fragment_more(struct net_pkt * pkt)711 static inline bool net_pkt_ipv4_fragment_more(struct net_pkt *pkt)
712 {
713 	return (pkt->ipv4_fragment.flags & NET_IPV4_MORE_FRAG_MASK) != 0;
714 }
715 
net_pkt_set_ipv4_fragment_flags(struct net_pkt * pkt,uint16_t flags)716 static inline void net_pkt_set_ipv4_fragment_flags(struct net_pkt *pkt, uint16_t flags)
717 {
718 	pkt->ipv4_fragment.flags = flags;
719 }
720 
net_pkt_ipv4_fragment_id(struct net_pkt * pkt)721 static inline uint32_t net_pkt_ipv4_fragment_id(struct net_pkt *pkt)
722 {
723 	return pkt->ipv4_fragment.id;
724 }
725 
net_pkt_set_ipv4_fragment_id(struct net_pkt * pkt,uint32_t id)726 static inline void net_pkt_set_ipv4_fragment_id(struct net_pkt *pkt, uint32_t id)
727 {
728 	pkt->ipv4_fragment.id = id;
729 }
730 #else /* CONFIG_NET_IPV4_FRAGMENT */
net_pkt_ipv4_fragment_offset(struct net_pkt * pkt)731 static inline uint16_t net_pkt_ipv4_fragment_offset(struct net_pkt *pkt)
732 {
733 	ARG_UNUSED(pkt);
734 
735 	return 0;
736 }
737 
net_pkt_ipv4_fragment_more(struct net_pkt * pkt)738 static inline bool net_pkt_ipv4_fragment_more(struct net_pkt *pkt)
739 {
740 	ARG_UNUSED(pkt);
741 
742 	return 0;
743 }
744 
net_pkt_set_ipv4_fragment_flags(struct net_pkt * pkt,uint16_t flags)745 static inline void net_pkt_set_ipv4_fragment_flags(struct net_pkt *pkt, uint16_t flags)
746 {
747 	ARG_UNUSED(pkt);
748 	ARG_UNUSED(flags);
749 }
750 
net_pkt_ipv4_fragment_id(struct net_pkt * pkt)751 static inline uint32_t net_pkt_ipv4_fragment_id(struct net_pkt *pkt)
752 {
753 	ARG_UNUSED(pkt);
754 
755 	return 0;
756 }
757 
net_pkt_set_ipv4_fragment_id(struct net_pkt * pkt,uint32_t id)758 static inline void net_pkt_set_ipv4_fragment_id(struct net_pkt *pkt, uint32_t id)
759 {
760 	ARG_UNUSED(pkt);
761 	ARG_UNUSED(id);
762 }
763 #endif /* CONFIG_NET_IPV4_FRAGMENT */
764 
765 #if defined(CONFIG_NET_IPV6_FRAGMENT)
net_pkt_ipv6_fragment_start(struct net_pkt * pkt)766 static inline uint16_t net_pkt_ipv6_fragment_start(struct net_pkt *pkt)
767 {
768 	return pkt->ipv6_fragment.hdr_start;
769 }
770 
net_pkt_set_ipv6_fragment_start(struct net_pkt * pkt,uint16_t start)771 static inline void net_pkt_set_ipv6_fragment_start(struct net_pkt *pkt,
772 						   uint16_t start)
773 {
774 	pkt->ipv6_fragment.hdr_start = start;
775 }
776 
net_pkt_ipv6_fragment_offset(struct net_pkt * pkt)777 static inline uint16_t net_pkt_ipv6_fragment_offset(struct net_pkt *pkt)
778 {
779 	return pkt->ipv6_fragment.flags & NET_IPV6_FRAGH_OFFSET_MASK;
780 }
net_pkt_ipv6_fragment_more(struct net_pkt * pkt)781 static inline bool net_pkt_ipv6_fragment_more(struct net_pkt *pkt)
782 {
783 	return (pkt->ipv6_fragment.flags & 0x01) != 0;
784 }
785 
net_pkt_set_ipv6_fragment_flags(struct net_pkt * pkt,uint16_t flags)786 static inline void net_pkt_set_ipv6_fragment_flags(struct net_pkt *pkt,
787 						   uint16_t flags)
788 {
789 	pkt->ipv6_fragment.flags = flags;
790 }
791 
net_pkt_ipv6_fragment_id(struct net_pkt * pkt)792 static inline uint32_t net_pkt_ipv6_fragment_id(struct net_pkt *pkt)
793 {
794 	return pkt->ipv6_fragment.id;
795 }
796 
net_pkt_set_ipv6_fragment_id(struct net_pkt * pkt,uint32_t id)797 static inline void net_pkt_set_ipv6_fragment_id(struct net_pkt *pkt,
798 						uint32_t id)
799 {
800 	pkt->ipv6_fragment.id = id;
801 }
802 #else /* CONFIG_NET_IPV6_FRAGMENT */
net_pkt_ipv6_fragment_start(struct net_pkt * pkt)803 static inline uint16_t net_pkt_ipv6_fragment_start(struct net_pkt *pkt)
804 {
805 	ARG_UNUSED(pkt);
806 
807 	return 0;
808 }
809 
net_pkt_set_ipv6_fragment_start(struct net_pkt * pkt,uint16_t start)810 static inline void net_pkt_set_ipv6_fragment_start(struct net_pkt *pkt,
811 						   uint16_t start)
812 {
813 	ARG_UNUSED(pkt);
814 	ARG_UNUSED(start);
815 }
816 
net_pkt_ipv6_fragment_offset(struct net_pkt * pkt)817 static inline uint16_t net_pkt_ipv6_fragment_offset(struct net_pkt *pkt)
818 {
819 	ARG_UNUSED(pkt);
820 
821 	return 0;
822 }
823 
net_pkt_ipv6_fragment_more(struct net_pkt * pkt)824 static inline bool net_pkt_ipv6_fragment_more(struct net_pkt *pkt)
825 {
826 	ARG_UNUSED(pkt);
827 
828 	return 0;
829 }
830 
net_pkt_set_ipv6_fragment_flags(struct net_pkt * pkt,uint16_t flags)831 static inline void net_pkt_set_ipv6_fragment_flags(struct net_pkt *pkt,
832 						   uint16_t flags)
833 {
834 	ARG_UNUSED(pkt);
835 	ARG_UNUSED(flags);
836 }
837 
net_pkt_ipv6_fragment_id(struct net_pkt * pkt)838 static inline uint32_t net_pkt_ipv6_fragment_id(struct net_pkt *pkt)
839 {
840 	ARG_UNUSED(pkt);
841 
842 	return 0;
843 }
844 
net_pkt_set_ipv6_fragment_id(struct net_pkt * pkt,uint32_t id)845 static inline void net_pkt_set_ipv6_fragment_id(struct net_pkt *pkt,
846 						uint32_t id)
847 {
848 	ARG_UNUSED(pkt);
849 	ARG_UNUSED(id);
850 }
851 #endif /* CONFIG_NET_IPV6_FRAGMENT */
852 
net_pkt_priority(struct net_pkt * pkt)853 static inline uint8_t net_pkt_priority(struct net_pkt *pkt)
854 {
855 	return pkt->priority;
856 }
857 
net_pkt_set_priority(struct net_pkt * pkt,uint8_t priority)858 static inline void net_pkt_set_priority(struct net_pkt *pkt,
859 					uint8_t priority)
860 {
861 	pkt->priority = priority;
862 }
863 
864 #if defined(CONFIG_NET_VLAN)
net_pkt_vlan_tag(struct net_pkt * pkt)865 static inline uint16_t net_pkt_vlan_tag(struct net_pkt *pkt)
866 {
867 	return net_eth_vlan_get_vid(pkt->vlan_tci);
868 }
869 
net_pkt_set_vlan_tag(struct net_pkt * pkt,uint16_t tag)870 static inline void net_pkt_set_vlan_tag(struct net_pkt *pkt, uint16_t tag)
871 {
872 	pkt->vlan_tci = net_eth_vlan_set_vid(pkt->vlan_tci, tag);
873 }
874 
net_pkt_vlan_priority(struct net_pkt * pkt)875 static inline uint8_t net_pkt_vlan_priority(struct net_pkt *pkt)
876 {
877 	return net_eth_vlan_get_pcp(pkt->vlan_tci);
878 }
879 
net_pkt_set_vlan_priority(struct net_pkt * pkt,uint8_t priority)880 static inline void net_pkt_set_vlan_priority(struct net_pkt *pkt,
881 					     uint8_t priority)
882 {
883 	pkt->vlan_tci = net_eth_vlan_set_pcp(pkt->vlan_tci, priority);
884 }
885 
net_pkt_vlan_dei(struct net_pkt * pkt)886 static inline bool net_pkt_vlan_dei(struct net_pkt *pkt)
887 {
888 	return net_eth_vlan_get_dei(pkt->vlan_tci);
889 }
890 
net_pkt_set_vlan_dei(struct net_pkt * pkt,bool dei)891 static inline void net_pkt_set_vlan_dei(struct net_pkt *pkt, bool dei)
892 {
893 	pkt->vlan_tci = net_eth_vlan_set_dei(pkt->vlan_tci, dei);
894 }
895 
net_pkt_set_vlan_tci(struct net_pkt * pkt,uint16_t tci)896 static inline void net_pkt_set_vlan_tci(struct net_pkt *pkt, uint16_t tci)
897 {
898 	pkt->vlan_tci = tci;
899 }
900 
net_pkt_vlan_tci(struct net_pkt * pkt)901 static inline uint16_t net_pkt_vlan_tci(struct net_pkt *pkt)
902 {
903 	return pkt->vlan_tci;
904 }
905 #else
net_pkt_vlan_tag(struct net_pkt * pkt)906 static inline uint16_t net_pkt_vlan_tag(struct net_pkt *pkt)
907 {
908 	return NET_VLAN_TAG_UNSPEC;
909 }
910 
net_pkt_set_vlan_tag(struct net_pkt * pkt,uint16_t tag)911 static inline void net_pkt_set_vlan_tag(struct net_pkt *pkt, uint16_t tag)
912 {
913 	ARG_UNUSED(pkt);
914 	ARG_UNUSED(tag);
915 }
916 
net_pkt_vlan_priority(struct net_pkt * pkt)917 static inline uint8_t net_pkt_vlan_priority(struct net_pkt *pkt)
918 {
919 	ARG_UNUSED(pkt);
920 	return 0;
921 }
922 
net_pkt_vlan_dei(struct net_pkt * pkt)923 static inline bool net_pkt_vlan_dei(struct net_pkt *pkt)
924 {
925 	return false;
926 }
927 
net_pkt_set_vlan_dei(struct net_pkt * pkt,bool dei)928 static inline void net_pkt_set_vlan_dei(struct net_pkt *pkt, bool dei)
929 {
930 	ARG_UNUSED(pkt);
931 	ARG_UNUSED(dei);
932 }
933 
net_pkt_vlan_tci(struct net_pkt * pkt)934 static inline uint16_t net_pkt_vlan_tci(struct net_pkt *pkt)
935 {
936 	return NET_VLAN_TAG_UNSPEC; /* assumes priority is 0 */
937 }
938 
net_pkt_set_vlan_tci(struct net_pkt * pkt,uint16_t tci)939 static inline void net_pkt_set_vlan_tci(struct net_pkt *pkt, uint16_t tci)
940 {
941 	ARG_UNUSED(pkt);
942 	ARG_UNUSED(tci);
943 }
944 #endif
945 
946 #if defined(CONFIG_NET_PKT_TIMESTAMP)
net_pkt_timestamp(struct net_pkt * pkt)947 static inline struct net_ptp_time *net_pkt_timestamp(struct net_pkt *pkt)
948 {
949 	return &pkt->timestamp;
950 }
951 
net_pkt_set_timestamp(struct net_pkt * pkt,struct net_ptp_time * timestamp)952 static inline void net_pkt_set_timestamp(struct net_pkt *pkt,
953 					 struct net_ptp_time *timestamp)
954 {
955 	pkt->timestamp.second = timestamp->second;
956 	pkt->timestamp.nanosecond = timestamp->nanosecond;
957 }
958 #else
net_pkt_timestamp(struct net_pkt * pkt)959 static inline struct net_ptp_time *net_pkt_timestamp(struct net_pkt *pkt)
960 {
961 	ARG_UNUSED(pkt);
962 
963 	return NULL;
964 }
965 
net_pkt_set_timestamp(struct net_pkt * pkt,struct net_ptp_time * timestamp)966 static inline void net_pkt_set_timestamp(struct net_pkt *pkt,
967 					 struct net_ptp_time *timestamp)
968 {
969 	ARG_UNUSED(pkt);
970 	ARG_UNUSED(timestamp);
971 }
972 #endif /* CONFIG_NET_PKT_TIMESTAMP */
973 
974 #if defined(CONFIG_NET_PKT_RXTIME_STATS) || defined(CONFIG_NET_PKT_TXTIME_STATS)
net_pkt_create_time(struct net_pkt * pkt)975 static inline uint32_t net_pkt_create_time(struct net_pkt *pkt)
976 {
977 	return pkt->create_time;
978 }
979 
net_pkt_set_create_time(struct net_pkt * pkt,uint32_t create_time)980 static inline void net_pkt_set_create_time(struct net_pkt *pkt,
981 					   uint32_t create_time)
982 {
983 	pkt->create_time = create_time;
984 }
985 #else
net_pkt_create_time(struct net_pkt * pkt)986 static inline uint32_t net_pkt_create_time(struct net_pkt *pkt)
987 {
988 	ARG_UNUSED(pkt);
989 
990 	return 0U;
991 }
992 
net_pkt_set_create_time(struct net_pkt * pkt,uint32_t create_time)993 static inline void net_pkt_set_create_time(struct net_pkt *pkt,
994 					   uint32_t create_time)
995 {
996 	ARG_UNUSED(pkt);
997 	ARG_UNUSED(create_time);
998 }
999 #endif /* CONFIG_NET_PKT_RXTIME_STATS || CONFIG_NET_PKT_TXTIME_STATS */
1000 
1001 #if defined(CONFIG_NET_PKT_TXTIME)
net_pkt_txtime(struct net_pkt * pkt)1002 static inline uint64_t net_pkt_txtime(struct net_pkt *pkt)
1003 {
1004 	return pkt->txtime;
1005 }
1006 
net_pkt_set_txtime(struct net_pkt * pkt,uint64_t txtime)1007 static inline void net_pkt_set_txtime(struct net_pkt *pkt, uint64_t txtime)
1008 {
1009 	pkt->txtime = txtime;
1010 }
1011 #else
net_pkt_txtime(struct net_pkt * pkt)1012 static inline uint64_t net_pkt_txtime(struct net_pkt *pkt)
1013 {
1014 	ARG_UNUSED(pkt);
1015 
1016 	return 0;
1017 }
1018 
net_pkt_set_txtime(struct net_pkt * pkt,uint64_t txtime)1019 static inline void net_pkt_set_txtime(struct net_pkt *pkt, uint64_t txtime)
1020 {
1021 	ARG_UNUSED(pkt);
1022 	ARG_UNUSED(txtime);
1023 }
1024 #endif /* CONFIG_NET_PKT_TXTIME */
1025 
1026 #if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL) || \
1027 	defined(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)
net_pkt_stats_tick(struct net_pkt * pkt)1028 static inline uint32_t *net_pkt_stats_tick(struct net_pkt *pkt)
1029 {
1030 	return pkt->detail.stat;
1031 }
1032 
net_pkt_stats_tick_count(struct net_pkt * pkt)1033 static inline int net_pkt_stats_tick_count(struct net_pkt *pkt)
1034 {
1035 	return pkt->detail.count;
1036 }
1037 
net_pkt_stats_tick_reset(struct net_pkt * pkt)1038 static inline void net_pkt_stats_tick_reset(struct net_pkt *pkt)
1039 {
1040 	memset(&pkt->detail, 0, sizeof(pkt->detail));
1041 }
1042 
net_pkt_set_stats_tick(struct net_pkt * pkt,uint32_t tick)1043 static ALWAYS_INLINE void net_pkt_set_stats_tick(struct net_pkt *pkt,
1044 						 uint32_t tick)
1045 {
1046 	if (pkt->detail.count >= NET_PKT_DETAIL_STATS_COUNT) {
1047 		NET_ERR("Detail stats count overflow (%d >= %d)",
1048 			pkt->detail.count, NET_PKT_DETAIL_STATS_COUNT);
1049 		return;
1050 	}
1051 
1052 	pkt->detail.stat[pkt->detail.count++] = tick;
1053 }
1054 
1055 #define net_pkt_set_tx_stats_tick(pkt, tick) net_pkt_set_stats_tick(pkt, tick)
1056 #define net_pkt_set_rx_stats_tick(pkt, tick) net_pkt_set_stats_tick(pkt, tick)
1057 #else
net_pkt_stats_tick(struct net_pkt * pkt)1058 static inline uint32_t *net_pkt_stats_tick(struct net_pkt *pkt)
1059 {
1060 	ARG_UNUSED(pkt);
1061 
1062 	return NULL;
1063 }
1064 
net_pkt_stats_tick_count(struct net_pkt * pkt)1065 static inline int net_pkt_stats_tick_count(struct net_pkt *pkt)
1066 {
1067 	ARG_UNUSED(pkt);
1068 
1069 	return 0;
1070 }
1071 
net_pkt_stats_tick_reset(struct net_pkt * pkt)1072 static inline void net_pkt_stats_tick_reset(struct net_pkt *pkt)
1073 {
1074 	ARG_UNUSED(pkt);
1075 }
1076 
net_pkt_set_stats_tick(struct net_pkt * pkt,uint32_t tick)1077 static inline void net_pkt_set_stats_tick(struct net_pkt *pkt, uint32_t tick)
1078 {
1079 	ARG_UNUSED(pkt);
1080 	ARG_UNUSED(tick);
1081 }
1082 
1083 #define net_pkt_set_tx_stats_tick(pkt, tick)
1084 #define net_pkt_set_rx_stats_tick(pkt, tick)
1085 #endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL ||
1086 	  CONFIG_NET_PKT_RXTIME_STATS_DETAIL */
1087 
net_pkt_get_len(struct net_pkt * pkt)1088 static inline size_t net_pkt_get_len(struct net_pkt *pkt)
1089 {
1090 	return net_buf_frags_len(pkt->frags);
1091 }
1092 
net_pkt_data(struct net_pkt * pkt)1093 static inline uint8_t *net_pkt_data(struct net_pkt *pkt)
1094 {
1095 	return pkt->frags->data;
1096 }
1097 
net_pkt_ip_data(struct net_pkt * pkt)1098 static inline uint8_t *net_pkt_ip_data(struct net_pkt *pkt)
1099 {
1100 	return pkt->frags->data;
1101 }
1102 
net_pkt_is_empty(struct net_pkt * pkt)1103 static inline bool net_pkt_is_empty(struct net_pkt *pkt)
1104 {
1105 	return !pkt->buffer || !net_pkt_data(pkt) || pkt->buffer->len == 0;
1106 }
1107 
net_pkt_lladdr_src(struct net_pkt * pkt)1108 static inline struct net_linkaddr *net_pkt_lladdr_src(struct net_pkt *pkt)
1109 {
1110 	return &pkt->lladdr_src;
1111 }
1112 
net_pkt_lladdr_dst(struct net_pkt * pkt)1113 static inline struct net_linkaddr *net_pkt_lladdr_dst(struct net_pkt *pkt)
1114 {
1115 	return &pkt->lladdr_dst;
1116 }
1117 
net_pkt_lladdr_swap(struct net_pkt * pkt)1118 static inline void net_pkt_lladdr_swap(struct net_pkt *pkt)
1119 {
1120 	uint8_t *addr = net_pkt_lladdr_src(pkt)->addr;
1121 
1122 	net_pkt_lladdr_src(pkt)->addr = net_pkt_lladdr_dst(pkt)->addr;
1123 	net_pkt_lladdr_dst(pkt)->addr = addr;
1124 }
1125 
net_pkt_lladdr_clear(struct net_pkt * pkt)1126 static inline void net_pkt_lladdr_clear(struct net_pkt *pkt)
1127 {
1128 	net_pkt_lladdr_src(pkt)->addr = NULL;
1129 	net_pkt_lladdr_src(pkt)->len = 0U;
1130 }
1131 
net_pkt_ll_proto_type(struct net_pkt * pkt)1132 static inline uint16_t net_pkt_ll_proto_type(struct net_pkt *pkt)
1133 {
1134 	return pkt->ll_proto_type;
1135 }
1136 
net_pkt_set_ll_proto_type(struct net_pkt * pkt,uint16_t type)1137 static inline void net_pkt_set_ll_proto_type(struct net_pkt *pkt, uint16_t type)
1138 {
1139 	pkt->ll_proto_type = type;
1140 }
1141 
1142 #if defined(CONFIG_NET_IPV4_AUTO)
net_pkt_ipv4_auto(struct net_pkt * pkt)1143 static inline bool net_pkt_ipv4_auto(struct net_pkt *pkt)
1144 {
1145 	return pkt->ipv4_auto_arp_msg;
1146 }
1147 
net_pkt_set_ipv4_auto(struct net_pkt * pkt,bool is_auto_arp_msg)1148 static inline void net_pkt_set_ipv4_auto(struct net_pkt *pkt,
1149 					 bool is_auto_arp_msg)
1150 {
1151 	pkt->ipv4_auto_arp_msg = is_auto_arp_msg;
1152 }
1153 #else /* CONFIG_NET_IPV4_AUTO */
net_pkt_ipv4_auto(struct net_pkt * pkt)1154 static inline bool net_pkt_ipv4_auto(struct net_pkt *pkt)
1155 {
1156 	ARG_UNUSED(pkt);
1157 
1158 	return false;
1159 }
1160 
net_pkt_set_ipv4_auto(struct net_pkt * pkt,bool is_auto_arp_msg)1161 static inline void net_pkt_set_ipv4_auto(struct net_pkt *pkt,
1162 					 bool is_auto_arp_msg)
1163 {
1164 	ARG_UNUSED(pkt);
1165 	ARG_UNUSED(is_auto_arp_msg);
1166 }
1167 #endif /* CONFIG_NET_IPV4_AUTO */
1168 
1169 #if defined(CONFIG_NET_LLDP)
net_pkt_is_lldp(struct net_pkt * pkt)1170 static inline bool net_pkt_is_lldp(struct net_pkt *pkt)
1171 {
1172 	return pkt->lldp_pkt;
1173 }
1174 
net_pkt_set_lldp(struct net_pkt * pkt,bool is_lldp)1175 static inline void net_pkt_set_lldp(struct net_pkt *pkt, bool is_lldp)
1176 {
1177 	pkt->lldp_pkt = is_lldp;
1178 }
1179 #else
net_pkt_is_lldp(struct net_pkt * pkt)1180 static inline bool net_pkt_is_lldp(struct net_pkt *pkt)
1181 {
1182 	ARG_UNUSED(pkt);
1183 
1184 	return false;
1185 }
1186 
net_pkt_set_lldp(struct net_pkt * pkt,bool is_lldp)1187 static inline void net_pkt_set_lldp(struct net_pkt *pkt, bool is_lldp)
1188 {
1189 	ARG_UNUSED(pkt);
1190 	ARG_UNUSED(is_lldp);
1191 }
1192 #endif /* CONFIG_NET_LLDP */
1193 
1194 #if defined(CONFIG_NET_L2_PPP)
net_pkt_is_ppp(struct net_pkt * pkt)1195 static inline bool net_pkt_is_ppp(struct net_pkt *pkt)
1196 {
1197 	return pkt->ppp_msg;
1198 }
1199 
net_pkt_set_ppp(struct net_pkt * pkt,bool is_ppp_msg)1200 static inline void net_pkt_set_ppp(struct net_pkt *pkt,
1201 				   bool is_ppp_msg)
1202 {
1203 	pkt->ppp_msg = is_ppp_msg;
1204 }
1205 #else /* CONFIG_NET_L2_PPP */
net_pkt_is_ppp(struct net_pkt * pkt)1206 static inline bool net_pkt_is_ppp(struct net_pkt *pkt)
1207 {
1208 	ARG_UNUSED(pkt);
1209 
1210 	return false;
1211 }
1212 
net_pkt_set_ppp(struct net_pkt * pkt,bool is_ppp_msg)1213 static inline void net_pkt_set_ppp(struct net_pkt *pkt,
1214 				   bool is_ppp_msg)
1215 {
1216 	ARG_UNUSED(pkt);
1217 	ARG_UNUSED(is_ppp_msg);
1218 }
1219 #endif /* CONFIG_NET_L2_PPP */
1220 
1221 #if defined(NET_PKT_HAS_CONTROL_BLOCK)
net_pkt_cb(struct net_pkt * pkt)1222 static inline void *net_pkt_cb(struct net_pkt *pkt)
1223 {
1224 	return &pkt->cb;
1225 }
1226 #else
net_pkt_cb(struct net_pkt * pkt)1227 static inline void *net_pkt_cb(struct net_pkt *pkt)
1228 {
1229 	ARG_UNUSED(pkt);
1230 
1231 	return NULL;
1232 }
1233 #endif
1234 
1235 #define NET_IPV6_HDR(pkt) ((struct net_ipv6_hdr *)net_pkt_ip_data(pkt))
1236 #define NET_IPV4_HDR(pkt) ((struct net_ipv4_hdr *)net_pkt_ip_data(pkt))
1237 
net_pkt_set_src_ipv6_addr(struct net_pkt * pkt)1238 static inline void net_pkt_set_src_ipv6_addr(struct net_pkt *pkt)
1239 {
1240 	net_if_ipv6_select_src_addr(net_context_get_iface(
1241 					    net_pkt_context(pkt)),
1242 				    (struct in6_addr *)NET_IPV6_HDR(pkt)->src);
1243 }
1244 
net_pkt_set_overwrite(struct net_pkt * pkt,bool overwrite)1245 static inline void net_pkt_set_overwrite(struct net_pkt *pkt, bool overwrite)
1246 {
1247 	pkt->overwrite = overwrite;
1248 }
1249 
net_pkt_is_being_overwritten(struct net_pkt * pkt)1250 static inline bool net_pkt_is_being_overwritten(struct net_pkt *pkt)
1251 {
1252 	return pkt->overwrite;
1253 }
1254 
1255 #ifdef CONFIG_NET_PKT_FILTER
1256 
1257 bool net_pkt_filter_send_ok(struct net_pkt *pkt);
1258 bool net_pkt_filter_recv_ok(struct net_pkt *pkt);
1259 
1260 #else
1261 
net_pkt_filter_send_ok(struct net_pkt * pkt)1262 static inline bool net_pkt_filter_send_ok(struct net_pkt *pkt)
1263 {
1264 	ARG_UNUSED(pkt);
1265 
1266 	return true;
1267 }
1268 
net_pkt_filter_recv_ok(struct net_pkt * pkt)1269 static inline bool net_pkt_filter_recv_ok(struct net_pkt *pkt)
1270 {
1271 	ARG_UNUSED(pkt);
1272 
1273 	return true;
1274 }
1275 
1276 #endif /* CONFIG_NET_PKT_FILTER */
1277 
1278 /* @endcond */
1279 
1280 /**
1281  * @brief Create a net_pkt slab
1282  *
1283  * A net_pkt slab is used to store meta-information about
1284  * network packets. It must be coupled with a data fragment pool
1285  * (:c:macro:`NET_PKT_DATA_POOL_DEFINE`) used to store the actual
1286  * packet data. The macro can be used by an application to define
1287  * additional custom per-context TX packet slabs (see
1288  * :c:func:`net_context_setup_pools`).
1289  *
1290  * @param name Name of the slab.
1291  * @param count Number of net_pkt in this slab.
1292  */
1293 #define NET_PKT_SLAB_DEFINE(name, count)				\
1294 	K_MEM_SLAB_DEFINE(name, sizeof(struct net_pkt), count, 4)
1295 
1296 /* Backward compatibility macro */
1297 #define NET_PKT_TX_SLAB_DEFINE(name, count) NET_PKT_SLAB_DEFINE(name, count)
1298 
1299 /**
1300  * @brief Create a data fragment net_buf pool
1301  *
1302  * A net_buf pool is used to store actual data for
1303  * network packets. It must be coupled with a net_pkt slab
1304  * (:c:macro:`NET_PKT_SLAB_DEFINE`) used to store the packet
1305  * meta-information. The macro can be used by an application to
1306  * define additional custom per-context TX packet pools (see
1307  * :c:func:`net_context_setup_pools`).
1308  *
1309  * @param name Name of the pool.
1310  * @param count Number of net_buf in this pool.
1311  */
1312 #define NET_PKT_DATA_POOL_DEFINE(name, count)				\
1313 	NET_BUF_POOL_DEFINE(name, count, CONFIG_NET_BUF_DATA_SIZE,	\
1314 			    0, NULL)
1315 
1316 /** @cond INTERNAL_HIDDEN */
1317 
1318 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC) || \
1319 	(CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG)
1320 #define NET_PKT_DEBUG_ENABLED
1321 #endif
1322 
1323 #if defined(NET_PKT_DEBUG_ENABLED)
1324 
1325 /* Debug versions of the net_pkt functions that are used when tracking
1326  * buffer usage.
1327  */
1328 
1329 struct net_buf *net_pkt_get_reserve_data_debug(struct net_buf_pool *pool,
1330 					       size_t min_len,
1331 					       k_timeout_t timeout,
1332 					       const char *caller,
1333 					       int line);
1334 
1335 #define net_pkt_get_reserve_data(pool, min_len, timeout)				\
1336 	net_pkt_get_reserve_data_debug(pool, min_len, timeout, __func__, __LINE__)
1337 
1338 struct net_buf *net_pkt_get_reserve_rx_data_debug(size_t min_len,
1339 						  k_timeout_t timeout,
1340 						  const char *caller,
1341 						  int line);
1342 #define net_pkt_get_reserve_rx_data(min_len, timeout)				\
1343 	net_pkt_get_reserve_rx_data_debug(min_len, timeout, __func__, __LINE__)
1344 
1345 struct net_buf *net_pkt_get_reserve_tx_data_debug(size_t min_len,
1346 						  k_timeout_t timeout,
1347 						  const char *caller,
1348 						  int line);
1349 #define net_pkt_get_reserve_tx_data(min_len, timeout)				\
1350 	net_pkt_get_reserve_tx_data_debug(min_len, timeout, __func__, __LINE__)
1351 
1352 struct net_buf *net_pkt_get_frag_debug(struct net_pkt *pkt, size_t min_len,
1353 				       k_timeout_t timeout,
1354 				       const char *caller, int line);
1355 #define net_pkt_get_frag(pkt, min_len, timeout)					\
1356 	net_pkt_get_frag_debug(pkt, min_len, timeout, __func__, __LINE__)
1357 
1358 void net_pkt_unref_debug(struct net_pkt *pkt, const char *caller, int line);
1359 #define net_pkt_unref(pkt) net_pkt_unref_debug(pkt, __func__, __LINE__)
1360 
1361 struct net_pkt *net_pkt_ref_debug(struct net_pkt *pkt, const char *caller,
1362 				  int line);
1363 #define net_pkt_ref(pkt) net_pkt_ref_debug(pkt, __func__, __LINE__)
1364 
1365 struct net_buf *net_pkt_frag_ref_debug(struct net_buf *frag,
1366 				       const char *caller, int line);
1367 #define net_pkt_frag_ref(frag) net_pkt_frag_ref_debug(frag, __func__, __LINE__)
1368 
1369 void net_pkt_frag_unref_debug(struct net_buf *frag,
1370 			      const char *caller, int line);
1371 #define net_pkt_frag_unref(frag)				\
1372 	net_pkt_frag_unref_debug(frag, __func__, __LINE__)
1373 
1374 struct net_buf *net_pkt_frag_del_debug(struct net_pkt *pkt,
1375 				       struct net_buf *parent,
1376 				       struct net_buf *frag,
1377 				       const char *caller, int line);
1378 #define net_pkt_frag_del(pkt, parent, frag)				\
1379 	net_pkt_frag_del_debug(pkt, parent, frag, __func__, __LINE__)
1380 
1381 void net_pkt_frag_add_debug(struct net_pkt *pkt, struct net_buf *frag,
1382 			    const char *caller, int line);
1383 #define net_pkt_frag_add(pkt, frag)				\
1384 	net_pkt_frag_add_debug(pkt, frag, __func__, __LINE__)
1385 
1386 void net_pkt_frag_insert_debug(struct net_pkt *pkt, struct net_buf *frag,
1387 			       const char *caller, int line);
1388 #define net_pkt_frag_insert(pkt, frag)					\
1389 	net_pkt_frag_insert_debug(pkt, frag, __func__, __LINE__)
1390 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC ||
1391 	* CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
1392 	*/
1393 /** @endcond */
1394 
1395 /**
1396  * @brief Print fragment list and the fragment sizes
1397  *
1398  * @details Only available if debugging is activated.
1399  *
1400  * @param pkt Network pkt.
1401  */
1402 #if defined(NET_PKT_DEBUG_ENABLED)
1403 void net_pkt_print_frags(struct net_pkt *pkt);
1404 #else
1405 #define net_pkt_print_frags(pkt)
1406 #endif
1407 
1408 /**
1409  * @brief Get RX DATA buffer from pool.
1410  * Normally you should use net_pkt_get_frag() instead.
1411  *
1412  * @details Normally this version is not useful for applications
1413  * but is mainly used by network fragmentation code.
1414  *
1415  * @param min_len Minimum length of the requested fragment.
1416  * @param timeout Affects the action taken should the net buf pool be empty.
1417  *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
1418  *        wait as long as necessary. Otherwise, wait up to the specified time.
1419  *
1420  * @return Network buffer if successful, NULL otherwise.
1421  */
1422 #if !defined(NET_PKT_DEBUG_ENABLED)
1423 struct net_buf *net_pkt_get_reserve_rx_data(size_t min_len, k_timeout_t timeout);
1424 #endif
1425 
1426 /**
1427  * @brief Get TX DATA buffer from pool.
1428  * Normally you should use net_pkt_get_frag() instead.
1429  *
1430  * @details Normally this version is not useful for applications
1431  * but is mainly used by network fragmentation code.
1432  *
1433  * @param min_len Minimum length of the requested fragment.
1434  * @param timeout Affects the action taken should the net buf pool be empty.
1435  *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
1436  *        wait as long as necessary. Otherwise, wait up to the specified time.
1437  *
1438  * @return Network buffer if successful, NULL otherwise.
1439  */
1440 #if !defined(NET_PKT_DEBUG_ENABLED)
1441 struct net_buf *net_pkt_get_reserve_tx_data(size_t min_len, k_timeout_t timeout);
1442 #endif
1443 
1444 /**
1445  * @brief Get a data fragment that might be from user specific
1446  * buffer pool or from global DATA pool.
1447  *
1448  * @param pkt Network packet.
1449  * @param min_len Minimum length of the requested fragment.
1450  * @param timeout Affects the action taken should the net buf pool be empty.
1451  *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
1452  *        wait as long as necessary. Otherwise, wait up to the specified time.
1453  *
1454  * @return Network buffer if successful, NULL otherwise.
1455  */
1456 #if !defined(NET_PKT_DEBUG_ENABLED)
1457 struct net_buf *net_pkt_get_frag(struct net_pkt *pkt, size_t min_len,
1458 				 k_timeout_t timeout);
1459 #endif
1460 
1461 /**
1462  * @brief Place packet back into the available packets slab
1463  *
1464  * @details Releases the packet to other use. This needs to be
1465  * called by application after it has finished with the packet.
1466  *
1467  * @param pkt Network packet to release.
1468  *
1469  */
1470 #if !defined(NET_PKT_DEBUG_ENABLED)
1471 void net_pkt_unref(struct net_pkt *pkt);
1472 #endif
1473 
1474 /**
1475  * @brief Increase the packet ref count
1476  *
1477  * @details Mark the packet to be used still.
1478  *
1479  * @param pkt Network packet to ref.
1480  *
1481  * @return Network packet if successful, NULL otherwise.
1482  */
1483 #if !defined(NET_PKT_DEBUG_ENABLED)
1484 struct net_pkt *net_pkt_ref(struct net_pkt *pkt);
1485 #endif
1486 
1487 /**
1488  * @brief Increase the packet fragment ref count
1489  *
1490  * @details Mark the fragment to be used still.
1491  *
1492  * @param frag Network fragment to ref.
1493  *
1494  * @return a pointer on the referenced Network fragment.
1495  */
1496 #if !defined(NET_PKT_DEBUG_ENABLED)
1497 struct net_buf *net_pkt_frag_ref(struct net_buf *frag);
1498 #endif
1499 
1500 /**
1501  * @brief Decrease the packet fragment ref count
1502  *
1503  * @param frag Network fragment to unref.
1504  */
1505 #if !defined(NET_PKT_DEBUG_ENABLED)
1506 void net_pkt_frag_unref(struct net_buf *frag);
1507 #endif
1508 
1509 /**
1510  * @brief Delete existing fragment from a packet
1511  *
1512  * @param pkt Network packet from which frag belongs to.
1513  * @param parent parent fragment of frag, or NULL if none.
1514  * @param frag Fragment to delete.
1515  *
1516  * @return Pointer to the following fragment, or NULL if it had no
1517  *         further fragments.
1518  */
1519 #if !defined(NET_PKT_DEBUG_ENABLED)
1520 struct net_buf *net_pkt_frag_del(struct net_pkt *pkt,
1521 				 struct net_buf *parent,
1522 				 struct net_buf *frag);
1523 #endif
1524 
1525 /**
1526  * @brief Add a fragment to a packet at the end of its fragment list
1527  *
1528  * @param pkt pkt Network packet where to add the fragment
1529  * @param frag Fragment to add
1530  */
1531 #if !defined(NET_PKT_DEBUG_ENABLED)
1532 void net_pkt_frag_add(struct net_pkt *pkt, struct net_buf *frag);
1533 #endif
1534 
1535 /**
1536  * @brief Insert a fragment to a packet at the beginning of its fragment list
1537  *
1538  * @param pkt pkt Network packet where to insert the fragment
1539  * @param frag Fragment to insert
1540  */
1541 #if !defined(NET_PKT_DEBUG_ENABLED)
1542 void net_pkt_frag_insert(struct net_pkt *pkt, struct net_buf *frag);
1543 #endif
1544 
1545 /**
1546  * @brief Compact the fragment list of a packet.
1547  *
1548  * @details After this there is no more any free space in individual fragments.
1549  * @param pkt Network packet.
1550  */
1551 void net_pkt_compact(struct net_pkt *pkt);
1552 
1553 /**
1554  * @brief Get information about predefined RX, TX and DATA pools.
1555  *
1556  * @param rx Pointer to RX pool is returned.
1557  * @param tx Pointer to TX pool is returned.
1558  * @param rx_data Pointer to RX DATA pool is returned.
1559  * @param tx_data Pointer to TX DATA pool is returned.
1560  */
1561 void net_pkt_get_info(struct k_mem_slab **rx,
1562 		      struct k_mem_slab **tx,
1563 		      struct net_buf_pool **rx_data,
1564 		      struct net_buf_pool **tx_data);
1565 
1566 /** @cond INTERNAL_HIDDEN */
1567 
1568 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
1569 /**
1570  * @brief Debug helper to print out the buffer allocations
1571  */
1572 void net_pkt_print(void);
1573 
1574 typedef void (*net_pkt_allocs_cb_t)(struct net_pkt *pkt,
1575 				    struct net_buf *buf,
1576 				    const char *func_alloc,
1577 				    int line_alloc,
1578 				    const char *func_free,
1579 				    int line_free,
1580 				    bool in_use,
1581 				    void *user_data);
1582 
1583 void net_pkt_allocs_foreach(net_pkt_allocs_cb_t cb, void *user_data);
1584 
1585 const char *net_pkt_slab2str(struct k_mem_slab *slab);
1586 const char *net_pkt_pool2str(struct net_buf_pool *pool);
1587 
1588 #else
1589 #define net_pkt_print(...)
1590 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC */
1591 
1592 /* New allocator, and API are defined below.
1593  * This will be simpler when time will come to get rid of former API above.
1594  */
1595 #if defined(NET_PKT_DEBUG_ENABLED)
1596 
1597 struct net_pkt *net_pkt_alloc_debug(k_timeout_t timeout,
1598 				    const char *caller, int line);
1599 #define net_pkt_alloc(_timeout)					\
1600 	net_pkt_alloc_debug(_timeout, __func__, __LINE__)
1601 
1602 struct net_pkt *net_pkt_alloc_from_slab_debug(struct k_mem_slab *slab,
1603 					      k_timeout_t timeout,
1604 					      const char *caller, int line);
1605 #define net_pkt_alloc_from_slab(_slab, _timeout)			\
1606 	net_pkt_alloc_from_slab_debug(_slab, _timeout, __func__, __LINE__)
1607 
1608 struct net_pkt *net_pkt_rx_alloc_debug(k_timeout_t timeout,
1609 				       const char *caller, int line);
1610 #define net_pkt_rx_alloc(_timeout)				\
1611 	net_pkt_rx_alloc_debug(_timeout, __func__, __LINE__)
1612 
1613 struct net_pkt *net_pkt_alloc_on_iface_debug(struct net_if *iface,
1614 					     k_timeout_t timeout,
1615 					     const char *caller,
1616 					     int line);
1617 #define net_pkt_alloc_on_iface(_iface, _timeout)			\
1618 	net_pkt_alloc_on_iface_debug(_iface, _timeout, __func__, __LINE__)
1619 
1620 struct net_pkt *net_pkt_rx_alloc_on_iface_debug(struct net_if *iface,
1621 						k_timeout_t timeout,
1622 						const char *caller,
1623 						int line);
1624 #define net_pkt_rx_alloc_on_iface(_iface, _timeout)			\
1625 	net_pkt_rx_alloc_on_iface_debug(_iface, _timeout,		\
1626 					__func__, __LINE__)
1627 
1628 int net_pkt_alloc_buffer_debug(struct net_pkt *pkt,
1629 			       size_t size,
1630 			       enum net_ip_protocol proto,
1631 			       k_timeout_t timeout,
1632 			       const char *caller, int line);
1633 #define net_pkt_alloc_buffer(_pkt, _size, _proto, _timeout)		\
1634 	net_pkt_alloc_buffer_debug(_pkt, _size, _proto, _timeout,	\
1635 				   __func__, __LINE__)
1636 
1637 struct net_pkt *net_pkt_alloc_with_buffer_debug(struct net_if *iface,
1638 						size_t size,
1639 						sa_family_t family,
1640 						enum net_ip_protocol proto,
1641 						k_timeout_t timeout,
1642 						const char *caller,
1643 						int line);
1644 #define net_pkt_alloc_with_buffer(_iface, _size, _family,		\
1645 				  _proto, _timeout)			\
1646 	net_pkt_alloc_with_buffer_debug(_iface, _size, _family,		\
1647 					_proto, _timeout,		\
1648 					__func__, __LINE__)
1649 
1650 struct net_pkt *net_pkt_rx_alloc_with_buffer_debug(struct net_if *iface,
1651 						   size_t size,
1652 						   sa_family_t family,
1653 						   enum net_ip_protocol proto,
1654 						   k_timeout_t timeout,
1655 						   const char *caller,
1656 						   int line);
1657 #define net_pkt_rx_alloc_with_buffer(_iface, _size, _family,		\
1658 				     _proto, _timeout)			\
1659 	net_pkt_rx_alloc_with_buffer_debug(_iface, _size, _family,	\
1660 					   _proto, _timeout,		\
1661 					   __func__, __LINE__)
1662 #endif /* NET_PKT_DEBUG_ENABLED */
1663 /** @endcond */
1664 
1665 /**
1666  * @brief Allocate an initialized net_pkt
1667  *
1668  * @details for the time being, 2 pools are used. One for TX and one for RX.
1669  *          This allocator has to be used for TX.
1670  *
1671  * @param timeout Maximum time to wait for an allocation.
1672  *
1673  * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1674  */
1675 #if !defined(NET_PKT_DEBUG_ENABLED)
1676 struct net_pkt *net_pkt_alloc(k_timeout_t timeout);
1677 #endif
1678 
1679 /**
1680  * @brief Allocate an initialized net_pkt from a specific slab
1681  *
1682  * @details unlike net_pkt_alloc() which uses core slabs, this one will use
1683  *          an external slab (see NET_PKT_SLAB_DEFINE()).
1684  *          Do _not_ use it unless you know what you are doing. Basically, only
1685  *          net_context should be using this, in order to allocate packet and
1686  *          then buffer on its local slab/pool (if any).
 *
 * @param slab    The slab to use for allocating the packet
 * @param timeout Maximum time to wait for an allocation.
 *
 * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
 */
#if !defined(NET_PKT_DEBUG_ENABLED)
struct net_pkt *net_pkt_alloc_from_slab(struct k_mem_slab *slab,
					k_timeout_t timeout);
#endif

/**
 * @brief Allocate an initialized net_pkt for RX
 *
 * @details For the time being, two pools are used: one for TX and one for RX.
 *          This allocator has to be used for RX.
 *
 * @param timeout Maximum time to wait for an allocation.
 *
 * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
 */
#if !defined(NET_PKT_DEBUG_ENABLED)
struct net_pkt *net_pkt_rx_alloc(k_timeout_t timeout);
#endif

/**
 * @brief Allocate a network packet for a specific network interface.
 *
 * @param iface The network interface the packet is supposed to go through.
 * @param timeout Maximum time to wait for an allocation.
 *
 * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
 */
#if !defined(NET_PKT_DEBUG_ENABLED)
struct net_pkt *net_pkt_alloc_on_iface(struct net_if *iface,
				       k_timeout_t timeout);

/* Same as above but specifically for RX packet */
struct net_pkt *net_pkt_rx_alloc_on_iface(struct net_if *iface,
					  k_timeout_t timeout);
#endif

/**
 * @brief Allocate buffer for a net_pkt
 *
 * @details This allocator takes into account the space necessary for headers,
 *          the MTU, and any existing buffer. Beware that, due to all these
 *          criteria, the allocated size might be smaller or larger than the
 *          requested one.
 *
 * @param pkt     The network packet requiring buffer to be allocated.
 * @param size    The size of buffer being requested.
 * @param proto   The IP protocol type (can be 0 for none).
 * @param timeout Maximum time to wait for an allocation.
 *
 * @return 0 on success, negative errno code otherwise.
 */
#if !defined(NET_PKT_DEBUG_ENABLED)
int net_pkt_alloc_buffer(struct net_pkt *pkt,
			 size_t size,
			 enum net_ip_protocol proto,
			 k_timeout_t timeout);
#endif
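
/* Illustrative sketch (not part of the API): a typical TX path first
 * allocates the packet on an interface and then requests buffer space for
 * the payload plus protocol headers. The interface pointer my_iface and the
 * 128-byte payload size are placeholders.
 *
 *	struct net_pkt *pkt = net_pkt_alloc_on_iface(my_iface, K_MSEC(100));
 *
 *	if (pkt == NULL) {
 *		return -ENOMEM;
 *	}
 *
 *	if (net_pkt_alloc_buffer(pkt, 128, IPPROTO_UDP, K_MSEC(100)) < 0) {
 *		net_pkt_unref(pkt);
 *		return -ENOMEM;
 *	}
 */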

/**
 * @brief Allocate a network packet and buffer at once
 *
 * @param iface   The network interface the packet is supposed to go through.
 * @param size    The size of buffer.
 * @param family  The family to which the packet belongs.
 * @param proto   The IP protocol type (can be 0 for none).
 * @param timeout Maximum time to wait for an allocation.
 *
 * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
 */
#if !defined(NET_PKT_DEBUG_ENABLED)
struct net_pkt *net_pkt_alloc_with_buffer(struct net_if *iface,
					  size_t size,
					  sa_family_t family,
					  enum net_ip_protocol proto,
					  k_timeout_t timeout);

/* Same as above but specifically for RX packet */
struct net_pkt *net_pkt_rx_alloc_with_buffer(struct net_if *iface,
					     size_t size,
					     sa_family_t family,
					     enum net_ip_protocol proto,
					     k_timeout_t timeout);
#endif
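
/* Illustrative sketch (not part of the API): the one-shot variant above
 * combines both allocation steps. my_iface, AF_INET, IPPROTO_UDP and the
 * 128-byte size are placeholder choices.
 *
 *	struct net_pkt *pkt;
 *
 *	pkt = net_pkt_alloc_with_buffer(my_iface, 128, AF_INET, IPPROTO_UDP,
 *					K_MSEC(100));
 *	if (pkt == NULL) {
 *		return -ENOMEM;
 *	}
 */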

/**
 * @brief Append a buffer in packet
 *
 * @param pkt    Network packet where to append the buffer
 * @param buffer Buffer to append
 */
void net_pkt_append_buffer(struct net_pkt *pkt, struct net_buf *buffer);

/**
 * @brief Get available buffer space from a pkt
 *
 * @note Reserved bytes (headroom) in any of the fragments are not considered to
 *       be available.
 *
 * @param pkt The net_pkt which buffer availability should be evaluated
 *
 * @return the amount of buffer available
 */
size_t net_pkt_available_buffer(struct net_pkt *pkt);

/**
 * @brief Get available buffer space for payload from a pkt
 *
 * @note Reserved bytes (headroom) in any of the fragments are not considered to
 *       be available.
 *
 * @details Unlike net_pkt_available_buffer(), this will take into account
 *          the headers space.
 *
 * @param pkt   The net_pkt which payload buffer availability should
 *              be evaluated
 * @param proto The IP protocol type (can be 0 for none).
 *
 * @return the amount of buffer available for payload
 */
size_t net_pkt_available_payload_buffer(struct net_pkt *pkt,
					enum net_ip_protocol proto);

/**
 * @brief Trim net_pkt buffer
 *
 * @details This checks for unused buffers and deallocates them as needed.
 *
 * @param pkt The net_pkt which buffer will be trimmed
 */
void net_pkt_trim_buffer(struct net_pkt *pkt);

/**
 * @brief Remove @a length bytes from tail of packet
 *
 * @details This function does not take packet cursor into account. It is a
 *          helper to remove unneeded bytes from tail of packet (like appended
 *          CRC). It takes care of buffer deallocation if removed bytes span
 *          whole buffer(s).
 *
 * @param pkt    Network packet
 * @param length Number of bytes to be removed
 *
 * @retval 0       On success.
 * @retval -EINVAL If packet length is shorter than @a length.
 */
int net_pkt_remove_tail(struct net_pkt *pkt, size_t length);
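
/* Illustrative sketch (not part of the API): stripping a 4-byte FCS/CRC that
 * a driver left appended to a received frame.
 *
 *	if (net_pkt_remove_tail(pkt, sizeof(uint32_t)) < 0) {
 *		return -EINVAL;
 *	}
 */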

/**
 * @brief Initialize net_pkt cursor
 *
 * @details This will initialize the net_pkt cursor from its buffer.
 *
 * @param pkt The net_pkt whose cursor is going to be initialized
 */
void net_pkt_cursor_init(struct net_pkt *pkt);

/**
 * @brief Backup net_pkt cursor
 *
 * @param pkt    The net_pkt whose cursor is going to be backed up
 * @param backup The cursor where to backup net_pkt cursor
 */
static inline void net_pkt_cursor_backup(struct net_pkt *pkt,
					 struct net_pkt_cursor *backup)
{
	backup->buf = pkt->cursor.buf;
	backup->pos = pkt->cursor.pos;
}

/**
 * @brief Restore net_pkt cursor from a backup
 *
 * @param pkt    The net_pkt whose cursor is going to be restored
 * @param backup The cursor from where to restore net_pkt cursor
 */
static inline void net_pkt_cursor_restore(struct net_pkt *pkt,
					  struct net_pkt_cursor *backup)
{
	pkt->cursor.buf = backup->buf;
	pkt->cursor.pos = backup->pos;
}
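
/* Illustrative sketch (not part of the API): peeking at data without losing
 * the current read position by saving and restoring the cursor.
 *
 *	struct net_pkt_cursor backup;
 *	uint16_t peeked;
 *
 *	net_pkt_cursor_backup(pkt, &backup);
 *	(void)net_pkt_read_be16(pkt, &peeked);
 *	net_pkt_cursor_restore(pkt, &backup);
 */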

/**
 * @brief Returns current position of the cursor
 *
 * @param pkt The net_pkt whose cursor position is going to be returned
 *
 * @return cursor's position
 */
static inline void *net_pkt_cursor_get_pos(struct net_pkt *pkt)
{
	return pkt->cursor.pos;
}

/**
 * @brief Skip some data from a net_pkt
 *
 * @details net_pkt's cursor should be properly initialized.
 *          Cursor position will be updated after the operation.
 *          Depending on the value of the pkt->overwrite bit, this function
 *          may or may not affect the buffer length. If overwrite is true, it
 *          only advances the cursor by the requested length. If it is false,
 *          it does the same, but when the cursor is already at the end of the
 *          existing data it also increments the buffer length. In that case
 *          its behavior is just like net_pkt_write() or net_pkt_memset(), the
 *          difference being that it does not affect the buffer content itself
 *          (which may then be just garbage).
 *
 * @param pkt    The net_pkt whose cursor will be updated to skip given
 *               amount of data from the buffer.
 * @param length Amount of data to skip in the buffer
 *
 * @return 0 on success, negative errno code otherwise.
 */
int net_pkt_skip(struct net_pkt *pkt, size_t length);
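
/* Illustrative sketch (not part of the API): positioning the cursor at the
 * payload of a received packet by skipping the IPv4 header (assuming a
 * header without options).
 *
 *	net_pkt_cursor_init(pkt);
 *
 *	if (net_pkt_skip(pkt, sizeof(struct net_ipv4_hdr)) < 0) {
 *		return -EMSGSIZE;
 *	}
 */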

/**
 * @brief Memset some data in a net_pkt
 *
 * @details net_pkt's cursor should be properly initialized and,
 *          if needed, positioned using net_pkt_skip.
 *          Cursor position will be updated after the operation.
 *
 * @param pkt    The net_pkt whose buffer to fill starting at the current
 *               cursor position.
 * @param byte   The byte to write in memory
 * @param length Amount of data to memset with given byte
 *
 * @return 0 on success, negative errno code otherwise.
 */
int net_pkt_memset(struct net_pkt *pkt, int byte, size_t length);

/**
 * @brief Copy data from a packet into another one.
 *
 * @details Both net_pkt cursors should be properly initialized and,
 *          if needed, positioned using net_pkt_skip.
 *          The cursors will be updated after the operation.
 *
 * @param pkt_dst Destination network packet.
 * @param pkt_src Source network packet.
 * @param length  Length of data to be copied.
 *
 * @return 0 on success, negative errno code otherwise.
 */
int net_pkt_copy(struct net_pkt *pkt_dst,
		 struct net_pkt *pkt_src,
		 size_t length);

/**
 * @brief Clone pkt and its buffer. The cloned packet will be allocated on
 *        the same pool as the original one.
 *
 * @param pkt Original pkt to be cloned
 * @param timeout Timeout to wait for free buffer
 *
 * @return NULL if error, cloned packet otherwise.
 */
struct net_pkt *net_pkt_clone(struct net_pkt *pkt, k_timeout_t timeout);
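
/* Illustrative sketch (not part of the API): duplicating a packet before
 * handing the original over to another consumer, so both copies can be
 * processed independently.
 *
 *	struct net_pkt *copy = net_pkt_clone(pkt, K_MSEC(100));
 *
 *	if (copy == NULL) {
 *		return -ENOMEM;
 *	}
 */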

/**
 * @brief Clone pkt and its buffer. The cloned packet will be allocated on
 *        the RX packet pool.
 *
 * @param pkt Original pkt to be cloned
 * @param timeout Timeout to wait for free buffer
 *
 * @return NULL if error, cloned packet otherwise.
 */
struct net_pkt *net_pkt_rx_clone(struct net_pkt *pkt, k_timeout_t timeout);

/**
 * @brief Clone pkt and increase the refcount of its buffer.
 *
 * @param pkt Original pkt to be shallow cloned
 * @param timeout Timeout to wait for free packet
 *
 * @return NULL if error, cloned packet otherwise.
 */
struct net_pkt *net_pkt_shallow_clone(struct net_pkt *pkt,
				      k_timeout_t timeout);

/**
 * @brief Read some data from a net_pkt
 *
 * @details net_pkt's cursor should be properly initialized and,
 *          if needed, positioned using net_pkt_skip.
 *          Cursor position will be updated after the operation.
 *
 * @param pkt    The network packet from where to read some data
 * @param data   The destination buffer where to copy the data
 * @param length The amount of data to copy
 *
 * @return 0 on success, negative errno code otherwise.
 */
int net_pkt_read(struct net_pkt *pkt, void *data, size_t length);

/* Read uint8_t data from a net_pkt */
static inline int net_pkt_read_u8(struct net_pkt *pkt, uint8_t *data)
{
	return net_pkt_read(pkt, data, 1);
}

/**
 * @brief Read uint16_t big endian data from a net_pkt
 *
 * @details net_pkt's cursor should be properly initialized and,
 *          if needed, positioned using net_pkt_skip.
 *          Cursor position will be updated after the operation.
 *
 * @param pkt  The network packet from where to read
 * @param data The destination uint16_t where to copy the data
 *
 * @return 0 on success, negative errno code otherwise.
 */
int net_pkt_read_be16(struct net_pkt *pkt, uint16_t *data);

/**
 * @brief Read uint16_t little endian data from a net_pkt
 *
 * @details net_pkt's cursor should be properly initialized and,
 *          if needed, positioned using net_pkt_skip.
 *          Cursor position will be updated after the operation.
 *
 * @param pkt  The network packet from where to read
 * @param data The destination uint16_t where to copy the data
 *
 * @return 0 on success, negative errno code otherwise.
 */
int net_pkt_read_le16(struct net_pkt *pkt, uint16_t *data);

/**
 * @brief Read uint32_t big endian data from a net_pkt
 *
 * @details net_pkt's cursor should be properly initialized and,
 *          if needed, positioned using net_pkt_skip.
 *          Cursor position will be updated after the operation.
 *
 * @param pkt  The network packet from where to read
 * @param data The destination uint32_t where to copy the data
 *
 * @return 0 on success, negative errno code otherwise.
 */
int net_pkt_read_be32(struct net_pkt *pkt, uint32_t *data);
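
/* Illustrative sketch (not part of the API): parsing two big endian fields
 * in sequence; each call advances the cursor by the amount it read.
 *
 *	uint16_t dst_port;
 *	uint32_t seq;
 *
 *	if (net_pkt_read_be16(pkt, &dst_port) < 0 ||
 *	    net_pkt_read_be32(pkt, &seq) < 0) {
 *		return -EMSGSIZE;
 *	}
 */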

/**
 * @brief Write data into a net_pkt
 *
 * @details net_pkt's cursor should be properly initialized and,
 *          if needed, positioned using net_pkt_skip.
 *          Cursor position will be updated after the operation.
 *
 * @param pkt    The network packet where to write
 * @param data   Data to be written
 * @param length Length of the data to be written
 *
 * @return 0 on success, negative errno code otherwise.
 */
int net_pkt_write(struct net_pkt *pkt, const void *data, size_t length);

/* Write uint8_t data into a net_pkt. */
static inline int net_pkt_write_u8(struct net_pkt *pkt, uint8_t data)
{
	return net_pkt_write(pkt, &data, sizeof(uint8_t));
}

/* Write uint16_t big endian data into a net_pkt. */
static inline int net_pkt_write_be16(struct net_pkt *pkt, uint16_t data)
{
	uint16_t data_be16 = htons(data);

	return net_pkt_write(pkt, &data_be16, sizeof(uint16_t));
}

/* Write uint32_t big endian data into a net_pkt. */
static inline int net_pkt_write_be32(struct net_pkt *pkt, uint32_t data)
{
	uint32_t data_be32 = htonl(data);

	return net_pkt_write(pkt, &data_be32, sizeof(uint32_t));
}

/* Write uint32_t little endian data into a net_pkt. */
static inline int net_pkt_write_le32(struct net_pkt *pkt, uint32_t data)
{
	uint32_t data_le32 = sys_cpu_to_le32(data);

	return net_pkt_write(pkt, &data_le32, sizeof(uint32_t));
}

/* Write uint16_t little endian data into a net_pkt. */
static inline int net_pkt_write_le16(struct net_pkt *pkt, uint16_t data)
{
	uint16_t data_le16 = sys_cpu_to_le16(data);

	return net_pkt_write(pkt, &data_le16, sizeof(uint16_t));
}
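
/* Illustrative sketch (not part of the API): serializing a small
 * hypothetical header (type, flags, length, magic) field by field; each
 * helper advances the cursor by the size it wrote. payload_len is a
 * placeholder variable.
 *
 *	if (net_pkt_write_u8(pkt, 0x01) < 0 ||
 *	    net_pkt_write_u8(pkt, 0x00) < 0 ||
 *	    net_pkt_write_be16(pkt, payload_len) < 0 ||
 *	    net_pkt_write_be32(pkt, 0xdeadbeef) < 0) {
 *		return -ENOMEM;
 *	}
 */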

/**
 * @brief Get the amount of data which can be read from current cursor position
 *
 * @param pkt Network packet
 *
 * @return Amount of data which can be read from current pkt cursor
 */
size_t net_pkt_remaining_data(struct net_pkt *pkt);

/**
 * @brief Update the overall length of a packet
 *
 * @details Unlike net_pkt_pull() below, this does not take the packet cursor
 *          into account. It is mainly a helper dedicated to the IPv4 and IPv6
 *          input functions. It shrinks the overall length by the given parameter.
 *
 * @param pkt    Network packet
 * @param length The new length of the packet
 *
 * @return 0 on success, negative errno code otherwise.
 */
int net_pkt_update_length(struct net_pkt *pkt, size_t length);

/**
 * @brief Remove data from the packet at current location
 *
 * @details net_pkt's cursor should be properly initialized and,
 *          if needed, properly positioned using net_pkt_skip/read/write.
 *          Note that net_pkt's cursor is reset by this function.
 *
 * @param pkt    Network packet
 * @param length Number of bytes to be removed
 *
 * @return 0 on success, negative errno code otherwise.
 */
int net_pkt_pull(struct net_pkt *pkt, size_t length);

/**
 * @brief Get the actual offset in the packet from its cursor
 *
 * @param pkt Network packet.
 *
 * @return a valid offset on success, 0 otherwise (in that case the offset
 *         cannot be evaluated).
 */
uint16_t net_pkt_get_current_offset(struct net_pkt *pkt);

/**
 * @brief Check if a data size could fit contiguously
 *
 * @details net_pkt's cursor should be properly initialized and,
 *          if needed, positioned using net_pkt_skip.
 *
 * @param pkt  Network packet.
 * @param size The size to check for contiguity
 *
 * @return true if that is the case, false otherwise.
 */
bool net_pkt_is_contiguous(struct net_pkt *pkt, size_t size);

/**
 * Get the contiguous buffer space
 *
 * @param pkt Network packet
 *
 * @return The available contiguous buffer space in bytes starting from the
 *         current cursor position. 0 in case of an error.
 */
size_t net_pkt_get_contiguous_len(struct net_pkt *pkt);
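
/* Illustrative sketch (not part of the API): parsing a few bytes in place
 * when they are contiguous at the current cursor position. After using the
 * pointer, the cursor still has to be advanced, e.g. with net_pkt_skip().
 * parse_field() is a placeholder for application code.
 *
 *	if (net_pkt_is_contiguous(pkt, 4)) {
 *		uint8_t *field = (uint8_t *)net_pkt_cursor_get_pos(pkt);
 *
 *		parse_field(field, 4);
 *		net_pkt_skip(pkt, 4);
 *	}
 */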

struct net_pkt_data_access {
#if !defined(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)
	void *data;
#endif
	const size_t size;
};

#if defined(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)
#define NET_PKT_DATA_ACCESS_DEFINE(_name, _type)		\
	struct net_pkt_data_access _name = {			\
		.size = sizeof(_type),				\
	}

#define NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(_name, _type)	\
	NET_PKT_DATA_ACCESS_DEFINE(_name, _type)

#else
#define NET_PKT_DATA_ACCESS_DEFINE(_name, _type)		\
	_type _hdr_##_name;					\
	struct net_pkt_data_access _name = {			\
		.data = &_hdr_##_name,				\
		.size = sizeof(_type),				\
	}

#define NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(_name, _type)	\
	struct net_pkt_data_access _name = {			\
		.data = NULL,					\
		.size = sizeof(_type),				\
	}

#endif /* CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS */

/**
 * @brief Get data from a network packet in a contiguous way
 *
 * @details net_pkt's cursor should be properly initialized and,
 *          if needed, positioned using net_pkt_skip.
 *          Cursor position will be updated after the operation.
 *
 * @param pkt    The network packet from where to get the data.
 * @param access A pointer to a valid net_pkt_data_access describing the
 *        data to get in a contiguous way.
 *
 * @return a pointer to the requested contiguous data, NULL otherwise.
 */
void *net_pkt_get_data(struct net_pkt *pkt,
		       struct net_pkt_data_access *access);

/**
 * @brief Set contiguous data into a network packet
 *
 * @details net_pkt's cursor should be properly initialized and,
 *          if needed, positioned using net_pkt_skip.
 *          Cursor position will be updated after the operation.
 *
 * @param pkt    The network packet to where the data should be set.
 * @param access A pointer to a valid net_pkt_data_access describing the
 *        data to set.
 *
 * @return 0 on success, a negative errno otherwise.
 */
int net_pkt_set_data(struct net_pkt *pkt,
		     struct net_pkt_data_access *access);

/**
 * Acknowledge previously contiguous data taken from a network packet
 * Packet needs to be set to overwrite mode.
 */
static inline int net_pkt_acknowledge_data(struct net_pkt *pkt,
					   struct net_pkt_data_access *access)
{
	return net_pkt_skip(pkt, access->size);
}
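
/* Illustrative sketch (not part of the API): the typical header access
 * pattern built on the helpers above. The cursor is assumed to already be
 * positioned at the UDP header; struct net_udp_hdr (from <zephyr/net/udp.h>)
 * is used here only as an example type.
 *
 *	NET_PKT_DATA_ACCESS_DEFINE(udp_access, struct net_udp_hdr);
 *	struct net_udp_hdr *udp;
 *
 *	udp = (struct net_udp_hdr *)net_pkt_get_data(pkt, &udp_access);
 *	if (udp == NULL) {
 *		return -ENOBUFS;
 *	}
 *
 *	udp->chksum = 0;
 *
 *	return net_pkt_set_data(pkt, &udp_access);
 */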

/**
 * @}
 */

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_NET_NET_PKT_H_ */