1 /** @file
2 * @brief Network packet buffer descriptor API
3 *
4  * Network data is passed between different parts of the stack via the
5  * net_buf struct.
6 */
7
8 /*
9 * Copyright (c) 2016 Intel Corporation
10 *
11 * SPDX-License-Identifier: Apache-2.0
12 */
13
14 /* Data buffer API - used for all data to/from net */
15
16 #ifndef ZEPHYR_INCLUDE_NET_NET_PKT_H_
17 #define ZEPHYR_INCLUDE_NET_NET_PKT_H_
18
19 #include <zephyr/types.h>
20 #include <stdbool.h>
21
22 #include <zephyr/net/buf.h>
23
24 #if defined(CONFIG_IEEE802154)
25 #include <zephyr/net/ieee802154_pkt.h>
26 #endif
27 #include <zephyr/net/net_core.h>
28 #include <zephyr/net/net_linkaddr.h>
29 #include <zephyr/net/net_ip.h>
30 #include <zephyr/net/net_if.h>
31 #include <zephyr/net/net_context.h>
32 #include <zephyr/net/net_time.h>
33 #include <zephyr/net/ethernet_vlan.h>
34 #include <zephyr/net/ptp_time.h>
35
36 #ifdef __cplusplus
37 extern "C" {
38 #endif
39
40 /**
41 * @brief Network packet management library
42 * @defgroup net_pkt Network Packet Library
43 * @ingroup networking
44 * @{
45 */
46
47 struct net_context;
48
49 /* buffer cursor used in net_pkt */
50 struct net_pkt_cursor {
51 	/** Current net_buf the cursor points to */
52 struct net_buf *buf;
53 /** Current position in the data buffer of the net_buf */
54 uint8_t *pos;
55 };
56
57 /**
58 * @brief Network packet.
59 *
60  * Note that if you add new fields into net_pkt, remember to update
61  * the net_pkt_clone() function.
62 */
63 struct net_pkt {
64 /**
65 	 * The fifo is used by the RX/TX threads and by the socket layer. The
66 	 * net_pkt is queued via the fifo to the processing thread.
67 */
68 intptr_t fifo;
69
70 	/** Slab this net_pkt was allocated from */
71 struct k_mem_slab *slab;
72
73 /** buffer holding the packet */
74 union {
75 struct net_buf *frags;
76 struct net_buf *buffer;
77 };
78
79 /** Internal buffer iterator used for reading/writing */
80 struct net_pkt_cursor cursor;
81
82 /** Network connection context */
83 struct net_context *context;
84
85 /** Network interface */
86 struct net_if *iface;
87
88 /** @cond ignore */
89
90 #if defined(CONFIG_NET_TCP)
91 /** Allow placing the packet into sys_slist_t */
92 sys_snode_t next;
93 #endif
94 #if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
95 struct net_if *orig_iface; /* Original network interface */
96 #endif
97
98 #if defined(CONFIG_NET_PKT_TIMESTAMP) || defined(CONFIG_NET_PKT_TXTIME)
99 /**
100 * TX or RX timestamp if available
101 *
102 * For packets that have been sent over the medium, the timestamp refers
103 * to the time the message timestamp point was encountered at the
104 * reference plane.
105 *
106 	 * Unsent packets can be scheduled by setting the timestamp to a future
107 * point in time.
108 *
109 * All timestamps refer to the network subsystem's local clock.
110 *
111 * See @ref net_ptp_time for definitions of local clock, message
112 * timestamp point and reference plane. See @ref net_time_t for
113 * semantics of the network reference clock.
114 *
115 * TODO: Replace with net_time_t to decouple from PTP.
116 */
117 struct net_ptp_time timestamp;
118 #endif
119
120 #if defined(CONFIG_NET_PKT_RXTIME_STATS) || defined(CONFIG_NET_PKT_TXTIME_STATS)
121 struct {
122 /** Create time in cycles */
123 uint32_t create_time;
124
125 #if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL) || \
126 defined(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)
127 /** Collect extra statistics for net_pkt processing
128 * from various points in the IP stack. See networking
129 * documentation where these points are located and how
130 * to interpret the results.
131 */
132 struct {
133 uint32_t stat[NET_PKT_DETAIL_STATS_COUNT];
134 int count;
135 } detail;
136 #endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL ||
137 CONFIG_NET_PKT_RXTIME_STATS_DETAIL */
138 };
139 #endif /* CONFIG_NET_PKT_RXTIME_STATS || CONFIG_NET_PKT_TXTIME_STATS */
140
141 /** Reference counter */
142 atomic_t atomic_ref;
143
144 	/* Filled by layer 2 when a network packet is received. */
145 struct net_linkaddr lladdr_src;
146 struct net_linkaddr lladdr_dst;
147 uint16_t ll_proto_type;
148
149 #if defined(CONFIG_NET_IP)
150 uint8_t ip_hdr_len; /* pre-filled in order to avoid func call */
151 #endif
152
153 uint8_t overwrite : 1; /* Is packet content being overwritten? */
154 uint8_t eof : 1; /* Last packet before EOF */
155 	uint8_t ptp_pkt : 1;	   /* For an outgoing packet: is this packet
156 				    * an L2 PTP packet.
157 				    * Used only if defined(CONFIG_NET_L2_PTP)
158 */
159 	uint8_t forwarding : 1;	   /* Are we forwarding this pkt?
160 				    * Used only if defined(CONFIG_NET_ROUTE)
161 */
162 uint8_t family : 3; /* Address family, see net_ip.h */
163
164 /* bitfield byte alignment boundary */
165
166 #if defined(CONFIG_NET_IPV4_AUTO)
167 	uint8_t ipv4_auto_arp_msg : 1;	/* Is this pkt an IPv4 autoconf ARP
168 * message.
169 * Note: family needs to be
170 * AF_INET.
171 */
172 #endif
173 #if defined(CONFIG_NET_LLDP)
174 uint8_t lldp_pkt : 1; /* Is this pkt an LLDP message.
175 * Note: family needs to be
176 * AF_UNSPEC.
177 */
178 #endif
179 uint8_t ppp_msg : 1; /* This is a PPP message */
180 #if defined(CONFIG_NET_TCP)
181 uint8_t tcp_first_msg : 1; /* Is this the first time this pkt is
182 * sent, or is this a resend of a TCP
183 * segment.
184 */
185 #endif
186 uint8_t captured : 1; /* Set to 1 if this packet is already being
187 * captured
188 */
189 uint8_t l2_bridged : 1; /* set to 1 if this packet comes from a bridge
190 * and already contains its L2 header to be
191 * preserved. Useful only if
192 * defined(CONFIG_NET_ETHERNET_BRIDGE).
193 */
194 uint8_t l2_processed : 1; /* Set to 1 if this packet has already been
195 * processed by the L2
196 */
197 uint8_t chksum_done : 1; /* Checksum has already been computed for
198 * the packet.
199 */
200 #if defined(CONFIG_NET_IP_FRAGMENT)
201 uint8_t ip_reassembled : 1; /* Packet is a reassembled IP packet. */
202 #endif
203 /* bitfield byte alignment boundary */
204
205 #if defined(CONFIG_NET_IP)
206 union {
207 /* IPv6 hop limit or IPv4 ttl for this network packet.
208 * The value is shared between IPv6 and IPv4.
209 */
210 #if defined(CONFIG_NET_IPV6)
211 uint8_t ipv6_hop_limit;
212 #endif
213 #if defined(CONFIG_NET_IPV4)
214 uint8_t ipv4_ttl;
215 #endif
216 };
217
218 union {
219 #if defined(CONFIG_NET_IPV4)
220 uint8_t ipv4_opts_len; /* length of IPv4 header options */
221 #endif
222 #if defined(CONFIG_NET_IPV6)
223 uint16_t ipv6_ext_len; /* length of extension headers */
224 #endif
225 };
226
227 #if defined(CONFIG_NET_IP_FRAGMENT)
228 union {
229 #if defined(CONFIG_NET_IPV4_FRAGMENT)
230 struct {
231 uint16_t flags; /* Fragment offset and M (More Fragment) flag */
232 uint16_t id; /* Fragment ID */
233 } ipv4_fragment;
234 #endif /* CONFIG_NET_IPV4_FRAGMENT */
235 #if defined(CONFIG_NET_IPV6_FRAGMENT)
236 struct {
237 uint16_t flags; /* Fragment offset and M (More Fragment) flag */
238 uint32_t id; /* Fragment id */
239 			uint16_t	hdr_start;	/* Offset where the fragment header starts */
240 } ipv6_fragment;
241 #endif /* CONFIG_NET_IPV6_FRAGMENT */
242 };
243 #endif /* CONFIG_NET_IP_FRAGMENT */
244
245 #if defined(CONFIG_NET_IPV6)
246 	/* Offset of the start of the last header before the payload data
247 	 * in the IPv6 packet, counted from the start of the IPv6
248 	 * packet. Note that this value should be updated by whoever
249 	 * adds IPv6 extension headers to the network packet.
250 */
251 uint16_t ipv6_prev_hdr_start;
252
253 uint8_t ipv6_ext_opt_len; /* IPv6 ND option length */
254 uint8_t ipv6_next_hdr; /* What is the very first next header */
255 #endif /* CONFIG_NET_IPV6 */
256
257 #if defined(CONFIG_NET_IP_DSCP_ECN)
258 /** IPv4/IPv6 Differentiated Services Code Point value. */
259 uint8_t ip_dscp : 6;
260
261 /** IPv4/IPv6 Explicit Congestion Notification value. */
262 uint8_t ip_ecn : 2;
263 #endif /* CONFIG_NET_IP_DSCP_ECN */
264 #endif /* CONFIG_NET_IP */
265
266 #if defined(CONFIG_NET_VLAN)
267 /* VLAN TCI (Tag Control Information). This contains the Priority
268 * Code Point (PCP), Drop Eligible Indicator (DEI) and VLAN
269 * Identifier (VID, called more commonly VLAN tag). This value is
270 * kept in host byte order.
271 */
272 uint16_t vlan_tci;
273 #endif /* CONFIG_NET_VLAN */
274
275 #if defined(NET_PKT_HAS_CONTROL_BLOCK)
276 /* TODO: Evolve this into a union of orthogonal
277 * control block declarations if further L2
278 * stacks require L2-specific attributes.
279 */
280 #if defined(CONFIG_IEEE802154)
281 /* The following structure requires a 4-byte alignment
282 * boundary to avoid padding.
283 */
284 struct net_pkt_cb_ieee802154 cb;
285 #endif /* CONFIG_IEEE802154 */
286 #endif /* NET_PKT_HAS_CONTROL_BLOCK */
287
288 	/** Network packet priority; can be left unset, in which case the
289 	 * packet is not prioritised.
290 */
291 uint8_t priority;
292
293 #if defined(CONFIG_NET_OFFLOAD)
294 	/* Remote address of the received packet. This is only used by
295 * network interfaces with an offloaded TCP/IP stack.
296 */
297 struct sockaddr remote;
298 #endif /* CONFIG_NET_OFFLOAD */
299
300 /* @endcond */
301 };
302
303 /** @cond ignore */
304
305 /* The interface's real link-layer address */
306 static inline struct net_linkaddr *net_pkt_lladdr_if(struct net_pkt *pkt)
307 {
308 return net_if_get_link_addr(pkt->iface);
309 }
310
311 static inline struct net_context *net_pkt_context(struct net_pkt *pkt)
312 {
313 return pkt->context;
314 }
315
316 static inline void net_pkt_set_context(struct net_pkt *pkt,
317 struct net_context *ctx)
318 {
319 pkt->context = ctx;
320 }
321
322 static inline struct net_if *net_pkt_iface(struct net_pkt *pkt)
323 {
324 return pkt->iface;
325 }
326
327 static inline void net_pkt_set_iface(struct net_pkt *pkt, struct net_if *iface)
328 {
329 pkt->iface = iface;
330
331 /* If the network interface is set in pkt, then also set the type of
332 * the network address that is stored in pkt. This is done here so
333 * that the address type is properly set and is not forgotten.
334 */
335 if (iface) {
336 uint8_t type = net_if_get_link_addr(iface)->type;
337
338 pkt->lladdr_src.type = type;
339 pkt->lladdr_dst.type = type;
340 }
341 }
342
343 static inline struct net_if *net_pkt_orig_iface(struct net_pkt *pkt)
344 {
345 #if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
346 return pkt->orig_iface;
347 #else
348 return pkt->iface;
349 #endif
350 }
351
352 static inline void net_pkt_set_orig_iface(struct net_pkt *pkt,
353 struct net_if *iface)
354 {
355 #if defined(CONFIG_NET_ROUTING) || defined(CONFIG_NET_ETHERNET_BRIDGE)
356 pkt->orig_iface = iface;
357 #endif
358 }
359
360 static inline uint8_t net_pkt_family(struct net_pkt *pkt)
361 {
362 return pkt->family;
363 }
364
365 static inline void net_pkt_set_family(struct net_pkt *pkt, uint8_t family)
366 {
367 pkt->family = family;
368 }
369
370 static inline bool net_pkt_is_ptp(struct net_pkt *pkt)
371 {
372 return !!(pkt->ptp_pkt);
373 }
374
375 static inline void net_pkt_set_ptp(struct net_pkt *pkt, bool is_ptp)
376 {
377 pkt->ptp_pkt = is_ptp;
378 }
379
380 static inline bool net_pkt_is_captured(struct net_pkt *pkt)
381 {
382 return !!(pkt->captured);
383 }
384
385 static inline void net_pkt_set_captured(struct net_pkt *pkt, bool is_captured)
386 {
387 pkt->captured = is_captured;
388 }
389
390 static inline bool net_pkt_is_l2_bridged(struct net_pkt *pkt)
391 {
392 return IS_ENABLED(CONFIG_NET_ETHERNET_BRIDGE) ? !!(pkt->l2_bridged) : 0;
393 }
394
395 static inline void net_pkt_set_l2_bridged(struct net_pkt *pkt, bool is_l2_bridged)
396 {
397 if (IS_ENABLED(CONFIG_NET_ETHERNET_BRIDGE)) {
398 pkt->l2_bridged = is_l2_bridged;
399 }
400 }
401
402 static inline bool net_pkt_is_l2_processed(struct net_pkt *pkt)
403 {
404 return !!(pkt->l2_processed);
405 }
406
407 static inline void net_pkt_set_l2_processed(struct net_pkt *pkt,
408 bool is_l2_processed)
409 {
410 pkt->l2_processed = is_l2_processed;
411 }
412
413 static inline bool net_pkt_is_chksum_done(struct net_pkt *pkt)
414 {
415 return !!(pkt->chksum_done);
416 }
417
418 static inline void net_pkt_set_chksum_done(struct net_pkt *pkt,
419 bool is_chksum_done)
420 {
421 pkt->chksum_done = is_chksum_done;
422 }
423
424 static inline uint8_t net_pkt_ip_hdr_len(struct net_pkt *pkt)
425 {
426 #if defined(CONFIG_NET_IP)
427 return pkt->ip_hdr_len;
428 #else
429 return 0;
430 #endif
431 }
432
433 static inline void net_pkt_set_ip_hdr_len(struct net_pkt *pkt, uint8_t len)
434 {
435 #if defined(CONFIG_NET_IP)
436 pkt->ip_hdr_len = len;
437 #endif
438 }
439
440 static inline uint8_t net_pkt_ip_dscp(struct net_pkt *pkt)
441 {
442 #if defined(CONFIG_NET_IP_DSCP_ECN)
443 return pkt->ip_dscp;
444 #else
445 return 0;
446 #endif
447 }
448
449 static inline void net_pkt_set_ip_dscp(struct net_pkt *pkt, uint8_t dscp)
450 {
451 #if defined(CONFIG_NET_IP_DSCP_ECN)
452 pkt->ip_dscp = dscp;
453 #endif
454 }
455
456 static inline uint8_t net_pkt_ip_ecn(struct net_pkt *pkt)
457 {
458 #if defined(CONFIG_NET_IP_DSCP_ECN)
459 return pkt->ip_ecn;
460 #else
461 return 0;
462 #endif
463 }
464
465 static inline void net_pkt_set_ip_ecn(struct net_pkt *pkt, uint8_t ecn)
466 {
467 #if defined(CONFIG_NET_IP_DSCP_ECN)
468 pkt->ip_ecn = ecn;
469 #endif
470 }
471
472 static inline uint8_t net_pkt_tcp_1st_msg(struct net_pkt *pkt)
473 {
474 #if defined(CONFIG_NET_TCP)
475 return pkt->tcp_first_msg;
476 #else
477 return true;
478 #endif
479 }
480
481 static inline void net_pkt_set_tcp_1st_msg(struct net_pkt *pkt, bool is_1st)
482 {
483 #if defined(CONFIG_NET_TCP)
484 pkt->tcp_first_msg = is_1st;
485 #else
486 ARG_UNUSED(pkt);
487 ARG_UNUSED(is_1st);
488 #endif
489 }
490
491 static inline uint8_t net_pkt_eof(struct net_pkt *pkt)
492 {
493 return pkt->eof;
494 }
495
496 static inline void net_pkt_set_eof(struct net_pkt *pkt, bool eof)
497 {
498 pkt->eof = eof;
499 }
500
501 static inline bool net_pkt_forwarding(struct net_pkt *pkt)
502 {
503 return !!(pkt->forwarding);
504 }
505
506 static inline void net_pkt_set_forwarding(struct net_pkt *pkt, bool forward)
507 {
508 pkt->forwarding = forward;
509 }
510
511 #if defined(CONFIG_NET_IPV4)
512 static inline uint8_t net_pkt_ipv4_ttl(struct net_pkt *pkt)
513 {
514 return pkt->ipv4_ttl;
515 }
516
517 static inline void net_pkt_set_ipv4_ttl(struct net_pkt *pkt,
518 uint8_t ttl)
519 {
520 pkt->ipv4_ttl = ttl;
521 }
522
523 static inline uint8_t net_pkt_ipv4_opts_len(struct net_pkt *pkt)
524 {
525 return pkt->ipv4_opts_len;
526 }
527
528 static inline void net_pkt_set_ipv4_opts_len(struct net_pkt *pkt,
529 uint8_t opts_len)
530 {
531 pkt->ipv4_opts_len = opts_len;
532 }
533 #else
534 static inline uint8_t net_pkt_ipv4_ttl(struct net_pkt *pkt)
535 {
536 ARG_UNUSED(pkt);
537
538 return 0;
539 }
540
541 static inline void net_pkt_set_ipv4_ttl(struct net_pkt *pkt,
542 uint8_t ttl)
543 {
544 ARG_UNUSED(pkt);
545 ARG_UNUSED(ttl);
546 }
547
548 static inline uint8_t net_pkt_ipv4_opts_len(struct net_pkt *pkt)
549 {
550 ARG_UNUSED(pkt);
551 return 0;
552 }
553
554 static inline void net_pkt_set_ipv4_opts_len(struct net_pkt *pkt,
555 uint8_t opts_len)
556 {
557 ARG_UNUSED(pkt);
558 ARG_UNUSED(opts_len);
559 }
560 #endif
561
562 #if defined(CONFIG_NET_IPV6)
563 static inline uint8_t net_pkt_ipv6_ext_opt_len(struct net_pkt *pkt)
564 {
565 return pkt->ipv6_ext_opt_len;
566 }
567
568 static inline void net_pkt_set_ipv6_ext_opt_len(struct net_pkt *pkt,
569 uint8_t len)
570 {
571 pkt->ipv6_ext_opt_len = len;
572 }
573
574 static inline uint8_t net_pkt_ipv6_next_hdr(struct net_pkt *pkt)
575 {
576 return pkt->ipv6_next_hdr;
577 }
578
579 static inline void net_pkt_set_ipv6_next_hdr(struct net_pkt *pkt,
580 uint8_t next_hdr)
581 {
582 pkt->ipv6_next_hdr = next_hdr;
583 }
584
585 static inline uint16_t net_pkt_ipv6_ext_len(struct net_pkt *pkt)
586 {
587 return pkt->ipv6_ext_len;
588 }
589
590 static inline void net_pkt_set_ipv6_ext_len(struct net_pkt *pkt, uint16_t len)
591 {
592 pkt->ipv6_ext_len = len;
593 }
594
595 static inline uint16_t net_pkt_ipv6_hdr_prev(struct net_pkt *pkt)
596 {
597 return pkt->ipv6_prev_hdr_start;
598 }
599
600 static inline void net_pkt_set_ipv6_hdr_prev(struct net_pkt *pkt,
601 uint16_t offset)
602 {
603 pkt->ipv6_prev_hdr_start = offset;
604 }
605
606 static inline uint8_t net_pkt_ipv6_hop_limit(struct net_pkt *pkt)
607 {
608 return pkt->ipv6_hop_limit;
609 }
610
611 static inline void net_pkt_set_ipv6_hop_limit(struct net_pkt *pkt,
612 uint8_t hop_limit)
613 {
614 pkt->ipv6_hop_limit = hop_limit;
615 }
616 #else /* CONFIG_NET_IPV6 */
617 static inline uint8_t net_pkt_ipv6_ext_opt_len(struct net_pkt *pkt)
618 {
619 ARG_UNUSED(pkt);
620
621 return 0;
622 }
623
624 static inline void net_pkt_set_ipv6_ext_opt_len(struct net_pkt *pkt,
625 uint8_t len)
626 {
627 ARG_UNUSED(pkt);
628 ARG_UNUSED(len);
629 }
630
631 static inline uint8_t net_pkt_ipv6_next_hdr(struct net_pkt *pkt)
632 {
633 ARG_UNUSED(pkt);
634
635 return 0;
636 }
637
638 static inline void net_pkt_set_ipv6_next_hdr(struct net_pkt *pkt,
639 uint8_t next_hdr)
640 {
641 ARG_UNUSED(pkt);
642 ARG_UNUSED(next_hdr);
643 }
644
645 static inline uint16_t net_pkt_ipv6_ext_len(struct net_pkt *pkt)
646 {
647 ARG_UNUSED(pkt);
648
649 return 0;
650 }
651
652 static inline void net_pkt_set_ipv6_ext_len(struct net_pkt *pkt, uint16_t len)
653 {
654 ARG_UNUSED(pkt);
655 ARG_UNUSED(len);
656 }
657
658 static inline uint16_t net_pkt_ipv6_hdr_prev(struct net_pkt *pkt)
659 {
660 ARG_UNUSED(pkt);
661
662 return 0;
663 }
664
665 static inline void net_pkt_set_ipv6_hdr_prev(struct net_pkt *pkt,
666 uint16_t offset)
667 {
668 ARG_UNUSED(pkt);
669 ARG_UNUSED(offset);
670 }
671
672 static inline uint8_t net_pkt_ipv6_hop_limit(struct net_pkt *pkt)
673 {
674 ARG_UNUSED(pkt);
675
676 return 0;
677 }
678
679 static inline void net_pkt_set_ipv6_hop_limit(struct net_pkt *pkt,
680 uint8_t hop_limit)
681 {
682 ARG_UNUSED(pkt);
683 ARG_UNUSED(hop_limit);
684 }
685 #endif /* CONFIG_NET_IPV6 */
686
687 static inline uint16_t net_pkt_ip_opts_len(struct net_pkt *pkt)
688 {
689 #if defined(CONFIG_NET_IPV6)
690 return pkt->ipv6_ext_len;
691 #elif defined(CONFIG_NET_IPV4)
692 return pkt->ipv4_opts_len;
693 #else
694 ARG_UNUSED(pkt);
695
696 return 0;
697 #endif
698 }
699
700 #if defined(CONFIG_NET_IPV4_FRAGMENT)
701 static inline uint16_t net_pkt_ipv4_fragment_offset(struct net_pkt *pkt)
702 {
703 return (pkt->ipv4_fragment.flags & NET_IPV4_FRAGH_OFFSET_MASK) * 8;
704 }
705
706 static inline bool net_pkt_ipv4_fragment_more(struct net_pkt *pkt)
707 {
708 return (pkt->ipv4_fragment.flags & NET_IPV4_MORE_FRAG_MASK) != 0;
709 }
710
711 static inline void net_pkt_set_ipv4_fragment_flags(struct net_pkt *pkt, uint16_t flags)
712 {
713 pkt->ipv4_fragment.flags = flags;
714 }
715
716 static inline uint32_t net_pkt_ipv4_fragment_id(struct net_pkt *pkt)
717 {
718 return pkt->ipv4_fragment.id;
719 }
720
721 static inline void net_pkt_set_ipv4_fragment_id(struct net_pkt *pkt, uint32_t id)
722 {
723 pkt->ipv4_fragment.id = id;
724 }
725 #else /* CONFIG_NET_IPV4_FRAGMENT */
726 static inline uint16_t net_pkt_ipv4_fragment_offset(struct net_pkt *pkt)
727 {
728 ARG_UNUSED(pkt);
729
730 return 0;
731 }
732
733 static inline bool net_pkt_ipv4_fragment_more(struct net_pkt *pkt)
734 {
735 ARG_UNUSED(pkt);
736
737 return 0;
738 }
739
740 static inline void net_pkt_set_ipv4_fragment_flags(struct net_pkt *pkt, uint16_t flags)
741 {
742 ARG_UNUSED(pkt);
743 ARG_UNUSED(flags);
744 }
745
746 static inline uint32_t net_pkt_ipv4_fragment_id(struct net_pkt *pkt)
747 {
748 ARG_UNUSED(pkt);
749
750 return 0;
751 }
752
753 static inline void net_pkt_set_ipv4_fragment_id(struct net_pkt *pkt, uint32_t id)
754 {
755 ARG_UNUSED(pkt);
756 ARG_UNUSED(id);
757 }
758 #endif /* CONFIG_NET_IPV4_FRAGMENT */
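
/* Usage sketch (illustrative, not part of the API): the IPv4 fragment flags
 * field stores the fragment offset in 8-byte units together with the More
 * Fragments bit, which is why net_pkt_ipv4_fragment_offset() multiplies by 8.
 * A hypothetical reassembly check could look like this:
 *
 *	uint16_t offset = net_pkt_ipv4_fragment_offset(pkt);
 *	bool more = net_pkt_ipv4_fragment_more(pkt);
 *	uint32_t id = net_pkt_ipv4_fragment_id(pkt);
 *
 *	if (!more && offset == 0) {
 *		// Not a fragment at all, process as a normal packet.
 *	}
 */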
759
760 #if defined(CONFIG_NET_IPV6_FRAGMENT)
761 static inline uint16_t net_pkt_ipv6_fragment_start(struct net_pkt *pkt)
762 {
763 return pkt->ipv6_fragment.hdr_start;
764 }
765
766 static inline void net_pkt_set_ipv6_fragment_start(struct net_pkt *pkt,
767 uint16_t start)
768 {
769 pkt->ipv6_fragment.hdr_start = start;
770 }
771
772 static inline uint16_t net_pkt_ipv6_fragment_offset(struct net_pkt *pkt)
773 {
774 return pkt->ipv6_fragment.flags & NET_IPV6_FRAGH_OFFSET_MASK;
775 }
776 static inline bool net_pkt_ipv6_fragment_more(struct net_pkt *pkt)
777 {
778 return (pkt->ipv6_fragment.flags & 0x01) != 0;
779 }
780
781 static inline void net_pkt_set_ipv6_fragment_flags(struct net_pkt *pkt,
782 uint16_t flags)
783 {
784 pkt->ipv6_fragment.flags = flags;
785 }
786
787 static inline uint32_t net_pkt_ipv6_fragment_id(struct net_pkt *pkt)
788 {
789 return pkt->ipv6_fragment.id;
790 }
791
792 static inline void net_pkt_set_ipv6_fragment_id(struct net_pkt *pkt,
793 uint32_t id)
794 {
795 pkt->ipv6_fragment.id = id;
796 }
797 #else /* CONFIG_NET_IPV6_FRAGMENT */
798 static inline uint16_t net_pkt_ipv6_fragment_start(struct net_pkt *pkt)
799 {
800 ARG_UNUSED(pkt);
801
802 return 0;
803 }
804
805 static inline void net_pkt_set_ipv6_fragment_start(struct net_pkt *pkt,
806 uint16_t start)
807 {
808 ARG_UNUSED(pkt);
809 ARG_UNUSED(start);
810 }
811
812 static inline uint16_t net_pkt_ipv6_fragment_offset(struct net_pkt *pkt)
813 {
814 ARG_UNUSED(pkt);
815
816 return 0;
817 }
818
819 static inline bool net_pkt_ipv6_fragment_more(struct net_pkt *pkt)
820 {
821 ARG_UNUSED(pkt);
822
823 return 0;
824 }
825
826 static inline void net_pkt_set_ipv6_fragment_flags(struct net_pkt *pkt,
827 uint16_t flags)
828 {
829 ARG_UNUSED(pkt);
830 ARG_UNUSED(flags);
831 }
832
833 static inline uint32_t net_pkt_ipv6_fragment_id(struct net_pkt *pkt)
834 {
835 ARG_UNUSED(pkt);
836
837 return 0;
838 }
839
840 static inline void net_pkt_set_ipv6_fragment_id(struct net_pkt *pkt,
841 uint32_t id)
842 {
843 ARG_UNUSED(pkt);
844 ARG_UNUSED(id);
845 }
846 #endif /* CONFIG_NET_IPV6_FRAGMENT */
847
848 #if defined(CONFIG_NET_IP_FRAGMENT)
849 static inline bool net_pkt_is_ip_reassembled(struct net_pkt *pkt)
850 {
851 return !!(pkt->ip_reassembled);
852 }
853
854 static inline void net_pkt_set_ip_reassembled(struct net_pkt *pkt,
855 bool reassembled)
856 {
857 pkt->ip_reassembled = reassembled;
858 }
859 #else /* CONFIG_NET_IP_FRAGMENT */
860 static inline bool net_pkt_is_ip_reassembled(struct net_pkt *pkt)
861 {
862 ARG_UNUSED(pkt);
863
864 return false;
865 }
866
867 static inline void net_pkt_set_ip_reassembled(struct net_pkt *pkt,
868 bool reassembled)
869 {
870 ARG_UNUSED(pkt);
871 ARG_UNUSED(reassembled);
872 }
873 #endif /* CONFIG_NET_IP_FRAGMENT */
874
875 static inline uint8_t net_pkt_priority(struct net_pkt *pkt)
876 {
877 return pkt->priority;
878 }
879
880 static inline void net_pkt_set_priority(struct net_pkt *pkt,
881 uint8_t priority)
882 {
883 pkt->priority = priority;
884 }
885
886 #if defined(CONFIG_NET_VLAN)
887 static inline uint16_t net_pkt_vlan_tag(struct net_pkt *pkt)
888 {
889 return net_eth_vlan_get_vid(pkt->vlan_tci);
890 }
891
892 static inline void net_pkt_set_vlan_tag(struct net_pkt *pkt, uint16_t tag)
893 {
894 pkt->vlan_tci = net_eth_vlan_set_vid(pkt->vlan_tci, tag);
895 }
896
897 static inline uint8_t net_pkt_vlan_priority(struct net_pkt *pkt)
898 {
899 return net_eth_vlan_get_pcp(pkt->vlan_tci);
900 }
901
902 static inline void net_pkt_set_vlan_priority(struct net_pkt *pkt,
903 uint8_t priority)
904 {
905 pkt->vlan_tci = net_eth_vlan_set_pcp(pkt->vlan_tci, priority);
906 }
907
908 static inline bool net_pkt_vlan_dei(struct net_pkt *pkt)
909 {
910 return net_eth_vlan_get_dei(pkt->vlan_tci);
911 }
912
913 static inline void net_pkt_set_vlan_dei(struct net_pkt *pkt, bool dei)
914 {
915 pkt->vlan_tci = net_eth_vlan_set_dei(pkt->vlan_tci, dei);
916 }
917
918 static inline void net_pkt_set_vlan_tci(struct net_pkt *pkt, uint16_t tci)
919 {
920 pkt->vlan_tci = tci;
921 }
922
923 static inline uint16_t net_pkt_vlan_tci(struct net_pkt *pkt)
924 {
925 return pkt->vlan_tci;
926 }
927 #else
928 static inline uint16_t net_pkt_vlan_tag(struct net_pkt *pkt)
929 {
930 return NET_VLAN_TAG_UNSPEC;
931 }
932
933 static inline void net_pkt_set_vlan_tag(struct net_pkt *pkt, uint16_t tag)
934 {
935 ARG_UNUSED(pkt);
936 ARG_UNUSED(tag);
937 }
938
939 static inline uint8_t net_pkt_vlan_priority(struct net_pkt *pkt)
940 {
941 ARG_UNUSED(pkt);
942 return 0;
943 }
944
945 static inline bool net_pkt_vlan_dei(struct net_pkt *pkt)
946 {
947 return false;
948 }
949
950 static inline void net_pkt_set_vlan_dei(struct net_pkt *pkt, bool dei)
951 {
952 ARG_UNUSED(pkt);
953 ARG_UNUSED(dei);
954 }
955
956 static inline uint16_t net_pkt_vlan_tci(struct net_pkt *pkt)
957 {
958 return NET_VLAN_TAG_UNSPEC; /* assumes priority is 0 */
959 }
960
961 static inline void net_pkt_set_vlan_tci(struct net_pkt *pkt, uint16_t tci)
962 {
963 ARG_UNUSED(pkt);
964 ARG_UNUSED(tci);
965 }
966 #endif
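
/* Usage sketch (illustrative): the VLAN helpers operate on the host-order
 * TCI stored in the packet, so the tag, priority and DEI can be set
 * independently before the Ethernet L2 builds the 802.1Q header:
 *
 *	net_pkt_set_vlan_tag(pkt, 100);      // VID 100
 *	net_pkt_set_vlan_priority(pkt, 5);   // PCP 5
 *	net_pkt_set_vlan_dei(pkt, false);
 *
 * With CONFIG_NET_VLAN disabled these calls compile to no-ops and
 * net_pkt_vlan_tag() returns NET_VLAN_TAG_UNSPEC.
 */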
967
968 #if defined(CONFIG_NET_PKT_TIMESTAMP) || defined(CONFIG_NET_PKT_TXTIME)
969 static inline struct net_ptp_time *net_pkt_timestamp(struct net_pkt *pkt)
970 {
971 return &pkt->timestamp;
972 }
973
974 static inline void net_pkt_set_timestamp(struct net_pkt *pkt,
975 struct net_ptp_time *timestamp)
976 {
977 pkt->timestamp.second = timestamp->second;
978 pkt->timestamp.nanosecond = timestamp->nanosecond;
979 }
980
981 static inline net_time_t net_pkt_timestamp_ns(struct net_pkt *pkt)
982 {
983 return net_ptp_time_to_ns(&pkt->timestamp);
984 }
985
986 static inline void net_pkt_set_timestamp_ns(struct net_pkt *pkt, net_time_t timestamp)
987 {
988 pkt->timestamp = ns_to_net_ptp_time(timestamp);
989 }
990 #else
991 static inline struct net_ptp_time *net_pkt_timestamp(struct net_pkt *pkt)
992 {
993 ARG_UNUSED(pkt);
994
995 return NULL;
996 }
997
998 static inline void net_pkt_set_timestamp(struct net_pkt *pkt,
999 struct net_ptp_time *timestamp)
1000 {
1001 ARG_UNUSED(pkt);
1002 ARG_UNUSED(timestamp);
1003 }
1004
1005 static inline net_time_t net_pkt_timestamp_ns(struct net_pkt *pkt)
1006 {
1007 ARG_UNUSED(pkt);
1008
1009 return 0;
1010 }
1011
1012 static inline void net_pkt_set_timestamp_ns(struct net_pkt *pkt, net_time_t timestamp)
1013 {
1014 ARG_UNUSED(pkt);
1015 ARG_UNUSED(timestamp);
1016 }
1017 #endif /* CONFIG_NET_PKT_TIMESTAMP || CONFIG_NET_PKT_TXTIME */
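
/* Usage sketch (illustrative): schedule a TX packet for transmission in the
 * future, as described for the timestamp field above. Here `now_ns` stands
 * for the current value of the network subsystem's local clock, obtained in
 * a driver- or PTP-specific way, and the 1 ms delay is an arbitrary example:
 *
 *	net_pkt_set_timestamp_ns(pkt, now_ns + 1000000);
 */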
1018
1019 #if defined(CONFIG_NET_PKT_RXTIME_STATS) || defined(CONFIG_NET_PKT_TXTIME_STATS)
1020 static inline uint32_t net_pkt_create_time(struct net_pkt *pkt)
1021 {
1022 return pkt->create_time;
1023 }
1024
1025 static inline void net_pkt_set_create_time(struct net_pkt *pkt,
1026 uint32_t create_time)
1027 {
1028 pkt->create_time = create_time;
1029 }
1030 #else
1031 static inline uint32_t net_pkt_create_time(struct net_pkt *pkt)
1032 {
1033 ARG_UNUSED(pkt);
1034
1035 return 0U;
1036 }
1037
1038 static inline void net_pkt_set_create_time(struct net_pkt *pkt,
1039 uint32_t create_time)
1040 {
1041 ARG_UNUSED(pkt);
1042 ARG_UNUSED(create_time);
1043 }
1044 #endif /* CONFIG_NET_PKT_RXTIME_STATS || CONFIG_NET_PKT_TXTIME_STATS */
1045
1046 /**
1047 * @deprecated Use @ref net_pkt_timestamp or @ref net_pkt_timestamp_ns instead.
1048 */
1049 static inline uint64_t net_pkt_txtime(struct net_pkt *pkt)
1050 {
1051 #if defined(CONFIG_NET_PKT_TXTIME)
1052 return pkt->timestamp.second * NSEC_PER_SEC + pkt->timestamp.nanosecond;
1053 #else
1054 ARG_UNUSED(pkt);
1055
1056 return 0;
1057 #endif /* CONFIG_NET_PKT_TXTIME */
1058 }
1059
1060 /**
1061 * @deprecated Use @ref net_pkt_set_timestamp or @ref net_pkt_set_timestamp_ns
1062 * instead.
1063 */
1064 static inline void net_pkt_set_txtime(struct net_pkt *pkt, uint64_t txtime)
1065 {
1066 #if defined(CONFIG_NET_PKT_TXTIME)
1067 pkt->timestamp.second = txtime / NSEC_PER_SEC;
1068 pkt->timestamp.nanosecond = txtime % NSEC_PER_SEC;
1069 #else
1070 ARG_UNUSED(pkt);
1071 ARG_UNUSED(txtime);
1072 #endif /* CONFIG_NET_PKT_TXTIME */
1073 }
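
/* Migration sketch (illustrative): the deprecated txtime helpers express the
 * time as a plain nanosecond count, which maps directly onto the
 * net_time_t based API:
 *
 *	// old, deprecated
 *	net_pkt_set_txtime(pkt, tx_ns);
 *
 *	// preferred replacement
 *	net_pkt_set_timestamp_ns(pkt, tx_ns);
 */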
1074
1075 #if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL) || \
1076 defined(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)
1077 static inline uint32_t *net_pkt_stats_tick(struct net_pkt *pkt)
1078 {
1079 return pkt->detail.stat;
1080 }
1081
1082 static inline int net_pkt_stats_tick_count(struct net_pkt *pkt)
1083 {
1084 return pkt->detail.count;
1085 }
1086
1087 static inline void net_pkt_stats_tick_reset(struct net_pkt *pkt)
1088 {
1089 memset(&pkt->detail, 0, sizeof(pkt->detail));
1090 }
1091
1092 static ALWAYS_INLINE void net_pkt_set_stats_tick(struct net_pkt *pkt,
1093 uint32_t tick)
1094 {
1095 if (pkt->detail.count >= NET_PKT_DETAIL_STATS_COUNT) {
1096 NET_ERR("Detail stats count overflow (%d >= %d)",
1097 pkt->detail.count, NET_PKT_DETAIL_STATS_COUNT);
1098 return;
1099 }
1100
1101 pkt->detail.stat[pkt->detail.count++] = tick;
1102 }
1103
1104 #define net_pkt_set_tx_stats_tick(pkt, tick) net_pkt_set_stats_tick(pkt, tick)
1105 #define net_pkt_set_rx_stats_tick(pkt, tick) net_pkt_set_stats_tick(pkt, tick)
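
/* Usage sketch (illustrative): the IP stack records processing-time samples
 * at fixed points with these macros; a custom instrumentation point could do
 * the same using the kernel cycle counter:
 *
 *	net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());
 */
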
1106 #else
1107 static inline uint32_t *net_pkt_stats_tick(struct net_pkt *pkt)
1108 {
1109 ARG_UNUSED(pkt);
1110
1111 return NULL;
1112 }
1113
1114 static inline int net_pkt_stats_tick_count(struct net_pkt *pkt)
1115 {
1116 ARG_UNUSED(pkt);
1117
1118 return 0;
1119 }
1120
1121 static inline void net_pkt_stats_tick_reset(struct net_pkt *pkt)
1122 {
1123 ARG_UNUSED(pkt);
1124 }
1125
1126 static inline void net_pkt_set_stats_tick(struct net_pkt *pkt, uint32_t tick)
1127 {
1128 ARG_UNUSED(pkt);
1129 ARG_UNUSED(tick);
1130 }
1131
1132 #define net_pkt_set_tx_stats_tick(pkt, tick)
1133 #define net_pkt_set_rx_stats_tick(pkt, tick)
1134 #endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL ||
1135 CONFIG_NET_PKT_RXTIME_STATS_DETAIL */
1136
1137 static inline size_t net_pkt_get_len(struct net_pkt *pkt)
1138 {
1139 return net_buf_frags_len(pkt->frags);
1140 }
1141
1142 static inline uint8_t *net_pkt_data(struct net_pkt *pkt)
1143 {
1144 return pkt->frags->data;
1145 }
1146
1147 static inline uint8_t *net_pkt_ip_data(struct net_pkt *pkt)
1148 {
1149 return pkt->frags->data;
1150 }
1151
1152 static inline bool net_pkt_is_empty(struct net_pkt *pkt)
1153 {
1154 return !pkt->buffer || !net_pkt_data(pkt) || pkt->buffer->len == 0;
1155 }
1156
1157 static inline struct net_linkaddr *net_pkt_lladdr_src(struct net_pkt *pkt)
1158 {
1159 return &pkt->lladdr_src;
1160 }
1161
1162 static inline struct net_linkaddr *net_pkt_lladdr_dst(struct net_pkt *pkt)
1163 {
1164 return &pkt->lladdr_dst;
1165 }
1166
1167 static inline void net_pkt_lladdr_swap(struct net_pkt *pkt)
1168 {
1169 uint8_t *addr = net_pkt_lladdr_src(pkt)->addr;
1170
1171 net_pkt_lladdr_src(pkt)->addr = net_pkt_lladdr_dst(pkt)->addr;
1172 net_pkt_lladdr_dst(pkt)->addr = addr;
1173 }
1174
1175 static inline void net_pkt_lladdr_clear(struct net_pkt *pkt)
1176 {
1177 net_pkt_lladdr_src(pkt)->addr = NULL;
1178 net_pkt_lladdr_src(pkt)->len = 0U;
1179 }
1180
1181 static inline uint16_t net_pkt_ll_proto_type(struct net_pkt *pkt)
1182 {
1183 return pkt->ll_proto_type;
1184 }
1185
1186 static inline void net_pkt_set_ll_proto_type(struct net_pkt *pkt, uint16_t type)
1187 {
1188 pkt->ll_proto_type = type;
1189 }
1190
1191 #if defined(CONFIG_NET_IPV4_AUTO)
1192 static inline bool net_pkt_ipv4_auto(struct net_pkt *pkt)
1193 {
1194 return !!(pkt->ipv4_auto_arp_msg);
1195 }
1196
1197 static inline void net_pkt_set_ipv4_auto(struct net_pkt *pkt,
1198 bool is_auto_arp_msg)
1199 {
1200 pkt->ipv4_auto_arp_msg = is_auto_arp_msg;
1201 }
1202 #else /* CONFIG_NET_IPV4_AUTO */
1203 static inline bool net_pkt_ipv4_auto(struct net_pkt *pkt)
1204 {
1205 ARG_UNUSED(pkt);
1206
1207 return false;
1208 }
1209
1210 static inline void net_pkt_set_ipv4_auto(struct net_pkt *pkt,
1211 bool is_auto_arp_msg)
1212 {
1213 ARG_UNUSED(pkt);
1214 ARG_UNUSED(is_auto_arp_msg);
1215 }
1216 #endif /* CONFIG_NET_IPV4_AUTO */
1217
1218 #if defined(CONFIG_NET_LLDP)
1219 static inline bool net_pkt_is_lldp(struct net_pkt *pkt)
1220 {
1221 return !!(pkt->lldp_pkt);
1222 }
1223
1224 static inline void net_pkt_set_lldp(struct net_pkt *pkt, bool is_lldp)
1225 {
1226 pkt->lldp_pkt = is_lldp;
1227 }
1228 #else
1229 static inline bool net_pkt_is_lldp(struct net_pkt *pkt)
1230 {
1231 ARG_UNUSED(pkt);
1232
1233 return false;
1234 }
1235
1236 static inline void net_pkt_set_lldp(struct net_pkt *pkt, bool is_lldp)
1237 {
1238 ARG_UNUSED(pkt);
1239 ARG_UNUSED(is_lldp);
1240 }
1241 #endif /* CONFIG_NET_LLDP */
1242
1243 #if defined(CONFIG_NET_L2_PPP)
1244 static inline bool net_pkt_is_ppp(struct net_pkt *pkt)
1245 {
1246 return !!(pkt->ppp_msg);
1247 }
1248
1249 static inline void net_pkt_set_ppp(struct net_pkt *pkt,
1250 bool is_ppp_msg)
1251 {
1252 pkt->ppp_msg = is_ppp_msg;
1253 }
1254 #else /* CONFIG_NET_L2_PPP */
1255 static inline bool net_pkt_is_ppp(struct net_pkt *pkt)
1256 {
1257 ARG_UNUSED(pkt);
1258
1259 return false;
1260 }
1261
1262 static inline void net_pkt_set_ppp(struct net_pkt *pkt,
1263 bool is_ppp_msg)
1264 {
1265 ARG_UNUSED(pkt);
1266 ARG_UNUSED(is_ppp_msg);
1267 }
1268 #endif /* CONFIG_NET_L2_PPP */
1269
1270 #if defined(NET_PKT_HAS_CONTROL_BLOCK)
1271 static inline void *net_pkt_cb(struct net_pkt *pkt)
1272 {
1273 return &pkt->cb;
1274 }
1275 #else
1276 static inline void *net_pkt_cb(struct net_pkt *pkt)
1277 {
1278 ARG_UNUSED(pkt);
1279
1280 return NULL;
1281 }
1282 #endif
1283
1284 #define NET_IPV6_HDR(pkt) ((struct net_ipv6_hdr *)net_pkt_ip_data(pkt))
1285 #define NET_IPV4_HDR(pkt) ((struct net_ipv4_hdr *)net_pkt_ip_data(pkt))
1286
1287 static inline void net_pkt_set_src_ipv6_addr(struct net_pkt *pkt)
1288 {
1289 net_if_ipv6_select_src_addr(net_context_get_iface(
1290 net_pkt_context(pkt)),
1291 (struct in6_addr *)NET_IPV6_HDR(pkt)->src);
1292 }
1293
1294 static inline void net_pkt_set_overwrite(struct net_pkt *pkt, bool overwrite)
1295 {
1296 pkt->overwrite = overwrite;
1297 }
1298
1299 static inline bool net_pkt_is_being_overwritten(struct net_pkt *pkt)
1300 {
1301 return !!(pkt->overwrite);
1302 }
1303
1304 #ifdef CONFIG_NET_PKT_FILTER
1305
1306 bool net_pkt_filter_send_ok(struct net_pkt *pkt);
1307 bool net_pkt_filter_recv_ok(struct net_pkt *pkt);
1308
1309 #else
1310
1311 static inline bool net_pkt_filter_send_ok(struct net_pkt *pkt)
1312 {
1313 ARG_UNUSED(pkt);
1314
1315 return true;
1316 }
1317
1318 static inline bool net_pkt_filter_recv_ok(struct net_pkt *pkt)
1319 {
1320 ARG_UNUSED(pkt);
1321
1322 return true;
1323 }
1324
1325 #endif /* CONFIG_NET_PKT_FILTER */
1326
1327 #if defined(CONFIG_NET_PKT_FILTER) && \
1328 (defined(CONFIG_NET_PKT_FILTER_IPV4_HOOK) || defined(CONFIG_NET_PKT_FILTER_IPV6_HOOK))
1329
1330 bool net_pkt_filter_ip_recv_ok(struct net_pkt *pkt);
1331
1332 #else
1333
1334 static inline bool net_pkt_filter_ip_recv_ok(struct net_pkt *pkt)
1335 {
1336 ARG_UNUSED(pkt);
1337
1338 return true;
1339 }
1340
1341 #endif /* CONFIG_NET_PKT_FILTER_IPV4_HOOK || CONFIG_NET_PKT_FILTER_IPV6_HOOK */
1342
1343 #if defined(CONFIG_NET_PKT_FILTER) && defined(CONFIG_NET_PKT_FILTER_LOCAL_IN_HOOK)
1344
1345 bool net_pkt_filter_local_in_recv_ok(struct net_pkt *pkt);
1346
1347 #else
1348
1349 static inline bool net_pkt_filter_local_in_recv_ok(struct net_pkt *pkt)
1350 {
1351 ARG_UNUSED(pkt);
1352
1353 return true;
1354 }
1355
1356 #endif /* CONFIG_NET_PKT_FILTER && CONFIG_NET_PKT_FILTER_LOCAL_IN_HOOK */
1357
1358 /* @endcond */
1359
1360 /**
1361 * @brief Create a net_pkt slab
1362 *
1363 * A net_pkt slab is used to store meta-information about
1364 * network packets. It must be coupled with a data fragment pool
1365 * (@ref NET_PKT_DATA_POOL_DEFINE) used to store the actual
1366 * packet data. The macro can be used by an application to define
1367 * additional custom per-context TX packet slabs (see
1368 * net_context_setup_pools()).
1369 *
1370 * @param name Name of the slab.
1371 * @param count Number of net_pkt in this slab.
1372 */
1373 #define NET_PKT_SLAB_DEFINE(name, count) \
1374 K_MEM_SLAB_DEFINE(name, sizeof(struct net_pkt), count, 4)
1375
1376 /* Backward compatibility macro */
1377 #define NET_PKT_TX_SLAB_DEFINE(name, count) NET_PKT_SLAB_DEFINE(name, count)
1378
1379 /**
1380 * @brief Create a data fragment net_buf pool
1381 *
1382 * A net_buf pool is used to store actual data for
1383 * network packets. It must be coupled with a net_pkt slab
1384 * (@ref NET_PKT_SLAB_DEFINE) used to store the packet
1385 * meta-information. The macro can be used by an application to
1386 * define additional custom per-context TX packet pools (see
1387 * net_context_setup_pools()).
1388 *
1389 * @param name Name of the pool.
1390 * @param count Number of net_buf in this pool.
1391 */
1392 #define NET_PKT_DATA_POOL_DEFINE(name, count) \
1393 NET_BUF_POOL_DEFINE(name, count, CONFIG_NET_BUF_DATA_SIZE, \
1394 0, NULL)
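
/* Example (illustrative, names are placeholders): a matching slab/pool pair
 * that an application could register for its own TX context via
 * net_context_setup_pools():
 *
 *	NET_PKT_SLAB_DEFINE(app_tx_pkts, 16);
 *	NET_PKT_DATA_POOL_DEFINE(app_tx_data, 32);
 */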
1395
1396 /** @cond INTERNAL_HIDDEN */
1397
1398 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC) || \
1399 (CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG)
1400 #define NET_PKT_DEBUG_ENABLED
1401 #endif
1402
1403 #if defined(NET_PKT_DEBUG_ENABLED)
1404
1405 /* Debug versions of the net_pkt functions that are used when tracking
1406 * buffer usage.
1407 */
1408
1409 struct net_buf *net_pkt_get_reserve_data_debug(struct net_buf_pool *pool,
1410 size_t min_len,
1411 k_timeout_t timeout,
1412 const char *caller,
1413 int line);
1414
1415 #define net_pkt_get_reserve_data(pool, min_len, timeout) \
1416 net_pkt_get_reserve_data_debug(pool, min_len, timeout, __func__, __LINE__)
1417
1418 struct net_buf *net_pkt_get_reserve_rx_data_debug(size_t min_len,
1419 k_timeout_t timeout,
1420 const char *caller,
1421 int line);
1422 #define net_pkt_get_reserve_rx_data(min_len, timeout) \
1423 net_pkt_get_reserve_rx_data_debug(min_len, timeout, __func__, __LINE__)
1424
1425 struct net_buf *net_pkt_get_reserve_tx_data_debug(size_t min_len,
1426 k_timeout_t timeout,
1427 const char *caller,
1428 int line);
1429 #define net_pkt_get_reserve_tx_data(min_len, timeout) \
1430 net_pkt_get_reserve_tx_data_debug(min_len, timeout, __func__, __LINE__)
1431
1432 struct net_buf *net_pkt_get_frag_debug(struct net_pkt *pkt, size_t min_len,
1433 k_timeout_t timeout,
1434 const char *caller, int line);
1435 #define net_pkt_get_frag(pkt, min_len, timeout) \
1436 net_pkt_get_frag_debug(pkt, min_len, timeout, __func__, __LINE__)
1437
1438 void net_pkt_unref_debug(struct net_pkt *pkt, const char *caller, int line);
1439 #define net_pkt_unref(pkt) net_pkt_unref_debug(pkt, __func__, __LINE__)
1440
1441 struct net_pkt *net_pkt_ref_debug(struct net_pkt *pkt, const char *caller,
1442 int line);
1443 #define net_pkt_ref(pkt) net_pkt_ref_debug(pkt, __func__, __LINE__)
1444
1445 struct net_buf *net_pkt_frag_ref_debug(struct net_buf *frag,
1446 const char *caller, int line);
1447 #define net_pkt_frag_ref(frag) net_pkt_frag_ref_debug(frag, __func__, __LINE__)
1448
1449 void net_pkt_frag_unref_debug(struct net_buf *frag,
1450 const char *caller, int line);
1451 #define net_pkt_frag_unref(frag) \
1452 net_pkt_frag_unref_debug(frag, __func__, __LINE__)
1453
1454 struct net_buf *net_pkt_frag_del_debug(struct net_pkt *pkt,
1455 struct net_buf *parent,
1456 struct net_buf *frag,
1457 const char *caller, int line);
1458 #define net_pkt_frag_del(pkt, parent, frag) \
1459 net_pkt_frag_del_debug(pkt, parent, frag, __func__, __LINE__)
1460
1461 void net_pkt_frag_add_debug(struct net_pkt *pkt, struct net_buf *frag,
1462 const char *caller, int line);
1463 #define net_pkt_frag_add(pkt, frag) \
1464 net_pkt_frag_add_debug(pkt, frag, __func__, __LINE__)
1465
1466 void net_pkt_frag_insert_debug(struct net_pkt *pkt, struct net_buf *frag,
1467 const char *caller, int line);
1468 #define net_pkt_frag_insert(pkt, frag) \
1469 net_pkt_frag_insert_debug(pkt, frag, __func__, __LINE__)
1470 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC ||
1471 * CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
1472 */
1473 /** @endcond */
1474
1475 /**
1476 * @brief Print fragment list and the fragment sizes
1477 *
1478 * @details Only available if debugging is activated.
1479 *
1480 * @param pkt Network pkt.
1481 */
1482 #if defined(NET_PKT_DEBUG_ENABLED)
1483 void net_pkt_print_frags(struct net_pkt *pkt);
1484 #else
1485 #define net_pkt_print_frags(pkt)
1486 #endif
1487
1488 /**
1489 * @brief Get RX DATA buffer from pool.
1490 * Normally you should use net_pkt_get_frag() instead.
1491 *
1492 * @details Normally this version is not useful for applications
1493 * but is mainly used by network fragmentation code.
1494 *
1495 * @param min_len Minimum length of the requested fragment.
1496 * @param timeout Affects the action taken should the net buf pool be empty.
1497 * If K_NO_WAIT, then return immediately. If K_FOREVER, then
1498 * wait as long as necessary. Otherwise, wait up to the specified time.
1499 *
1500 * @return Network buffer if successful, NULL otherwise.
1501 */
1502 #if !defined(NET_PKT_DEBUG_ENABLED)
1503 struct net_buf *net_pkt_get_reserve_rx_data(size_t min_len, k_timeout_t timeout);
1504 #endif
1505
1506 /**
1507 * @brief Get TX DATA buffer from pool.
1508 * Normally you should use net_pkt_get_frag() instead.
1509 *
1510 * @details Normally this version is not useful for applications
1511 * but is mainly used by network fragmentation code.
1512 *
1513 * @param min_len Minimum length of the requested fragment.
1514 * @param timeout Affects the action taken should the net buf pool be empty.
1515 * If K_NO_WAIT, then return immediately. If K_FOREVER, then
1516 * wait as long as necessary. Otherwise, wait up to the specified time.
1517 *
1518 * @return Network buffer if successful, NULL otherwise.
1519 */
1520 #if !defined(NET_PKT_DEBUG_ENABLED)
1521 struct net_buf *net_pkt_get_reserve_tx_data(size_t min_len, k_timeout_t timeout);
1522 #endif
1523
1524 /**
1525  * @brief Get a data fragment that might be from a user-specific
1526  * buffer pool or from the global DATA pool.
1527 *
1528 * @param pkt Network packet.
1529 * @param min_len Minimum length of the requested fragment.
1530 * @param timeout Affects the action taken should the net buf pool be empty.
1531 * If K_NO_WAIT, then return immediately. If K_FOREVER, then
1532 * wait as long as necessary. Otherwise, wait up to the specified time.
1533 *
1534 * @return Network buffer if successful, NULL otherwise.
1535 */
1536 #if !defined(NET_PKT_DEBUG_ENABLED)
1537 struct net_buf *net_pkt_get_frag(struct net_pkt *pkt, size_t min_len,
1538 k_timeout_t timeout);
1539 #endif
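
/* Usage sketch (illustrative): extend a packet with one more data fragment
 * taken from the appropriate pool and append it to the fragment list:
 *
 *	struct net_buf *frag;
 *
 *	frag = net_pkt_get_frag(pkt, CONFIG_NET_BUF_DATA_SIZE, K_MSEC(100));
 *	if (frag) {
 *		net_pkt_frag_add(pkt, frag);
 *	}
 */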
1540
1541 /**
1542 * @brief Place packet back into the available packets slab
1543 *
1544  * @details Releases the packet for other use. This needs to be
1545  * called by the application after it has finished with the packet.
1546 *
1547 * @param pkt Network packet to release.
1548 *
1549 */
1550 #if !defined(NET_PKT_DEBUG_ENABLED)
1551 void net_pkt_unref(struct net_pkt *pkt);
1552 #endif
1553
1554 /**
1555 * @brief Increase the packet ref count
1556 *
1557  * @details Mark the packet as still being in use.
1558 *
1559 * @param pkt Network packet to ref.
1560 *
1561 * @return Network packet if successful, NULL otherwise.
1562 */
1563 #if !defined(NET_PKT_DEBUG_ENABLED)
1564 struct net_pkt *net_pkt_ref(struct net_pkt *pkt);
1565 #endif
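
/* Usage sketch (illustrative): take an extra reference before handing the
 * packet to another thread or queue, and drop it when done, so that a
 * net_pkt_unref() elsewhere cannot free the packet prematurely:
 *
 *	net_pkt_ref(pkt);
 *	// ... pass pkt to a worker thread or queue ...
 *	net_pkt_unref(pkt);
 */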
1566
1567 /**
1568 * @brief Increase the packet fragment ref count
1569 *
1570  * @details Mark the fragment as still being in use.
1571 *
1572 * @param frag Network fragment to ref.
1573 *
1574  * @return Pointer to the referenced network fragment.
1575 */
1576 #if !defined(NET_PKT_DEBUG_ENABLED)
1577 struct net_buf *net_pkt_frag_ref(struct net_buf *frag);
1578 #endif
1579
1580 /**
1581 * @brief Decrease the packet fragment ref count
1582 *
1583 * @param frag Network fragment to unref.
1584 */
1585 #if !defined(NET_PKT_DEBUG_ENABLED)
1586 void net_pkt_frag_unref(struct net_buf *frag);
1587 #endif
1588
1589 /**
1590 * @brief Delete existing fragment from a packet
1591 *
1592  * @param pkt Network packet to which the fragment belongs.
1593 * @param parent parent fragment of frag, or NULL if none.
1594 * @param frag Fragment to delete.
1595 *
1596 * @return Pointer to the following fragment, or NULL if it had no
1597 * further fragments.
1598 */
1599 #if !defined(NET_PKT_DEBUG_ENABLED)
1600 struct net_buf *net_pkt_frag_del(struct net_pkt *pkt,
1601 struct net_buf *parent,
1602 struct net_buf *frag);
1603 #endif
1604
1605 /**
1606 * @brief Add a fragment to a packet at the end of its fragment list
1607 *
1608  * @param pkt Network packet to which the fragment is added
1609 * @param frag Fragment to add
1610 */
1611 #if !defined(NET_PKT_DEBUG_ENABLED)
1612 void net_pkt_frag_add(struct net_pkt *pkt, struct net_buf *frag);
1613 #endif
1614
1615 /**
1616 * @brief Insert a fragment to a packet at the beginning of its fragment list
1617 *
1618  * @param pkt Network packet into which the fragment is inserted
1619 * @param frag Fragment to insert
1620 */
1621 #if !defined(NET_PKT_DEBUG_ENABLED)
1622 void net_pkt_frag_insert(struct net_pkt *pkt, struct net_buf *frag);
1623 #endif
1624
1625 /**
1626 * @brief Compact the fragment list of a packet.
1627 *
1628  * @details After this there is no longer any free space in individual fragments.
1629 * @param pkt Network packet.
1630 */
1631 void net_pkt_compact(struct net_pkt *pkt);
1632
1633 /**
1634 * @brief Get information about predefined RX, TX and DATA pools.
1635 *
1636 * @param rx Pointer to RX pool is returned.
1637 * @param tx Pointer to TX pool is returned.
1638 * @param rx_data Pointer to RX DATA pool is returned.
1639 * @param tx_data Pointer to TX DATA pool is returned.
1640 */
1641 void net_pkt_get_info(struct k_mem_slab **rx,
1642 struct k_mem_slab **tx,
1643 struct net_buf_pool **rx_data,
1644 struct net_buf_pool **tx_data);
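
/* Usage sketch (illustrative): inspect how large the predefined pools are,
 * e.g. when tuning CONFIG_NET_PKT_RX_COUNT / CONFIG_NET_PKT_TX_COUNT:
 *
 *	struct k_mem_slab *rx, *tx;
 *	struct net_buf_pool *rx_data, *tx_data;
 *
 *	net_pkt_get_info(&rx, &tx, &rx_data, &tx_data);
 *	printk("RX pkt slabs free: %u\n", k_mem_slab_num_free_get(rx));
 */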
1645
1646 /** @cond INTERNAL_HIDDEN */
1647
1648 #if defined(CONFIG_NET_DEBUG_NET_PKT_ALLOC)
1649 /**
1650 * @brief Debug helper to print out the buffer allocations
1651 */
1652 void net_pkt_print(void);
1653
1654 typedef void (*net_pkt_allocs_cb_t)(struct net_pkt *pkt,
1655 struct net_buf *buf,
1656 const char *func_alloc,
1657 int line_alloc,
1658 const char *func_free,
1659 int line_free,
1660 bool in_use,
1661 void *user_data);
1662
1663 void net_pkt_allocs_foreach(net_pkt_allocs_cb_t cb, void *user_data);
1664
1665 const char *net_pkt_slab2str(struct k_mem_slab *slab);
1666 const char *net_pkt_pool2str(struct net_buf_pool *pool);
1667
1668 #else
1669 #define net_pkt_print(...)
1670 #endif /* CONFIG_NET_DEBUG_NET_PKT_ALLOC */
1671
1672 /* The new allocator and API are defined below.
1673  * This will be simpler when the time comes to get rid of the former API above.
1674 */
1675 #if defined(NET_PKT_DEBUG_ENABLED)
1676
1677 struct net_pkt *net_pkt_alloc_debug(k_timeout_t timeout,
1678 const char *caller, int line);
1679 #define net_pkt_alloc(_timeout) \
1680 net_pkt_alloc_debug(_timeout, __func__, __LINE__)
1681
1682 struct net_pkt *net_pkt_alloc_from_slab_debug(struct k_mem_slab *slab,
1683 k_timeout_t timeout,
1684 const char *caller, int line);
1685 #define net_pkt_alloc_from_slab(_slab, _timeout) \
1686 net_pkt_alloc_from_slab_debug(_slab, _timeout, __func__, __LINE__)
1687
1688 struct net_pkt *net_pkt_rx_alloc_debug(k_timeout_t timeout,
1689 const char *caller, int line);
1690 #define net_pkt_rx_alloc(_timeout) \
1691 net_pkt_rx_alloc_debug(_timeout, __func__, __LINE__)
1692
1693 struct net_pkt *net_pkt_alloc_on_iface_debug(struct net_if *iface,
1694 k_timeout_t timeout,
1695 const char *caller,
1696 int line);
1697 #define net_pkt_alloc_on_iface(_iface, _timeout) \
1698 net_pkt_alloc_on_iface_debug(_iface, _timeout, __func__, __LINE__)
1699
1700 struct net_pkt *net_pkt_rx_alloc_on_iface_debug(struct net_if *iface,
1701 k_timeout_t timeout,
1702 const char *caller,
1703 int line);
1704 #define net_pkt_rx_alloc_on_iface(_iface, _timeout) \
1705 net_pkt_rx_alloc_on_iface_debug(_iface, _timeout, \
1706 __func__, __LINE__)
1707
1708 int net_pkt_alloc_buffer_debug(struct net_pkt *pkt,
1709 size_t size,
1710 enum net_ip_protocol proto,
1711 k_timeout_t timeout,
1712 const char *caller, int line);
1713 #define net_pkt_alloc_buffer(_pkt, _size, _proto, _timeout) \
1714 net_pkt_alloc_buffer_debug(_pkt, _size, _proto, _timeout, \
1715 __func__, __LINE__)
1716
1717 int net_pkt_alloc_buffer_raw_debug(struct net_pkt *pkt, size_t size,
1718 k_timeout_t timeout,
1719 const char *caller, int line);
1720 #define net_pkt_alloc_buffer_raw(_pkt, _size, _timeout) \
1721 net_pkt_alloc_buffer_raw_debug(_pkt, _size, _timeout, \
1722 __func__, __LINE__)
1723
1724 struct net_pkt *net_pkt_alloc_with_buffer_debug(struct net_if *iface,
1725 size_t size,
1726 sa_family_t family,
1727 enum net_ip_protocol proto,
1728 k_timeout_t timeout,
1729 const char *caller,
1730 int line);
1731 #define net_pkt_alloc_with_buffer(_iface, _size, _family, \
1732 _proto, _timeout) \
1733 net_pkt_alloc_with_buffer_debug(_iface, _size, _family, \
1734 _proto, _timeout, \
1735 __func__, __LINE__)
1736
1737 struct net_pkt *net_pkt_rx_alloc_with_buffer_debug(struct net_if *iface,
1738 size_t size,
1739 sa_family_t family,
1740 enum net_ip_protocol proto,
1741 k_timeout_t timeout,
1742 const char *caller,
1743 int line);
1744 #define net_pkt_rx_alloc_with_buffer(_iface, _size, _family, \
1745 _proto, _timeout) \
1746 net_pkt_rx_alloc_with_buffer_debug(_iface, _size, _family, \
1747 _proto, _timeout, \
1748 __func__, __LINE__)
1749 #endif /* NET_PKT_DEBUG_ENABLED */
1750 /** @endcond */
1751
1752 /**
1753 * @brief Allocate an initialized net_pkt
1754 *
1755 * @details For the time being, two pools are used: one for TX and one for RX.
1756 * This allocator has to be used for TX.
1757 *
1758 * @param timeout Maximum time to wait for an allocation.
1759 *
1760 * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1761 */
1762 #if !defined(NET_PKT_DEBUG_ENABLED)
1763 struct net_pkt *net_pkt_alloc(k_timeout_t timeout);
1764 #endif
1765
1766 /**
1767 * @brief Allocate an initialized net_pkt from a specific slab
1768 *
1769 * @details unlike net_pkt_alloc() which uses core slabs, this one will use
1770 * an external slab (see NET_PKT_SLAB_DEFINE()).
1771 * Do _not_ use it unless you know what you are doing. Basically, only
1772 * net_context should be using this, in order to allocate a packet and
1773 * then a buffer from its local slab/pool (if any).
1774 *
1775 * @param slab The slab to use for allocating the packet
1776 * @param timeout Maximum time to wait for an allocation.
1777 *
1778 * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1779 */
1780 #if !defined(NET_PKT_DEBUG_ENABLED)
1781 struct net_pkt *net_pkt_alloc_from_slab(struct k_mem_slab *slab,
1782 k_timeout_t timeout);
1783 #endif
1784
1785 /**
1786 * @brief Allocate an initialized net_pkt for RX
1787 *
1788 * @details For the time being, two pools are used: one for TX and one for RX.
1789 * This allocator has to be used for RX.
1790 *
1791 * @param timeout Maximum time to wait for an allocation.
1792 *
1793 * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1794 */
1795 #if !defined(NET_PKT_DEBUG_ENABLED)
1796 struct net_pkt *net_pkt_rx_alloc(k_timeout_t timeout);
1797 #endif
1798
1799 /**
1800 * @brief Allocate a network packet for a specific network interface.
1801 *
1802 * @param iface The network interface the packet is supposed to go through.
1803 * @param timeout Maximum time to wait for an allocation.
1804 *
1805 * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1806 */
1807 #if !defined(NET_PKT_DEBUG_ENABLED)
1808 struct net_pkt *net_pkt_alloc_on_iface(struct net_if *iface,
1809 k_timeout_t timeout);
1810
1811 /* Same as above but specifically for RX packet */
1812 struct net_pkt *net_pkt_rx_alloc_on_iface(struct net_if *iface,
1813 k_timeout_t timeout);
1814 #endif
1815
1816 /**
1817 * @brief Allocate buffer for a net_pkt
1818 *
1819 * @details This allocator takes into account the space necessary for headers,
1820 * the MTU, and any existing buffer. Beware that, due to all these
1821 * criteria, the allocated size might be smaller or bigger than the
1822 * requested one.
1823 *
1824 * @param pkt The network packet requiring buffer to be allocated.
1825 * @param size The size of buffer being requested.
1826 * @param proto The IP protocol type (can be 0 for none).
1827 * @param timeout Maximum time to wait for an allocation.
1828 *
1829 * @return 0 on success, negative errno code otherwise.
1830 */
1831 #if !defined(NET_PKT_DEBUG_ENABLED)
1832 int net_pkt_alloc_buffer(struct net_pkt *pkt,
1833 size_t size,
1834 enum net_ip_protocol proto,
1835 k_timeout_t timeout);
1836 #endif
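/* An illustrative sketch (assumptions: a valid `iface`, AF_INET/UDP traffic
 * and a 64-byte payload, all chosen only for the example) of the two-step
 * allocation: first the net_pkt for a given interface, then its buffer sized
 * according to the protocol in use.
 *
 * @code{.c}
 * struct net_pkt *pkt = net_pkt_alloc_on_iface(iface, K_MSEC(100));
 *
 * if (pkt == NULL) {
 *         return -ENOMEM;
 * }
 *
 * net_pkt_set_family(pkt, AF_INET);
 *
 * if (net_pkt_alloc_buffer(pkt, 64, IPPROTO_UDP, K_MSEC(100)) < 0) {
 *         net_pkt_unref(pkt);
 *         return -ENOMEM;
 * }
 * @endcode
 */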
1837
1838 /**
1839 * @brief Allocate buffer for a net_pkt, of specified size, w/o any additional
1840 * preconditions
1841 *
1842 * @details The actual buffer size may be larger than the requested one if
1843 * fixed-size buffers are in use.
1844 *
1845 * @param pkt The network packet requiring buffer to be allocated.
1846 * @param size The size of buffer being requested.
1847 * @param timeout Maximum time to wait for an allocation.
1848 *
1849 * @return 0 on success, negative errno code otherwise.
1850 */
1851 #if !defined(NET_PKT_DEBUG_ENABLED)
1852 int net_pkt_alloc_buffer_raw(struct net_pkt *pkt, size_t size,
1853 k_timeout_t timeout);
1854 #endif
1855
1856 /**
1857 * @brief Allocate a network packet and buffer at once
1858 *
1859 * @param iface The network interface the packet is supposed to go through.
1860 * @param size The size of buffer.
1861 * @param family The family to which the packet belongs.
1862 * @param proto The IP protocol type (can be 0 for none).
1863 * @param timeout Maximum time to wait for an allocation.
1864 *
1865 * @return a pointer to a newly allocated net_pkt on success, NULL otherwise.
1866 */
1867 #if !defined(NET_PKT_DEBUG_ENABLED)
1868 struct net_pkt *net_pkt_alloc_with_buffer(struct net_if *iface,
1869 size_t size,
1870 sa_family_t family,
1871 enum net_ip_protocol proto,
1872 k_timeout_t timeout);
1873
1874 /* Same as above but specifically for RX packet */
1875 struct net_pkt *net_pkt_rx_alloc_with_buffer(struct net_if *iface,
1876 size_t size,
1877 sa_family_t family,
1878 enum net_ip_protocol proto,
1879 k_timeout_t timeout);
1880 #endif
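/* An illustrative sketch of the one-step allocation followed by a payload
 * write. The default interface, AF_INET/UDP and the payload bytes are
 * assumptions made only for the example.
 *
 * @code{.c}
 * static const uint8_t payload[] = { 0xde, 0xad, 0xbe, 0xef };
 * struct net_pkt *pkt;
 *
 * pkt = net_pkt_alloc_with_buffer(net_if_get_default(), sizeof(payload),
 *                                 AF_INET, IPPROTO_UDP, K_MSEC(100));
 * if (pkt == NULL) {
 *         return -ENOMEM;
 * }
 *
 * if (net_pkt_write(pkt, payload, sizeof(payload)) < 0) {
 *         net_pkt_unref(pkt);
 *         return -ENOMEM;
 * }
 * @endcode
 */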
1881
1882 /**
1883 * @brief Append a buffer to a packet
1884 *
1885 * @param pkt Network packet where to append the buffer
1886 * @param buffer Buffer to append
1887 */
1888 void net_pkt_append_buffer(struct net_pkt *pkt, struct net_buf *buffer);
1889
1890 /**
1891 * @brief Get available buffer space from a pkt
1892 *
1893 * @note Reserved bytes (headroom) in any of the fragments are not considered to
1894 * be available.
1895 *
1896 * @param pkt The net_pkt whose buffer availability should be evaluated
1897 *
1898 * @return the amount of buffer available
1899 */
1900 size_t net_pkt_available_buffer(struct net_pkt *pkt);
1901
1902 /**
1903 * @brief Get available buffer space for payload from a pkt
1904 *
1905 * @note Reserved bytes (headroom) in any of the fragments are not considered to
1906 * be available.
1907 *
1908 * @details Unlike net_pkt_available_buffer(), this one takes the header
1909 * space into account.
1910 *
1911 * @param pkt The net_pkt whose payload buffer availability should
1912 * be evaluated
1913 * @param proto The IP protocol type (can be 0 for none).
1914 *
1915 * @return the amount of buffer available for payload
1916 */
1917 size_t net_pkt_available_payload_buffer(struct net_pkt *pkt,
1918 enum net_ip_protocol proto);
1919
1920 /**
1921 * @brief Trim net_pkt buffer
1922 *
1923 * @details This checks for unused buffers and deallocates them as
1924 * appropriate.
1925 *
1926 * @param pkt The net_pkt whose buffer will be trimmed
1927 */
1928 void net_pkt_trim_buffer(struct net_pkt *pkt);
1929
1930 /**
1931 * @brief Remove @a length bytes from tail of packet
1932 *
1933 * @details This function does not take packet cursor into account. It is a
1934 * helper to remove unneeded bytes from tail of packet (like appended
1935 * CRC). It takes care of buffer deallocation if removed bytes span
1936 * whole buffer(s).
1937 *
1938 * @param pkt Network packet
1939 * @param length Number of bytes to be removed
1940 *
1941 * @retval 0 On success.
1942 * @retval -EINVAL If packet length is shorter than @a length.
1943 */
1944 int net_pkt_remove_tail(struct net_pkt *pkt, size_t length);
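/* An illustrative sketch: dropping a trailing 4-byte CRC that a driver left
 * at the end of a received packet. The CRC length is an assumption made only
 * for the example.
 *
 * @code{.c}
 * if (net_pkt_remove_tail(pkt, 4) < 0) {
 *         return -EINVAL; // packet shorter than the trailing CRC
 * }
 * @endcode
 */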
1945
1946 /**
1947 * @brief Initialize net_pkt cursor
1948 *
1949 * @details This will initialize the net_pkt cursor from its buffer.
1950 *
1951 * @param pkt The net_pkt whose cursor is going to be initialized
1952 */
1953 void net_pkt_cursor_init(struct net_pkt *pkt);
1954
1955 /**
1956 * @brief Backup net_pkt cursor
1957 *
1958 * @param pkt The net_pkt whose cursor is going to be backed up
1959 * @param backup The cursor where to backup net_pkt cursor
1960 */
1961 static inline void net_pkt_cursor_backup(struct net_pkt *pkt,
1962 struct net_pkt_cursor *backup)
1963 {
1964 backup->buf = pkt->cursor.buf;
1965 backup->pos = pkt->cursor.pos;
1966 }
1967
1968 /**
1969 * @brief Restore net_pkt cursor from a backup
1970 *
1971 * @param pkt The net_pkt whose cursor is going to be restored
1972 * @param backup The cursor from where to restore net_pkt cursor
1973 */
1974 static inline void net_pkt_cursor_restore(struct net_pkt *pkt,
1975 struct net_pkt_cursor *backup)
1976 {
1977 pkt->cursor.buf = backup->buf;
1978 pkt->cursor.pos = backup->pos;
1979 }
1980
1981 /**
1982 * @brief Returns current position of the cursor
1983 *
1984 * @param pkt The net_pkt whose cursor position is going to be returned
1985 *
1986 * @return cursor's position
1987 */
1988 static inline void *net_pkt_cursor_get_pos(struct net_pkt *pkt)
1989 {
1990 return pkt->cursor.pos;
1991 }
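/* An illustrative sketch: peeking at data without losing the current
 * position by backing up the cursor, reading, and then restoring it. The
 * 2-byte big-endian peek is an assumption made only for the example.
 *
 * @code{.c}
 * struct net_pkt_cursor backup;
 * uint16_t peeked;
 *
 * net_pkt_cursor_backup(pkt, &backup);
 *
 * if (net_pkt_read_be16(pkt, &peeked) == 0) {
 *         // use the peeked value here
 * }
 *
 * net_pkt_cursor_restore(pkt, &backup);
 * @endcode
 */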
1992
1993 /**
1994 * @brief Skip some data from a net_pkt
1995 *
1996 * @details net_pkt's cursor should be properly initialized
1997 * Cursor position will be updated after the operation.
1998 * Depending on the value of the pkt->overwrite bit, this function
1999 * may or may not affect the buffer length. If overwrite is true, it
2000 * only advances the cursor by the requested length. If it is false,
2001 * it does the same, but when the cursor is already at the end of the
2002 * existing data it also increments the buffer length. In that case
2003 * its behavior is just like net_pkt_write() or net_pkt_memset(), the
2004 * difference being that the buffer content itself is left untouched
2005 * (and may therefore be garbage).
2006 *
2007 * @param pkt The net_pkt whose cursor will be updated to skip given
2008 * amount of data from the buffer.
2009 * @param length Amount of data to skip in the buffer
2010 *
2011 * @return 0 on success, negative errno code otherwise.
2012 */
2013 int net_pkt_skip(struct net_pkt *pkt, size_t length);
2014
2015 /**
2016 * @brief Memset some data in a net_pkt
2017 *
2018 * @details net_pkt's cursor should be properly initialized and,
2019 * if needed, positioned using net_pkt_skip.
2020 * Cursor position will be updated after the operation.
2021 *
2022 * @param pkt The net_pkt whose buffer to fill starting at the current
2023 * cursor position.
2024 * @param byte The byte to write in memory
2025 * @param length Amount of data to memset with given byte
2026 *
2027 * @return 0 on success, negative errno code otherwise.
2028 */
2029 int net_pkt_memset(struct net_pkt *pkt, int byte, size_t length);
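/* An illustrative sketch: zero-filling a padding area at the current cursor
 * position. The 8-byte padding length is an assumption made only for the
 * example.
 *
 * @code{.c}
 * if (net_pkt_memset(pkt, 0, 8) < 0) {
 *         return -ENOBUFS;
 * }
 * @endcode
 */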
2030
2031 /**
2032 * @brief Copy data from a packet into another one.
2033 *
2034 * @details Both net_pkt cursors should be properly initialized and,
2035 * if needed, positioned using net_pkt_skip.
2036 * The cursors will be updated after the operation.
2037 *
2038 * @param pkt_dst Destination network packet.
2039 * @param pkt_src Source network packet.
2040 * @param length Length of data to be copied.
2041 *
2042 * @return 0 on success, negative errno code otherwise.
2043 */
2044 int net_pkt_copy(struct net_pkt *pkt_dst,
2045 struct net_pkt *pkt_src,
2046 size_t length);
2047
2048 /**
2049 * @brief Clone pkt and its buffer. The cloned packet will be allocated from
2050 * the same pool as the original one.
2051 *
2052 * @param pkt Original pkt to be cloned
2053 * @param timeout Timeout to wait for free buffer
2054 *
2055 * @return NULL if error, cloned packet otherwise.
2056 */
2057 struct net_pkt *net_pkt_clone(struct net_pkt *pkt, k_timeout_t timeout);
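/* An illustrative sketch: keeping a deep copy of a packet (for instance for
 * a retransmission queue) while the original is passed on. The timeout value
 * is an assumption made only for the example.
 *
 * @code{.c}
 * struct net_pkt *copy = net_pkt_clone(pkt, K_MSEC(100));
 *
 * if (copy == NULL) {
 *         // allocation failed; proceed without keeping a copy
 * }
 * @endcode
 */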
2058
2059 /**
2060 * @brief Clone pkt and its buffer. The cloned packet will be allocated from
2061 * the RX packet pool.
2062 *
2063 * @param pkt Original pkt to be cloned
2064 * @param timeout Timeout to wait for free buffer
2065 *
2066 * @return NULL if error, cloned packet otherwise.
2067 */
2068 struct net_pkt *net_pkt_rx_clone(struct net_pkt *pkt, k_timeout_t timeout);
2069
2070 /**
2071 * @brief Clone pkt and increase the refcount of its buffer.
2072 *
2073 * @param pkt Original pkt to be shallow cloned
2074 * @param timeout Timeout to wait for free packet
2075 *
2076 * @return NULL if error, cloned packet otherwise.
2077 */
2078 struct net_pkt *net_pkt_shallow_clone(struct net_pkt *pkt,
2079 k_timeout_t timeout);
2080
2081 /**
2082 * @brief Read some data from a net_pkt
2083 *
2084 * @details net_pkt's cursor should be properly initialized and,
2085 * if needed, positioned using net_pkt_skip.
2086 * Cursor position will be updated after the operation.
2087 *
2088 * @param pkt The network packet from where to read some data
2089 * @param data The destination buffer where to copy the data
2090 * @param length The amount of data to copy
2091 *
2092 * @return 0 on success, negative errno code otherwise.
2093 */
2094 int net_pkt_read(struct net_pkt *pkt, void *data, size_t length);
2095
2096 /* Read uint8_t data from a net_pkt. */
2097 static inline int net_pkt_read_u8(struct net_pkt *pkt, uint8_t *data)
2098 {
2099 return net_pkt_read(pkt, data, 1);
2100 }
2101
2102 /**
2103 * @brief Read uint16_t big endian data from a net_pkt
2104 *
2105 * @details net_pkt's cursor should be properly initialized and,
2106 * if needed, positioned using net_pkt_skip.
2107 * Cursor position will be updated after the operation.
2108 *
2109 * @param pkt The network packet from where to read
2110 * @param data The destination uint16_t where to copy the data
2111 *
2112 * @return 0 on success, negative errno code otherwise.
2113 */
2114 int net_pkt_read_be16(struct net_pkt *pkt, uint16_t *data);
2115
2116 /**
2117 * @brief Read uint16_t little endian data from a net_pkt
2118 *
2119 * @details net_pkt's cursor should be properly initialized and,
2120 * if needed, positioned using net_pkt_skip.
2121 * Cursor position will be updated after the operation.
2122 *
2123 * @param pkt The network packet from where to read
2124 * @param data The destination uint16_t where to copy the data
2125 *
2126 * @return 0 on success, negative errno code otherwise.
2127 */
2128 int net_pkt_read_le16(struct net_pkt *pkt, uint16_t *data);
2129
2130 /**
2131 * @brief Read uint32_t big endian data from a net_pkt
2132 *
2133 * @details net_pkt's cursor should be properly initialized and,
2134 * if needed, positioned using net_pkt_skip.
2135 * Cursor position will be updated after the operation.
2136 *
2137 * @param pkt The network packet from where to read
2138 * @param data The destination uint32_t where to copy the data
2139 *
2140 * @return 0 on success, negative errno code otherwise.
2141 */
2142 int net_pkt_read_be32(struct net_pkt *pkt, uint32_t *data);
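/* An illustrative sketch: parsing a few fields from a received packet with
 * the read helpers. The 14-byte skip (an Ethernet header) and the field
 * layout are assumptions made only for the example.
 *
 * @code{.c}
 * uint8_t version;
 * uint16_t len;
 *
 * net_pkt_cursor_init(pkt);
 * net_pkt_set_overwrite(pkt, true);
 *
 * if (net_pkt_skip(pkt, 14) < 0 ||
 *     net_pkt_read_u8(pkt, &version) < 0 ||
 *     net_pkt_read_be16(pkt, &len) < 0) {
 *         return -EMSGSIZE;
 * }
 * @endcode
 */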
2143
2144 /**
2145 * @brief Write data into a net_pkt
2146 *
2147 * @details net_pkt's cursor should be properly initialized and,
2148 * if needed, positioned using net_pkt_skip.
2149 * Cursor position will be updated after the operation.
2150 *
2151 * @param pkt The network packet where to write
2152 * @param data Data to be written
2153 * @param length Length of the data to be written
2154 *
2155 * @return 0 on success, negative errno code otherwise.
2156 */
2157 int net_pkt_write(struct net_pkt *pkt, const void *data, size_t length);
2158
2159 /* Write uint8_t data into a net_pkt. */
2160 static inline int net_pkt_write_u8(struct net_pkt *pkt, uint8_t data)
2161 {
2162 return net_pkt_write(pkt, &data, sizeof(uint8_t));
2163 }
2164
2165 /* Write uint16_t big endian data into a net_pkt. */
2166 static inline int net_pkt_write_be16(struct net_pkt *pkt, uint16_t data)
2167 {
2168 uint16_t data_be16 = htons(data);
2169
2170 return net_pkt_write(pkt, &data_be16, sizeof(uint16_t));
2171 }
2172
2173 /* Write uint32_t big endian data into a net_pkt. */
2174 static inline int net_pkt_write_be32(struct net_pkt *pkt, uint32_t data)
2175 {
2176 uint32_t data_be32 = htonl(data);
2177
2178 return net_pkt_write(pkt, &data_be32, sizeof(uint32_t));
2179 }
2180
2181 /* Write uint32_t little endian data into a net_pkt. */
2182 static inline int net_pkt_write_le32(struct net_pkt *pkt, uint32_t data)
2183 {
2184 uint32_t data_le32 = sys_cpu_to_le32(data);
2185
2186 return net_pkt_write(pkt, &data_le32, sizeof(uint32_t));
2187 }
2188
2189 /* Write uint16_t little endian data into a net_pkt. */
2190 static inline int net_pkt_write_le16(struct net_pkt *pkt, uint16_t data)
2191 {
2192 uint16_t data_le16 = sys_cpu_to_le16(data);
2193
2194 return net_pkt_write(pkt, &data_le16, sizeof(uint16_t));
2195 }
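/* An illustrative sketch: serializing a few fields into an already allocated
 * packet with the write helpers. The field values are assumptions made only
 * for the example.
 *
 * @code{.c}
 * if (net_pkt_write_u8(pkt, 0x45) < 0 ||
 *     net_pkt_write_be16(pkt, 1280) < 0 ||
 *     net_pkt_write_be32(pkt, 0x01020304) < 0) {
 *         return -ENOBUFS;
 * }
 * @endcode
 */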
2196
2197 /**
2198 * @brief Get the amount of data which can be read from current cursor position
2199 *
2200 * @param pkt Network packet
2201 *
2202 * @return Amount of data which can be read from current pkt cursor
2203 */
2204 size_t net_pkt_remaining_data(struct net_pkt *pkt);
2205
2206 /**
2207 * @brief Update the overall length of a packet
2208 *
2209 * @details Unlike net_pkt_pull() below, this does not take packet cursor
2210 * into account. It is mainly a helper dedicated to the IPv4 and IPv6
2211 * input functions. It shrinks the overall length to the given value.
2212 *
2213 * @param pkt Network packet
2214 * @param length The new length of the packet
2215 *
2216 * @return 0 on success, negative errno code otherwise.
2217 */
2218 int net_pkt_update_length(struct net_pkt *pkt, size_t length);
2219
2220 /**
2221 * @brief Remove data from the packet at current location
2222 *
2223 * @details net_pkt's cursor should be properly initialized and,
2224 * if needed, properly positioned using net_pkt_skip/read/write.
2225 * Note that net_pkt's cursor is reset by this function.
2226 *
2227 * @param pkt Network packet
2228 * @param length Number of bytes to be removed
2229 *
2230 * @return 0 on success, negative errno code otherwise.
2231 */
2232 int net_pkt_pull(struct net_pkt *pkt, size_t length);
2233
2234 /**
2235 * @brief Get the actual offset in the packet from its cursor
2236 *
2237 * @param pkt Network packet.
2238 *
2239 * @return a valid offset on success, 0 otherwise as there is nothing that
2240 * can be done to evaluate the offset.
2241 */
2242 uint16_t net_pkt_get_current_offset(struct net_pkt *pkt);
2243
2244 /**
2245 * @brief Check if a data size could fit contiguously
2246 *
2247 * @details net_pkt's cursor should be properly initialized and,
2248 * if needed, positioned using net_pkt_skip.
2249 *
2250 * @param pkt Network packet.
2251 * @param size The size to check for contiguity
2252 *
2253 * @return true if that is the case, false otherwise.
2254 */
2255 bool net_pkt_is_contiguous(struct net_pkt *pkt, size_t size);
2256
2257 /**
2258 * Get the contiguous buffer space
2259 *
2260 * @param pkt Network packet
2261 *
2262 * @return The available contiguous buffer space in bytes starting from the
2263 * current cursor position. 0 in case of an error.
2264 */
2265 size_t net_pkt_get_contiguous_len(struct net_pkt *pkt);
2266
2267 struct net_pkt_data_access {
2268 #if !defined(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)
2269 void *data;
2270 #endif
2271 const size_t size;
2272 };
2273
2274 #if defined(CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS)
2275 #define NET_PKT_DATA_ACCESS_DEFINE(_name, _type) \
2276 struct net_pkt_data_access _name = { \
2277 .size = sizeof(_type), \
2278 }
2279
2280 #define NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(_name, _type) \
2281 NET_PKT_DATA_ACCESS_DEFINE(_name, _type)
2282
2283 #else
2284 #define NET_PKT_DATA_ACCESS_DEFINE(_name, _type) \
2285 _type _hdr_##_name; \
2286 struct net_pkt_data_access _name = { \
2287 .data = &_hdr_##_name, \
2288 .size = sizeof(_type), \
2289 }
2290
2291 #define NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(_name, _type) \
2292 struct net_pkt_data_access _name = { \
2293 .data = NULL, \
2294 .size = sizeof(_type), \
2295 }
2296
2297 #endif /* CONFIG_NET_HEADERS_ALWAYS_CONTIGUOUS */
2298
2299 /**
2300 * @brief Get data from a network packet in a contiguous way
2301 *
2302 * @details net_pkt's cursor should be properly initialized and,
2303 * if needed, positioned using net_pkt_skip. Unlike other functions,
2304 * cursor position will not be updated after the operation.
2305 *
2306 * @param pkt The network packet from where to get the data.
2307 * @param access A pointer to a valid net_pkt_data_access describing the
2308 * data to get in a contiguous way.
2309 *
2310 * @return a pointer to the requested contiguous data, NULL otherwise.
2311 */
2312 void *net_pkt_get_data(struct net_pkt *pkt,
2313 struct net_pkt_data_access *access);
2314
2315 /**
2316 * @brief Set contiguous data into a network packet
2317 *
2318 * @details net_pkt's cursor should be properly initialized and,
2319 * if needed, positioned using net_pkt_skip.
2320 * Cursor position will be updated after the operation.
2321 *
2322 * @param pkt The network packet to where the data should be set.
2323 * @param access A pointer to a valid net_pkt_data_access describing the
2324 * data to set.
2325 *
2326 * @return 0 on success, a negative errno otherwise.
2327 */
2328 int net_pkt_set_data(struct net_pkt *pkt,
2329 struct net_pkt_data_access *access);
2330
2331 /**
2332 * Acknowledge previously contiguous data taken from a network packet.
2333 * The packet needs to be set to overwrite mode.
2334 */
2335 static inline int net_pkt_acknowledge_data(struct net_pkt *pkt,
2336 struct net_pkt_data_access *access)
2337 {
2338 return net_pkt_skip(pkt, access->size);
2339 }
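/* An illustrative sketch: accessing a protocol header contiguously via the
 * data access helpers. The struct my_proto_hdr type is hypothetical and
 * defined here only for the example.
 *
 * @code{.c}
 * struct my_proto_hdr {
 *         uint8_t type;
 *         uint8_t flags;
 *         uint16_t len;
 * } __packed;
 *
 * NET_PKT_DATA_ACCESS_DEFINE(my_access, struct my_proto_hdr);
 * struct my_proto_hdr *hdr;
 *
 * hdr = (struct my_proto_hdr *)net_pkt_get_data(pkt, &my_access);
 * if (hdr == NULL) {
 *         return -ENOBUFS;
 * }
 *
 * hdr->flags |= 0x01;
 *
 * // write the (possibly locally buffered) header back and advance the cursor
 * if (net_pkt_set_data(pkt, &my_access) < 0) {
 *         return -ENOBUFS;
 * }
 * @endcode
 */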
2340
2341 /**
2342 * @}
2343 */
2344
2345 #ifdef __cplusplus
2346 }
2347 #endif
2348
2349 #endif /* ZEPHYR_INCLUDE_NET_NET_PKT_H_ */
2350