/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;
struct xdp_buff;

struct xdp_umem {
	void *addrs;
	u64 size;
	u32 headroom;
	u32 chunk_size;
	u32 chunks;
	u32 npgs;
	struct user_struct *user;
	refcount_t users;
	u8 flags;
	bool zc;
	struct page **pgs;
	int id;
	struct list_head xsk_dma_list;
	struct work_struct work;
};
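
/* The addrs field is the kernel's contiguous (vmapped) view of the user's
 * registered memory, so resolving a descriptor address is a plain offset
 * into it. A minimal sketch for the aligned-chunk case (the unaligned case
 * additionally carries an offset in the upper address bits; see
 * xp_raw_get_data() for the real logic); xdp_umem_get_data() here is a
 * hypothetical helper for illustration:
 *
 *	static inline void *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
 *	{
 *		return umem->addrs + addr;
 *	}
 */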

struct xsk_map {
	struct bpf_map map;
	spinlock_t lock; /* Synchronize map updates */
	atomic_t count;
	struct xdp_sock __rcu *xsk_map[];
};
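
/* Map entries are RCU-protected pointers updated under the map's lock. A
 * minimal datapath lookup, modeled on __xsk_map_lookup_elem() in
 * net/xdp/xskmap.c; xsk_map_get() is an illustrative name, and the caller
 * must be in an RCU read-side section:
 *
 *	static struct xdp_sock *xsk_map_get(struct xsk_map *m, u32 index)
 *	{
 *		if (index >= m->map.max_entries)
 *			return NULL;
 *		return rcu_dereference(m->xsk_map[index]);
 *	}
 */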

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx ____cacheline_aligned_in_smp;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	struct xsk_buff_pool *pool;
	u16 queue_id;
	bool zc;
	bool sg;
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;

	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head tx_list;
	/* Protects generic receive. */
	spinlock_t rx_lock;

	/* Statistics */
	u64 rx_dropped;
	u64 rx_queue_full;

	/* When __xsk_generic_xmit() must return before it sees the EOP descriptor for the current
	 * packet, the partially built skb is saved here so that packet building can resume in the
	 * next call of __xsk_generic_xmit().
	 */
	struct sk_buff *skb;

	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
	struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};
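
/* The state field is written in the control path under the mutex; the
 * datapath only treats a socket as usable once it observes XSK_BOUND.
 * A sketch of that check, following xsk_is_bound() in net/xdp/xsk.c
 * (the smp_rmb() pairs with an smp_wmb() in the bind path):
 *
 *	static bool xsk_is_bound(struct xdp_sock *xs)
 *	{
 *		if (READ_ONCE(xs->state) == XSK_BOUND) {
 *			smp_rmb();
 *			return true;
 *		}
 *		return false;
 *	}
 */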

#ifdef CONFIG_XDP_SOCKETS

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);
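
/* __xsk_map_redirect() queues one buffer on the destination socket's Rx
 * ring and defers the wakeup; __xsk_map_flush() then wakes all sockets on
 * the per-CPU flush list, typically once per NAPI poll via xdp_do_flush().
 * A rough sketch of the driver-side shape, where get_next_rx_buff() is a
 * hypothetical helper:
 *
 *	while ((xdp = get_next_rx_buff(ring))) {
 *		act = bpf_prog_run_xdp(prog, xdp);
 *		if (act == XDP_REDIRECT)
 *			xdp_do_redirect(dev, xdp, prog);
 *	}
 *	xdp_do_flush();
 */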

#else

static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(void)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */