/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

struct xdp_umem_props {
	u64 chunk_mask;
	u64 size;
};
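
/* Illustrative sketch, not part of this header: chunk_mask is the mask of
 * the fixed chunk size, so the queue validation code can check that a
 * descriptor does not cross a chunk boundary by comparing the masked start
 * and end addresses. Roughly:
 *
 *	bool ok = ((desc->addr + desc->len) & props->chunk_mask) ==
 *		  (desc->addr & props->chunk_mask);
 */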

struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};
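
/* Illustrative sketch: the pages array is indexed with the high bits of a
 * 64-bit umem address, and the low bits give the offset within the page.
 * A helper along these lines (the name is ours, for illustration) resolves
 * an address to its kernel mapping:
 *
 *	static inline char *example_umem_get_data(struct xdp_umem *umem, u64 addr)
 *	{
 *		return umem->pages[addr >> PAGE_SHIFT].addr +
 *		       (addr & (PAGE_SIZE - 1));
 *	}
 */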

struct xdp_umem {
	struct xsk_queue *fq;		/* fill queue: buffers from userspace */
	struct xsk_queue *cq;		/* completion queue: finished Tx buffers */
	struct xdp_umem_page *pages;	/* kernel mapping of each umem page */
	struct xdp_umem_props props;
	u32 headroom;
	u32 chunk_size_nohr;		/* chunk size minus configured headroom */
	struct user_struct *user;
	struct pid *pid;
	unsigned long address;		/* start of the userspace memory area */
	refcount_t users;
	struct work_struct work;	/* deferred teardown */
	struct page **pgs;		/* pinned pages backing the umem */
	u32 npgs;
	struct net_device *dev;
	u16 queue_id;
	bool zc;			/* zero-copy mode enabled */
	spinlock_t xsk_list_lock;
	struct list_head xsk_list;	/* sockets sharing this umem */
};

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	bool zc;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	u64 rx_dropped;
};
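
/* Because sk is placed first, a struct sock pointer handed out by the core
 * socket layer can be cast back to the containing xdp_sock. A minimal
 * sketch of the usual helper (xsk.c keeps one along these lines):
 *
 *	static struct xdp_sock *xdp_sk(struct sock *sk)
 *	{
 *		return (struct xdp_sock *)sk;
 *	}
 */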

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
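
/* Illustrative call pattern from the XDP_REDIRECT path (a sketch, not the
 * exact xskmap code): each redirected frame is queued with xsk_rcv(), and
 * xsk_flush() publishes the batch to the socket's Rx ring once per NAPI
 * poll:
 *
 *	err = xsk_rcv(xs, xdp);		(per redirected frame)
 *	...
 *	xsk_flush(xs);			(at the end of the poll)
 */
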
/* Used from netdev driver */
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
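
/* Sketch of how a zero-copy driver is expected to drive these (assumed
 * usage; the budget handling and hardware-ring steps are illustrative):
 *
 *	Fill path: pull one address at a time from the fill queue.
 *		while (xsk_umem_peek_addr(umem, &addr)) {
 *			(post addr to the hardware Rx ring)
 *			xsk_umem_discard_addr(umem);
 *		}
 *
 *	Tx path: consume descriptors, kick the hardware, then complete.
 *		while (budget-- && xsk_umem_consume_tx(umem, &dma, &len))
 *			(post dma/len to the hardware Tx ring)
 *		xsk_umem_consume_tx_done(umem);
 *		...later, when hardware reports completions:
 *		xsk_umem_complete_tx(umem, nb_completed);
 */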
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline void xsk_flush(struct xdp_sock *xs)
{
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}
#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */