/Linux-v5.15/include/net/ |
D | xdp.h |
    80  xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)   in xdp_init_buff() [argument]
    82  xdp->frame_sz = frame_sz;   in xdp_init_buff()
    83  xdp->rxq = rxq;   in xdp_init_buff()
    87  xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,   in xdp_prepare_buff() [argument]
    92  xdp->data_hard_start = hard_start;   in xdp_prepare_buff()
    93  xdp->data = data;   in xdp_prepare_buff()
    94  xdp->data_end = data + data_len;   in xdp_prepare_buff()
    95  xdp->data_meta = meta_valid ? data : data + 1;   in xdp_prepare_buff()
   104  #define xdp_data_hard_end(xdp) \   [argument]
   105  ((xdp)->data_hard_start + (xdp)->frame_sz - \
   [all …]
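xdp_init_buff() and xdp_prepare_buff() are the two helpers a driver calls on its RX path before handing a frame to a program. A minimal sketch of that pattern follows; my_rx_one() and its parameters are illustrative, not taken from any in-tree driver.

```c
/* Minimal sketch of the init/prepare pattern above; my_rx_one() is a
 * hypothetical helper, not a real driver function.
 */
#include <linux/filter.h>	/* bpf_prog_run_xdp() */
#include <net/xdp.h>

static u32 my_rx_one(struct bpf_prog *prog, struct xdp_rxq_info *rxq,
		     unsigned char *hard_start, unsigned int headroom,
		     unsigned int len)
{
	struct xdp_buff xdp;

	/* frame_sz is the whole buffer: headroom + data + tailroom */
	xdp_init_buff(&xdp, PAGE_SIZE, rxq);
	/* meta_valid == false marks data_meta invalid (data + 1, see line 95) */
	xdp_prepare_buff(&xdp, hard_start, headroom, len, false);

	return bpf_prog_run_xdp(prog, &xdp);	/* XDP_PASS, XDP_TX, ... */
}
```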
|
D | xdp_sock_drv.h |
    61  static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)   in xsk_buff_xdp_get_dma() [argument]
    63  struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);   in xsk_buff_xdp_get_dma()
    68  static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)   in xsk_buff_xdp_get_frame_dma() [argument]
    70  struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);   in xsk_buff_xdp_get_frame_dma()
    85  static inline void xsk_buff_free(struct xdp_buff *xdp)   in xsk_buff_free() [argument]
    87  struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);   in xsk_buff_free()
   103  static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)   in xsk_buff_dma_sync_for_cpu() [argument]
   105  struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);   in xsk_buff_dma_sync_for_cpu()
   200  static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)   in xsk_buff_xdp_get_dma() [argument]
   205  static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)   in xsk_buff_xdp_get_frame_dma() [argument]
   [all …]
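These helpers are how a zero-copy driver pulls RX buffers out of an xsk_buff_pool and obtains the DMA address to program into its descriptors (see the ixgbe/ice/i40e callers further down). A minimal sketch of one refill step, where my_post_rx_desc() is a hypothetical stand-in for the device-specific descriptor write:

```c
/* Sketch of one zero-copy RX refill step; my_post_rx_desc() is a
 * hypothetical placeholder for the device-specific descriptor write.
 */
#include <net/xdp_sock_drv.h>

static void my_post_rx_desc(dma_addr_t dma, struct xdp_buff *xdp)
{
	/* hypothetical: write dma into the hardware RX descriptor and
	 * remember xdp so the completion path can find it again
	 */
}

static bool my_refill_one(struct xsk_buff_pool *pool)
{
	struct xdp_buff *xdp;
	dma_addr_t dma;

	xdp = xsk_buff_alloc(pool);		/* frame comes from the UMEM fill ring */
	if (!xdp)
		return false;			/* fill ring empty, try again later */

	dma = xsk_buff_xdp_get_dma(xdp);	/* already DMA-mapped by the pool */
	my_post_rx_desc(dma, xdp);
	return true;
}
```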
|
D | xdp_sock.h |
    79  int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
    80  int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
    85  static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)   in xsk_generic_rcv() [argument]
    90  static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)   in __xsk_map_redirect() [argument]
|
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/ |
D | rx.c |
    32  struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;   in mlx5e_xsk_skb_from_cqe_mpwrq_linear() [local]
    48  xdp->data_end = xdp->data + cqe_bcnt32;   in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
    49  xdp_set_data_meta_invalid(xdp);   in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
    50  xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);   in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
    51  net_prefetch(xdp->data);   in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
    68  if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp))) {   in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
    77  return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt32);   in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
    85  struct xdp_buff *xdp = wi->di->xsk;   in mlx5e_xsk_skb_from_cqe_linear() [local]
    94  xdp->data_end = xdp->data + cqe_bcnt;   in mlx5e_xsk_skb_from_cqe_linear()
    95  xdp_set_data_meta_invalid(xdp);   in mlx5e_xsk_skb_from_cqe_linear()
   [all …]
|
/Linux-v5.15/samples/bpf/ |
D | xdp_adjust_tail_kern.c |
    70  static __always_inline int send_icmp4_too_big(struct xdp_md *xdp)   in send_icmp4_too_big() [argument]
    74  if (bpf_xdp_adjust_head(xdp, 0 - headroom))   in send_icmp4_too_big()
    76  void *data = (void *)(long)xdp->data;   in send_icmp4_too_big()
    77  void *data_end = (void *)(long)xdp->data_end;   in send_icmp4_too_big()
   120  static __always_inline int handle_ipv4(struct xdp_md *xdp)   in handle_ipv4() [argument]
   122  void *data_end = (void *)(long)xdp->data_end;   in handle_ipv4()
   123  void *data = (void *)(long)xdp->data;   in handle_ipv4()
   129  if (bpf_xdp_adjust_tail(xdp, 0 - offset))   in handle_ipv4()
   131  return send_icmp4_too_big(xdp);   in handle_ipv4()
   137  int _xdp_icmp(struct xdp_md *xdp)   in _xdp_icmp() [argument]
   [all …]
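This sample shrinks oversized packets with bpf_xdp_adjust_tail() before building an ICMP "too big" reply. A stripped-down sketch of just the tail-trim part; program name, section name, and MAX_LEN are illustrative:

```c
/* Minimal sketch of trimming a frame to MAX_LEN bytes with
 * bpf_xdp_adjust_tail(); names and the 600-byte limit are illustrative.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define MAX_LEN 600

SEC("xdp")
int xdp_trim_tail(struct xdp_md *ctx)
{
	int len = ctx->data_end - ctx->data;

	if (len <= MAX_LEN)
		return XDP_PASS;

	/* a negative delta shrinks the frame from the tail; after the call
	 * any previously derived data/data_end pointers are invalid and must
	 * be re-read from ctx
	 */
	if (bpf_xdp_adjust_tail(ctx, 0 - (len - MAX_LEN)))
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
```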
|
D | xdp_tx_iptunnel_kern.c |
    77  static __always_inline int handle_ipv4(struct xdp_md *xdp)   in handle_ipv4() [argument]
    79  void *data_end = (void *)(long)xdp->data_end;   in handle_ipv4()
    80  void *data = (void *)(long)xdp->data;   in handle_ipv4()
   112  if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))   in handle_ipv4()
   115  data = (void *)(long)xdp->data;   in handle_ipv4()
   116  data_end = (void *)(long)xdp->data_end;   in handle_ipv4()
   152  static __always_inline int handle_ipv6(struct xdp_md *xdp)   in handle_ipv6() [argument]
   154  void *data_end = (void *)(long)xdp->data_end;   in handle_ipv6()
   155  void *data = (void *)(long)xdp->data;   in handle_ipv6()
   184  if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))   in handle_ipv6()
   [all …]
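The encapsulation pattern in this sample is: grow headroom by the outer header size with a negative bpf_xdp_adjust_head() delta, then reload data/data_end from the context and bounds-check before touching the packet. A hedged sketch of just that pattern (the program name is illustrative; it stops short of rewriting headers, so it drops instead of transmitting):

```c
/* Sketch of making room for an outer IPv4 header in front of the frame.
 * A real tunnel, like the sample above, would then move the Ethernet
 * header to the new start, fill in the outer iphdr, and return XDP_TX.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_make_room(struct xdp_md *ctx)
{
	void *data, *data_end;

	/* negative delta moves data towards data_hard_start, creating room */
	if (bpf_xdp_adjust_head(ctx, 0 - (int)sizeof(struct iphdr)))
		return XDP_DROP;

	/* the helper invalidates old packet pointers: reload and re-check,
	 * exactly as lines 115-116 above do
	 */
	data = (void *)(long)ctx->data;
	data_end = (void *)(long)ctx->data_end;
	if (data + sizeof(struct ethhdr) + sizeof(struct iphdr) > data_end)
		return XDP_DROP;

	/* header rewriting omitted in this sketch, so do not pass the frame */
	return XDP_DROP;
}

char _license[] SEC("license") = "GPL";
```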
|
/Linux-v5.15/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_xsk.c |
    99  struct xdp_buff *xdp)   in ixgbe_run_xdp_zc() [argument]
   107  act = bpf_prog_run_xdp(xdp_prog, xdp);   in ixgbe_run_xdp_zc()
   110  err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);   in ixgbe_run_xdp_zc()
   120  xdpf = xdp_convert_buff_to_frame(xdp);   in ixgbe_run_xdp_zc()
   158  bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);   in ixgbe_alloc_rx_buffers_zc()
   159  if (!bi->xdp) {   in ixgbe_alloc_rx_buffers_zc()
   164  dma = xsk_buff_xdp_get_dma(bi->xdp);   in ixgbe_alloc_rx_buffers_zc()
   206  unsigned int metasize = bi->xdp->data - bi->xdp->data_meta;   in ixgbe_construct_skb_zc()
   207  unsigned int datasize = bi->xdp->data_end - bi->xdp->data;   in ixgbe_construct_skb_zc()
   212  bi->xdp->data_end - bi->xdp->data_hard_start,   in ixgbe_construct_skb_zc()
   [all …]
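ixgbe_run_xdp_zc() is one instance of the verdict handling every XDP-capable driver repeats around bpf_prog_run_xdp(). A condensed sketch of that switch; the MY_XDP_* result codes and my_xmit_frame() are illustrative stand-ins for the driver's own plumbing, and buffer freeing is left to the caller:

```c
/* Sketch of driver-side verdict handling; result codes and my_xmit_frame()
 * are illustrative, the caller is assumed to free the buffer on MY_XDP_ERR.
 */
#include <linux/filter.h>	/* bpf_prog_run_xdp(), xdp_do_redirect() */
#include <linux/netdevice.h>
#include <net/xdp.h>

enum { MY_XDP_PASS, MY_XDP_CONSUMED, MY_XDP_ERR };

static int my_xmit_frame(struct xdp_frame *xdpf)
{
	return 0;	/* hypothetical: queue xdpf on the driver's XDP TX ring */
}

static int my_run_xdp(struct net_device *netdev, struct bpf_prog *prog,
		      struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf;
	u32 act;

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		return MY_XDP_PASS;	/* build an skb, hand it to the stack */
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (!xdpf || my_xmit_frame(xdpf))
			return MY_XDP_ERR;
		return MY_XDP_CONSUMED;
	case XDP_REDIRECT:
		if (xdp_do_redirect(netdev, xdp, prog))
			return MY_XDP_ERR;
		return MY_XDP_CONSUMED;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
	case XDP_DROP:
		return MY_XDP_ERR;
	}
}
```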
|
/Linux-v5.15/tools/testing/selftests/bpf/progs/ |
D | test_xdp.c |
    80  static __always_inline int handle_ipv4(struct xdp_md *xdp)   in handle_ipv4() [argument]
    82  void *data_end = (void *)(long)xdp->data_end;   in handle_ipv4()
    83  void *data = (void *)(long)xdp->data;   in handle_ipv4()
   113  if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))   in handle_ipv4()
   116  data = (void *)(long)xdp->data;   in handle_ipv4()
   117  data_end = (void *)(long)xdp->data_end;   in handle_ipv4()
   153  static __always_inline int handle_ipv6(struct xdp_md *xdp)   in handle_ipv6() [argument]
   155  void *data_end = (void *)(long)xdp->data_end;   in handle_ipv6()
   156  void *data = (void *)(long)xdp->data;   in handle_ipv6()
   183  if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))   in handle_ipv6()
   [all …]
|
D | test_xdp_loop.c |
    76  static __always_inline int handle_ipv4(struct xdp_md *xdp)   in handle_ipv4() [argument]
    78  void *data_end = (void *)(long)xdp->data_end;   in handle_ipv4()
    79  void *data = (void *)(long)xdp->data;   in handle_ipv4()
   109  if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))   in handle_ipv4()
   112  data = (void *)(long)xdp->data;   in handle_ipv4()
   113  data_end = (void *)(long)xdp->data_end;   in handle_ipv4()
   149  static __always_inline int handle_ipv6(struct xdp_md *xdp)   in handle_ipv6() [argument]
   151  void *data_end = (void *)(long)xdp->data_end;   in handle_ipv6()
   152  void *data = (void *)(long)xdp->data;   in handle_ipv6()
   179  if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))   in handle_ipv6()
   [all …]
|
D | test_xdp_bpf2bpf.c |
    45  int BPF_PROG(trace_on_entry, struct xdp_buff *xdp)   in BPF_PROG() [argument]
    48  void *data_end = (void *)(long)xdp->data_end;   in BPF_PROG()
    49  void *data = (void *)(long)xdp->data;   in BPF_PROG()
    51  meta.ifindex = xdp->rxq->dev->ifindex;   in BPF_PROG()
    53  bpf_xdp_output(xdp, &perf_buf_map,   in BPF_PROG()
    58  test_result_fentry = xdp->rxq->dev->ifindex;   in BPF_PROG()
    64  int BPF_PROG(trace_on_exit, struct xdp_buff *xdp, int ret)   in BPF_PROG() [argument]
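This selftest traces another XDP program with fentry/fexit, so it sees the kernel struct xdp_buff rather than the UAPI struct xdp_md. A minimal sketch of the entry side, assuming a vmlinux.h generated with bpftool; the target name in SEC() is a placeholder, since the real attach target is normally set at load time (for example with bpf_program__set_attach_target()):

```c
/* Sketch of an fentry tracer on an XDP program; "xdp_target_func" is a
 * hypothetical placeholder for the load-time attach target.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("fentry/xdp_target_func")
int BPF_PROG(trace_on_entry, struct xdp_buff *xdp)
{
	/* fentry arguments are BTF pointers, so fields can be read directly */
	__u32 ifindex = xdp->rxq->dev->ifindex;
	__u64 len = (__u64)xdp->data_end - (__u64)xdp->data;

	bpf_printk("xdp on ifindex %u, %llu bytes", ifindex, len);
	return 0;
}

char _license[] SEC("license") = "GPL";
```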
|
D | test_xdp_adjust_tail_shrink.c |
    15  int _xdp_adjust_tail_shrink(struct xdp_md *xdp)   in _xdp_adjust_tail_shrink() [argument]
    17  void *data_end = (void *)(long)xdp->data_end;   in _xdp_adjust_tail_shrink()
    18  void *data = (void *)(long)xdp->data;   in _xdp_adjust_tail_shrink()
    25  if (bpf_xdp_adjust_tail(xdp, 0 - offset))   in _xdp_adjust_tail_shrink()
|
D | test_xdp_context_test_run.c |
     6  int xdp_context(struct xdp_md *xdp)   in xdp_context() [argument]
     8  void *data = (void *)(long)xdp->data;   in xdp_context()
     9  __u32 *metadata = (void *)(long)xdp->data_meta;   in xdp_context()
    15  if (bpf_xdp_adjust_meta(xdp, 4))   in xdp_context()
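bpf_xdp_adjust_meta() resizes the metadata area that sits between data_meta and data; whatever the program writes there travels with the frame to later consumers. A small sketch of reserving and filling 4 bytes of metadata; the marker value is illustrative:

```c
/* Sketch: reserve 4 bytes of metadata in front of the packet and stash a
 * marker in it. The 0xcafe value is illustrative.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_tag_meta(struct xdp_md *ctx)
{
	__u32 *meta;
	void *data;

	/* negative delta grows the metadata area towards the headroom */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_PASS;	/* driver has no metadata support: just pass */

	data = (void *)(long)ctx->data;
	meta = (void *)(long)ctx->data_meta;
	if ((void *)(meta + 1) > data)	/* bounds check required by the verifier */
		return XDP_PASS;

	*meta = 0xcafe;
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
```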
|
D | test_xdp_adjust_tail_grow.c |
     6  int _xdp_adjust_tail_grow(struct xdp_md *xdp)   in _xdp_adjust_tail_grow() [argument]
     8  void *data_end = (void *)(long)xdp->data_end;   in _xdp_adjust_tail_grow()
     9  void *data = (void *)(long)xdp->data;   in _xdp_adjust_tail_grow()
    28  if (bpf_xdp_adjust_tail(xdp, offset))   in _xdp_adjust_tail_grow()
|
D | test_xdp_noinline.c |
   278  bool encap_v6(struct xdp_md *xdp, struct ctl_value *cval,   in encap_v6() [argument]
   289  if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))   in encap_v6()
   291  data = (void *)(long)xdp->data;   in encap_v6()
   292  data_end = (void *)(long)xdp->data_end;   in encap_v6()
   321  bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,   in encap_v4() [argument]
   337  if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))   in encap_v4()
   339  data = (void *)(long)xdp->data;   in encap_v4()
   340  data_end = (void *)(long)xdp->data_end;   in encap_v4()
   369  if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))   in encap_v4()
   375  bool decap_v6(struct xdp_md *xdp, void **data, void **data_end, bool inner_v4)   in decap_v6() [argument]
   [all …]
|
/Linux-v5.15/drivers/net/hyperv/ |
D | netvsc_bpf.c |
    24  struct xdp_buff *xdp)   in netvsc_run_xdp() [argument]
    32  xdp->data_hard_start = NULL;   in netvsc_run_xdp()
    53  xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq);   in netvsc_run_xdp()
    54  xdp_prepare_buff(xdp, page_address(page), NETVSC_XDP_HDRM, len, false);   in netvsc_run_xdp()
    56  memcpy(xdp->data, data, len);   in netvsc_run_xdp()
    58  act = bpf_prog_run_xdp(prog, xdp);   in netvsc_run_xdp()
    79  xdp->data_hard_start = NULL;   in netvsc_run_xdp()
   139  struct netdev_bpf xdp;   in netvsc_vf_setxdp() [local]
   152  memset(&xdp, 0, sizeof(xdp));   in netvsc_vf_setxdp()
   157  xdp.command = XDP_SETUP_PROG;   in netvsc_vf_setxdp()
   [all …]
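netvsc_vf_setxdp() shows how one driver installs an XDP program on another netdev by issuing an XDP_SETUP_PROG command through ndo_bpf. A hedged sketch of that handoff; my_set_xdp_on() is illustrative and omits the bpf_prog reference counting a real caller must do:

```c
/* Sketch of installing a program on another device via ndo_bpf, modeled on
 * netvsc_vf_setxdp(); my_set_xdp_on() is a hypothetical helper.
 */
#include <linux/bpf.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static int my_set_xdp_on(struct net_device *dev, struct bpf_prog *prog)
{
	struct netdev_bpf xdp;

	if (!dev->netdev_ops->ndo_bpf)
		return prog ? -EOPNOTSUPP : 0;

	memset(&xdp, 0, sizeof(xdp));
	xdp.command = XDP_SETUP_PROG;
	xdp.prog = prog;		/* NULL detaches the current program */

	return dev->netdev_ops->ndo_bpf(dev, &xdp);
}
```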
|
/Linux-v5.15/drivers/net/ethernet/broadcom/bnxt/ |
D | bnxt_xdp.c |
   118  struct xdp_buff xdp;   in bnxt_rx_xdp() [local]
   137  xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);   in bnxt_rx_xdp()
   138  xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false);   in bnxt_rx_xdp()
   139  orig_data = xdp.data;   in bnxt_rx_xdp()
   141  act = bpf_prog_run_xdp(xdp_prog, &xdp);   in bnxt_rx_xdp()
   150  *len = xdp.data_end - xdp.data;   in bnxt_rx_xdp()
   151  if (orig_data != xdp.data) {   in bnxt_rx_xdp()
   152  offset = xdp.data - xdp.data_hard_start;   in bnxt_rx_xdp()
   153  *data_ptr = xdp.data_hard_start + offset;   in bnxt_rx_xdp()
   189  if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {   in bnxt_rx_xdp()
   [all …]
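Because the program may move data (bpf_xdp_adjust_head) or data_end (bpf_xdp_adjust_tail), a driver that keeps processing the frame has to recompute its own offset and length from the xdp_buff afterwards, as lines 150-153 do above. A small sketch of that bookkeeping; the helper name is illustrative:

```c
/* Sketch of re-deriving the driver's view of the frame after the program
 * has run; my_sync_after_xdp() is an illustrative helper name.
 */
#include <net/xdp.h>

static void my_sync_after_xdp(const struct xdp_buff *xdp, void *orig_data,
			      unsigned int *offset, unsigned int *len)
{
	/* adjust_tail (and adjust_head) change the payload length */
	*len = xdp->data_end - xdp->data;

	/* adjust_head moved the start of the packet inside the buffer */
	if (orig_data != xdp->data)
		*offset = xdp->data - xdp->data_hard_start;
}
```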
|
/Linux-v5.15/include/trace/events/ |
D | xdp.h |
     3  #define TRACE_SYSTEM xdp
    31  const struct bpf_prog *xdp, u32 act),
    33  TP_ARGS(dev, xdp, act),
    42  __entry->prog_id = xdp->aux->id;
    92  const struct bpf_prog *xdp,
    97  TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index),
   123  __entry->prog_id = xdp->aux->id;
   142  const struct bpf_prog *xdp,
   146  TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
   151  const struct bpf_prog *xdp,
   [all …]
|
/Linux-v5.15/drivers/net/ethernet/intel/ice/ |
D | ice_xsk.c |
   378  rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);   in ice_alloc_rx_bufs_zc()
   379  if (!rx_buf->xdp) {   in ice_alloc_rx_bufs_zc()
   384  dma = xsk_buff_xdp_get_dma(rx_buf->xdp);   in ice_alloc_rx_bufs_zc()
   433  unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta;   in ice_construct_skb_zc()
   434  unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data;   in ice_construct_skb_zc()
   435  unsigned int datasize_hard = rx_buf->xdp->data_end -   in ice_construct_skb_zc()
   436  rx_buf->xdp->data_hard_start;   in ice_construct_skb_zc()
   444  skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start);   in ice_construct_skb_zc()
   445  memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize);   in ice_construct_skb_zc()
   449  xsk_buff_free(rx_buf->xdp);   in ice_construct_skb_zc()
   [all …]
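On XDP_PASS a zero-copy driver cannot hand the UMEM page to the stack, so it copies the payload into a freshly allocated skb and returns the buffer to the pool, which is what ice_construct_skb_zc() does above. A condensed sketch of that copy; the function name is illustrative and metadata handling is omitted:

```c
/* Condensed sketch of building an skb from a zero-copy xdp_buff;
 * my_construct_skb_zc() is an illustrative name.
 */
#include <linux/skbuff.h>
#include <net/xdp_sock_drv.h>

static struct sk_buff *my_construct_skb_zc(struct napi_struct *napi,
					   struct xdp_buff *xdp)
{
	unsigned int datasize = xdp->data_end - xdp->data;
	unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
	struct sk_buff *skb;

	skb = __napi_alloc_skb(napi, datasize_hard, GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	/* keep the same headroom the frame had inside the UMEM chunk */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);

	xsk_buff_free(xdp);	/* the UMEM buffer goes back to the pool */
	return skb;
}
```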
|
/Linux-v5.15/tools/testing/selftests/bpf/ |
D | test_xdp_veth.sh |
    61  if ! ip link set dev lo xdp off > /dev/null 2>&1; then
   101  xdp_redirect_map.o $BPF_DIR/progs type xdp \
   106  ip link set dev veth1 xdp pinned $BPF_DIR/progs/redirect_map_0
   107  ip link set dev veth2 xdp pinned $BPF_DIR/progs/redirect_map_1
   108  ip link set dev veth3 xdp pinned $BPF_DIR/progs/redirect_map_2
   110  ip -n ns1 link set dev veth11 xdp obj xdp_dummy.o sec xdp_dummy
   111  ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec xdp
   112  ip -n ns3 link set dev veth33 xdp obj xdp_dummy.o sec xdp_dummy
|
D | test_tcp_check_syncookie_user.c |
    81  static int get_map_fd_by_prog_id(int prog_id, bool *xdp)   in get_map_fd_by_prog_id() [argument]
   108  *xdp = info.type == BPF_PROG_TYPE_XDP;   in get_map_fd_by_prog_id()
   119  static int run_test(int server_fd, int results_fd, bool xdp)   in run_test() [argument]
   170  if (xdp && value_gen == 0) {   in run_test()
   210  bool xdp;   in main() [local]
   217  results = get_map_fd_by_prog_id(atoi(argv[1]), &xdp);   in main()
   242  if (run_test(server, results, xdp))   in main()
   245  if (run_test(server_v6, results, xdp))   in main()
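get_map_fd_by_prog_id() decides whether the loaded program is an XDP program by querying the kernel for its bpf_prog_info. A minimal user-space sketch of that query using libbpf; the function name is illustrative and error handling is reduced to returning a negative value:

```c
/* Minimal sketch: look a program up by id and report whether it is XDP.
 * prog_is_xdp() is an illustrative name.
 */
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <linux/bpf.h>

static int prog_is_xdp(__u32 prog_id, bool *xdp)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	int fd, err;

	fd = bpf_prog_get_fd_by_id(prog_id);
	if (fd < 0)
		return fd;

	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (!err)
		*xdp = info.type == BPF_PROG_TYPE_XDP;

	close(fd);
	return err;
}
```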
|
/Linux-v5.15/drivers/net/ethernet/intel/i40e/ |
D | i40e_xsk.c |
   149  static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)   in i40e_run_xdp_zc() [argument]
   160  act = bpf_prog_run_xdp(xdp_prog, xdp);   in i40e_run_xdp_zc()
   163  err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);   in i40e_run_xdp_zc()
   174  result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);   in i40e_run_xdp_zc()
   196  struct xdp_buff **bi, *xdp;   in i40e_alloc_rx_buffers_zc() [local]
   203  xdp = xsk_buff_alloc(rx_ring->xsk_pool);   in i40e_alloc_rx_buffers_zc()
   204  if (!xdp) {   in i40e_alloc_rx_buffers_zc()
   208  *bi = xdp;   in i40e_alloc_rx_buffers_zc()
   209  dma = xsk_buff_xdp_get_dma(xdp);   in i40e_alloc_rx_buffers_zc()
   244  struct xdp_buff *xdp)   in i40e_construct_skb_zc() [argument]
   [all …]
|
/Linux-v5.15/net/xdp/ |
D | xsk.c |
   144  u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;   in xp_get_handle()
   152  static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)   in __xsk_rcv_zc() [argument]
   154  struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);   in __xsk_rcv_zc()
   187  static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)   in __xsk_rcv() [argument]
   193  len = xdp->data_end - xdp->data;   in __xsk_rcv()
   205  xsk_copy_xdp(xsk_xdp, xdp, len);   in __xsk_rcv()
   232  static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp)   in xsk_rcv_check() [argument]
   237  if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)   in xsk_rcv_check()
   240  sk_mark_napi_id_once_xdp(&xs->sk, xdp);   in xsk_rcv_check()
   251  int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)   in xsk_generic_rcv() [argument]
   [all …]
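xsk_generic_rcv() and __xsk_rcv() are the kernel end of an AF_XDP redirect; the program end is a bpf_redirect_map() into a BPF_MAP_TYPE_XSKMAP keyed by RX queue. A minimal sketch of that program side; the map name and size are illustrative:

```c
/* Minimal sketch of the XDP program that feeds the AF_XDP receive path
 * above; map name and size are illustrative.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
	__uint(max_entries, 64);
} xsks_map SEC(".maps");

SEC("xdp")
int xdp_sock_redirect(struct xdp_md *ctx)
{
	/* the low bits of flags are the fallback action when no socket is
	 * bound to this queue: let the frame continue to the regular stack
	 */
	return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
}

char _license[] SEC("license") = "GPL";
```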
|
/Linux-v5.15/net/core/ |
D | xdp.c |
   343  struct xdp_buff *xdp)   in __xdp_return() [argument]
   368  xsk_buff_free(xdp);   in __xdp_return()
   443  void xdp_return_buff(struct xdp_buff *xdp)   in xdp_return_buff() [argument]
   445  __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);   in xdp_return_buff()
   473  struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)   in xdp_convert_zc_to_xdp_frame() [argument]
   481  metasize = xdp_data_meta_unsupported(xdp) ? 0 :   in xdp_convert_zc_to_xdp_frame()
   482  xdp->data - xdp->data_meta;   in xdp_convert_zc_to_xdp_frame()
   483  totsize = xdp->data_end - xdp->data + metasize;   in xdp_convert_zc_to_xdp_frame()
   497  data_to_copy = metasize ? xdp->data_meta : xdp->data;   in xdp_convert_zc_to_xdp_frame()
   507  xsk_buff_free(xdp);   in xdp_convert_zc_to_xdp_frame()
|
/Linux-v5.15/tools/bpf/bpftool/Documentation/ |
D | bpftool-net.rst |
    29  | *ATTACH_TYPE* := { **xdp** | **xdpgeneric** | **xdpdrv** | **xdpoffload** }
    36  Currently, only device driver xdp attachments and tc filter
    47  The current output will start with all xdp program attachments, followed by
    48  all tc class/qdisc bpf program attachments. Both xdp programs and
    62  … **xdp** - try native XDP and fallback to generic XDP if NIC driver does not support it;
    87  xdp:
   103  "xdp": [{
   154  xdp:
   164  xdp:
   174  xdp:
|
/Linux-v5.15/kernel/bpf/ |
D | devmap.c |
   330  struct xdp_buff xdp;   in dev_map_bpf_prog_run() [local]
   338  xdp_convert_frame_to_buff(xdpf, &xdp);   in dev_map_bpf_prog_run()
   339  xdp.txq = &txq;   in dev_map_bpf_prog_run()
   341  act = bpf_prog_run_xdp(xdp_prog, &xdp);   in dev_map_bpf_prog_run()
   344  err = xdp_update_frame_from_buff(&xdp, xdpf);   in dev_map_bpf_prog_run()
   470  static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,   in __xdp_enqueue() [argument]
   480  err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);   in __xdp_enqueue()
   484  xdpf = xdp_convert_buff_to_frame(xdp);   in __xdp_enqueue()
   495  struct xdp_buff xdp;   in dev_map_bpf_prog_run_skb() [local]
   502  xdp.txq = &txq;   in dev_map_bpf_prog_run_skb()
   [all …]
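devmap.c is the kernel side of XDP_REDIRECT into a BPF_MAP_TYPE_DEVMAP; the program side is a single bpf_redirect_map() call. A minimal sketch, where the map name, size, and the fixed key 0 are illustrative:

```c
/* Minimal sketch of the program side of a devmap redirect; map name, size
 * and the fixed key 0 are illustrative.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));	/* plain ifindex entries */
	__uint(max_entries, 8);
} tx_port SEC(".maps");

SEC("xdp")
int xdp_redirect_devmap(struct xdp_md *ctx)
{
	/* flags == 0: a failed lookup yields XDP_ABORTED, i.e. the frame is dropped */
	return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";
```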
|