// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2018 Intel Corporation. */

#include <assert.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <linux/bpf.h>
#include <linux/if_link.h>
#include <linux/if_xdp.h>
#include <linux/if_ether.h>
#include <net/if.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <net/ethernet.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <time.h>
#include <unistd.h>
#include <pthread.h>
#include <locale.h>
#include <sys/types.h>
#include <poll.h>

#include "bpf/libbpf.h"
#include "bpf_util.h"
#include <bpf/bpf.h>

#include "xdpsock.h"

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif

#define NUM_FRAMES 131072
#define FRAME_HEADROOM 0
#define FRAME_SHIFT 11
#define FRAME_SIZE 2048
#define NUM_DESCS 1024
#define BATCH_SIZE 16

#define FQ_NUM_DESCS 1024
#define CQ_NUM_DESCS 1024

#define DEBUG_HEXDUMP 0
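
/*
 * Sizing notes (a sketch of the invariants the code below relies on):
 * FRAME_SIZE must equal 1 << FRAME_SHIFT (2048 == 1 << 11) so that a
 * frame index can be turned into a UMEM byte offset with a shift, and
 * the UMEM spans NUM_FRAMES * FRAME_SIZE = 131072 * 2048 bytes
 * (256 MiB). All ring sizes are powers of two so "index & mask" can
 * stand in for a modulo on the ring index.
 */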

typedef __u64 u64;
typedef __u32 u32;

static unsigned long prev_time;

enum benchmark_type {
	BENCH_RXDROP = 0,
	BENCH_TXONLY = 1,
	BENCH_L2FWD = 2,
};

static enum benchmark_type opt_bench = BENCH_RXDROP;
static u32 opt_xdp_flags;
static const char *opt_if = "";
static int opt_ifindex;
static int opt_queue;
static int opt_poll;
static int opt_shared_packet_buffer;
static int opt_interval = 1;
static u32 opt_xdp_bind_flags;

struct xdp_umem_uqueue {
	u32 cached_prod;
	u32 cached_cons;
	u32 mask;
	u32 size;
	u32 *producer;
	u32 *consumer;
	u64 *ring;
	void *map;
};

struct xdp_umem {
	char *frames;
	struct xdp_umem_uqueue fq;
	struct xdp_umem_uqueue cq;
	int fd;
};

struct xdp_uqueue {
	u32 cached_prod;
	u32 cached_cons;
	u32 mask;
	u32 size;
	u32 *producer;
	u32 *consumer;
	struct xdp_desc *ring;
	void *map;
};
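
/*
 * Both queue flavours above describe the same single-producer/
 * single-consumer ring layout: "producer" and "consumer" point into
 * the kernel-shared mmap()ed area, while cached_prod/cached_cons are
 * local copies that let the fast path batch its updates instead of
 * touching the shared indices on every descriptor. The fill and
 * completion rings carry plain u64 frame addresses; the Rx and Tx
 * rings carry full struct xdp_desc entries (addr + len).
 */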

struct xdpsock {
	struct xdp_uqueue rx;
	struct xdp_uqueue tx;
	int sfd;
	struct xdp_umem *umem;
	u32 outstanding_tx;
	unsigned long rx_npkts;
	unsigned long tx_npkts;
	unsigned long prev_rx_npkts;
	unsigned long prev_tx_npkts;
};

#define MAX_SOCKS 4
static int num_socks;
struct xdpsock *xsks[MAX_SOCKS];

static unsigned long get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000UL + ts.tv_nsec;
}

static void dump_stats(void);

#define lassert(expr)							\
	do {								\
		if (!(expr)) {						\
			fprintf(stderr, "%s:%s:%i: Assertion failed: "	\
				#expr ": errno: %d/\"%s\"\n",		\
				__FILE__, __func__, __LINE__,		\
				errno, strerror(errno));		\
			dump_stats();					\
			exit(EXIT_FAILURE);				\
		}							\
	} while (0)

#define barrier() __asm__ __volatile__("": : :"memory")
#ifdef __aarch64__
#define u_smp_rmb() __asm__ __volatile__("dmb ishld": : :"memory")
#define u_smp_wmb() __asm__ __volatile__("dmb ishst": : :"memory")
#else
#define u_smp_rmb() barrier()
#define u_smp_wmb() barrier()
#endif
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
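
/*
 * Barrier pairing, informally: a producer writes its descriptors,
 * issues u_smp_wmb(), then publishes the new *producer index; a
 * consumer reads the index, issues u_smp_rmb(), then reads the
 * descriptors. On x86 the plain compiler barrier() suffices because
 * the hardware already keeps stores ordered with stores and loads
 * with loads; aarch64 needs the explicit dmb instructions above.
 */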

static const char pkt_data[] =
	"\x3c\xfd\xfe\x9e\x7f\x71\xec\xb1\xd7\x98\x3a\xc0\x08\x00\x45\x00"
	"\x00\x2e\x00\x00\x00\x00\x40\x11\x88\x97\x05\x08\x07\x08\xc8\x14"
	"\x1e\x04\x10\x92\x10\x92\x00\x1a\x6d\xa3\x34\x33\x1f\x69\x40\x6b"
	"\x54\x59\xb6\x14\x2d\x11\x44\xbf\xaf\xd9\xbe\xaa";
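
/*
 * Decoding the canned 60-byte frame above: an Ethernet header
 * (dst 3c:fd:fe:9e:7f:71, src ec:b1:d7:98:3a:c0, ethertype 0x0800 =
 * IPv4), an IPv4 header (protocol 0x11 = UDP, src 5.8.7.8,
 * dst 200.20.30.4) and a UDP header (src/dst port 4242) carrying an
 * 18-byte payload. txonly mode transmits copies of this frame.
 */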

static inline u32 umem_nb_free(struct xdp_umem_uqueue *q, u32 nb)
{
	u32 free_entries = q->cached_cons - q->cached_prod;

	if (free_entries >= nb)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cached_cons = *q->consumer + q->size;

	return q->cached_cons - q->cached_prod;
}

static inline u32 xq_nb_free(struct xdp_uqueue *q, u32 ndescs)
{
	u32 free_entries = q->cached_cons - q->cached_prod;

	if (free_entries >= ndescs)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cached_cons = *q->consumer + q->size;
	return q->cached_cons - q->cached_prod;
}

static inline u32 umem_nb_avail(struct xdp_umem_uqueue *q, u32 nb)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries == 0) {
		q->cached_prod = *q->producer;
		entries = q->cached_prod - q->cached_cons;
	}

	return (entries > nb) ? nb : entries;
}

static inline u32 xq_nb_avail(struct xdp_uqueue *q, u32 ndescs)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries == 0) {
		q->cached_prod = *q->producer;
		entries = q->cached_prod - q->cached_cons;
	}

	return (entries > ndescs) ? ndescs : entries;
}
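
/*
 * The four helpers above treat the indices as free-running u32
 * counters that are only masked when indexing the ring, so plain
 * unsigned subtraction gives the occupancy even across wraparound.
 * A worked example with size = 1024: if *consumer has advanced to
 * 4096 and cached_prod is 5000, free entries = (4096 + 1024) - 5000
 * = 120, i.e. the producer may claim 120 more slots before it would
 * overwrite entries the consumer has not yet released.
 */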

static inline int umem_fill_to_kernel_ex(struct xdp_umem_uqueue *fq,
					 struct xdp_desc *d,
					 size_t nb)
{
	u32 i;

	if (umem_nb_free(fq, nb) < nb)
		return -ENOSPC;

	for (i = 0; i < nb; i++) {
		u32 idx = fq->cached_prod++ & fq->mask;

		fq->ring[idx] = d[i].addr;
	}

	u_smp_wmb();

	*fq->producer = fq->cached_prod;

	return 0;
}

static inline int umem_fill_to_kernel(struct xdp_umem_uqueue *fq, u64 *d,
				      size_t nb)
{
	u32 i;

	if (umem_nb_free(fq, nb) < nb)
		return -ENOSPC;

	for (i = 0; i < nb; i++) {
		u32 idx = fq->cached_prod++ & fq->mask;

		fq->ring[idx] = d[i];
	}

	u_smp_wmb();

	*fq->producer = fq->cached_prod;

	return 0;
}

static inline size_t umem_complete_from_kernel(struct xdp_umem_uqueue *cq,
					       u64 *d, size_t nb)
{
	u32 idx, i, entries = umem_nb_avail(cq, nb);

	u_smp_rmb();

	for (i = 0; i < entries; i++) {
		idx = cq->cached_cons++ & cq->mask;
		d[i] = cq->ring[idx];
	}

	if (entries > 0) {
		u_smp_wmb();

		*cq->consumer = cq->cached_cons;
	}

	return entries;
}
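
/*
 * Buffer lifecycle, roughly: userspace hands frame addresses to the
 * kernel on the fill ring (umem_fill_to_kernel*), the kernel writes
 * received packets into them and returns them on the Rx ring; on the
 * Tx side the kernel reports sent frames back on the completion ring
 * (umem_complete_from_kernel), after which they may be reused. A
 * minimal refill, assuming a frame index "frame_idx" (hypothetical
 * name) whose frame we own:
 *
 *	u64 addr = frame_idx << FRAME_SHIFT;
 *	lassert(umem_fill_to_kernel(&xsk->umem->fq, &addr, 1) == 0);
 */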

static inline void *xq_get_data(struct xdpsock *xsk, u64 addr)
{
	return &xsk->umem->frames[addr];
}

static inline int xq_enq(struct xdp_uqueue *uq,
			 const struct xdp_desc *descs,
			 unsigned int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int i;

	if (xq_nb_free(uq, ndescs) < ndescs)
		return -ENOSPC;

	for (i = 0; i < ndescs; i++) {
		u32 idx = uq->cached_prod++ & uq->mask;

		r[idx].addr = descs[i].addr;
		r[idx].len = descs[i].len;
	}

	u_smp_wmb();

	*uq->producer = uq->cached_prod;
	return 0;
}

static inline int xq_enq_tx_only(struct xdp_uqueue *uq,
				 unsigned int id, unsigned int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int i;

	if (xq_nb_free(uq, ndescs) < ndescs)
		return -ENOSPC;

	for (i = 0; i < ndescs; i++) {
		u32 idx = uq->cached_prod++ & uq->mask;

		r[idx].addr	= (id + i) << FRAME_SHIFT;
		r[idx].len	= sizeof(pkt_data) - 1;
	}

	u_smp_wmb();

	*uq->producer = uq->cached_prod;
	return 0;
}
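
/*
 * xq_enq_tx_only() turns a frame index straight into a UMEM byte
 * offset: frame (id + i) lives at ((id + i) << FRAME_SHIFT), i.e.
 * (id + i) * 2048. Every frame was prefilled with the canned packet
 * in xdp_umem_configure(), so only the descriptor needs writing here,
 * never the payload.
 */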

static inline int xq_deq(struct xdp_uqueue *uq,
			 struct xdp_desc *descs,
			 int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int idx;
	int i, entries;

	entries = xq_nb_avail(uq, ndescs);

	u_smp_rmb();

	for (i = 0; i < entries; i++) {
		idx = uq->cached_cons++ & uq->mask;
		descs[i] = r[idx];
	}

	if (entries > 0) {
		u_smp_wmb();

		*uq->consumer = uq->cached_cons;
	}

	return entries;
}

static void swap_mac_addresses(void *data)
{
	struct ether_header *eth = (struct ether_header *)data;
	struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
	struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
	struct ether_addr tmp;

	tmp = *src_addr;
	*src_addr = *dst_addr;
	*dst_addr = tmp;
}

static void hex_dump(void *pkt, size_t length, u64 addr)
{
	const unsigned char *address = (unsigned char *)pkt;
	const unsigned char *line = address;
	size_t line_size = 32;
	unsigned char c;
	char buf[32];
	int i = 0;

	if (!DEBUG_HEXDUMP)
		return;

	sprintf(buf, "addr=%llu", addr);
	printf("length = %zu\n", length);
	printf("%s | ", buf);
	while (length-- > 0) {
		printf("%02X ", *address++);
		if (!(++i % line_size) || (length == 0 && i % line_size)) {
			if (length == 0) {
				while (i++ % line_size)
					printf("__ ");
			}
			printf(" | ");	/* right close */
			while (line < address) {
				c = *line++;
				printf("%c", (c < 33 || c == 255) ? 0x2E : c);
			}
			printf("\n");
			if (length > 0)
				printf("%s | ", buf);
		}
	}
	printf("\n");
}

static size_t gen_eth_frame(char *frame)
{
	memcpy(frame, pkt_data, sizeof(pkt_data) - 1);
	return sizeof(pkt_data) - 1;
}

static struct xdp_umem *xdp_umem_configure(int sfd)
{
	int fq_size = FQ_NUM_DESCS, cq_size = CQ_NUM_DESCS;
	struct xdp_mmap_offsets off;
	struct xdp_umem_reg mr;
	struct xdp_umem *umem;
	socklen_t optlen;
	void *bufs;

	umem = calloc(1, sizeof(*umem));
	lassert(umem);

	lassert(posix_memalign(&bufs, getpagesize(), /* PAGE_SIZE aligned */
			       NUM_FRAMES * FRAME_SIZE) == 0);

	mr.addr = (__u64)bufs;
	mr.len = NUM_FRAMES * FRAME_SIZE;
	mr.chunk_size = FRAME_SIZE;
	mr.headroom = FRAME_HEADROOM;

	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_FILL_RING, &fq_size,
			   sizeof(int)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &cq_size,
			   sizeof(int)) == 0);

	optlen = sizeof(off);
	lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
			   &optlen) == 0);

	umem->fq.map = mmap(0, off.fr.desc +
			    FQ_NUM_DESCS * sizeof(u64),
			    PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_POPULATE, sfd,
			    XDP_UMEM_PGOFF_FILL_RING);
	lassert(umem->fq.map != MAP_FAILED);

	umem->fq.mask = FQ_NUM_DESCS - 1;
	umem->fq.size = FQ_NUM_DESCS;
	umem->fq.producer = umem->fq.map + off.fr.producer;
	umem->fq.consumer = umem->fq.map + off.fr.consumer;
	umem->fq.ring = umem->fq.map + off.fr.desc;
	umem->fq.cached_cons = FQ_NUM_DESCS;

	umem->cq.map = mmap(0, off.cr.desc +
			     CQ_NUM_DESCS * sizeof(u64),
			     PROT_READ | PROT_WRITE,
			     MAP_SHARED | MAP_POPULATE, sfd,
			     XDP_UMEM_PGOFF_COMPLETION_RING);
	lassert(umem->cq.map != MAP_FAILED);

	umem->cq.mask = CQ_NUM_DESCS - 1;
	umem->cq.size = CQ_NUM_DESCS;
	umem->cq.producer = umem->cq.map + off.cr.producer;
	umem->cq.consumer = umem->cq.map + off.cr.consumer;
	umem->cq.ring = umem->cq.map + off.cr.desc;

	umem->frames = bufs;
	umem->fd = sfd;

	if (opt_bench == BENCH_TXONLY) {
		int i;

		for (i = 0; i < NUM_FRAMES * FRAME_SIZE; i += FRAME_SIZE)
			(void)gen_eth_frame(&umem->frames[i]);
	}

	return umem;
}
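
/*
 * The UMEM setup above proceeds in three steps: register the buffer
 * area (XDP_UMEM_REG), size the fill and completion rings, then query
 * XDP_MMAP_OFFSETS and mmap() each ring at its well-known page
 * offset. Note that fq.cached_cons is primed to FQ_NUM_DESCS so that
 * umem_nb_free() reports an empty fill ring as having all
 * FQ_NUM_DESCS slots free.
 */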

static struct xdpsock *xsk_configure(struct xdp_umem *umem)
{
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	int sfd, ndescs = NUM_DESCS;
	struct xdpsock *xsk;
	bool shared = true;
	socklen_t optlen;
	u64 i;

	sfd = socket(PF_XDP, SOCK_RAW, 0);
	lassert(sfd >= 0);

	xsk = calloc(1, sizeof(*xsk));
	lassert(xsk);

	xsk->sfd = sfd;
	xsk->outstanding_tx = 0;

	if (!umem) {
		shared = false;
		xsk->umem = xdp_umem_configure(sfd);
	} else {
		xsk->umem = umem;
	}

	lassert(setsockopt(sfd, SOL_XDP, XDP_RX_RING,
			   &ndescs, sizeof(int)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_TX_RING,
			   &ndescs, sizeof(int)) == 0);
	optlen = sizeof(off);
	lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
			   &optlen) == 0);

	/* Rx */
	xsk->rx.map = mmap(NULL,
			   off.rx.desc +
			   NUM_DESCS * sizeof(struct xdp_desc),
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_POPULATE, sfd,
			   XDP_PGOFF_RX_RING);
	lassert(xsk->rx.map != MAP_FAILED);

	if (!shared) {
		for (i = 0; i < NUM_DESCS * FRAME_SIZE; i += FRAME_SIZE)
			lassert(umem_fill_to_kernel(&xsk->umem->fq, &i, 1)
				== 0);
	}

	/* Tx */
	xsk->tx.map = mmap(NULL,
			   off.tx.desc +
			   NUM_DESCS * sizeof(struct xdp_desc),
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_POPULATE, sfd,
			   XDP_PGOFF_TX_RING);
	lassert(xsk->tx.map != MAP_FAILED);

	xsk->rx.mask = NUM_DESCS - 1;
	xsk->rx.size = NUM_DESCS;
	xsk->rx.producer = xsk->rx.map + off.rx.producer;
	xsk->rx.consumer = xsk->rx.map + off.rx.consumer;
	xsk->rx.ring = xsk->rx.map + off.rx.desc;

	xsk->tx.mask = NUM_DESCS - 1;
	xsk->tx.size = NUM_DESCS;
	xsk->tx.producer = xsk->tx.map + off.tx.producer;
	xsk->tx.consumer = xsk->tx.map + off.tx.consumer;
	xsk->tx.ring = xsk->tx.map + off.tx.desc;
	xsk->tx.cached_cons = NUM_DESCS;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = opt_ifindex;
	sxdp.sxdp_queue_id = opt_queue;

	if (shared) {
		sxdp.sxdp_flags = XDP_SHARED_UMEM;
		sxdp.sxdp_shared_umem_fd = umem->fd;
	} else {
		sxdp.sxdp_flags = opt_xdp_bind_flags;
	}

	lassert(bind(sfd, (struct sockaddr *)&sxdp, sizeof(sxdp)) == 0);

	return xsk;
}
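
/*
 * When called with a non-NULL umem, the new socket does not register
 * its own buffer area: it binds with XDP_SHARED_UMEM plus the fd of
 * the first socket, so all sockets created this way share one UMEM
 * and one fill/completion ring pair. Only the first (non-shared)
 * socket pre-populates the fill ring, since that ring is common to
 * all of them.
 */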

static void print_benchmark(bool running)
{
	const char *bench_str = "INVALID";

	if (opt_bench == BENCH_RXDROP)
		bench_str = "rxdrop";
	else if (opt_bench == BENCH_TXONLY)
		bench_str = "txonly";
	else if (opt_bench == BENCH_L2FWD)
		bench_str = "l2fwd";

	printf("%s:%d %s ", opt_if, opt_queue, bench_str);
	if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
		printf("xdp-skb ");
	else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
		printf("xdp-drv ");
	else
		printf("	");

	if (opt_poll)
		printf("poll() ");

	if (running) {
		printf("running...");
		fflush(stdout);
	}
}

static void dump_stats(void)
{
	unsigned long now = get_nsecs();
	long dt = now - prev_time;
	int i;

	prev_time = now;

	for (i = 0; i < num_socks; i++) {
		char *fmt = "%-15s %'-11.0f %'-11lu\n";
		double rx_pps, tx_pps;

		rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
			 1000000000. / dt;
		tx_pps = (xsks[i]->tx_npkts - xsks[i]->prev_tx_npkts) *
			 1000000000. / dt;

		printf("\n sock%d@", i);
		print_benchmark(false);
		printf("\n");

		printf("%-15s %-11s %-11s %-11.2f\n", "", "pps", "pkts",
		       dt / 1000000000.);
		printf(fmt, "rx", rx_pps, xsks[i]->rx_npkts);
		printf(fmt, "tx", tx_pps, xsks[i]->tx_npkts);

		xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
		xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
	}
}

static void *poller(void *arg)
{
	(void)arg;
	for (;;) {
		sleep(opt_interval);
		dump_stats();
	}

	return NULL;
}

static void int_exit(int sig)
{
	(void)sig;
	dump_stats();
	bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
	exit(EXIT_SUCCESS);
}

static struct option long_options[] = {
	{"rxdrop", no_argument, 0, 'r'},
	{"txonly", no_argument, 0, 't'},
	{"l2fwd", no_argument, 0, 'l'},
	{"interface", required_argument, 0, 'i'},
	{"queue", required_argument, 0, 'q'},
	{"poll", no_argument, 0, 'p'},
	{"shared-buffer", no_argument, 0, 's'},
	{"xdp-skb", no_argument, 0, 'S'},
	{"xdp-native", no_argument, 0, 'N'},
	{"interval", required_argument, 0, 'n'},
	{0, 0, 0, 0}
};

static void usage(const char *prog)
{
	const char *str =
		"  Usage: %s [OPTIONS]\n"
		"  Options:\n"
		"  -r, --rxdrop		Discard all incoming packets (default)\n"
		"  -t, --txonly		Only send packets\n"
		"  -l, --l2fwd		MAC swap L2 forwarding\n"
		"  -i, --interface=n	Run on interface n\n"
		"  -q, --queue=n	Use queue n (default 0)\n"
		"  -p, --poll		Use poll syscall\n"
		"  -s, --shared-buffer	Use shared packet buffer\n"
		"  -S, --xdp-skb=n	Use XDP skb mode\n"
		"  -N, --xdp-native=n	Enforce XDP native mode\n"
		"  -n, --interval=n	Specify statistics update interval (default 1 sec).\n"
		"\n";
	fprintf(stderr, str, prog);
	exit(EXIT_FAILURE);
}

static void parse_command_line(int argc, char **argv)
{
	int option_index, c;

	opterr = 0;

	for (;;) {
		c = getopt_long(argc, argv, "rtli:q:psSNn:", long_options,
				&option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'r':
			opt_bench = BENCH_RXDROP;
			break;
		case 't':
			opt_bench = BENCH_TXONLY;
			break;
		case 'l':
			opt_bench = BENCH_L2FWD;
			break;
		case 'i':
			opt_if = optarg;
			break;
		case 'q':
			opt_queue = atoi(optarg);
			break;
		case 's':
			opt_shared_packet_buffer = 1;
			break;
		case 'p':
			opt_poll = 1;
			break;
		case 'S':
			opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'N':
			opt_xdp_flags |= XDP_FLAGS_DRV_MODE;
			break;
		case 'n':
			opt_interval = atoi(optarg);
			break;
		default:
			usage(basename(argv[0]));
		}
	}

	opt_ifindex = if_nametoindex(opt_if);
	if (!opt_ifindex) {
		fprintf(stderr, "ERROR: interface \"%s\" does not exist\n",
			opt_if);
		usage(basename(argv[0]));
	}
}

static void kick_tx(int fd)
{
	int ret;

	ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
		return;
	lassert(0);
}
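
/*
 * A zero-length, non-blocking sendto() is how an AF_XDP socket tells
 * the kernel "descriptors are waiting on the Tx ring"; no payload
 * travels through the socket itself. ENOBUFS, EAGAIN and EBUSY are
 * transient here and simply mean "try again on the next pass", so
 * they are not treated as fatal.
 */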

static inline void complete_tx_l2fwd(struct xdpsock *xsk)
{
	u64 descs[BATCH_SIZE];
	unsigned int rcvd;
	size_t ndescs;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk->sfd);
	ndescs = (xsk->outstanding_tx > BATCH_SIZE) ? BATCH_SIZE :
		 xsk->outstanding_tx;

	/* re-add completed Tx buffers */
	rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, ndescs);
	if (rcvd > 0) {
		umem_fill_to_kernel(&xsk->umem->fq, descs, rcvd);
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}

static inline void complete_tx_only(struct xdpsock *xsk)
{
	u64 descs[BATCH_SIZE];
	unsigned int rcvd;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk->sfd);

	rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, BATCH_SIZE);
	if (rcvd > 0) {
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}

static void rx_drop(struct xdpsock *xsk)
{
	struct xdp_desc descs[BATCH_SIZE];
	unsigned int rcvd, i;

	rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
	if (!rcvd)
		return;

	for (i = 0; i < rcvd; i++) {
		char *pkt = xq_get_data(xsk, descs[i].addr);

		hex_dump(pkt, descs[i].len, descs[i].addr);
	}

	xsk->rx_npkts += rcvd;

	umem_fill_to_kernel_ex(&xsk->umem->fq, descs, rcvd);
}

static void rx_drop_all(void)
{
	struct pollfd fds[MAX_SOCKS + 1];
	int i, ret, timeout, nfds = 1;

	memset(fds, 0, sizeof(fds));

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsks[i]->sfd;
		fds[i].events = POLLIN;
		timeout = 1000; /* 1 second */
	}

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, nfds, timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			rx_drop(xsks[i]);
	}
}

static void tx_only(struct xdpsock *xsk)
{
	int timeout, ret, nfds = 1;
	struct pollfd fds[nfds + 1];
	unsigned int idx = 0;

	memset(fds, 0, sizeof(fds));
	fds[0].fd = xsk->sfd;
	fds[0].events = POLLOUT;
	timeout = 1000; /* 1 second */

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, nfds, timeout);
			if (ret <= 0)
				continue;

			if (fds[0].fd != xsk->sfd ||
			    !(fds[0].revents & POLLOUT))
				continue;
		}

		if (xq_nb_free(&xsk->tx, BATCH_SIZE) >= BATCH_SIZE) {
			lassert(xq_enq_tx_only(&xsk->tx, idx, BATCH_SIZE) == 0);

			xsk->outstanding_tx += BATCH_SIZE;
			idx += BATCH_SIZE;
			idx %= NUM_FRAMES;
		}

		complete_tx_only(xsk);
	}
}

static void l2fwd(struct xdpsock *xsk)
{
	for (;;) {
		struct xdp_desc descs[BATCH_SIZE];
		unsigned int rcvd, i;
		int ret;

		for (;;) {
			complete_tx_l2fwd(xsk);

			rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
			if (rcvd > 0)
				break;
		}

		for (i = 0; i < rcvd; i++) {
			char *pkt = xq_get_data(xsk, descs[i].addr);

			swap_mac_addresses(pkt);

			hex_dump(pkt, descs[i].len, descs[i].addr);
		}

		xsk->rx_npkts += rcvd;

		ret = xq_enq(&xsk->tx, descs, rcvd);
		lassert(ret == 0);
		xsk->outstanding_tx += rcvd;
	}
}
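
/*
 * l2fwd recycles the same frames end to end: a frame arrives on Rx,
 * has its MAC addresses swapped in place, and its descriptor is
 * re-queued on Tx unchanged. complete_tx_l2fwd() later moves the
 * frame from the completion ring back onto the fill ring, closing
 * the loop without any packet copies in userspace.
 */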

int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	struct bpf_prog_load_attr prog_load_attr = {
		.prog_type	= BPF_PROG_TYPE_XDP,
	};
	int prog_fd, qidconf_map, xsks_map;
	struct bpf_object *obj;
	char xdp_filename[256];
	struct bpf_map *map;
	int i, ret, key = 0;
	pthread_t pt;

	parse_command_line(argc, argv);

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n",
			strerror(errno));
		exit(EXIT_FAILURE);
	}

	snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]);
	prog_load_attr.file = xdp_filename;

	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
		exit(EXIT_FAILURE);
	if (prog_fd < 0) {
		fprintf(stderr, "ERROR: no program found: %s\n",
			strerror(prog_fd));
		exit(EXIT_FAILURE);
	}

	map = bpf_object__find_map_by_name(obj, "qidconf_map");
	qidconf_map = bpf_map__fd(map);
	if (qidconf_map < 0) {
		fprintf(stderr, "ERROR: no qidconf map found: %s\n",
			strerror(qidconf_map));
		exit(EXIT_FAILURE);
	}

	map = bpf_object__find_map_by_name(obj, "xsks_map");
	xsks_map = bpf_map__fd(map);
	if (xsks_map < 0) {
		fprintf(stderr, "ERROR: no xsks map found: %s\n",
			strerror(xsks_map));
		exit(EXIT_FAILURE);
	}

	if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd, opt_xdp_flags) < 0) {
		fprintf(stderr, "ERROR: link set xdp fd failed\n");
		exit(EXIT_FAILURE);
	}

	ret = bpf_map_update_elem(qidconf_map, &key, &opt_queue, 0);
	if (ret) {
		fprintf(stderr, "ERROR: bpf_map_update_elem qidconf\n");
		exit(EXIT_FAILURE);
	}

	/* Create sockets... */
	xsks[num_socks++] = xsk_configure(NULL);

#if RR_LB
	for (i = 0; i < MAX_SOCKS - 1; i++)
		xsks[num_socks++] = xsk_configure(xsks[0]->umem);
#endif

	/* ...and insert them into the map. */
	for (i = 0; i < num_socks; i++) {
		key = i;
		ret = bpf_map_update_elem(xsks_map, &key, &xsks[i]->sfd, 0);
		if (ret) {
			fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i);
			exit(EXIT_FAILURE);
		}
	}

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);
	signal(SIGABRT, int_exit);

	setlocale(LC_ALL, "");

	ret = pthread_create(&pt, NULL, poller, NULL);
	lassert(ret == 0);

	prev_time = get_nsecs();

	if (opt_bench == BENCH_RXDROP)
		rx_drop_all();
	else if (opt_bench == BENCH_TXONLY)
		tx_only(xsks[0]);
	else
		l2fwd(xsks[0]);

	return 0;
}