/* SPDX-License-Identifier: GPL-2.0
 * Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
 */
static const char *__doc__ =
 "XDP monitor tool, based on tracepoints\n"
;

static const char *__doc_err_only__ =
 " NOTICE: Only tracking XDP redirect errors\n"
 "         Enable TX success stats via '--stats'\n"
 "         (which comes with a per packet processing overhead)\n"
;

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <ctype.h>
#include <unistd.h>
#include <locale.h>

#include <sys/resource.h>
#include <getopt.h>
#include <net/if.h>
#include <time.h>

#include <signal.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_util.h"

enum map_type {
	REDIRECT_ERR_CNT,
	EXCEPTION_CNT,
	CPUMAP_ENQUEUE_CNT,
	CPUMAP_KTHREAD_CNT,
	DEVMAP_XMIT_CNT,
};

static const char *const map_type_strings[] = {
	[REDIRECT_ERR_CNT] = "redirect_err_cnt",
	[EXCEPTION_CNT] = "exception_cnt",
	[CPUMAP_ENQUEUE_CNT] = "cpumap_enqueue_cnt",
	[CPUMAP_KTHREAD_CNT] = "cpumap_kthread_cnt",
	[DEVMAP_XMIT_CNT] = "devmap_xmit_cnt",
};

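/* NUM_MAP must match the number of entries in map_type_strings above;
 * NUM_TP must be large enough to hold one bpf_link per tracepoint
 * program defined in the matching _kern.c object file.
 */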
#define NUM_MAP 5
#define NUM_TP 8

static int tp_cnt;
static int map_cnt;
static int verbose = 1;
static bool debug = false;
struct bpf_map *map_data[NUM_MAP] = {};
struct bpf_link *tp_links[NUM_TP] = {};
struct bpf_object *obj;

static const struct option long_options[] = {
	{"help",	no_argument,		NULL, 'h' },
	{"debug",	no_argument,		NULL, 'D' },
	{"stats",	no_argument,		NULL, 'S' },
	{"sec",		required_argument,	NULL, 's' },
	{0, 0, NULL,  0 }
};

static void int_exit(int sig)
{
	/* Detach tracepoints */
	while (tp_cnt)
		bpf_link__destroy(tp_links[--tp_cnt]);

	bpf_object__close(obj);
	exit(0);
}

/* C standard specifies two constants, EXIT_SUCCESS(0) and EXIT_FAILURE(1) */
#define EXIT_FAIL_MEM	5

static void usage(char *argv[])
{
	int i;

	printf("\nDOCUMENTATION:\n%s\n", __doc__);
	printf("\n");
	printf(" Usage: %s (options-see-below)\n",
	       argv[0]);
	printf(" Listing options:\n");
	for (i = 0; long_options[i].name != 0; i++) {
		printf(" --%-15s", long_options[i].name);
		if (long_options[i].flag != NULL)
			printf(" flag (internal value:%d)",
			       *long_options[i].flag);
		else
			printf(" short-option: -%c",
			       long_options[i].val);
		printf("\n");
	}
	printf("\n");
}

#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
static __u64 gettime(void)
{
	struct timespec t;
	int res;

	res = clock_gettime(CLOCK_MONOTONIC, &t);
	if (res < 0) {
		fprintf(stderr, "Error with clock_gettime! (%i)\n", res);
		exit(EXIT_FAILURE);
	}
	return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
}

enum {
	REDIR_SUCCESS = 0,
	REDIR_ERROR = 1,
};
#define REDIR_RES_MAX 2
static const char *redir_names[REDIR_RES_MAX] = {
	[REDIR_SUCCESS]	= "Success",
	[REDIR_ERROR]	= "Error",
};
static const char *err2str(int err)
{
	if (err >= 0 && err < REDIR_RES_MAX)
		return redir_names[err];
	return NULL;
}
/* enum xdp_action */
#define XDP_UNKNOWN	(XDP_REDIRECT + 1)
#define XDP_ACTION_MAX (XDP_UNKNOWN + 1)
static const char *xdp_action_names[XDP_ACTION_MAX] = {
	[XDP_ABORTED]	= "XDP_ABORTED",
	[XDP_DROP]	= "XDP_DROP",
	[XDP_PASS]	= "XDP_PASS",
	[XDP_TX]	= "XDP_TX",
	[XDP_REDIRECT]	= "XDP_REDIRECT",
	[XDP_UNKNOWN]	= "XDP_UNKNOWN",
};
static const char *action2str(int action)
{
	if (action >= 0 && action < XDP_ACTION_MAX)
		return xdp_action_names[action];
	return NULL;
}

/* Common stats data record shared with _kern.c */
struct datarec {
	__u64 processed;
	__u64 dropped;
	__u64 info;
	__u64 err;
};
#define MAX_CPUS 64

/* Userspace structs for collection of stats from maps */
struct record {
	__u64 timestamp;
	struct datarec total;
	struct datarec *cpu;
};
struct u64rec {
	__u64 processed;
};
struct record_u64 {
	/* record for _kern side __u64 values */
	__u64 timestamp;
	struct u64rec total;
	struct u64rec *cpu;
};

struct stats_record {
	struct record_u64 xdp_redirect[REDIR_RES_MAX];
	struct record_u64 xdp_exception[XDP_ACTION_MAX];
	struct record xdp_cpumap_kthread;
	struct record xdp_cpumap_enqueue[MAX_CPUS];
	struct record xdp_devmap_xmit;
};

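/* Lookup one entry in a per-CPU array map and aggregate: store each
 * CPU's counters in rec->cpu[] and their sums in rec->total.
 */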
static bool map_collect_record(int fd, __u32 key, struct record *rec)
{
	/* For percpu maps, userspace gets a value per possible CPU */
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct datarec values[nr_cpus];
	__u64 sum_processed = 0;
	__u64 sum_dropped = 0;
	__u64 sum_info = 0;
	__u64 sum_err = 0;
	int i;

	if (bpf_map_lookup_elem(fd, &key, values) != 0) {
		fprintf(stderr,
			"ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
		return false;
	}
	/* Get time as close as possible to reading map contents */
	rec->timestamp = gettime();

	/* Record and sum values from each CPU */
	for (i = 0; i < nr_cpus; i++) {
		rec->cpu[i].processed = values[i].processed;
		sum_processed        += values[i].processed;
		rec->cpu[i].dropped = values[i].dropped;
		sum_dropped        += values[i].dropped;
		rec->cpu[i].info = values[i].info;
		sum_info        += values[i].info;
		rec->cpu[i].err = values[i].err;
		sum_err        += values[i].err;
	}
	rec->total.processed = sum_processed;
	rec->total.dropped   = sum_dropped;
	rec->total.info      = sum_info;
	rec->total.err       = sum_err;
	return true;
}

static bool map_collect_record_u64(int fd, __u32 key, struct record_u64 *rec)
{
	/* For percpu maps, userspace gets a value per possible CPU */
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct u64rec values[nr_cpus];
	__u64 sum_total = 0;
	int i;

	if (bpf_map_lookup_elem(fd, &key, values) != 0) {
		fprintf(stderr,
			"ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
		return false;
	}
	/* Get time as close as possible to reading map contents */
	rec->timestamp = gettime();

	/* Record and sum values from each CPU */
	for (i = 0; i < nr_cpus; i++) {
		rec->cpu[i].processed = values[i].processed;
		sum_total            += values[i].processed;
	}
	rec->total.processed = sum_total;
	return true;
}

static double calc_period(struct record *r, struct record *p)
{
	double period_ = 0;
	__u64 period = 0;

	period = r->timestamp - p->timestamp;
	if (period > 0)
		period_ = ((double) period / NANOSEC_PER_SEC);

	return period_;
}

static double calc_period_u64(struct record_u64 *r, struct record_u64 *p)
{
	double period_ = 0;
	__u64 period = 0;

	period = r->timestamp - p->timestamp;
	if (period > 0)
		period_ = ((double) period / NANOSEC_PER_SEC);

	return period_;
}

static double calc_pps(struct datarec *r, struct datarec *p, double period)
{
	__u64 packets = 0;
	double pps = 0;

	if (period > 0) {
		packets = r->processed - p->processed;
		pps = packets / period;
	}
	return pps;
}

static double calc_pps_u64(struct u64rec *r, struct u64rec *p, double period)
{
	__u64 packets = 0;
	double pps = 0;

	if (period > 0) {
		packets = r->processed - p->processed;
		pps = packets / period;
	}
	return pps;
}

static double calc_drop(struct datarec *r, struct datarec *p, double period)
{
	__u64 packets = 0;
	double pps = 0;

	if (period > 0) {
		packets = r->dropped - p->dropped;
		pps = packets / period;
	}
	return pps;
}

static double calc_info(struct datarec *r, struct datarec *p, double period)
{
	__u64 packets = 0;
	double pps = 0;

	if (period > 0) {
		packets = r->info - p->info;
		pps = packets / period;
	}
	return pps;
}

static double calc_err(struct datarec *r, struct datarec *p, double period)
{
	__u64 packets = 0;
	double pps = 0;

	if (period > 0) {
		packets = r->err - p->err;
		pps = packets / period;
	}
	return pps;
}

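/* Print one sampling interval. Rates are computed as deltas between
 * the current and previous snapshot, divided by the measured period.
 * Columns: XDP-event, CPU (or "total"), pps, drop-pps and extra-info.
 */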
static void stats_print(struct stats_record *stats_rec,
			struct stats_record *stats_prev,
			bool err_only)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	int rec_i = 0, i, to_cpu;
	double t = 0, pps = 0;

	/* Header */
	printf("%-15s %-7s %-12s %-12s %-9s\n",
	       "XDP-event", "CPU:to", "pps", "drop-pps", "extra-info");

	/* tracepoint: xdp:xdp_redirect_* */
	if (err_only)
		rec_i = REDIR_ERROR;

	for (; rec_i < REDIR_RES_MAX; rec_i++) {
		struct record_u64 *rec, *prev;
		char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %s\n";
		char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %s\n";

		rec  =  &stats_rec->xdp_redirect[rec_i];
		prev = &stats_prev->xdp_redirect[rec_i];
		t = calc_period_u64(rec, prev);

		for (i = 0; i < nr_cpus; i++) {
			struct u64rec *r = &rec->cpu[i];
			struct u64rec *p = &prev->cpu[i];

			pps = calc_pps_u64(r, p, t);
			if (pps > 0)
				printf(fmt1, "XDP_REDIRECT", i,
				       rec_i ? 0.0 : pps, rec_i ? pps : 0.0,
				       err2str(rec_i));
		}
		pps = calc_pps_u64(&rec->total, &prev->total, t);
		printf(fmt2, "XDP_REDIRECT", "total",
		       rec_i ? 0.0 : pps, rec_i ? pps : 0.0, err2str(rec_i));
	}

	/* tracepoint: xdp:xdp_exception */
	for (rec_i = 0; rec_i < XDP_ACTION_MAX; rec_i++) {
		struct record_u64 *rec, *prev;
		char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %s\n";
		char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %s\n";

		rec  =  &stats_rec->xdp_exception[rec_i];
		prev = &stats_prev->xdp_exception[rec_i];
		t = calc_period_u64(rec, prev);

		for (i = 0; i < nr_cpus; i++) {
			struct u64rec *r = &rec->cpu[i];
			struct u64rec *p = &prev->cpu[i];

			pps = calc_pps_u64(r, p, t);
			if (pps > 0)
				printf(fmt1, "Exception", i,
				       0.0, pps, action2str(rec_i));
		}
		pps = calc_pps_u64(&rec->total, &prev->total, t);
		if (pps > 0)
			printf(fmt2, "Exception", "total",
			       0.0, pps, action2str(rec_i));
	}

	/* cpumap enqueue stats */
	for (to_cpu = 0; to_cpu < MAX_CPUS; to_cpu++) {
		char *fmt1 = "%-15s %3d:%-3d %'-12.0f %'-12.0f %'-10.2f %s\n";
		char *fmt2 = "%-15s %3s:%-3d %'-12.0f %'-12.0f %'-10.2f %s\n";
		struct record *rec, *prev;
		char *info_str = "";
		double drop, info;

		rec  =  &stats_rec->xdp_cpumap_enqueue[to_cpu];
		prev = &stats_prev->xdp_cpumap_enqueue[to_cpu];
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop(r, p, t);
			info = calc_info(r, p, t);
			if (info > 0) {
				info_str = "bulk-average";
				info = pps / info; /* calc average bulk size */
			}
			if (pps > 0)
				printf(fmt1, "cpumap-enqueue",
				       i, to_cpu, pps, drop, info, info_str);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		if (pps > 0) {
			drop = calc_drop(&rec->total, &prev->total, t);
			info = calc_info(&rec->total, &prev->total, t);
			if (info > 0) {
				info_str = "bulk-average";
				info = pps / info; /* calc average bulk size */
			}
			printf(fmt2, "cpumap-enqueue",
			       "sum", to_cpu, pps, drop, info, info_str);
		}
	}

	/* cpumap kthread stats */
	{
		char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %'-10.0f %s\n";
		char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %'-10.0f %s\n";
		struct record *rec, *prev;
		double drop, info;
		char *i_str = "";

		rec  =  &stats_rec->xdp_cpumap_kthread;
		prev = &stats_prev->xdp_cpumap_kthread;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop(r, p, t);
			info = calc_info(r, p, t);
			if (info > 0)
				i_str = "sched";
			if (pps > 0 || drop > 0)
				printf(fmt1, "cpumap-kthread",
				       i, pps, drop, info, i_str);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop(&rec->total, &prev->total, t);
		info = calc_info(&rec->total, &prev->total, t);
		if (info > 0)
			i_str = "sched-sum";
		printf(fmt2, "cpumap-kthread", "total", pps, drop, info, i_str);
	}

	/* devmap ndo_xdp_xmit stats */
	{
		char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %'-10.2f %s %s\n";
		char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %'-10.2f %s %s\n";
		struct record *rec, *prev;
		double drop, info, err;
		char *i_str = "";
		char *err_str = "";

		rec  =  &stats_rec->xdp_devmap_xmit;
		prev = &stats_prev->xdp_devmap_xmit;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop(r, p, t);
			info = calc_info(r, p, t);
			err  = calc_err(r, p, t);
			if (info > 0) {
				i_str = "bulk-average";
				info = (pps + drop) / info; /* calc avg bulk */
			}
			if (err > 0)
				err_str = "drv-err";
			if (pps > 0 || drop > 0)
				printf(fmt1, "devmap-xmit",
				       i, pps, drop, info, i_str, err_str);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop(&rec->total, &prev->total, t);
		info = calc_info(&rec->total, &prev->total, t);
		err  = calc_err(&rec->total, &prev->total, t);
		if (info > 0) {
			i_str = "bulk-average";
			info = (pps + drop) / info; /* calc avg bulk */
		}
		if (err > 0)
			err_str = "drv-err";
		printf(fmt2, "devmap-xmit", "total", pps, drop,
		       info, i_str, err_str);
	}

	printf("\n");
}

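/* Take one snapshot: read every stats map into @rec. Always returns
 * true; individual map lookup failures are reported to stderr.
 */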
static bool stats_collect(struct stats_record *rec)
{
	int fd;
	int i;

	/* TODO: Detect if someone unloaded the perf event_fd's, as
	 * this can happen if someone runs perf-record -e
	 */

	fd = bpf_map__fd(map_data[REDIRECT_ERR_CNT]);
	for (i = 0; i < REDIR_RES_MAX; i++)
		map_collect_record_u64(fd, i, &rec->xdp_redirect[i]);

	fd = bpf_map__fd(map_data[EXCEPTION_CNT]);
	for (i = 0; i < XDP_ACTION_MAX; i++)
		map_collect_record_u64(fd, i, &rec->xdp_exception[i]);

	fd = bpf_map__fd(map_data[CPUMAP_ENQUEUE_CNT]);
	for (i = 0; i < MAX_CPUS; i++)
		map_collect_record(fd, i, &rec->xdp_cpumap_enqueue[i]);

	fd = bpf_map__fd(map_data[CPUMAP_KTHREAD_CNT]);
	map_collect_record(fd, 0, &rec->xdp_cpumap_kthread);

	fd = bpf_map__fd(map_data[DEVMAP_XMIT_CNT]);
	map_collect_record(fd, 0, &rec->xdp_devmap_xmit);

	return true;
}

static void *alloc_rec_per_cpu(int record_size)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	void *array;

	array = calloc(nr_cpus, record_size);
	if (!array) {
		fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
		exit(EXIT_FAIL_MEM);
	}
	return array;
}

static struct stats_record *alloc_stats_record(void)
{
	struct stats_record *rec;
	int rec_sz;
	int i;

	/* Alloc main stats_record structure */
	rec = calloc(1, sizeof(*rec));
	if (!rec) {
		fprintf(stderr, "Mem alloc error\n");
		exit(EXIT_FAIL_MEM);
	}

	/* Alloc stats stored per CPU for each record */
	rec_sz = sizeof(struct u64rec);
	for (i = 0; i < REDIR_RES_MAX; i++)
		rec->xdp_redirect[i].cpu = alloc_rec_per_cpu(rec_sz);

	for (i = 0; i < XDP_ACTION_MAX; i++)
		rec->xdp_exception[i].cpu = alloc_rec_per_cpu(rec_sz);

	rec_sz = sizeof(struct datarec);
	rec->xdp_cpumap_kthread.cpu = alloc_rec_per_cpu(rec_sz);
	rec->xdp_devmap_xmit.cpu    = alloc_rec_per_cpu(rec_sz);

	for (i = 0; i < MAX_CPUS; i++)
		rec->xdp_cpumap_enqueue[i].cpu = alloc_rec_per_cpu(rec_sz);

	return rec;
}

static void free_stats_record(struct stats_record *r)
{
	int i;

	for (i = 0; i < REDIR_RES_MAX; i++)
		free(r->xdp_redirect[i].cpu);

	for (i = 0; i < XDP_ACTION_MAX; i++)
		free(r->xdp_exception[i].cpu);

	free(r->xdp_cpumap_kthread.cpu);
	free(r->xdp_devmap_xmit.cpu);

	for (i = 0; i < MAX_CPUS; i++)
		free(r->xdp_cpumap_enqueue[i].cpu);

	free(r);
}

/* Pointer swap trick */
static inline void swap(struct stats_record **a, struct stats_record **b)
{
	struct stats_record *tmp;

	tmp = *a;
	*a = *b;
	*b = tmp;
}

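/* Poll loop: keep two snapshots and print the delta between them
 * every @interval seconds. Runs until interrupted by SIGINT/SIGTERM.
 */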
static void stats_poll(int interval, bool err_only)
{
	struct stats_record *rec, *prev;

	rec  = alloc_stats_record();
	prev = alloc_stats_record();
	stats_collect(rec);

	if (err_only)
		printf("\n%s\n", __doc_err_only__);

	/* Allow pretty-printing with thousands separators (printf %' flag) */
	setlocale(LC_NUMERIC, "en_US");

	/* Header */
	if (verbose)
		printf("\n%s", __doc__);

	/* TODO Need more advanced stats on error types */
	if (verbose) {
		printf(" - Stats map0: %s\n", bpf_map__name(map_data[0]));
		printf(" - Stats map1: %s\n", bpf_map__name(map_data[1]));
		printf("\n");
	}
	fflush(stdout);

	while (1) {
		swap(&prev, &rec);
		stats_collect(rec);
		stats_print(rec, prev, err_only);
		fflush(stdout);
		sleep(interval);
	}

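	/* Not reached: the infinite loop above only exits via the
	 * SIGINT/SIGTERM handler (int_exit), which cleans up and exits.
	 */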
	free_stats_record(rec);
	free_stats_record(prev);
}

static void print_bpf_prog_info(void)
{
	struct bpf_program *prog;
	struct bpf_map *map;
	int i = 0;

	/* Prog info */
	printf("Loaded BPF object has %d program(s)\n", tp_cnt);
	bpf_object__for_each_program(prog, obj) {
		printf(" - prog_fd[%d] = fd(%d)\n", i, bpf_program__fd(prog));
		i++;
	}

	i = 0;
	/* Maps info */
	printf("Loaded BPF object has %d map(s)\n", map_cnt);
	bpf_object__for_each_map(map, obj) {
		const char *name = bpf_map__name(map);
		int fd		 = bpf_map__fd(map);

		printf(" - map_data[%d] = fd(%d) name:%s\n", i, fd, name);
		i++;
	}

	/* Event info */
	printf("Searching for (max:%d) event file descriptor(s)\n", tp_cnt);
	for (i = 0; i < tp_cnt; i++) {
		int fd = bpf_link__fd(tp_links[i]);

		if (fd != -1)
			printf(" - event_fd[%d] = fd(%d)\n", i, fd);
	}
}

int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	struct bpf_program *prog;
	int longindex = 0, opt;
	int ret = EXIT_FAILURE;
	enum map_type type;
	char filename[256];

	/* Default settings: */
	bool errors_only = true;
	int interval = 2;

	/* Parse command line args */
	while ((opt = getopt_long(argc, argv, "hDSs:",
				  long_options, &longindex)) != -1) {
		switch (opt) {
		case 'D':
			debug = true;
			break;
		case 'S':
			errors_only = false;
			break;
		case 's':
			interval = atoi(optarg);
			break;
		case 'h':
		default:
			usage(argv);
			return ret;
		}
	}

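	/* The BPF object file is expected next to the binary, named
	 * "<prog>_kern.o", matching the _kern.c side of this sample.
	 */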
	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		perror("setrlimit(RLIMIT_MEMLOCK)");
		return ret;
	}

	/* Remove tracepoint program when program is interrupted or killed */
	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);

	obj = bpf_object__open_file(filename, NULL);
	if (libbpf_get_error(obj)) {
		printf("ERROR: opening BPF object file failed\n");
		obj = NULL;
		goto cleanup;
	}

	/* load BPF program */
	if (bpf_object__load(obj)) {
		printf("ERROR: loading BPF object file failed\n");
		goto cleanup;
	}

	for (type = 0; type < NUM_MAP; type++) {
		map_data[type] =
			bpf_object__find_map_by_name(obj, map_type_strings[type]);

		if (libbpf_get_error(map_data[type])) {
			printf("ERROR: finding a map in obj file failed\n");
			goto cleanup;
		}
		map_cnt++;
	}

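	/* Attach every tracepoint program in the object. The tp_links[]
	 * order matches the order the programs are defined in _kern.c.
	 */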
	bpf_object__for_each_program(prog, obj) {
		tp_links[tp_cnt] = bpf_program__attach(prog);
		if (libbpf_get_error(tp_links[tp_cnt])) {
			printf("ERROR: bpf_program__attach failed\n");
			tp_links[tp_cnt] = NULL;
			goto cleanup;
		}
		tp_cnt++;
	}

	if (debug)
		print_bpf_prog_info();

	/* Unload/stop tracepoint event by closing bpf_link's */
	if (errors_only) {
		/* The tp_links[] indices depend on the order in which
		 * the functions are defined in _kern.c
		 */
		bpf_link__destroy(tp_links[2]);	/* tracepoint/xdp/xdp_redirect */
		tp_links[2] = NULL;

		bpf_link__destroy(tp_links[3]);	/* tracepoint/xdp/xdp_redirect_map */
		tp_links[3] = NULL;
	}

	stats_poll(interval, errors_only);

	ret = EXIT_SUCCESS;

cleanup:
	/* Detach tracepoints */
	while (tp_cnt)
		bpf_link__destroy(tp_links[--tp_cnt]);

	bpf_object__close(obj);
	return ret;
}