// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <unistd.h>
#include <sys/syscall.h>
#include "bpf_iter_ipv6_route.skel.h"
#include "bpf_iter_netlink.skel.h"
#include "bpf_iter_bpf_map.skel.h"
#include "bpf_iter_task.skel.h"
#include "bpf_iter_task_stack.skel.h"
#include "bpf_iter_task_file.skel.h"
#include "bpf_iter_task_vma.skel.h"
#include "bpf_iter_task_btf.skel.h"
#include "bpf_iter_tcp4.skel.h"
#include "bpf_iter_tcp6.skel.h"
#include "bpf_iter_udp4.skel.h"
#include "bpf_iter_udp6.skel.h"
#include "bpf_iter_unix.skel.h"
#include "bpf_iter_vma_offset.skel.h"
#include "bpf_iter_test_kern1.skel.h"
#include "bpf_iter_test_kern2.skel.h"
#include "bpf_iter_test_kern3.skel.h"
#include "bpf_iter_test_kern4.skel.h"
#include "bpf_iter_bpf_hash_map.skel.h"
#include "bpf_iter_bpf_percpu_hash_map.skel.h"
#include "bpf_iter_bpf_array_map.skel.h"
#include "bpf_iter_bpf_percpu_array_map.skel.h"
#include "bpf_iter_bpf_sk_storage_helpers.skel.h"
#include "bpf_iter_bpf_sk_storage_map.skel.h"
#include "bpf_iter_test_kern5.skel.h"
#include "bpf_iter_test_kern6.skel.h"
#include "bpf_iter_bpf_link.skel.h"
#include "bpf_iter_ksym.skel.h"
#include "bpf_iter_sockmap.skel.h"

static int duration; /* used by the CHECK() macro */

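/* bpf_iter_test_kern3 is expected to fail verification: its program
 * presumably omits a required NULL check on the iterator's task pointer,
 * so open_and_load() must return an error (note the ASSERT_ERR_PTR below).
 */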
static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel;

	skel = bpf_iter_test_kern3__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) {
		bpf_iter_test_kern3__destroy(skel);
		return;
	}
}

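/* Attach @prog with @opts, create an iterator from the resulting link,
 * and drain it. The output itself is ignored; the test only requires
 * that read() reaches EOF without an error.
 */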
static void do_dummy_read_opts(struct bpf_program *prog, struct bpf_iter_attach_opts *opts)
{
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	link = bpf_program__attach_iter(prog, opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* don't check the contents, but ensure read() ends without error */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));

	close(iter_fd);

free_link:
	bpf_link__destroy(link);
}

static void do_dummy_read(struct bpf_program *prog)
{
	do_dummy_read_opts(prog, NULL);
}

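/* Verify that an already-created iterator fd keeps working after the
 * underlying link, skeleton, and map fd have all been released: reading
 * the iterator afterwards must not crash or fail.
 */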
static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog,
				struct bpf_map *map)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(map);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_map_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_map_iter")) {
		bpf_link__destroy(link);
		return;
	}

	/* Close the link and map fds prematurely */
	bpf_link__destroy(link);
	bpf_object__destroy_skeleton(*skel);
	*skel = NULL;

	/* Give the map-free work a chance to run first if the map is freed */
	usleep(100);
	/* Memory used by both the sock map and the sock local storage map is
	 * freed only after two synchronize_rcu() calls, so wait for that
	 */
	kern_sync_rcu();
	kern_sync_rcu();

	/* Read after both the map fd and the link fd are closed */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	ASSERT_GE(len, 0, "read_iterator");

	close(iter_fd);
}

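/* Read from @fd until EOF or until @size bytes have been read. Returns
 * the number of bytes read, or read()'s negative return value on error.
 */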
static int read_fd_into_buffer(int fd, char *buf, int size)
{
	int bufleft = size;
	int len;

	do {
		len = read(fd, buf, bufleft);
		if (len > 0) {
			buf += len;
			bufleft -= len;
		}
	} while (len > 0);

	return len < 0 ? len : size - bufleft;
}

static void test_ipv6_route(void)
{
	struct bpf_iter_ipv6_route *skel;

	skel = bpf_iter_ipv6_route__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ipv6_route);

	bpf_iter_ipv6_route__destroy(skel);
}

static void test_netlink(void)
{
	struct bpf_iter_netlink *skel;

	skel = bpf_iter_netlink__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_netlink);

	bpf_iter_netlink__destroy(skel);
}

static void test_bpf_map(void)
{
	struct bpf_iter_bpf_map *skel;

	skel = bpf_iter_bpf_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_map);

	bpf_iter_bpf_map__destroy(skel);
}

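/* Raw syscall wrapper; pidfd_open(2) may not have a libc wrapper on
 * older systems.
 */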
static int pidfd_open(pid_t pid, unsigned int flags)
{
	return syscall(SYS_pidfd_open, pid, flags);
}

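/* Attach a task iterator filtered by the current tid, then verify that
 * bpf_obj_get_info_by_fd() on the link reports that tid back in
 * bpf_link_info.iter.task.tid.
 */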
static void check_bpf_link_info(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link_info info = {};
	struct bpf_link *link;
	__u32 info_len;
	int err;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	info_len = sizeof(info);
	err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
	ASSERT_OK(err, "bpf_obj_get_info_by_fd");
	ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid");

	bpf_link__destroy(link);
}

static pthread_mutex_t do_nothing_mutex;

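/* Thread body that blocks on do_nothing_mutex, keeping an extra thread
 * alive (and visible to task iterators) until the test releases the
 * mutex.
 */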
static void *do_nothing_wait(void *arg)
{
	pthread_mutex_lock(&do_nothing_mutex);
	pthread_mutex_unlock(&do_nothing_mutex);

	pthread_exit(arg);
}

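/* Run the dump_task iterator with @opts while one extra thread exists,
 * and report how many tids the BPF program saw matching skel->bss->tid
 * (known) vs. not matching it (unknown).
 */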
static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
				     int *num_unknown, int *num_known)
{
	struct bpf_iter_task *skel;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
		return;

	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");

	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		  "pthread_create");

	skel->bss->tid = getpid();

	do_dummy_read_opts(skel->progs.dump_task, opts);

	*num_unknown = skel->bss->num_unknown_tid;
	*num_known = skel->bss->num_known_tid;

	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
	ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
		     "pthread_join");

	bpf_iter_task__destroy(skel);
}

static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown, int num_known)
{
	int num_unknown_tid, num_known_tid;

	test_task_common_nocheck(opts, &num_unknown_tid, &num_known_tid);
	ASSERT_EQ(num_unknown_tid, num_unknown, "check_num_unknown_tid");
	ASSERT_EQ(num_known_tid, num_known, "check_num_known_tid");
}

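/* Task iterator parameterization: filtering by tid should visit only
 * the main thread (0 unknown, 1 known); filtering by pid should visit
 * the whole thread group, i.e. the main thread plus the do_nothing_wait
 * thread (1 unknown, 1 known); unfiltered iteration visits every task
 * in the system.
 */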
static void test_task_tid(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	int num_unknown_tid, num_known_tid;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	test_task_common(&opts, 0, 1);

	linfo.task.tid = 0;
	linfo.task.pid = getpid();
	test_task_common(&opts, 1, 1);

	test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid);
	ASSERT_GT(num_unknown_tid, 1, "check_num_unknown_tid");
	ASSERT_EQ(num_known_tid, 1, "check_num_known_tid");
}

static void test_task_pid(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_common(&opts, 1, 1);
}

static void test_task_pidfd(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	int pidfd;

	pidfd = pidfd_open(getpid(), 0);
	if (!ASSERT_GT(pidfd, 0, "pidfd_open"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid_fd = pidfd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_common(&opts, 1, 1);

	close(pidfd);
}

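/* The sleepable task iterator program presumably exercises
 * bpf_copy_from_user_task(); both its expected-failure counter and its
 * success counter must end up nonzero.
 */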
static void test_task_sleepable(void)
{
	struct bpf_iter_task *skel;

	skel = bpf_iter_task__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task_sleepable);

	ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0,
		  "num_expected_failure_copy_from_user_task");
	ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0,
		  "num_success_copy_from_user_task");

	bpf_iter_task__destroy(skel);
}

static void test_task_stack(void)
{
	struct bpf_iter_task_stack *skel;

	skel = bpf_iter_task_stack__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task_stack);
	do_dummy_read(skel->progs.get_task_user_stacks);

	bpf_iter_task_stack__destroy(skel);
}

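/* task_file iterator: with a tid filter only this process's files are
 * visited, so exactly one unique tgid is seen; without a filter, files
 * of many processes are visited. The BPF-side sanity counter `count`
 * must stay 0 in both runs.
 */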
static void test_task_file(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_task_file *skel;
	union bpf_iter_link_info linfo;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task_file__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_file__open_and_load"))
		return;

	skel->bss->tgid = getpid();

	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");

	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		  "pthread_create");

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	do_dummy_read_opts(skel->progs.dump_task_file, &opts);

	ASSERT_EQ(skel->bss->count, 0, "check_count");
	ASSERT_EQ(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");

	skel->bss->last_tgid = 0;
	skel->bss->count = 0;
	skel->bss->unique_tgid_count = 0;

	do_dummy_read(skel->progs.dump_task_file);

	ASSERT_EQ(skel->bss->count, 0, "check_count");
	ASSERT_GT(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");

	check_bpf_link_info(skel->progs.dump_task_file);

	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
	ASSERT_OK(pthread_join(thread_id, &ret), "pthread_join");
	ASSERT_NULL(ret, "pthread_join");

	bpf_iter_task_file__destroy(skel);
}

#define TASKBUFSZ 32768

static char taskbuf[TASKBUFSZ];

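/* Dump a task_struct using BTF-style printing and check that the
 * output contains "(struct task_struct)". Returns nonzero when the
 * subtest is skipped because __builtin_btf_type_id is unavailable.
 */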
static int do_btf_read(struct bpf_iter_task_btf *skel)
{
	struct bpf_program *prog = skel->progs.dump_task_struct;
	struct bpf_iter_task_btf__bss *bss = skel->bss;
	int iter_fd = -1, err;
	struct bpf_link *link;
	char *buf = taskbuf;
	int ret = 0;

	link = bpf_program__attach_iter(prog, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return ret;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ);
	if (bss->skip) {
		printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
		ret = 1;
		test__skip();
		goto free_link;
	}

	if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno)))
		goto free_link;

	ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)",
			  "check for btf representation of task_struct in iter data");
free_link:
	if (iter_fd > 0)
		close(iter_fd);
	bpf_link__destroy(link);
	return ret;
}

static void test_task_btf(void)
{
	struct bpf_iter_task_btf__bss *bss;
	struct bpf_iter_task_btf *skel;
	int ret;

	skel = bpf_iter_task_btf__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_btf__open_and_load"))
		return;

	bss = skel->bss;

	ret = do_btf_read(skel);
	if (ret)
		goto cleanup;

	if (!ASSERT_NEQ(bss->tasks, 0, "no task iteration, did BPF program run?"))
		goto cleanup;

	ASSERT_EQ(bss->seq_err, 0, "check for unexpected err");

cleanup:
	bpf_iter_task_btf__destroy(skel);
}

static void test_tcp4(void)
{
	struct bpf_iter_tcp4 *skel;

	skel = bpf_iter_tcp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp4);

	bpf_iter_tcp4__destroy(skel);
}

static void test_tcp6(void)
{
	struct bpf_iter_tcp6 *skel;

	skel = bpf_iter_tcp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp6);

	bpf_iter_tcp6__destroy(skel);
}

static void test_udp4(void)
{
	struct bpf_iter_udp4 *skel;

	skel = bpf_iter_udp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp4);

	bpf_iter_udp4__destroy(skel);
}

static void test_udp6(void)
{
	struct bpf_iter_udp6 *skel;

	skel = bpf_iter_udp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp6);

	bpf_iter_udp6__destroy(skel);
}

static void test_unix(void)
{
	struct bpf_iter_unix *skel;

	skel = bpf_iter_unix__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_unix);

	bpf_iter_unix__destroy(skel);
}

/* The expected string is less than 16 bytes */
static int do_read_with_fd(int iter_fd, const char *expected,
			   bool read_one_char)
{
	int len, read_buf_len, start;
	char buf[16] = {};

	read_buf_len = read_one_char ? 1 : 16;
	start = 0;
	while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
		start += len;
		if (CHECK(start >= 16, "read", "read len %d\n", len))
			return -1;
		read_buf_len = read_one_char ? 1 : 16 - start;
	}
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		return -1;

	if (!ASSERT_STREQ(buf, expected, "read"))
		return -1;

	return 0;
}

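/* Anonymous iterator: attach through the skeleton, create an iterator
 * fd and expect "abcd" as output, reading either in bulk or one byte at
 * a time to exercise seq_file buffering.
 */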
static void test_anon_iter(bool read_one_char)
{
	struct bpf_iter_test_kern1 *skel;
	struct bpf_link *link;
	int iter_fd, err;

	skel = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern1__open_and_load"))
		return;

	err = bpf_iter_test_kern1__attach(skel);
	if (!ASSERT_OK(err, "bpf_iter_test_kern1__attach"))
		goto out;

	link = skel->links.dump_task;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	do_read_with_fd(iter_fd, "abcd", read_one_char);
	close(iter_fd);

out:
	bpf_iter_test_kern1__destroy(skel);
}

static int do_read(const char *path, const char *expected)
{
	int err, iter_fd;

	iter_fd = open(path, O_RDONLY);
	if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
		  path, strerror(errno)))
		return -1;

	err = do_read_with_fd(iter_fd, expected, false);
	close(iter_fd);
	return err;
}

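/* File-based iterator: pin the iterator link in bpffs, read it like a
 * regular file, then swap in a different program via link update and
 * verify that the pinned file's output changes.
 */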
static void test_file_iter(void)
{
	const char *path = "/sys/fs/bpf/bpf_iter_test1";
	struct bpf_iter_test_kern1 *skel1;
	struct bpf_iter_test_kern2 *skel2;
	struct bpf_link *link;
	int err;

	skel1 = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load"))
		return;

	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	/* unlink this path if it exists. */
	unlink(path);

	err = bpf_link__pin(link, path);
	if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
		goto free_link;

	err = do_read(path, "abcd");
	if (err)
		goto unlink_path;

	/* The file-based iterator seems to be working fine. Let's do a
	 * link update of the underlying link and `cat` the iterator
	 * again; its content should change.
	 */
	skel2 = bpf_iter_test_kern2__open_and_load();
	if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load"))
		goto unlink_path;

	err = bpf_link__update_program(link, skel2->progs.dump_task);
	if (!ASSERT_OK(err, "update_prog"))
		goto destroy_skel2;

	do_read(path, "ABCD");

destroy_skel2:
	bpf_iter_test_kern2__destroy(skel2);
unlink_path:
	unlink(path);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_test_kern1__destroy(skel1);
}

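/* Exercise seq_file buffer handling in the iterator read path:
 * - test_e2big_overflow: a single object larger than the buffer must
 *   fail the read with E2BIG;
 * - !ret1: objects just under the buffer size force an overflow and a
 *   restart for the second map;
 * - ret1: the program returns 1, which presumably makes the kernel
 *   discard the buffered output and retry the object on a later read().
 */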
static void test_overflow(bool test_e2big_overflow, bool ret1)
{
	__u32 map_info_len, total_read_len, expected_read_len;
	int err, iter_fd, map1_fd, map2_fd, len;
	struct bpf_map_info map_info = {};
	struct bpf_iter_test_kern4 *skel;
	struct bpf_link *link;
	__u32 iter_size;
	char *buf;

	skel = bpf_iter_test_kern4__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open"))
		return;

	/* create two maps: the bpf program will only do bpf_seq_write
	 * for these two maps. The goal is for one map's output to almost
	 * fill the seq_file buffer so that the other one triggers an
	 * overflow and needs a restart.
	 */
	map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (CHECK(map1_fd < 0, "bpf_map_create",
		  "map_creation failed: %s\n", strerror(errno)))
		goto out;
	map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (CHECK(map2_fd < 0, "bpf_map_create",
		  "map_creation failed: %s\n", strerror(errno)))
		goto free_map1;

	/* The bpf_seq_printf kernel buffer is 8 pages, so one map's
	 * bpf_seq_write will mostly fill it, and the other map's will
	 * partially fill it, then trigger an overflow and need a
	 * bpf_seq_read restart.
	 */
	iter_size = sysconf(_SC_PAGE_SIZE) << 3;

	if (test_e2big_overflow) {
		skel->rodata->print_len = (iter_size + 8) / 8;
		expected_read_len = 2 * (iter_size + 8);
	} else if (!ret1) {
		skel->rodata->print_len = (iter_size - 8) / 8;
		expected_read_len = 2 * (iter_size - 8);
	} else {
		skel->rodata->print_len = 1;
		expected_read_len = 2 * 8;
	}
	skel->rodata->ret1 = ret1;

	if (!ASSERT_OK(bpf_iter_test_kern4__load(skel),
		       "bpf_iter_test_kern4__load"))
		goto free_map2;

	/* setup filtering map_id in bpf program */
	map_info_len = sizeof(map_info);
	err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map1_id = map_info.id;

	err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map2_id = map_info.id;

	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto free_map2;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	buf = malloc(expected_read_len);
	if (!buf)
		goto close_iter;

	/* do read */
	total_read_len = 0;
	if (test_e2big_overflow) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		CHECK(len != -1 || errno != E2BIG, "read",
		      "expected ret -1, errno E2BIG, but got ret %d, error %s\n",
		      len, strerror(errno));
		goto free_buf;
	} else if (!ret1) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	} else {
		do {
			len = read(iter_fd, buf, expected_read_len);
			if (len > 0)
				total_read_len += len;
		/* read() reports EAGAIN via errno, not its return value */
		} while (len > 0 || (len == -1 && errno == EAGAIN));

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	}

	if (!ASSERT_EQ(total_read_len, expected_read_len, "read"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed"))
		goto free_buf;

	ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum");

free_buf:
	free(buf);
close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
free_map2:
	close(map2_fd);
free_map1:
	close(map1_fd);
out:
	bpf_iter_test_kern4__destroy(skel);
}

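/* Hash map iterator: attaching to hashmap2 and hashmap3 must fail
 * (presumably their key/value layouts don't match what the program
 * expects), sleepable programs must be rejected, and for hashmap1 the
 * BPF-side key/value sums must match what userspace inserted.
 */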
static void test_bpf_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_hash_map *skel;
	int err, i, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u64 val, expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];

	skel = bpf_iter_bpf_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open"))
		return;

	skel->bss->in_test_mode = true;

	err = bpf_iter_bpf_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load"))
		goto out;

	/* iterator with hashmap2 and hashmap3 should fail */
	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	/* hashmap1 should be good, update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		val = i + 4;
		expected_key_a += key.a;
		expected_key_b += key.b;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	/* Sleepable program is prohibited for hash map iterator */
	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.sleepable_dummy_dump, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_sleepable_prog_to_iter"))
		goto out;

	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_hash_map__destroy(skel);
}

static void test_bpf_percpu_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_hash_map *skel;
	int err, i, j, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u32 expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];
	void *val;

	skel = bpf_iter_bpf_percpu_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());
	if (!ASSERT_OK_PTR(val, "malloc"))
		goto out;

	err = bpf_iter_bpf_percpu_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_hash_map__load"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		expected_key_a += key.a;
		expected_key_b += key.b;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_hash_map__destroy(skel);
	free(val);
}

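/* Array map iterator: check the first key/value pair emitted via
 * bpf_seq_write(), the BPF-side key and value sums, and that the
 * program's in-place updates (setting each value to its index) are
 * visible from userspace afterwards.
 */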
static void test_bpf_array_map(void)
{
	__u64 val, expected_val = 0, res_first_val, first_val = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	__u32 expected_key = 0, res_first_key;
	struct bpf_iter_bpf_array_map *skel;
	union bpf_iter_link_info linfo;
	int err, i, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64] = {};
	int len, start;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		val = i + 4;
		expected_key += i;
		expected_val += val;

		if (i == 0)
			first_val = val;

		err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	start = 0;
	while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
		start += len;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	res_first_key = *(__u32 *)buf;
	res_first_val = *(__u64 *)(buf + sizeof(__u32));
	if (CHECK(res_first_key != 0 || res_first_val != first_val,
		  "bpf_seq_write",
		  "seq_write failure: first key %u vs expected 0, "
		  "first value %llu vs expected %llu\n",
		  res_first_key, res_first_val, first_val))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		err = bpf_map_lookup_elem(map_fd, &i, &val);
		if (!ASSERT_OK(err, "map_lookup"))
			goto out;
		if (!ASSERT_EQ(i, val, "invalid_val"))
			goto out;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_array_map__destroy(skel);
}

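/* Reading an array map iterator must stay safe after both the link and
 * the map fd are closed; see do_read_map_iter_fd().
 */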
static void test_bpf_array_map_iter_fd(void)
{
	struct bpf_iter_bpf_array_map *skel;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_array_map,
			    skel->maps.arraymap1);

	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_percpu_array_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_array_map *skel;
	__u32 expected_key = 0, expected_val = 0;
	union bpf_iter_link_info linfo;
	int err, i, j, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64];
	void *val;
	int len;

	skel = bpf_iter_bpf_percpu_array_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());
	if (!ASSERT_OK_PTR(val, "malloc"))
		goto out;

	err = bpf_iter_bpf_percpu_array_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_array_map__load"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		expected_key += i;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_array_map__destroy(skel);
	free(val);
}

/* An iterator program deletes all local storage in a map. */
static void test_bpf_sk_storage_delete(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	union bpf_iter_link_info linfo;
	int err, len, map_fd, iter_fd;
	struct bpf_link *link;
	int sock_fd = -1;
	__u32 val = 42;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;
	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "map_update"))
		goto out;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
					&opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
		  "map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	if (sock_fd >= 0)
		close(sock_fd);
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

/* This creates a socket and its local storage. It then runs a task_iter BPF
 * program that replaces the existing socket local storage with the tgid of
 * the only task owning a file descriptor to this socket: this process,
 * prog_tests. It then runs a tcp socket iterator that negates the value in
 * the existing socket local storage; the test verifies that the resulting
 * value is -pid.
 */
static void test_bpf_sk_storage_get(void)
{
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	int err, map_fd, val = -1;
	int sock_fd = -1;

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;

	err = listen(sock_fd, 1);
	if (!ASSERT_OK(err, "listen"))
		goto close_socket;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem"))
		goto close_socket;

	do_dummy_read(skel->progs.fill_socket_owner);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
		  "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
		  getpid(), val, err))
		goto close_socket;

	do_dummy_read(skel->progs.negate_socket_local_storage);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
	      "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
	      -getpid(), val, err);

close_socket:
	close(sock_fd);
out:
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

static void test_bpf_sk_storage_map_iter_fd(void)
{
	struct bpf_iter_bpf_sk_storage_map *skel;

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.rw_bpf_sk_storage_map,
			    skel->maps.sk_stg_map);

	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

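/* sk_storage map iterator: an out-of-bounds-writing program must be
 * rejected at attach time with -EACCES, while the read/write program
 * must visit all three IPv6 sockets, sum their stored values, and add
 * to_add_val to each entry.
 */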
static void test_bpf_sk_storage_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, i, len, map_fd, iter_fd, num_sockets;
	struct bpf_iter_bpf_sk_storage_map *skel;
	union bpf_iter_link_info linfo;
	int sock_fd[3] = {-1, -1, -1};
	__u32 val, expected_val = 0;
	struct bpf_link *link;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
	num_sockets = ARRAY_SIZE(sock_fd);
	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
		if (!ASSERT_GE(sock_fd[i], 0, "socket"))
			goto out;

		val = i + 1;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
					  BPF_NOEXIST);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.oob_write_bpf_sk_storage_map, &opts);
	err = libbpf_get_error(link);
	if (!ASSERT_EQ(err, -EACCES, "attach_oob_write_iter")) {
		if (!err)
			bpf_link__destroy(link);
		goto out;
	}

	link = bpf_program__attach_iter(skel->progs.rw_bpf_sk_storage_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	skel->bss->to_add_val = time(NULL);
	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->ipv6_sk_count, num_sockets, "ipv6_sk_count"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

	for (i = 0; i < num_sockets; i++) {
		err = bpf_map_lookup_elem(map_fd, &sock_fd[i], &val);
		if (!ASSERT_OK(err, "map_lookup") ||
		    !ASSERT_EQ(val, i + 1 + skel->bss->to_add_val, "check_map_value"))
			break;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; i < num_sockets; i++) {
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	}
	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

static void test_rdonly_buf_out_of_bound(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_test_kern5 *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;

	skel = bpf_iter_test_kern5__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern5__open_and_load"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		bpf_link__destroy(link);

	bpf_iter_test_kern5__destroy(skel);
}

static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel;

	skel = bpf_iter_test_kern6__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load"))
		bpf_iter_test_kern6__destroy(skel);
}

static void test_link_iter(void)
{
	struct bpf_iter_bpf_link *skel;

	skel = bpf_iter_bpf_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_link);

	bpf_iter_bpf_link__destroy(skel);
}

static void test_ksym_iter(void)
{
	struct bpf_iter_ksym *skel;

	skel = bpf_iter_ksym__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ksym__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ksym);

	bpf_iter_ksym__destroy(skel);
}

#define CMP_BUFFER_SIZE 1024
static char task_vma_output[CMP_BUFFER_SIZE];
static char proc_maps_output[CMP_BUFFER_SIZE];

/* remove spaces and tabs from str, and only keep the first line */
static void str_strip_first_line(char *str)
{
	char *dst = str, *src = str;

	do {
		if (*src == ' ' || *src == '\t')
			src++;
		else
			*(dst++) = *(src++);
	} while (*src != '\0' && *src != '\n');

	*dst = '\0';
}

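/* Compare the task_vma iterator's output for this process against
 * /proc/<pid>/maps: read the iterator in small chunks to stress
 * seq_file corner cases, strip whitespace, and require the first lines
 * to match exactly.
 */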
static void test_task_vma_common(struct bpf_iter_attach_opts *opts)
{
	int err, iter_fd = -1, proc_maps_fd = -1;
	struct bpf_iter_task_vma *skel;
	int len, read_size = 4;
	char maps_path[64];

	skel = bpf_iter_task_vma__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open"))
		return;

	skel->bss->pid = getpid();
	skel->bss->one_task = opts ? 1 : 0;

	err = bpf_iter_task_vma__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
		goto out;

	skel->links.proc_maps = bpf_program__attach_iter(
		skel->progs.proc_maps, opts);

	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
		skel->links.proc_maps = NULL;
		goto out;
	}

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	/* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
	 * to trigger seq_file corner cases.
	 */
	len = 0;
	while (len < CMP_BUFFER_SIZE) {
		err = read_fd_into_buffer(iter_fd, task_vma_output + len,
					  MIN(read_size, CMP_BUFFER_SIZE - len));
		if (!err)
			break;
		if (!ASSERT_GE(err, 0, "read_iter_fd"))
			goto out;
		len += err;
	}
	if (opts)
		ASSERT_EQ(skel->bss->one_task_error, 0, "unexpected task");

	/* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
	snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid);
	proc_maps_fd = open(maps_path, O_RDONLY);
	if (!ASSERT_GE(proc_maps_fd, 0, "open_proc_maps"))
		goto out;
	err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE);
	if (!ASSERT_GE(err, 0, "read_prog_maps_fd"))
		goto out;

	/* strip and compare the first line of the two files */
	str_strip_first_line(task_vma_output);
	str_strip_first_line(proc_maps_output);

	ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output");

	check_bpf_link_info(skel->progs.proc_maps);

out:
	close(proc_maps_fd);
	close(iter_fd);
	bpf_iter_task_vma__destroy(skel);
}

static void test_bpf_sockmap_map_iter_fd(void)
{
	struct bpf_iter_sockmap *skel;

	skel = bpf_iter_sockmap__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.copy, skel->maps.sockmap);

	bpf_iter_sockmap__destroy(skel);
}

static void test_task_vma(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_vma_common(&opts);
	test_task_vma_common(NULL);
}

/* uprobe attach point */
static noinline int trigger_func(int arg)
{
	asm volatile ("");
	return arg + 1;
}

static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc)
{
	struct bpf_iter_vma_offset *skel;
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;
	int pgsz, shift;

	skel = bpf_iter_vma_offset__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_vma_offset__open_and_load"))
		return;

	skel->bss->pid = getpid();
	skel->bss->address = (uintptr_t)trigger_func;
	/* page_shift = log2(page size) */
	for (pgsz = getpagesize(), shift = 0; pgsz > 1; pgsz >>= 1, shift++)
		;
	skel->bss->page_shift = shift;

	link = bpf_program__attach_iter(skel->progs.get_vma_offset, opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GT(iter_fd, 0, "create_iter"))
		goto exit;

	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	buf[15] = 0;
	ASSERT_EQ(strcmp(buf, "OK\n"), 0, "strcmp");

	ASSERT_EQ(skel->bss->offset, get_uprobe_offset(trigger_func), "offset");
	if (one_proc)
		ASSERT_EQ(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
	else
		ASSERT_GT(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");

	close(iter_fd);

exit:
	bpf_link__destroy(link);
}

static void test_task_vma_offset(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_vma_offset_common(&opts, true);

	linfo.task.pid = 0;
	linfo.task.tid = getpid();
	test_task_vma_offset_common(&opts, true);

	test_task_vma_offset_common(NULL, false);
}

void test_bpf_iter(void)
{
	ASSERT_OK(pthread_mutex_init(&do_nothing_mutex, NULL), "pthread_mutex_init");

	if (test__start_subtest("btf_id_or_null"))
		test_btf_id_or_null();
	if (test__start_subtest("ipv6_route"))
		test_ipv6_route();
	if (test__start_subtest("netlink"))
		test_netlink();
	if (test__start_subtest("bpf_map"))
		test_bpf_map();
	if (test__start_subtest("task_tid"))
		test_task_tid();
	if (test__start_subtest("task_pid"))
		test_task_pid();
	if (test__start_subtest("task_pidfd"))
		test_task_pidfd();
	if (test__start_subtest("task_sleepable"))
		test_task_sleepable();
	if (test__start_subtest("task_stack"))
		test_task_stack();
	if (test__start_subtest("task_file"))
		test_task_file();
	if (test__start_subtest("task_vma"))
		test_task_vma();
	if (test__start_subtest("task_btf"))
		test_task_btf();
	if (test__start_subtest("tcp4"))
		test_tcp4();
	if (test__start_subtest("tcp6"))
		test_tcp6();
	if (test__start_subtest("udp4"))
		test_udp4();
	if (test__start_subtest("udp6"))
		test_udp6();
	if (test__start_subtest("unix"))
		test_unix();
	if (test__start_subtest("anon"))
		test_anon_iter(false);
	if (test__start_subtest("anon-read-one-char"))
		test_anon_iter(true);
	if (test__start_subtest("file"))
		test_file_iter();
	if (test__start_subtest("overflow"))
		test_overflow(false, false);
	if (test__start_subtest("overflow-e2big"))
		test_overflow(true, false);
	if (test__start_subtest("prog-ret-1"))
		test_overflow(false, true);
	if (test__start_subtest("bpf_hash_map"))
		test_bpf_hash_map();
	if (test__start_subtest("bpf_percpu_hash_map"))
		test_bpf_percpu_hash_map();
	if (test__start_subtest("bpf_array_map"))
		test_bpf_array_map();
	if (test__start_subtest("bpf_array_map_iter_fd"))
		test_bpf_array_map_iter_fd();
	if (test__start_subtest("bpf_percpu_array_map"))
		test_bpf_percpu_array_map();
	if (test__start_subtest("bpf_sk_storage_map"))
		test_bpf_sk_storage_map();
	if (test__start_subtest("bpf_sk_storage_map_iter_fd"))
		test_bpf_sk_storage_map_iter_fd();
	if (test__start_subtest("bpf_sk_storage_delete"))
		test_bpf_sk_storage_delete();
	if (test__start_subtest("bpf_sk_storage_get"))
		test_bpf_sk_storage_get();
	if (test__start_subtest("rdonly-buf-out-of-bound"))
		test_rdonly_buf_out_of_bound();
	if (test__start_subtest("buf-neg-offset"))
		test_buf_neg_offset();
	if (test__start_subtest("link-iter"))
		test_link_iter();
	if (test__start_subtest("ksym"))
		test_ksym_iter();
	if (test__start_subtest("bpf_sockmap_map_iter_fd"))
		test_bpf_sockmap_map_iter_fd();
	if (test__start_subtest("vma_offset"))
		test_task_vma_offset();
}