// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <task_local_storage_helpers.h>
#include "bpf_iter_ipv6_route.skel.h"
#include "bpf_iter_netlink.skel.h"
#include "bpf_iter_bpf_map.skel.h"
#include "bpf_iter_task.skel.h"
#include "bpf_iter_task_stack.skel.h"
#include "bpf_iter_task_file.skel.h"
#include "bpf_iter_task_vma.skel.h"
#include "bpf_iter_task_btf.skel.h"
#include "bpf_iter_tcp4.skel.h"
#include "bpf_iter_tcp6.skel.h"
#include "bpf_iter_udp4.skel.h"
#include "bpf_iter_udp6.skel.h"
#include "bpf_iter_unix.skel.h"
#include "bpf_iter_vma_offset.skel.h"
#include "bpf_iter_test_kern1.skel.h"
#include "bpf_iter_test_kern2.skel.h"
#include "bpf_iter_test_kern3.skel.h"
#include "bpf_iter_test_kern4.skel.h"
#include "bpf_iter_bpf_hash_map.skel.h"
#include "bpf_iter_bpf_percpu_hash_map.skel.h"
#include "bpf_iter_bpf_array_map.skel.h"
#include "bpf_iter_bpf_percpu_array_map.skel.h"
#include "bpf_iter_bpf_sk_storage_helpers.skel.h"
#include "bpf_iter_bpf_sk_storage_map.skel.h"
#include "bpf_iter_test_kern5.skel.h"
#include "bpf_iter_test_kern6.skel.h"
#include "bpf_iter_bpf_link.skel.h"
#include "bpf_iter_ksym.skel.h"
#include "bpf_iter_sockmap.skel.h"

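/* referenced by the CHECK() macro from test_progs.h */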
static int duration;

static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel;

	skel = bpf_iter_test_kern3__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) {
		bpf_iter_test_kern3__destroy(skel);
		return;
	}
}

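/* Attach @prog as an iterator with @opts, create an iterator fd from the
 * resulting link and drain it, checking only that read() finishes without
 * error.
 */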
static void do_dummy_read_opts(struct bpf_program *prog, struct bpf_iter_attach_opts *opts)
{
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	link = bpf_program__attach_iter(prog, opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* don't check contents, just ensure read() ends without error */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));

	close(iter_fd);

free_link:
	bpf_link__destroy(link);
}

static void do_dummy_read(struct bpf_program *prog)
{
	do_dummy_read_opts(prog, NULL);
}

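/* Check that an already-created map iterator fd keeps working after the
 * iterator link, the map fd and the whole skeleton have been destroyed.
 */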
static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog,
				struct bpf_map *map)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(map);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_map_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_map_iter")) {
		bpf_link__destroy(link);
		return;
	}

	/* Close link and map fd prematurely */
	bpf_link__destroy(link);
	bpf_object__destroy_skeleton(*skel);
	*skel = NULL;

	/* Give the map-free work a chance to run first, in case the map
	 * is freed.
	 */
	usleep(100);
	/* Memory used by both sock map and sock local storage map is
	 * freed after two synchronize_rcu() calls, so wait for it.
	 */
	kern_sync_rcu();
	kern_sync_rcu();

	/* Read after both map fd and link fd are closed */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	ASSERT_GE(len, 0, "read_iterator");

	close(iter_fd);
}

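/* Read from @fd until EOF or error; returns the total number of bytes
 * read, or the (negative) read() result on error.
 */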
static int read_fd_into_buffer(int fd, char *buf, int size)
{
	int bufleft = size;
	int len;

	do {
		len = read(fd, buf, bufleft);
		if (len > 0) {
			buf += len;
			bufleft -= len;
		}
	} while (len > 0);

	return len < 0 ? len : size - bufleft;
}

static void test_ipv6_route(void)
{
	struct bpf_iter_ipv6_route *skel;

	skel = bpf_iter_ipv6_route__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ipv6_route);

	bpf_iter_ipv6_route__destroy(skel);
}

static void test_netlink(void)
{
	struct bpf_iter_netlink *skel;

	skel = bpf_iter_netlink__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_netlink);

	bpf_iter_netlink__destroy(skel);
}

static void test_bpf_map(void)
{
	struct bpf_iter_bpf_map *skel;

	skel = bpf_iter_bpf_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_map);

	bpf_iter_bpf_map__destroy(skel);
}

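/* Create a tid-filtered task iterator link and check that
 * bpf_link_get_info_by_fd() reports back the same tid.
 */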
static void check_bpf_link_info(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link_info info = {};
	struct bpf_link *link;
	__u32 info_len;
	int err;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	info_len = sizeof(info);
	err = bpf_link_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
	ASSERT_OK(err, "bpf_link_get_info_by_fd");
	ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid");

	bpf_link__destroy(link);
}

static pthread_mutex_t do_nothing_mutex;

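/* Thread body that just blocks on do_nothing_mutex, letting the main
 * thread decide when it exits.
 */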
static void *do_nothing_wait(void *arg)
{
	pthread_mutex_lock(&do_nothing_mutex);
	pthread_mutex_unlock(&do_nothing_mutex);

	pthread_exit(arg);
}

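/* Run the dump_task iterator with @opts while one extra thread is alive,
 * then report how many tids the BPF program saw that matched (num_known)
 * or did not match (num_unknown) the main thread's tid.
 */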
static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
				     int *num_unknown, int *num_known)
{
	struct bpf_iter_task *skel;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
		return;

	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");

	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		  "pthread_create");

	skel->bss->tid = getpid();

	do_dummy_read_opts(skel->progs.dump_task, opts);

	*num_unknown = skel->bss->num_unknown_tid;
	*num_known = skel->bss->num_known_tid;

	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
	ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
		     "pthread_join");

	bpf_iter_task__destroy(skel);
}

static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown, int num_known)
{
	int num_unknown_tid, num_known_tid;

	test_task_common_nocheck(opts, &num_unknown_tid, &num_known_tid);
	ASSERT_EQ(num_unknown_tid, num_unknown, "check_num_unknown_tid");
	ASSERT_EQ(num_known_tid, num_known, "check_num_known_tid");
}

static void test_task_tid(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	int num_unknown_tid, num_known_tid;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	test_task_common(&opts, 0, 1);

	linfo.task.tid = 0;
	linfo.task.pid = getpid();
	test_task_common(&opts, 1, 1);

	test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid);
	ASSERT_GT(num_unknown_tid, 1, "check_num_unknown_tid");
	ASSERT_EQ(num_known_tid, 1, "check_num_known_tid");
}

static void test_task_pid(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_common(&opts, 1, 1);
}

static void test_task_pidfd(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	int pidfd;

	pidfd = sys_pidfd_open(getpid(), 0);
	if (!ASSERT_GT(pidfd, 0, "sys_pidfd_open"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid_fd = pidfd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_common(&opts, 1, 1);

	close(pidfd);
}

static void test_task_sleepable(void)
{
	struct bpf_iter_task *skel;

	skel = bpf_iter_task__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task_sleepable);

	ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0,
		  "num_expected_failure_copy_from_user_task");
	ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0,
		  "num_success_copy_from_user_task");

	bpf_iter_task__destroy(skel);
}

static void test_task_stack(void)
{
	struct bpf_iter_task_stack *skel;

	skel = bpf_iter_task_stack__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task_stack);
	do_dummy_read(skel->progs.get_task_user_stacks);

	bpf_iter_task_stack__destroy(skel);
}

static void test_task_file(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_task_file *skel;
	union bpf_iter_link_info linfo;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task_file__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_file__open_and_load"))
		return;

	skel->bss->tgid = getpid();

	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");

	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		  "pthread_create");

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	do_dummy_read_opts(skel->progs.dump_task_file, &opts);

	ASSERT_EQ(skel->bss->count, 0, "check_count");
	ASSERT_EQ(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");

	skel->bss->last_tgid = 0;
	skel->bss->count = 0;
	skel->bss->unique_tgid_count = 0;

	do_dummy_read(skel->progs.dump_task_file);

	ASSERT_EQ(skel->bss->count, 0, "check_count");
	ASSERT_GT(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");

	check_bpf_link_info(skel->progs.dump_task_file);

	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
	ASSERT_OK(pthread_join(thread_id, &ret), "pthread_join");
	ASSERT_NULL(ret, "pthread_join");

	bpf_iter_task_file__destroy(skel);
}

#define TASKBUFSZ 32768

static char taskbuf[TASKBUFSZ];

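/* Read the BTF-printed dump of task_structs into taskbuf; returns 1 if the
 * kernel program flagged a skip (no __builtin_btf_type_id support), 0
 * otherwise.
 */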
static int do_btf_read(struct bpf_iter_task_btf *skel)
{
	struct bpf_program *prog = skel->progs.dump_task_struct;
	struct bpf_iter_task_btf__bss *bss = skel->bss;
	int iter_fd = -1, err;
	struct bpf_link *link;
	char *buf = taskbuf;
	int ret = 0;

	link = bpf_program__attach_iter(prog, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return ret;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ);
	if (bss->skip) {
		printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
		ret = 1;
		test__skip();
		goto free_link;
	}

	if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno)))
		goto free_link;

	ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)",
			  "check for btf representation of task_struct in iter data");
free_link:
	if (iter_fd > 0)
		close(iter_fd);
	bpf_link__destroy(link);
	return ret;
}

static void test_task_btf(void)
{
	struct bpf_iter_task_btf__bss *bss;
	struct bpf_iter_task_btf *skel;
	int ret;

	skel = bpf_iter_task_btf__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_btf__open_and_load"))
		return;

	bss = skel->bss;

	ret = do_btf_read(skel);
	if (ret)
		goto cleanup;

	if (!ASSERT_NEQ(bss->tasks, 0, "no task iteration, did BPF program run?"))
		goto cleanup;

	ASSERT_EQ(bss->seq_err, 0, "check for unexpected err");

cleanup:
	bpf_iter_task_btf__destroy(skel);
}

static void test_tcp4(void)
{
	struct bpf_iter_tcp4 *skel;

	skel = bpf_iter_tcp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp4);

	bpf_iter_tcp4__destroy(skel);
}

static void test_tcp6(void)
{
	struct bpf_iter_tcp6 *skel;

	skel = bpf_iter_tcp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp6);

	bpf_iter_tcp6__destroy(skel);
}

static void test_udp4(void)
{
	struct bpf_iter_udp4 *skel;

	skel = bpf_iter_udp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp4);

	bpf_iter_udp4__destroy(skel);
}

static void test_udp6(void)
{
	struct bpf_iter_udp6 *skel;

	skel = bpf_iter_udp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp6);

	bpf_iter_udp6__destroy(skel);
}

static void test_unix(void)
{
	struct bpf_iter_unix *skel;

	skel = bpf_iter_unix__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_unix);

	bpf_iter_unix__destroy(skel);
}

/* The expected string is less than 16 bytes */
static int do_read_with_fd(int iter_fd, const char *expected,
			   bool read_one_char)
{
	int len, read_buf_len, start;
	char buf[16] = {};

	read_buf_len = read_one_char ? 1 : 16;
	start = 0;
	while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
		start += len;
		if (CHECK(start >= 16, "read", "read len %d\n", len))
			return -1;
		read_buf_len = read_one_char ? 1 : 16 - start;
	}
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		return -1;

	if (!ASSERT_STREQ(buf, expected, "read"))
		return -1;

	return 0;
}

static void test_anon_iter(bool read_one_char)
{
	struct bpf_iter_test_kern1 *skel;
	struct bpf_link *link;
	int iter_fd, err;

	skel = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern1__open_and_load"))
		return;

	err = bpf_iter_test_kern1__attach(skel);
	if (!ASSERT_OK(err, "bpf_iter_test_kern1__attach")) {
		goto out;
	}

	link = skel->links.dump_task;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	do_read_with_fd(iter_fd, "abcd", read_one_char);
	close(iter_fd);

out:
	bpf_iter_test_kern1__destroy(skel);
}

static int do_read(const char *path, const char *expected)
{
	int err, iter_fd;

	iter_fd = open(path, O_RDONLY);
	if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
		  path, strerror(errno)))
		return -1;

	err = do_read_with_fd(iter_fd, expected, false);
	close(iter_fd);
	return err;
}

static void test_file_iter(void)
{
	const char *path = "/sys/fs/bpf/bpf_iter_test1";
	struct bpf_iter_test_kern1 *skel1;
	struct bpf_iter_test_kern2 *skel2;
	struct bpf_link *link;
	int err;

	skel1 = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load"))
		return;

	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	/* unlink this path if it exists. */
	unlink(path);

	err = bpf_link__pin(link, path);
	if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
		goto free_link;

	err = do_read(path, "abcd");
	if (err)
		goto unlink_path;

	/* The file-based iterator seems to be working fine. Let us do a
	 * link update of the underlying link and `cat` the iterator again;
	 * its content should change.
	 */
	skel2 = bpf_iter_test_kern2__open_and_load();
	if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load"))
		goto unlink_path;

	err = bpf_link__update_program(link, skel2->progs.dump_task);
	if (!ASSERT_OK(err, "update_prog"))
		goto destroy_skel2;

	do_read(path, "ABCD");

destroy_skel2:
	bpf_iter_test_kern2__destroy(skel2);
unlink_path:
	unlink(path);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_test_kern1__destroy(skel1);
}

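/* Exercise seq_file buffer boundary handling: one map's bpf_seq_write
 * output (almost) fills the 8-page seq_file buffer and the second map then
 * triggers an overflow and restart. With test_e2big_overflow a single
 * object no longer fits, so read() must fail with E2BIG; with ret1 the BPF
 * program returns 1 instead of 0 from its iteration callback.
 */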
static void test_overflow(bool test_e2big_overflow, bool ret1)
{
	__u32 map_info_len, total_read_len, expected_read_len;
	int err, iter_fd, map1_fd, map2_fd, len;
	struct bpf_map_info map_info = {};
	struct bpf_iter_test_kern4 *skel;
	struct bpf_link *link;
	__u32 iter_size;
	char *buf;

	skel = bpf_iter_test_kern4__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open"))
		return;

	/* Create two maps: the bpf program will only do bpf_seq_write for
	 * these two maps. The goal is that one map's output almost fills
	 * the seq_file buffer and the other then triggers an overflow and
	 * needs a restart.
	 */
	map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (CHECK(map1_fd < 0, "bpf_map_create",
		  "map_creation failed: %s\n", strerror(errno)))
		goto out;
	map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (CHECK(map2_fd < 0, "bpf_map_create",
		  "map_creation failed: %s\n", strerror(errno)))
		goto free_map1;

	/* bpf_seq_printf kernel buffer is 8 pages, so one map
	 * bpf_seq_write will mostly fill it, and the other map
	 * will partially fill and then trigger overflow and need
	 * bpf_seq_read restart.
	 */
	iter_size = sysconf(_SC_PAGE_SIZE) << 3;

	if (test_e2big_overflow) {
		skel->rodata->print_len = (iter_size + 8) / 8;
		expected_read_len = 2 * (iter_size + 8);
	} else if (!ret1) {
		skel->rodata->print_len = (iter_size - 8) / 8;
		expected_read_len = 2 * (iter_size - 8);
	} else {
		skel->rodata->print_len = 1;
		expected_read_len = 2 * 8;
	}
	skel->rodata->ret1 = ret1;

	if (!ASSERT_OK(bpf_iter_test_kern4__load(skel),
		       "bpf_iter_test_kern4__load"))
		goto free_map2;

	/* setup filtering map_id in bpf program */
	map_info_len = sizeof(map_info);
	err = bpf_map_get_info_by_fd(map1_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map1_id = map_info.id;

	err = bpf_map_get_info_by_fd(map2_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map2_id = map_info.id;

	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto free_map2;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	buf = malloc(expected_read_len);
	if (!buf)
		goto close_iter;

	/* do read */
	total_read_len = 0;
	if (test_e2big_overflow) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		CHECK(len != -1 || errno != E2BIG, "read",
		      "expected ret -1, errno E2BIG, but got ret %d, error %s\n",
		      len, strerror(errno));
		goto free_buf;
	} else if (!ret1) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	} else {
		do {
			len = read(iter_fd, buf, expected_read_len);
			if (len > 0)
				total_read_len += len;
		} while (len > 0 || (len == -1 && errno == EAGAIN));

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	}

	if (!ASSERT_EQ(total_read_len, expected_read_len, "read"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed"))
		goto free_buf;

	ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum");

free_buf:
	free(buf);
close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
free_map2:
	close(map2_fd);
free_map1:
	close(map1_fd);
out:
	bpf_iter_test_kern4__destroy(skel);
}

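/* Fill hashmap1 from userspace, iterate it with a BPF program that sums
 * keys and values, and compare both sides. Attaching the iterator to
 * hashmap2/hashmap3 and attaching a sleepable program must both fail.
 */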
static void test_bpf_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_hash_map *skel;
	int err, i, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u64 val, expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];

	skel = bpf_iter_bpf_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open"))
		return;

	skel->bss->in_test_mode = true;

	err = bpf_iter_bpf_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load"))
		goto out;

	/* iterator with hashmap2 and hashmap3 should fail */
	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	/* hashmap1 should be good, update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		val = i + 4;
		expected_key_a += key.a;
		expected_key_b += key.b;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	/* Sleepable program is prohibited for hash map iterator */
	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.sleepable_dummy_dump, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_sleepable_prog_to_iter"))
		goto out;

	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_hash_map__destroy(skel);
}

static void test_bpf_percpu_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_hash_map *skel;
	int err, i, j, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u32 expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];
	void *val;

	skel = bpf_iter_bpf_percpu_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());

	err = bpf_iter_bpf_percpu_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_hash_map__load"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		expected_key_a += key.a;
		expected_key_b += key.b;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_hash_map__destroy(skel);
	free(val);
}

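/* The dump_bpf_array_map program sums arraymap1's keys and values, echoes
 * the first element back through bpf_seq_write, and updates arraymap1 and
 * hashmap1 in place; the userspace side checks the echoed element, the
 * sums, and the updated map contents.
 */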
static void test_bpf_array_map(void)
{
	__u64 val, expected_val = 0, res_first_val, first_val = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	__u32 key, expected_key = 0, res_first_key;
	int err, i, map_fd, hash_fd, iter_fd;
	struct bpf_iter_bpf_array_map *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;
	char buf[64] = {};
	int len, start;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		val = i + 4;
		expected_key += i;
		expected_val += val;

		if (i == 0)
			first_val = val;

		err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	start = 0;
	while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
		start += len;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	res_first_key = *(__u32 *)buf;
	res_first_val = *(__u64 *)(buf + sizeof(__u32));
	if (CHECK(res_first_key != 0 || res_first_val != first_val,
		  "bpf_seq_write",
		  "seq_write failure: first key %u vs expected 0, "
		  " first value %llu vs expected %llu\n",
		  res_first_key, res_first_val, first_val))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

	hash_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		err = bpf_map_lookup_elem(map_fd, &i, &val);
		if (!ASSERT_OK(err, "map_lookup arraymap1"))
			goto close_iter;
		if (!ASSERT_EQ(i, val, "invalid_val arraymap1"))
			goto close_iter;

		val = i + 4;
		err = bpf_map_lookup_elem(hash_fd, &val, &key);
		if (!ASSERT_OK(err, "map_lookup hashmap1"))
			goto close_iter;
		if (!ASSERT_EQ(key, val - 4, "invalid_val hashmap1"))
			goto close_iter;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_array_map_iter_fd(void)
{
	struct bpf_iter_bpf_array_map *skel;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_array_map,
			    skel->maps.arraymap1);

	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_percpu_array_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_array_map *skel;
	__u32 expected_key = 0, expected_val = 0;
	union bpf_iter_link_info linfo;
	int err, i, j, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64];
	void *val;
	int len;

	skel = bpf_iter_bpf_percpu_array_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());

	err = bpf_iter_bpf_percpu_array_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_array_map__load"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		expected_key += i;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_array_map__destroy(skel);
	free(val);
}

/* An iterator program deletes all local storage in a map. */
static void test_bpf_sk_storage_delete(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	union bpf_iter_link_info linfo;
	int err, len, map_fd, iter_fd;
	struct bpf_link *link;
	int sock_fd = -1;
	__u32 val = 42;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;
	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "map_update"))
		goto out;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
					&opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
		  "map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	if (sock_fd >= 0)
		close(sock_fd);
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

/* This creates a socket and its local storage. It then runs a task_iter BPF
 * program that replaces the existing socket local storage with the tgid of
 * the only task owning a file descriptor to this socket, this process,
 * prog_tests. It then runs a tcp socket iterator that negates the value in
 * the existing socket local storage; the test verifies that the resulting
 * value is -pid.
 */
static void test_bpf_sk_storage_get(void)
{
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	int err, map_fd, val = -1;
	int sock_fd = -1;

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;

	err = listen(sock_fd, 1);
	if (!ASSERT_OK(err, "listen"))
		goto close_socket;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem"))
		goto close_socket;

	do_dummy_read(skel->progs.fill_socket_owner);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
		  "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
		  getpid(), val, err))
		goto close_socket;

	do_dummy_read(skel->progs.negate_socket_local_storage);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
	      "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
	      -getpid(), val, err);

close_socket:
	close(sock_fd);
out:
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

static void test_bpf_sk_storage_map_iter_fd(void)
{
	struct bpf_iter_bpf_sk_storage_map *skel;

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.rw_bpf_sk_storage_map,
			    skel->maps.sk_stg_map);

	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

static void test_bpf_sk_storage_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, i, len, map_fd, iter_fd, num_sockets;
	struct bpf_iter_bpf_sk_storage_map *skel;
	union bpf_iter_link_info linfo;
	int sock_fd[3] = {-1, -1, -1};
	__u32 val, expected_val = 0;
	struct bpf_link *link;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
	num_sockets = ARRAY_SIZE(sock_fd);
	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
		if (!ASSERT_GE(sock_fd[i], 0, "socket"))
			goto out;

		val = i + 1;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
					  BPF_NOEXIST);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.oob_write_bpf_sk_storage_map, &opts);
	err = libbpf_get_error(link);
	if (!ASSERT_EQ(err, -EACCES, "attach_oob_write_iter")) {
		if (!err)
			bpf_link__destroy(link);
		goto out;
	}

	link = bpf_program__attach_iter(skel->progs.rw_bpf_sk_storage_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	skel->bss->to_add_val = time(NULL);
	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->ipv6_sk_count, num_sockets, "ipv6_sk_count"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

	for (i = 0; i < num_sockets; i++) {
		err = bpf_map_lookup_elem(map_fd, &sock_fd[i], &val);
		if (!ASSERT_OK(err, "map_lookup") ||
		    !ASSERT_EQ(val, i + 1 + skel->bss->to_add_val, "check_map_value"))
			break;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; i < num_sockets; i++) {
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	}
	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

static void test_rdonly_buf_out_of_bound(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_test_kern5 *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;

	skel = bpf_iter_test_kern5__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern5__open_and_load"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		bpf_link__destroy(link);

	bpf_iter_test_kern5__destroy(skel);
}

static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel;

	skel = bpf_iter_test_kern6__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load"))
		bpf_iter_test_kern6__destroy(skel);
}

static void test_link_iter(void)
{
	struct bpf_iter_bpf_link *skel;

	skel = bpf_iter_bpf_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_link);

	bpf_iter_bpf_link__destroy(skel);
}

static void test_ksym_iter(void)
{
	struct bpf_iter_ksym *skel;

	skel = bpf_iter_ksym__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ksym__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ksym);

	bpf_iter_ksym__destroy(skel);
}

#define CMP_BUFFER_SIZE 1024
static char task_vma_output[CMP_BUFFER_SIZE];
static char proc_maps_output[CMP_BUFFER_SIZE];

/* remove ' ' and '\t' from str, and only keep the first line */
static void str_strip_first_line(char *str)
{
	char *dst = str, *src = str;

	do {
		if (*src == ' ' || *src == '\t')
			src++;
		else
			*(dst++) = *(src++);

	} while (*src != '\0' && *src != '\n');

	*dst = '\0';
}

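/* Dump this process's VMAs through the task_vma iterator and compare the
 * (whitespace-stripped) first line against /proc/pid/maps.
 */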
static void test_task_vma_common(struct bpf_iter_attach_opts *opts)
{
	int err, iter_fd = -1, proc_maps_fd = -1;
	struct bpf_iter_task_vma *skel;
	int len, read_size = 4;
	char maps_path[64];

	skel = bpf_iter_task_vma__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open"))
		return;

	skel->bss->pid = getpid();
	skel->bss->one_task = opts ? 1 : 0;

	err = bpf_iter_task_vma__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
		goto out;

	skel->links.proc_maps = bpf_program__attach_iter(
		skel->progs.proc_maps, opts);

	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
		skel->links.proc_maps = NULL;
		goto out;
	}

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	/* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
	 * to trigger seq_file corner cases.
	 */
	len = 0;
	while (len < CMP_BUFFER_SIZE) {
		err = read_fd_into_buffer(iter_fd, task_vma_output + len,
					  MIN(read_size, CMP_BUFFER_SIZE - len));
		if (!err)
			break;
		if (!ASSERT_GE(err, 0, "read_iter_fd"))
			goto out;
		len += err;
	}
	if (opts)
		ASSERT_EQ(skel->bss->one_task_error, 0, "unexpected task");

	/* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
	snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid);
	proc_maps_fd = open(maps_path, O_RDONLY);
	if (!ASSERT_GE(proc_maps_fd, 0, "open_proc_maps"))
		goto out;
	err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE);
	if (!ASSERT_GE(err, 0, "read_prog_maps_fd"))
		goto out;

	/* strip and compare the first line of the two files */
	str_strip_first_line(task_vma_output);
	str_strip_first_line(proc_maps_output);

	ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output");

	check_bpf_link_info(skel->progs.proc_maps);

out:
	close(proc_maps_fd);
	close(iter_fd);
	bpf_iter_task_vma__destroy(skel);
}

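/* Repeatedly read the task_vma iterator while a child keeps forking
 * short-lived processes, to exercise iteration over tasks that die
 * mid-walk.
 */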
static void test_task_vma_dead_task(void)
{
	struct bpf_iter_task_vma *skel;
	int wstatus, child_pid = -1;
	time_t start_tm, cur_tm;
	int err, iter_fd = -1;
	int wait_sec = 3;

	skel = bpf_iter_task_vma__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open"))
		return;

	skel->bss->pid = getpid();

	err = bpf_iter_task_vma__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
		goto out;

	skel->links.proc_maps = bpf_program__attach_iter(
		skel->progs.proc_maps, NULL);

	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
		skel->links.proc_maps = NULL;
		goto out;
	}

	start_tm = time(NULL);
	cur_tm = start_tm;

	child_pid = fork();
	if (child_pid == 0) {
		/* Fork short-lived processes in the background. */
		while (cur_tm < start_tm + wait_sec) {
			system("echo > /dev/null");
			cur_tm = time(NULL);
		}
		exit(0);
	}

	if (!ASSERT_GE(child_pid, 0, "fork_child"))
		goto out;

	while (cur_tm < start_tm + wait_sec) {
		iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
		if (!ASSERT_GE(iter_fd, 0, "create_iter"))
			goto out;

		/* Drain all data from iter_fd. */
		while (cur_tm < start_tm + wait_sec) {
			err = read_fd_into_buffer(iter_fd, task_vma_output, CMP_BUFFER_SIZE);
			if (!ASSERT_GE(err, 0, "read_iter_fd"))
				goto out;

			cur_tm = time(NULL);

			if (err == 0)
				break;
		}

		close(iter_fd);
		iter_fd = -1;
	}

	check_bpf_link_info(skel->progs.proc_maps);

out:
	waitpid(child_pid, &wstatus, 0);
	close(iter_fd);
	bpf_iter_task_vma__destroy(skel);
}

static void test_bpf_sockmap_map_iter_fd(void)
{
	struct bpf_iter_sockmap *skel;

	skel = bpf_iter_sockmap__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.copy, skel->maps.sockmap);

	bpf_iter_sockmap__destroy(skel);
}

static void test_task_vma(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_vma_common(&opts);
	test_task_vma_common(NULL);
}

/* uprobe attach point */
static noinline int trigger_func(int arg)
{
	asm volatile ("");
	return arg + 1;
}

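/* Have the BPF program compute trigger_func's file offset from its VMA
 * (using address and page_shift) and compare it with get_uprobe_offset();
 * also count the unique tgids seen during the walk.
 */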
static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc)
{
	struct bpf_iter_vma_offset *skel;
	char buf[16] = {};
	int iter_fd, len;
	int pgsz, shift;

	skel = bpf_iter_vma_offset__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_vma_offset__open_and_load"))
		return;

	skel->bss->pid = getpid();
	skel->bss->address = (uintptr_t)trigger_func;
	for (pgsz = getpagesize(), shift = 0; pgsz > 1; pgsz >>= 1, shift++)
		;
	skel->bss->page_shift = shift;

	skel->links.get_vma_offset = bpf_program__attach_iter(skel->progs.get_vma_offset, opts);
	if (!ASSERT_OK_PTR(skel->links.get_vma_offset, "attach_iter"))
		goto exit;

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.get_vma_offset));
	if (!ASSERT_GT(iter_fd, 0, "create_iter"))
		goto exit;

	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	buf[15] = 0;
	ASSERT_EQ(strcmp(buf, "OK\n"), 0, "strcmp");

	ASSERT_EQ(skel->bss->offset, get_uprobe_offset(trigger_func), "offset");
	if (one_proc)
		ASSERT_EQ(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
	else
		ASSERT_GT(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");

	close(iter_fd);

exit:
	bpf_iter_vma_offset__destroy(skel);
}

static void test_task_vma_offset(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_vma_offset_common(&opts, true);

	linfo.task.pid = 0;
	linfo.task.tid = getpid();
	test_task_vma_offset_common(&opts, true);

	test_task_vma_offset_common(NULL, false);
}

void test_bpf_iter(void)
{
	ASSERT_OK(pthread_mutex_init(&do_nothing_mutex, NULL), "pthread_mutex_init");

	if (test__start_subtest("btf_id_or_null"))
		test_btf_id_or_null();
	if (test__start_subtest("ipv6_route"))
		test_ipv6_route();
	if (test__start_subtest("netlink"))
		test_netlink();
	if (test__start_subtest("bpf_map"))
		test_bpf_map();
	if (test__start_subtest("task_tid"))
		test_task_tid();
	if (test__start_subtest("task_pid"))
		test_task_pid();
	if (test__start_subtest("task_pidfd"))
		test_task_pidfd();
	if (test__start_subtest("task_sleepable"))
		test_task_sleepable();
	if (test__start_subtest("task_stack"))
		test_task_stack();
	if (test__start_subtest("task_file"))
		test_task_file();
	if (test__start_subtest("task_vma"))
		test_task_vma();
	if (test__start_subtest("task_vma_dead_task"))
		test_task_vma_dead_task();
	if (test__start_subtest("task_btf"))
		test_task_btf();
	if (test__start_subtest("tcp4"))
		test_tcp4();
	if (test__start_subtest("tcp6"))
		test_tcp6();
	if (test__start_subtest("udp4"))
		test_udp4();
	if (test__start_subtest("udp6"))
		test_udp6();
	if (test__start_subtest("unix"))
		test_unix();
	if (test__start_subtest("anon"))
		test_anon_iter(false);
	if (test__start_subtest("anon-read-one-char"))
		test_anon_iter(true);
	if (test__start_subtest("file"))
		test_file_iter();
	if (test__start_subtest("overflow"))
		test_overflow(false, false);
	if (test__start_subtest("overflow-e2big"))
		test_overflow(true, false);
	if (test__start_subtest("prog-ret-1"))
		test_overflow(false, true);
	if (test__start_subtest("bpf_hash_map"))
		test_bpf_hash_map();
	if (test__start_subtest("bpf_percpu_hash_map"))
		test_bpf_percpu_hash_map();
	if (test__start_subtest("bpf_array_map"))
		test_bpf_array_map();
	if (test__start_subtest("bpf_array_map_iter_fd"))
		test_bpf_array_map_iter_fd();
	if (test__start_subtest("bpf_percpu_array_map"))
		test_bpf_percpu_array_map();
	if (test__start_subtest("bpf_sk_storage_map"))
		test_bpf_sk_storage_map();
	if (test__start_subtest("bpf_sk_storage_map_iter_fd"))
		test_bpf_sk_storage_map_iter_fd();
	if (test__start_subtest("bpf_sk_storage_delete"))
		test_bpf_sk_storage_delete();
	if (test__start_subtest("bpf_sk_storage_get"))
		test_bpf_sk_storage_get();
	if (test__start_subtest("rdonly-buf-out-of-bound"))
		test_rdonly_buf_out_of_bound();
	if (test__start_subtest("buf-neg-offset"))
		test_buf_neg_offset();
	if (test__start_subtest("link-iter"))
		test_link_iter();
	if (test__start_subtest("ksym"))
		test_ksym_iter();
	if (test__start_subtest("bpf_sockmap_map_iter_fd"))
		test_bpf_sockmap_map_iter_fd();
	if (test__start_subtest("vma_offset"))
		test_task_vma_offset();
}