Searched refs:skel (Results 1 – 25 of 126) sorted by relevance

/Linux-v5.15/tools/testing/selftests/bpf/prog_tests/
atomics.c
7 static void test_add(struct atomics *skel) in test_add() argument
13 link_fd = atomics__add__attach(skel); in test_add()
17 prog_fd = skel->progs.add.prog_fd; in test_add()
24 ASSERT_EQ(skel->data->add64_value, 3, "add64_value"); in test_add()
25 ASSERT_EQ(skel->bss->add64_result, 1, "add64_result"); in test_add()
27 ASSERT_EQ(skel->data->add32_value, 3, "add32_value"); in test_add()
28 ASSERT_EQ(skel->bss->add32_result, 1, "add32_result"); in test_add()
30 ASSERT_EQ(skel->bss->add_stack_value_copy, 3, "add_stack_value"); in test_add()
31 ASSERT_EQ(skel->bss->add_stack_result, 1, "add_stack_result"); in test_add()
33 ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value"); in test_add()
[all …]
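
Most of the BPF selftest hits in this listing follow the same libbpf skeleton lifecycle: a header generated by "bpftool gen skeleton" wraps open/load/attach/destroy and exposes the program's globals through skel->bss, skel->data and skel->rodata. (The atomics entry above appears to use the light-skeleton variant, whose per-program attach helpers return file descriptors rather than struct bpf_link pointers.) Below is a minimal sketch of the common lifecycle; the object name "example" and the "counter" global are hypothetical, not taken from any result here.

#include <stdio.h>
#include "example.skel.h"       /* hypothetical: bpftool gen skeleton example.bpf.o > example.skel.h */

int main(void)
{
        struct example *skel;
        int err;

        /* open the embedded BPF object and load it into the kernel */
        skel = example__open_and_load();
        if (!skel)
                return 1;

        /* .bss/.data globals are mmap()-ed, so user space reads/writes them directly */
        skel->bss->counter = 0;

        /* attach every program declared in example.bpf.c */
        err = example__attach(skel);
        if (err)
                goto cleanup;

        printf("counter after attach: %d\n", skel->bss->counter);

cleanup:
        /* detaches links, unloads programs and frees the skeleton */
        example__destroy(skel);
        return err ? 1 : 0;
}
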
bpf_iter.c
34 struct bpf_iter_test_kern3 *skel; in test_btf_id_or_null() local
36 skel = bpf_iter_test_kern3__open_and_load(); in test_btf_id_or_null()
37 if (CHECK(skel, "bpf_iter_test_kern3__open_and_load", in test_btf_id_or_null()
39 bpf_iter_test_kern3__destroy(skel); in test_btf_id_or_null()
87 struct bpf_iter_ipv6_route *skel; in test_ipv6_route() local
89 skel = bpf_iter_ipv6_route__open_and_load(); in test_ipv6_route()
90 if (CHECK(!skel, "bpf_iter_ipv6_route__open_and_load", in test_ipv6_route()
94 do_dummy_read(skel->progs.dump_ipv6_route); in test_ipv6_route()
96 bpf_iter_ipv6_route__destroy(skel); in test_ipv6_route()
101 struct bpf_iter_netlink *skel; in test_netlink() local
[all …]
check_mtu.c
43 struct test_check_mtu *skel; in test_check_mtu_xdp_attach() local
49 skel = test_check_mtu__open_and_load(); in test_check_mtu_xdp_attach()
50 if (CHECK(!skel, "open and load skel", "failed")) in test_check_mtu_xdp_attach()
53 prog = skel->progs.xdp_use_helper_basic; in test_check_mtu_xdp_attach()
58 skel->links.xdp_use_helper_basic = link; in test_check_mtu_xdp_attach()
75 test_check_mtu__destroy(skel); in test_check_mtu_xdp_attach()
78 static void test_check_mtu_run_xdp(struct test_check_mtu *skel, in test_check_mtu_run_xdp() argument
106 mtu_result = skel->bss->global_bpf_mtu_xdp; in test_check_mtu_run_xdp()
113 struct test_check_mtu *skel; in test_check_mtu_xdp() local
116 skel = test_check_mtu__open(); in test_check_mtu_xdp()
[all …]
btf_skc_cls_ingress.c
19 static struct test_btf_skc_cls_ingress *skel; variable
77 memset(&skel->bss->srv_sa6, 0, sizeof(skel->bss->srv_sa6)); in reset_test()
78 skel->bss->listen_tp_sport = 0; in reset_test()
79 skel->bss->req_sk_sport = 0; in reset_test()
80 skel->bss->recv_cookie = 0; in reset_test()
81 skel->bss->gen_cookie = 0; in reset_test()
82 skel->bss->linum = 0; in reset_test()
87 if (skel->bss->linum) in print_err_line()
88 printf("bpf prog error at line %u\n", skel->bss->linum); in print_err_line()
108 memcpy(&skel->bss->srv_sa6, &srv_sa6, sizeof(srv_sa6)); in test_conn()
[all …]
ringbuf.c
61 static struct test_ringbuf *skel; variable
66 skel->bss->dropped = 0; in trigger_samples()
67 skel->bss->total = 0; in trigger_samples()
68 skel->bss->discarded = 0; in trigger_samples()
71 skel->bss->value = 333; in trigger_samples()
73 skel->bss->value = 777; in trigger_samples()
93 skel = test_ringbuf__open(); in test_ringbuf()
94 if (CHECK(!skel, "skel_open", "skeleton open failed\n")) in test_ringbuf()
97 skel->maps.ringbuf.max_entries = page_size; in test_ringbuf()
99 err = test_ringbuf__load(skel); in test_ringbuf()
[all …]
snprintf.c
40 struct test_snprintf *skel; in test_snprintf_positive() local
42 skel = test_snprintf__open_and_load(); in test_snprintf_positive()
43 if (!ASSERT_OK_PTR(skel, "skel_open")) in test_snprintf_positive()
46 skel->bss->pid = getpid(); in test_snprintf_positive()
48 if (!ASSERT_OK(test_snprintf__attach(skel), "skel_attach")) in test_snprintf_positive()
54 ASSERT_STREQ(skel->bss->num_out, EXP_NUM_OUT, "num_out"); in test_snprintf_positive()
55 ASSERT_EQ(skel->bss->num_ret, EXP_NUM_RET, "num_ret"); in test_snprintf_positive()
57 ASSERT_STREQ(skel->bss->ip_out, EXP_IP_OUT, "ip_out"); in test_snprintf_positive()
58 ASSERT_EQ(skel->bss->ip_ret, EXP_IP_RET, "ip_ret"); in test_snprintf_positive()
60 ASSERT_OK(memcmp(skel->bss->sym_out, exp_sym_out, in test_snprintf_positive()
[all …]
task_local_storage.c
15 struct task_local_storage *skel; in test_sys_enter_exit() local
18 skel = task_local_storage__open_and_load(); in test_sys_enter_exit()
19 if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) in test_sys_enter_exit()
22 skel->bss->target_pid = syscall(SYS_gettid); in test_sys_enter_exit()
24 err = task_local_storage__attach(skel); in test_sys_enter_exit()
32 ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt"); in test_sys_enter_exit()
33 ASSERT_EQ(skel->bss->exit_cnt, 3, "exit_cnt"); in test_sys_enter_exit()
34 ASSERT_EQ(skel->bss->mismatch_cnt, 0, "mismatch_cnt"); in test_sys_enter_exit()
36 task_local_storage__destroy(skel); in test_sys_enter_exit()
41 struct task_local_storage_exit_creds *skel; in test_exit_creds() local
[all …]
bpf_cookie.c
11 static void kprobe_subtest(struct test_bpf_cookie *skel) in kprobe_subtest() argument
20 link1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, in kprobe_subtest()
27 link2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, in kprobe_subtest()
35 retlink1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe, in kprobe_subtest()
42 retlink2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe, in kprobe_subtest()
50 ASSERT_EQ(skel->bss->kprobe_res, 0x1 | 0x2, "kprobe_res"); in kprobe_subtest()
51 ASSERT_EQ(skel->bss->kretprobe_res, 0x10 | 0x20, "kretprobe_res"); in kprobe_subtest()
60 static void uprobe_subtest(struct test_bpf_cookie *skel) in uprobe_subtest() argument
74 link1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, 0 /* self pid */, in uprobe_subtest()
81 link2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, -1 /* any pid */, in uprobe_subtest()
[all …]
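
The bpf_cookie entry attaches the same kprobe program several times, each attachment carrying a different cookie, so the BPF side can tell them apart (via bpf_get_attach_cookie()). A hedged sketch of the user-space half follows; it assumes the bpf_kprobe_opts layout of this libbpf generation, an illustrative traced symbol, and elides error handling.

/* assumes #include "test_bpf_cookie.skel.h" and <bpf/libbpf.h> */
static void attach_with_cookies(struct test_bpf_cookie *skel)
{
        DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
        struct bpf_link *link1, *link2;

        /* first attachment carries cookie 0x1 */
        opts.bpf_cookie = 0x1;
        link1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
                                                "sys_nanosleep" /* assumed symbol */, &opts);

        /* second attachment of the same program carries cookie 0x2 */
        opts.bpf_cookie = 0x2;
        link2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
                                                "sys_nanosleep", &opts);

        /* ... trigger the probe; the test then expects kprobe_res == (0x1 | 0x2) ... */

        bpf_link__destroy(link1);
        bpf_link__destroy(link2);
}
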
ringbuf_multi.c
43 struct test_ringbuf_multi *skel; in test_ringbuf_multi() local
49 skel = test_ringbuf_multi__open(); in test_ringbuf_multi()
50 if (CHECK(!skel, "skel_open", "skeleton open failed\n")) in test_ringbuf_multi()
53 err = bpf_map__set_max_entries(skel->maps.ringbuf1, page_size); in test_ringbuf_multi()
57 err = bpf_map__set_max_entries(skel->maps.ringbuf2, page_size); in test_ringbuf_multi()
61 err = bpf_map__set_max_entries(bpf_map__inner_map(skel->maps.ringbuf_arr), page_size); in test_ringbuf_multi()
69 err = bpf_map__set_inner_map_fd(skel->maps.ringbuf_hash, proto_fd); in test_ringbuf_multi()
73 err = test_ringbuf_multi__load(skel); in test_ringbuf_multi()
81 skel->bss->pid = getpid(); in test_ringbuf_multi()
83 ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf1), in test_ringbuf_multi()
[all …]
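
ringbuf_multi drains several BPF_MAP_TYPE_RINGBUF maps through a single consumer: the first ring creates the ring_buffer instance, further rings are attached to it with ring_buffer__add(). A hedged sketch of that consumer side, reusing the ringbuf1/ringbuf2 map names from the excerpt; the sample callback and its payload layout are assumptions for illustration.

#include <stdio.h>
#include <bpf/libbpf.h>
#include "test_ringbuf_multi.skel.h"

static int process_sample(void *ctx, void *data, size_t size)
{
        long ring = (long)ctx;          /* which ring delivered the sample */
        int *value = data;              /* assumed sample layout */

        printf("ring %ld: %d (%zu bytes)\n", ring, *value, size);
        return 0;
}

static void consume_rings(struct test_ringbuf_multi *skel)
{
        struct ring_buffer *rb;

        /* the first ring creates the consumer ... */
        rb = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf1),
                              process_sample, (void *)1L, NULL);
        if (!rb)
                return;

        /* ... additional rings share the same consumer and epoll set */
        ring_buffer__add(rb, bpf_map__fd(skel->maps.ringbuf2),
                         process_sample, (void *)2L);

        ring_buffer__poll(rb, 100 /* timeout in ms */);
        ring_buffer__free(rb);
}
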
for_each.c
13 struct for_each_hash_map_elem *skel; in test_hash_map() local
18 skel = for_each_hash_map_elem__open_and_load(); in test_hash_map()
19 if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load")) in test_hash_map()
22 hashmap_fd = bpf_map__fd(skel->maps.hashmap); in test_hash_map()
23 max_entries = bpf_map__max_entries(skel->maps.hashmap); in test_hash_map()
33 percpu_map_fd = bpf_map__fd(skel->maps.percpu_map); in test_hash_map()
45 err = bpf_prog_test_run(bpf_program__fd(skel->progs.test_pkt_access), in test_hash_map()
52 ASSERT_EQ(skel->bss->hashmap_output, 4, "hashmap_output"); in test_hash_map()
53 ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems"); in test_hash_map()
59 ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called"); in test_hash_map()
[all …]
sk_lookup.c
516 static void query_lookup_prog(struct test_sk_lookup *skel) in query_lookup_prog() argument
533 link[0] = attach_lookup_prog(skel->progs.lookup_pass); in query_lookup_prog()
536 link[1] = attach_lookup_prog(skel->progs.lookup_pass); in query_lookup_prog()
539 link[2] = attach_lookup_prog(skel->progs.lookup_drop); in query_lookup_prog()
672 static void test_redirect_lookup(struct test_sk_lookup *skel) in test_redirect_lookup() argument
677 .lookup_prog = skel->progs.redir_port, in test_redirect_lookup()
678 .sock_map = skel->maps.redir_map, in test_redirect_lookup()
685 .lookup_prog = skel->progs.redir_ip4, in test_redirect_lookup()
686 .sock_map = skel->maps.redir_map, in test_redirect_lookup()
693 .lookup_prog = skel->progs.select_sock_a, in test_redirect_lookup()
[all …]
attach_probe.c
14 struct test_attach_probe* skel; in test_attach_probe() local
28 skel = test_attach_probe__open_and_load(); in test_attach_probe()
29 if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) in test_attach_probe()
31 if (CHECK(!skel->bss, "check_bss", ".bss wasn't mmap()-ed\n")) in test_attach_probe()
34 kprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kprobe, in test_attach_probe()
39 skel->links.handle_kprobe = kprobe_link; in test_attach_probe()
41 kretprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kretprobe, in test_attach_probe()
46 skel->links.handle_kretprobe = kretprobe_link; in test_attach_probe()
52 uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, in test_attach_probe()
59 skel->links.handle_uprobe = uprobe_link; in test_attach_probe()
[all …]
btf_map_in_map.c
27 struct test_btf_map_in_map *skel; in test_lookup_update() local
30 skel = test_btf_map_in_map__open_and_load(); in test_lookup_update()
31 if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n")) in test_lookup_update()
34 err = test_btf_map_in_map__attach(skel); in test_lookup_update()
38 map1_fd = bpf_map__fd(skel->maps.inner_map1); in test_lookup_update()
39 map2_fd = bpf_map__fd(skel->maps.inner_map2); in test_lookup_update()
40 map3_fd = bpf_map__fd(skel->maps.inner_map3); in test_lookup_update()
41 map4_fd = bpf_map__fd(skel->maps.inner_map4); in test_lookup_update()
42 map5_fd = bpf_map__fd(skel->maps.inner_map5); in test_lookup_update()
43 outer_arr_dyn_fd = bpf_map__fd(skel->maps.outer_arr_dyn); in test_lookup_update()
[all …]
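
btf_map_in_map exercises map-in-map: from user space, the values stored in the outer map are the file descriptors of inner maps. A brief sketch of that update step; the outer map name outer_arr is an assumption, since the excerpt above only shows the inner maps and outer_arr_dyn.

/* assumes #include "test_btf_map_in_map.skel.h" and <bpf/bpf.h> */
static int put_inner_map(struct test_btf_map_in_map *skel)
{
        int key = 0;
        int inner_fd = bpf_map__fd(skel->maps.inner_map1);
        int outer_fd = bpf_map__fd(skel->maps.outer_arr);       /* outer map name assumed */

        /* for *_OF_MAPS types, the value written from user space is the inner map's FD */
        return bpf_map_update_elem(outer_fd, &key, &inner_fd, 0);
}
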
lookup_and_delete.c
47 struct test_lookup_and_delete *skel; in setup_prog() local
50 skel = test_lookup_and_delete__open(); in setup_prog()
51 if (!ASSERT_OK_PTR(skel, "test_lookup_and_delete__open")) in setup_prog()
54 err = bpf_map__set_type(skel->maps.hash_map, map_type); in setup_prog()
58 err = bpf_map__set_max_entries(skel->maps.hash_map, MAX_ENTRIES); in setup_prog()
62 err = test_lookup_and_delete__load(skel); in setup_prog()
66 *map_fd = bpf_map__fd(skel->maps.hash_map); in setup_prog()
70 return skel; in setup_prog()
73 test_lookup_and_delete__destroy(skel); in setup_prog()
78 static int trigger_tp(struct test_lookup_and_delete *skel, __u64 key, in trigger_tp() argument
[all …]
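
lookup_and_delete.c (like map_init.c further down) shows the open/tweak/load split: map attributes such as the type or max_entries can only be changed between __open() and __load(), while the object is not yet loaded. A short sketch of that setup helper, using the names from the excerpt; the entry count is arbitrary.

/* assumes #include "test_lookup_and_delete.skel.h" and <bpf/libbpf.h> */
static struct test_lookup_and_delete *setup(int map_type, int *map_fd)
{
        struct test_lookup_and_delete *skel;

        skel = test_lookup_and_delete__open();
        if (!skel)
                return NULL;

        /* legal only before __load(): the BPF object is still unloaded here */
        if (bpf_map__set_type(skel->maps.hash_map, map_type) ||
            bpf_map__set_max_entries(skel->maps.hash_map, 64 /* arbitrary */) ||
            test_lookup_and_delete__load(skel))
                goto err;

        *map_fd = bpf_map__fd(skel->maps.hash_map);
        return skel;
err:
        test_lookup_and_delete__destroy(skel);
        return NULL;
}
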
linked_vars.c
11 struct linked_vars *skel; in test_linked_vars() local
13 skel = linked_vars__open(); in test_linked_vars()
14 if (!ASSERT_OK_PTR(skel, "skel_open")) in test_linked_vars()
17 skel->bss->input_bss1 = 1000; in test_linked_vars()
18 skel->bss->input_bss2 = 2000; in test_linked_vars()
19 skel->bss->input_bss_weak = 3000; in test_linked_vars()
21 err = linked_vars__load(skel); in test_linked_vars()
25 err = linked_vars__attach(skel); in test_linked_vars()
32 ASSERT_EQ(skel->bss->output_bss1, 1000 + 2000 + 3000, "output_bss1"); in test_linked_vars()
33 ASSERT_EQ(skel->bss->output_bss2, 1000 + 2000 + 3000, "output_bss2"); in test_linked_vars()
[all …]
get_func_ip_test.c
7 struct get_func_ip_test *skel = NULL; in test_get_func_ip_test() local
11 skel = get_func_ip_test__open(); in test_get_func_ip_test()
12 if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open")) in test_get_func_ip_test()
19 bpf_program__set_autoload(skel->progs.test6, false); in test_get_func_ip_test()
20 bpf_program__set_autoload(skel->progs.test7, false); in test_get_func_ip_test()
23 err = get_func_ip_test__load(skel); in test_get_func_ip_test()
27 err = get_func_ip_test__attach(skel); in test_get_func_ip_test()
31 prog_fd = bpf_program__fd(skel->progs.test1); in test_get_func_ip_test()
37 prog_fd = bpf_program__fd(skel->progs.test5); in test_get_func_ip_test()
43 ASSERT_EQ(skel->bss->test1_result, 1, "test1_result"); in test_get_func_ip_test()
[all …]
recursion.c
10 struct recursion *skel; in test_recursion() local
14 skel = recursion__open_and_load(); in test_recursion()
15 if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) in test_recursion()
18 err = recursion__attach(skel); in test_recursion()
22 ASSERT_EQ(skel->bss->pass1, 0, "pass1 == 0"); in test_recursion()
23 bpf_map_lookup_elem(bpf_map__fd(skel->maps.hash1), &key, 0); in test_recursion()
24 ASSERT_EQ(skel->bss->pass1, 1, "pass1 == 1"); in test_recursion()
25 bpf_map_lookup_elem(bpf_map__fd(skel->maps.hash1), &key, 0); in test_recursion()
26 ASSERT_EQ(skel->bss->pass1, 2, "pass1 == 2"); in test_recursion()
28 ASSERT_EQ(skel->bss->pass2, 0, "pass2 == 0"); in test_recursion()
[all …]
test_lsm.c
54 static int test_lsm(struct lsm *skel) in test_lsm() argument
60 err = lsm__attach(skel); in test_lsm()
65 link = bpf_program__attach(skel->progs.test_int_hook); in test_lsm()
69 err = exec_cmd(&skel->bss->monitored_pid); in test_lsm()
73 ASSERT_EQ(skel->bss->bprm_count, 1, "bprm_count"); in test_lsm()
75 skel->bss->monitored_pid = getpid(); in test_lsm()
81 ASSERT_EQ(skel->bss->mprotect_count, 1, "mprotect_count"); in test_lsm()
87 ASSERT_EQ(skel->bss->copy_test, 3, "copy_test"); in test_lsm()
89 lsm__detach(skel); in test_lsm()
91 skel->bss->copy_test = 0; in test_lsm()
[all …]
linked_funcs.c
11 struct linked_funcs *skel; in test_linked_funcs() local
13 skel = linked_funcs__open(); in test_linked_funcs()
14 if (!ASSERT_OK_PTR(skel, "skel_open")) in test_linked_funcs()
17 skel->rodata->my_tid = syscall(SYS_gettid); in test_linked_funcs()
18 skel->bss->syscall_id = SYS_getpgid; in test_linked_funcs()
20 err = linked_funcs__load(skel); in test_linked_funcs()
24 err = linked_funcs__attach(skel); in test_linked_funcs()
31 ASSERT_EQ(skel->bss->output_val1, 2000 + 2000, "output_val1"); in test_linked_funcs()
32 ASSERT_EQ(skel->bss->output_ctx1, SYS_getpgid, "output_ctx1"); in test_linked_funcs()
33 ASSERT_EQ(skel->bss->output_weak1, 42, "output_weak1"); in test_linked_funcs()
[all …]
map_init.c
41 struct test_map_init *skel; in setup() local
44 skel = test_map_init__open(); in setup()
45 if (!ASSERT_OK_PTR(skel, "skel_open")) in setup()
48 err = bpf_map__set_type(skel->maps.hashmap1, map_type); in setup()
52 err = bpf_map__set_max_entries(skel->maps.hashmap1, map_sz); in setup()
56 err = test_map_init__load(skel); in setup()
60 *map_fd = bpf_map__fd(skel->maps.hashmap1); in setup()
68 return skel; in setup()
73 test_map_init__destroy(skel); in setup()
78 static int prog_run_insert_elem(struct test_map_init *skel, map_key_t key, in prog_run_insert_elem() argument
[all …]
sockmap_basic.c
113 struct test_skmsg_load_helpers *skel; in test_skmsg_helpers() local
116 skel = test_skmsg_load_helpers__open_and_load(); in test_skmsg_helpers()
117 if (CHECK_FAIL(!skel)) { in test_skmsg_helpers()
122 verdict = bpf_program__fd(skel->progs.prog_msg_verdict); in test_skmsg_helpers()
123 map = bpf_map__fd(skel->maps.sock_map); in test_skmsg_helpers()
137 test_skmsg_load_helpers__destroy(skel); in test_skmsg_helpers()
144 struct test_sockmap_update *skel; in test_sockmap_update() local
154 skel = test_sockmap_update__open_and_load(); in test_sockmap_update()
155 if (CHECK(!skel, "open_and_load", "cannot load skeleton\n")) in test_sockmap_update()
158 prog = bpf_program__fd(skel->progs.copy_sock_map); in test_sockmap_update()
[all …]
probe_read_user_str.c
9 static int test_one_str(struct test_probe_read_user_str *skel, const char *str, in test_one_str() argument
20 skel->bss->user_ptr = buf; in test_one_str()
26 if (CHECK(skel->bss->ret < 0, "prog_ret", "prog returned: %ld\n", in test_one_str()
27 skel->bss->ret)) in test_one_str()
31 err = memcmp(skel->bss->buf, str, len); in test_one_str()
37 err = memcmp(skel->bss->buf + len, buf, sizeof(buf) - len); in test_one_str()
46 struct test_probe_read_user_str *skel; in test_probe_read_user_str() local
49 skel = test_probe_read_user_str__open_and_load(); in test_probe_read_user_str()
50 if (CHECK(!skel, "test_probe_read_user_str__open_and_load", in test_probe_read_user_str()
55 skel->bss->pid = getpid(); in test_probe_read_user_str()
[all …]
subprogs.c
12 struct test_subprogs *skel; in test_subprogs() local
16 skel = test_subprogs__open_and_load(); in test_subprogs()
17 if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) in test_subprogs()
20 err = test_subprogs__attach(skel); in test_subprogs()
26 CHECK(skel->bss->res1 != 12, "res1", "got %d, exp %d\n", skel->bss->res1, 12); in test_subprogs()
27 CHECK(skel->bss->res2 != 17, "res2", "got %d, exp %d\n", skel->bss->res2, 17); in test_subprogs()
28 CHECK(skel->bss->res3 != 19, "res3", "got %d, exp %d\n", skel->bss->res3, 19); in test_subprogs()
29 CHECK(skel->bss->res4 != 36, "res4", "got %d, exp %d\n", skel->bss->res4, 36); in test_subprogs()
36 test_subprogs__destroy(skel); in test_subprogs()
autoload.c
11 struct test_autoload* skel; in test_autoload() local
13 skel = test_autoload__open_and_load(); in test_autoload()
15 if (CHECK(skel, "skel_open_and_load", "unexpected success\n")) in test_autoload()
18 skel = test_autoload__open(); in test_autoload()
19 if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) in test_autoload()
23 bpf_program__set_autoload(skel->progs.prog3, false); in test_autoload()
25 err = test_autoload__load(skel); in test_autoload()
29 err = test_autoload__attach(skel); in test_autoload()
35 CHECK(!skel->bss->prog1_called, "prog1", "not called\n"); in test_autoload()
36 CHECK(!skel->bss->prog2_called, "prog2", "not called\n"); in test_autoload()
[all …]
/Linux-v5.15/samples/v4l/
v4l2-pci-skeleton.c
133 struct skeleton *skel = dev_id; in skeleton_irq() local
140 spin_lock(&skel->qlock); in skeleton_irq()
142 spin_unlock(&skel->qlock); in skeleton_irq()
144 new_buf->vb.sequence = skel->sequence++; in skeleton_irq()
145 new_buf->vb.field = skel->field; in skeleton_irq()
146 if (skel->format.field == V4L2_FIELD_ALTERNATE) { in skeleton_irq()
147 if (skel->field == V4L2_FIELD_BOTTOM) in skeleton_irq()
148 skel->field = V4L2_FIELD_TOP; in skeleton_irq()
149 else if (skel->field == V4L2_FIELD_TOP) in skeleton_irq()
150 skel->field = V4L2_FIELD_BOTTOM; in skeleton_irq()
[all …]
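
This last hit is unrelated to BPF: in the V4L2 PCI skeleton sample driver, skel is the driver instance, and the interrupt handler above fills in per-frame metadata and alternates fields when the configured format is V4L2_FIELD_ALTERNATE. A condensed sketch of that bookkeeping, pulled out into a hypothetical helper; it assumes the struct skeleton definition from v4l2-pci-skeleton.c and <media/videobuf2-v4l2.h>.

static void skel_fill_vb2_meta(struct skeleton *skel, struct vb2_v4l2_buffer *vb)
{
        vb->sequence = skel->sequence++;
        vb->field = skel->field;

        if (skel->format.field == V4L2_FIELD_ALTERNATE) {
                /* each interrupt delivers one field; toggle TOP/BOTTOM for the next frame */
                if (skel->field == V4L2_FIELD_BOTTOM)
                        skel->field = V4L2_FIELD_TOP;
                else if (skel->field == V4L2_FIELD_TOP)
                        skel->field = V4L2_FIELD_BOTTOM;
        }
}
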
