
Searched refs:skel (Results 1 – 25 of 196) sorted by relevance


/Linux-v6.1/tools/testing/selftests/bpf/prog_tests/
atomics.c
7 static void test_add(struct atomics_lskel *skel) in test_add() argument
13 prog_fd = skel->progs.add.prog_fd; in test_add()
20 ASSERT_EQ(skel->data->add64_value, 3, "add64_value"); in test_add()
21 ASSERT_EQ(skel->bss->add64_result, 1, "add64_result"); in test_add()
23 ASSERT_EQ(skel->data->add32_value, 3, "add32_value"); in test_add()
24 ASSERT_EQ(skel->bss->add32_result, 1, "add32_result"); in test_add()
26 ASSERT_EQ(skel->bss->add_stack_value_copy, 3, "add_stack_value"); in test_add()
27 ASSERT_EQ(skel->bss->add_stack_result, 1, "add_stack_result"); in test_add()
29 ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value"); in test_add()
32 static void test_sub(struct atomics_lskel *skel) in test_sub() argument
[all …]
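For context on how these light-skeleton hits fit together, here is a minimal sketch of the run-and-check pattern the excerpt suggests, assuming the generated atomics_lskel API and the selftests' test_progs.h ASSERT helpers (not the verbatim test body):

static void test_add_sketch(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct atomics_lskel *skel;
	int err;

	skel = atomics_lskel__open_and_load();            /* generated open+load helper */
	if (!skel)
		return;
	/* light skeletons expose raw program FDs instead of bpf_program objects */
	err = bpf_prog_test_run_opts(skel->progs.add.prog_fd, &topts);
	if (!err && topts.retval == 0)
		/* global variables land in skel->data / skel->bss after load */
		ASSERT_EQ(skel->data->add64_value, 3, "add64_value");
	atomics_lskel__destroy(skel);
}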
test_strncmp.c
6 static int trigger_strncmp(const struct strncmp_test *skel) in trigger_strncmp() argument
12 cmp = skel->bss->cmp_ret; in trigger_strncmp()
24 static void strncmp_full_str_cmp(struct strncmp_test *skel, const char *name, in strncmp_full_str_cmp() argument
27 size_t nr = sizeof(skel->bss->str); in strncmp_full_str_cmp()
28 char *str = skel->bss->str; in strncmp_full_str_cmp()
33 memcpy(str, skel->rodata->target, nr); in strncmp_full_str_cmp()
37 got = trigger_strncmp(skel); in strncmp_full_str_cmp()
46 struct strncmp_test *skel; in test_strncmp_ret() local
49 skel = strncmp_test__open(); in test_strncmp_ret()
50 if (!ASSERT_OK_PTR(skel, "strncmp_test open")) in test_strncmp_ret()
[all …]
attach_probe.c
33 struct test_attach_probe* skel; in test_attach_probe() local
60 skel = test_attach_probe__open(); in test_attach_probe()
61 if (!ASSERT_OK_PTR(skel, "skel_open")) in test_attach_probe()
65 if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable, in test_attach_probe()
69 if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load")) in test_attach_probe()
71 if (!ASSERT_OK_PTR(skel->bss, "check_bss")) in test_attach_probe()
75 kprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kprobe, in test_attach_probe()
80 skel->links.handle_kprobe = kprobe_link; in test_attach_probe()
82 kretprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kretprobe, in test_attach_probe()
87 skel->links.handle_kretprobe = kretprobe_link; in test_attach_probe()
[all …]
bpf_loop.c
8 static void check_nr_loops(struct bpf_loop *skel) in check_nr_loops() argument
12 link = bpf_program__attach(skel->progs.test_prog); in check_nr_loops()
17 skel->bss->nr_loops = 0; in check_nr_loops()
21 ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops, in check_nr_loops()
25 skel->bss->nr_loops = 500; in check_nr_loops()
29 ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops, in check_nr_loops()
31 ASSERT_EQ(skel->bss->g_output, (500 * 499) / 2, "g_output"); in check_nr_loops()
34 skel->bss->nr_loops = -1; in check_nr_loops()
38 ASSERT_EQ(skel->bss->err, -E2BIG, "over max limit"); in check_nr_loops()
43 static void check_callback_fn_stop(struct bpf_loop *skel) in check_callback_fn_stop() argument
[all …]
bpf_iter.c
40 struct bpf_iter_test_kern3 *skel; in test_btf_id_or_null() local
42 skel = bpf_iter_test_kern3__open_and_load(); in test_btf_id_or_null()
43 if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) { in test_btf_id_or_null()
44 bpf_iter_test_kern3__destroy(skel); in test_btf_id_or_null()
79 static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog, in do_read_map_iter_fd() argument
104 bpf_object__destroy_skeleton(*skel); in do_read_map_iter_fd()
105 *skel = NULL; in do_read_map_iter_fd()
141 struct bpf_iter_ipv6_route *skel; in test_ipv6_route() local
143 skel = bpf_iter_ipv6_route__open_and_load(); in test_ipv6_route()
144 if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load")) in test_ipv6_route()
[all …]
tracing_struct.c
9 struct tracing_struct *skel; in test_fentry() local
12 skel = tracing_struct__open_and_load(); in test_fentry()
13 if (!ASSERT_OK_PTR(skel, "tracing_struct__open_and_load")) in test_fentry()
16 err = tracing_struct__attach(skel); in test_fentry()
22 ASSERT_EQ(skel->bss->t1_a_a, 2, "t1:a.a"); in test_fentry()
23 ASSERT_EQ(skel->bss->t1_a_b, 3, "t1:a.b"); in test_fentry()
24 ASSERT_EQ(skel->bss->t1_b, 1, "t1:b"); in test_fentry()
25 ASSERT_EQ(skel->bss->t1_c, 4, "t1:c"); in test_fentry()
27 ASSERT_EQ(skel->bss->t1_nregs, 4, "t1 nregs"); in test_fentry()
28 ASSERT_EQ(skel->bss->t1_reg0, 2, "t1 reg0"); in test_fentry()
[all …]
find_vma.c
10 static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret, bool need_test) in test_and_reset_skel() argument
13 ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec"); in test_and_reset_skel()
14 ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret"); in test_and_reset_skel()
15 ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret"); in test_and_reset_skel()
16 ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs"); in test_and_reset_skel()
19 skel->bss->found_vm_exec = 0; in test_and_reset_skel()
20 skel->data->find_addr_ret = -1; in test_and_reset_skel()
21 skel->data->find_zero_ret = -1; in test_and_reset_skel()
22 skel->bss->d_iname[0] = 0; in test_and_reset_skel()
41 static bool find_vma_pe_condition(struct find_vma *skel) in find_vma_pe_condition() argument
[all …]
btf_tag.c
17 struct test_btf_decl_tag *skel; in test_btf_decl_tag() local
19 skel = test_btf_decl_tag__open_and_load(); in test_btf_decl_tag()
20 if (!ASSERT_OK_PTR(skel, "btf_decl_tag")) in test_btf_decl_tag()
23 if (skel->rodata->skip_tests) { in test_btf_decl_tag()
28 test_btf_decl_tag__destroy(skel); in test_btf_decl_tag()
33 struct btf_type_tag *skel; in test_btf_type_tag() local
35 skel = btf_type_tag__open_and_load(); in test_btf_type_tag()
36 if (!ASSERT_OK_PTR(skel, "btf_type_tag")) in test_btf_type_tag()
39 if (skel->rodata->skip_tests) { in test_btf_type_tag()
44 btf_type_tag__destroy(skel); in test_btf_type_tag()
[all …]
check_mtu.c
43 struct test_check_mtu *skel; in test_check_mtu_xdp_attach() local
49 skel = test_check_mtu__open_and_load(); in test_check_mtu_xdp_attach()
50 if (CHECK(!skel, "open and load skel", "failed")) in test_check_mtu_xdp_attach()
53 prog = skel->progs.xdp_use_helper_basic; in test_check_mtu_xdp_attach()
58 skel->links.xdp_use_helper_basic = link; in test_check_mtu_xdp_attach()
75 test_check_mtu__destroy(skel); in test_check_mtu_xdp_attach()
78 static void test_check_mtu_run_xdp(struct test_check_mtu *skel, in test_check_mtu_run_xdp() argument
99 mtu_result = skel->bss->global_bpf_mtu_xdp; in test_check_mtu_run_xdp()
106 struct test_check_mtu *skel; in test_check_mtu_xdp() local
109 skel = test_check_mtu__open(); in test_check_mtu_xdp()
[all …]
bpf_cookie.c
21 static void kprobe_subtest(struct test_bpf_cookie *skel) in kprobe_subtest() argument
30 link1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, in kprobe_subtest()
37 link2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, in kprobe_subtest()
45 retlink1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe, in kprobe_subtest()
52 retlink2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe, in kprobe_subtest()
60 ASSERT_EQ(skel->bss->kprobe_res, 0x1 | 0x2, "kprobe_res"); in kprobe_subtest()
61 ASSERT_EQ(skel->bss->kretprobe_res, 0x10 | 0x20, "kretprobe_res"); in kprobe_subtest()
70 static void kprobe_multi_test_run(struct kprobe_multi *skel) in kprobe_multi_test_run() argument
75 prog_fd = bpf_program__fd(skel->progs.trigger); in kprobe_multi_test_run()
80 ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result"); in kprobe_multi_test_run()
[all …]
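The kprobe_subtest excerpt above attaches the same programs repeatedly with different cookie values; a hedged sketch of one such attach using libbpf's bpf_kprobe_opts (the traced symbol name is illustrative, not taken from the test):

static void kprobe_cookie_sketch(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
	struct bpf_link *link, *retlink;

	opts.bpf_cookie = 0x1;          /* read back in the program via bpf_get_attach_cookie() */
	link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
					       "bpf_fentry_test1" /* illustrative symbol */, &opts);

	opts.bpf_cookie = 0x10;
	opts.retprobe = true;           /* the same call attaches the kretprobe variant */
	retlink = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
						  "bpf_fentry_test1", &opts);

	if (link)
		bpf_link__destroy(link);
	if (retlink)
		bpf_link__destroy(retlink);
}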
user_ringbuf.c
74 struct user_ringbuf_success *skel; in open_load_ringbuf_skel() local
77 skel = user_ringbuf_success__open(); in open_load_ringbuf_skel()
78 if (!ASSERT_OK_PTR(skel, "skel_open")) in open_load_ringbuf_skel()
81 err = bpf_map__set_max_entries(skel->maps.user_ringbuf, c_ringbuf_size); in open_load_ringbuf_skel()
85 err = bpf_map__set_max_entries(skel->maps.kernel_ringbuf, c_ringbuf_size); in open_load_ringbuf_skel()
89 err = user_ringbuf_success__load(skel); in open_load_ringbuf_skel()
93 return skel; in open_load_ringbuf_skel()
96 user_ringbuf_success__destroy(skel); in open_load_ringbuf_skel()
105 struct user_ringbuf_success *skel; in test_user_ringbuf_mappings() local
107 skel = open_load_ringbuf_skel(); in test_user_ringbuf_mappings()
[all …]
test_bpf_syscall_macro.c
9 struct bpf_syscall_macro *skel = NULL; in test_bpf_syscall_macro() local
18 skel = bpf_syscall_macro__open(); in test_bpf_syscall_macro()
19 if (!ASSERT_OK_PTR(skel, "bpf_syscall_macro__open")) in test_bpf_syscall_macro()
22 skel->rodata->filter_pid = getpid(); in test_bpf_syscall_macro()
25 err = bpf_syscall_macro__load(skel); in test_bpf_syscall_macro()
30 err = bpf_syscall_macro__attach(skel); in test_bpf_syscall_macro()
37 ASSERT_NEQ(skel->bss->arg1, exp_arg1, "syscall_arg1"); in test_bpf_syscall_macro()
39 ASSERT_EQ(skel->bss->arg1, exp_arg1, "syscall_arg1"); in test_bpf_syscall_macro()
41 ASSERT_EQ(skel->bss->arg2, exp_arg2, "syscall_arg2"); in test_bpf_syscall_macro()
42 ASSERT_EQ(skel->bss->arg3, exp_arg3, "syscall_arg3"); in test_bpf_syscall_macro()
[all …]
btf_skc_cls_ingress.c
19 static struct test_btf_skc_cls_ingress *skel; variable
57 memset(&skel->bss->srv_sa6, 0, sizeof(skel->bss->srv_sa6)); in reset_test()
58 skel->bss->listen_tp_sport = 0; in reset_test()
59 skel->bss->req_sk_sport = 0; in reset_test()
60 skel->bss->recv_cookie = 0; in reset_test()
61 skel->bss->gen_cookie = 0; in reset_test()
62 skel->bss->linum = 0; in reset_test()
67 if (skel->bss->linum) in print_err_line()
68 printf("bpf prog error at line %u\n", skel->bss->linum); in print_err_line()
88 memcpy(&skel->bss->srv_sa6, &srv_sa6, sizeof(srv_sa6)); in test_conn()
[all …]
ringbuf.c
61 static struct test_ringbuf_lskel *skel; variable
66 skel->bss->dropped = 0; in trigger_samples()
67 skel->bss->total = 0; in trigger_samples()
68 skel->bss->discarded = 0; in trigger_samples()
71 skel->bss->value = 333; in trigger_samples()
73 skel->bss->value = 777; in trigger_samples()
93 skel = test_ringbuf_lskel__open(); in test_ringbuf()
94 if (CHECK(!skel, "skel_open", "skeleton open failed\n")) in test_ringbuf()
97 skel->maps.ringbuf.max_entries = page_size; in test_ringbuf()
99 err = test_ringbuf_lskel__load(skel); in test_ringbuf()
[all …]
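The ringbuf test drives its samples through a user-space consumer; a short hedged sketch of how such a consumer is typically wired to the light skeleton's maps.ringbuf.map_fd handle (callback body illustrative):

static int handle_sample(void *ctx, void *data, size_t size)
{
	return 0;                       /* 0 keeps consuming; a negative value aborts the poll */
}

static void consume_sketch(struct test_ringbuf_lskel *skel)
{
	struct ring_buffer *rb;

	/* light skeletons expose the map FD directly rather than a bpf_map object */
	rb = ring_buffer__new(skel->maps.ringbuf.map_fd, handle_sample, NULL, NULL);
	if (!rb)
		return;
	ring_buffer__poll(rb, 100 /* timeout in ms */);
	ring_buffer__free(rb);
}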
snprintf.c
40 struct test_snprintf *skel; in test_snprintf_positive() local
42 skel = test_snprintf__open_and_load(); in test_snprintf_positive()
43 if (!ASSERT_OK_PTR(skel, "skel_open")) in test_snprintf_positive()
46 skel->bss->pid = getpid(); in test_snprintf_positive()
48 if (!ASSERT_OK(test_snprintf__attach(skel), "skel_attach")) in test_snprintf_positive()
54 ASSERT_STREQ(skel->bss->num_out, EXP_NUM_OUT, "num_out"); in test_snprintf_positive()
55 ASSERT_EQ(skel->bss->num_ret, EXP_NUM_RET, "num_ret"); in test_snprintf_positive()
57 ASSERT_STREQ(skel->bss->ip_out, EXP_IP_OUT, "ip_out"); in test_snprintf_positive()
58 ASSERT_EQ(skel->bss->ip_ret, EXP_IP_RET, "ip_ret"); in test_snprintf_positive()
60 ASSERT_OK(memcmp(skel->bss->sym_out, exp_sym_out, in test_snprintf_positive()
[all …]
task_local_storage.c
15 struct task_local_storage *skel; in test_sys_enter_exit() local
18 skel = task_local_storage__open_and_load(); in test_sys_enter_exit()
19 if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) in test_sys_enter_exit()
22 skel->bss->target_pid = syscall(SYS_gettid); in test_sys_enter_exit()
24 err = task_local_storage__attach(skel); in test_sys_enter_exit()
32 ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt"); in test_sys_enter_exit()
33 ASSERT_EQ(skel->bss->exit_cnt, 3, "exit_cnt"); in test_sys_enter_exit()
34 ASSERT_EQ(skel->bss->mismatch_cnt, 0, "mismatch_cnt"); in test_sys_enter_exit()
36 task_local_storage__destroy(skel); in test_sys_enter_exit()
41 struct task_local_storage_exit_creds *skel; in test_exit_creds() local
[all …]
for_each.c
14 struct for_each_hash_map_elem *skel; in test_hash_map() local
25 skel = for_each_hash_map_elem__open_and_load(); in test_hash_map()
26 if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load")) in test_hash_map()
29 max_entries = bpf_map__max_entries(skel->maps.hashmap); in test_hash_map()
33 err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key), in test_hash_map()
48 err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key), in test_hash_map()
53 err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts); in test_hash_map()
59 ASSERT_EQ(skel->bss->hashmap_output, 4, "hashmap_output"); in test_hash_map()
60 ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems"); in test_hash_map()
63 err = bpf_map__lookup_elem(skel->maps.hashmap, &key, sizeof(key), &val, sizeof(val), 0); in test_hash_map()
[all …]
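The for_each excerpt mixes skeleton map handles with libbpf's sized map accessors; a brief sketch of that update/lookup round trip (key and value types here are illustrative and must match the map definition):

static int hashmap_roundtrip_sketch(struct for_each_hash_map_elem *skel)
{
	__u32 key = 1;
	__u64 val = 2, out = 0;
	int err;

	/* write one element through the skeleton's map handle */
	err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
				   &val, sizeof(val), BPF_ANY);
	if (err)
		return err;
	/* read it back; libbpf validates key_sz/value_sz against the map */
	return bpf_map__lookup_elem(skel->maps.hashmap, &key, sizeof(key),
				    &out, sizeof(out), 0);
}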
bpf_nf.c
55 struct test_bpf_nf *skel; in test_bpf_nf_ct() local
66 skel = test_bpf_nf__open_and_load(); in test_bpf_nf_ct()
67 if (!ASSERT_OK_PTR(skel, "test_bpf_nf__open_and_load")) in test_bpf_nf_ct()
91 skel->bss->saddr = peer_addr.sin_addr.s_addr; in test_bpf_nf_ct()
92 skel->bss->sport = peer_addr.sin_port; in test_bpf_nf_ct()
93 skel->bss->daddr = peer_addr.sin_addr.s_addr; in test_bpf_nf_ct()
94 skel->bss->dport = htons(srv_port); in test_bpf_nf_ct()
97 prog_fd = bpf_program__fd(skel->progs.nf_xdp_ct_test); in test_bpf_nf_ct()
99 prog_fd = bpf_program__fd(skel->progs.nf_skb_ct_test); in test_bpf_nf_ct()
105 ASSERT_EQ(skel->bss->test_einval_bpf_tuple, -EINVAL, "Test EINVAL for NULL bpf_tuple"); in test_bpf_nf_ct()
[all …]
log_fixup.c
17 struct test_log_fixup* skel; in bad_core_relo() local
20 skel = test_log_fixup__open(); in bad_core_relo()
21 if (!ASSERT_OK_PTR(skel, "skel_open")) in bad_core_relo()
24 bpf_program__set_autoload(skel->progs.bad_relo, true); in bad_core_relo()
26 bpf_program__set_log_buf(skel->progs.bad_relo, log_buf, log_buf_size ?: sizeof(log_buf)); in bad_core_relo()
28 err = test_log_fixup__load(skel); in bad_core_relo()
68 test_log_fixup__destroy(skel); in bad_core_relo()
74 struct test_log_fixup* skel; in bad_core_relo_subprog() local
77 skel = test_log_fixup__open(); in bad_core_relo_subprog()
78 if (!ASSERT_OK_PTR(skel, "skel_open")) in bad_core_relo_subprog()
[all …]
ringbuf_multi.c
43 struct test_ringbuf_multi *skel; in test_ringbuf_multi() local
49 skel = test_ringbuf_multi__open(); in test_ringbuf_multi()
50 if (CHECK(!skel, "skel_open", "skeleton open failed\n")) in test_ringbuf_multi()
54 ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), page_size, "rb1_size_before"); in test_ringbuf_multi()
55 ASSERT_OK(bpf_map__set_max_entries(skel->maps.ringbuf1, page_size + 1), "rb1_resize"); in test_ringbuf_multi()
56 ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), 2 * page_size, "rb1_size_after"); in test_ringbuf_multi()
57 ASSERT_OK(bpf_map__set_max_entries(skel->maps.ringbuf1, page_size), "rb1_reset"); in test_ringbuf_multi()
58 ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), page_size, "rb1_size_final"); in test_ringbuf_multi()
64 err = bpf_map__set_inner_map_fd(skel->maps.ringbuf_hash, proto_fd); in test_ringbuf_multi()
68 err = test_ringbuf_multi__load(skel); in test_ringbuf_multi()
[all …]
ksyms_btf.c
20 struct test_ksyms_btf *skel = NULL; in test_basic() local
36 skel = test_ksyms_btf__open_and_load(); in test_basic()
37 if (CHECK(!skel, "skel_open", "failed to open and load skeleton\n")) in test_basic()
40 err = test_ksyms_btf__attach(skel); in test_basic()
47 data = skel->data; in test_basic()
74 test_ksyms_btf__destroy(skel); in test_basic()
79 struct test_ksyms_btf_null_check *skel; in test_null_check() local
81 skel = test_ksyms_btf_null_check__open_and_load(); in test_null_check()
82 CHECK(skel, "skel_open", "unexpected load of a prog missing null check\n"); in test_null_check()
84 test_ksyms_btf_null_check__destroy(skel); in test_null_check()
[all …]
kprobe_multi_test.c
9 static void kprobe_multi_test_run(struct kprobe_multi *skel, bool test_return) in kprobe_multi_test_run() argument
14 prog_fd = bpf_program__fd(skel->progs.trigger); in kprobe_multi_test_run()
19 ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result"); in kprobe_multi_test_run()
20 ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result"); in kprobe_multi_test_run()
21 ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result"); in kprobe_multi_test_run()
22 ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result"); in kprobe_multi_test_run()
23 ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result"); in kprobe_multi_test_run()
24 ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result"); in kprobe_multi_test_run()
25 ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result"); in kprobe_multi_test_run()
26 ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result"); in kprobe_multi_test_run()
[all …]
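The run-and-check above assumes the kprobe.multi programs were already attached; a hedged sketch of such an attach by glob pattern (the program name and pattern are illustrative):

static void kprobe_multi_attach_sketch(struct kprobe_multi *skel)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	struct bpf_link *link;

	/* one link attaches the program to every kernel symbol matching the glob */
	link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe /* illustrative name */,
						     "bpf_fentry_test*", &opts);
	if (link)
		bpf_link__destroy(link);
}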
/Linux-v6.1/kernel/bpf/preload/iterators/
iterators.lskel.h
26 iterators_bpf__dump_bpf_map__attach(struct iterators_bpf *skel) in iterators_bpf__dump_bpf_map__attach() argument
28 int prog_fd = skel->progs.dump_bpf_map.prog_fd; in iterators_bpf__dump_bpf_map__attach()
32 skel->links.dump_bpf_map_fd = fd; in iterators_bpf__dump_bpf_map__attach()
37 iterators_bpf__dump_bpf_prog__attach(struct iterators_bpf *skel) in iterators_bpf__dump_bpf_prog__attach() argument
39 int prog_fd = skel->progs.dump_bpf_prog.prog_fd; in iterators_bpf__dump_bpf_prog__attach()
43 skel->links.dump_bpf_prog_fd = fd; in iterators_bpf__dump_bpf_prog__attach()
48 iterators_bpf__attach(struct iterators_bpf *skel) in iterators_bpf__attach() argument
52 ret = ret < 0 ? ret : iterators_bpf__dump_bpf_map__attach(skel); in iterators_bpf__attach()
53 ret = ret < 0 ? ret : iterators_bpf__dump_bpf_prog__attach(skel); in iterators_bpf__attach()
58 iterators_bpf__detach(struct iterators_bpf *skel) in iterators_bpf__detach() argument
[all …]
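These are generated light-skeleton attach helpers: each stores the FD it gets back for its iterator program in skel->links. A minimal caller sketch, assuming the usual generated entry points (open_and_load / attach / detach / destroy):

static int iterators_attach_sketch(void)
{
	struct iterators_bpf *skel;

	skel = iterators_bpf__open_and_load();       /* loads the embedded progs and maps */
	if (!skel)
		return -1;
	if (iterators_bpf__attach(skel) < 0) {       /* wires up dump_bpf_map and dump_bpf_prog */
		iterators_bpf__destroy(skel);
		return -1;
	}
	/* use or pin skel->links.dump_bpf_map_fd / dump_bpf_prog_fd here */
	iterators_bpf__detach(skel);
	iterators_bpf__destroy(skel);
	return 0;
}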
/Linux-v6.1/samples/v4l/
v4l2-pci-skeleton.c
121 struct skeleton *skel = dev_id; in skeleton_irq() local
128 spin_lock(&skel->qlock); in skeleton_irq()
130 spin_unlock(&skel->qlock); in skeleton_irq()
132 new_buf->vb.sequence = skel->sequence++; in skeleton_irq()
133 new_buf->vb.field = skel->field; in skeleton_irq()
134 if (skel->format.field == V4L2_FIELD_ALTERNATE) { in skeleton_irq()
135 if (skel->field == V4L2_FIELD_BOTTOM) in skeleton_irq()
136 skel->field = V4L2_FIELD_TOP; in skeleton_irq()
137 else if (skel->field == V4L2_FIELD_TOP) in skeleton_irq()
138 skel->field = V4L2_FIELD_BOTTOM; in skeleton_irq()
[all …]
/Linux-v6.1/tools/testing/selftests/bpf/
test_cpp.cpp
14 T *skel; member in Skeleton
16 Skeleton(): skel(nullptr) { } in Skeleton()
18 ~Skeleton() { if (skel) T::destroy(skel); } in ~Skeleton()
24 if (skel) in open()
27 skel = T::open(opts); in open()
28 err = libbpf_get_error(skel); in open()
30 skel = nullptr; in open()
37 int load() { return T::load(skel); } in load()
39 int attach() { return T::attach(skel); } in attach()
41 void detach() { return T::detach(skel); } in detach()
[all …]
