// SPDX-License-Identifier: GPL-2.0
#include <bpf/btf.h>
#include <test_btf.h>
#include <linux/btf.h>
#include <test_progs.h>
#include <network_helpers.h>

#include "linked_list.skel.h"
#include "linked_list_fail.skel.h"

static char log_buf[1024 * 1024];

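/*
 * Programs from the linked_list_fail skeleton that the verifier must
 * reject, each paired with a substring expected in the verifier log.
 */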
static struct {
	const char *prog_name;
	const char *err_msg;
} linked_list_fail_tests[] = {
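/*
 * Every push/pop helper must fail when the bpf_spin_lock guarding the list
 * (at byte offset off within the same allocation) is not held.
 */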
#define TEST(test, off) \
	{ #test "_missing_lock_push_front", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_push_back", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_pop_front", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_pop_back", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" },
	TEST(kptr, 40)
	TEST(global, 16)
	TEST(map, 0)
	TEST(inner_map, 0)
#undef TEST
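/*
 * Holding a lock that lives in a different allocation than the list_head
 * must be rejected for every list operation.
 */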
#define TEST(test, op) \
	{ #test "_kptr_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=40 must be held for bpf_list_head" }, \
	{ #test "_global_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=16 must be held for bpf_list_head" }, \
	{ #test "_map_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=0 must be held for bpf_list_head" }, \
	{ #test "_inner_map_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=0 must be held for bpf_list_head" },
	TEST(kptr, push_front)
	TEST(kptr, push_back)
	TEST(kptr, pop_front)
	TEST(kptr, pop_back)
	TEST(global, push_front)
	TEST(global, push_back)
	TEST(global, pop_front)
	TEST(global, pop_back)
	TEST(map, push_front)
	TEST(map, push_back)
	TEST(map, pop_front)
	TEST(map, pop_back)
	TEST(inner_map, push_front)
	TEST(inner_map, push_back)
	TEST(inner_map, pop_front)
	TEST(inner_map, pop_back)
#undef TEST
	{ "map_compat_kprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_kretprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_perf", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_raw_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_raw_tp_w", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "obj_type_id_oor", "local type ID argument must be in range [0, U32_MAX]" },
	{ "obj_new_no_composite", "bpf_obj_new type ID argument must be of a struct" },
	{ "obj_new_no_struct", "bpf_obj_new type ID argument must be of a struct" },
	{ "obj_drop_non_zero_off", "R1 must have zero offset when passed to release func" },
	{ "new_null_ret", "R0 invalid mem access 'ptr_or_null_'" },
	{ "obj_new_acq", "Unreleased reference id=" },
	{ "use_after_drop", "invalid mem access 'scalar'" },
	{ "ptr_walk_scalar", "type=scalar expected=percpu_ptr_" },
	{ "direct_read_lock", "direct access to bpf_spin_lock is disallowed" },
	{ "direct_write_lock", "direct access to bpf_spin_lock is disallowed" },
	{ "direct_read_head", "direct access to bpf_list_head is disallowed" },
	{ "direct_write_head", "direct access to bpf_list_head is disallowed" },
	{ "direct_read_node", "direct access to bpf_list_node is disallowed" },
	{ "direct_write_node", "direct access to bpf_list_node is disallowed" },
	{ "use_after_unlock_push_front", "invalid mem access 'scalar'" },
	{ "use_after_unlock_push_back", "invalid mem access 'scalar'" },
	{ "double_push_front", "arg#1 expected pointer to allocated object" },
	{ "double_push_back", "arg#1 expected pointer to allocated object" },
	{ "no_node_value_type", "bpf_list_node not found at offset=0" },
	{ "incorrect_value_type",
	  "operation on bpf_list_head expects arg#1 bpf_list_node at offset=48 in struct foo, "
	  "but arg is at offset=0 in struct bar" },
	{ "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
	{ "incorrect_node_off1", "bpf_list_node not found at offset=49" },
	{ "incorrect_node_off2", "arg#1 offset=0, but expected bpf_list_node at offset=48 in struct foo" },
	{ "no_head_type", "bpf_list_head not found at offset=0" },
	{ "incorrect_head_var_off1", "R1 doesn't have constant offset" },
	{ "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
	{ "incorrect_head_off1", "bpf_list_head not found at offset=25" },
	{ "incorrect_head_off2", "bpf_list_head not found at offset=1" },
	{ "pop_front_off",
	  "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) "
	  "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n"
	  "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
	{ "pop_back_off",
	  "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) "
	  "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n"
	  "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
};

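/*
 * Load only @prog_name from the fail skeleton and check that the verifier
 * rejects it with a log containing @err_msg.
 */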
static void test_linked_list_fail_prog(const char *prog_name, const char *err_msg)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
		    .kernel_log_size = sizeof(log_buf),
		    .kernel_log_level = 1);
	struct linked_list_fail *skel;
	struct bpf_program *prog;
	int ret;

	skel = linked_list_fail__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "linked_list_fail__open_opts"))
		return;

	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
		goto end;

	bpf_program__set_autoload(prog, true);

	ret = linked_list_fail__load(skel);
	if (!ASSERT_ERR(ret, "linked_list_fail__load must fail"))
		goto end;

	if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
		fprintf(stderr, "Expected: %s\n", err_msg);
		fprintf(stderr, "Verifier: %s\n", log_buf);
	}

end:
	linked_list_fail__destroy(skel);
}

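/*
 * Overwrite the value of element 0 with 0xff bytes; the map update forces
 * the kernel to check and free any list elements still linked through the
 * value's special fields.
 */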
static void clear_fields(struct bpf_map *map)
{
	char buf[24];
	int key = 0;

	memset(buf, 0xff, sizeof(buf));
	ASSERT_OK(bpf_map__update_elem(map, &key, sizeof(key), buf, sizeof(buf), 0), "check_and_free_fields");
}

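/* Groups of success programs to run; TEST_ALL falls through all of them. */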
enum {
	TEST_ALL,
	PUSH_POP,
	PUSH_POP_MULT,
	LIST_IN_LIST,
};

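/*
 * Run the selected group of programs from the linked_list skeleton and check
 * that each returns 0. With @leave_in_map, pushed elements are left in place
 * instead of being cleared via a map update after each run.
 */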
static void test_linked_list_success(int mode, bool leave_in_map)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);
	struct linked_list *skel;
	int ret;

	skel = linked_list__open_and_load();
	if (!ASSERT_OK_PTR(skel, "linked_list__open_and_load"))
		return;

	if (mode == LIST_IN_LIST)
		goto lil;
	if (mode == PUSH_POP_MULT)
		goto ppm;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop), &opts);
	ASSERT_OK(ret, "map_list_push_pop");
	ASSERT_OK(opts.retval, "map_list_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.array_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop), &opts);
	ASSERT_OK(ret, "inner_map_list_push_pop");
	ASSERT_OK(opts.retval, "inner_map_list_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.inner_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop), &opts);
	ASSERT_OK(ret, "global_list_push_pop");
	ASSERT_OK(opts.retval, "global_list_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	if (mode == PUSH_POP)
		goto end;

ppm:
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop_multiple), &opts);
	ASSERT_OK(ret, "map_list_push_pop_multiple");
	ASSERT_OK(opts.retval, "map_list_push_pop_multiple retval");
	if (!leave_in_map)
		clear_fields(skel->maps.array_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop_multiple), &opts);
	ASSERT_OK(ret, "inner_map_list_push_pop_multiple");
	ASSERT_OK(opts.retval, "inner_map_list_push_pop_multiple retval");
	if (!leave_in_map)
		clear_fields(skel->maps.inner_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_multiple), &opts);
	ASSERT_OK(ret, "global_list_push_pop_multiple");
	ASSERT_OK(opts.retval, "global_list_push_pop_multiple retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	if (mode == PUSH_POP_MULT)
		goto end;

lil:
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_in_list), &opts);
	ASSERT_OK(ret, "map_list_in_list");
	ASSERT_OK(opts.retval, "map_list_in_list retval");
	if (!leave_in_map)
		clear_fields(skel->maps.array_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_in_list), &opts);
	ASSERT_OK(ret, "inner_map_list_in_list");
	ASSERT_OK(opts.retval, "inner_map_list_in_list retval");
	if (!leave_in_map)
		clear_fields(skel->maps.inner_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_in_list), &opts);
	ASSERT_OK(ret, "global_list_in_list");
	ASSERT_OK(opts.retval, "global_list_in_list retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);
end:
	linked_list__destroy(skel);
}

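/* BTF type IDs assigned by init_btf(); "int" takes ID 1. */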
#define SPIN_LOCK 2
#define LIST_HEAD 3
#define LIST_NODE 4

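/*
 * Build a minimal BTF containing int, bpf_spin_lock, bpf_list_head and
 * bpf_list_node, so each subtest can compose its own structs on top.
 * Note that btf__add_field() takes the field offset in bits.
 */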
static struct btf *init_btf(void)
{
	int id, lid, hid, nid;
	struct btf *btf;

	btf = btf__new_empty();
	if (!ASSERT_OK_PTR(btf, "btf__new_empty"))
		return NULL;
	id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
	if (!ASSERT_EQ(id, 1, "btf__add_int"))
		goto end;
	lid = btf__add_struct(btf, "bpf_spin_lock", 4);
	if (!ASSERT_EQ(lid, SPIN_LOCK, "btf__add_struct bpf_spin_lock"))
		goto end;
	hid = btf__add_struct(btf, "bpf_list_head", 16);
	if (!ASSERT_EQ(hid, LIST_HEAD, "btf__add_struct bpf_list_head"))
		goto end;
	nid = btf__add_struct(btf, "bpf_list_node", 24);
	if (!ASSERT_EQ(nid, LIST_NODE, "btf__add_struct bpf_list_node"))
		goto end;
	return btf;
end:
	btf__free(btf);
	return NULL;
}

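/*
 * A struct carrying both a bpf_list_node and a bpf_rb_node is only accepted
 * when it also has a bpf_refcount field; otherwise the kernel rejects the
 * BTF with -EINVAL.
 */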
static void list_and_rb_node_same_struct(bool refcount_field)
{
	int bpf_rb_node_btf_id, bpf_refcount_btf_id, foo_btf_id;
	struct btf *btf;
	int id, err;

	btf = init_btf();
	if (!ASSERT_OK_PTR(btf, "init_btf"))
		return;

	bpf_rb_node_btf_id = btf__add_struct(btf, "bpf_rb_node", 32);
	if (!ASSERT_GT(bpf_rb_node_btf_id, 0, "btf__add_struct bpf_rb_node"))
		return;

	if (refcount_field) {
		bpf_refcount_btf_id = btf__add_struct(btf, "bpf_refcount", 4);
		if (!ASSERT_GT(bpf_refcount_btf_id, 0, "btf__add_struct bpf_refcount"))
			return;
	}

	id = btf__add_struct(btf, "bar", refcount_field ? 60 : 56);
	if (!ASSERT_GT(id, 0, "btf__add_struct bar"))
		return;
	err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
	if (!ASSERT_OK(err, "btf__add_field bar::a"))
		return;
	err = btf__add_field(btf, "c", bpf_rb_node_btf_id, 192, 0);
	if (!ASSERT_OK(err, "btf__add_field bar::c"))
		return;
	if (refcount_field) {
		err = btf__add_field(btf, "ref", bpf_refcount_btf_id, 448, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::ref"))
			return;
	}

	foo_btf_id = btf__add_struct(btf, "foo", 20);
	if (!ASSERT_GT(foo_btf_id, 0, "btf__add_struct foo"))
		return;
	err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
	if (!ASSERT_OK(err, "btf__add_field foo::a"))
		return;
	err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
	if (!ASSERT_OK(err, "btf__add_field foo::b"))
		return;
	id = btf__add_decl_tag(btf, "contains:bar:a", foo_btf_id, 0);
	if (!ASSERT_GT(id, 0, "btf__add_decl_tag contains:bar:a"))
		return;

	err = btf__load_into_kernel(btf);
	ASSERT_EQ(err, refcount_field ? 0 : -EINVAL, "check btf");
	btf__free(btf);
}

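/*
 * Hand-construct valid and invalid BTF layouts and check the error code the
 * kernel returns on load; the "owning | owned" subtests exercise detection
 * of cycles in the ownership graph.
 */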
static void test_btf(void)
{
	struct btf *btf = NULL;
	int id, err;

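	/* More than one bpf_spin_lock in a struct is rejected with -E2BIG. */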
	while (test__start_subtest("btf: too many locks")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 32, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -E2BIG, "check btf");
		btf__free(btf);
		break;
	}

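	/* A list_head whose owner has no bpf_spin_lock at all. */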
	while (test__start_subtest("btf: missing lock")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 16);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:a", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:baz:a"))
			break;
		id = btf__add_struct(btf, "baz", 16);
		if (!ASSERT_EQ(id, 7, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

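	/* Several special fields placed at the same offset. */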
	while (test__start_subtest("btf: bad offset")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 36);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EEXIST, "check btf");
		btf__free(btf);
		break;
	}

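	/* A bpf_list_head without a "contains:..." decl tag. */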
	while (test__start_subtest("btf: missing contains:")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

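	/* The decl tag names a value struct that does not exist. */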
	while (test__start_subtest("btf: missing struct")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:bar", 5, 1);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:bar"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ENOENT, "check btf");
		btf__free(btf);
		break;
	}

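	/* The decl tag names a field that foo does not have. */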
	while (test__start_subtest("btf: missing node")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:c", 5, 1);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:c"))
			break;

		err = btf__load_into_kernel(btf);
		btf__free(btf);
		ASSERT_EQ(err, -ENOENT, "check btf");
		break;
	}

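	/* The named field exists but is not a bpf_list_node. */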
	while (test__start_subtest("btf: node incorrect type")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 20);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
			break;
		id = btf__add_struct(btf, "bar", 4);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

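	/* Two bpf_list_node fields named "b" make "contains:foo:b" ambiguous. */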
	while (test__start_subtest("btf: multiple bpf_list_node with name b")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 52);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 256, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b (dup)"))
			break;
		err = btf__add_field(btf, "d", SPIN_LOCK, 384, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::d"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

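	/* foo's list_head holds foo itself: a self-referential (AA) cycle. */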
	while (test__start_subtest("btf: owning | owned AA cycle")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 44);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

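	/* foo owns bar and bar owns foo: an ABA cycle. */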
	while (test__start_subtest("btf: owning | owned ABA cycle")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 44);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

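	/* A simple acyclic owner -> owned edge is accepted. */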
	while (test__start_subtest("btf: owning -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 28);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
			break;
		id = btf__add_struct(btf, "bar", 24);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, 0, "check btf");
		btf__free(btf);
		break;
	}

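	/* A two-level acyclic chain, rooted in a type that is not itself owned. */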
	while (test__start_subtest("btf: owning -> owning | owned -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 28);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
			break;
		id = btf__add_struct(btf, "baz", 24);
		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, 0, "check btf");
		btf__free(btf);
		break;
	}

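	/* Unlike the previous chain, the root foo is itself owned (it has a
	 * list_node), so the chain could form a cycle at runtime and is
	 * rejected.
	 */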
	while (test__start_subtest("btf: owning | owned -> owning | owned -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 44);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
			break;
		id = btf__add_struct(btf, "baz", 24);
		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

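	/* The same owned-and-owning pattern one level deeper in the chain is
	 * likewise rejected.
	 */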
	while (test__start_subtest("btf: owning -> owning | owned -> owning | owned -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 20);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:b", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:b"))
			break;
		id = btf__add_struct(btf, "baz", 44);
		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:bam:a", 9, 0);
		if (!ASSERT_EQ(id, 10, "btf__add_decl_tag contains:bam:a"))
			break;
		id = btf__add_struct(btf, "bam", 24);
		if (!ASSERT_EQ(id, 11, "btf__add_struct bam"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bam::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: list_node and rb_node in same struct")) {
		list_and_rb_node_same_struct(true);
		break;
	}

	while (test__start_subtest("btf: list_node and rb_node in same struct, no bpf_refcount")) {
		list_and_rb_node_same_struct(false);
		break;
	}
}

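/* Entry point: run the failure table, the BTF checks and the success groups. */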
void test_linked_list(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(linked_list_fail_tests); i++) {
		if (!test__start_subtest(linked_list_fail_tests[i].prog_name))
			continue;
		test_linked_list_fail_prog(linked_list_fail_tests[i].prog_name,
					   linked_list_fail_tests[i].err_msg);
	}
	test_btf();
	test_linked_list_success(PUSH_POP, false);
	test_linked_list_success(PUSH_POP, true);
	test_linked_list_success(PUSH_POP_MULT, false);
	test_linked_list_success(PUSH_POP_MULT, true);
	test_linked_list_success(LIST_IN_LIST, false);
	test_linked_list_success(LIST_IN_LIST, true);
	test_linked_list_success(TEST_ALL, false);
}