// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <test_progs.h>
#include "cgrp_ls_tp_btf.skel.h"
#include "cgrp_ls_recursion.skel.h"
#include "cgrp_ls_attach_cgroup.skel.h"
#include "cgrp_ls_negative.skel.h"
#include "cgrp_ls_sleepable.skel.h"
#include "network_helpers.h"
#include "cgroup_helpers.h"

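/* Must match the value layout of the socket_cookies cgroup local
 * storage map populated by the cgrp_ls_attach_cgroup BPF programs;
 * test_attach_cgroup() below reads an entry into this struct.
 */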
struct socket_cookie {
	__u64 cookie_key;
	__u64 cookie_value;
};

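/* Exercise basic cgroup local storage operations from user space
 * (update/lookup/delete in map_b, keyed by cgroup fd), then attach the
 * tp_btf sys_enter/sys_exit programs and verify their counters for
 * this task.
 */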
static void test_tp_btf(int cgroup_fd)
{
	struct cgrp_ls_tp_btf *skel;
	long val1 = 1, val2 = 0;
	int err;

	skel = cgrp_ls_tp_btf__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
		return;

	/* populate a value in map_b */
	err = bpf_map_update_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val1, BPF_ANY);
	if (!ASSERT_OK(err, "map_update_elem"))
		goto out;

	/* check value */
	err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val2);
	if (!ASSERT_OK(err, "map_lookup_elem"))
		goto out;
	if (!ASSERT_EQ(val2, 1, "map_lookup_elem, invalid val"))
		goto out;

	/* delete value */
	err = bpf_map_delete_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd);
	if (!ASSERT_OK(err, "map_delete_elem"))
		goto out;

	skel->bss->target_pid = syscall(SYS_gettid);

	err = cgrp_ls_tp_btf__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto out;

	syscall(SYS_gettid);
	syscall(SYS_gettid);

	skel->bss->target_pid = 0;

	/* 3x syscalls: 1x attach and 2x gettid */
	ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt");
	ASSERT_EQ(skel->bss->exit_cnt, 3, "exit_cnt");
	ASSERT_EQ(skel->bss->mismatch_cnt, 0, "mismatch_cnt");
out:
	cgrp_ls_tp_btf__destroy(skel);
}

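/* Attach the two cgroup programs plus a tracing program, drive a TCP
 * connection over ::1, and verify that the cookie value they stored in
 * the socket_cookies map matches the expected encoding derived from
 * the client's bound port: (port << 8) | 0xFF.
 */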
static void test_attach_cgroup(int cgroup_fd)
{
	int server_fd = 0, client_fd = 0, err = 0;
	socklen_t addr_len = sizeof(struct sockaddr_in6);
	struct cgrp_ls_attach_cgroup *skel;
	__u32 cookie_expected_value;
	struct sockaddr_in6 addr;
	struct socket_cookie val;

	skel = cgrp_ls_attach_cgroup__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	skel->links.set_cookie = bpf_program__attach_cgroup(
		skel->progs.set_cookie, cgroup_fd);
	if (!ASSERT_OK_PTR(skel->links.set_cookie, "prog_attach"))
		goto out;

	skel->links.update_cookie_sockops = bpf_program__attach_cgroup(
		skel->progs.update_cookie_sockops, cgroup_fd);
	if (!ASSERT_OK_PTR(skel->links.update_cookie_sockops, "prog_attach"))
		goto out;

	skel->links.update_cookie_tracing = bpf_program__attach(
		skel->progs.update_cookie_tracing);
	if (!ASSERT_OK_PTR(skel->links.update_cookie_tracing, "prog_attach"))
		goto out;

	server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
	if (!ASSERT_GE(server_fd, 0, "start_server"))
		goto out;

	client_fd = connect_to_fd(server_fd, 0);
	if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
		goto close_server_fd;

	err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.socket_cookies),
				  &cgroup_fd, &val);
	if (!ASSERT_OK(err, "map_lookup(socket_cookies)"))
		goto close_client_fd;

	err = getsockname(client_fd, (struct sockaddr *)&addr, &addr_len);
	if (!ASSERT_OK(err, "getsockname"))
		goto close_client_fd;

	cookie_expected_value = (ntohs(addr.sin6_port) << 8) | 0xFF;
	ASSERT_EQ(val.cookie_value, cookie_expected_value, "cookie_value");

close_client_fd:
	close(client_fd);
close_server_fd:
	close(server_fd);
out:
	cgrp_ls_attach_cgroup__destroy(skel);
}

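/* The attached programs nest cgroup local storage accesses; a single
 * traced syscall must complete without deadlocking on recursion.
 */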
static void test_recursion(int cgroup_fd)
{
	struct cgrp_ls_recursion *skel;
	int err;

	skel = cgrp_ls_recursion__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
		return;

	err = cgrp_ls_recursion__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto out;

	/* trigger sys_enter, make sure it does not cause deadlock */
	syscall(SYS_gettid);

out:
	cgrp_ls_recursion__destroy(skel);
}

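/* Negative test: loading cgrp_ls_negative is expected to fail, so
 * open_and_load must return an error.
 */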
static void test_negative(void)
{
	struct cgrp_ls_negative *skel;

	skel = cgrp_ls_negative__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "skel_open_and_load")) {
		cgrp_ls_negative__destroy(skel);
		return;
	}
}

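/* Attach the sleepable cgroup_iter program to this cgroup only
 * (BPF_CGROUP_ITER_SELF_ONLY), read from the iterator fd to run the
 * program, and check that it recorded the expected cgroup id.
 */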
static void test_cgroup_iter_sleepable(int cgroup_fd, __u64 cgroup_id)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct cgrp_ls_sleepable *skel;
	struct bpf_link *link;
	int err, iter_fd;
	char buf[16];

	skel = cgrp_ls_sleepable__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	bpf_program__set_autoload(skel->progs.cgroup_iter, true);
	err = cgrp_ls_sleepable__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto out;

	memset(&linfo, 0, sizeof(linfo));
	linfo.cgroup.cgroup_fd = cgroup_fd;
	linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.cgroup_iter, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "iter_create"))
		goto free_link;

	/* trigger the program run */
	(void)read(iter_fd, buf, sizeof(buf));

	ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id");

	close(iter_fd);
free_link:
	/* the iter link is created manually, so the skeleton destructor
	 * will not clean it up for us
	 */
	bpf_link__destroy(link);
out:
	cgrp_ls_sleepable__destroy(skel);
}

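/* yes_rcu_lock holds the RCU read lock around its cgroup local storage
 * access, so this sleepable program must load; once attached, a
 * getpgid trigger from the target task should record the expected
 * cgroup id.
 */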
static void test_yes_rcu_lock(__u64 cgroup_id)
{
	struct cgrp_ls_sleepable *skel;
	int err;

	skel = cgrp_ls_sleepable__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	skel->bss->target_pid = syscall(SYS_gettid);

	bpf_program__set_autoload(skel->progs.yes_rcu_lock, true);
	err = cgrp_ls_sleepable__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto out;

	err = cgrp_ls_sleepable__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto out;

	syscall(SYS_getpgid);

	ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id");
out:
	cgrp_ls_sleepable__destroy(skel);
}

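/* no_rcu_lock accesses cgroup local storage from a sleepable program
 * without RCU protection, so the verifier is expected to reject it at
 * load time.
 */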
static void test_no_rcu_lock(void)
{
	struct cgrp_ls_sleepable *skel;
	int err;

	skel = cgrp_ls_sleepable__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	bpf_program__set_autoload(skel->progs.no_rcu_lock, true);
	err = cgrp_ls_sleepable__load(skel);
	ASSERT_ERR(err, "skel_load");

	cgrp_ls_sleepable__destroy(skel);
}

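/* Entry point: create and join a dedicated cgroup, resolve its id, and
 * run each subtest against it.
 */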
void test_cgrp_local_storage(void)
{
	__u64 cgroup_id;
	int cgroup_fd;

	cgroup_fd = test__join_cgroup("/cgrp_local_storage");
	if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /cgrp_local_storage"))
		return;

	cgroup_id = get_cgroup_id("/cgrp_local_storage");
	if (test__start_subtest("tp_btf"))
		test_tp_btf(cgroup_fd);
	if (test__start_subtest("attach_cgroup"))
		test_attach_cgroup(cgroup_fd);
	if (test__start_subtest("recursion"))
		test_recursion(cgroup_fd);
	if (test__start_subtest("negative"))
		test_negative();
	if (test__start_subtest("cgroup_iter_sleepable"))
		test_cgroup_iter_sleepable(cgroup_fd, cgroup_id);
	if (test__start_subtest("yes_rcu_lock"))
		test_yes_rcu_lock(cgroup_id);
	if (test__start_subtest("no_rcu_lock"))
		test_no_rcu_lock();

	close(cgroup_fd);
}