// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"

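/*
 * Map value carrying both kptr flavors:
 *   - __kptr_untrusted: loads yield untrusted pointers; plain stores are
 *     allowed and no reference is held on the pointee.
 *   - __kptr: holds a reference on the pointee; ownership moves in and out
 *     of the map via bpf_kptr_xchg().
 */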
struct map_value {
	struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr;
	struct prog_test_ref_kfunc __kptr *ref_ptr;
};

struct array_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} array_map SEC(".maps");

struct pcpu_array_map {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} pcpu_array_map SEC(".maps");

struct hash_map {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} hash_map SEC(".maps");

struct pcpu_hash_map {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} pcpu_hash_map SEC(".maps");

struct hash_malloc_map {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} hash_malloc_map SEC(".maps");

struct pcpu_hash_malloc_map {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} pcpu_hash_malloc_map SEC(".maps");

struct lru_hash_map {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} lru_hash_map SEC(".maps");

struct lru_pcpu_hash_map {
	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} lru_pcpu_hash_map SEC(".maps");

struct cgrp_ls_map {
	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct map_value);
} cgrp_ls_map SEC(".maps");

struct task_ls_map {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct map_value);
} task_ls_map SEC(".maps");

struct inode_ls_map {
	__uint(type, BPF_MAP_TYPE_INODE_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct map_value);
} inode_ls_map SEC(".maps");

struct sk_ls_map {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct map_value);
} sk_ls_map SEC(".maps");

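/*
 * Inner maps may carry kptr fields too, so map-in-map variants are covered.
 * Each outer map holds a single inner map of the given type at index 0.
 */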
#define DEFINE_MAP_OF_MAP(map_type, inner_map_type, name)	\
	struct {						\
		__uint(type, map_type);				\
		__uint(max_entries, 1);				\
		__uint(key_size, sizeof(int));			\
		__uint(value_size, sizeof(int));		\
		__array(values, struct inner_map_type);		\
	} name SEC(".maps") = {					\
		.values = { [0] = &inner_map_type },		\
	}

DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_map, array_of_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_map, array_of_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_malloc_map, array_of_hash_malloc_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, lru_hash_map, array_of_lru_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, array_map, hash_of_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_map, hash_of_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_malloc_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps);

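/*
 * Local WRITE_ONCE(): a volatile store, so writes to kptr fields are emitted
 * as single, untorn stores.
 */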
#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))

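/*
 * Exercise loads and plain stores of the untrusted kptr field; the pointer
 * is only dereferenced, never passed to a kfunc.
 */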
static void test_kptr_unref(struct map_value *v)
{
	struct prog_test_ref_kfunc *p;

	p = v->unref_ptr;
	/* store untrusted_ptr_or_null_ */
	WRITE_ONCE(v->unref_ptr, p);
	if (!p)
		return;
	if (p->a + p->b > 100)
		return;
	/* store untrusted_ptr_ */
	WRITE_ONCE(v->unref_ptr, p);
	/* store NULL */
	WRITE_ONCE(v->unref_ptr, NULL);
}

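/*
 * Exercise the referenced kptr field: a loaded pointer is only RCU-protected,
 * while bpf_kptr_xchg() transfers full ownership of a reference into and out
 * of the map.
 */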
static void test_kptr_ref(struct map_value *v)
{
	struct prog_test_ref_kfunc *p;

	p = v->ref_ptr;
	/* store ptr_or_null_ */
	WRITE_ONCE(v->unref_ptr, p);
	if (!p)
		return;
	/*
	 * p is rcu_ptr_prog_test_ref_kfunc, because this non-sleepable prog
	 * runs in an RCU critical section, so p can be passed to a kfunc
	 * that requires KF_RCU.
	 */
	bpf_kfunc_call_test_ref(p);
	if (p->a + p->b > 100)
		return;
	/* store NULL */
	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
	if (!p)
		return;
	/*
	 * p is now trusted_ptr_prog_test_ref_kfunc; it too can be passed to
	 * a kfunc that requires KF_RCU.
	 */
	bpf_kfunc_call_test_ref(p);
	if (p->a + p->b > 100) {
		bpf_kfunc_call_test_release(p);
		return;
	}
	/* store ptr_ */
	WRITE_ONCE(v->unref_ptr, p);
	bpf_kfunc_call_test_release(p);

	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
	if (!p)
		return;
	/* store ptr_ */
	p = bpf_kptr_xchg(&v->ref_ptr, p);
	if (!p)
		return;
	if (p->a + p->b > 100) {
		bpf_kfunc_call_test_release(p);
		return;
	}
	bpf_kfunc_call_test_release(p);
}

static void test_kptr(struct map_value *v)
{
	test_kptr_unref(v);
	test_kptr_ref(v);
}

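/* Run both kptr tests against each regular (non-percpu) map type. */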
SEC("tc")
int test_map_kptr(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

#define TEST(map)					\
	v = bpf_map_lookup_elem(&map, &key);		\
	if (!v)						\
		return 0;				\
	test_kptr(v)

	TEST(array_map);
	TEST(hash_map);
	TEST(hash_malloc_map);
	TEST(lru_hash_map);

#undef TEST
	return 0;
}

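/*
 * The same checks, run on values obtained from cgroup, task, inode and
 * socket local storage.
 */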
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_map_kptr, struct cgroup *cgrp, const char *path)
{
	struct map_value *v;

	v = bpf_cgrp_storage_get(&cgrp_ls_map, cgrp, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (v)
		test_kptr(v);
	return 0;
}

SEC("lsm/inode_unlink")
int BPF_PROG(test_task_map_kptr, struct inode *inode, struct dentry *victim)
{
	struct task_struct *task;
	struct map_value *v;

	task = bpf_get_current_task_btf();
	if (!task)
		return 0;
	v = bpf_task_storage_get(&task_ls_map, task, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (v)
		test_kptr(v);
	return 0;
}

SEC("lsm/inode_unlink")
int BPF_PROG(test_inode_map_kptr, struct inode *inode, struct dentry *victim)
{
	struct map_value *v;

	v = bpf_inode_storage_get(&inode_ls_map, inode, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (v)
		test_kptr(v);
	return 0;
}

SEC("tc")
int test_sk_map_kptr(struct __sk_buff *ctx)
{
	struct map_value *v;
	struct bpf_sock *sk;

	sk = ctx->sk;
	if (!sk)
		return 0;
	v = bpf_sk_storage_get(&sk_ls_map, sk, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (v)
		test_kptr(v);
	return 0;
}

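/* As above, but reach the inner map through each map-in-map variant. */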
SEC("tc")
int test_map_in_map_kptr(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;
	void *map;

#define TEST(map_in_map)				\
	map = bpf_map_lookup_elem(&map_in_map, &key);	\
	if (!map)					\
		return 0;				\
	v = bpf_map_lookup_elem(map, &key);		\
	if (!v)						\
		return 0;				\
	test_kptr(v)

	TEST(array_of_array_maps);
	TEST(array_of_hash_maps);
	TEST(array_of_hash_malloc_maps);
	TEST(array_of_lru_hash_maps);
	TEST(hash_of_array_maps);
	TEST(hash_of_hash_maps);
	TEST(hash_of_hash_malloc_maps);
	TEST(hash_of_lru_hash_maps);

#undef TEST
	return 0;
}

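/*
 * Expected refcount of the shared test object from bpf_testmod, kept in
 * lockstep with every acquire/release in the programs below; it persists
 * across program runs in the object's global data.
 */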
int ref = 1;

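/*
 * Acquire a reference, move it into the map with bpf_kptr_xchg(), and verify
 * the object's refcount after every ownership transfer. A nonzero return
 * identifies the failing step. One reference is intentionally left in the
 * map for test_map_kptr_ref_post().
 */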
static __always_inline
int test_map_kptr_ref_pre(struct map_value *v)
{
	struct prog_test_ref_kfunc *p, *p_st;
	unsigned long arg = 0;
	int ret;

	p = bpf_kfunc_call_test_acquire(&arg);
	if (!p)
		return 1;
	ref++;

	p_st = p->next;
	if (p_st->cnt.refs.counter != ref) {
		ret = 2;
		goto end;
	}

	p = bpf_kptr_xchg(&v->ref_ptr, p);
	if (p) {
		ret = 3;
		goto end;
	}
	if (p_st->cnt.refs.counter != ref)
		return 4;

	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
	if (!p)
		return 5;
	bpf_kfunc_call_test_release(p);
	ref--;
	if (p_st->cnt.refs.counter != ref)
		return 6;

	p = bpf_kfunc_call_test_acquire(&arg);
	if (!p)
		return 7;
	ref++;
	p = bpf_kptr_xchg(&v->ref_ptr, p);
	if (p) {
		ret = 8;
		goto end;
	}
	if (p_st->cnt.refs.counter != ref)
		return 9;
	/* Leave in map */

	return 0;
end:
	ref--;
	bpf_kfunc_call_test_release(p);
	return ret;
}

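/*
 * Take the reference left in the map by test_map_kptr_ref_pre() out and put
 * it back, verifying that the refcount is unchanged by the round trip.
 */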
static __always_inline
int test_map_kptr_ref_post(struct map_value *v)
{
	struct prog_test_ref_kfunc *p, *p_st;

	p_st = v->ref_ptr;
	if (!p_st || p_st->cnt.refs.counter != ref)
		return 1;

	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
	if (!p)
		return 2;
	if (p_st->cnt.refs.counter != ref) {
		bpf_kfunc_call_test_release(p);
		return 3;
	}

	p = bpf_kptr_xchg(&v->ref_ptr, p);
	if (p) {
		bpf_kfunc_call_test_release(p);
		return 4;
	}
	if (p_st->cnt.refs.counter != ref)
		return 5;

	return 0;
}

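/*
 * Look up the value for key 0 (on CPU 0 for percpu maps) and run the _pre
 * checks, propagating any failure code.
 */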
#define TEST(map)					\
	v = bpf_map_lookup_elem(&map, &key);		\
	if (!v)						\
		return -1;				\
	ret = test_map_kptr_ref_pre(v);			\
	if (ret)					\
		return ret;

#define TEST_PCPU(map)					\
	v = bpf_map_lookup_percpu_elem(&map, &key, 0);	\
	if (!v)						\
		return -1;				\
	ret = test_map_kptr_ref_pre(v);			\
	if (ret)					\
		return ret;

SEC("tc")
int test_map_kptr_ref1(struct __sk_buff *ctx)
{
	struct map_value *v, val = {};
	int key = 0, ret;

	bpf_map_update_elem(&hash_map, &key, &val, 0);
	bpf_map_update_elem(&hash_malloc_map, &key, &val, 0);
	bpf_map_update_elem(&lru_hash_map, &key, &val, 0);

	bpf_map_update_elem(&pcpu_hash_map, &key, &val, 0);
	bpf_map_update_elem(&pcpu_hash_malloc_map, &key, &val, 0);
	bpf_map_update_elem(&lru_pcpu_hash_map, &key, &val, 0);

	TEST(array_map);
	TEST(hash_map);
	TEST(hash_malloc_map);
	TEST(lru_hash_map);

	TEST_PCPU(pcpu_array_map);
	TEST_PCPU(pcpu_hash_map);
	TEST_PCPU(pcpu_hash_malloc_map);
	TEST_PCPU(lru_pcpu_hash_map);

	return 0;
}

#undef TEST
#undef TEST_PCPU

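/* Same lookups, but run the _post checks on the reference left in each map. */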
#define TEST(map)					\
	v = bpf_map_lookup_elem(&map, &key);		\
	if (!v)						\
		return -1;				\
	ret = test_map_kptr_ref_post(v);		\
	if (ret)					\
		return ret;

#define TEST_PCPU(map)					\
	v = bpf_map_lookup_percpu_elem(&map, &key, 0);	\
	if (!v)						\
		return -1;				\
	ret = test_map_kptr_ref_post(v);		\
	if (ret)					\
		return ret;

SEC("tc")
int test_map_kptr_ref2(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0, ret;

	TEST(array_map);
	TEST(hash_map);
	TEST(hash_malloc_map);
	TEST(lru_hash_map);

	TEST_PCPU(pcpu_array_map);
	TEST_PCPU(pcpu_hash_map);
	TEST_PCPU(pcpu_hash_malloc_map);
	TEST_PCPU(lru_pcpu_hash_map);

	return 0;
}

#undef TEST
#undef TEST_PCPU

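/* A bare acquire/release pair: the refcount must go up and come back down. */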
SEC("tc")
int test_map_kptr_ref3(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *p;
	unsigned long sp = 0;

	p = bpf_kfunc_call_test_acquire(&sp);
	if (!p)
		return 1;
	ref++;
	if (p->cnt.refs.counter != ref) {
		bpf_kfunc_call_test_release(p);
		return 2;
	}
	bpf_kfunc_call_test_release(p);
	ref--;
	return 0;
}

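/*
 * The same pre/post/delete sequence for task local storage, driven from
 * syscall programs so that userspace can sequence the separate invocations.
 */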
SEC("syscall")
int test_ls_map_kptr_ref1(void *ctx)
{
	struct task_struct *current;
	struct map_value *v;

	current = bpf_get_current_task_btf();
	if (!current)
		return 100;
	v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
	if (v)
		return 150;
	v = bpf_task_storage_get(&task_ls_map, current, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (!v)
		return 200;
	return test_map_kptr_ref_pre(v);
}

SEC("syscall")
int test_ls_map_kptr_ref2(void *ctx)
{
	struct task_struct *current;
	struct map_value *v;

	current = bpf_get_current_task_btf();
	if (!current)
		return 100;
	v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
	if (!v)
		return 200;
	return test_map_kptr_ref_post(v);
}

SEC("syscall")
int test_ls_map_kptr_ref_del(void *ctx)
{
	struct task_struct *current;
	struct map_value *v;

	current = bpf_get_current_task_btf();
	if (!current)
		return 100;
	v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
	if (!v)
		return 200;
	if (!v->ref_ptr)
		return 300;
	return bpf_task_storage_delete(&task_ls_map, current);
}

char _license[] SEC("license") = "GPL";