// SPDX-License-Identifier: GPL-2.0
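/* Negative tests for kptr fields in BPF map values: every program in this
 * file is expected to fail verification with the error named in its __msg()
 * annotation.
 */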
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"

struct map_value {
	char buf[8];
	struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr;
	struct prog_test_ref_kfunc __kptr *ref_ptr;
	struct prog_test_member __kptr *ref_memb_ptr;
};

struct array_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} array_map SEC(".maps");

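/* A kptr field may only be read or written with 8-byte (BPF_DW) accesses;
 * this 4-byte store must be rejected.
 */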
SEC("?tc")
__failure __msg("kptr access size must be BPF_DW")
int size_not_bpf_dw(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	*(u32 *)&v->unref_ptr = 0;
	return 0;
}

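/* A store through the map value at a variable offset could land on the kptr
 * field and must be rejected.
 */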
SEC("?tc")
__failure __msg("kptr access cannot have variable offset")
int non_const_var_off(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0, id;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	id = ctx->protocol;
	if (id < 4 || id > 12)
		return 0;
	*(u64 *)((void *)v + id) = 0;

	return 0;
}

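/* bpf_kptr_xchg() requires the kptr field address to have a constant offset
 * within the map value.
 */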
SEC("?tc")
__failure __msg("R1 doesn't have constant offset. kptr has to be")
int non_const_var_off_kptr_xchg(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0, id;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	id = ctx->protocol;
	if (id < 4 || id > 12)
		return 0;
	bpf_kptr_xchg((void *)v + id, NULL);

	return 0;
}

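/* An 8-byte store at offset 7 overlaps the kptr field but is misaligned. */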
SEC("?tc")
__failure __msg("kptr access misaligned expected=8 off=7")
int misaligned_access_write(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	*(void **)((void *)v + 7) = NULL;

	return 0;
}

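/* Same as above, but for a misaligned 8-byte load at offset 1. */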
SEC("?tc")
__failure __msg("kptr access misaligned expected=8 off=1")
int misaligned_access_read(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	return *(u64 *)((void *)v + 1);
}

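/* Storing an untrusted pointer adjusted by a variable offset back into the
 * kptr field must be rejected.
 */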
SEC("?tc")
__failure __msg("variable untrusted_ptr_ access var_off=(0x0; 0x1e0)")
int reject_var_off_store(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *unref_ptr;
	struct map_value *v;
	int key = 0, id;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	unref_ptr = v->unref_ptr;
	if (!unref_ptr)
		return 0;
	id = ctx->protocol;
	if (id < 4 || id > 12)
		return 0;
	unref_ptr += id;
	v->unref_ptr = unref_ptr;

	return 0;
}

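/* After adding a constant offset, the pointer no longer matches the kptr
 * field's BTF type and cannot be stored back.
 */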
SEC("?tc")
__failure __msg("invalid kptr access, R1 type=untrusted_ptr_prog_test_ref_kfunc")
int reject_bad_type_match(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *unref_ptr;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	unref_ptr = v->unref_ptr;
	if (!unref_ptr)
		return 0;
	unref_ptr = (void *)unref_ptr + 4;
	v->unref_ptr = unref_ptr;

	return 0;
}

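/* Loading a __kptr_untrusted yields untrusted_ptr_or_null_, which cannot be
 * passed where a percpu pointer is expected.
 */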
SEC("?tc")
__failure __msg("R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_")
int marked_as_untrusted_or_null(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	bpf_this_cpu_ptr(v->unref_ptr);
	return 0;
}

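/* Object bounds come from the kptr's BTF type: reading past
 * sizeof(struct prog_test_ref_kfunc) must be rejected.
 */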
SEC("?tc")
__failure __msg("access beyond struct prog_test_ref_kfunc at off 32 size 4")
int correct_btf_id_check_size(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *p;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	p = v->unref_ptr;
	if (!p)
		return 0;
	return *(int *)((void *)p + bpf_core_type_size(struct prog_test_ref_kfunc));
}

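/* Pointers obtained by walking an untrusted kptr (p->next) stay untrusted
 * and still cannot be passed to bpf_this_cpu_ptr().
 */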
SEC("?tc")
__failure __msg("R1 type=untrusted_ptr_ expected=percpu_ptr_")
int inherit_untrusted_on_walk(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *unref_ptr;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	unref_ptr = v->unref_ptr;
	if (!unref_ptr)
		return 0;
	unref_ptr = unref_ptr->next;
	bpf_this_cpu_ptr(unref_ptr);
	return 0;
}

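/* bpf_kptr_xchg() is only allowed on referenced __kptr fields, not on
 * __kptr_untrusted ones.
 */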
SEC("?tc")
__failure __msg("off=8 kptr isn't referenced kptr")
int reject_kptr_xchg_on_unref(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	bpf_kptr_xchg(&v->unref_ptr, NULL);
	return 0;
}

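/* A plain load of a referenced kptr yields rcu_ptr_or_null_, which also
 * cannot be passed to bpf_this_cpu_ptr().
 */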
SEC("?tc")
__failure __msg("R1 type=rcu_ptr_or_null_ expected=percpu_ptr_")
int mark_ref_as_untrusted_or_null(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	bpf_this_cpu_ptr(v->ref_ptr);
	return 0;
}

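/* A referenced kptr field may only be updated through bpf_kptr_xchg();
 * direct stores are disallowed.
 */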
SEC("?tc")
__failure __msg("store to referenced kptr disallowed")
int reject_untrusted_store_to_ref(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *p;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	p = v->ref_ptr;
	if (!p)
		return 0;
	/* Checkmate, clang: the volatile store cannot be optimized away */
	*(struct prog_test_ref_kfunc * volatile *)&v->ref_ptr = p;
	return 0;
}

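/* The new value passed to bpf_kptr_xchg() must be a referenced (acquired)
 * pointer; one loaded from the map is not.
 */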
SEC("?tc")
__failure __msg("R2 must be referenced")
int reject_untrusted_xchg(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *p;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	p = v->ref_ptr;
	if (!p)
		return 0;
	bpf_kptr_xchg(&v->ref_ptr, p);
	return 0;
}

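/* The exchanged pointer's BTF type must match the kptr field's declared
 * type (prog_test_member here).
 */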
SEC("?tc")
__failure
__msg("invalid kptr access, R2 type=ptr_prog_test_ref_kfunc expected=ptr_prog_test_member")
int reject_bad_type_xchg(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *ref_ptr;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	ref_ptr = bpf_kfunc_call_test_acquire(&(unsigned long){0});
	if (!ref_ptr)
		return 0;
	bpf_kptr_xchg(&v->ref_memb_ptr, ref_ptr);
	return 0;
}

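/* A pointer to a member inside an acquired object does not match the kptr
 * field's type and cannot be exchanged in.
 */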
SEC("?tc")
__failure __msg("invalid kptr access, R2 type=ptr_prog_test_ref_kfunc")
int reject_member_of_ref_xchg(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *ref_ptr;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	ref_ptr = bpf_kfunc_call_test_acquire(&(unsigned long){0});
	if (!ref_ptr)
		return 0;
	bpf_kptr_xchg(&v->ref_memb_ptr, &ref_ptr->memb);
	return 0;
}

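/* Helpers that write into the map value (bpf_get_current_comm() here would
 * write past buf) must not touch the kptr field.
 */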
SEC("?syscall")
__failure __msg("kptr cannot be accessed indirectly by helper")
int reject_indirect_helper_access(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	bpf_get_current_comm(v, sizeof(v->buf) + 1);
	return 0;
}

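/* Non-static helper verified as a BPF global function; used below to probe
 * indirect writes into the map value.
 */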
__noinline
int write_func(int *p)
{
	return p ? *p = 42 : 0;
}

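/* Passing a map value pointer that overlaps the kptr field to a global
 * function is likewise rejected.
 */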
SEC("?tc")
__failure __msg("kptr cannot be accessed indirectly by helper")
int reject_indirect_global_func_access(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	return write_func((void *)v + 5);
}

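/* The old pointer returned by bpf_kptr_xchg() is a referenced pointer that
 * must be released; discarding it leaves an unreleased reference.
 */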
SEC("?tc")
__failure __msg("Unreleased reference id=5 alloc_insn=")
int kptr_xchg_ref_state(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *p;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
	if (!p)
		return 0;
	bpf_kptr_xchg(&v->ref_ptr, p);
	return 0;
}

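/* The acquired pointer must be NULL-checked before being passed to
 * bpf_kptr_xchg().
 */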
SEC("?tc")
__failure __msg("Possibly NULL pointer passed to helper arg2")
int kptr_xchg_possibly_null(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *p;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 0;

	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});

	/* PTR_TO_BTF_ID | PTR_MAYBE_NULL passed to bpf_kptr_xchg() */
	p = bpf_kptr_xchg(&v->ref_ptr, p);
	if (p)
		bpf_kfunc_call_test_release(p);

	return 0;
}

char _license[] SEC("license") = "GPL";