1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2018 Facebook */
3
4 #include <uapi/linux/btf.h>
5 #include <uapi/linux/types.h>
6 #include <linux/seq_file.h>
7 #include <linux/compiler.h>
8 #include <linux/ctype.h>
9 #include <linux/errno.h>
10 #include <linux/slab.h>
11 #include <linux/anon_inodes.h>
12 #include <linux/file.h>
13 #include <linux/uaccess.h>
14 #include <linux/kernel.h>
15 #include <linux/idr.h>
16 #include <linux/sort.h>
17 #include <linux/bpf_verifier.h>
18 #include <linux/btf.h>
19
20 /* BTF (BPF Type Format) is the metadata format which describes
21 * the data types of BPF programs/maps. Hence, it basically focuses
22 * on the C programming language, which modern BPF primarily
23 * uses.
24 *
25 * ELF Section:
26 * ~~~~~~~~~~~
27 * The BTF data is stored under the ".BTF" ELF section
28 *
29 * struct btf_type:
30 * ~~~~~~~~~~~~~~~
31 * Each 'struct btf_type' object describes a C data type.
32 * Depending on the type it is describing, a 'struct btf_type'
33 * object may be followed by more data. For example,
34 * to describe an array, 'struct btf_type' is followed by
35 * 'struct btf_array'.
36 *
37 * 'struct btf_type' and any extra data following it are
38 * 4 bytes aligned.
39 *
40 * Type section:
41 * ~~~~~~~~~~~~~
42 * The BTF type section contains a list of 'struct btf_type' objects.
43 * Each one describes a C type. Recall from the above section
44 * that a 'struct btf_type' object could be immediately followed by extra
45 * data in order to describe some particular C types.
46 *
47 * type_id:
48 * ~~~~~~~
49 * Each btf_type object is identified by a type_id. The type_id
50 * is implied by the position of the btf_type object in
51 * the BTF type section. The first one has type_id 1. The second
52 * one has type_id 2...etc. Hence, an earlier btf_type has
53 * a smaller type_id.
54 *
55 * A btf_type object may refer to another btf_type object by using
56 * type_id (i.e. the "type" in the "struct btf_type").
57 *
58 * NOTE that we cannot assume any reference-order.
59 * A btf_type object can refer to an earlier btf_type object
60 * but it can also refer to a later btf_type object.
61 *
62 * For example, to describe "const void *", a btf_type
63 * object describing "const" may refer to another btf_type
64 * object describing "void *". This type-reference is done
65 * by specifying type_id:
66 *
67 * [1] CONST (anon) type_id=2
68 * [2] PTR (anon) type_id=0
69 *
70 * The above is the btf_verifier debug log:
71 * - Each line starting with "[?]" describes a btf_type object
72 * - [?] is the type_id of the btf_type object.
73 * - CONST/PTR is the BTF_KIND_XXX
74 * - "(anon)" is the name of the type. It just
75 * happens that CONST and PTR have no name.
76 * - type_id=XXX is the 'u32 type' in btf_type
77 *
78 * NOTE: "void" has type_id 0
79 *
80 * String section:
81 * ~~~~~~~~~~~~~~
82 * The BTF string section contains the names used by the type section.
83 * Each string is referred to by an "offset" from the beginning of the
84 * string section.
85 *
86 * Each string is '\0' terminated.
87 *
88 * The first character in the string section must be '\0'
89 * which is used to mean 'anonymous'. Some btf_type may not
90 * have a name.
91 */
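
/* Example (illustrative sketch, not taken from real BTF data): the C type
 * "int" could be encoded as a single BTF_KIND_INT btf_type immediately
 * followed by one extra u32 of int metadata, with its name stored in the
 * string section:
 *
 *	string section: "\0int\0"               (offset 1 -> "int")
 *
 *	struct btf_type t = {
 *		.name_off = 1,                  (offset of "int" above)
 *		.info     = BTF_KIND_INT << 24, (kind=INT, vlen=0, kind_flag=0)
 *		.size     = 4,                  (byte size of the type)
 *	};
 *	u32 int_data = 0x01000020;              (encoding=SIGNED, offset=0, bits=32)
 *
 * For this layout, BTF_INT_BITS(int_data) == 32, BTF_INT_OFFSET(int_data) == 0
 * and BTF_INT_ENCODING(int_data) == BTF_INT_SIGNED.
 */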
92
93 /* BTF verification:
94 *
95 * To verify BTF data, two passes are needed.
96 *
97 * Pass #1
98 * ~~~~~~~
99 * The first pass is to collect all btf_type objects to
100 * an array: "btf->types".
101 *
102 * Depending on the C type that a btf_type is describing,
103 * a btf_type may be followed by extra data. We don't know
104 * how many btf_types there are and, more importantly, we don't
105 * know where each btf_type is located in the type section.
106 *
107 * Without knowing the location of each type_id, most verifications
108 * cannot be done. e.g. an earlier btf_type may refer to a later
109 * btf_type (recall the "const void *" above), so we cannot
110 * check this type-reference in the first pass.
111 *
112 * The first pass still does some verifications (e.g.
113 * checking the name is a valid offset to the string section).
114 *
115 * Pass #2
116 * ~~~~~~~
117 * The main focus is to resolve a btf_type that refers
118 * to another type.
119 *
120 * We have to ensure the referenced type:
121 * 1) does exist in the BTF (i.e. in btf->types[])
122 * 2) does not cause a loop:
123 * struct A {
124 * struct B b;
125 * };
126 *
127 * struct B {
128 * struct A a;
129 * };
130 *
131 * btf_type_needs_resolve() decides if a btf_type needs
132 * to be resolved.
133 *
134 * The needs_resolve type implements the "resolve()" ops which
135 * essentially does a DFS and detects back edges.
136 *
137 * During resolve (or DFS), different C types have different
138 * "RESOLVED" conditions.
139 *
140 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
141 * members because a member always refers to another
142 * type. A struct's member can be treated as "RESOLVED" if
143 * it refers to a BTF_KIND_PTR. Otherwise, the
144 * following valid C struct would be rejected:
145 *
146 * struct A {
147 * int m;
148 * struct A *a;
149 * };
150 *
151 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
152 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
153 * detect a pointer loop, e.g.:
154 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
155 * ^ |
156 * +-----------------------------------------+
157 *
158 */
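
/* As a concrete (illustrative) example, the "struct A { int m; struct A *a; }"
 * case above could appear in the type section roughly as:
 *
 *	[1] STRUCT A size=16 vlen=2
 *		m type_id=2 bits_offset=0
 *		a type_id=3 bits_offset=64
 *	[2] INT int size=4 nr_bits=32 encoding=SIGNED
 *	[3] PTR (anon) type_id=1
 *
 * While resolving [1], member "a" is treated as RESOLVED as soon as it is
 * seen to be a PTR ([3]); the DFS does not follow [3] back into [1], so the
 * self-referential struct is accepted, whereas two structs containing each
 * other by value are rejected as a back edge.
 */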
159
160 #define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
161 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
162 #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
163 #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
164 #define BITS_ROUNDUP_BYTES(bits) \
165 (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
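
/* For example, BITS_ROUNDDOWN_BYTES(13) == 1 (13 bits fully cover one byte)
 * while BITS_ROUNDUP_BYTES(13) == 2 (two bytes are needed to hold 13 bits);
 * for an exact multiple of 8, both return the same value:
 *
 *	BITS_ROUNDUP_BYTES(8)  == 1
 *	BITS_ROUNDUP_BYTES(9)  == 2
 *	BITS_ROUNDUP_BYTES(64) == 8
 */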
166
167 #define BTF_INFO_MASK 0x8f00ffff
168 #define BTF_INT_MASK 0x0fffffff
169 #define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
170 #define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
171
172 /* 16MB for 64k structs, each with 16 members, and
173 * a few MB of space for the string section.
174 * The hard limit is S32_MAX.
175 */
176 #define BTF_MAX_SIZE (16 * 1024 * 1024)
177
178 #define for_each_member(i, struct_type, member) \
179 for (i = 0, member = btf_type_member(struct_type); \
180 i < btf_type_vlen(struct_type); \
181 i++, member++)
182
183 #define for_each_member_from(i, from, struct_type, member) \
184 for (i = from, member = btf_type_member(struct_type) + from; \
185 i < btf_type_vlen(struct_type); \
186 i++, member++)
187
188 #define for_each_vsi(i, struct_type, member) \
189 for (i = 0, member = btf_type_var_secinfo(struct_type); \
190 i < btf_type_vlen(struct_type); \
191 i++, member++)
192
193 #define for_each_vsi_from(i, from, struct_type, member) \
194 for (i = from, member = btf_type_var_secinfo(struct_type) + from; \
195 i < btf_type_vlen(struct_type); \
196 i++, member++)
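
/* A typical (illustrative) use of the iterators above, e.g. walking the
 * members of a STRUCT/UNION type:
 *
 *	const struct btf_member *member;
 *	u16 i;
 *
 *	for_each_member(i, struct_type, member) {
 *		const struct btf_type *member_type =
 *			btf_type_by_id(btf, member->type);
 *		...
 *	}
 *
 * btf_type_vlen() gives the number of members and btf_type_member() points
 * at the array of 'struct btf_member' that directly follows the
 * 'struct btf_type' object.
 */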
197
198 DEFINE_IDR(btf_idr);
199 DEFINE_SPINLOCK(btf_idr_lock);
200
201 struct btf {
202 void *data;
203 struct btf_type **types;
204 u32 *resolved_ids;
205 u32 *resolved_sizes;
206 const char *strings;
207 void *nohdr_data;
208 struct btf_header hdr;
209 u32 nr_types;
210 u32 types_size;
211 u32 data_size;
212 refcount_t refcnt;
213 u32 id;
214 struct rcu_head rcu;
215 };
216
217 enum verifier_phase {
218 CHECK_META,
219 CHECK_TYPE,
220 };
221
222 struct resolve_vertex {
223 const struct btf_type *t;
224 u32 type_id;
225 u16 next_member;
226 };
227
228 enum visit_state {
229 NOT_VISITED,
230 VISITED,
231 RESOLVED,
232 };
233
234 enum resolve_mode {
235 RESOLVE_TBD, /* To Be Determined */
236 RESOLVE_PTR, /* Resolving for Pointer */
237 RESOLVE_STRUCT_OR_ARRAY, /* Resolving for struct/union
238 * or array
239 */
240 };
241
242 #define MAX_RESOLVE_DEPTH 32
243
244 struct btf_sec_info {
245 u32 off;
246 u32 len;
247 };
248
249 struct btf_verifier_env {
250 struct btf *btf;
251 u8 *visit_states;
252 struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
253 struct bpf_verifier_log log;
254 u32 log_type_id;
255 u32 top_stack;
256 enum verifier_phase phase;
257 enum resolve_mode resolve_mode;
258 };
259
260 static const char * const btf_kind_str[NR_BTF_KINDS] = {
261 [BTF_KIND_UNKN] = "UNKNOWN",
262 [BTF_KIND_INT] = "INT",
263 [BTF_KIND_PTR] = "PTR",
264 [BTF_KIND_ARRAY] = "ARRAY",
265 [BTF_KIND_STRUCT] = "STRUCT",
266 [BTF_KIND_UNION] = "UNION",
267 [BTF_KIND_ENUM] = "ENUM",
268 [BTF_KIND_FWD] = "FWD",
269 [BTF_KIND_TYPEDEF] = "TYPEDEF",
270 [BTF_KIND_VOLATILE] = "VOLATILE",
271 [BTF_KIND_CONST] = "CONST",
272 [BTF_KIND_RESTRICT] = "RESTRICT",
273 [BTF_KIND_FUNC] = "FUNC",
274 [BTF_KIND_FUNC_PROTO] = "FUNC_PROTO",
275 [BTF_KIND_VAR] = "VAR",
276 [BTF_KIND_DATASEC] = "DATASEC",
277 };
278
279 struct btf_kind_operations {
280 s32 (*check_meta)(struct btf_verifier_env *env,
281 const struct btf_type *t,
282 u32 meta_left);
283 int (*resolve)(struct btf_verifier_env *env,
284 const struct resolve_vertex *v);
285 int (*check_member)(struct btf_verifier_env *env,
286 const struct btf_type *struct_type,
287 const struct btf_member *member,
288 const struct btf_type *member_type);
289 int (*check_kflag_member)(struct btf_verifier_env *env,
290 const struct btf_type *struct_type,
291 const struct btf_member *member,
292 const struct btf_type *member_type);
293 void (*log_details)(struct btf_verifier_env *env,
294 const struct btf_type *t);
295 void (*seq_show)(const struct btf *btf, const struct btf_type *t,
296 u32 type_id, void *data, u8 bits_offsets,
297 struct seq_file *m);
298 };
299
300 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
301 static struct btf_type btf_void;
302
303 static int btf_resolve(struct btf_verifier_env *env,
304 const struct btf_type *t, u32 type_id);
305
306 static bool btf_type_is_modifier(const struct btf_type *t)
307 {
308 /* Some of these are not strictly C modifiers,
309 * but they are grouped into the same bucket
310 * as far as BTF is concerned:
311 * a type (t) that refers to another
312 * type through t->type AND whose size cannot
313 * be determined without following t->type.
314 *
315 * ptr does not fall into this bucket
316 * because its size is always sizeof(void *).
317 */
318 switch (BTF_INFO_KIND(t->info)) {
319 case BTF_KIND_TYPEDEF:
320 case BTF_KIND_VOLATILE:
321 case BTF_KIND_CONST:
322 case BTF_KIND_RESTRICT:
323 return true;
324 }
325
326 return false;
327 }
328
329 bool btf_type_is_void(const struct btf_type *t)
330 {
331 return t == &btf_void;
332 }
333
334 static bool btf_type_is_fwd(const struct btf_type *t)
335 {
336 return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
337 }
338
339 static bool btf_type_is_func(const struct btf_type *t)
340 {
341 return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC;
342 }
343
344 static bool btf_type_is_func_proto(const struct btf_type *t)
345 {
346 return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
347 }
348
349 static bool btf_type_nosize(const struct btf_type *t)
350 {
351 return btf_type_is_void(t) || btf_type_is_fwd(t) ||
352 btf_type_is_func(t) || btf_type_is_func_proto(t);
353 }
354
355 static bool btf_type_nosize_or_null(const struct btf_type *t)
356 {
357 return !t || btf_type_nosize(t);
358 }
359
360 /* union is only a special case of struct:
361 * all its members have offsetof(member) == 0
362 */
363 static bool btf_type_is_struct(const struct btf_type *t)
364 {
365 u8 kind = BTF_INFO_KIND(t->info);
366
367 return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
368 }
369
370 static bool __btf_type_is_struct(const struct btf_type *t)
371 {
372 return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
373 }
374
375 static bool btf_type_is_array(const struct btf_type *t)
376 {
377 return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
378 }
379
380 static bool btf_type_is_ptr(const struct btf_type *t)
381 {
382 return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
383 }
384
385 static bool btf_type_is_int(const struct btf_type *t)
386 {
387 return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
388 }
389
390 static bool btf_type_is_var(const struct btf_type *t)
391 {
392 return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
393 }
394
395 static bool btf_type_is_datasec(const struct btf_type *t)
396 {
397 return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
398 }
399
400 /* Types that act only as a source, not as a sink or intermediate
401 * type when resolving.
402 */
403 static bool btf_type_is_resolve_source_only(const struct btf_type *t)
404 {
405 return btf_type_is_var(t) ||
406 btf_type_is_datasec(t);
407 }
408
409 /* What types need to be resolved?
410 *
411 * btf_type_is_modifier() is an obvious one.
412 *
413 * btf_type_is_struct() because its member refers to
414 * another type (through member->type).
415 *
416 * btf_type_is_var() because the variable refers to
417 * another type. btf_type_is_datasec() holds multiple
418 * btf_type_is_var() types that need resolving.
419 *
420 * btf_type_is_array() because its element (array->type)
421 * refers to another type. An array can be thought of as a
422 * special case of a struct where the same member type is
423 * repeated array->nelems times.
424 */
425 static bool btf_type_needs_resolve(const struct btf_type *t)
426 {
427 return btf_type_is_modifier(t) ||
428 btf_type_is_ptr(t) ||
429 btf_type_is_struct(t) ||
430 btf_type_is_array(t) ||
431 btf_type_is_var(t) ||
432 btf_type_is_datasec(t);
433 }
434
435 /* t->size can be used */
436 static bool btf_type_has_size(const struct btf_type *t)
437 {
438 switch (BTF_INFO_KIND(t->info)) {
439 case BTF_KIND_INT:
440 case BTF_KIND_STRUCT:
441 case BTF_KIND_UNION:
442 case BTF_KIND_ENUM:
443 case BTF_KIND_DATASEC:
444 return true;
445 }
446
447 return false;
448 }
449
450 static const char *btf_int_encoding_str(u8 encoding)
451 {
452 if (encoding == 0)
453 return "(none)";
454 else if (encoding == BTF_INT_SIGNED)
455 return "SIGNED";
456 else if (encoding == BTF_INT_CHAR)
457 return "CHAR";
458 else if (encoding == BTF_INT_BOOL)
459 return "BOOL";
460 else
461 return "UNKN";
462 }
463
464 static u16 btf_type_vlen(const struct btf_type *t)
465 {
466 return BTF_INFO_VLEN(t->info);
467 }
468
469 static bool btf_type_kflag(const struct btf_type *t)
470 {
471 return BTF_INFO_KFLAG(t->info);
472 }
473
474 static u32 btf_member_bit_offset(const struct btf_type *struct_type,
475 const struct btf_member *member)
476 {
477 return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
478 : member->offset;
479 }
480
481 static u32 btf_member_bitfield_size(const struct btf_type *struct_type,
482 const struct btf_member *member)
483 {
484 return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset)
485 : 0;
486 }
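
/* For example (illustrative sketch): with the struct's kind_flag set, a
 * member declared as "int x:5;" that starts 3 bits into the struct would
 * carry the bitfield size in the upper byte of member->offset:
 *
 *	member->offset = (5 << 24) | 3;   (bitfield_size=5, bit_offset=3)
 *
 * so BTF_MEMBER_BITFIELD_SIZE(member->offset) == 5 and
 * BTF_MEMBER_BIT_OFFSET(member->offset) == 3. Without kind_flag,
 * member->offset is the plain bit offset and the bitfield size is
 * reported as 0 by the helper above.
 */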
487
488 static u32 btf_type_int(const struct btf_type *t)
489 {
490 return *(u32 *)(t + 1);
491 }
492
493 static const struct btf_array *btf_type_array(const struct btf_type *t)
494 {
495 return (const struct btf_array *)(t + 1);
496 }
497
498 static const struct btf_member *btf_type_member(const struct btf_type *t)
499 {
500 return (const struct btf_member *)(t + 1);
501 }
502
503 static const struct btf_enum *btf_type_enum(const struct btf_type *t)
504 {
505 return (const struct btf_enum *)(t + 1);
506 }
507
508 static const struct btf_var *btf_type_var(const struct btf_type *t)
509 {
510 return (const struct btf_var *)(t + 1);
511 }
512
513 static const struct btf_var_secinfo *btf_type_var_secinfo(const struct btf_type *t)
514 {
515 return (const struct btf_var_secinfo *)(t + 1);
516 }
517
518 static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
519 {
520 return kind_ops[BTF_INFO_KIND(t->info)];
521 }
522
523 static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
524 {
525 return BTF_STR_OFFSET_VALID(offset) &&
526 offset < btf->hdr.str_len;
527 }
528
529 static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
530 {
531 if ((first ? !isalpha(c) :
532 !isalnum(c)) &&
533 c != '_' &&
534 ((c == '.' && !dot_ok) ||
535 c != '.'))
536 return false;
537 return true;
538 }
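
/* For instance, "_foo1" and "bar_2" are accepted as identifiers; ".maps"
 * is only accepted when dot_ok is true (used for section names); "1foo"
 * and "a-b" are always rejected.
 */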
539
540 static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
541 {
542 /* offset must be valid */
543 const char *src = &btf->strings[offset];
544 const char *src_limit;
545
546 if (!__btf_name_char_ok(*src, true, dot_ok))
547 return false;
548
549 /* set a limit on identifier length */
550 src_limit = src + KSYM_NAME_LEN;
551 src++;
552 while (*src && src < src_limit) {
553 if (!__btf_name_char_ok(*src, false, dot_ok))
554 return false;
555 src++;
556 }
557
558 return !*src;
559 }
560
561 /* Only C-style identifiers are permitted. This can be relaxed if
562 * necessary.
563 */
564 static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
565 {
566 return __btf_name_valid(btf, offset, false);
567 }
568
569 static bool btf_name_valid_section(const struct btf *btf, u32 offset)
570 {
571 return __btf_name_valid(btf, offset, true);
572 }
573
574 static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
575 {
576 if (!offset)
577 return "(anon)";
578 else if (offset < btf->hdr.str_len)
579 return &btf->strings[offset];
580 else
581 return "(invalid-name-offset)";
582 }
583
584 const char *btf_name_by_offset(const struct btf *btf, u32 offset)
585 {
586 if (offset < btf->hdr.str_len)
587 return &btf->strings[offset];
588
589 return NULL;
590 }
591
592 const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
593 {
594 if (type_id > btf->nr_types)
595 return NULL;
596
597 return btf->types[type_id];
598 }
599
600 /*
601 * A regular int is not a bitfield and must be either
602 * u8/u16/u32/u64 or __int128.
603 */
604 static bool btf_type_int_is_regular(const struct btf_type *t)
605 {
606 u8 nr_bits, nr_bytes;
607 u32 int_data;
608
609 int_data = btf_type_int(t);
610 nr_bits = BTF_INT_BITS(int_data);
611 nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
612 if (BITS_PER_BYTE_MASKED(nr_bits) ||
613 BTF_INT_OFFSET(int_data) ||
614 (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
615 nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
616 nr_bytes != (2 * sizeof(u64)))) {
617 return false;
618 }
619
620 return true;
621 }
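
/* For instance, an INT type with nr_bits == 32 and BTF_INT_OFFSET() == 0
 * (a plain "int") is regular, while one with nr_bits == 5 or a non-zero
 * BTF_INT_OFFSET() (a bitfield-style int) is not.
 */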
622
623 /*
624 * Check that given struct member is a regular int with expected
625 * offset and size.
626 */
627 bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
628 const struct btf_member *m,
629 u32 expected_offset, u32 expected_size)
630 {
631 const struct btf_type *t;
632 u32 id, int_data;
633 u8 nr_bits;
634
635 id = m->type;
636 t = btf_type_id_size(btf, &id, NULL);
637 if (!t || !btf_type_is_int(t))
638 return false;
639
640 int_data = btf_type_int(t);
641 nr_bits = BTF_INT_BITS(int_data);
642 if (btf_type_kflag(s)) {
643 u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
644 u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
645
646 /* if kflag set, int should be a regular int and
647 * bit offset should be at byte boundary.
648 */
649 return !bitfield_size &&
650 BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
651 BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
652 }
653
654 if (BTF_INT_OFFSET(int_data) ||
655 BITS_PER_BYTE_MASKED(m->offset) ||
656 BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
657 BITS_PER_BYTE_MASKED(nr_bits) ||
658 BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
659 return false;
660
661 return true;
662 }
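
/* A caller can use this, for example, to check that a BTF-described
 * struct member really is a plain integer of the expected layout: for a
 * "__u32" member at the start of the struct, a call like
 * btf_member_is_reg_int(btf, s, m, 0, 4) would return true.
 */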
663
664 __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
665 const char *fmt, ...)
666 {
667 va_list args;
668
669 va_start(args, fmt);
670 bpf_verifier_vlog(log, fmt, args);
671 va_end(args);
672 }
673
674 __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
675 const char *fmt, ...)
676 {
677 struct bpf_verifier_log *log = &env->log;
678 va_list args;
679
680 if (!bpf_verifier_log_needed(log))
681 return;
682
683 va_start(args, fmt);
684 bpf_verifier_vlog(log, fmt, args);
685 va_end(args);
686 }
687
688 __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
689 const struct btf_type *t,
690 bool log_details,
691 const char *fmt, ...)
692 {
693 struct bpf_verifier_log *log = &env->log;
694 u8 kind = BTF_INFO_KIND(t->info);
695 struct btf *btf = env->btf;
696 va_list args;
697
698 if (!bpf_verifier_log_needed(log))
699 return;
700
701 __btf_verifier_log(log, "[%u] %s %s%s",
702 env->log_type_id,
703 btf_kind_str[kind],
704 __btf_name_by_offset(btf, t->name_off),
705 log_details ? " " : "");
706
707 if (log_details)
708 btf_type_ops(t)->log_details(env, t);
709
710 if (fmt && *fmt) {
711 __btf_verifier_log(log, " ");
712 va_start(args, fmt);
713 bpf_verifier_vlog(log, fmt, args);
714 va_end(args);
715 }
716
717 __btf_verifier_log(log, "\n");
718 }
719
720 #define btf_verifier_log_type(env, t, ...) \
721 __btf_verifier_log_type((env), (t), true, __VA_ARGS__)
722 #define btf_verifier_log_basic(env, t, ...) \
723 __btf_verifier_log_type((env), (t), false, __VA_ARGS__)
724
725 __printf(4, 5)
726 static void btf_verifier_log_member(struct btf_verifier_env *env,
727 const struct btf_type *struct_type,
728 const struct btf_member *member,
729 const char *fmt, ...)
730 {
731 struct bpf_verifier_log *log = &env->log;
732 struct btf *btf = env->btf;
733 va_list args;
734
735 if (!bpf_verifier_log_needed(log))
736 return;
737
738 /* The CHECK_META phase already did a btf dump.
739 *
740 * If member is logged again, it must hit an error in
741 * parsing this member. It is useful to print out which
742 * struct this member belongs to.
743 */
744 if (env->phase != CHECK_META)
745 btf_verifier_log_type(env, struct_type, NULL);
746
747 if (btf_type_kflag(struct_type))
748 __btf_verifier_log(log,
749 "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
750 __btf_name_by_offset(btf, member->name_off),
751 member->type,
752 BTF_MEMBER_BITFIELD_SIZE(member->offset),
753 BTF_MEMBER_BIT_OFFSET(member->offset));
754 else
755 __btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
756 __btf_name_by_offset(btf, member->name_off),
757 member->type, member->offset);
758
759 if (fmt && *fmt) {
760 __btf_verifier_log(log, " ");
761 va_start(args, fmt);
762 bpf_verifier_vlog(log, fmt, args);
763 va_end(args);
764 }
765
766 __btf_verifier_log(log, "\n");
767 }
768
769 __printf(4, 5)
770 static void btf_verifier_log_vsi(struct btf_verifier_env *env,
771 const struct btf_type *datasec_type,
772 const struct btf_var_secinfo *vsi,
773 const char *fmt, ...)
774 {
775 struct bpf_verifier_log *log = &env->log;
776 va_list args;
777
778 if (!bpf_verifier_log_needed(log))
779 return;
780 if (env->phase != CHECK_META)
781 btf_verifier_log_type(env, datasec_type, NULL);
782
783 __btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
784 vsi->type, vsi->offset, vsi->size);
785 if (fmt && *fmt) {
786 __btf_verifier_log(log, " ");
787 va_start(args, fmt);
788 bpf_verifier_vlog(log, fmt, args);
789 va_end(args);
790 }
791
792 __btf_verifier_log(log, "\n");
793 }
794
795 static void btf_verifier_log_hdr(struct btf_verifier_env *env,
796 u32 btf_data_size)
797 {
798 struct bpf_verifier_log *log = &env->log;
799 const struct btf *btf = env->btf;
800 const struct btf_header *hdr;
801
802 if (!bpf_verifier_log_needed(log))
803 return;
804
805 hdr = &btf->hdr;
806 __btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
807 __btf_verifier_log(log, "version: %u\n", hdr->version);
808 __btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
809 __btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
810 __btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
811 __btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
812 __btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
813 __btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
814 __btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
815 }
816
817 static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
818 {
819 struct btf *btf = env->btf;
820
821 /* < 2 because +1 for btf_void which is always in btf->types[0].
822 * btf_void is not accounted in btf->nr_types because btf_void
823 * does not come from the BTF file.
824 */
825 if (btf->types_size - btf->nr_types < 2) {
826 /* Expand 'types' array */
827
828 struct btf_type **new_types;
829 u32 expand_by, new_size;
830
831 if (btf->types_size == BTF_MAX_TYPE) {
832 btf_verifier_log(env, "Exceeded max num of types");
833 return -E2BIG;
834 }
835
836 expand_by = max_t(u32, btf->types_size >> 2, 16);
837 new_size = min_t(u32, BTF_MAX_TYPE,
838 btf->types_size + expand_by);
839
840 new_types = kvcalloc(new_size, sizeof(*new_types),
841 GFP_KERNEL | __GFP_NOWARN);
842 if (!new_types)
843 return -ENOMEM;
844
845 if (btf->nr_types == 0)
846 new_types[0] = &btf_void;
847 else
848 memcpy(new_types, btf->types,
849 sizeof(*btf->types) * (btf->nr_types + 1));
850
851 kvfree(btf->types);
852 btf->types = new_types;
853 btf->types_size = new_size;
854 }
855
856 btf->types[++(btf->nr_types)] = t;
857
858 return 0;
859 }
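
/* Growth example (illustrative): each expansion adds max(types_size / 4, 16)
 * slots, so types_size grows roughly as 16, 32, 48, 64, 80, 100, 125, ...,
 * capped at BTF_MAX_TYPE entries; slot 0 is always reserved for btf_void
 * and is not counted in btf->nr_types.
 */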
860
861 static int btf_alloc_id(struct btf *btf)
862 {
863 int id;
864
865 idr_preload(GFP_KERNEL);
866 spin_lock_bh(&btf_idr_lock);
867 id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
868 if (id > 0)
869 btf->id = id;
870 spin_unlock_bh(&btf_idr_lock);
871 idr_preload_end();
872
873 if (WARN_ON_ONCE(!id))
874 return -ENOSPC;
875
876 return id > 0 ? 0 : id;
877 }
878
879 static void btf_free_id(struct btf *btf)
880 {
881 unsigned long flags;
882
883 /*
884 * In map-in-map, calling map_delete_elem() on outer
885 * map will call bpf_map_put on the inner map.
886 * It will then eventually call btf_free_id()
887 * on the inner map. Some of the map_delete_elem()
888 * implementations may run with irqs disabled, so
889 * we need to use the _irqsave() version instead
890 * of the _bh() version.
891 */
892 spin_lock_irqsave(&btf_idr_lock, flags);
893 idr_remove(&btf_idr, btf->id);
894 spin_unlock_irqrestore(&btf_idr_lock, flags);
895 }
896
897 static void btf_free(struct btf *btf)
898 {
899 kvfree(btf->types);
900 kvfree(btf->resolved_sizes);
901 kvfree(btf->resolved_ids);
902 kvfree(btf->data);
903 kfree(btf);
904 }
905
906 static void btf_free_rcu(struct rcu_head *rcu)
907 {
908 struct btf *btf = container_of(rcu, struct btf, rcu);
909
910 btf_free(btf);
911 }
912
913 void btf_put(struct btf *btf)
914 {
915 if (btf && refcount_dec_and_test(&btf->refcnt)) {
916 btf_free_id(btf);
917 call_rcu(&btf->rcu, btf_free_rcu);
918 }
919 }
920
921 static int env_resolve_init(struct btf_verifier_env *env)
922 {
923 struct btf *btf = env->btf;
924 u32 nr_types = btf->nr_types;
925 u32 *resolved_sizes = NULL;
926 u32 *resolved_ids = NULL;
927 u8 *visit_states = NULL;
928
929 /* +1 for btf_void */
930 resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
931 GFP_KERNEL | __GFP_NOWARN);
932 if (!resolved_sizes)
933 goto nomem;
934
935 resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
936 GFP_KERNEL | __GFP_NOWARN);
937 if (!resolved_ids)
938 goto nomem;
939
940 visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
941 GFP_KERNEL | __GFP_NOWARN);
942 if (!visit_states)
943 goto nomem;
944
945 btf->resolved_sizes = resolved_sizes;
946 btf->resolved_ids = resolved_ids;
947 env->visit_states = visit_states;
948
949 return 0;
950
951 nomem:
952 kvfree(resolved_sizes);
953 kvfree(resolved_ids);
954 kvfree(visit_states);
955 return -ENOMEM;
956 }
957
958 static void btf_verifier_env_free(struct btf_verifier_env *env)
959 {
960 kvfree(env->visit_states);
961 kfree(env);
962 }
963
964 static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
965 const struct btf_type *next_type)
966 {
967 switch (env->resolve_mode) {
968 case RESOLVE_TBD:
969 /* int, enum or void is a sink */
970 return !btf_type_needs_resolve(next_type);
971 case RESOLVE_PTR:
972 /* int, enum, void, struct, array, func or func_proto is a sink
973 * for ptr
974 */
975 return !btf_type_is_modifier(next_type) &&
976 !btf_type_is_ptr(next_type);
977 case RESOLVE_STRUCT_OR_ARRAY:
978 /* int, enum, void, ptr, func or func_proto is a sink
979 * for struct and array
980 */
981 return !btf_type_is_modifier(next_type) &&
982 !btf_type_is_array(next_type) &&
983 !btf_type_is_struct(next_type);
984 default:
985 BUG();
986 }
987 }
988
989 static bool env_type_is_resolved(const struct btf_verifier_env *env,
990 u32 type_id)
991 {
992 return env->visit_states[type_id] == RESOLVED;
993 }
994
995 static int env_stack_push(struct btf_verifier_env *env,
996 const struct btf_type *t, u32 type_id)
997 {
998 struct resolve_vertex *v;
999
1000 if (env->top_stack == MAX_RESOLVE_DEPTH)
1001 return -E2BIG;
1002
1003 if (env->visit_states[type_id] != NOT_VISITED)
1004 return -EEXIST;
1005
1006 env->visit_states[type_id] = VISITED;
1007
1008 v = &env->stack[env->top_stack++];
1009 v->t = t;
1010 v->type_id = type_id;
1011 v->next_member = 0;
1012
1013 if (env->resolve_mode == RESOLVE_TBD) {
1014 if (btf_type_is_ptr(t))
1015 env->resolve_mode = RESOLVE_PTR;
1016 else if (btf_type_is_struct(t) || btf_type_is_array(t))
1017 env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
1018 }
1019
1020 return 0;
1021 }
1022
1023 static void env_stack_set_next_member(struct btf_verifier_env *env,
1024 u16 next_member)
1025 {
1026 env->stack[env->top_stack - 1].next_member = next_member;
1027 }
1028
1029 static void env_stack_pop_resolved(struct btf_verifier_env *env,
1030 u32 resolved_type_id,
1031 u32 resolved_size)
1032 {
1033 u32 type_id = env->stack[--(env->top_stack)].type_id;
1034 struct btf *btf = env->btf;
1035
1036 btf->resolved_sizes[type_id] = resolved_size;
1037 btf->resolved_ids[type_id] = resolved_type_id;
1038 env->visit_states[type_id] = RESOLVED;
1039 }
1040
1041 static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
1042 {
1043 return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
1044 }
1045
1046 /* The input param "type_id" must point to a needs_resolve type */
1047 static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
1048 u32 *type_id)
1049 {
1050 *type_id = btf->resolved_ids[*type_id];
1051 return btf_type_by_id(btf, *type_id);
1052 }
1053
1054 const struct btf_type *btf_type_id_size(const struct btf *btf,
1055 u32 *type_id, u32 *ret_size)
1056 {
1057 const struct btf_type *size_type;
1058 u32 size_type_id = *type_id;
1059 u32 size = 0;
1060
1061 size_type = btf_type_by_id(btf, size_type_id);
1062 if (btf_type_nosize_or_null(size_type))
1063 return NULL;
1064
1065 if (btf_type_has_size(size_type)) {
1066 size = size_type->size;
1067 } else if (btf_type_is_array(size_type)) {
1068 size = btf->resolved_sizes[size_type_id];
1069 } else if (btf_type_is_ptr(size_type)) {
1070 size = sizeof(void *);
1071 } else {
1072 if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
1073 !btf_type_is_var(size_type)))
1074 return NULL;
1075
1076 size_type_id = btf->resolved_ids[size_type_id];
1077 size_type = btf_type_by_id(btf, size_type_id);
1078 if (btf_type_nosize_or_null(size_type))
1079 return NULL;
1080 else if (btf_type_has_size(size_type))
1081 size = size_type->size;
1082 else if (btf_type_is_array(size_type))
1083 size = btf->resolved_sizes[size_type_id];
1084 else if (btf_type_is_ptr(size_type))
1085 size = sizeof(void *);
1086 else
1087 return NULL;
1088 }
1089
1090 *type_id = size_type_id;
1091 if (ret_size)
1092 *ret_size = size;
1093
1094 return size_type;
1095 }
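
/* As an (illustrative) example, given roughly
 *
 *	[1] INT u32 size=4
 *	[2] TYPEDEF __u32 type_id=1
 *	[3] CONST (anon) type_id=2
 *
 * calling btf_type_id_size(btf, &id, &size) with id == 3 follows the
 * modifier chain through btf->resolved_ids[], returns the INT type and
 * leaves id == 1 and size == 4.
 */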
1096
1097 static int btf_df_check_member(struct btf_verifier_env *env,
1098 const struct btf_type *struct_type,
1099 const struct btf_member *member,
1100 const struct btf_type *member_type)
1101 {
1102 btf_verifier_log_basic(env, struct_type,
1103 "Unsupported check_member");
1104 return -EINVAL;
1105 }
1106
1107 static int btf_df_check_kflag_member(struct btf_verifier_env *env,
1108 const struct btf_type *struct_type,
1109 const struct btf_member *member,
1110 const struct btf_type *member_type)
1111 {
1112 btf_verifier_log_basic(env, struct_type,
1113 "Unsupported check_kflag_member");
1114 return -EINVAL;
1115 }
1116
1117 /* Used for ptr, array and struct/union type members.
1118 * int, enum and modifier types have their specific callback functions.
1119 */
1120 static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
1121 const struct btf_type *struct_type,
1122 const struct btf_member *member,
1123 const struct btf_type *member_type)
1124 {
1125 if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
1126 btf_verifier_log_member(env, struct_type, member,
1127 "Invalid member bitfield_size");
1128 return -EINVAL;
1129 }
1130
1131 /* bitfield size is 0, so member->offset represents bit offset only.
1132 * It is safe to call non kflag check_member variants.
1133 */
1134 return btf_type_ops(member_type)->check_member(env, struct_type,
1135 member,
1136 member_type);
1137 }
1138
1139 static int btf_df_resolve(struct btf_verifier_env *env,
1140 const struct resolve_vertex *v)
1141 {
1142 btf_verifier_log_basic(env, v->t, "Unsupported resolve");
1143 return -EINVAL;
1144 }
1145
1146 static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
1147 u32 type_id, void *data, u8 bits_offsets,
1148 struct seq_file *m)
1149 {
1150 seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
1151 }
1152
1153 static int btf_int_check_member(struct btf_verifier_env *env,
1154 const struct btf_type *struct_type,
1155 const struct btf_member *member,
1156 const struct btf_type *member_type)
1157 {
1158 u32 int_data = btf_type_int(member_type);
1159 u32 struct_bits_off = member->offset;
1160 u32 struct_size = struct_type->size;
1161 u32 nr_copy_bits;
1162 u32 bytes_offset;
1163
1164 if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
1165 btf_verifier_log_member(env, struct_type, member,
1166 "bits_offset exceeds U32_MAX");
1167 return -EINVAL;
1168 }
1169
1170 struct_bits_off += BTF_INT_OFFSET(int_data);
1171 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1172 nr_copy_bits = BTF_INT_BITS(int_data) +
1173 BITS_PER_BYTE_MASKED(struct_bits_off);
1174
1175 if (nr_copy_bits > BITS_PER_U128) {
1176 btf_verifier_log_member(env, struct_type, member,
1177 "nr_copy_bits exceeds 128");
1178 return -EINVAL;
1179 }
1180
1181 if (struct_size < bytes_offset ||
1182 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
1183 btf_verifier_log_member(env, struct_type, member,
1184 "Member exceeds struct_size");
1185 return -EINVAL;
1186 }
1187
1188 return 0;
1189 }
1190
1191 static int btf_int_check_kflag_member(struct btf_verifier_env *env,
1192 const struct btf_type *struct_type,
1193 const struct btf_member *member,
1194 const struct btf_type *member_type)
1195 {
1196 u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
1197 u32 int_data = btf_type_int(member_type);
1198 u32 struct_size = struct_type->size;
1199 u32 nr_copy_bits;
1200
1201 /* a regular int type is required for the kflag int member */
1202 if (!btf_type_int_is_regular(member_type)) {
1203 btf_verifier_log_member(env, struct_type, member,
1204 "Invalid member base type");
1205 return -EINVAL;
1206 }
1207
1208 /* check sanity of bitfield size */
1209 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
1210 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
1211 nr_int_data_bits = BTF_INT_BITS(int_data);
1212 if (!nr_bits) {
1213 /* Not a bitfield member, member offset must be at byte
1214 * boundary.
1215 */
1216 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1217 btf_verifier_log_member(env, struct_type, member,
1218 "Invalid member offset");
1219 return -EINVAL;
1220 }
1221
1222 nr_bits = nr_int_data_bits;
1223 } else if (nr_bits > nr_int_data_bits) {
1224 btf_verifier_log_member(env, struct_type, member,
1225 "Invalid member bitfield_size");
1226 return -EINVAL;
1227 }
1228
1229 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1230 nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
1231 if (nr_copy_bits > BITS_PER_U128) {
1232 btf_verifier_log_member(env, struct_type, member,
1233 "nr_copy_bits exceeds 128");
1234 return -EINVAL;
1235 }
1236
1237 if (struct_size < bytes_offset ||
1238 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
1239 btf_verifier_log_member(env, struct_type, member,
1240 "Member exceeds struct_size");
1241 return -EINVAL;
1242 }
1243
1244 return 0;
1245 }
1246
1247 static s32 btf_int_check_meta(struct btf_verifier_env *env,
1248 const struct btf_type *t,
1249 u32 meta_left)
1250 {
1251 u32 int_data, nr_bits, meta_needed = sizeof(int_data);
1252 u16 encoding;
1253
1254 if (meta_left < meta_needed) {
1255 btf_verifier_log_basic(env, t,
1256 "meta_left:%u meta_needed:%u",
1257 meta_left, meta_needed);
1258 return -EINVAL;
1259 }
1260
1261 if (btf_type_vlen(t)) {
1262 btf_verifier_log_type(env, t, "vlen != 0");
1263 return -EINVAL;
1264 }
1265
1266 if (btf_type_kflag(t)) {
1267 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1268 return -EINVAL;
1269 }
1270
1271 int_data = btf_type_int(t);
1272 if (int_data & ~BTF_INT_MASK) {
1273 btf_verifier_log_basic(env, t, "Invalid int_data:%x",
1274 int_data);
1275 return -EINVAL;
1276 }
1277
1278 nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
1279
1280 if (nr_bits > BITS_PER_U128) {
1281 btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
1282 BITS_PER_U128);
1283 return -EINVAL;
1284 }
1285
1286 if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
1287 btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
1288 return -EINVAL;
1289 }
1290
1291 /*
1292 * Only one of the encoding bits is allowed and it
1293 * should be sufficient for the pretty print purpose (i.e. decoding).
1294 * Multiple bits can be allowed later if it is found
1295 * to be insufficient.
1296 */
1297 encoding = BTF_INT_ENCODING(int_data);
1298 if (encoding &&
1299 encoding != BTF_INT_SIGNED &&
1300 encoding != BTF_INT_CHAR &&
1301 encoding != BTF_INT_BOOL) {
1302 btf_verifier_log_type(env, t, "Unsupported encoding");
1303 return -ENOTSUPP;
1304 }
1305
1306 btf_verifier_log_type(env, t, NULL);
1307
1308 return meta_needed;
1309 }
1310
1311 static void btf_int_log(struct btf_verifier_env *env,
1312 const struct btf_type *t)
1313 {
1314 int int_data = btf_type_int(t);
1315
1316 btf_verifier_log(env,
1317 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
1318 t->size, BTF_INT_OFFSET(int_data),
1319 BTF_INT_BITS(int_data),
1320 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
1321 }
1322
1323 static void btf_int128_print(struct seq_file *m, void *data)
1324 {
1325 /* data points to a __int128 number.
1326 * Suppose
1327 * int128_num = *(__int128 *)data;
1328 * The formulas below show what upper_num and lower_num represent:
1329 * upper_num = int128_num >> 64;
1330 * lower_num = int128_num & 0xffffffffFFFFFFFFULL;
1331 */
1332 u64 upper_num, lower_num;
1333
1334 #ifdef __BIG_ENDIAN_BITFIELD
1335 upper_num = *(u64 *)data;
1336 lower_num = *(u64 *)(data + 8);
1337 #else
1338 upper_num = *(u64 *)(data + 8);
1339 lower_num = *(u64 *)data;
1340 #endif
1341 if (upper_num == 0)
1342 seq_printf(m, "0x%llx", lower_num);
1343 else
1344 seq_printf(m, "0x%llx%016llx", upper_num, lower_num);
1345 }
1346
1347 static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
1348 u16 right_shift_bits)
1349 {
1350 u64 upper_num, lower_num;
1351
1352 #ifdef __BIG_ENDIAN_BITFIELD
1353 upper_num = print_num[0];
1354 lower_num = print_num[1];
1355 #else
1356 upper_num = print_num[1];
1357 lower_num = print_num[0];
1358 #endif
1359
1360 /* shake out un-needed bits by shift/or operations */
1361 if (left_shift_bits >= 64) {
1362 upper_num = lower_num << (left_shift_bits - 64);
1363 lower_num = 0;
1364 } else {
1365 upper_num = (upper_num << left_shift_bits) |
1366 (lower_num >> (64 - left_shift_bits));
1367 lower_num = lower_num << left_shift_bits;
1368 }
1369
1370 if (right_shift_bits >= 64) {
1371 lower_num = upper_num >> (right_shift_bits - 64);
1372 upper_num = 0;
1373 } else {
1374 lower_num = (lower_num >> right_shift_bits) |
1375 (upper_num << (64 - right_shift_bits));
1376 upper_num = upper_num >> right_shift_bits;
1377 }
1378
1379 #ifdef __BIG_ENDIAN_BITFIELD
1380 print_num[0] = upper_num;
1381 print_num[1] = lower_num;
1382 #else
1383 print_num[0] = lower_num;
1384 print_num[1] = upper_num;
1385 #endif
1386 }
1387
1388 static void btf_bitfield_seq_show(void *data, u8 bits_offset,
1389 u8 nr_bits, struct seq_file *m)
1390 {
1391 u16 left_shift_bits, right_shift_bits;
1392 u8 nr_copy_bytes;
1393 u8 nr_copy_bits;
1394 u64 print_num[2] = {};
1395
1396 nr_copy_bits = nr_bits + bits_offset;
1397 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
1398
1399 memcpy(print_num, data, nr_copy_bytes);
1400
1401 #ifdef __BIG_ENDIAN_BITFIELD
1402 left_shift_bits = bits_offset;
1403 #else
1404 left_shift_bits = BITS_PER_U128 - nr_copy_bits;
1405 #endif
1406 right_shift_bits = BITS_PER_U128 - nr_bits;
1407
1408 btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
1409 btf_int128_print(m, print_num);
1410 }
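
/* Illustrative example on a little-endian host: with bits_offset == 3 and
 * nr_bits == 5, nr_copy_bits == 8, so one byte is copied into print_num[].
 * left_shift_bits == 128 - 8 == 120 pushes the copied byte up to the top
 * of the 128-bit value, and right_shift_bits == 128 - 5 == 123 then pulls
 * the wanted 5 bits back down to bit 0, discarding the 3 low bits that
 * belong to a neighbouring field.
 */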
1411
1412
1413 static void btf_int_bits_seq_show(const struct btf *btf,
1414 const struct btf_type *t,
1415 void *data, u8 bits_offset,
1416 struct seq_file *m)
1417 {
1418 u32 int_data = btf_type_int(t);
1419 u8 nr_bits = BTF_INT_BITS(int_data);
1420 u8 total_bits_offset;
1421
1422 /*
1423 * bits_offset is at most 7.
1424 * BTF_INT_OFFSET() cannot exceed 128 bits.
1425 */
1426 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
1427 data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
1428 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
1429 btf_bitfield_seq_show(data, bits_offset, nr_bits, m);
1430 }
1431
1432 static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
1433 u32 type_id, void *data, u8 bits_offset,
1434 struct seq_file *m)
1435 {
1436 u32 int_data = btf_type_int(t);
1437 u8 encoding = BTF_INT_ENCODING(int_data);
1438 bool sign = encoding & BTF_INT_SIGNED;
1439 u8 nr_bits = BTF_INT_BITS(int_data);
1440
1441 if (bits_offset || BTF_INT_OFFSET(int_data) ||
1442 BITS_PER_BYTE_MASKED(nr_bits)) {
1443 btf_int_bits_seq_show(btf, t, data, bits_offset, m);
1444 return;
1445 }
1446
1447 switch (nr_bits) {
1448 case 128:
1449 btf_int128_print(m, data);
1450 break;
1451 case 64:
1452 if (sign)
1453 seq_printf(m, "%lld", *(s64 *)data);
1454 else
1455 seq_printf(m, "%llu", *(u64 *)data);
1456 break;
1457 case 32:
1458 if (sign)
1459 seq_printf(m, "%d", *(s32 *)data);
1460 else
1461 seq_printf(m, "%u", *(u32 *)data);
1462 break;
1463 case 16:
1464 if (sign)
1465 seq_printf(m, "%d", *(s16 *)data);
1466 else
1467 seq_printf(m, "%u", *(u16 *)data);
1468 break;
1469 case 8:
1470 if (sign)
1471 seq_printf(m, "%d", *(s8 *)data);
1472 else
1473 seq_printf(m, "%u", *(u8 *)data);
1474 break;
1475 default:
1476 btf_int_bits_seq_show(btf, t, data, bits_offset, m);
1477 }
1478 }
1479
1480 static const struct btf_kind_operations int_ops = {
1481 .check_meta = btf_int_check_meta,
1482 .resolve = btf_df_resolve,
1483 .check_member = btf_int_check_member,
1484 .check_kflag_member = btf_int_check_kflag_member,
1485 .log_details = btf_int_log,
1486 .seq_show = btf_int_seq_show,
1487 };
1488
1489 static int btf_modifier_check_member(struct btf_verifier_env *env,
1490 const struct btf_type *struct_type,
1491 const struct btf_member *member,
1492 const struct btf_type *member_type)
1493 {
1494 const struct btf_type *resolved_type;
1495 u32 resolved_type_id = member->type;
1496 struct btf_member resolved_member;
1497 struct btf *btf = env->btf;
1498
1499 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1500 if (!resolved_type) {
1501 btf_verifier_log_member(env, struct_type, member,
1502 "Invalid member");
1503 return -EINVAL;
1504 }
1505
1506 resolved_member = *member;
1507 resolved_member.type = resolved_type_id;
1508
1509 return btf_type_ops(resolved_type)->check_member(env, struct_type,
1510 &resolved_member,
1511 resolved_type);
1512 }
1513
1514 static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
1515 const struct btf_type *struct_type,
1516 const struct btf_member *member,
1517 const struct btf_type *member_type)
1518 {
1519 const struct btf_type *resolved_type;
1520 u32 resolved_type_id = member->type;
1521 struct btf_member resolved_member;
1522 struct btf *btf = env->btf;
1523
1524 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1525 if (!resolved_type) {
1526 btf_verifier_log_member(env, struct_type, member,
1527 "Invalid member");
1528 return -EINVAL;
1529 }
1530
1531 resolved_member = *member;
1532 resolved_member.type = resolved_type_id;
1533
1534 return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
1535 &resolved_member,
1536 resolved_type);
1537 }
1538
1539 static int btf_ptr_check_member(struct btf_verifier_env *env,
1540 const struct btf_type *struct_type,
1541 const struct btf_member *member,
1542 const struct btf_type *member_type)
1543 {
1544 u32 struct_size, struct_bits_off, bytes_offset;
1545
1546 struct_size = struct_type->size;
1547 struct_bits_off = member->offset;
1548 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1549
1550 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1551 btf_verifier_log_member(env, struct_type, member,
1552 "Member is not byte aligned");
1553 return -EINVAL;
1554 }
1555
1556 if (struct_size - bytes_offset < sizeof(void *)) {
1557 btf_verifier_log_member(env, struct_type, member,
1558 "Member exceeds struct_size");
1559 return -EINVAL;
1560 }
1561
1562 return 0;
1563 }
1564
1565 static int btf_ref_type_check_meta(struct btf_verifier_env *env,
1566 const struct btf_type *t,
1567 u32 meta_left)
1568 {
1569 if (btf_type_vlen(t)) {
1570 btf_verifier_log_type(env, t, "vlen != 0");
1571 return -EINVAL;
1572 }
1573
1574 if (btf_type_kflag(t)) {
1575 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1576 return -EINVAL;
1577 }
1578
1579 if (!BTF_TYPE_ID_VALID(t->type)) {
1580 btf_verifier_log_type(env, t, "Invalid type_id");
1581 return -EINVAL;
1582 }
1583
1584 /* typedef type must have a valid name, and other ref types,
1585 * volatile, const, restrict, should have a null name.
1586 */
1587 if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
1588 if (!t->name_off ||
1589 !btf_name_valid_identifier(env->btf, t->name_off)) {
1590 btf_verifier_log_type(env, t, "Invalid name");
1591 return -EINVAL;
1592 }
1593 } else {
1594 if (t->name_off) {
1595 btf_verifier_log_type(env, t, "Invalid name");
1596 return -EINVAL;
1597 }
1598 }
1599
1600 btf_verifier_log_type(env, t, NULL);
1601
1602 return 0;
1603 }
1604
1605 static int btf_modifier_resolve(struct btf_verifier_env *env,
1606 const struct resolve_vertex *v)
1607 {
1608 const struct btf_type *t = v->t;
1609 const struct btf_type *next_type;
1610 u32 next_type_id = t->type;
1611 struct btf *btf = env->btf;
1612
1613 next_type = btf_type_by_id(btf, next_type_id);
1614 if (!next_type || btf_type_is_resolve_source_only(next_type)) {
1615 btf_verifier_log_type(env, v->t, "Invalid type_id");
1616 return -EINVAL;
1617 }
1618
1619 if (!env_type_is_resolve_sink(env, next_type) &&
1620 !env_type_is_resolved(env, next_type_id))
1621 return env_stack_push(env, next_type, next_type_id);
1622
1623 /* Figure out the resolved next_type_id with size.
1624 * They will be stored in the current modifier's
1625 * resolved_ids and resolved_sizes so that it can
1626 * save us some type-following when we use it later (e.g. in
1627 * pretty print).
1628 */
1629 if (!btf_type_id_size(btf, &next_type_id, NULL)) {
1630 if (env_type_is_resolved(env, next_type_id))
1631 next_type = btf_type_id_resolve(btf, &next_type_id);
1632
1633 /* "typedef void new_void", "const void"...etc */
1634 if (!btf_type_is_void(next_type) &&
1635 !btf_type_is_fwd(next_type) &&
1636 !btf_type_is_func_proto(next_type)) {
1637 btf_verifier_log_type(env, v->t, "Invalid type_id");
1638 return -EINVAL;
1639 }
1640 }
1641
1642 env_stack_pop_resolved(env, next_type_id, 0);
1643
1644 return 0;
1645 }
1646
1647 static int btf_var_resolve(struct btf_verifier_env *env,
1648 const struct resolve_vertex *v)
1649 {
1650 const struct btf_type *next_type;
1651 const struct btf_type *t = v->t;
1652 u32 next_type_id = t->type;
1653 struct btf *btf = env->btf;
1654
1655 next_type = btf_type_by_id(btf, next_type_id);
1656 if (!next_type || btf_type_is_resolve_source_only(next_type)) {
1657 btf_verifier_log_type(env, v->t, "Invalid type_id");
1658 return -EINVAL;
1659 }
1660
1661 if (!env_type_is_resolve_sink(env, next_type) &&
1662 !env_type_is_resolved(env, next_type_id))
1663 return env_stack_push(env, next_type, next_type_id);
1664
1665 if (btf_type_is_modifier(next_type)) {
1666 const struct btf_type *resolved_type;
1667 u32 resolved_type_id;
1668
1669 resolved_type_id = next_type_id;
1670 resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
1671
1672 if (btf_type_is_ptr(resolved_type) &&
1673 !env_type_is_resolve_sink(env, resolved_type) &&
1674 !env_type_is_resolved(env, resolved_type_id))
1675 return env_stack_push(env, resolved_type,
1676 resolved_type_id);
1677 }
1678
1679 /* We must resolve to something concrete at this point, no
1680 * forward types or similar that would resolve to a size of
1681 * zero are allowed.
1682 */
1683 if (!btf_type_id_size(btf, &next_type_id, NULL)) {
1684 btf_verifier_log_type(env, v->t, "Invalid type_id");
1685 return -EINVAL;
1686 }
1687
1688 env_stack_pop_resolved(env, next_type_id, 0);
1689
1690 return 0;
1691 }
1692
1693 static int btf_ptr_resolve(struct btf_verifier_env *env,
1694 const struct resolve_vertex *v)
1695 {
1696 const struct btf_type *next_type;
1697 const struct btf_type *t = v->t;
1698 u32 next_type_id = t->type;
1699 struct btf *btf = env->btf;
1700
1701 next_type = btf_type_by_id(btf, next_type_id);
1702 if (!next_type || btf_type_is_resolve_source_only(next_type)) {
1703 btf_verifier_log_type(env, v->t, "Invalid type_id");
1704 return -EINVAL;
1705 }
1706
1707 if (!env_type_is_resolve_sink(env, next_type) &&
1708 !env_type_is_resolved(env, next_type_id))
1709 return env_stack_push(env, next_type, next_type_id);
1710
1711 /* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
1712 * the modifier may have stopped resolving when it was resolved
1713 * to a ptr (last-resolved-ptr).
1714 *
1715 * We now need to continue from the last-resolved-ptr to
1716 * ensure the last-resolved-ptr will not refer back to
1717 * the current ptr (t).
1718 */
1719 if (btf_type_is_modifier(next_type)) {
1720 const struct btf_type *resolved_type;
1721 u32 resolved_type_id;
1722
1723 resolved_type_id = next_type_id;
1724 resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
1725
1726 if (btf_type_is_ptr(resolved_type) &&
1727 !env_type_is_resolve_sink(env, resolved_type) &&
1728 !env_type_is_resolved(env, resolved_type_id))
1729 return env_stack_push(env, resolved_type,
1730 resolved_type_id);
1731 }
1732
1733 if (!btf_type_id_size(btf, &next_type_id, NULL)) {
1734 if (env_type_is_resolved(env, next_type_id))
1735 next_type = btf_type_id_resolve(btf, &next_type_id);
1736
1737 if (!btf_type_is_void(next_type) &&
1738 !btf_type_is_fwd(next_type) &&
1739 !btf_type_is_func_proto(next_type)) {
1740 btf_verifier_log_type(env, v->t, "Invalid type_id");
1741 return -EINVAL;
1742 }
1743 }
1744
1745 env_stack_pop_resolved(env, next_type_id, 0);
1746
1747 return 0;
1748 }
1749
1750 static void btf_modifier_seq_show(const struct btf *btf,
1751 const struct btf_type *t,
1752 u32 type_id, void *data,
1753 u8 bits_offset, struct seq_file *m)
1754 {
1755 t = btf_type_id_resolve(btf, &type_id);
1756
1757 btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
1758 }
1759
1760 static void btf_var_seq_show(const struct btf *btf, const struct btf_type *t,
1761 u32 type_id, void *data, u8 bits_offset,
1762 struct seq_file *m)
1763 {
1764 t = btf_type_id_resolve(btf, &type_id);
1765
1766 btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
1767 }
1768
1769 static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
1770 u32 type_id, void *data, u8 bits_offset,
1771 struct seq_file *m)
1772 {
1773 /* It is a hashed value */
1774 seq_printf(m, "%p", *(void **)data);
1775 }
1776
1777 static void btf_ref_type_log(struct btf_verifier_env *env,
1778 const struct btf_type *t)
1779 {
1780 btf_verifier_log(env, "type_id=%u", t->type);
1781 }
1782
1783 static struct btf_kind_operations modifier_ops = {
1784 .check_meta = btf_ref_type_check_meta,
1785 .resolve = btf_modifier_resolve,
1786 .check_member = btf_modifier_check_member,
1787 .check_kflag_member = btf_modifier_check_kflag_member,
1788 .log_details = btf_ref_type_log,
1789 .seq_show = btf_modifier_seq_show,
1790 };
1791
1792 static struct btf_kind_operations ptr_ops = {
1793 .check_meta = btf_ref_type_check_meta,
1794 .resolve = btf_ptr_resolve,
1795 .check_member = btf_ptr_check_member,
1796 .check_kflag_member = btf_generic_check_kflag_member,
1797 .log_details = btf_ref_type_log,
1798 .seq_show = btf_ptr_seq_show,
1799 };
1800
1801 static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
1802 const struct btf_type *t,
1803 u32 meta_left)
1804 {
1805 if (btf_type_vlen(t)) {
1806 btf_verifier_log_type(env, t, "vlen != 0");
1807 return -EINVAL;
1808 }
1809
1810 if (t->type) {
1811 btf_verifier_log_type(env, t, "type != 0");
1812 return -EINVAL;
1813 }
1814
1815 /* fwd type must have a valid name */
1816 if (!t->name_off ||
1817 !btf_name_valid_identifier(env->btf, t->name_off)) {
1818 btf_verifier_log_type(env, t, "Invalid name");
1819 return -EINVAL;
1820 }
1821
1822 btf_verifier_log_type(env, t, NULL);
1823
1824 return 0;
1825 }
1826
1827 static void btf_fwd_type_log(struct btf_verifier_env *env,
1828 const struct btf_type *t)
1829 {
1830 btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
1831 }
1832
1833 static struct btf_kind_operations fwd_ops = {
1834 .check_meta = btf_fwd_check_meta,
1835 .resolve = btf_df_resolve,
1836 .check_member = btf_df_check_member,
1837 .check_kflag_member = btf_df_check_kflag_member,
1838 .log_details = btf_fwd_type_log,
1839 .seq_show = btf_df_seq_show,
1840 };
1841
1842 static int btf_array_check_member(struct btf_verifier_env *env,
1843 const struct btf_type *struct_type,
1844 const struct btf_member *member,
1845 const struct btf_type *member_type)
1846 {
1847 u32 struct_bits_off = member->offset;
1848 u32 struct_size, bytes_offset;
1849 u32 array_type_id, array_size;
1850 struct btf *btf = env->btf;
1851
1852 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1853 btf_verifier_log_member(env, struct_type, member,
1854 "Member is not byte aligned");
1855 return -EINVAL;
1856 }
1857
1858 array_type_id = member->type;
1859 btf_type_id_size(btf, &array_type_id, &array_size);
1860 struct_size = struct_type->size;
1861 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1862 if (struct_size - bytes_offset < array_size) {
1863 btf_verifier_log_member(env, struct_type, member,
1864 "Member exceeds struct_size");
1865 return -EINVAL;
1866 }
1867
1868 return 0;
1869 }
1870
1871 static s32 btf_array_check_meta(struct btf_verifier_env *env,
1872 const struct btf_type *t,
1873 u32 meta_left)
1874 {
1875 const struct btf_array *array = btf_type_array(t);
1876 u32 meta_needed = sizeof(*array);
1877
1878 if (meta_left < meta_needed) {
1879 btf_verifier_log_basic(env, t,
1880 "meta_left:%u meta_needed:%u",
1881 meta_left, meta_needed);
1882 return -EINVAL;
1883 }
1884
1885 /* array type should not have a name */
1886 if (t->name_off) {
1887 btf_verifier_log_type(env, t, "Invalid name");
1888 return -EINVAL;
1889 }
1890
1891 if (btf_type_vlen(t)) {
1892 btf_verifier_log_type(env, t, "vlen != 0");
1893 return -EINVAL;
1894 }
1895
1896 if (btf_type_kflag(t)) {
1897 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1898 return -EINVAL;
1899 }
1900
1901 if (t->size) {
1902 btf_verifier_log_type(env, t, "size != 0");
1903 return -EINVAL;
1904 }
1905
1906 /* Array elem type and index type cannot be in type void,
1907 * so !array->type and !array->index_type are not allowed.
1908 */
1909 if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
1910 btf_verifier_log_type(env, t, "Invalid elem");
1911 return -EINVAL;
1912 }
1913
1914 if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
1915 btf_verifier_log_type(env, t, "Invalid index");
1916 return -EINVAL;
1917 }
1918
1919 btf_verifier_log_type(env, t, NULL);
1920
1921 return meta_needed;
1922 }
1923
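/* Resolve a BTF_KIND_ARRAY: the index type must resolve to a regular
 * int and the element type must resolve to a type with a known size.
 * The resolved array size is nelems * elem_size and must not
 * overflow U32_MAX.
 */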
1924 static int btf_array_resolve(struct btf_verifier_env *env,
1925 const struct resolve_vertex *v)
1926 {
1927 const struct btf_array *array = btf_type_array(v->t);
1928 const struct btf_type *elem_type, *index_type;
1929 u32 elem_type_id, index_type_id;
1930 struct btf *btf = env->btf;
1931 u32 elem_size;
1932
1933 /* Check array->index_type */
1934 index_type_id = array->index_type;
1935 index_type = btf_type_by_id(btf, index_type_id);
1936 if (btf_type_nosize_or_null(index_type) ||
1937 btf_type_is_resolve_source_only(index_type)) {
1938 btf_verifier_log_type(env, v->t, "Invalid index");
1939 return -EINVAL;
1940 }
1941
1942 if (!env_type_is_resolve_sink(env, index_type) &&
1943 !env_type_is_resolved(env, index_type_id))
1944 return env_stack_push(env, index_type, index_type_id);
1945
1946 index_type = btf_type_id_size(btf, &index_type_id, NULL);
1947 if (!index_type || !btf_type_is_int(index_type) ||
1948 !btf_type_int_is_regular(index_type)) {
1949 btf_verifier_log_type(env, v->t, "Invalid index");
1950 return -EINVAL;
1951 }
1952
1953 /* Check array->type */
1954 elem_type_id = array->type;
1955 elem_type = btf_type_by_id(btf, elem_type_id);
1956 if (btf_type_nosize_or_null(elem_type) ||
1957 btf_type_is_resolve_source_only(elem_type)) {
1958 btf_verifier_log_type(env, v->t,
1959 "Invalid elem");
1960 return -EINVAL;
1961 }
1962
1963 if (!env_type_is_resolve_sink(env, elem_type) &&
1964 !env_type_is_resolved(env, elem_type_id))
1965 return env_stack_push(env, elem_type, elem_type_id);
1966
1967 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
1968 if (!elem_type) {
1969 btf_verifier_log_type(env, v->t, "Invalid elem");
1970 return -EINVAL;
1971 }
1972
1973 if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
1974 btf_verifier_log_type(env, v->t, "Invalid array of int");
1975 return -EINVAL;
1976 }
1977
1978 if (array->nelems && elem_size > U32_MAX / array->nelems) {
1979 btf_verifier_log_type(env, v->t,
1980 "Array size overflows U32_MAX");
1981 return -EINVAL;
1982 }
1983
1984 env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
1985
1986 return 0;
1987 }
1988
1989 static void btf_array_log(struct btf_verifier_env *env,
1990 const struct btf_type *t)
1991 {
1992 const struct btf_array *array = btf_type_array(t);
1993
1994 btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
1995 array->type, array->index_type, array->nelems);
1996 }
1997
1998 static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
1999 u32 type_id, void *data, u8 bits_offset,
2000 struct seq_file *m)
2001 {
2002 const struct btf_array *array = btf_type_array(t);
2003 const struct btf_kind_operations *elem_ops;
2004 const struct btf_type *elem_type;
2005 u32 i, elem_size, elem_type_id;
2006
2007 elem_type_id = array->type;
2008 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2009 elem_ops = btf_type_ops(elem_type);
2010 seq_puts(m, "[");
2011 for (i = 0; i < array->nelems; i++) {
2012 if (i)
2013 seq_puts(m, ",");
2014
2015 elem_ops->seq_show(btf, elem_type, elem_type_id, data,
2016 bits_offset, m);
2017 data += elem_size;
2018 }
2019 seq_puts(m, "]");
2020 }
2021
2022 static struct btf_kind_operations array_ops = {
2023 .check_meta = btf_array_check_meta,
2024 .resolve = btf_array_resolve,
2025 .check_member = btf_array_check_member,
2026 .check_kflag_member = btf_generic_check_kflag_member,
2027 .log_details = btf_array_log,
2028 .seq_show = btf_array_seq_show,
2029 };
2030
2031 static int btf_struct_check_member(struct btf_verifier_env *env,
2032 const struct btf_type *struct_type,
2033 const struct btf_member *member,
2034 const struct btf_type *member_type)
2035 {
2036 u32 struct_bits_off = member->offset;
2037 u32 struct_size, bytes_offset;
2038
2039 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2040 btf_verifier_log_member(env, struct_type, member,
2041 "Member is not byte aligned");
2042 return -EINVAL;
2043 }
2044
2045 struct_size = struct_type->size;
2046 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2047 if (struct_size - bytes_offset < member_type->size) {
2048 btf_verifier_log_member(env, struct_type, member,
2049 "Member exceeds struct_size");
2050 return -EINVAL;
2051 }
2052
2053 return 0;
2054 }
2055
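/* Validate the meta data of a STRUCT/UNION: each member must have a
 * valid (or no) name, a non-void type_id, a bits_offset that does not
 * exceed the struct size, and offsets must be non-decreasing (and zero
 * for union members).  Member sizes are checked later during resolve.
 */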
2056 static s32 btf_struct_check_meta(struct btf_verifier_env *env,
2057 const struct btf_type *t,
2058 u32 meta_left)
2059 {
2060 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
2061 const struct btf_member *member;
2062 u32 meta_needed, last_offset;
2063 struct btf *btf = env->btf;
2064 u32 struct_size = t->size;
2065 u32 offset;
2066 u16 i;
2067
2068 meta_needed = btf_type_vlen(t) * sizeof(*member);
2069 if (meta_left < meta_needed) {
2070 btf_verifier_log_basic(env, t,
2071 "meta_left:%u meta_needed:%u",
2072 meta_left, meta_needed);
2073 return -EINVAL;
2074 }
2075
2076 /* A struct type has either no name or a valid one */
2077 if (t->name_off &&
2078 !btf_name_valid_identifier(env->btf, t->name_off)) {
2079 btf_verifier_log_type(env, t, "Invalid name");
2080 return -EINVAL;
2081 }
2082
2083 btf_verifier_log_type(env, t, NULL);
2084
2085 last_offset = 0;
2086 for_each_member(i, t, member) {
2087 if (!btf_name_offset_valid(btf, member->name_off)) {
2088 btf_verifier_log_member(env, t, member,
2089 "Invalid member name_offset:%u",
2090 member->name_off);
2091 return -EINVAL;
2092 }
2093
2094 /* A struct member has either no name or a valid one */
2095 if (member->name_off &&
2096 !btf_name_valid_identifier(btf, member->name_off)) {
2097 btf_verifier_log_member(env, t, member, "Invalid name");
2098 return -EINVAL;
2099 }
2100 /* A member cannot be in type void */
2101 if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
2102 btf_verifier_log_member(env, t, member,
2103 "Invalid type_id");
2104 return -EINVAL;
2105 }
2106
2107 offset = btf_member_bit_offset(t, member);
2108 if (is_union && offset) {
2109 btf_verifier_log_member(env, t, member,
2110 "Invalid member bits_offset");
2111 return -EINVAL;
2112 }
2113
2114 /*
2115 * ">" instead of ">=" because the last member could be
2116 * "char a[0];"
2117 */
2118 if (last_offset > offset) {
2119 btf_verifier_log_member(env, t, member,
2120 "Invalid member bits_offset");
2121 return -EINVAL;
2122 }
2123
2124 if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
2125 btf_verifier_log_member(env, t, member,
2126 "Member bits_offset exceeds its struct size");
2127 return -EINVAL;
2128 }
2129
2130 btf_verifier_log_member(env, t, member, NULL);
2131 last_offset = offset;
2132 }
2133
2134 return meta_needed;
2135 }
2136
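/* Resolve a STRUCT/UNION member by member.  v->next_member remembers
 * how far we got, so resolution can be suspended while an unresolved
 * member type is pushed onto the stack and then resumed here.
 */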
2137 static int btf_struct_resolve(struct btf_verifier_env *env,
2138 const struct resolve_vertex *v)
2139 {
2140 const struct btf_member *member;
2141 int err;
2142 u16 i;
2143
2144 /* Before continuing to resolve the next_member,
2145 * ensure the last member is indeed resolved to a
2146 * type with size info.
2147 */
2148 if (v->next_member) {
2149 const struct btf_type *last_member_type;
2150 const struct btf_member *last_member;
2151 u32 last_member_type_id;
2152
2153 last_member = btf_type_member(v->t) + v->next_member - 1;
2154 last_member_type_id = last_member->type;
2155 if (WARN_ON_ONCE(!env_type_is_resolved(env,
2156 last_member_type_id)))
2157 return -EINVAL;
2158
2159 last_member_type = btf_type_by_id(env->btf,
2160 last_member_type_id);
2161 if (btf_type_kflag(v->t))
2162 err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
2163 last_member,
2164 last_member_type);
2165 else
2166 err = btf_type_ops(last_member_type)->check_member(env, v->t,
2167 last_member,
2168 last_member_type);
2169 if (err)
2170 return err;
2171 }
2172
2173 for_each_member_from(i, v->next_member, v->t, member) {
2174 u32 member_type_id = member->type;
2175 const struct btf_type *member_type = btf_type_by_id(env->btf,
2176 member_type_id);
2177
2178 if (btf_type_nosize_or_null(member_type) ||
2179 btf_type_is_resolve_source_only(member_type)) {
2180 btf_verifier_log_member(env, v->t, member,
2181 "Invalid member");
2182 return -EINVAL;
2183 }
2184
2185 if (!env_type_is_resolve_sink(env, member_type) &&
2186 !env_type_is_resolved(env, member_type_id)) {
2187 env_stack_set_next_member(env, i + 1);
2188 return env_stack_push(env, member_type, member_type_id);
2189 }
2190
2191 if (btf_type_kflag(v->t))
2192 err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
2193 member,
2194 member_type);
2195 else
2196 err = btf_type_ops(member_type)->check_member(env, v->t,
2197 member,
2198 member_type);
2199 if (err)
2200 return err;
2201 }
2202
2203 env_stack_pop_resolved(env, 0, 0);
2204
2205 return 0;
2206 }
2207
2208 static void btf_struct_log(struct btf_verifier_env *env,
2209 const struct btf_type *t)
2210 {
2211 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2212 }
2213
2214 /* find 'struct bpf_spin_lock' in map value.
2215 * return >= 0 offset if found
2216 * and < 0 in case of error
2217 */
2218 int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
2219 {
2220 const struct btf_member *member;
2221 u32 i, off = -ENOENT;
2222
2223 if (!__btf_type_is_struct(t))
2224 return -EINVAL;
2225
2226 for_each_member(i, t, member) {
2227 const struct btf_type *member_type = btf_type_by_id(btf,
2228 member->type);
2229 if (!__btf_type_is_struct(member_type))
2230 continue;
2231 if (member_type->size != sizeof(struct bpf_spin_lock))
2232 continue;
2233 if (strcmp(__btf_name_by_offset(btf, member_type->name_off),
2234 "bpf_spin_lock"))
2235 continue;
2236 if (off != -ENOENT)
2237 /* only one 'struct bpf_spin_lock' is allowed */
2238 return -E2BIG;
2239 off = btf_member_bit_offset(t, member);
2240 if (off % 8)
2241 /* valid C code cannot generate such BTF */
2242 return -EINVAL;
2243 off /= 8;
2244 if (off % __alignof__(struct bpf_spin_lock))
2245 /* valid struct bpf_spin_lock will be 4 byte aligned */
2246 return -EINVAL;
2247 }
2248 return off;
2249 }
2250
2251 static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
2252 u32 type_id, void *data, u8 bits_offset,
2253 struct seq_file *m)
2254 {
2255 const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
2256 const struct btf_member *member;
2257 u32 i;
2258
2259 seq_puts(m, "{");
2260 for_each_member(i, t, member) {
2261 const struct btf_type *member_type = btf_type_by_id(btf,
2262 member->type);
2263 const struct btf_kind_operations *ops;
2264 u32 member_offset, bitfield_size;
2265 u32 bytes_offset;
2266 u8 bits8_offset;
2267
2268 if (i)
2269 seq_puts(m, seq);
2270
2271 member_offset = btf_member_bit_offset(t, member);
2272 bitfield_size = btf_member_bitfield_size(t, member);
2273 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
2274 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
2275 if (bitfield_size) {
2276 btf_bitfield_seq_show(data + bytes_offset, bits8_offset,
2277 bitfield_size, m);
2278 } else {
2279 ops = btf_type_ops(member_type);
2280 ops->seq_show(btf, member_type, member->type,
2281 data + bytes_offset, bits8_offset, m);
2282 }
2283 }
2284 seq_puts(m, "}");
2285 }
2286
2287 static struct btf_kind_operations struct_ops = {
2288 .check_meta = btf_struct_check_meta,
2289 .resolve = btf_struct_resolve,
2290 .check_member = btf_struct_check_member,
2291 .check_kflag_member = btf_generic_check_kflag_member,
2292 .log_details = btf_struct_log,
2293 .seq_show = btf_struct_seq_show,
2294 };
2295
2296 static int btf_enum_check_member(struct btf_verifier_env *env,
2297 const struct btf_type *struct_type,
2298 const struct btf_member *member,
2299 const struct btf_type *member_type)
2300 {
2301 u32 struct_bits_off = member->offset;
2302 u32 struct_size, bytes_offset;
2303
2304 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2305 btf_verifier_log_member(env, struct_type, member,
2306 "Member is not byte aligned");
2307 return -EINVAL;
2308 }
2309
2310 struct_size = struct_type->size;
2311 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2312 if (struct_size - bytes_offset < sizeof(int)) {
2313 btf_verifier_log_member(env, struct_type, member,
2314 "Member exceeds struct_size");
2315 return -EINVAL;
2316 }
2317
2318 return 0;
2319 }
2320
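/* Like btf_enum_check_member() but for a struct with kind_flag set:
 * the enum member may be a bitfield, so check the bitfield size
 * against the width of an int and the end of the bitfield against
 * the struct size.
 */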
2321 static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
2322 const struct btf_type *struct_type,
2323 const struct btf_member *member,
2324 const struct btf_type *member_type)
2325 {
2326 u32 struct_bits_off, nr_bits, bytes_end, struct_size;
2327 u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;
2328
2329 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
2330 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
2331 if (!nr_bits) {
2332 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2333 btf_verifier_log_member(env, struct_type, member,
2334 "Member is not byte aligned");
2335 return -EINVAL;
2336 }
2337
2338 nr_bits = int_bitsize;
2339 } else if (nr_bits > int_bitsize) {
2340 btf_verifier_log_member(env, struct_type, member,
2341 "Invalid member bitfield_size");
2342 return -EINVAL;
2343 }
2344
2345 struct_size = struct_type->size;
2346 bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
2347 if (struct_size < bytes_end) {
2348 btf_verifier_log_member(env, struct_type, member,
2349 "Member exceeds struct_size");
2350 return -EINVAL;
2351 }
2352
2353 return 0;
2354 }
2355
2356 static s32 btf_enum_check_meta(struct btf_verifier_env *env,
2357 const struct btf_type *t,
2358 u32 meta_left)
2359 {
2360 const struct btf_enum *enums = btf_type_enum(t);
2361 struct btf *btf = env->btf;
2362 u16 i, nr_enums;
2363 u32 meta_needed;
2364
2365 nr_enums = btf_type_vlen(t);
2366 meta_needed = nr_enums * sizeof(*enums);
2367
2368 if (meta_left < meta_needed) {
2369 btf_verifier_log_basic(env, t,
2370 "meta_left:%u meta_needed:%u",
2371 meta_left, meta_needed);
2372 return -EINVAL;
2373 }
2374
2375 if (btf_type_kflag(t)) {
2376 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2377 return -EINVAL;
2378 }
2379
2380 if (t->size > 8 || !is_power_of_2(t->size)) {
2381 btf_verifier_log_type(env, t, "Unexpected size");
2382 return -EINVAL;
2383 }
2384
2385 /* An enum type has either no name or a valid one */
2386 if (t->name_off &&
2387 !btf_name_valid_identifier(env->btf, t->name_off)) {
2388 btf_verifier_log_type(env, t, "Invalid name");
2389 return -EINVAL;
2390 }
2391
2392 btf_verifier_log_type(env, t, NULL);
2393
2394 for (i = 0; i < nr_enums; i++) {
2395 if (!btf_name_offset_valid(btf, enums[i].name_off)) {
2396 btf_verifier_log(env, "\tInvalid name_offset:%u",
2397 enums[i].name_off);
2398 return -EINVAL;
2399 }
2400
2401 /* enum member must have a valid name */
2402 if (!enums[i].name_off ||
2403 !btf_name_valid_identifier(btf, enums[i].name_off)) {
2404 btf_verifier_log_type(env, t, "Invalid name");
2405 return -EINVAL;
2406 }
2407
2408
2409 btf_verifier_log(env, "\t%s val=%d\n",
2410 __btf_name_by_offset(btf, enums[i].name_off),
2411 enums[i].val);
2412 }
2413
2414 return meta_needed;
2415 }
2416
2417 static void btf_enum_log(struct btf_verifier_env *env,
2418 const struct btf_type *t)
2419 {
2420 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2421 }
2422
2423 static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
2424 u32 type_id, void *data, u8 bits_offset,
2425 struct seq_file *m)
2426 {
2427 const struct btf_enum *enums = btf_type_enum(t);
2428 u32 i, nr_enums = btf_type_vlen(t);
2429 int v = *(int *)data;
2430
2431 for (i = 0; i < nr_enums; i++) {
2432 if (v == enums[i].val) {
2433 seq_printf(m, "%s",
2434 __btf_name_by_offset(btf,
2435 enums[i].name_off));
2436 return;
2437 }
2438 }
2439
2440 seq_printf(m, "%d", v);
2441 }
2442
2443 static struct btf_kind_operations enum_ops = {
2444 .check_meta = btf_enum_check_meta,
2445 .resolve = btf_df_resolve,
2446 .check_member = btf_enum_check_member,
2447 .check_kflag_member = btf_enum_check_kflag_member,
2448 .log_details = btf_enum_log,
2449 .seq_show = btf_enum_seq_show,
2450 };
2451
2452 static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
2453 const struct btf_type *t,
2454 u32 meta_left)
2455 {
2456 u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
2457
2458 if (meta_left < meta_needed) {
2459 btf_verifier_log_basic(env, t,
2460 "meta_left:%u meta_needed:%u",
2461 meta_left, meta_needed);
2462 return -EINVAL;
2463 }
2464
2465 if (t->name_off) {
2466 btf_verifier_log_type(env, t, "Invalid name");
2467 return -EINVAL;
2468 }
2469
2470 if (btf_type_kflag(t)) {
2471 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2472 return -EINVAL;
2473 }
2474
2475 btf_verifier_log_type(env, t, NULL);
2476
2477 return meta_needed;
2478 }
2479
2480 static void btf_func_proto_log(struct btf_verifier_env *env,
2481 const struct btf_type *t)
2482 {
2483 const struct btf_param *args = (const struct btf_param *)(t + 1);
2484 u16 nr_args = btf_type_vlen(t), i;
2485
2486 btf_verifier_log(env, "return=%u args=(", t->type);
2487 if (!nr_args) {
2488 btf_verifier_log(env, "void");
2489 goto done;
2490 }
2491
2492 if (nr_args == 1 && !args[0].type) {
2493 /* Only one vararg */
2494 btf_verifier_log(env, "vararg");
2495 goto done;
2496 }
2497
2498 btf_verifier_log(env, "%u %s", args[0].type,
2499 __btf_name_by_offset(env->btf,
2500 args[0].name_off));
2501 for (i = 1; i < nr_args - 1; i++)
2502 btf_verifier_log(env, ", %u %s", args[i].type,
2503 __btf_name_by_offset(env->btf,
2504 args[i].name_off));
2505
2506 if (nr_args > 1) {
2507 const struct btf_param *last_arg = &args[nr_args - 1];
2508
2509 if (last_arg->type)
2510 btf_verifier_log(env, ", %u %s", last_arg->type,
2511 __btf_name_by_offset(env->btf,
2512 last_arg->name_off));
2513 else
2514 btf_verifier_log(env, ", vararg");
2515 }
2516
2517 done:
2518 btf_verifier_log(env, ")");
2519 }
2520
2521 static struct btf_kind_operations func_proto_ops = {
2522 .check_meta = btf_func_proto_check_meta,
2523 .resolve = btf_df_resolve,
2524 /*
2525 * BTF_KIND_FUNC_PROTO cannot be directly referred to by
2526 * a struct's member.
2527 *
2528 * It should be a function pointer instead.
2529 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
2530 *
2531 * Hence, there is no btf_func_check_member().
2532 */
2533 .check_member = btf_df_check_member,
2534 .check_kflag_member = btf_df_check_kflag_member,
2535 .log_details = btf_func_proto_log,
2536 .seq_show = btf_df_seq_show,
2537 };
2538
2539 static s32 btf_func_check_meta(struct btf_verifier_env *env,
2540 const struct btf_type *t,
2541 u32 meta_left)
2542 {
2543 if (!t->name_off ||
2544 !btf_name_valid_identifier(env->btf, t->name_off)) {
2545 btf_verifier_log_type(env, t, "Invalid name");
2546 return -EINVAL;
2547 }
2548
2549 if (btf_type_vlen(t)) {
2550 btf_verifier_log_type(env, t, "vlen != 0");
2551 return -EINVAL;
2552 }
2553
2554 if (btf_type_kflag(t)) {
2555 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2556 return -EINVAL;
2557 }
2558
2559 btf_verifier_log_type(env, t, NULL);
2560
2561 return 0;
2562 }
2563
2564 static struct btf_kind_operations func_ops = {
2565 .check_meta = btf_func_check_meta,
2566 .resolve = btf_df_resolve,
2567 .check_member = btf_df_check_member,
2568 .check_kflag_member = btf_df_check_kflag_member,
2569 .log_details = btf_ref_type_log,
2570 .seq_show = btf_df_seq_show,
2571 };
2572
2573 static s32 btf_var_check_meta(struct btf_verifier_env *env,
2574 const struct btf_type *t,
2575 u32 meta_left)
2576 {
2577 const struct btf_var *var;
2578 u32 meta_needed = sizeof(*var);
2579
2580 if (meta_left < meta_needed) {
2581 btf_verifier_log_basic(env, t,
2582 "meta_left:%u meta_needed:%u",
2583 meta_left, meta_needed);
2584 return -EINVAL;
2585 }
2586
2587 if (btf_type_vlen(t)) {
2588 btf_verifier_log_type(env, t, "vlen != 0");
2589 return -EINVAL;
2590 }
2591
2592 if (btf_type_kflag(t)) {
2593 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2594 return -EINVAL;
2595 }
2596
2597 if (!t->name_off ||
2598 !__btf_name_valid(env->btf, t->name_off, true)) {
2599 btf_verifier_log_type(env, t, "Invalid name");
2600 return -EINVAL;
2601 }
2602
2603 /* A var cannot be in type void */
2604 if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
2605 btf_verifier_log_type(env, t, "Invalid type_id");
2606 return -EINVAL;
2607 }
2608
2609 var = btf_type_var(t);
2610 if (var->linkage != BTF_VAR_STATIC &&
2611 var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2612 btf_verifier_log_type(env, t, "Linkage not supported");
2613 return -EINVAL;
2614 }
2615
2616 btf_verifier_log_type(env, t, NULL);
2617
2618 return meta_needed;
2619 }
2620
2621 static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
2622 {
2623 const struct btf_var *var = btf_type_var(t);
2624
2625 btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
2626 }
2627
2628 static const struct btf_kind_operations var_ops = {
2629 .check_meta = btf_var_check_meta,
2630 .resolve = btf_var_resolve,
2631 .check_member = btf_df_check_member,
2632 .check_kflag_member = btf_df_check_kflag_member,
2633 .log_details = btf_var_log,
2634 .seq_show = btf_var_seq_show,
2635 };
2636
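/* Validate a DATASEC: it must have a valid section name, a non-zero
 * size and at least one btf_var_secinfo.  The var entries must not
 * overlap, must appear in offset order and must fit inside the
 * section size.
 */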
2637 static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
2638 const struct btf_type *t,
2639 u32 meta_left)
2640 {
2641 const struct btf_var_secinfo *vsi;
2642 u64 last_vsi_end_off = 0, sum = 0;
2643 u32 i, meta_needed;
2644
2645 meta_needed = btf_type_vlen(t) * sizeof(*vsi);
2646 if (meta_left < meta_needed) {
2647 btf_verifier_log_basic(env, t,
2648 "meta_left:%u meta_needed:%u",
2649 meta_left, meta_needed);
2650 return -EINVAL;
2651 }
2652
2653 if (!btf_type_vlen(t)) {
2654 btf_verifier_log_type(env, t, "vlen == 0");
2655 return -EINVAL;
2656 }
2657
2658 if (!t->size) {
2659 btf_verifier_log_type(env, t, "size == 0");
2660 return -EINVAL;
2661 }
2662
2663 if (btf_type_kflag(t)) {
2664 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2665 return -EINVAL;
2666 }
2667
2668 if (!t->name_off ||
2669 !btf_name_valid_section(env->btf, t->name_off)) {
2670 btf_verifier_log_type(env, t, "Invalid name");
2671 return -EINVAL;
2672 }
2673
2674 btf_verifier_log_type(env, t, NULL);
2675
2676 for_each_vsi(i, t, vsi) {
2677 /* A var cannot be in type void */
2678 if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
2679 btf_verifier_log_vsi(env, t, vsi,
2680 "Invalid type_id");
2681 return -EINVAL;
2682 }
2683
2684 if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
2685 btf_verifier_log_vsi(env, t, vsi,
2686 "Invalid offset");
2687 return -EINVAL;
2688 }
2689
2690 if (!vsi->size || vsi->size > t->size) {
2691 btf_verifier_log_vsi(env, t, vsi,
2692 "Invalid size");
2693 return -EINVAL;
2694 }
2695
2696 last_vsi_end_off = vsi->offset + vsi->size;
2697 if (last_vsi_end_off > t->size) {
2698 btf_verifier_log_vsi(env, t, vsi,
2699 "Invalid offset+size");
2700 return -EINVAL;
2701 }
2702
2703 btf_verifier_log_vsi(env, t, vsi, NULL);
2704 sum += vsi->size;
2705 }
2706
2707 if (t->size < sum) {
2708 btf_verifier_log_type(env, t, "Invalid btf_info size");
2709 return -EINVAL;
2710 }
2711
2712 return meta_needed;
2713 }
2714
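/* Resolve a DATASEC: every btf_var_secinfo must point to a VAR whose
 * underlying type has a known size that fits into vsi->size.
 */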
2715 static int btf_datasec_resolve(struct btf_verifier_env *env,
2716 const struct resolve_vertex *v)
2717 {
2718 const struct btf_var_secinfo *vsi;
2719 struct btf *btf = env->btf;
2720 u16 i;
2721
2722 for_each_vsi_from(i, v->next_member, v->t, vsi) {
2723 u32 var_type_id = vsi->type, type_id, type_size = 0;
2724 const struct btf_type *var_type = btf_type_by_id(env->btf,
2725 var_type_id);
2726 if (!var_type || !btf_type_is_var(var_type)) {
2727 btf_verifier_log_vsi(env, v->t, vsi,
2728 "Not a VAR kind member");
2729 return -EINVAL;
2730 }
2731
2732 if (!env_type_is_resolve_sink(env, var_type) &&
2733 !env_type_is_resolved(env, var_type_id)) {
2734 env_stack_set_next_member(env, i + 1);
2735 return env_stack_push(env, var_type, var_type_id);
2736 }
2737
2738 type_id = var_type->type;
2739 if (!btf_type_id_size(btf, &type_id, &type_size)) {
2740 btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
2741 return -EINVAL;
2742 }
2743
2744 if (vsi->size < type_size) {
2745 btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
2746 return -EINVAL;
2747 }
2748 }
2749
2750 env_stack_pop_resolved(env, 0, 0);
2751 return 0;
2752 }
2753
2754 static void btf_datasec_log(struct btf_verifier_env *env,
2755 const struct btf_type *t)
2756 {
2757 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2758 }
2759
2760 static void btf_datasec_seq_show(const struct btf *btf,
2761 const struct btf_type *t, u32 type_id,
2762 void *data, u8 bits_offset,
2763 struct seq_file *m)
2764 {
2765 const struct btf_var_secinfo *vsi;
2766 const struct btf_type *var;
2767 u32 i;
2768
2769 seq_printf(m, "section (\"%s\") = {", __btf_name_by_offset(btf, t->name_off));
2770 for_each_vsi(i, t, vsi) {
2771 var = btf_type_by_id(btf, vsi->type);
2772 if (i)
2773 seq_puts(m, ",");
2774 btf_type_ops(var)->seq_show(btf, var, vsi->type,
2775 data + vsi->offset, bits_offset, m);
2776 }
2777 seq_puts(m, "}");
2778 }
2779
2780 static const struct btf_kind_operations datasec_ops = {
2781 .check_meta = btf_datasec_check_meta,
2782 .resolve = btf_datasec_resolve,
2783 .check_member = btf_df_check_member,
2784 .check_kflag_member = btf_df_check_kflag_member,
2785 .log_details = btf_datasec_log,
2786 .seq_show = btf_datasec_seq_show,
2787 };
2788
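/* Check a FUNC_PROTO beyond its meta data: the return type must be
 * void or a type with a size, and every argument type must resolve
 * to a type with a size.  Only the last argument may have type_id 0
 * (vararg), and then it must be unnamed.
 */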
2789 static int btf_func_proto_check(struct btf_verifier_env *env,
2790 const struct btf_type *t)
2791 {
2792 const struct btf_type *ret_type;
2793 const struct btf_param *args;
2794 const struct btf *btf;
2795 u16 nr_args, i;
2796 int err;
2797
2798 btf = env->btf;
2799 args = (const struct btf_param *)(t + 1);
2800 nr_args = btf_type_vlen(t);
2801
2802 /* Check func return type which could be "void" (t->type == 0) */
2803 if (t->type) {
2804 u32 ret_type_id = t->type;
2805
2806 ret_type = btf_type_by_id(btf, ret_type_id);
2807 if (!ret_type) {
2808 btf_verifier_log_type(env, t, "Invalid return type");
2809 return -EINVAL;
2810 }
2811
2812 if (btf_type_needs_resolve(ret_type) &&
2813 !env_type_is_resolved(env, ret_type_id)) {
2814 err = btf_resolve(env, ret_type, ret_type_id);
2815 if (err)
2816 return err;
2817 }
2818
2819 /* Ensure the return type is a type that has a size */
2820 if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
2821 btf_verifier_log_type(env, t, "Invalid return type");
2822 return -EINVAL;
2823 }
2824 }
2825
2826 if (!nr_args)
2827 return 0;
2828
2829 /* Last func arg type_id could be 0 if it is a vararg */
2830 if (!args[nr_args - 1].type) {
2831 if (args[nr_args - 1].name_off) {
2832 btf_verifier_log_type(env, t, "Invalid arg#%u",
2833 nr_args);
2834 return -EINVAL;
2835 }
2836 nr_args--;
2837 }
2838
2839 err = 0;
2840 for (i = 0; i < nr_args; i++) {
2841 const struct btf_type *arg_type;
2842 u32 arg_type_id;
2843
2844 arg_type_id = args[i].type;
2845 arg_type = btf_type_by_id(btf, arg_type_id);
2846 if (!arg_type) {
2847 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2848 err = -EINVAL;
2849 break;
2850 }
2851
2852 if (args[i].name_off &&
2853 (!btf_name_offset_valid(btf, args[i].name_off) ||
2854 !btf_name_valid_identifier(btf, args[i].name_off))) {
2855 btf_verifier_log_type(env, t,
2856 "Invalid arg#%u", i + 1);
2857 err = -EINVAL;
2858 break;
2859 }
2860
2861 if (btf_type_needs_resolve(arg_type) &&
2862 !env_type_is_resolved(env, arg_type_id)) {
2863 err = btf_resolve(env, arg_type, arg_type_id);
2864 if (err)
2865 break;
2866 }
2867
2868 if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
2869 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2870 err = -EINVAL;
2871 break;
2872 }
2873 }
2874
2875 return err;
2876 }
2877
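/* Check a FUNC: it must refer to a FUNC_PROTO and, unlike a bare
 * FUNC_PROTO, every argument of that prototype must be named
 * (a trailing vararg excepted).
 */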
2878 static int btf_func_check(struct btf_verifier_env *env,
2879 const struct btf_type *t)
2880 {
2881 const struct btf_type *proto_type;
2882 const struct btf_param *args;
2883 const struct btf *btf;
2884 u16 nr_args, i;
2885
2886 btf = env->btf;
2887 proto_type = btf_type_by_id(btf, t->type);
2888
2889 if (!proto_type || !btf_type_is_func_proto(proto_type)) {
2890 btf_verifier_log_type(env, t, "Invalid type_id");
2891 return -EINVAL;
2892 }
2893
2894 args = (const struct btf_param *)(proto_type + 1);
2895 nr_args = btf_type_vlen(proto_type);
2896 for (i = 0; i < nr_args; i++) {
2897 if (!args[i].name_off && args[i].type) {
2898 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2899 return -EINVAL;
2900 }
2901 }
2902
2903 return 0;
2904 }
2905
2906 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
2907 [BTF_KIND_INT] = &int_ops,
2908 [BTF_KIND_PTR] = &ptr_ops,
2909 [BTF_KIND_ARRAY] = &array_ops,
2910 [BTF_KIND_STRUCT] = &struct_ops,
2911 [BTF_KIND_UNION] = &struct_ops,
2912 [BTF_KIND_ENUM] = &enum_ops,
2913 [BTF_KIND_FWD] = &fwd_ops,
2914 [BTF_KIND_TYPEDEF] = &modifier_ops,
2915 [BTF_KIND_VOLATILE] = &modifier_ops,
2916 [BTF_KIND_CONST] = &modifier_ops,
2917 [BTF_KIND_RESTRICT] = &modifier_ops,
2918 [BTF_KIND_FUNC] = &func_ops,
2919 [BTF_KIND_FUNC_PROTO] = &func_proto_ops,
2920 [BTF_KIND_VAR] = &var_ops,
2921 [BTF_KIND_DATASEC] = &datasec_ops,
2922 };
2923
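/* Check the common part of a btf_type (info, kind and name_off) and
 * then hand off to the kind specific check_meta().  Returns the total
 * number of meta bytes consumed by this type, or a negative error.
 */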
2924 static s32 btf_check_meta(struct btf_verifier_env *env,
2925 const struct btf_type *t,
2926 u32 meta_left)
2927 {
2928 u32 saved_meta_left = meta_left;
2929 s32 var_meta_size;
2930
2931 if (meta_left < sizeof(*t)) {
2932 btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
2933 env->log_type_id, meta_left, sizeof(*t));
2934 return -EINVAL;
2935 }
2936 meta_left -= sizeof(*t);
2937
2938 if (t->info & ~BTF_INFO_MASK) {
2939 btf_verifier_log(env, "[%u] Invalid btf_info:%x",
2940 env->log_type_id, t->info);
2941 return -EINVAL;
2942 }
2943
2944 if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
2945 BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
2946 btf_verifier_log(env, "[%u] Invalid kind:%u",
2947 env->log_type_id, BTF_INFO_KIND(t->info));
2948 return -EINVAL;
2949 }
2950
2951 if (!btf_name_offset_valid(env->btf, t->name_off)) {
2952 btf_verifier_log(env, "[%u] Invalid name_offset:%u",
2953 env->log_type_id, t->name_off);
2954 return -EINVAL;
2955 }
2956
2957 var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
2958 if (var_meta_size < 0)
2959 return var_meta_size;
2960
2961 meta_left -= var_meta_size;
2962
2963 return saved_meta_left - meta_left;
2964 }
2965
2966 static int btf_check_all_metas(struct btf_verifier_env *env)
2967 {
2968 struct btf *btf = env->btf;
2969 struct btf_header *hdr;
2970 void *cur, *end;
2971
2972 hdr = &btf->hdr;
2973 cur = btf->nohdr_data + hdr->type_off;
2974 end = cur + hdr->type_len;
2975
2976 env->log_type_id = 1;
2977 while (cur < end) {
2978 struct btf_type *t = cur;
2979 s32 meta_size;
2980
2981 meta_size = btf_check_meta(env, t, end - cur);
2982 if (meta_size < 0)
2983 return meta_size;
2984
2985 btf_add_type(env, t);
2986 cur += meta_size;
2987 env->log_type_id++;
2988 }
2989
2990 return 0;
2991 }
2992
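/* Sanity check after btf_resolve(): make sure the type ended up in a
 * resolved state that is consistent with its kind (e.g. an array's
 * resolved size must equal nelems * elem_size).
 */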
2993 static bool btf_resolve_valid(struct btf_verifier_env *env,
2994 const struct btf_type *t,
2995 u32 type_id)
2996 {
2997 struct btf *btf = env->btf;
2998
2999 if (!env_type_is_resolved(env, type_id))
3000 return false;
3001
3002 if (btf_type_is_struct(t) || btf_type_is_datasec(t))
3003 return !btf->resolved_ids[type_id] &&
3004 !btf->resolved_sizes[type_id];
3005
3006 if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
3007 btf_type_is_var(t)) {
3008 t = btf_type_id_resolve(btf, &type_id);
3009 return t &&
3010 !btf_type_is_modifier(t) &&
3011 !btf_type_is_var(t) &&
3012 !btf_type_is_datasec(t);
3013 }
3014
3015 if (btf_type_is_array(t)) {
3016 const struct btf_array *array = btf_type_array(t);
3017 const struct btf_type *elem_type;
3018 u32 elem_type_id = array->type;
3019 u32 elem_size;
3020
3021 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
3022 return elem_type && !btf_type_is_modifier(elem_type) &&
3023 (array->nelems * elem_size ==
3024 btf->resolved_sizes[type_id]);
3025 }
3026
3027 return false;
3028 }
3029
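/* Resolve one type by walking its references with an explicit stack
 * (the env stack).  -E2BIG means the maximum resolving depth was
 * exceeded and -EEXIST means a reference loop was detected.
 */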
3030 static int btf_resolve(struct btf_verifier_env *env,
3031 const struct btf_type *t, u32 type_id)
3032 {
3033 u32 save_log_type_id = env->log_type_id;
3034 const struct resolve_vertex *v;
3035 int err = 0;
3036
3037 env->resolve_mode = RESOLVE_TBD;
3038 env_stack_push(env, t, type_id);
3039 while (!err && (v = env_stack_peak(env))) {
3040 env->log_type_id = v->type_id;
3041 err = btf_type_ops(v->t)->resolve(env, v);
3042 }
3043
3044 env->log_type_id = type_id;
3045 if (err == -E2BIG) {
3046 btf_verifier_log_type(env, t,
3047 "Exceeded max resolving depth:%u",
3048 MAX_RESOLVE_DEPTH);
3049 } else if (err == -EEXIST) {
3050 btf_verifier_log_type(env, t, "Loop detected");
3051 }
3052
3053 /* Final sanity check */
3054 if (!err && !btf_resolve_valid(env, t, type_id)) {
3055 btf_verifier_log_type(env, t, "Invalid resolve state");
3056 err = -EINVAL;
3057 }
3058
3059 env->log_type_id = save_log_type_id;
3060 return err;
3061 }
3062
3063 static int btf_check_all_types(struct btf_verifier_env *env)
3064 {
3065 struct btf *btf = env->btf;
3066 u32 type_id;
3067 int err;
3068
3069 err = env_resolve_init(env);
3070 if (err)
3071 return err;
3072
3073 env->phase++;
3074 for (type_id = 1; type_id <= btf->nr_types; type_id++) {
3075 const struct btf_type *t = btf_type_by_id(btf, type_id);
3076
3077 env->log_type_id = type_id;
3078 if (btf_type_needs_resolve(t) &&
3079 !env_type_is_resolved(env, type_id)) {
3080 err = btf_resolve(env, t, type_id);
3081 if (err)
3082 return err;
3083 }
3084
3085 if (btf_type_is_func_proto(t)) {
3086 err = btf_func_proto_check(env, t);
3087 if (err)
3088 return err;
3089 }
3090
3091 if (btf_type_is_func(t)) {
3092 err = btf_func_check(env, t);
3093 if (err)
3094 return err;
3095 }
3096 }
3097
3098 return 0;
3099 }
3100
3101 static int btf_parse_type_sec(struct btf_verifier_env *env)
3102 {
3103 const struct btf_header *hdr = &env->btf->hdr;
3104 int err;
3105
3106 /* Type section must align to 4 bytes */
3107 if (hdr->type_off & (sizeof(u32) - 1)) {
3108 btf_verifier_log(env, "Unaligned type_off");
3109 return -EINVAL;
3110 }
3111
3112 if (!hdr->type_len) {
3113 btf_verifier_log(env, "No type found");
3114 return -EINVAL;
3115 }
3116
3117 err = btf_check_all_metas(env);
3118 if (err)
3119 return err;
3120
3121 return btf_check_all_types(env);
3122 }
3123
3124 static int btf_parse_str_sec(struct btf_verifier_env *env)
3125 {
3126 const struct btf_header *hdr;
3127 struct btf *btf = env->btf;
3128 const char *start, *end;
3129
3130 hdr = &btf->hdr;
3131 start = btf->nohdr_data + hdr->str_off;
3132 end = start + hdr->str_len;
3133
3134 if (end != btf->data + btf->data_size) {
3135 btf_verifier_log(env, "String section is not at the end");
3136 return -EINVAL;
3137 }
3138
3139 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
3140 start[0] || end[-1]) {
3141 btf_verifier_log(env, "Invalid string section");
3142 return -EINVAL;
3143 }
3144
3145 btf->strings = start;
3146
3147 return 0;
3148 }
3149
3150 static const size_t btf_sec_info_offset[] = {
3151 offsetof(struct btf_header, type_off),
3152 offsetof(struct btf_header, str_off),
3153 };
3154
3155 static int btf_sec_info_cmp(const void *a, const void *b)
3156 {
3157 const struct btf_sec_info *x = a;
3158 const struct btf_sec_info *y = b;
3159
3160 return (int)(x->off - y->off) ? : (int)(x->len - y->len);
3161 }
3162
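/* Make sure the type section and the string section exactly cover the
 * area after the header: no gaps, no overlap and no unknown data.
 */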
3163 static int btf_check_sec_info(struct btf_verifier_env *env,
3164 u32 btf_data_size)
3165 {
3166 struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
3167 u32 total, expected_total, i;
3168 const struct btf_header *hdr;
3169 const struct btf *btf;
3170
3171 btf = env->btf;
3172 hdr = &btf->hdr;
3173
3174 /* Populate the secs from hdr */
3175 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
3176 secs[i] = *(struct btf_sec_info *)((void *)hdr +
3177 btf_sec_info_offset[i]);
3178
3179 sort(secs, ARRAY_SIZE(btf_sec_info_offset),
3180 sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
3181
3182 /* Check for gaps and overlap among sections */
3183 total = 0;
3184 expected_total = btf_data_size - hdr->hdr_len;
3185 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
3186 if (expected_total < secs[i].off) {
3187 btf_verifier_log(env, "Invalid section offset");
3188 return -EINVAL;
3189 }
3190 if (total < secs[i].off) {
3191 /* gap */
3192 btf_verifier_log(env, "Unsupported section found");
3193 return -EINVAL;
3194 }
3195 if (total > secs[i].off) {
3196 btf_verifier_log(env, "Section overlap found");
3197 return -EINVAL;
3198 }
3199 if (expected_total - total < secs[i].len) {
3200 btf_verifier_log(env,
3201 "Total section length too long");
3202 return -EINVAL;
3203 }
3204 total += secs[i].len;
3205 }
3206
3207 /* There is data other than hdr and known sections */
3208 if (expected_total != total) {
3209 btf_verifier_log(env, "Unsupported section found");
3210 return -EINVAL;
3211 }
3212
3213 return 0;
3214 }
3215
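/* Validate the btf_header: magic, version and flags must match, any
 * header bytes beyond the kernel's known struct btf_header must be
 * zero (forward compatibility), and the section layout must be sane.
 */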
3216 static int btf_parse_hdr(struct btf_verifier_env *env)
3217 {
3218 u32 hdr_len, hdr_copy, btf_data_size;
3219 const struct btf_header *hdr;
3220 struct btf *btf;
3221 int err;
3222
3223 btf = env->btf;
3224 btf_data_size = btf->data_size;
3225
3226 if (btf_data_size <
3227 offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
3228 btf_verifier_log(env, "hdr_len not found");
3229 return -EINVAL;
3230 }
3231
3232 hdr = btf->data;
3233 hdr_len = hdr->hdr_len;
3234 if (btf_data_size < hdr_len) {
3235 btf_verifier_log(env, "btf_header not found");
3236 return -EINVAL;
3237 }
3238
3239 /* Ensure the unsupported header fields are zero */
3240 if (hdr_len > sizeof(btf->hdr)) {
3241 u8 *expected_zero = btf->data + sizeof(btf->hdr);
3242 u8 *end = btf->data + hdr_len;
3243
3244 for (; expected_zero < end; expected_zero++) {
3245 if (*expected_zero) {
3246 btf_verifier_log(env, "Unsupported btf_header");
3247 return -E2BIG;
3248 }
3249 }
3250 }
3251
3252 hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
3253 memcpy(&btf->hdr, btf->data, hdr_copy);
3254
3255 hdr = &btf->hdr;
3256
3257 btf_verifier_log_hdr(env, btf_data_size);
3258
3259 if (hdr->magic != BTF_MAGIC) {
3260 btf_verifier_log(env, "Invalid magic");
3261 return -EINVAL;
3262 }
3263
3264 if (hdr->version != BTF_VERSION) {
3265 btf_verifier_log(env, "Unsupported version");
3266 return -ENOTSUPP;
3267 }
3268
3269 if (hdr->flags) {
3270 btf_verifier_log(env, "Unsupported flags");
3271 return -ENOTSUPP;
3272 }
3273
3274 if (btf_data_size == hdr->hdr_len) {
3275 btf_verifier_log(env, "No data");
3276 return -EINVAL;
3277 }
3278
3279 err = btf_check_sec_info(env, btf_data_size);
3280 if (err)
3281 return err;
3282
3283 return 0;
3284 }
3285
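/* Parse and verify a BTF blob supplied by userspace: copy it in, then
 * verify the header, the string section and the type section.  On
 * success the returned btf has its refcount set to 1.
 */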
3286 static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
3287 u32 log_level, char __user *log_ubuf, u32 log_size)
3288 {
3289 struct btf_verifier_env *env = NULL;
3290 struct bpf_verifier_log *log;
3291 struct btf *btf = NULL;
3292 u8 *data;
3293 int err;
3294
3295 if (btf_data_size > BTF_MAX_SIZE)
3296 return ERR_PTR(-E2BIG);
3297
3298 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
3299 if (!env)
3300 return ERR_PTR(-ENOMEM);
3301
3302 log = &env->log;
3303 if (log_level || log_ubuf || log_size) {
3304 /* user requested verbose verifier output
3305 * and supplied buffer to store the verification trace
3306 */
3307 log->level = log_level;
3308 log->ubuf = log_ubuf;
3309 log->len_total = log_size;
3310
3311 /* log attributes have to be sane */
3312 if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
3313 !log->level || !log->ubuf) {
3314 err = -EINVAL;
3315 goto errout;
3316 }
3317 }
3318
3319 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
3320 if (!btf) {
3321 err = -ENOMEM;
3322 goto errout;
3323 }
3324 env->btf = btf;
3325
3326 data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
3327 if (!data) {
3328 err = -ENOMEM;
3329 goto errout;
3330 }
3331
3332 btf->data = data;
3333 btf->data_size = btf_data_size;
3334
3335 if (copy_from_user(data, btf_data, btf_data_size)) {
3336 err = -EFAULT;
3337 goto errout;
3338 }
3339
3340 err = btf_parse_hdr(env);
3341 if (err)
3342 goto errout;
3343
3344 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
3345
3346 err = btf_parse_str_sec(env);
3347 if (err)
3348 goto errout;
3349
3350 err = btf_parse_type_sec(env);
3351 if (err)
3352 goto errout;
3353
3354 if (log->level && bpf_verifier_log_full(log)) {
3355 err = -ENOSPC;
3356 goto errout;
3357 }
3358
3359 btf_verifier_env_free(env);
3360 refcount_set(&btf->refcnt, 1);
3361 return btf;
3362
3363 errout:
3364 btf_verifier_env_free(env);
3365 if (btf)
3366 btf_free(btf);
3367 return ERR_PTR(err);
3368 }
3369
3370 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
3371 struct seq_file *m)
3372 {
3373 const struct btf_type *t = btf_type_by_id(btf, type_id);
3374
3375 btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
3376 }
3377
3378 #ifdef CONFIG_PROC_FS
3379 static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
3380 {
3381 const struct btf *btf = filp->private_data;
3382
3383 seq_printf(m, "btf_id:\t%u\n", btf->id);
3384 }
3385 #endif
3386
3387 static int btf_release(struct inode *inode, struct file *filp)
3388 {
3389 btf_put(filp->private_data);
3390 return 0;
3391 }
3392
3393 const struct file_operations btf_fops = {
3394 #ifdef CONFIG_PROC_FS
3395 .show_fdinfo = bpf_btf_show_fdinfo,
3396 #endif
3397 .release = btf_release,
3398 };
3399
3400 static int __btf_new_fd(struct btf *btf)
3401 {
3402 return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
3403 }
3404
3405 int btf_new_fd(const union bpf_attr *attr)
3406 {
3407 struct btf *btf;
3408 int ret;
3409
3410 btf = btf_parse(u64_to_user_ptr(attr->btf),
3411 attr->btf_size, attr->btf_log_level,
3412 u64_to_user_ptr(attr->btf_log_buf),
3413 attr->btf_log_size);
3414 if (IS_ERR(btf))
3415 return PTR_ERR(btf);
3416
3417 ret = btf_alloc_id(btf);
3418 if (ret) {
3419 btf_free(btf);
3420 return ret;
3421 }
3422
3423 /*
3424 * The BTF ID is published to userspace.
3425 * All freeing of this BTF must go through call_rcu() from
3426 * now on (i.e. free by calling btf_put()).
3427 */
3428
3429 ret = __btf_new_fd(btf);
3430 if (ret < 0)
3431 btf_put(btf);
3432
3433 return ret;
3434 }
3435
3436 struct btf *btf_get_by_fd(int fd)
3437 {
3438 struct btf *btf;
3439 struct fd f;
3440
3441 f = fdget(fd);
3442
3443 if (!f.file)
3444 return ERR_PTR(-EBADF);
3445
3446 if (f.file->f_op != &btf_fops) {
3447 fdput(f);
3448 return ERR_PTR(-EINVAL);
3449 }
3450
3451 btf = f.file->private_data;
3452 refcount_inc(&btf->refcnt);
3453 fdput(f);
3454
3455 return btf;
3456 }
3457
3458 int btf_get_info_by_fd(const struct btf *btf,
3459 const union bpf_attr *attr,
3460 union bpf_attr __user *uattr)
3461 {
3462 struct bpf_btf_info __user *uinfo;
3463 struct bpf_btf_info info = {};
3464 u32 info_copy, btf_copy;
3465 void __user *ubtf;
3466 u32 uinfo_len;
3467
3468 uinfo = u64_to_user_ptr(attr->info.info);
3469 uinfo_len = attr->info.info_len;
3470
3471 info_copy = min_t(u32, uinfo_len, sizeof(info));
3472 if (copy_from_user(&info, uinfo, info_copy))
3473 return -EFAULT;
3474
3475 info.id = btf->id;
3476 ubtf = u64_to_user_ptr(info.btf);
3477 btf_copy = min_t(u32, btf->data_size, info.btf_size);
3478 if (copy_to_user(ubtf, btf->data, btf_copy))
3479 return -EFAULT;
3480 info.btf_size = btf->data_size;
3481
3482 if (copy_to_user(uinfo, &info, info_copy) ||
3483 put_user(info_copy, &uattr->info.info_len))
3484 return -EFAULT;
3485
3486 return 0;
3487 }
3488
3489 int btf_get_fd_by_id(u32 id)
3490 {
3491 struct btf *btf;
3492 int fd;
3493
3494 rcu_read_lock();
3495 btf = idr_find(&btf_idr, id);
3496 if (!btf || !refcount_inc_not_zero(&btf->refcnt))
3497 btf = ERR_PTR(-ENOENT);
3498 rcu_read_unlock();
3499
3500 if (IS_ERR(btf))
3501 return PTR_ERR(btf);
3502
3503 fd = __btf_new_fd(btf);
3504 if (fd < 0)
3505 btf_put(btf);
3506
3507 return fd;
3508 }
3509
3510 u32 btf_id(const struct btf *btf)
3511 {
3512 return btf->id;
3513 }
3514