// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

#define BPF_INSN_SZ (sizeof(struct bpf_insn))
/* vfprintf() in __base_pr() uses a nonliteral format string. It may break
 * compilation if the user enables the corresponding warning. Disable it
 * explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b) __attribute__((format(printf, a, b)))

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);

static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
	libbpf_print_fn_t old_print_fn = __libbpf_pr;

	__libbpf_pr = fn;
	return old_print_fn;
}

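/* Example (an illustrative sketch, not part of libbpf itself): a caller can
 * route libbpf's log output through its own callback, e.g. to suppress
 * debug-level messages, mirroring what __base_pr() above does:
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *fmt, va_list args)
 *	{
 *		if (level == LIBBPF_DEBUG)
 *			return 0;
 *		return vfprintf(stderr, fmt, args);
 *	}
 *
 *	libbpf_set_print(my_print);
 */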
__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;

	if (!__libbpf_pr)
		return;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);
}

static void pr_perm_msg(int err)
{
	struct rlimit limit;
	char buf[100];

	if (err != -EPERM || geteuid() != 0)
		return;

	err = getrlimit(RLIMIT_MEMLOCK, &limit);
	if (err)
		return;

	if (limit.rlim_cur == RLIM_INFINITY)
		return;

	if (limit.rlim_cur < 1024)
		snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
	else if (limit.rlim_cur < 1024*1024)
		snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
	else
		snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));

	pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
		buf);
}

#define STRERR_BUFSIZE 128

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

enum kern_feature_id {
	/* v4.14: kernel support for program & map names. */
	FEAT_PROG_NAME,
	/* v5.2: kernel support for global data sections. */
	FEAT_GLOBAL_DATA,
	/* BTF support */
	FEAT_BTF,
	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
	FEAT_BTF_FUNC,
	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
	FEAT_BTF_DATASEC,
	/* BTF_FUNC_GLOBAL is supported */
	FEAT_BTF_GLOBAL_FUNC,
	/* BPF_F_MMAPABLE is supported for arrays */
	FEAT_ARRAY_MMAP,
	/* kernel support for expected_attach_type in BPF_PROG_LOAD */
	FEAT_EXP_ATTACH_TYPE,
	/* bpf_probe_read_{kernel,user}[_str] helpers */
	FEAT_PROBE_READ_KERN,
	/* BPF_PROG_BIND_MAP is supported */
	FEAT_PROG_BIND_MAP,
	__FEAT_CNT,
};

static bool kernel_supports(enum kern_feature_id feat_id);

enum reloc_type {
	RELO_LD64,
	RELO_CALL,
	RELO_DATA,
	RELO_EXTERN,
};

struct reloc_desc {
	enum reloc_type type;
	int insn_idx;
	int map_idx;
	int sym_off;
	bool processed;
};

struct bpf_sec_def;

typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
					struct bpf_program *prog);

struct bpf_sec_def {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	bool is_exp_attach_type_optional;
	bool is_attachable;
	bool is_attach_btf;
	bool is_sleepable;
	attach_fn_t attach_fn;
};

/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	const struct bpf_sec_def *sec_def;
	char *sec_name;
	size_t sec_idx;
	/* this program's instruction offset (in number of instructions)
	 * within its containing ELF section
	 */
	size_t sec_insn_off;
	/* number of original instructions in the ELF section belonging to
	 * this program, not counting sub-program instructions possibly
	 * appended later during relocation
	 */
	size_t sec_insn_cnt;
	/* Offset (in number of instructions) of the start of the instructions
	 * belonging to this BPF program within its containing main BPF
	 * program. For the entry-point (main) BPF program, this is always
	 * zero. For a sub-program, this gets reset before each main BPF
	 * program is processed and relocated, and is used to determine
	 * whether the sub-program was already appended to the main program,
	 * and if so, at which instruction offset.
	 */
	size_t sub_insn_off;

	char *name;
	/* sec_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;

	/* instructions that belong to BPF program; insns[0] is located at
	 * sec_insn_off instruction within its ELF section in ELF file, so
	 * when mapping ELF file instruction index to the local instruction,
	 * one needs to subtract sec_insn_off; and vice versa.
	 */
	struct bpf_insn *insns;
	/* actual number of instructions in this BPF program's image; for
	 * entry-point BPF programs this includes the size of the main program
	 * itself plus all the used sub-programs, appended at the end
	 */
	size_t insns_cnt;

	struct reloc_desc *reloc_desc;
	int nr_reloc;
	int log_level;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	bool load;
	enum bpf_prog_type type;
	enum bpf_attach_type expected_attach_type;
	int prog_ifindex;
	__u32 attach_btf_id;
	__u32 attach_prog_fd;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};

struct bpf_struct_ops {
	const char *tname;
	const struct btf_type *type;
	struct bpf_program **progs;
	__u32 *kern_func_off;
	/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
	void *data;
	/* e.g. struct bpf_struct_ops_tcp_congestion_ops in
	 * btf_vmlinux's format.
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[... some other kernel fields ...]
	 *	struct tcp_congestion_ops data;
	 * }
	 * kern_vdata's size == sizeof(struct bpf_struct_ops_tcp_congestion_ops).
	 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
	 * from "data".
	 */
	void *kern_vdata;
	__u32 type_id;
};

#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
	LIBBPF_MAP_KCONFIG,
};

static const char * const libbpf_type_to_btf_name[] = {
	[LIBBPF_MAP_DATA]	= DATA_SEC,
	[LIBBPF_MAP_BSS]	= BSS_SEC,
	[LIBBPF_MAP_RODATA]	= RODATA_SEC,
	[LIBBPF_MAP_KCONFIG]	= KCONFIG_SEC,
};

struct bpf_map {
	char *name;
	int fd;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 numa_node;
	__u32 btf_var_idx;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 btf_vmlinux_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;
	void *mmaped;
	struct bpf_struct_ops *st_ops;
	struct bpf_map *inner_map;
	void **init_slots;
	int init_slots_sz;
	char *pin_path;
	bool pinned;
	bool reused;
};

enum extern_type {
	EXT_UNKNOWN,
	EXT_KCFG,
	EXT_KSYM,
};

enum kcfg_type {
	KCFG_UNKNOWN,
	KCFG_CHAR,
	KCFG_BOOL,
	KCFG_INT,
	KCFG_TRISTATE,
	KCFG_CHAR_ARR,
};

struct extern_desc {
	enum extern_type type;
	int sym_idx;
	int btf_id;
	int sec_btf_id;
	const char *name;
	bool is_set;
	bool is_weak;
	union {
		struct {
			enum kcfg_type type;
			int sz;
			int align;
			int data_off;
			bool is_signed;
		} kcfg;
		struct {
			unsigned long long addr;

			/* target btf_id of the corresponding kernel var. */
			int vmlinux_btf_id;

			/* local btf_id of the ksym extern's type. */
			__u32 type_id;
		} ksym;
	};
};

static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;

	char *kconfig;
	struct extern_desc *externs;
	int nr_extern;
	int kconfig_map_idx;
	int rodata_map_idx;

	bool loaded;
	bool has_subcalls;
	/*
	 * Information used when doing ELF-related work. Only valid
	 * while fd is valid.
	 */
	struct {
		int fd;
		const void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		Elf_Data *data;
		Elf_Data *rodata;
		Elf_Data *bss;
		Elf_Data *st_ops_data;
		size_t shstrndx; /* section index for section name strings */
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc_sects;
		int nr_reloc_sects;
		int maps_shndx;
		int btf_maps_shndx;
		__u32 btf_maps_sec_btf_id;
		int text_shndx;
		int symbols_shndx;
		int data_shndx;
		int rodata_shndx;
		int bss_shndx;
		int st_ops_shndx;
	} efile;
	/*
	 * All loaded bpf_objects are linked in a list, which is
	 * hidden from the caller. bpf_objects__<func> handlers deal
	 * with all objects.
	 */
	struct list_head list;

	struct btf *btf;
	/* Parse and load BTF vmlinux if any of the programs in the object need
	 * it at load time.
	 */
	struct btf *btf_vmlinux;
	struct btf_ext *btf_ext;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	char path[];
};
#define obj_elf_valid(o) ((o)->efile.elf)

static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
			      size_t off, __u32 sym_type, GElf_Sym *sym);

void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warn("Internal error: instances.nr is %d\n",
			prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->sec_name);
	zfree(&prog->pin_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->sec_idx = -1;
}

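/* For example, a program in section "cgroup/skb" gets pin_name "cgroup_skb",
 * so recursive pinning never has to create nested directories for the '/'
 * in the section name.
 */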
static char *__bpf_program__pin_name(struct bpf_program *prog)
{
	char *name, *p;

	name = p = strdup(prog->sec_name);
	while ((p = strchr(p, '/')))
		*p = '_';

	return name;
}

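/* A sub-program (bpf-to-bpf) call instruction is a BPF_CALL whose src_reg is
 * set to BPF_PSEUDO_CALL; its imm field then holds an instruction-offset
 * delta to the callee rather than a helper function ID.
 */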
static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       BPF_OP(insn->code) == BPF_CALL &&
	       BPF_SRC(insn->code) == BPF_K &&
	       insn->src_reg == BPF_PSEUDO_CALL &&
	       insn->dst_reg == 0 &&
	       insn->off == 0;
}

static int
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
		      const char *name, size_t sec_idx, const char *sec_name,
		      size_t sec_off, void *insn_data, size_t insn_data_sz)
{
	if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
		pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
			sec_name, name, sec_off, insn_data_sz);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));
	prog->obj = obj;

	prog->sec_idx = sec_idx;
	prog->sec_insn_off = sec_off / BPF_INSN_SZ;
	prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
	/* insns_cnt can later be increased by appending used subprograms */
	prog->insns_cnt = prog->sec_insn_cnt;

	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->load = true;

	prog->instances.fds = NULL;
	prog->instances.nr = -1;

	prog->sec_name = strdup(sec_name);
	if (!prog->sec_name)
		goto errout;

	prog->name = strdup(name);
	if (!prog->name)
		goto errout;

	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name)
		goto errout;

	prog->insns = malloc(insn_data_sz);
	if (!prog->insns)
		goto errout;
	memcpy(prog->insns, insn_data, insn_data_sz);

	return 0;
errout:
	pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
			 const char *sec_name, int sec_idx)
{
	struct bpf_program *prog, *progs;
	void *data = sec_data->d_buf;
	size_t sec_sz = sec_data->d_size, sec_off, prog_sz;
	int nr_progs, err;
	const char *name;
	GElf_Sym sym;

	progs = obj->programs;
	nr_progs = obj->nr_programs;
	sec_off = 0;

	while (sec_off < sec_sz) {
		if (elf_sym_by_sec_off(obj, sec_idx, sec_off, STT_FUNC, &sym)) {
			pr_warn("sec '%s': failed to find program symbol at offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		prog_sz = sym.st_size;

		name = elf_sym_str(obj, sym.st_name);
		if (!name) {
			pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_off + prog_sz > sec_sz) {
			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
			 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);

		progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
		if (!progs) {
			/*
			 * In this case the original obj->programs
			 * is still valid, so no special treatment is
			 * needed in bpf_object__close().
			 */
655 pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
656 sec_name, name);
657 return -ENOMEM;
658 }
659 obj->programs = progs;
660
661 prog = &progs[nr_progs];
662
663 err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
664 sec_off, data + sec_off, prog_sz);
665 if (err)
666 return err;
667
668 nr_progs++;
669 obj->nr_programs = nr_progs;
670
671 sec_off += prog_sz;
672 }
673
674 return 0;
675 }
676
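/* For example, on a kernel with release string "5.4.0-42-generic", this
 * yields KERNEL_VERSION(5, 4, 0) == 0x050400.
 */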
static __u32 get_kernel_version(void)
{
	__u32 major, minor, patch;
	struct utsname info;

	uname(&info);
	if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
		return 0;
	return KERNEL_VERSION(major, minor, patch);
}

static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (btf_member_bit_offset(t, i) == bit_offset)
			return m;
	}

	return NULL;
}

static const struct btf_member *
find_member_by_name(const struct btf *btf, const struct btf_type *t,
		    const char *name)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
			return m;
	}

	return NULL;
}

#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
				   const char *name, __u32 kind);

static int
find_struct_ops_kern_types(const struct btf *btf, const char *tname,
			   const struct btf_type **type, __u32 *type_id,
			   const struct btf_type **vtype, __u32 *vtype_id,
			   const struct btf_member **data_member)
{
	const struct btf_type *kern_type, *kern_vtype;
	const struct btf_member *kern_data_member;
	__s32 kern_vtype_id, kern_type_id;
	__u32 i;

	kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
	if (kern_type_id < 0) {
		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
			tname);
		return kern_type_id;
	}
	kern_type = btf__type_by_id(btf, kern_type_id);

	/* Find the corresponding "map_value" type that will be used
	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example,
	 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
	 * btf_vmlinux.
	 */
	kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
						tname, BTF_KIND_STRUCT);
	if (kern_vtype_id < 0) {
		pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
			STRUCT_OPS_VALUE_PREFIX, tname);
		return kern_vtype_id;
	}
	kern_vtype = btf__type_by_id(btf, kern_vtype_id);

	/* Find "struct tcp_congestion_ops" from
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[ ... ]
	 *	struct tcp_congestion_ops data;
	 * }
	 */
	kern_data_member = btf_members(kern_vtype);
	for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
		if (kern_data_member->type == kern_type_id)
			break;
	}
	if (i == btf_vlen(kern_vtype)) {
		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
			tname, STRUCT_OPS_VALUE_PREFIX, tname);
		return -EINVAL;
	}

	*type = kern_type;
	*type_id = kern_type_id;
	*vtype = kern_vtype;
	*vtype_id = kern_vtype_id;
	*data_member = kern_data_member;

	return 0;
}

static bool bpf_map__is_struct_ops(const struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}

/* Init the map's fields that depend on kern_btf */
static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
					 const struct btf *btf,
					 const struct btf *kern_btf)
{
	const struct btf_member *member, *kern_member, *kern_data_member;
	const struct btf_type *type, *kern_type, *kern_vtype;
	__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
	struct bpf_struct_ops *st_ops;
	void *data, *kern_data;
	const char *tname;
	int err;

	st_ops = map->st_ops;
	type = st_ops->type;
	tname = st_ops->tname;
	err = find_struct_ops_kern_types(kern_btf, tname,
					 &kern_type, &kern_type_id,
					 &kern_vtype, &kern_vtype_id,
					 &kern_data_member);
	if (err)
		return err;

	pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
		 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);

	map->def.value_size = kern_vtype->size;
	map->btf_vmlinux_value_type_id = kern_vtype_id;

	st_ops->kern_vdata = calloc(1, kern_vtype->size);
	if (!st_ops->kern_vdata)
		return -ENOMEM;

	data = st_ops->data;
	kern_data_off = kern_data_member->offset / 8;
	kern_data = st_ops->kern_vdata + kern_data_off;

	member = btf_members(type);
	for (i = 0; i < btf_vlen(type); i++, member++) {
		const struct btf_type *mtype, *kern_mtype;
		__u32 mtype_id, kern_mtype_id;
		void *mdata, *kern_mdata;
		__s64 msize, kern_msize;
		__u32 moff, kern_moff;
		__u32 kern_member_idx;
		const char *mname;

		mname = btf__name_by_offset(btf, member->name_off);
		kern_member = find_member_by_name(kern_btf, kern_type, mname);
		if (!kern_member) {
			pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
				map->name, mname);
			return -ENOTSUP;
		}

		kern_member_idx = kern_member - btf_members(kern_type);
		if (btf_member_bitfield_size(type, i) ||
		    btf_member_bitfield_size(kern_type, kern_member_idx)) {
			pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
				map->name, mname);
			return -ENOTSUP;
		}

		moff = member->offset / 8;
		kern_moff = kern_member->offset / 8;

		mdata = data + moff;
		kern_mdata = kern_data + kern_moff;

		mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
		kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
						    &kern_mtype_id);
		if (BTF_INFO_KIND(mtype->info) !=
		    BTF_INFO_KIND(kern_mtype->info)) {
			pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
				map->name, mname, BTF_INFO_KIND(mtype->info),
				BTF_INFO_KIND(kern_mtype->info));
			return -ENOTSUP;
		}

		if (btf_is_ptr(mtype)) {
			struct bpf_program *prog;

			mtype = skip_mods_and_typedefs(btf, mtype->type, &mtype_id);
			kern_mtype = skip_mods_and_typedefs(kern_btf,
							    kern_mtype->type,
							    &kern_mtype_id);
			if (!btf_is_func_proto(mtype) ||
			    !btf_is_func_proto(kern_mtype)) {
				pr_warn("struct_ops init_kern %s: non func ptr %s is not supported\n",
					map->name, mname);
				return -ENOTSUP;
			}

			prog = st_ops->progs[i];
			if (!prog) {
				pr_debug("struct_ops init_kern %s: func ptr %s is not set\n",
					 map->name, mname);
				continue;
			}

			prog->attach_btf_id = kern_type_id;
			prog->expected_attach_type = kern_member_idx;

			st_ops->kern_func_off[i] = kern_data_off + kern_moff;

			pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
				 map->name, mname, prog->name, moff,
				 kern_moff);

			continue;
		}

		msize = btf__resolve_size(btf, mtype_id);
		kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
		if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
			pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
				map->name, mname, (ssize_t)msize,
				(ssize_t)kern_msize);
			return -ENOTSUP;
		}

		pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
			 map->name, mname, (unsigned int)msize,
			 moff, kern_moff);
		memcpy(kern_mdata, mdata, msize);
	}

	return 0;
}

static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
{
	struct bpf_map *map;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];

		if (!bpf_map__is_struct_ops(map))
			continue;

		err = bpf_map__init_kern_struct_ops(map, obj->btf,
						    obj->btf_vmlinux);
		if (err)
			return err;
	}

	return 0;
}

static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
{
	const struct btf_type *type, *datasec;
	const struct btf_var_secinfo *vsi;
	struct bpf_struct_ops *st_ops;
	const char *tname, *var_name;
	__s32 type_id, datasec_id;
	const struct btf *btf;
	struct bpf_map *map;
	__u32 i;

	if (obj->efile.st_ops_shndx == -1)
		return 0;

	btf = obj->btf;
	datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
					    BTF_KIND_DATASEC);
	if (datasec_id < 0) {
		pr_warn("struct_ops init: DATASEC %s not found\n",
			STRUCT_OPS_SEC);
		return -EINVAL;
	}

	datasec = btf__type_by_id(btf, datasec_id);
	vsi = btf_var_secinfos(datasec);
	for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
		type = btf__type_by_id(obj->btf, vsi->type);
		var_name = btf__name_by_offset(obj->btf, type->name_off);

		type_id = btf__resolve_type(obj->btf, vsi->type);
		if (type_id < 0) {
			pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
				vsi->type, STRUCT_OPS_SEC);
			return -EINVAL;
		}

		type = btf__type_by_id(obj->btf, type_id);
		tname = btf__name_by_offset(obj->btf, type->name_off);
		if (!tname[0]) {
			pr_warn("struct_ops init: anonymous type is not supported\n");
			return -ENOTSUP;
		}
		if (!btf_is_struct(type)) {
			pr_warn("struct_ops init: %s is not a struct\n", tname);
			return -EINVAL;
		}

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map->sec_idx = obj->efile.st_ops_shndx;
		map->sec_offset = vsi->offset;
		map->name = strdup(var_name);
		if (!map->name)
			return -ENOMEM;

		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
		map->def.key_size = sizeof(int);
		map->def.value_size = type->size;
		map->def.max_entries = 1;

		map->st_ops = calloc(1, sizeof(*map->st_ops));
		if (!map->st_ops)
			return -ENOMEM;
		st_ops = map->st_ops;
		st_ops->data = malloc(type->size);
		st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
		st_ops->kern_func_off = malloc(btf_vlen(type) *
					       sizeof(*st_ops->kern_func_off));
		if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
			return -ENOMEM;

		if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
			pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
				var_name, STRUCT_OPS_SEC);
			return -EINVAL;
		}

		memcpy(st_ops->data,
		       obj->efile.st_ops_data->d_buf + vsi->offset,
		       type->size);
		st_ops->tname = tname;
		st_ops->type = type;
		st_ops->type_id = type_id;

		pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
			 tname, type_id, var_name, vsi->offset);
	}

	return 0;
}

static struct bpf_object *bpf_object__new(const char *path,
					  const void *obj_buf,
					  size_t obj_buf_sz,
					  const char *obj_name)
{
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warn("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	if (obj_name) {
		strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
		obj->name[sizeof(obj->name) - 1] = 0;
	} else {
		/* Using basename() GNU version which doesn't modify arg. */
		strncpy(obj->name, basename((void *)path),
			sizeof(obj->name) - 1);
		end = strchr(obj->name, '.');
		if (end)
			*end = 0;
	}

	obj->efile.fd = -1;
	/*
	 * The caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to the user. Otherwise, we would have to duplicate
	 * the buffer to avoid the user freeing it before ELF
	 * processing finishes.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;
	obj->efile.btf_maps_shndx = -1;
	obj->efile.data_shndx = -1;
	obj->efile.rodata_shndx = -1;
	obj->efile.bss_shndx = -1;
	obj->efile.st_ops_shndx = -1;
	obj->kconfig_map_idx = -1;
	obj->rodata_map_idx = -1;

	obj->kern_version = get_kernel_version();
	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;
	obj->efile.data = NULL;
	obj->efile.rodata = NULL;
	obj->efile.bss = NULL;
	obj->efile.st_ops_data = NULL;

	zfree(&obj->efile.reloc_sects);
	obj->efile.nr_reloc_sects = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

/* if libelf is old and doesn't support mmap(), fall back to read() */
#ifndef ELF_C_READ_MMAP
#define ELF_C_READ_MMAP ELF_C_READ
#endif

static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warn("elf: init internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
	}

	if (!obj->efile.elf) {
		pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) {
		pr_warn("elf: failed to get section names section index for %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* ELF is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
		pr_warn("elf: failed to get section names strings from %s: %s\n",
			obj->path, elf_errmsg(-1));
		return -LIBBPF_ERRNO__FORMAT;
	}

	/* Old LLVM set e_machine to EM_NONE */
	if (ep->e_type != ET_REL ||
	    (ep->e_machine && ep->e_machine != EM_BPF)) {
		pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
#elif __BYTE_ORDER == __BIG_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
		return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	pr_warn("elf: endianness mismatch in %s.\n", obj->path);
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
	memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
	__u32 kver;

	if (size != sizeof(kver)) {
		pr_warn("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
	return 0;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
		return true;
	return false;
}

int bpf_object__section_size(const struct bpf_object *obj, const char *name,
			     __u32 *size)
{
	int ret = -ENOENT;

	*size = 0;
	if (!name) {
		return -EINVAL;
	} else if (!strcmp(name, DATA_SEC)) {
		if (obj->efile.data)
			*size = obj->efile.data->d_size;
	} else if (!strcmp(name, BSS_SEC)) {
		if (obj->efile.bss)
			*size = obj->efile.bss->d_size;
	} else if (!strcmp(name, RODATA_SEC)) {
		if (obj->efile.rodata)
			*size = obj->efile.rodata->d_size;
	} else if (!strcmp(name, STRUCT_OPS_SEC)) {
		if (obj->efile.st_ops_data)
			*size = obj->efile.st_ops_data->d_size;
	} else {
		Elf_Scn *scn = elf_sec_by_name(obj, name);
		Elf_Data *data = elf_sec_data(obj, scn);

		if (data) {
			ret = 0; /* found it */
			*size = data->d_size;
		}
	}

	return *size ? 0 : ret;
}

int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	if (!name || !off)
		return -EINVAL;

	for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, si, &sym))
			continue;
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
			continue;

		sname = elf_sym_str(obj, sym.st_name);
		if (!sname) {
			pr_warn("failed to get sym name string for var %s\n",
				name);
			return -EIO;
		}
		if (strcmp(name, sname) == 0) {
			*off = sym.st_value;
			return 0;
		}
	}

	return -ENOENT;
}

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
	struct bpf_map *new_maps;
	size_t new_cap;
	int i;

	if (obj->nr_maps < obj->maps_cap)
		return &obj->maps[obj->nr_maps++];

	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
	new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps));
	if (!new_maps) {
		pr_warn("alloc maps for object failed\n");
		return ERR_PTR(-ENOMEM);
	}

	obj->maps_cap = new_cap;
	obj->maps = new_maps;

	/* zero out new maps */
	memset(obj->maps + obj->nr_maps, 0,
	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
	/*
	 * Fill all fds with -1 so we won't close an incorrect fd
	 * (fd=0 is stdin) on failure (zclose won't close a negative fd).
	 */
	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	return &obj->maps[obj->nr_maps++];
}

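/* For example (assuming a 4 KiB page size): value_size = 7 and
 * max_entries = 100 gives roundup(7, 8) * 100 = 800 bytes, which is then
 * rounded up to one 4096-byte page.
 */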
static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}

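/* Worked example: with BPF_OBJ_NAME_LEN == 16, an object named "test_prog"
 * and the .rodata section produce "test_pro.rodata": the suffix keeps at
 * least 7 characters and the object-name prefix is truncated to fit.
 */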
static char *internal_map_name(struct bpf_object *obj,
			       enum libbpf_map_type type)
{
	char map_name[BPF_OBJ_NAME_LEN], *p;
	const char *sfx = libbpf_type_to_btf_name[type];
	int sfx_len = max((size_t)7, strlen(sfx));
	int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1,
			  strlen(obj->name));

	snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
		 sfx_len, libbpf_type_to_btf_name[type]);

	/* sanitise map name to characters allowed by kernel */
	for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
		if (!isalnum(*p) && *p != '_' && *p != '.')
			*p = '_';

	return strdup(map_name);
}

static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
			      int sec_idx, void *data, size_t data_sz)
{
	struct bpf_map_def *def;
	struct bpf_map *map;
	int err;

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map->libbpf_type = type;
	map->sec_idx = sec_idx;
	map->sec_offset = 0;
	map->name = internal_map_name(obj, type);
	if (!map->name) {
		pr_warn("failed to alloc map name\n");
		return -ENOMEM;
	}

	def = &map->def;
	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data_sz;
	def->max_entries = 1;
	def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
			 ? BPF_F_RDONLY_PROG : 0;
	def->map_flags |= BPF_F_MMAPABLE;

	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
		 map->name, map->sec_idx, map->sec_offset, def->map_flags);

	map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (map->mmaped == MAP_FAILED) {
		err = -errno;
		map->mmaped = NULL;
		pr_warn("failed to alloc map '%s' content buffer: %d\n",
			map->name, err);
		zfree(&map->name);
		return err;
	}

	if (data)
		memcpy(map->mmaped, data, data_sz);

	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}

static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
	int err;

	/*
	 * Populate obj->maps with libbpf internal maps.
	 */
	if (obj->efile.data_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
						    obj->efile.data_shndx,
						    obj->efile.data->d_buf,
						    obj->efile.data->d_size);
		if (err)
			return err;
	}
	if (obj->efile.rodata_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
						    obj->efile.rodata_shndx,
						    obj->efile.rodata->d_buf,
						    obj->efile.rodata->d_size);
		if (err)
			return err;

		obj->rodata_map_idx = obj->nr_maps - 1;
	}
	if (obj->efile.bss_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
						    obj->efile.bss_shndx,
						    NULL,
						    obj->efile.bss->d_size);
		if (err)
			return err;
	}
	return 0;
}


static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
					       const void *name)
{
	int i;

	for (i = 0; i < obj->nr_extern; i++) {
		if (strcmp(obj->externs[i].name, name) == 0)
			return &obj->externs[i];
	}
	return NULL;
}

static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
			      char value)
{
	switch (ext->kcfg.type) {
	case KCFG_BOOL:
		if (value == 'm') {
			pr_warn("extern (kcfg) %s=%c should be tristate or char\n",
				ext->name, value);
			return -EINVAL;
		}
		*(bool *)ext_val = value == 'y' ? true : false;
		break;
	case KCFG_TRISTATE:
		if (value == 'y')
			*(enum libbpf_tristate *)ext_val = TRI_YES;
		else if (value == 'm')
			*(enum libbpf_tristate *)ext_val = TRI_MODULE;
		else /* value == 'n' */
			*(enum libbpf_tristate *)ext_val = TRI_NO;
		break;
	case KCFG_CHAR:
		*(char *)ext_val = value;
		break;
	case KCFG_UNKNOWN:
	case KCFG_INT:
	case KCFG_CHAR_ARR:
	default:
		pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n",
			ext->name, value);
		return -EINVAL;
	}
	ext->is_set = true;
	return 0;
}

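/* For example, a Kconfig line like CONFIG_DEFAULT_HOSTNAME="(none)" reaches
 * this function with value still carrying both quotes; the quotes are
 * stripped and the string is truncated, if necessary, to fit the extern's
 * char array size.
 */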
static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
			      const char *value)
{
	size_t len;

	if (ext->kcfg.type != KCFG_CHAR_ARR) {
		pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value);
		return -EINVAL;
	}

	len = strlen(value);
	if (value[len - 1] != '"') {
		pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
			ext->name, value);
		return -EINVAL;
	}

	/* strip quotes */
	len -= 2;
	if (len >= ext->kcfg.sz) {
		pr_warn("extern (kcfg) '%s': long string config %s of (%zu bytes) truncated to %d bytes\n",
			ext->name, value, len, ext->kcfg.sz - 1);
		len = ext->kcfg.sz - 1;
	}
	memcpy(ext_val, value + 1, len);
	ext_val[len] = '\0';
	ext->is_set = true;
	return 0;
}

static int parse_u64(const char *value, __u64 *res)
{
	char *value_end;
	int err;

	errno = 0;
	*res = strtoull(value, &value_end, 0);
	if (errno) {
		err = -errno;
		pr_warn("failed to parse '%s' as integer: %d\n", value, err);
		return err;
	}
	if (*value_end) {
		pr_warn("failed to parse '%s' as integer completely\n", value);
		return -EINVAL;
	}
	return 0;
}

static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
{
	int bit_sz = ext->kcfg.sz * 8;

	if (ext->kcfg.sz == 8)
		return true;

	/* Validate that value stored in u64 fits in integer of
	 * `ext->kcfg.sz` bytes without any loss of information. If the
	 * target integer is signed, we rely on the following limits of
	 * integer type of Y bits and subsequent transformation:
	 *
	 *     -2^(Y-1) <= X           <= 2^(Y-1) - 1
	 *      0 <= X + 2^(Y-1)       <= 2^Y - 1
	 *      0 <= X + 2^(Y-1)       <  2^Y
	 *
	 * For unsigned target integer, check that all the (64 - Y) bits are
	 * zero.
	 */
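	/* Worked example: for a signed 1-byte target (Y = 8), valid values
	 * are -128..127, so v + 128 must fall in 0..255, i.e. below 2^8.
	 */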
	if (ext->kcfg.is_signed)
		return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
	else
		return (v >> bit_sz) == 0;
}

static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
			      __u64 value)
{
	if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
		pr_warn("extern (kcfg) %s=%llu should be integer\n",
			ext->name, (unsigned long long)value);
		return -EINVAL;
	}
	if (!is_kcfg_value_in_range(ext, value)) {
		pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n",
			ext->name, (unsigned long long)value, ext->kcfg.sz);
		return -ERANGE;
	}
	switch (ext->kcfg.sz) {
	case 1: *(__u8 *)ext_val = value; break;
	case 2: *(__u16 *)ext_val = value; break;
	case 4: *(__u32 *)ext_val = value; break;
	case 8: *(__u64 *)ext_val = value; break;
	default:
		return -EINVAL;
	}
	ext->is_set = true;
	return 0;
}

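/* Kconfig lines take one of these forms, all handled below:
 *
 *	CONFIG_BPF_SYSCALL=y			(bool/tristate)
 *	CONFIG_HZ=250				(integer)
 *	CONFIG_DEFAULT_HOSTNAME="(none)"	(string)
 */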
static int bpf_object__process_kconfig_line(struct bpf_object *obj,
					    char *buf, void *data)
{
	struct extern_desc *ext;
	char *sep, *value;
	int len, err = 0;
	void *ext_val;
	__u64 num;

	if (strncmp(buf, "CONFIG_", 7))
		return 0;

	sep = strchr(buf, '=');
	if (!sep) {
		pr_warn("failed to parse '%s': no separator\n", buf);
		return -EINVAL;
	}

	/* Trim ending '\n' */
	len = strlen(buf);
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';
	/* Split on '=' and ensure that a value is present. */
	*sep = '\0';
	if (!sep[1]) {
		*sep = '=';
		pr_warn("failed to parse '%s': no value\n", buf);
		return -EINVAL;
	}

	ext = find_extern_by_name(obj, buf);
	if (!ext || ext->is_set)
		return 0;

	ext_val = data + ext->kcfg.data_off;
	value = sep + 1;

	switch (*value) {
	case 'y': case 'n': case 'm':
		err = set_kcfg_value_tri(ext, ext_val, *value);
		break;
	case '"':
		err = set_kcfg_value_str(ext, ext_val, value);
		break;
	default:
		/* assume integer */
		err = parse_u64(value, &num);
		if (err) {
			pr_warn("extern (kcfg) %s=%s should be integer\n",
				ext->name, value);
			return err;
		}
		err = set_kcfg_value_num(ext, ext_val, num);
		break;
	}
	if (err)
		return err;
	pr_debug("extern (kcfg) %s=%s\n", ext->name, value);
	return 0;
}

static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
{
	char buf[PATH_MAX];
	struct utsname uts;
	int len, err = 0;
	gzFile file;

	uname(&uts);
	len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
	if (len < 0)
		return -EINVAL;
	else if (len >= PATH_MAX)
		return -ENAMETOOLONG;

	/* gzopen also accepts uncompressed files. */
	file = gzopen(buf, "r");
	if (!file)
		file = gzopen("/proc/config.gz", "r");

	if (!file) {
		pr_warn("failed to open system Kconfig\n");
		return -ENOENT;
	}

	while (gzgets(file, buf, sizeof(buf))) {
		err = bpf_object__process_kconfig_line(obj, buf, data);
		if (err) {
			pr_warn("error parsing system Kconfig line '%s': %d\n",
				buf, err);
			goto out;
		}
	}

out:
	gzclose(file);
	return err;
}

static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
					const char *config, void *data)
{
	char buf[PATH_MAX];
	int err = 0;
	FILE *file;

	file = fmemopen((void *)config, strlen(config), "r");
	if (!file) {
		err = -errno;
		pr_warn("failed to open in-memory Kconfig: %d\n", err);
		return err;
	}

	while (fgets(buf, sizeof(buf), file)) {
		err = bpf_object__process_kconfig_line(obj, buf, data);
		if (err) {
			pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
				buf, err);
			break;
		}
	}

	fclose(file);
	return err;
}

static int bpf_object__init_kconfig_map(struct bpf_object *obj)
{
	struct extern_desc *last_ext = NULL, *ext;
	size_t map_sz;
	int i, err;

	for (i = 0; i < obj->nr_extern; i++) {
		ext = &obj->externs[i];
		if (ext->type == EXT_KCFG)
			last_ext = ext;
	}

	if (!last_ext)
		return 0;

	map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
	err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
					    obj->efile.symbols_shndx,
					    NULL, map_sz);
	if (err)
		return err;

	obj->kconfig_map_idx = obj->nr_maps - 1;

	return 0;
}

static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
{
	Elf_Data *symbols = obj->efile.symbols;
	int i, map_def_sz = 0, nr_maps = 0, nr_syms;
	Elf_Data *data = NULL;
	Elf_Scn *scn;

	if (obj->efile.maps_shndx < 0)
		return 0;

	if (!symbols)
		return -EINVAL;


	scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
	data = elf_sec_data(obj, scn);
	if (!scn || !data) {
		pr_warn("elf: failed to get legacy map definitions for %s\n",
			obj->path);
		return -EINVAL;
	}

	/*
	 * Count the number of maps. Each map has a name.
	 * Arrays of maps are not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect arrays of maps and report an error.
	 */
	nr_syms = symbols->d_size / sizeof(GElf_Sym);
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}
	/* Assume equally sized map definitions */
	pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n",
		 nr_maps, data->d_size, obj->path);

	if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
		pr_warn("elf: unable to determine legacy map definition size in %s\n",
			obj->path);
		return -EINVAL;
	}
	map_def_sz = data->d_size / nr_maps;

	/* Fill obj->maps using data in "maps" section. */
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;
		struct bpf_map *map;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map_name = elf_sym_str(obj, sym.st_name);
		if (!map_name) {
			pr_warn("failed to get map #%d name sym string for obj %s\n",
				i, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		map->libbpf_type = LIBBPF_MAP_UNSPEC;
		map->sec_idx = sym.st_shndx;
		map->sec_offset = sym.st_value;
		pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
			 map_name, map->sec_idx, map->sec_offset);
		if (sym.st_value + map_def_sz > data->d_size) {
			pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
				obj->path, map_name);
			return -EINVAL;
		}

		map->name = strdup(map_name);
		if (!map->name) {
			pr_warn("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", i, map->name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		/*
		 * If the definition of the map in the object file fits in
		 * bpf_map_def, copy it. Any extra fields in our version
		 * of bpf_map_def will default to zero, since new maps are
		 * zero-initialized by bpf_object__add_map().
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&map->def, def, map_def_sz);
		} else {
			/*
			 * Here the map structure being read is bigger than
			 * what we expect, so truncate it if the excess bytes
			 * are all zero. If they are not zero, reject this
			 * map as incompatible.
			 */
			char *b;

			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
						obj->path, map_name);
					if (strict)
						return -EINVAL;
				}
			}
			memcpy(&map->def, def, sizeof(struct bpf_map_def));
		}
	}
	return 0;
}

static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);

	if (res_id)
		*res_id = id;

	while (btf_is_mod(t) || btf_is_typedef(t)) {
		if (res_id)
			*res_id = t->type;
		t = btf__type_by_id(btf, t->type);
	}

	return t;
}

static const struct btf_type *
resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t;

	t = skip_mods_and_typedefs(btf, id, NULL);
	if (!btf_is_ptr(t))
		return NULL;

	t = skip_mods_and_typedefs(btf, t->type, res_id);

	return btf_is_func_proto(t) ? t : NULL;
}

static const char *btf_kind_str(const struct btf_type *t)
{
	switch (btf_kind(t)) {
	case BTF_KIND_UNKN: return "void";
	case BTF_KIND_INT: return "int";
	case BTF_KIND_PTR: return "ptr";
	case BTF_KIND_ARRAY: return "array";
	case BTF_KIND_STRUCT: return "struct";
	case BTF_KIND_UNION: return "union";
	case BTF_KIND_ENUM: return "enum";
	case BTF_KIND_FWD: return "fwd";
	case BTF_KIND_TYPEDEF: return "typedef";
	case BTF_KIND_VOLATILE: return "volatile";
	case BTF_KIND_CONST: return "const";
	case BTF_KIND_RESTRICT: return "restrict";
	case BTF_KIND_FUNC: return "func";
	case BTF_KIND_FUNC_PROTO: return "func_proto";
	case BTF_KIND_VAR: return "var";
	case BTF_KIND_DATASEC: return "datasec";
	default: return "unknown";
	}
}

/*
 * Fetch an integer attribute of a BTF map definition. Such attributes are
 * represented using a pointer to an array, in which the dimensionality of
 * the array encodes the specified integer value. E.g.,
 * int (*type)[BPF_MAP_TYPE_ARRAY]; encodes the `type => BPF_MAP_TYPE_ARRAY`
 * key/value pair completely using the BTF type definition, while using only
 * sizeof(void *) bytes of space in the ELF data section.
 */
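/* A minimal sketch of the BPF-side encoding: the __uint() convenience macro
 * from bpf_helpers.h expands to exactly such a pointer-to-array member
 * ("my_map" is just an illustrative name):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);	// int (*type)[BPF_MAP_TYPE_ARRAY];
 *		__uint(max_entries, 16);		// int (*max_entries)[16];
 *	} my_map SEC(".maps");
 */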
get_map_field_int(const char * map_name,const struct btf * btf,const struct btf_member * m,__u32 * res)1930 static bool get_map_field_int(const char *map_name, const struct btf *btf,
1931 const struct btf_member *m, __u32 *res)
1932 {
1933 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
1934 const char *name = btf__name_by_offset(btf, m->name_off);
1935 const struct btf_array *arr_info;
1936 const struct btf_type *arr_t;
1937
1938 if (!btf_is_ptr(t)) {
1939 pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
1940 map_name, name, btf_kind_str(t));
1941 return false;
1942 }
1943
1944 arr_t = btf__type_by_id(btf, t->type);
1945 if (!arr_t) {
1946 pr_warn("map '%s': attr '%s': type [%u] not found.\n",
1947 map_name, name, t->type);
1948 return false;
1949 }
1950 if (!btf_is_array(arr_t)) {
1951 pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
1952 map_name, name, btf_kind_str(arr_t));
1953 return false;
1954 }
1955 arr_info = btf_array(arr_t);
1956 *res = arr_info->nelems;
1957 return true;
1958 }
1959
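/* Construct a map's default pin path by joining the pin root (defaulting to
 * "/sys/fs/bpf" if the caller passes NULL) with the map's name; e.g., a map
 * named "my_map" with a NULL pin_root_path ends up pinned at
 * "/sys/fs/bpf/my_map".
 */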
1960 static int build_map_pin_path(struct bpf_map *map, const char *path)
1961 {
1962 char buf[PATH_MAX];
1963 int len;
1964
1965 if (!path)
1966 path = "/sys/fs/bpf";
1967
1968 len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
1969 if (len < 0)
1970 return -EINVAL;
1971 else if (len >= PATH_MAX)
1972 return -ENAMETOOLONG;
1973
1974 return bpf_map__set_pin_path(map, buf);
1975 }
1976
1978 static int parse_btf_map_def(struct bpf_object *obj,
1979 struct bpf_map *map,
1980 const struct btf_type *def,
1981 bool strict, bool is_inner,
1982 const char *pin_root_path)
1983 {
1984 const struct btf_type *t;
1985 const struct btf_member *m;
1986 int vlen, i;
1987
1988 vlen = btf_vlen(def);
1989 m = btf_members(def);
1990 for (i = 0; i < vlen; i++, m++) {
1991 const char *name = btf__name_by_offset(obj->btf, m->name_off);
1992
1993 if (!name) {
1994 pr_warn("map '%s': invalid field #%d.\n", map->name, i);
1995 return -EINVAL;
1996 }
1997 if (strcmp(name, "type") == 0) {
1998 if (!get_map_field_int(map->name, obj->btf, m,
1999 &map->def.type))
2000 return -EINVAL;
2001 pr_debug("map '%s': found type = %u.\n",
2002 map->name, map->def.type);
2003 } else if (strcmp(name, "max_entries") == 0) {
2004 if (!get_map_field_int(map->name, obj->btf, m,
2005 &map->def.max_entries))
2006 return -EINVAL;
2007 pr_debug("map '%s': found max_entries = %u.\n",
2008 map->name, map->def.max_entries);
2009 } else if (strcmp(name, "map_flags") == 0) {
2010 if (!get_map_field_int(map->name, obj->btf, m,
2011 &map->def.map_flags))
2012 return -EINVAL;
2013 pr_debug("map '%s': found map_flags = %u.\n",
2014 map->name, map->def.map_flags);
2015 } else if (strcmp(name, "numa_node") == 0) {
2016 if (!get_map_field_int(map->name, obj->btf, m, &map->numa_node))
2017 return -EINVAL;
2018 pr_debug("map '%s': found numa_node = %u.\n", map->name, map->numa_node);
2019 } else if (strcmp(name, "key_size") == 0) {
2020 __u32 sz;
2021
2022 if (!get_map_field_int(map->name, obj->btf, m, &sz))
2023 return -EINVAL;
2024 pr_debug("map '%s': found key_size = %u.\n",
2025 map->name, sz);
2026 if (map->def.key_size && map->def.key_size != sz) {
2027 pr_warn("map '%s': conflicting key size %u != %u.\n",
2028 map->name, map->def.key_size, sz);
2029 return -EINVAL;
2030 }
2031 map->def.key_size = sz;
2032 } else if (strcmp(name, "key") == 0) {
2033 __s64 sz;
2034
2035 t = btf__type_by_id(obj->btf, m->type);
2036 if (!t) {
2037 pr_warn("map '%s': key type [%d] not found.\n",
2038 map->name, m->type);
2039 return -EINVAL;
2040 }
2041 if (!btf_is_ptr(t)) {
2042 pr_warn("map '%s': key spec is not PTR: %s.\n",
2043 map->name, btf_kind_str(t));
2044 return -EINVAL;
2045 }
2046 sz = btf__resolve_size(obj->btf, t->type);
2047 if (sz < 0) {
2048 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
2049 map->name, t->type, (ssize_t)sz);
2050 return sz;
2051 }
2052 pr_debug("map '%s': found key [%u], sz = %zd.\n",
2053 map->name, t->type, (ssize_t)sz);
2054 if (map->def.key_size && map->def.key_size != sz) {
2055 pr_warn("map '%s': conflicting key size %u != %zd.\n",
2056 map->name, map->def.key_size, (ssize_t)sz);
2057 return -EINVAL;
2058 }
2059 map->def.key_size = sz;
2060 map->btf_key_type_id = t->type;
2061 } else if (strcmp(name, "value_size") == 0) {
2062 __u32 sz;
2063
2064 if (!get_map_field_int(map->name, obj->btf, m, &sz))
2065 return -EINVAL;
2066 pr_debug("map '%s': found value_size = %u.\n",
2067 map->name, sz);
2068 if (map->def.value_size && map->def.value_size != sz) {
2069 pr_warn("map '%s': conflicting value size %u != %u.\n",
2070 map->name, map->def.value_size, sz);
2071 return -EINVAL;
2072 }
2073 map->def.value_size = sz;
2074 } else if (strcmp(name, "value") == 0) {
2075 __s64 sz;
2076
2077 t = btf__type_by_id(obj->btf, m->type);
2078 if (!t) {
2079 pr_warn("map '%s': value type [%d] not found.\n",
2080 map->name, m->type);
2081 return -EINVAL;
2082 }
2083 if (!btf_is_ptr(t)) {
2084 pr_warn("map '%s': value spec is not PTR: %s.\n",
2085 map->name, btf_kind_str(t));
2086 return -EINVAL;
2087 }
2088 sz = btf__resolve_size(obj->btf, t->type);
2089 if (sz < 0) {
2090 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
2091 map->name, t->type, (ssize_t)sz);
2092 return sz;
2093 }
2094 pr_debug("map '%s': found value [%u], sz = %zd.\n",
2095 map->name, t->type, (ssize_t)sz);
2096 if (map->def.value_size && map->def.value_size != sz) {
2097 pr_warn("map '%s': conflicting value size %u != %zd.\n",
2098 map->name, map->def.value_size, (ssize_t)sz);
2099 return -EINVAL;
2100 }
2101 map->def.value_size = sz;
2102 map->btf_value_type_id = t->type;
2103 		} else if (strcmp(name, "values") == 0) {
2105 int err;
2106
2107 if (is_inner) {
2108 pr_warn("map '%s': multi-level inner maps not supported.\n",
2109 map->name);
2110 return -ENOTSUP;
2111 }
2112 if (i != vlen - 1) {
2113 pr_warn("map '%s': '%s' member should be last.\n",
2114 map->name, name);
2115 return -EINVAL;
2116 }
2117 if (!bpf_map_type__is_map_in_map(map->def.type)) {
2118 pr_warn("map '%s': should be map-in-map.\n",
2119 map->name);
2120 return -ENOTSUP;
2121 }
2122 if (map->def.value_size && map->def.value_size != 4) {
2123 pr_warn("map '%s': conflicting value size %u != 4.\n",
2124 map->name, map->def.value_size);
2125 return -EINVAL;
2126 }
2127 map->def.value_size = 4;
2128 t = btf__type_by_id(obj->btf, m->type);
2129 if (!t) {
2130 pr_warn("map '%s': map-in-map inner type [%d] not found.\n",
2131 map->name, m->type);
2132 return -EINVAL;
2133 }
2134 if (!btf_is_array(t) || btf_array(t)->nelems) {
2135 pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n",
2136 map->name);
2137 return -EINVAL;
2138 }
2139 t = skip_mods_and_typedefs(obj->btf, btf_array(t)->type,
2140 NULL);
2141 if (!btf_is_ptr(t)) {
2142 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2143 map->name, btf_kind_str(t));
2144 return -EINVAL;
2145 }
2146 t = skip_mods_and_typedefs(obj->btf, t->type, NULL);
2147 if (!btf_is_struct(t)) {
2148 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2149 map->name, btf_kind_str(t));
2150 return -EINVAL;
2151 }
2152
2153 map->inner_map = calloc(1, sizeof(*map->inner_map));
2154 if (!map->inner_map)
2155 return -ENOMEM;
2156 map->inner_map->sec_idx = obj->efile.btf_maps_shndx;
2157 map->inner_map->name = malloc(strlen(map->name) +
2158 sizeof(".inner") + 1);
2159 if (!map->inner_map->name)
2160 return -ENOMEM;
2161 sprintf(map->inner_map->name, "%s.inner", map->name);
2162
2163 err = parse_btf_map_def(obj, map->inner_map, t, strict,
2164 true /* is_inner */, NULL);
2165 if (err)
2166 return err;
2167 } else if (strcmp(name, "pinning") == 0) {
2168 __u32 val;
2169 int err;
2170
2171 if (is_inner) {
2172 				pr_warn("map '%s': inner def can't be pinned.\n",
2173 map->name);
2174 return -EINVAL;
2175 }
2176 if (!get_map_field_int(map->name, obj->btf, m, &val))
2177 return -EINVAL;
2178 pr_debug("map '%s': found pinning = %u.\n",
2179 map->name, val);
2180
2181 if (val != LIBBPF_PIN_NONE &&
2182 val != LIBBPF_PIN_BY_NAME) {
2183 pr_warn("map '%s': invalid pinning value %u.\n",
2184 map->name, val);
2185 return -EINVAL;
2186 }
2187 if (val == LIBBPF_PIN_BY_NAME) {
2188 err = build_map_pin_path(map, pin_root_path);
2189 if (err) {
2190 pr_warn("map '%s': couldn't build pin path.\n",
2191 map->name);
2192 return err;
2193 }
2194 }
2195 } else {
2196 if (strict) {
2197 pr_warn("map '%s': unknown field '%s'.\n",
2198 map->name, name);
2199 return -ENOTSUP;
2200 }
2201 pr_debug("map '%s': ignoring unknown field '%s'.\n",
2202 map->name, name);
2203 }
2204 }
2205
2206 if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
2207 pr_warn("map '%s': map type isn't specified.\n", map->name);
2208 return -EINVAL;
2209 }
2210
2211 return 0;
2212 }
2213
2214 static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2215 const struct btf_type *sec,
2216 int var_idx, int sec_idx,
2217 const Elf_Data *data, bool strict,
2218 const char *pin_root_path)
2219 {
2220 const struct btf_type *var, *def;
2221 const struct btf_var_secinfo *vi;
2222 const struct btf_var *var_extra;
2223 const char *map_name;
2224 struct bpf_map *map;
2225
2226 vi = btf_var_secinfos(sec) + var_idx;
2227 var = btf__type_by_id(obj->btf, vi->type);
2228 var_extra = btf_var(var);
2229 map_name = btf__name_by_offset(obj->btf, var->name_off);
2230
2231 if (map_name == NULL || map_name[0] == '\0') {
2232 pr_warn("map #%d: empty name.\n", var_idx);
2233 return -EINVAL;
2234 }
2235 if ((__u64)vi->offset + vi->size > data->d_size) {
2236 pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2237 return -EINVAL;
2238 }
2239 if (!btf_is_var(var)) {
2240 pr_warn("map '%s': unexpected var kind %s.\n",
2241 map_name, btf_kind_str(var));
2242 return -EINVAL;
2243 }
2244 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
2245 var_extra->linkage != BTF_VAR_STATIC) {
2246 pr_warn("map '%s': unsupported var linkage %u.\n",
2247 map_name, var_extra->linkage);
2248 return -EOPNOTSUPP;
2249 }
2250
2251 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2252 if (!btf_is_struct(def)) {
2253 pr_warn("map '%s': unexpected def kind %s.\n",
2254 			map_name, btf_kind_str(def));
2255 return -EINVAL;
2256 }
2257 if (def->size > vi->size) {
2258 pr_warn("map '%s': invalid def size.\n", map_name);
2259 return -EINVAL;
2260 }
2261
2262 map = bpf_object__add_map(obj);
2263 if (IS_ERR(map))
2264 return PTR_ERR(map);
2265 map->name = strdup(map_name);
2266 if (!map->name) {
2267 pr_warn("map '%s': failed to alloc map name.\n", map_name);
2268 return -ENOMEM;
2269 }
2270 map->libbpf_type = LIBBPF_MAP_UNSPEC;
2271 map->def.type = BPF_MAP_TYPE_UNSPEC;
2272 map->sec_idx = sec_idx;
2273 map->sec_offset = vi->offset;
2274 map->btf_var_idx = var_idx;
2275 pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2276 map_name, map->sec_idx, map->sec_offset);
2277
2278 return parse_btf_map_def(obj, map, def, strict, false, pin_root_path);
2279 }
2280
2281 static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2282 const char *pin_root_path)
2283 {
2284 const struct btf_type *sec = NULL;
2285 int nr_types, i, vlen, err;
2286 const struct btf_type *t;
2287 const char *name;
2288 Elf_Data *data;
2289 Elf_Scn *scn;
2290
2291 if (obj->efile.btf_maps_shndx < 0)
2292 return 0;
2293
2294 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2295 data = elf_sec_data(obj, scn);
2296 if (!scn || !data) {
2297 pr_warn("elf: failed to get %s map definitions for %s\n",
2298 MAPS_ELF_SEC, obj->path);
2299 return -EINVAL;
2300 }
2301
2302 nr_types = btf__get_nr_types(obj->btf);
2303 for (i = 1; i <= nr_types; i++) {
2304 t = btf__type_by_id(obj->btf, i);
2305 if (!btf_is_datasec(t))
2306 continue;
2307 name = btf__name_by_offset(obj->btf, t->name_off);
2308 if (strcmp(name, MAPS_ELF_SEC) == 0) {
2309 sec = t;
2310 obj->efile.btf_maps_sec_btf_id = i;
2311 break;
2312 }
2313 }
2314
2315 if (!sec) {
2316 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
2317 return -ENOENT;
2318 }
2319
2320 vlen = btf_vlen(sec);
2321 for (i = 0; i < vlen; i++) {
2322 err = bpf_object__init_user_btf_map(obj, sec, i,
2323 obj->efile.btf_maps_shndx,
2324 data, strict,
2325 pin_root_path);
2326 if (err)
2327 return err;
2328 }
2329
2330 return 0;
2331 }
2332
2333 static int bpf_object__init_maps(struct bpf_object *obj,
2334 const struct bpf_object_open_opts *opts)
2335 {
2336 const char *pin_root_path;
2337 bool strict;
2338 int err;
2339
2340 strict = !OPTS_GET(opts, relaxed_maps, false);
2341 pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
2342
2343 err = bpf_object__init_user_maps(obj, strict);
2344 err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2345 err = err ?: bpf_object__init_global_data_maps(obj);
2346 err = err ?: bpf_object__init_kconfig_map(obj);
2347 err = err ?: bpf_object__init_struct_ops_maps(obj);
2348 if (err)
2349 return err;
2350
2351 return 0;
2352 }
2353
2354 static bool section_have_execinstr(struct bpf_object *obj, int idx)
2355 {
2356 GElf_Shdr sh;
2357
2358 if (elf_sec_hdr(obj, elf_sec_by_idx(obj, idx), &sh))
2359 return false;
2360
2361 return sh.sh_flags & SHF_EXECINSTR;
2362 }
2363
2364 static bool btf_needs_sanitization(struct bpf_object *obj)
2365 {
2366 bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
2367 bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
2368 bool has_func = kernel_supports(FEAT_BTF_FUNC);
2369
2370 return !has_func || !has_datasec || !has_func_global;
2371 }
2372
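/* Downgrade BTF kinds that the running kernel doesn't support to older,
 * semantically close kinds, so that the rest of the BTF blob still loads:
 *
 *	no DATASEC support:     VAR -> INT, DATASEC -> STRUCT
 *	no FUNC support:        FUNC -> TYPEDEF, FUNC_PROTO -> ENUM
 *	no global FUNC support: global FUNC -> static FUNC
 */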
2373 static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
2374 {
2375 bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
2376 bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
2377 bool has_func = kernel_supports(FEAT_BTF_FUNC);
2378 struct btf_type *t;
2379 int i, j, vlen;
2380
2381 for (i = 1; i <= btf__get_nr_types(btf); i++) {
2382 t = (struct btf_type *)btf__type_by_id(btf, i);
2383
2384 if (!has_datasec && btf_is_var(t)) {
2385 /* replace VAR with INT */
2386 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
2387 /*
2388 * using size = 1 is the safest choice, 4 will be too
2389 * big and cause kernel BTF validation failure if
2390 * original variable took less than 4 bytes
2391 */
2392 t->size = 1;
2393 *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
2394 } else if (!has_datasec && btf_is_datasec(t)) {
2395 /* replace DATASEC with STRUCT */
2396 const struct btf_var_secinfo *v = btf_var_secinfos(t);
2397 struct btf_member *m = btf_members(t);
2398 struct btf_type *vt;
2399 char *name;
2400
2401 name = (char *)btf__name_by_offset(btf, t->name_off);
2402 while (*name) {
2403 if (*name == '.')
2404 *name = '_';
2405 name++;
2406 }
2407
2408 vlen = btf_vlen(t);
2409 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
2410 for (j = 0; j < vlen; j++, v++, m++) {
2411 /* order of field assignments is important */
2412 m->offset = v->offset * 8;
2413 m->type = v->type;
2414 /* preserve variable name as member name */
2415 vt = (void *)btf__type_by_id(btf, v->type);
2416 m->name_off = vt->name_off;
2417 }
2418 } else if (!has_func && btf_is_func_proto(t)) {
2419 /* replace FUNC_PROTO with ENUM */
2420 vlen = btf_vlen(t);
2421 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
2422 t->size = sizeof(__u32); /* kernel enforced */
2423 } else if (!has_func && btf_is_func(t)) {
2424 /* replace FUNC with TYPEDEF */
2425 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
2426 } else if (!has_func_global && btf_is_func(t)) {
2427 /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
2428 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
2429 }
2430 }
2431 }
2432
2433 static bool libbpf_needs_btf(const struct bpf_object *obj)
2434 {
2435 return obj->efile.btf_maps_shndx >= 0 ||
2436 obj->efile.st_ops_shndx >= 0 ||
2437 obj->nr_extern > 0;
2438 }
2439
2440 static bool kernel_needs_btf(const struct bpf_object *obj)
2441 {
2442 return obj->efile.st_ops_shndx >= 0;
2443 }
2444
2445 static int bpf_object__init_btf(struct bpf_object *obj,
2446 Elf_Data *btf_data,
2447 Elf_Data *btf_ext_data)
2448 {
2449 int err = -ENOENT;
2450
2451 if (btf_data) {
2452 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
2453 if (IS_ERR(obj->btf)) {
2454 err = PTR_ERR(obj->btf);
2455 obj->btf = NULL;
2456 pr_warn("Error loading ELF section %s: %d.\n",
2457 BTF_ELF_SEC, err);
2458 goto out;
2459 }
2460 /* enforce 8-byte pointers for BPF-targeted BTFs */
2461 btf__set_pointer_size(obj->btf, 8);
2462 err = 0;
2463 }
2464 if (btf_ext_data) {
2465 if (!obj->btf) {
2466 pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
2467 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
2468 goto out;
2469 }
2470 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
2471 btf_ext_data->d_size);
2472 if (IS_ERR(obj->btf_ext)) {
2473 pr_warn("Error loading ELF section %s: %ld. Ignored and continue.\n",
2474 BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
2475 obj->btf_ext = NULL;
2476 goto out;
2477 }
2478 }
2479 out:
2480 if (err && libbpf_needs_btf(obj)) {
2481 pr_warn("BTF is required, but is missing or corrupted.\n");
2482 return err;
2483 }
2484 return 0;
2485 }
2486
2487 static int bpf_object__finalize_btf(struct bpf_object *obj)
2488 {
2489 int err;
2490
2491 if (!obj->btf)
2492 return 0;
2493
2494 err = btf__finalize_data(obj, obj->btf);
2495 if (err) {
2496 pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
2497 return err;
2498 }
2499
2500 return 0;
2501 }
2502
2503 static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog)
2504 {
2505 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
2506 prog->type == BPF_PROG_TYPE_LSM)
2507 return true;
2508
2509 /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
2510 * also need vmlinux BTF
2511 */
2512 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
2513 return true;
2514
2515 return false;
2516 }
2517
2518 static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
2519 {
2520 bool need_vmlinux_btf = false;
2521 struct bpf_program *prog;
2522 int i, err;
2523
2524 /* CO-RE relocations need kernel BTF */
2525 if (obj->btf_ext && obj->btf_ext->core_relo_info.len)
2526 need_vmlinux_btf = true;
2527
2528 /* Support for typed ksyms needs kernel BTF */
2529 for (i = 0; i < obj->nr_extern; i++) {
2530 const struct extern_desc *ext;
2531
2532 ext = &obj->externs[i];
2533 if (ext->type == EXT_KSYM && ext->ksym.type_id) {
2534 need_vmlinux_btf = true;
2535 break;
2536 }
2537 }
2538
2539 bpf_object__for_each_program(prog, obj) {
2540 if (!prog->load)
2541 continue;
2542 if (libbpf_prog_needs_vmlinux_btf(prog)) {
2543 need_vmlinux_btf = true;
2544 break;
2545 }
2546 }
2547
2548 if (!need_vmlinux_btf)
2549 return 0;
2550
2551 obj->btf_vmlinux = libbpf_find_kernel_btf();
2552 if (IS_ERR(obj->btf_vmlinux)) {
2553 err = PTR_ERR(obj->btf_vmlinux);
2554 pr_warn("Error loading vmlinux BTF: %d\n", err);
2555 obj->btf_vmlinux = NULL;
2556 return err;
2557 }
2558 return 0;
2559 }
2560
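/* Load the object's BTF into the kernel. If the kernel lacks some of the
 * BTF features used, sanitization is performed on a throwaway clone: the
 * raw BTF data is re-parsed into kern_btf, the clone is sanitized and
 * loaded, and the resulting fd is transferred back to obj->btf, leaving the
 * in-memory BTF that libbpf keeps using untouched.
 */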
2561 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
2562 {
2563 struct btf *kern_btf = obj->btf;
2564 bool btf_mandatory, sanitize;
2565 int err = 0;
2566
2567 if (!obj->btf)
2568 return 0;
2569
2570 if (!kernel_supports(FEAT_BTF)) {
2571 if (kernel_needs_btf(obj)) {
2572 err = -EOPNOTSUPP;
2573 goto report;
2574 }
2575 pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
2576 return 0;
2577 }
2578
2579 sanitize = btf_needs_sanitization(obj);
2580 if (sanitize) {
2581 const void *raw_data;
2582 __u32 sz;
2583
2584 /* clone BTF to sanitize a copy and leave the original intact */
2585 raw_data = btf__get_raw_data(obj->btf, &sz);
2586 kern_btf = btf__new(raw_data, sz);
2587 if (IS_ERR(kern_btf))
2588 return PTR_ERR(kern_btf);
2589
2590 /* enforce 8-byte pointers for BPF-targeted BTFs */
2591 btf__set_pointer_size(obj->btf, 8);
2592 bpf_object__sanitize_btf(obj, kern_btf);
2593 }
2594
2595 err = btf__load(kern_btf);
2596 if (sanitize) {
2597 if (!err) {
2598 /* move fd to libbpf's BTF */
2599 btf__set_fd(obj->btf, btf__fd(kern_btf));
2600 btf__set_fd(kern_btf, -1);
2601 }
2602 btf__free(kern_btf);
2603 }
2604 report:
2605 if (err) {
2606 btf_mandatory = kernel_needs_btf(obj);
2607 pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
2608 btf_mandatory ? "BTF is mandatory, can't proceed."
2609 : "BTF is optional, ignoring.");
2610 if (!btf_mandatory)
2611 err = 0;
2612 }
2613 return err;
2614 }
2615
2616 static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
2617 {
2618 const char *name;
2619
2620 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
2621 if (!name) {
2622 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
2623 off, obj->path, elf_errmsg(-1));
2624 return NULL;
2625 }
2626
2627 return name;
2628 }
2629
2630 static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
2631 {
2632 const char *name;
2633
2634 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
2635 if (!name) {
2636 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
2637 off, obj->path, elf_errmsg(-1));
2638 return NULL;
2639 }
2640
2641 return name;
2642 }
2643
2644 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
2645 {
2646 Elf_Scn *scn;
2647
2648 scn = elf_getscn(obj->efile.elf, idx);
2649 if (!scn) {
2650 pr_warn("elf: failed to get section(%zu) from %s: %s\n",
2651 idx, obj->path, elf_errmsg(-1));
2652 return NULL;
2653 }
2654 return scn;
2655 }
2656
2657 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
2658 {
2659 Elf_Scn *scn = NULL;
2660 Elf *elf = obj->efile.elf;
2661 const char *sec_name;
2662
2663 while ((scn = elf_nextscn(elf, scn)) != NULL) {
2664 sec_name = elf_sec_name(obj, scn);
2665 if (!sec_name)
2666 return NULL;
2667
2668 if (strcmp(sec_name, name) != 0)
2669 continue;
2670
2671 return scn;
2672 }
2673 return NULL;
2674 }
2675
2676 static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr)
2677 {
2678 if (!scn)
2679 return -EINVAL;
2680
2681 if (gelf_getshdr(scn, hdr) != hdr) {
2682 pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
2683 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
2684 return -EINVAL;
2685 }
2686
2687 return 0;
2688 }
2689
2690 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
2691 {
2692 const char *name;
2693 GElf_Shdr sh;
2694
2695 if (!scn)
2696 return NULL;
2697
2698 if (elf_sec_hdr(obj, scn, &sh))
2699 return NULL;
2700
2701 name = elf_sec_str(obj, sh.sh_name);
2702 if (!name) {
2703 pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
2704 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
2705 return NULL;
2706 }
2707
2708 return name;
2709 }
2710
2711 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
2712 {
2713 Elf_Data *data;
2714
2715 if (!scn)
2716 return NULL;
2717
2718 data = elf_getdata(scn, 0);
2719 if (!data) {
2720 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
2721 elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
2722 obj->path, elf_errmsg(-1));
2723 return NULL;
2724 }
2725
2726 return data;
2727 }
2728
2729 static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
2730 size_t off, __u32 sym_type, GElf_Sym *sym)
2731 {
2732 Elf_Data *symbols = obj->efile.symbols;
2733 size_t n = symbols->d_size / sizeof(GElf_Sym);
2734 int i;
2735
2736 for (i = 0; i < n; i++) {
2737 if (!gelf_getsym(symbols, i, sym))
2738 continue;
2739 if (sym->st_shndx != sec_idx || sym->st_value != off)
2740 continue;
2741 if (GELF_ST_TYPE(sym->st_info) != sym_type)
2742 continue;
2743 return 0;
2744 }
2745
2746 return -ENOENT;
2747 }
2748
2749 static bool is_sec_name_dwarf(const char *name)
2750 {
2751 /* approximation, but the actual list is too long */
2752 return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0;
2753 }
2754
2755 static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
2756 {
2757 /* no special handling of .strtab */
2758 if (hdr->sh_type == SHT_STRTAB)
2759 return true;
2760
2761 /* ignore .llvm_addrsig section as well */
2762 if (hdr->sh_type == 0x6FFF4C03 /* SHT_LLVM_ADDRSIG */)
2763 return true;
2764
2765 /* no subprograms will lead to an empty .text section, ignore it */
2766 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
2767 strcmp(name, ".text") == 0)
2768 return true;
2769
2770 /* DWARF sections */
2771 if (is_sec_name_dwarf(name))
2772 return true;
2773
2774 if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) {
2775 name += sizeof(".rel") - 1;
2776 /* DWARF section relocations */
2777 if (is_sec_name_dwarf(name))
2778 return true;
2779
2780 /* .BTF and .BTF.ext don't need relocations */
2781 if (strcmp(name, BTF_ELF_SEC) == 0 ||
2782 strcmp(name, BTF_EXT_ELF_SEC) == 0)
2783 return true;
2784 }
2785
2786 return false;
2787 }
2788
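/* Comparator used to keep obj->programs sorted by (sec_idx, sec_insn_off);
 * this ordering is what enables the binary search in find_prog_by_sec_insn().
 */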
2789 static int cmp_progs(const void *_a, const void *_b)
2790 {
2791 const struct bpf_program *a = _a;
2792 const struct bpf_program *b = _b;
2793
2794 if (a->sec_idx != b->sec_idx)
2795 return a->sec_idx < b->sec_idx ? -1 : 1;
2796
2797 /* sec_insn_off can't be the same within the section */
2798 return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
2799 }
2800
2801 static int bpf_object__elf_collect(struct bpf_object *obj)
2802 {
2803 Elf *elf = obj->efile.elf;
2804 Elf_Data *btf_ext_data = NULL;
2805 Elf_Data *btf_data = NULL;
2806 int idx = 0, err = 0;
2807 const char *name;
2808 Elf_Data *data;
2809 Elf_Scn *scn;
2810 GElf_Shdr sh;
2811
2812 /* a bunch of ELF parsing functionality depends on processing symbols,
2813 * so do the first pass and find the symbol table
2814 */
2815 scn = NULL;
2816 while ((scn = elf_nextscn(elf, scn)) != NULL) {
2817 if (elf_sec_hdr(obj, scn, &sh))
2818 return -LIBBPF_ERRNO__FORMAT;
2819
2820 if (sh.sh_type == SHT_SYMTAB) {
2821 if (obj->efile.symbols) {
2822 pr_warn("elf: multiple symbol tables in %s\n", obj->path);
2823 return -LIBBPF_ERRNO__FORMAT;
2824 }
2825
2826 data = elf_sec_data(obj, scn);
2827 if (!data)
2828 return -LIBBPF_ERRNO__FORMAT;
2829
2830 obj->efile.symbols = data;
2831 obj->efile.symbols_shndx = elf_ndxscn(scn);
2832 obj->efile.strtabidx = sh.sh_link;
2833 }
2834 }
2835
2836 scn = NULL;
2837 while ((scn = elf_nextscn(elf, scn)) != NULL) {
2838 idx++;
2839
2840 if (elf_sec_hdr(obj, scn, &sh))
2841 return -LIBBPF_ERRNO__FORMAT;
2842
2843 name = elf_sec_str(obj, sh.sh_name);
2844 if (!name)
2845 return -LIBBPF_ERRNO__FORMAT;
2846
2847 if (ignore_elf_section(&sh, name))
2848 continue;
2849
2850 data = elf_sec_data(obj, scn);
2851 if (!data)
2852 return -LIBBPF_ERRNO__FORMAT;
2853
2854 pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
2855 idx, name, (unsigned long)data->d_size,
2856 (int)sh.sh_link, (unsigned long)sh.sh_flags,
2857 (int)sh.sh_type);
2858
2859 if (strcmp(name, "license") == 0) {
2860 err = bpf_object__init_license(obj, data->d_buf, data->d_size);
2861 if (err)
2862 return err;
2863 } else if (strcmp(name, "version") == 0) {
2864 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
2865 if (err)
2866 return err;
2867 } else if (strcmp(name, "maps") == 0) {
2868 obj->efile.maps_shndx = idx;
2869 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
2870 obj->efile.btf_maps_shndx = idx;
2871 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
2872 btf_data = data;
2873 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
2874 btf_ext_data = data;
2875 } else if (sh.sh_type == SHT_SYMTAB) {
2876 /* already processed during the first pass above */
2877 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
2878 if (sh.sh_flags & SHF_EXECINSTR) {
2879 if (strcmp(name, ".text") == 0)
2880 obj->efile.text_shndx = idx;
2881 err = bpf_object__add_programs(obj, data, name, idx);
2882 if (err)
2883 return err;
2884 } else if (strcmp(name, DATA_SEC) == 0) {
2885 obj->efile.data = data;
2886 obj->efile.data_shndx = idx;
2887 } else if (strcmp(name, RODATA_SEC) == 0) {
2888 obj->efile.rodata = data;
2889 obj->efile.rodata_shndx = idx;
2890 } else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
2891 obj->efile.st_ops_data = data;
2892 obj->efile.st_ops_shndx = idx;
2893 } else {
2894 pr_info("elf: skipping unrecognized data section(%d) %s\n",
2895 idx, name);
2896 }
2897 } else if (sh.sh_type == SHT_REL) {
2898 int nr_sects = obj->efile.nr_reloc_sects;
2899 void *sects = obj->efile.reloc_sects;
2900 int sec = sh.sh_info; /* points to other section */
2901
2902 			/* Only do relo for sections with exec instructions */
2903 if (!section_have_execinstr(obj, sec) &&
2904 strcmp(name, ".rel" STRUCT_OPS_SEC) &&
2905 strcmp(name, ".rel" MAPS_ELF_SEC)) {
2906 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
2907 idx, name, sec,
2908 elf_sec_name(obj, elf_sec_by_idx(obj, sec)) ?: "<?>");
2909 continue;
2910 }
2911
2912 sects = libbpf_reallocarray(sects, nr_sects + 1,
2913 sizeof(*obj->efile.reloc_sects));
2914 if (!sects)
2915 return -ENOMEM;
2916
2917 obj->efile.reloc_sects = sects;
2918 obj->efile.nr_reloc_sects++;
2919
2920 obj->efile.reloc_sects[nr_sects].shdr = sh;
2921 obj->efile.reloc_sects[nr_sects].data = data;
2922 } else if (sh.sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
2923 obj->efile.bss = data;
2924 obj->efile.bss_shndx = idx;
2925 } else {
2926 pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
2927 (size_t)sh.sh_size);
2928 }
2929 }
2930
2931 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
2932 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
2933 return -LIBBPF_ERRNO__FORMAT;
2934 }
2935
2936 	/* sort BPF programs by section index and in-section instruction offset
2937 	 * for faster search */
2938 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
2939
2940 return bpf_object__init_btf(obj, btf_data, btf_ext_data);
2941 }
2942
2943 static bool sym_is_extern(const GElf_Sym *sym)
2944 {
2945 int bind = GELF_ST_BIND(sym->st_info);
2946 /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
2947 return sym->st_shndx == SHN_UNDEF &&
2948 (bind == STB_GLOBAL || bind == STB_WEAK) &&
2949 GELF_ST_TYPE(sym->st_info) == STT_NOTYPE;
2950 }
2951
2952 static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
2953 {
2954 const struct btf_type *t;
2955 const char *var_name;
2956 int i, n;
2957
2958 if (!btf)
2959 return -ESRCH;
2960
2961 n = btf__get_nr_types(btf);
2962 for (i = 1; i <= n; i++) {
2963 t = btf__type_by_id(btf, i);
2964
2965 if (!btf_is_var(t))
2966 continue;
2967
2968 var_name = btf__name_by_offset(btf, t->name_off);
2969 if (strcmp(var_name, ext_name))
2970 continue;
2971
2972 if (btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
2973 return -EINVAL;
2974
2975 return i;
2976 }
2977
2978 return -ENOENT;
2979 }
2980
2981 static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id)
{
2982 const struct btf_var_secinfo *vs;
2983 const struct btf_type *t;
2984 int i, j, n;
2985
2986 if (!btf)
2987 return -ESRCH;
2988
2989 n = btf__get_nr_types(btf);
2990 for (i = 1; i <= n; i++) {
2991 t = btf__type_by_id(btf, i);
2992
2993 if (!btf_is_datasec(t))
2994 continue;
2995
2996 vs = btf_var_secinfos(t);
2997 for (j = 0; j < btf_vlen(t); j++, vs++) {
2998 if (vs->type == ext_btf_id)
2999 return i;
3000 }
3001 }
3002
3003 return -ENOENT;
3004 }
3005
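/* Classify the BTF type of a .kconfig extern:
 *
 *	1-byte bool-encoded int           -> KCFG_BOOL
 *	1-byte int                        -> KCFG_CHAR
 *	2/4/8-byte int                    -> KCFG_INT
 *	4-byte enum named libbpf_tristate -> KCFG_TRISTATE
 *	non-empty array of KCFG_CHAR      -> KCFG_CHAR_ARR
 *
 * Anything else is KCFG_UNKNOWN, which callers treat as unsupported.
 */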
3006 static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
3007 bool *is_signed)
3008 {
3009 const struct btf_type *t;
3010 const char *name;
3011
3012 t = skip_mods_and_typedefs(btf, id, NULL);
3013 name = btf__name_by_offset(btf, t->name_off);
3014
3015 if (is_signed)
3016 *is_signed = false;
3017 switch (btf_kind(t)) {
3018 case BTF_KIND_INT: {
3019 int enc = btf_int_encoding(t);
3020
3021 if (enc & BTF_INT_BOOL)
3022 return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
3023 if (is_signed)
3024 *is_signed = enc & BTF_INT_SIGNED;
3025 if (t->size == 1)
3026 return KCFG_CHAR;
3027 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
3028 return KCFG_UNKNOWN;
3029 return KCFG_INT;
3030 }
3031 case BTF_KIND_ENUM:
3032 if (t->size != 4)
3033 return KCFG_UNKNOWN;
3034 if (strcmp(name, "libbpf_tristate"))
3035 return KCFG_UNKNOWN;
3036 return KCFG_TRISTATE;
3037 case BTF_KIND_ARRAY:
3038 if (btf_array(t)->nelems == 0)
3039 return KCFG_UNKNOWN;
3040 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
3041 return KCFG_UNKNOWN;
3042 return KCFG_CHAR_ARR;
3043 default:
3044 return KCFG_UNKNOWN;
3045 }
3046 }
3047
3048 static int cmp_externs(const void *_a, const void *_b)
3049 {
3050 const struct extern_desc *a = _a;
3051 const struct extern_desc *b = _b;
3052
3053 if (a->type != b->type)
3054 return a->type < b->type ? -1 : 1;
3055
3056 if (a->type == EXT_KCFG) {
3057 /* descending order by alignment requirements */
3058 if (a->kcfg.align != b->kcfg.align)
3059 return a->kcfg.align > b->kcfg.align ? -1 : 1;
3060 /* ascending order by size, within same alignment class */
3061 if (a->kcfg.sz != b->kcfg.sz)
3062 return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
3063 }
3064
3065 /* resolve ties by name */
3066 return strcmp(a->name, b->name);
3067 }
3068
3069 static int find_int_btf_id(const struct btf *btf)
3070 {
3071 const struct btf_type *t;
3072 int i, n;
3073
3074 n = btf__get_nr_types(btf);
3075 for (i = 1; i <= n; i++) {
3076 t = btf__type_by_id(btf, i);
3077
3078 if (btf_is_int(t) && btf_int_bits(t) == 32)
3079 return i;
3080 }
3081
3082 return 0;
3083 }
3084
3085 static int bpf_object__collect_externs(struct bpf_object *obj)
3086 {
3087 struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
3088 const struct btf_type *t;
3089 struct extern_desc *ext;
3090 int i, n, off;
3091 const char *ext_name, *sec_name;
3092 Elf_Scn *scn;
3093 GElf_Shdr sh;
3094
3095 if (!obj->efile.symbols)
3096 return 0;
3097
3098 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
3099 if (elf_sec_hdr(obj, scn, &sh))
3100 return -LIBBPF_ERRNO__FORMAT;
3101
3102 n = sh.sh_size / sh.sh_entsize;
3103 pr_debug("looking for externs among %d symbols...\n", n);
3104
3105 for (i = 0; i < n; i++) {
3106 GElf_Sym sym;
3107
3108 if (!gelf_getsym(obj->efile.symbols, i, &sym))
3109 return -LIBBPF_ERRNO__FORMAT;
3110 if (!sym_is_extern(&sym))
3111 continue;
3112 ext_name = elf_sym_str(obj, sym.st_name);
3113 if (!ext_name || !ext_name[0])
3114 continue;
3115
3116 ext = obj->externs;
3117 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
3118 if (!ext)
3119 return -ENOMEM;
3120 obj->externs = ext;
3121 ext = &ext[obj->nr_extern];
3122 memset(ext, 0, sizeof(*ext));
3123 obj->nr_extern++;
3124
3125 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
3126 if (ext->btf_id <= 0) {
3127 pr_warn("failed to find BTF for extern '%s': %d\n",
3128 ext_name, ext->btf_id);
3129 return ext->btf_id;
3130 }
3131 t = btf__type_by_id(obj->btf, ext->btf_id);
3132 ext->name = btf__name_by_offset(obj->btf, t->name_off);
3133 ext->sym_idx = i;
3134 ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK;
3135
3136 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
3137 if (ext->sec_btf_id <= 0) {
3138 pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
3139 ext_name, ext->btf_id, ext->sec_btf_id);
3140 return ext->sec_btf_id;
3141 }
3142 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
3143 sec_name = btf__name_by_offset(obj->btf, sec->name_off);
3144
3145 if (strcmp(sec_name, KCONFIG_SEC) == 0) {
3146 kcfg_sec = sec;
3147 ext->type = EXT_KCFG;
3148 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
3149 if (ext->kcfg.sz <= 0) {
3150 pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
3151 ext_name, ext->kcfg.sz);
3152 return ext->kcfg.sz;
3153 }
3154 ext->kcfg.align = btf__align_of(obj->btf, t->type);
3155 if (ext->kcfg.align <= 0) {
3156 pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
3157 ext_name, ext->kcfg.align);
3158 return -EINVAL;
3159 }
3160 ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
3161 &ext->kcfg.is_signed);
3162 if (ext->kcfg.type == KCFG_UNKNOWN) {
3163 pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name);
3164 return -ENOTSUP;
3165 }
3166 } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
3167 ksym_sec = sec;
3168 ext->type = EXT_KSYM;
3169 skip_mods_and_typedefs(obj->btf, t->type,
3170 &ext->ksym.type_id);
3171 } else {
3172 pr_warn("unrecognized extern section '%s'\n", sec_name);
3173 return -ENOTSUP;
3174 }
3175 }
3176 pr_debug("collected %d externs total\n", obj->nr_extern);
3177
3178 if (!obj->nr_extern)
3179 return 0;
3180
3181 /* sort externs by type, for kcfg ones also by (align, size, name) */
3182 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
3183
3184 /* for .ksyms section, we need to turn all externs into allocated
3185 * variables in BTF to pass kernel verification; we do this by
3186 	 * pretending that each extern is a 4-byte int variable
3187 */
3188 if (ksym_sec) {
3189 /* find existing 4-byte integer type in BTF to use for fake
3190 * extern variables in DATASEC
3191 */
3192 int int_btf_id = find_int_btf_id(obj->btf);
3193
3194 for (i = 0; i < obj->nr_extern; i++) {
3195 ext = &obj->externs[i];
3196 if (ext->type != EXT_KSYM)
3197 continue;
3198 pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
3199 i, ext->sym_idx, ext->name);
3200 }
3201
3202 sec = ksym_sec;
3203 n = btf_vlen(sec);
3204 for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
3205 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3206 struct btf_type *vt;
3207
3208 vt = (void *)btf__type_by_id(obj->btf, vs->type);
3209 ext_name = btf__name_by_offset(obj->btf, vt->name_off);
3210 ext = find_extern_by_name(obj, ext_name);
3211 if (!ext) {
3212 pr_warn("failed to find extern definition for BTF var '%s'\n",
3213 ext_name);
3214 return -ESRCH;
3215 }
3216 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3217 vt->type = int_btf_id;
3218 vs->offset = off;
3219 vs->size = sizeof(int);
3220 }
3221 sec->size = off;
3222 }
3223
3224 if (kcfg_sec) {
3225 sec = kcfg_sec;
3226 /* for kcfg externs calculate their offsets within a .kconfig map */
3227 off = 0;
3228 for (i = 0; i < obj->nr_extern; i++) {
3229 ext = &obj->externs[i];
3230 if (ext->type != EXT_KCFG)
3231 continue;
3232
3233 ext->kcfg.data_off = roundup(off, ext->kcfg.align);
3234 off = ext->kcfg.data_off + ext->kcfg.sz;
3235 pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
3236 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
3237 }
3238 sec->size = off;
3239 n = btf_vlen(sec);
3240 for (i = 0; i < n; i++) {
3241 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3242
3243 t = btf__type_by_id(obj->btf, vs->type);
3244 ext_name = btf__name_by_offset(obj->btf, t->name_off);
3245 ext = find_extern_by_name(obj, ext_name);
3246 if (!ext) {
3247 pr_warn("failed to find extern definition for BTF var '%s'\n",
3248 ext_name);
3249 return -ESRCH;
3250 }
3251 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3252 vs->offset = ext->kcfg.data_off;
3253 }
3254 }
3255 return 0;
3256 }
3257
3258 struct bpf_program *
3259 bpf_object__find_program_by_title(const struct bpf_object *obj,
3260 const char *title)
3261 {
3262 struct bpf_program *pos;
3263
3264 bpf_object__for_each_program(pos, obj) {
3265 if (pos->sec_name && !strcmp(pos->sec_name, title))
3266 return pos;
3267 }
3268 return NULL;
3269 }
3270
3271 static bool prog_is_subprog(const struct bpf_object *obj,
3272 const struct bpf_program *prog)
3273 {
3274 	/* For legacy reasons, libbpf supports entry-point BPF programs
3275 * without SEC() attribute, i.e., those in the .text section. But if
3276 * there are 2 or more such programs in the .text section, they all
3277 * must be subprograms called from entry-point BPF programs in
3278 	 * designated SEC()'tions; otherwise there is no way to distinguish
3279 	 * which of those programs should be loaded and which are subprograms.
3280 * Similarly, if there is a function/program in .text and at least one
3281 * other BPF program with custom SEC() attribute, then we just assume
3282 * .text programs are subprograms (even if they are not called from
3283 * other programs), because libbpf never explicitly supported mixing
3284 * SEC()-designated BPF programs and .text entry-point BPF programs.
3285 */
3286 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
3287 }
3288
3289 struct bpf_program *
3290 bpf_object__find_program_by_name(const struct bpf_object *obj,
3291 const char *name)
3292 {
3293 struct bpf_program *prog;
3294
3295 bpf_object__for_each_program(prog, obj) {
3296 if (prog_is_subprog(obj, prog))
3297 continue;
3298 if (!strcmp(prog->name, name))
3299 return prog;
3300 }
3301 return NULL;
3302 }
3303
3304 static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
3305 int shndx)
3306 {
3307 return shndx == obj->efile.data_shndx ||
3308 shndx == obj->efile.bss_shndx ||
3309 shndx == obj->efile.rodata_shndx;
3310 }
3311
3312 static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
3313 int shndx)
3314 {
3315 return shndx == obj->efile.maps_shndx ||
3316 shndx == obj->efile.btf_maps_shndx;
3317 }
3318
3319 static enum libbpf_map_type
3320 bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
3321 {
3322 if (shndx == obj->efile.data_shndx)
3323 return LIBBPF_MAP_DATA;
3324 else if (shndx == obj->efile.bss_shndx)
3325 return LIBBPF_MAP_BSS;
3326 else if (shndx == obj->efile.rodata_shndx)
3327 return LIBBPF_MAP_RODATA;
3328 else if (shndx == obj->efile.symbols_shndx)
3329 return LIBBPF_MAP_KCONFIG;
3330 else
3331 return LIBBPF_MAP_UNSPEC;
3332 }
3333
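/* Record a single relocation against one of the program's instructions.
 * Four kinds of relocations are recognized here: sub-program calls
 * (RELO_CALL), references to extern symbols (RELO_EXTERN), references to
 * map definitions (RELO_LD64), and references into global data sections
 * (RELO_DATA); anything else is rejected as a relocation error.
 */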
3334 static int bpf_program__record_reloc(struct bpf_program *prog,
3335 struct reloc_desc *reloc_desc,
3336 __u32 insn_idx, const char *sym_name,
3337 const GElf_Sym *sym, const GElf_Rel *rel)
3338 {
3339 struct bpf_insn *insn = &prog->insns[insn_idx];
3340 size_t map_idx, nr_maps = prog->obj->nr_maps;
3341 struct bpf_object *obj = prog->obj;
3342 __u32 shdr_idx = sym->st_shndx;
3343 enum libbpf_map_type type;
3344 const char *sym_sec_name;
3345 struct bpf_map *map;
3346
3347 reloc_desc->processed = false;
3348
3349 /* sub-program call relocation */
3350 if (insn->code == (BPF_JMP | BPF_CALL)) {
3351 if (insn->src_reg != BPF_PSEUDO_CALL) {
3352 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
3353 return -LIBBPF_ERRNO__RELOC;
3354 }
3355 /* text_shndx can be 0, if no default "main" program exists */
3356 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
3357 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
3358 pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
3359 prog->name, sym_name, sym_sec_name);
3360 return -LIBBPF_ERRNO__RELOC;
3361 }
3362 if (sym->st_value % BPF_INSN_SZ) {
3363 pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
3364 prog->name, sym_name, (size_t)sym->st_value);
3365 return -LIBBPF_ERRNO__RELOC;
3366 }
3367 reloc_desc->type = RELO_CALL;
3368 reloc_desc->insn_idx = insn_idx;
3369 reloc_desc->sym_off = sym->st_value;
3370 return 0;
3371 }
3372
3373 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) {
3374 pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
3375 prog->name, sym_name, insn_idx, insn->code);
3376 return -LIBBPF_ERRNO__RELOC;
3377 }
3378
3379 if (sym_is_extern(sym)) {
3380 int sym_idx = GELF_R_SYM(rel->r_info);
3381 int i, n = obj->nr_extern;
3382 struct extern_desc *ext;
3383
3384 for (i = 0; i < n; i++) {
3385 ext = &obj->externs[i];
3386 if (ext->sym_idx == sym_idx)
3387 break;
3388 }
3389 if (i >= n) {
3390 pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
3391 prog->name, sym_name, sym_idx);
3392 return -LIBBPF_ERRNO__RELOC;
3393 }
3394 pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
3395 prog->name, i, ext->name, ext->sym_idx, insn_idx);
3396 reloc_desc->type = RELO_EXTERN;
3397 reloc_desc->insn_idx = insn_idx;
3398 reloc_desc->sym_off = i; /* sym_off stores extern index */
3399 return 0;
3400 }
3401
3402 if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
3403 pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
3404 prog->name, sym_name, shdr_idx);
3405 return -LIBBPF_ERRNO__RELOC;
3406 }
3407
3408 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
3409 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
3410
3411 /* generic map reference relocation */
3412 if (type == LIBBPF_MAP_UNSPEC) {
3413 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
3414 pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
3415 prog->name, sym_name, sym_sec_name);
3416 return -LIBBPF_ERRNO__RELOC;
3417 }
3418 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
3419 map = &obj->maps[map_idx];
3420 if (map->libbpf_type != type ||
3421 map->sec_idx != sym->st_shndx ||
3422 map->sec_offset != sym->st_value)
3423 continue;
3424 pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
3425 prog->name, map_idx, map->name, map->sec_idx,
3426 map->sec_offset, insn_idx);
3427 break;
3428 }
3429 if (map_idx >= nr_maps) {
3430 pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
3431 prog->name, sym_sec_name, (size_t)sym->st_value);
3432 return -LIBBPF_ERRNO__RELOC;
3433 }
3434 reloc_desc->type = RELO_LD64;
3435 reloc_desc->insn_idx = insn_idx;
3436 reloc_desc->map_idx = map_idx;
3437 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
3438 return 0;
3439 }
3440
3441 /* global data map relocation */
3442 if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
3443 pr_warn("prog '%s': bad data relo against section '%s'\n",
3444 prog->name, sym_sec_name);
3445 return -LIBBPF_ERRNO__RELOC;
3446 }
3447 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
3448 map = &obj->maps[map_idx];
3449 if (map->libbpf_type != type)
3450 continue;
3451 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
3452 prog->name, map_idx, map->name, map->sec_idx,
3453 map->sec_offset, insn_idx);
3454 break;
3455 }
3456 if (map_idx >= nr_maps) {
3457 pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
3458 prog->name, sym_sec_name);
3459 return -LIBBPF_ERRNO__RELOC;
3460 }
3461
3462 reloc_desc->type = RELO_DATA;
3463 reloc_desc->insn_idx = insn_idx;
3464 reloc_desc->map_idx = map_idx;
3465 reloc_desc->sym_off = sym->st_value;
3466 return 0;
3467 }
3468
3469 static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
3470 {
3471 return insn_idx >= prog->sec_insn_off &&
3472 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
3473 }
3474
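/* Binary-search obj->programs, which is kept sorted by
 * (sec_idx, sec_insn_off) (see cmp_progs()), for the program containing
 * instruction insn_idx of section sec_idx; returns NULL if no program
 * covers that instruction.
 */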
3475 static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
3476 size_t sec_idx, size_t insn_idx)
3477 {
3478 int l = 0, r = obj->nr_programs - 1, m;
3479 struct bpf_program *prog;
3480
3481 while (l < r) {
3482 m = l + (r - l + 1) / 2;
3483 prog = &obj->programs[m];
3484
3485 if (prog->sec_idx < sec_idx ||
3486 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
3487 l = m;
3488 else
3489 r = m - 1;
3490 }
3491 /* matching program could be at index l, but it still might be the
3492 * wrong one, so we need to double check conditions for the last time
3493 */
3494 prog = &obj->programs[l];
3495 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
3496 return prog;
3497 return NULL;
3498 }
3499
3500 static int
3501 bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data)
3502 {
3503 Elf_Data *symbols = obj->efile.symbols;
3504 const char *relo_sec_name, *sec_name;
3505 size_t sec_idx = shdr->sh_info;
3506 struct bpf_program *prog;
3507 struct reloc_desc *relos;
3508 int err, i, nrels;
3509 const char *sym_name;
3510 __u32 insn_idx;
3511 GElf_Sym sym;
3512 GElf_Rel rel;
3513
3514 relo_sec_name = elf_sec_str(obj, shdr->sh_name);
3515 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
3516 if (!relo_sec_name || !sec_name)
3517 return -EINVAL;
3518
3519 pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
3520 relo_sec_name, sec_idx, sec_name);
3521 nrels = shdr->sh_size / shdr->sh_entsize;
3522
3523 for (i = 0; i < nrels; i++) {
3524 if (!gelf_getrel(data, i, &rel)) {
3525 pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
3526 return -LIBBPF_ERRNO__FORMAT;
3527 }
3528 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
3529 pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n",
3530 relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
3531 return -LIBBPF_ERRNO__FORMAT;
3532 }
3533 if (rel.r_offset % BPF_INSN_SZ) {
3534 pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
3535 				relo_sec_name, (size_t)rel.r_offset, i);
3536 return -LIBBPF_ERRNO__FORMAT;
3537 }
3538
3539 insn_idx = rel.r_offset / BPF_INSN_SZ;
3540 /* relocations against static functions are recorded as
3541 * relocations against the section that contains a function;
3542 		 * in such a case, the symbol will be STT_SECTION and sym.st_name
3543 		 * will point to an empty string (0), so fetch the section name
3544 * instead
3545 */
3546 if (GELF_ST_TYPE(sym.st_info) == STT_SECTION && sym.st_name == 0)
3547 sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym.st_shndx));
3548 else
3549 sym_name = elf_sym_str(obj, sym.st_name);
3550 		sym_name = sym_name ?: "<?>";
3551
3552 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
3553 relo_sec_name, i, insn_idx, sym_name);
3554
3555 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
3556 if (!prog) {
3557 pr_warn("sec '%s': relo #%d: program not found in section '%s' for insn #%u\n",
3558 relo_sec_name, i, sec_name, insn_idx);
3559 return -LIBBPF_ERRNO__RELOC;
3560 }
3561
3562 relos = libbpf_reallocarray(prog->reloc_desc,
3563 prog->nr_reloc + 1, sizeof(*relos));
3564 if (!relos)
3565 return -ENOMEM;
3566 prog->reloc_desc = relos;
3567
3568 /* adjust insn_idx to local BPF program frame of reference */
3569 insn_idx -= prog->sec_insn_off;
3570 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
3571 insn_idx, sym_name, &sym, &rel);
3572 if (err)
3573 return err;
3574
3575 prog->nr_reloc++;
3576 }
3577 return 0;
3578 }
3579
3580 static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
3581 {
3582 struct bpf_map_def *def = &map->def;
3583 __u32 key_type_id = 0, value_type_id = 0;
3584 int ret;
3585
3586 	/* if it's a BTF-defined map, we don't need to search for type IDs.
3587 	 * A struct_ops map does not need btf_key_type_id and
3588 	 * btf_value_type_id either.
3589 */
3590 if (map->sec_idx == obj->efile.btf_maps_shndx ||
3591 bpf_map__is_struct_ops(map))
3592 return 0;
3593
3594 if (!bpf_map__is_internal(map)) {
3595 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
3596 def->value_size, &key_type_id,
3597 &value_type_id);
3598 } else {
3599 /*
3600 * LLVM annotates global data differently in BTF, that is,
3601 * only as '.data', '.bss' or '.rodata'.
3602 */
3603 ret = btf__find_by_name(obj->btf,
3604 libbpf_type_to_btf_name[map->libbpf_type]);
3605 }
3606 if (ret < 0)
3607 return ret;
3608
3609 map->btf_key_type_id = key_type_id;
3610 map->btf_value_type_id = bpf_map__is_internal(map) ?
3611 ret : value_type_id;
3612 return 0;
3613 }
3614
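/* Make 'map' reuse the already created map referred to by 'fd' instead of
 * creating a new one at load time: duplicate the fd with O_CLOEXEC set,
 * close the map's old fd, and refresh the map's definition and BTF type IDs
 * from the kernel's info about the reused map.
 */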
3615 int bpf_map__reuse_fd(struct bpf_map *map, int fd)
3616 {
3617 struct bpf_map_info info = {};
3618 __u32 len = sizeof(info);
3619 int new_fd, err;
3620 char *new_name;
3621
3622 err = bpf_obj_get_info_by_fd(fd, &info, &len);
3623 if (err)
3624 return err;
3625
3626 new_name = strdup(info.name);
3627 if (!new_name)
3628 return -errno;
3629
3630 new_fd = open("/", O_RDONLY | O_CLOEXEC);
3631 if (new_fd < 0) {
3632 err = -errno;
3633 goto err_free_new_name;
3634 }
3635
3636 new_fd = dup3(fd, new_fd, O_CLOEXEC);
3637 if (new_fd < 0) {
3638 err = -errno;
3639 goto err_close_new_fd;
3640 }
3641
3642 err = zclose(map->fd);
3643 if (err) {
3644 err = -errno;
3645 goto err_close_new_fd;
3646 }
3647 free(map->name);
3648
3649 map->fd = new_fd;
3650 map->name = new_name;
3651 map->def.type = info.type;
3652 map->def.key_size = info.key_size;
3653 map->def.value_size = info.value_size;
3654 map->def.max_entries = info.max_entries;
3655 map->def.map_flags = info.map_flags;
3656 map->btf_key_type_id = info.btf_key_type_id;
3657 map->btf_value_type_id = info.btf_value_type_id;
3658 map->reused = true;
3659
3660 return 0;
3661
3662 err_close_new_fd:
3663 close(new_fd);
3664 err_free_new_name:
3665 free(new_name);
3666 return err;
3667 }
3668
3669 __u32 bpf_map__max_entries(const struct bpf_map *map)
3670 {
3671 return map->def.max_entries;
3672 }
3673
3674 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
3675 {
3676 if (map->fd >= 0)
3677 return -EBUSY;
3678 map->def.max_entries = max_entries;
3679 return 0;
3680 }
3681
3682 int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
3683 {
3684 if (!map || !max_entries)
3685 return -EINVAL;
3686
3687 return bpf_map__set_max_entries(map, max_entries);
3688 }
3689
3690 static int
3691 bpf_object__probe_loading(struct bpf_object *obj)
3692 {
3693 struct bpf_load_program_attr attr;
3694 char *cp, errmsg[STRERR_BUFSIZE];
3695 struct bpf_insn insns[] = {
3696 BPF_MOV64_IMM(BPF_REG_0, 0),
3697 BPF_EXIT_INSN(),
3698 };
3699 int ret;
3700
3701 /* make sure basic loading works */
3702
3703 memset(&attr, 0, sizeof(attr));
3704 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3705 attr.insns = insns;
3706 attr.insns_cnt = ARRAY_SIZE(insns);
3707 attr.license = "GPL";
3708
3709 ret = bpf_load_program_xattr(&attr, NULL, 0);
3710 if (ret < 0) {
3711 ret = errno;
3712 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
3713 pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
3714 "program. Make sure your kernel supports BPF "
3715 "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
3716 "set to big enough value.\n", __func__, cp, ret);
3717 return -ret;
3718 }
3719 close(ret);
3720
3721 return 0;
3722 }
3723
3724 static int probe_fd(int fd)
3725 {
3726 if (fd >= 0)
3727 close(fd);
3728 return fd >= 0;
3729 }
3730
3731 static int probe_kern_prog_name(void)
3732 {
3733 struct bpf_load_program_attr attr;
3734 struct bpf_insn insns[] = {
3735 BPF_MOV64_IMM(BPF_REG_0, 0),
3736 BPF_EXIT_INSN(),
3737 };
3738 int ret;
3739
3740 /* make sure loading with name works */
3741
3742 memset(&attr, 0, sizeof(attr));
3743 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3744 attr.insns = insns;
3745 attr.insns_cnt = ARRAY_SIZE(insns);
3746 attr.license = "GPL";
3747 attr.name = "test";
3748 ret = bpf_load_program_xattr(&attr, NULL, 0);
3749 return probe_fd(ret);
3750 }
3751
3752 static int probe_kern_global_data(void)
3753 {
3754 struct bpf_load_program_attr prg_attr;
3755 struct bpf_create_map_attr map_attr;
3756 char *cp, errmsg[STRERR_BUFSIZE];
3757 struct bpf_insn insns[] = {
3758 BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
3759 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
3760 BPF_MOV64_IMM(BPF_REG_0, 0),
3761 BPF_EXIT_INSN(),
3762 };
3763 int ret, map;
3764
3765 memset(&map_attr, 0, sizeof(map_attr));
3766 map_attr.map_type = BPF_MAP_TYPE_ARRAY;
3767 map_attr.key_size = sizeof(int);
3768 map_attr.value_size = 32;
3769 map_attr.max_entries = 1;
3770
3771 map = bpf_create_map_xattr(&map_attr);
3772 if (map < 0) {
3773 ret = -errno;
3774 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
3775 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
3776 __func__, cp, -ret);
3777 return ret;
3778 }
3779
3780 insns[0].imm = map;
3781
3782 memset(&prg_attr, 0, sizeof(prg_attr));
3783 prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3784 prg_attr.insns = insns;
3785 prg_attr.insns_cnt = ARRAY_SIZE(insns);
3786 prg_attr.license = "GPL";
3787
3788 ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
3789 close(map);
3790 return probe_fd(ret);
3791 }
3792
3793 static int probe_kern_btf(void)
3794 {
3795 static const char strs[] = "\0int";
3796 __u32 types[] = {
3797 /* int */
3798 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
3799 };
3800
3801 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3802 strs, sizeof(strs)));
3803 }
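/* Editor's note: a decode of the raw blob above, for readers unfamiliar
 * with the BTF_*_ENC() helper macros:
 *
 *	BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4)
 *	-> one BTF_KIND_INT type: name_off = 1 ("int" in strs), signed
 *	   encoding, bit offset 0, 32 bits wide, 4 bytes in size
 *
 * The probe succeeds iff the kernel's BPF_BTF_LOAD command accepts this
 * minimal type + string section.
 */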
3804
3805 static int probe_kern_btf_func(void)
3806 {
3807 static const char strs[] = "\0int\0x\0a";
3808 /* void x(int a) {} */
3809 __u32 types[] = {
3810 /* int */
3811 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
3812 /* FUNC_PROTO */ /* [2] */
3813 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
3814 BTF_PARAM_ENC(7, 1),
3815 /* FUNC x */ /* [3] */
3816 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
3817 };
3818
3819 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3820 strs, sizeof(strs)));
3821 }
3822
3823 static int probe_kern_btf_func_global(void)
3824 {
3825 static const char strs[] = "\0int\0x\0a";
3826 /* void x(int a) {}, encoded with global linkage */
3827 __u32 types[] = {
3828 /* int */
3829 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
3830 /* FUNC_PROTO */ /* [2] */
3831 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
3832 BTF_PARAM_ENC(7, 1),
3833 /* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
3834 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
3835 };
3836
3837 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3838 strs, sizeof(strs)));
3839 }
3840
3841 static int probe_kern_btf_datasec(void)
3842 {
3843 static const char strs[] = "\0x\0.data";
3844 /* static int x; */
3845 __u32 types[] = {
3846 /* int */
3847 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
3848 /* VAR x */ /* [2] */
3849 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
3850 BTF_VAR_STATIC,
3851 /* DATASEC val */ /* [3] */
3852 BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
3853 BTF_VAR_SECINFO_ENC(2, 0, 4),
3854 };
3855
3856 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3857 strs, sizeof(strs)));
3858 }
3859
3860 static int probe_kern_array_mmap(void)
3861 {
3862 struct bpf_create_map_attr attr = {
3863 .map_type = BPF_MAP_TYPE_ARRAY,
3864 .map_flags = BPF_F_MMAPABLE,
3865 .key_size = sizeof(int),
3866 .value_size = sizeof(int),
3867 .max_entries = 1,
3868 };
3869
3870 return probe_fd(bpf_create_map_xattr(&attr));
3871 }
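/* Editor's note: when this probe succeeds, libbpf creates internal maps
 * (.data/.bss/.rodata/.kconfig) with BPF_F_MMAPABLE and mmap()s them to
 * back map->mmaped. A hypothetical direct user of the flag would do
 * roughly the following (sketch; error handling and page-size rounding
 * omitted):
 *
 *	int fd = bpf_create_map_xattr(&attr);
 *	int *val = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			fd, 0);
 *
 * after which *val aliases the array's single element and can be read and
 * written without bpf_map_lookup_elem()/bpf_map_update_elem() syscalls.
 */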
3872
3873 static int probe_kern_exp_attach_type(void)
3874 {
3875 struct bpf_load_program_attr attr;
3876 struct bpf_insn insns[] = {
3877 BPF_MOV64_IMM(BPF_REG_0, 0),
3878 BPF_EXIT_INSN(),
3879 };
3880
3881 memset(&attr, 0, sizeof(attr));
3882 /* use any valid combination of program type and (optional)
3883 * non-zero expected attach type (i.e., not BPF_CGROUP_INET_INGRESS,
3884 * which is 0) to see if the kernel supports the expected_attach_type
3885 * field of the BPF_PROG_LOAD command
3886 */
3887 attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
3888 attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE;
3889 attr.insns = insns;
3890 attr.insns_cnt = ARRAY_SIZE(insns);
3891 attr.license = "GPL";
3892
3893 return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
3894 }
3895
3896 static int probe_kern_probe_read_kernel(void)
3897 {
3898 struct bpf_load_program_attr attr;
3899 struct bpf_insn insns[] = {
3900 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
3901 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
3902 BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */
3903 BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */
3904 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
3905 BPF_EXIT_INSN(),
3906 };
3907
3908 memset(&attr, 0, sizeof(attr));
3909 attr.prog_type = BPF_PROG_TYPE_KPROBE;
3910 attr.insns = insns;
3911 attr.insns_cnt = ARRAY_SIZE(insns);
3912 attr.license = "GPL";
3913
3914 return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
3915 }
3916
3917 static int probe_prog_bind_map(void)
3918 {
3919 struct bpf_load_program_attr prg_attr;
3920 struct bpf_create_map_attr map_attr;
3921 char *cp, errmsg[STRERR_BUFSIZE];
3922 struct bpf_insn insns[] = {
3923 BPF_MOV64_IMM(BPF_REG_0, 0),
3924 BPF_EXIT_INSN(),
3925 };
3926 int ret, map, prog;
3927
3928 memset(&map_attr, 0, sizeof(map_attr));
3929 map_attr.map_type = BPF_MAP_TYPE_ARRAY;
3930 map_attr.key_size = sizeof(int);
3931 map_attr.value_size = 32;
3932 map_attr.max_entries = 1;
3933
3934 map = bpf_create_map_xattr(&map_attr);
3935 if (map < 0) {
3936 ret = -errno;
3937 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
3938 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
3939 __func__, cp, -ret);
3940 return ret;
3941 }
3942
3943 memset(&prg_attr, 0, sizeof(prg_attr));
3944 prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3945 prg_attr.insns = insns;
3946 prg_attr.insns_cnt = ARRAY_SIZE(insns);
3947 prg_attr.license = "GPL";
3948
3949 prog = bpf_load_program_xattr(&prg_attr, NULL, 0);
3950 if (prog < 0) {
3951 close(map);
3952 return 0;
3953 }
3954
3955 ret = bpf_prog_bind_map(prog, map, NULL);
3956
3957 close(map);
3958 close(prog);
3959
3960 return ret >= 0;
3961 }
3962
3963 enum kern_feature_result {
3964 FEAT_UNKNOWN = 0,
3965 FEAT_SUPPORTED = 1,
3966 FEAT_MISSING = 2,
3967 };
3968
3969 typedef int (*feature_probe_fn)(void);
3970
3971 static struct kern_feature_desc {
3972 const char *desc;
3973 feature_probe_fn probe;
3974 enum kern_feature_result res;
3975 } feature_probes[__FEAT_CNT] = {
3976 [FEAT_PROG_NAME] = {
3977 "BPF program name", probe_kern_prog_name,
3978 },
3979 [FEAT_GLOBAL_DATA] = {
3980 "global variables", probe_kern_global_data,
3981 },
3982 [FEAT_BTF] = {
3983 "minimal BTF", probe_kern_btf,
3984 },
3985 [FEAT_BTF_FUNC] = {
3986 "BTF functions", probe_kern_btf_func,
3987 },
3988 [FEAT_BTF_GLOBAL_FUNC] = {
3989 "BTF global function", probe_kern_btf_func_global,
3990 },
3991 [FEAT_BTF_DATASEC] = {
3992 "BTF data section and variable", probe_kern_btf_datasec,
3993 },
3994 [FEAT_ARRAY_MMAP] = {
3995 "ARRAY map mmap()", probe_kern_array_mmap,
3996 },
3997 [FEAT_EXP_ATTACH_TYPE] = {
3998 "BPF_PROG_LOAD expected_attach_type attribute",
3999 probe_kern_exp_attach_type,
4000 },
4001 [FEAT_PROBE_READ_KERN] = {
4002 "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
4003 },
4004 [FEAT_PROG_BIND_MAP] = {
4005 "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
4006 }
4007 };
4008
4009 static bool kernel_supports(enum kern_feature_id feat_id)
4010 {
4011 struct kern_feature_desc *feat = &feature_probes[feat_id];
4012 int ret;
4013
4014 if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
4015 ret = feat->probe();
4016 if (ret > 0) {
4017 WRITE_ONCE(feat->res, FEAT_SUPPORTED);
4018 } else if (ret == 0) {
4019 WRITE_ONCE(feat->res, FEAT_MISSING);
4020 } else {
4021 pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
4022 WRITE_ONCE(feat->res, FEAT_MISSING);
4023 }
4024 }
4025
4026 return READ_ONCE(feat->res) == FEAT_SUPPORTED;
4027 }
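/* Editor's note: probe results are cached process-wide in feature_probes[],
 * so each feature is probed at most once. Typical usage within this file:
 *
 *	if (kernel_supports(FEAT_PROG_NAME))
 *		create_attr.name = map->name;
 */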
4028
4029 static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
4030 {
4031 struct bpf_map_info map_info = {};
4032 char msg[STRERR_BUFSIZE];
4033 __u32 map_info_len;
4034
4035 map_info_len = sizeof(map_info);
4036
4037 if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
4038 pr_warn("failed to get map info for map FD %d: %s\n",
4039 map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
4040 return false;
4041 }
4042
4043 return (map_info.type == map->def.type &&
4044 map_info.key_size == map->def.key_size &&
4045 map_info.value_size == map->def.value_size &&
4046 map_info.max_entries == map->def.max_entries &&
4047 map_info.map_flags == map->def.map_flags);
4048 }
4049
4050 static int
4051 bpf_object__reuse_map(struct bpf_map *map)
4052 {
4053 char *cp, errmsg[STRERR_BUFSIZE];
4054 int err, pin_fd;
4055
4056 pin_fd = bpf_obj_get(map->pin_path);
4057 if (pin_fd < 0) {
4058 err = -errno;
4059 if (err == -ENOENT) {
4060 pr_debug("found no pinned map to reuse at '%s'\n",
4061 map->pin_path);
4062 return 0;
4063 }
4064
4065 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
4066 pr_warn("couldn't retrieve pinned map '%s': %s\n",
4067 map->pin_path, cp);
4068 return err;
4069 }
4070
4071 if (!map_is_reuse_compat(map, pin_fd)) {
4072 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
4073 map->pin_path);
4074 close(pin_fd);
4075 return -EINVAL;
4076 }
4077
4078 err = bpf_map__reuse_fd(map, pin_fd);
4079 if (err) {
4080 close(pin_fd);
4081 return err;
4082 }
4083 map->pinned = true;
4084 pr_debug("reused pinned map at '%s'\n", map->pin_path);
4085
4086 return 0;
4087 }
4088
4089 static int
4090 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
4091 {
4092 enum libbpf_map_type map_type = map->libbpf_type;
4093 char *cp, errmsg[STRERR_BUFSIZE];
4094 int err, zero = 0;
4095
4096 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
4097 if (err) {
4098 err = -errno;
4099 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4100 pr_warn("Error setting initial map(%s) contents: %s\n",
4101 map->name, cp);
4102 return err;
4103 }
4104
4105 /* Freeze .rodata and .kconfig map as read-only from syscall side. */
4106 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
4107 err = bpf_map_freeze(map->fd);
4108 if (err) {
4109 err = -errno;
4110 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4111 pr_warn("Error freezing map(%s) as read-only: %s\n",
4112 map->name, cp);
4113 return err;
4114 }
4115 }
4116 return 0;
4117 }
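/* Editor's note: once frozen, a map rejects syscall-side writes, while
 * BPF-program-side reads keep working; this is what lets the verifier treat
 * .rodata contents as known constants. A sketch of the resulting behavior
 * (hypothetical caller code):
 *
 *	bpf_map_freeze(fd);
 *	err = bpf_map_update_elem(fd, &zero, buf, 0);	// now fails, -EPERM
 */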
4118
4119 static void bpf_map__destroy(struct bpf_map *map);
4120
4121 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
4122 {
4123 struct bpf_create_map_attr create_attr;
4124 struct bpf_map_def *def = &map->def;
4125
4126 memset(&create_attr, 0, sizeof(create_attr));
4127
4128 if (kernel_supports(FEAT_PROG_NAME))
4129 create_attr.name = map->name;
4130 create_attr.map_ifindex = map->map_ifindex;
4131 create_attr.map_type = def->type;
4132 create_attr.map_flags = def->map_flags;
4133 create_attr.key_size = def->key_size;
4134 create_attr.value_size = def->value_size;
4135 create_attr.numa_node = map->numa_node;
4136
4137 if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
4138 int nr_cpus;
4139
4140 nr_cpus = libbpf_num_possible_cpus();
4141 if (nr_cpus < 0) {
4142 pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
4143 map->name, nr_cpus);
4144 return nr_cpus;
4145 }
4146 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
4147 create_attr.max_entries = nr_cpus;
4148 } else {
4149 create_attr.max_entries = def->max_entries;
4150 }
4151
4152 if (bpf_map__is_struct_ops(map))
4153 create_attr.btf_vmlinux_value_type_id =
4154 map->btf_vmlinux_value_type_id;
4155
4156 create_attr.btf_fd = 0;
4157 create_attr.btf_key_type_id = 0;
4158 create_attr.btf_value_type_id = 0;
4159 if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
4160 create_attr.btf_fd = btf__fd(obj->btf);
4161 create_attr.btf_key_type_id = map->btf_key_type_id;
4162 create_attr.btf_value_type_id = map->btf_value_type_id;
4163 }
4164
4165 if (bpf_map_type__is_map_in_map(def->type)) {
4166 if (map->inner_map) {
4167 int err;
4168
4169 err = bpf_object__create_map(obj, map->inner_map);
4170 if (err) {
4171 pr_warn("map '%s': failed to create inner map: %d\n",
4172 map->name, err);
4173 return err;
4174 }
4175 map->inner_map_fd = bpf_map__fd(map->inner_map);
4176 }
4177 if (map->inner_map_fd >= 0)
4178 create_attr.inner_map_fd = map->inner_map_fd;
4179 }
4180
4181 map->fd = bpf_create_map_xattr(&create_attr);
4182 if (map->fd < 0 && (create_attr.btf_key_type_id ||
4183 create_attr.btf_value_type_id)) {
4184 char *cp, errmsg[STRERR_BUFSIZE];
4185 int err = -errno;
4186
4187 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4188 pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
4189 map->name, cp, err);
4190 create_attr.btf_fd = 0;
4191 create_attr.btf_key_type_id = 0;
4192 create_attr.btf_value_type_id = 0;
4193 map->btf_key_type_id = 0;
4194 map->btf_value_type_id = 0;
4195 map->fd = bpf_create_map_xattr(&create_attr);
4196 }
4197
4198 if (map->fd < 0)
4199 return -errno;
4200
4201 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
4202 bpf_map__destroy(map->inner_map);
4203 zfree(&map->inner_map);
4204 }
4205
4206 return 0;
4207 }
4208
4209 static int init_map_slots(struct bpf_map *map)
4210 {
4211 const struct bpf_map *targ_map;
4212 unsigned int i;
4213 int fd, err;
4214
4215 for (i = 0; i < map->init_slots_sz; i++) {
4216 if (!map->init_slots[i])
4217 continue;
4218
4219 targ_map = map->init_slots[i];
4220 fd = bpf_map__fd(targ_map);
4221 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
4222 if (err) {
4223 err = -errno;
4224 pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
4225 map->name, i, targ_map->name,
4226 fd, err);
4227 return err;
4228 }
4229 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
4230 map->name, i, targ_map->name, fd);
4231 }
4232
4233 zfree(&map->init_slots);
4234 map->init_slots_sz = 0;
4235
4236 return 0;
4237 }
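/* Editor's note: init_slots are recorded while parsing BTF-defined
 * map-in-map declarations on the BPF side. A sketch of a declaration that
 * would yield two initialized slots (names are illustrative, following
 * libbpf's __uint/__array conventions from bpf_helpers.h):
 *
 *	struct inner {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, int);
 *		__type(value, int);
 *	} inner_a SEC(".maps"), inner_b SEC(".maps");
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *		__uint(max_entries, 4);
 *		__uint(key_size, sizeof(int));
 *		__array(values, struct inner);
 *	} outer SEC(".maps") = {
 *		.values = { [0] = &inner_a, [3] = &inner_b },
 *	};
 *
 * Slots 1 and 2 stay NULL in init_slots and are skipped above.
 */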
4238
4239 static int
4240 bpf_object__create_maps(struct bpf_object *obj)
4241 {
4242 struct bpf_map *map;
4243 char *cp, errmsg[STRERR_BUFSIZE];
4244 unsigned int i, j;
4245 int err;
4246
4247 for (i = 0; i < obj->nr_maps; i++) {
4248 map = &obj->maps[i];
4249
4250 if (map->pin_path) {
4251 err = bpf_object__reuse_map(map);
4252 if (err) {
4253 pr_warn("map '%s': error reusing pinned map\n",
4254 map->name);
4255 goto err_out;
4256 }
4257 }
4258
4259 if (map->fd >= 0) {
4260 pr_debug("map '%s': skipping creation (preset fd=%d)\n",
4261 map->name, map->fd);
4262 } else {
4263 err = bpf_object__create_map(obj, map);
4264 if (err)
4265 goto err_out;
4266
4267 pr_debug("map '%s': created successfully, fd=%d\n",
4268 map->name, map->fd);
4269
4270 if (bpf_map__is_internal(map)) {
4271 err = bpf_object__populate_internal_map(obj, map);
4272 if (err < 0) {
4273 zclose(map->fd);
4274 goto err_out;
4275 }
4276 }
4277
4278 if (map->init_slots_sz) {
4279 err = init_map_slots(map);
4280 if (err < 0) {
4281 zclose(map->fd);
4282 goto err_out;
4283 }
4284 }
4285 }
4286
4287 if (map->pin_path && !map->pinned) {
4288 err = bpf_map__pin(map, NULL);
4289 if (err) {
4290 pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
4291 map->name, map->pin_path, err);
4292 zclose(map->fd);
4293 goto err_out;
4294 }
4295 }
4296 }
4297
4298 return 0;
4299
4300 err_out:
4301 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4302 pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
4303 pr_perm_msg(err);
4304 for (j = 0; j < i; j++)
4305 zclose(obj->maps[j].fd);
4306 return err;
4307 }
4308
4309 #define BPF_CORE_SPEC_MAX_LEN 64
4310
4311 /* represents BPF CO-RE field or array element accessor */
4312 struct bpf_core_accessor {
4313 __u32 type_id; /* struct/union type or array element type */
4314 __u32 idx; /* field index or array index */
4315 const char *name; /* field name or NULL for array accessor */
4316 };
4317
4318 struct bpf_core_spec {
4319 const struct btf *btf;
4320 /* high-level spec: named fields and array indices only */
4321 struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
4322 /* original unresolved (no skip_mods_and_typedefs) root type ID */
4323 __u32 root_type_id;
4324 /* CO-RE relocation kind */
4325 enum bpf_core_relo_kind relo_kind;
4326 /* high-level spec length */
4327 int len;
4328 /* raw, low-level spec: 1-to-1 with accessor spec string */
4329 int raw_spec[BPF_CORE_SPEC_MAX_LEN];
4330 /* raw spec length */
4331 int raw_len;
4332 /* field bit offset represented by spec */
4333 __u32 bit_offset;
4334 };
4335
4336 static bool str_is_empty(const char *s)
4337 {
4338 return !s || !s[0];
4339 }
4340
4341 static bool is_flex_arr(const struct btf *btf,
4342 const struct bpf_core_accessor *acc,
4343 const struct btf_array *arr)
4344 {
4345 const struct btf_type *t;
4346
4347 /* not a flexible array if it's not inside a struct or has non-zero size */
4348 if (!acc->name || arr->nelems > 0)
4349 return false;
4350
4351 /* has to be the last member of enclosing struct */
4352 t = btf__type_by_id(btf, acc->type_id);
4353 return acc->idx == btf_vlen(t) - 1;
4354 }
4355
4356 static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
4357 {
4358 switch (kind) {
4359 case BPF_FIELD_BYTE_OFFSET: return "byte_off";
4360 case BPF_FIELD_BYTE_SIZE: return "byte_sz";
4361 case BPF_FIELD_EXISTS: return "field_exists";
4362 case BPF_FIELD_SIGNED: return "signed";
4363 case BPF_FIELD_LSHIFT_U64: return "lshift_u64";
4364 case BPF_FIELD_RSHIFT_U64: return "rshift_u64";
4365 case BPF_TYPE_ID_LOCAL: return "local_type_id";
4366 case BPF_TYPE_ID_TARGET: return "target_type_id";
4367 case BPF_TYPE_EXISTS: return "type_exists";
4368 case BPF_TYPE_SIZE: return "type_size";
4369 case BPF_ENUMVAL_EXISTS: return "enumval_exists";
4370 case BPF_ENUMVAL_VALUE: return "enumval_value";
4371 default: return "unknown";
4372 }
4373 }
4374
4375 static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
4376 {
4377 switch (kind) {
4378 case BPF_FIELD_BYTE_OFFSET:
4379 case BPF_FIELD_BYTE_SIZE:
4380 case BPF_FIELD_EXISTS:
4381 case BPF_FIELD_SIGNED:
4382 case BPF_FIELD_LSHIFT_U64:
4383 case BPF_FIELD_RSHIFT_U64:
4384 return true;
4385 default:
4386 return false;
4387 }
4388 }
4389
4390 static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
4391 {
4392 switch (kind) {
4393 case BPF_TYPE_ID_LOCAL:
4394 case BPF_TYPE_ID_TARGET:
4395 case BPF_TYPE_EXISTS:
4396 case BPF_TYPE_SIZE:
4397 return true;
4398 default:
4399 return false;
4400 }
4401 }
4402
4403 static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
4404 {
4405 switch (kind) {
4406 case BPF_ENUMVAL_EXISTS:
4407 case BPF_ENUMVAL_VALUE:
4408 return true;
4409 default:
4410 return false;
4411 }
4412 }
4413
4414 /*
4415 * Turn bpf_core_relo into a low- and high-level spec representation,
4416 * validating correctness along the way, as well as calculating resulting
4417 * field bit offset, specified by accessor string. Low-level spec captures
4418 * every single level of nestedness, including traversing anonymous
4419 * struct/union members. High-level one only captures semantically meaningful
4420 * "turning points": named fields and array indicies.
4421 * E.g., for this case:
4422 *
4423 * struct sample {
4424 * int __unimportant;
4425 * struct {
4426 * int __1;
4427 * int __2;
4428 * int a[7];
4429 * };
4430 * };
4431 *
4432 * struct sample *s = ...;
4433 *
4434 * int *x = &s->a[3]; // access string = '0:1:2:3'
4435 *
4436 * Low-level spec has 1:1 mapping with each element of access string (it's
4437 * just a parsed access string representation): [0, 1, 2, 3].
4438 *
4439 * High-level spec will capture only 3 points:
4440 * - initial zero-index access by pointer (&s->... is the same as &s[0]...);
4441 * - field 'a' access (corresponds to '2' in low-level spec);
4442 * - array element #3 access (corresponds to '3' in low-level spec).
4443 *
4444 * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
4445 * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
4446 * spec and raw_spec are kept empty.
4447 *
4448 * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use the access
4449 * string to specify the enumerator's value index that needs to be relocated.
4450 */
4451 static int bpf_core_parse_spec(const struct btf *btf,
4452 __u32 type_id,
4453 const char *spec_str,
4454 enum bpf_core_relo_kind relo_kind,
4455 struct bpf_core_spec *spec)
4456 {
4457 int access_idx, parsed_len, i;
4458 struct bpf_core_accessor *acc;
4459 const struct btf_type *t;
4460 const char *name;
4461 __u32 id;
4462 __s64 sz;
4463
4464 if (str_is_empty(spec_str) || *spec_str == ':')
4465 return -EINVAL;
4466
4467 memset(spec, 0, sizeof(*spec));
4468 spec->btf = btf;
4469 spec->root_type_id = type_id;
4470 spec->relo_kind = relo_kind;
4471
4472 /* type-based relocations don't have a field access string */
4473 if (core_relo_is_type_based(relo_kind)) {
4474 if (strcmp(spec_str, "0"))
4475 return -EINVAL;
4476 return 0;
4477 }
4478
4479 /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
4480 while (*spec_str) {
4481 if (*spec_str == ':')
4482 ++spec_str;
4483 if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
4484 return -EINVAL;
4485 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
4486 return -E2BIG;
4487 spec_str += parsed_len;
4488 spec->raw_spec[spec->raw_len++] = access_idx;
4489 }
4490
4491 if (spec->raw_len == 0)
4492 return -EINVAL;
4493
4494 t = skip_mods_and_typedefs(btf, type_id, &id);
4495 if (!t)
4496 return -EINVAL;
4497
4498 access_idx = spec->raw_spec[0];
4499 acc = &spec->spec[0];
4500 acc->type_id = id;
4501 acc->idx = access_idx;
4502 spec->len++;
4503
4504 if (core_relo_is_enumval_based(relo_kind)) {
4505 if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
4506 return -EINVAL;
4507
4508 /* record enumerator name in the first accessor */
4509 acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off);
4510 return 0;
4511 }
4512
4513 if (!core_relo_is_field_based(relo_kind))
4514 return -EINVAL;
4515
4516 sz = btf__resolve_size(btf, id);
4517 if (sz < 0)
4518 return sz;
4519 spec->bit_offset = access_idx * sz * 8;
4520
4521 for (i = 1; i < spec->raw_len; i++) {
4522 t = skip_mods_and_typedefs(btf, id, &id);
4523 if (!t)
4524 return -EINVAL;
4525
4526 access_idx = spec->raw_spec[i];
4527 acc = &spec->spec[spec->len];
4528
4529 if (btf_is_composite(t)) {
4530 const struct btf_member *m;
4531 __u32 bit_offset;
4532
4533 if (access_idx >= btf_vlen(t))
4534 return -EINVAL;
4535
4536 bit_offset = btf_member_bit_offset(t, access_idx);
4537 spec->bit_offset += bit_offset;
4538
4539 m = btf_members(t) + access_idx;
4540 if (m->name_off) {
4541 name = btf__name_by_offset(btf, m->name_off);
4542 if (str_is_empty(name))
4543 return -EINVAL;
4544
4545 acc->type_id = id;
4546 acc->idx = access_idx;
4547 acc->name = name;
4548 spec->len++;
4549 }
4550
4551 id = m->type;
4552 } else if (btf_is_array(t)) {
4553 const struct btf_array *a = btf_array(t);
4554 bool flex;
4555
4556 t = skip_mods_and_typedefs(btf, a->type, &id);
4557 if (!t)
4558 return -EINVAL;
4559
4560 flex = is_flex_arr(btf, acc - 1, a);
4561 if (!flex && access_idx >= a->nelems)
4562 return -EINVAL;
4563
4564 spec->spec[spec->len].type_id = id;
4565 spec->spec[spec->len].idx = access_idx;
4566 spec->len++;
4567
4568 sz = btf__resolve_size(btf, id);
4569 if (sz < 0)
4570 return sz;
4571 spec->bit_offset += access_idx * sz * 8;
4572 } else {
4573 pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
4574 type_id, spec_str, i, id, btf_kind_str(t));
4575 return -EINVAL;
4576 }
4577 }
4578
4579 return 0;
4580 }
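/* Editor's note: a worked example of the bit_offset accumulation above,
 * using 'struct sample' from the comment preceding this function. For
 * access string "0:1:2:3" (&s[0] -> anonymous struct -> a -> a[3]):
 *
 *	s[0]			+0 bits
 *	anon struct (member 1)	+32 bits  (after one 4-byte int)
 *	field a (member 2)	+64 bits  (after two more 4-byte ints)
 *	a[3]			+96 bits  (3 * sizeof(int) * 8)
 *
 * so spec->bit_offset = 192 (byte offset 24), raw_len = 4, and the
 * high-level spec has len 3: initial deref, field 'a', array index 3.
 */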
4581
4582 static bool bpf_core_is_flavor_sep(const char *s)
4583 {
4584 /* check X___Y name pattern, where X and Y are not underscores */
4585 return s[0] != '_' && /* X */
4586 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
4587 s[4] != '_'; /* Y */
4588 }
4589
4590 /* Given 'some_struct_name___with_flavor', return the length of the name
4591 * prefix before the last triple underscore. The struct name part after the
4592 * last triple underscore is ignored by BPF CO-RE during relocation matching.
4593 */
4594 static size_t bpf_core_essential_name_len(const char *name)
4595 {
4596 size_t n = strlen(name);
4597 int i;
4598
4599 for (i = n - 5; i >= 0; i--) {
4600 if (bpf_core_is_flavor_sep(name + i))
4601 return i + 1;
4602 }
4603 return n;
4604 }
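/* Editor's note: e.g., for the flavored name "task_struct___2" the loop
 * finds the flavor separator at i == 10 ('t', then "___", then '2') and
 * returns 11, the length of the essential prefix "task_struct"; a name
 * without "___" falls through and returns strlen(name).
 */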
4605
4606 /* dynamically sized list of type IDs */
4607 struct ids_vec {
4608 __u32 *data;
4609 int len;
4610 };
4611
4612 static void bpf_core_free_cands(struct ids_vec *cand_ids)
4613 {
4614 free(cand_ids->data);
4615 free(cand_ids);
4616 }
4617
4618 static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
4619 __u32 local_type_id,
4620 const struct btf *targ_btf)
4621 {
4622 size_t local_essent_len, targ_essent_len;
4623 const char *local_name, *targ_name;
4624 const struct btf_type *t, *local_t;
4625 struct ids_vec *cand_ids;
4626 __u32 *new_ids;
4627 int i, err, n;
4628
4629 local_t = btf__type_by_id(local_btf, local_type_id);
4630 if (!local_t)
4631 return ERR_PTR(-EINVAL);
4632
4633 local_name = btf__name_by_offset(local_btf, local_t->name_off);
4634 if (str_is_empty(local_name))
4635 return ERR_PTR(-EINVAL);
4636 local_essent_len = bpf_core_essential_name_len(local_name);
4637
4638 cand_ids = calloc(1, sizeof(*cand_ids));
4639 if (!cand_ids)
4640 return ERR_PTR(-ENOMEM);
4641
4642 n = btf__get_nr_types(targ_btf);
4643 for (i = 1; i <= n; i++) {
4644 t = btf__type_by_id(targ_btf, i);
4645 if (btf_kind(t) != btf_kind(local_t))
4646 continue;
4647
4648 targ_name = btf__name_by_offset(targ_btf, t->name_off);
4649 if (str_is_empty(targ_name))
4650 continue;
4651
4652 targ_essent_len = bpf_core_essential_name_len(targ_name);
4653 if (targ_essent_len != local_essent_len)
4654 continue;
4655
4656 if (strncmp(local_name, targ_name, local_essent_len) == 0) {
4657 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s\n",
4658 local_type_id, btf_kind_str(local_t),
4659 local_name, i, btf_kind_str(t), targ_name);
4660 new_ids = libbpf_reallocarray(cand_ids->data,
4661 cand_ids->len + 1,
4662 sizeof(*cand_ids->data));
4663 if (!new_ids) {
4664 err = -ENOMEM;
4665 goto err_out;
4666 }
4667 cand_ids->data = new_ids;
4668 cand_ids->data[cand_ids->len++] = i;
4669 }
4670 }
4671 return cand_ids;
4672 err_out:
4673 bpf_core_free_cands(cand_ids);
4674 return ERR_PTR(err);
4675 }
4676
4677 /* Check two types for compatibility for the purpose of field access
4678 * relocation. const/volatile/restrict and typedefs are skipped to ensure we
4679 * are relocating semantically compatible entities:
4680 * - any two STRUCTs/UNIONs are compatible and can be mixed;
4681 * - any two FWDs are compatible if their names match (modulo flavor suffix);
4682 * - any two PTRs are always compatible;
4683 * - for ENUMs, names should be the same (ignoring flavor suffix) or at
4684 * least one of the enums should be anonymous; sizes and enumerator
4685 * names are ignored;
4686 * - for INT, size and signedness are ignored;
4687 * - for ARRAY, dimensionality is ignored, element types are checked for
4688 * compatibility recursively;
4689 * - everything else shouldn't ever be a target of relocation.
4690 * These rules are not set in stone and probably will be adjusted as we get
4691 * more experience with using BPF CO-RE relocations.
4692 */
4693 static int bpf_core_fields_are_compat(const struct btf *local_btf,
4694 __u32 local_id,
4695 const struct btf *targ_btf,
4696 __u32 targ_id)
4697 {
4698 const struct btf_type *local_type, *targ_type;
4699
4700 recur:
4701 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
4702 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
4703 if (!local_type || !targ_type)
4704 return -EINVAL;
4705
4706 if (btf_is_composite(local_type) && btf_is_composite(targ_type))
4707 return 1;
4708 if (btf_kind(local_type) != btf_kind(targ_type))
4709 return 0;
4710
4711 switch (btf_kind(local_type)) {
4712 case BTF_KIND_PTR:
4713 return 1;
4714 case BTF_KIND_FWD:
4715 case BTF_KIND_ENUM: {
4716 const char *local_name, *targ_name;
4717 size_t local_len, targ_len;
4718
4719 local_name = btf__name_by_offset(local_btf,
4720 local_type->name_off);
4721 targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
4722 local_len = bpf_core_essential_name_len(local_name);
4723 targ_len = bpf_core_essential_name_len(targ_name);
4724 /* one of them is anonymous or both w/ same flavor-less names */
4725 return local_len == 0 || targ_len == 0 ||
4726 (local_len == targ_len &&
4727 strncmp(local_name, targ_name, local_len) == 0);
4728 }
4729 case BTF_KIND_INT:
4730 /* just reject deprecated bitfield-like integers; all other
4731 * integers are by default compatible between each other
4732 */
4733 return btf_int_offset(local_type) == 0 &&
4734 btf_int_offset(targ_type) == 0;
4735 case BTF_KIND_ARRAY:
4736 local_id = btf_array(local_type)->type;
4737 targ_id = btf_array(targ_type)->type;
4738 goto recur;
4739 default:
4740 pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
4741 btf_kind(local_type), local_id, targ_id);
4742 return 0;
4743 }
4744 }
4745
4746 /*
4747 * Given single high-level named field accessor in local type, find
4748 * corresponding high-level accessor for a target type. Along the way,
4749 * maintain low-level spec for target as well. Also keep updating target
4750 * bit offset.
4751 *
4752 * Searching is performed through recursive exhaustive enumeration of all
4753 * fields of a struct/union. If there are any anonymous (embedded)
4754 * structs/unions, they are recursively searched as well. If a field with
4755 * the desired name is found, compatibility between local and target types
4756 * is checked before returning the result.
4757 *
4758 * 1 is returned if the field is found.
4759 * 0 is returned if no compatible field is found.
4760 * <0 is returned on error.
4761 */
4762 static int bpf_core_match_member(const struct btf *local_btf,
4763 const struct bpf_core_accessor *local_acc,
4764 const struct btf *targ_btf,
4765 __u32 targ_id,
4766 struct bpf_core_spec *spec,
4767 __u32 *next_targ_id)
4768 {
4769 const struct btf_type *local_type, *targ_type;
4770 const struct btf_member *local_member, *m;
4771 const char *local_name, *targ_name;
4772 __u32 local_id;
4773 int i, n, found;
4774
4775 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
4776 if (!targ_type)
4777 return -EINVAL;
4778 if (!btf_is_composite(targ_type))
4779 return 0;
4780
4781 local_id = local_acc->type_id;
4782 local_type = btf__type_by_id(local_btf, local_id);
4783 local_member = btf_members(local_type) + local_acc->idx;
4784 local_name = btf__name_by_offset(local_btf, local_member->name_off);
4785
4786 n = btf_vlen(targ_type);
4787 m = btf_members(targ_type);
4788 for (i = 0; i < n; i++, m++) {
4789 __u32 bit_offset;
4790
4791 bit_offset = btf_member_bit_offset(targ_type, i);
4792
4793 /* too deep struct/union/array nesting */
4794 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
4795 return -E2BIG;
4796
4797 /* speculate this member will be the good one */
4798 spec->bit_offset += bit_offset;
4799 spec->raw_spec[spec->raw_len++] = i;
4800
4801 targ_name = btf__name_by_offset(targ_btf, m->name_off);
4802 if (str_is_empty(targ_name)) {
4803 /* embedded struct/union, we need to go deeper */
4804 found = bpf_core_match_member(local_btf, local_acc,
4805 targ_btf, m->type,
4806 spec, next_targ_id);
4807 if (found) /* either found or error */
4808 return found;
4809 } else if (strcmp(local_name, targ_name) == 0) {
4810 /* matching named field */
4811 struct bpf_core_accessor *targ_acc;
4812
4813 targ_acc = &spec->spec[spec->len++];
4814 targ_acc->type_id = targ_id;
4815 targ_acc->idx = i;
4816 targ_acc->name = targ_name;
4817
4818 *next_targ_id = m->type;
4819 found = bpf_core_fields_are_compat(local_btf,
4820 local_member->type,
4821 targ_btf, m->type);
4822 if (!found)
4823 spec->len--; /* pop accessor */
4824 return found;
4825 }
4826 /* member turned out not to be what we looked for */
4827 spec->bit_offset -= bit_offset;
4828 spec->raw_len--;
4829 }
4830
4831 return 0;
4832 }
4833
4834 /* Check local and target types for compatibility. This check is used for
4835 * type-based CO-RE relocations and follows slightly different rules than
4836 * field-based relocations. This function assumes that root types were already
4837 * checked for name match. Beyond that initial root-level name check, names
4838 * are completely ignored. Compatibility rules are as follows:
4839 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
4840 * kind should match for local and target types (i.e., STRUCT is not
4841 * compatible with UNION);
4842 * - for ENUMs, the size is ignored;
4843 * - for INT, size and signedness are ignored;
4844 * - for ARRAY, dimensionality is ignored, element types are checked for
4845 * compatibility recursively;
4846 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
4847 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
4848 * - FUNC_PROTOs are compatible if they have compatible signature: same
4849 * number of input args and compatible return and argument types.
4850 * These rules are not set in stone and probably will be adjusted as we get
4851 * more experience with using BPF CO-RE relocations.
4852 */
4853 static int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
4854 const struct btf *targ_btf, __u32 targ_id)
4855 {
4856 const struct btf_type *local_type, *targ_type;
4857 int depth = 32; /* max recursion depth */
4858
4859 /* caller made sure that names match (ignoring flavor suffix) */
4860 local_type = btf__type_by_id(local_btf, local_id);
4861 targ_type = btf__type_by_id(targ_btf, targ_id);
4862 if (btf_kind(local_type) != btf_kind(targ_type))
4863 return 0;
4864
4865 recur:
4866 depth--;
4867 if (depth < 0)
4868 return -EINVAL;
4869
4870 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
4871 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
4872 if (!local_type || !targ_type)
4873 return -EINVAL;
4874
4875 if (btf_kind(local_type) != btf_kind(targ_type))
4876 return 0;
4877
4878 switch (btf_kind(local_type)) {
4879 case BTF_KIND_UNKN:
4880 case BTF_KIND_STRUCT:
4881 case BTF_KIND_UNION:
4882 case BTF_KIND_ENUM:
4883 case BTF_KIND_FWD:
4884 return 1;
4885 case BTF_KIND_INT:
4886 /* just reject deprecated bitfield-like integers; all other
4887 * integers are by default compatible between each other
4888 */
4889 return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
4890 case BTF_KIND_PTR:
4891 local_id = local_type->type;
4892 targ_id = targ_type->type;
4893 goto recur;
4894 case BTF_KIND_ARRAY:
4895 local_id = btf_array(local_type)->type;
4896 targ_id = btf_array(targ_type)->type;
4897 goto recur;
4898 case BTF_KIND_FUNC_PROTO: {
4899 struct btf_param *local_p = btf_params(local_type);
4900 struct btf_param *targ_p = btf_params(targ_type);
4901 __u16 local_vlen = btf_vlen(local_type);
4902 __u16 targ_vlen = btf_vlen(targ_type);
4903 int i, err;
4904
4905 if (local_vlen != targ_vlen)
4906 return 0;
4907
4908 for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
4909 skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
4910 skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
4911 err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
4912 if (err <= 0)
4913 return err;
4914 }
4915
4916 /* tail recurse for return type check */
4917 skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
4918 skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
4919 goto recur;
4920 }
4921 default:
4922 pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
4923 btf_kind_str(local_type), local_id, targ_id);
4924 return 0;
4925 }
4926 }
4927
4928 /*
4929 * Try to match local spec to a target type and, if successful, produce full
4930 * target spec (high-level, low-level + bit offset).
4931 */
4932 static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
4933 const struct btf *targ_btf, __u32 targ_id,
4934 struct bpf_core_spec *targ_spec)
4935 {
4936 const struct btf_type *targ_type;
4937 const struct bpf_core_accessor *local_acc;
4938 struct bpf_core_accessor *targ_acc;
4939 int i, sz, matched;
4940
4941 memset(targ_spec, 0, sizeof(*targ_spec));
4942 targ_spec->btf = targ_btf;
4943 targ_spec->root_type_id = targ_id;
4944 targ_spec->relo_kind = local_spec->relo_kind;
4945
4946 if (core_relo_is_type_based(local_spec->relo_kind)) {
4947 return bpf_core_types_are_compat(local_spec->btf,
4948 local_spec->root_type_id,
4949 targ_btf, targ_id);
4950 }
4951
4952 local_acc = &local_spec->spec[0];
4953 targ_acc = &targ_spec->spec[0];
4954
4955 if (core_relo_is_enumval_based(local_spec->relo_kind)) {
4956 size_t local_essent_len, targ_essent_len;
4957 const struct btf_enum *e;
4958 const char *targ_name;
4959
4960 /* has to resolve to an enum */
4961 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
4962 if (!btf_is_enum(targ_type))
4963 return 0;
4964
4965 local_essent_len = bpf_core_essential_name_len(local_acc->name);
4966
4967 for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) {
4968 targ_name = btf__name_by_offset(targ_spec->btf, e->name_off);
4969 targ_essent_len = bpf_core_essential_name_len(targ_name);
4970 if (targ_essent_len != local_essent_len)
4971 continue;
4972 if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) {
4973 targ_acc->type_id = targ_id;
4974 targ_acc->idx = i;
4975 targ_acc->name = targ_name;
4976 targ_spec->len++;
4977 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
4978 targ_spec->raw_len++;
4979 return 1;
4980 }
4981 }
4982 return 0;
4983 }
4984
4985 if (!core_relo_is_field_based(local_spec->relo_kind))
4986 return -EINVAL;
4987
4988 for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
4989 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
4990 &targ_id);
4991 if (!targ_type)
4992 return -EINVAL;
4993
4994 if (local_acc->name) {
4995 matched = bpf_core_match_member(local_spec->btf,
4996 local_acc,
4997 targ_btf, targ_id,
4998 targ_spec, &targ_id);
4999 if (matched <= 0)
5000 return matched;
5001 } else {
5002 /* for i=0, targ_id is already treated as array element
5003 * type (because it's the original struct), for others
5004 * we should find array element type first
5005 */
5006 if (i > 0) {
5007 const struct btf_array *a;
5008 bool flex;
5009
5010 if (!btf_is_array(targ_type))
5011 return 0;
5012
5013 a = btf_array(targ_type);
5014 flex = is_flex_arr(targ_btf, targ_acc - 1, a);
5015 if (!flex && local_acc->idx >= a->nelems)
5016 return 0;
5017 if (!skip_mods_and_typedefs(targ_btf, a->type,
5018 &targ_id))
5019 return -EINVAL;
5020 }
5021
5022 /* too deep struct/union/array nesting */
5023 if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
5024 return -E2BIG;
5025
5026 targ_acc->type_id = targ_id;
5027 targ_acc->idx = local_acc->idx;
5028 targ_acc->name = NULL;
5029 targ_spec->len++;
5030 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
5031 targ_spec->raw_len++;
5032
5033 sz = btf__resolve_size(targ_btf, targ_id);
5034 if (sz < 0)
5035 return sz;
5036 targ_spec->bit_offset += local_acc->idx * sz * 8;
5037 }
5038 }
5039
5040 return 1;
5041 }
5042
5043 static int bpf_core_calc_field_relo(const struct bpf_program *prog,
5044 const struct bpf_core_relo *relo,
5045 const struct bpf_core_spec *spec,
5046 __u32 *val, __u32 *field_sz, __u32 *type_id,
5047 bool *validate)
5048 {
5049 const struct bpf_core_accessor *acc;
5050 const struct btf_type *t;
5051 __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
5052 const struct btf_member *m;
5053 const struct btf_type *mt;
5054 bool bitfield;
5055 __s64 sz;
5056
5057 *field_sz = 0;
5058
5059 if (relo->kind == BPF_FIELD_EXISTS) {
5060 *val = spec ? 1 : 0;
5061 return 0;
5062 }
5063
5064 if (!spec)
5065 return -EUCLEAN; /* request instruction poisoning */
5066
5067 acc = &spec->spec[spec->len - 1];
5068 t = btf__type_by_id(spec->btf, acc->type_id);
5069
5070 /* a[n] accessor needs special handling */
5071 if (!acc->name) {
5072 if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
5073 *val = spec->bit_offset / 8;
5074 /* remember field size for load/store mem size */
5075 sz = btf__resolve_size(spec->btf, acc->type_id);
5076 if (sz < 0)
5077 return -EINVAL;
5078 *field_sz = sz;
5079 *type_id = acc->type_id;
5080 } else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
5081 sz = btf__resolve_size(spec->btf, acc->type_id);
5082 if (sz < 0)
5083 return -EINVAL;
5084 *val = sz;
5085 } else {
5086 pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
5087 prog->name, relo->kind, relo->insn_off / 8);
5088 return -EINVAL;
5089 }
5090 if (validate)
5091 *validate = true;
5092 return 0;
5093 }
5094
5095 m = btf_members(t) + acc->idx;
5096 mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
5097 bit_off = spec->bit_offset;
5098 bit_sz = btf_member_bitfield_size(t, acc->idx);
5099
5100 bitfield = bit_sz > 0;
5101 if (bitfield) {
5102 byte_sz = mt->size;
5103 byte_off = bit_off / 8 / byte_sz * byte_sz;
5104 /* figure out smallest int size necessary for bitfield load */
5105 while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
5106 if (byte_sz >= 8) {
5107 /* bitfield can't be read with 64-bit read */
5108 pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
5109 prog->name, relo->kind, relo->insn_off / 8);
5110 return -E2BIG;
5111 }
5112 byte_sz *= 2;
5113 byte_off = bit_off / 8 / byte_sz * byte_sz;
5114 }
5115 } else {
5116 sz = btf__resolve_size(spec->btf, field_type_id);
5117 if (sz < 0)
5118 return -EINVAL;
5119 byte_sz = sz;
5120 byte_off = spec->bit_offset / 8;
5121 bit_sz = byte_sz * 8;
5122 }
5123
5124 /* for bitfields, all the relocatable aspects are ambiguous and we
5125 * might disagree with the compiler, so turn off validation of the expected
5126 * value, except for signedness
5127 */
5128 if (validate)
5129 *validate = !bitfield;
5130
5131 switch (relo->kind) {
5132 case BPF_FIELD_BYTE_OFFSET:
5133 *val = byte_off;
5134 if (!bitfield) {
5135 *field_sz = byte_sz;
5136 *type_id = field_type_id;
5137 }
5138 break;
5139 case BPF_FIELD_BYTE_SIZE:
5140 *val = byte_sz;
5141 break;
5142 case BPF_FIELD_SIGNED:
5143 /* enums will be assumed unsigned */
5144 *val = btf_is_enum(mt) ||
5145 (btf_int_encoding(mt) & BTF_INT_SIGNED);
5146 if (validate)
5147 *validate = true; /* signedness is never ambiguous */
5148 break;
5149 case BPF_FIELD_LSHIFT_U64:
5150 #if __BYTE_ORDER == __LITTLE_ENDIAN
5151 *val = 64 - (bit_off + bit_sz - byte_off * 8);
5152 #else
5153 *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
5154 #endif
5155 break;
5156 case BPF_FIELD_RSHIFT_U64:
5157 *val = 64 - bit_sz;
5158 if (validate)
5159 *validate = true; /* right shift is never ambiguous */
5160 break;
5161 case BPF_FIELD_EXISTS:
5162 default:
5163 return -EOPNOTSUPP;
5164 }
5165
5166 return 0;
5167 }
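/* Editor's note: a worked example of the bitfield math above, with
 * illustrative values: a bitfield member of a 4-byte int type
 * (mt->size == 4) at bit_off = 45 with bit_sz = 9:
 *
 *	byte_off = 45 / 8 / 4 * 4 = 4
 *	fits: bit_off + bit_sz - byte_off * 8 = 22 <= 32, keep byte_sz = 4
 *
 *	BPF_FIELD_BYTE_OFFSET -> 4
 *	BPF_FIELD_BYTE_SIZE   -> 4
 *	BPF_FIELD_LSHIFT_U64  -> 64 - 22 = 42	(little-endian)
 *	BPF_FIELD_RSHIFT_U64  -> 64 - 9  = 55
 *
 * i.e. load 4 bytes at offset 4, shift left by 42, then (logically) right
 * by 55 to leave the 9-bit field in the low bits of a 64-bit register.
 */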
5168
5169 static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
5170 const struct bpf_core_spec *spec,
5171 __u32 *val)
5172 {
5173 __s64 sz;
5174
5175 /* type-based relos return zero when target type is not found */
5176 if (!spec) {
5177 *val = 0;
5178 return 0;
5179 }
5180
5181 switch (relo->kind) {
5182 case BPF_TYPE_ID_TARGET:
5183 *val = spec->root_type_id;
5184 break;
5185 case BPF_TYPE_EXISTS:
5186 *val = 1;
5187 break;
5188 case BPF_TYPE_SIZE:
5189 sz = btf__resolve_size(spec->btf, spec->root_type_id);
5190 if (sz < 0)
5191 return -EINVAL;
5192 *val = sz;
5193 break;
5194 case BPF_TYPE_ID_LOCAL:
5195 /* BPF_TYPE_ID_LOCAL is handled specially and shouldn't get here */
5196 default:
5197 return -EOPNOTSUPP;
5198 }
5199
5200 return 0;
5201 }
5202
5203 static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
5204 const struct bpf_core_spec *spec,
5205 __u32 *val)
5206 {
5207 const struct btf_type *t;
5208 const struct btf_enum *e;
5209
5210 switch (relo->kind) {
5211 case BPF_ENUMVAL_EXISTS:
5212 *val = spec ? 1 : 0;
5213 break;
5214 case BPF_ENUMVAL_VALUE:
5215 if (!spec)
5216 return -EUCLEAN; /* request instruction poisoning */
5217 t = btf__type_by_id(spec->btf, spec->spec[0].type_id);
5218 e = btf_enum(t) + spec->spec[0].idx;
5219 *val = e->val;
5220 break;
5221 default:
5222 return -EOPNOTSUPP;
5223 }
5224
5225 return 0;
5226 }
5227
5228 struct bpf_core_relo_res {
5230 /* expected value in the instruction, unless validate == false */
5231 __u32 orig_val;
5232 /* new value that needs to be patched up to */
5233 __u32 new_val;
5234 /* relocation unsuccessful, poison instruction, but don't fail load */
5235 bool poison;
5236 /* some relocations can't be validated against orig_val */
5237 bool validate;
5238 /* for field byte offset relocations or the forms:
5239 * *(T *)(rX + <off>) = rY
5240 * rX = *(T *)(rY + <off>),
5241 * we remember original and resolved field size to adjust direct
5242 * memory loads of pointers and integers; this is necessary for 32-bit
5243 * host kernel architectures, but also allows automatic relocation of
5244 * fields that were resized from, e.g., u32 to u64.
5245 */
5246 bool fail_memsz_adjust;
5247 __u32 orig_sz;
5248 __u32 orig_type_id;
5249 __u32 new_sz;
5250 __u32 new_type_id;
5251 };
5252
5253 /* Calculate original and target relocation values, given local and target
5254 * specs and relocation kind. These values are calculated for each candidate.
5255 * If there are multiple candidates, resulting values should all be consistent
5256 * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity.
5257 * If instruction has to be poisoned, *poison will be set to true.
5258 */
5259 static int bpf_core_calc_relo(const struct bpf_program *prog,
5260 const struct bpf_core_relo *relo,
5261 int relo_idx,
5262 const struct bpf_core_spec *local_spec,
5263 const struct bpf_core_spec *targ_spec,
5264 struct bpf_core_relo_res *res)
5265 {
5266 int err = -EOPNOTSUPP;
5267
5268 res->orig_val = 0;
5269 res->new_val = 0;
5270 res->poison = false;
5271 res->validate = true;
5272 res->fail_memsz_adjust = false;
5273 res->orig_sz = res->new_sz = 0;
5274 res->orig_type_id = res->new_type_id = 0;
5275
5276 if (core_relo_is_field_based(relo->kind)) {
5277 err = bpf_core_calc_field_relo(prog, relo, local_spec,
5278 &res->orig_val, &res->orig_sz,
5279 &res->orig_type_id, &res->validate);
5280 err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec,
5281 &res->new_val, &res->new_sz,
5282 &res->new_type_id, NULL);
5283 if (err)
5284 goto done;
5285 /* Validate if it's safe to adjust load/store memory size.
5286 * Adjustments are performed only if original and new memory
5287 * sizes differ.
5288 */
5289 res->fail_memsz_adjust = false;
5290 if (res->orig_sz != res->new_sz) {
5291 const struct btf_type *orig_t, *new_t;
5292
5293 orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id);
5294 new_t = btf__type_by_id(targ_spec->btf, res->new_type_id);
5295
5296 /* There are two use cases in which it's safe to
5297 * adjust load/store's mem size:
5298 * - reading a 32-bit kernel pointer, while on the BPF
5299 * side pointers are always 64-bit; in this case
5300 * it's safe to "downsize" the instruction size because
5301 * the pointer is treated as an unsigned integer with
5302 * zero-extended upper 32 bits;
5303 * - reading unsigned integers, again because
5304 * zero-extension preserves the value correctly.
5305 *
5306 * In all other cases it's incorrect to attempt to
5307 * load/store the field because the value read will be
5308 * incorrect, so we poison the relocated instruction.
5309 */
5310 if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
5311 goto done;
5312 if (btf_is_int(orig_t) && btf_is_int(new_t) &&
5313 btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
5314 btf_int_encoding(new_t) != BTF_INT_SIGNED)
5315 goto done;
5316
5317 /* mark as invalid mem size adjustment, but this will
5318 * only be checked for LDX/STX/ST insns
5319 */
5320 res->fail_memsz_adjust = true;
5321 }
5322 } else if (core_relo_is_type_based(relo->kind)) {
5323 err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val);
5324 err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val);
5325 } else if (core_relo_is_enumval_based(relo->kind)) {
5326 err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
5327 err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
5328 }
5329
5330 done:
5331 if (err == -EUCLEAN) {
5332 /* EUCLEAN is used to signal instruction poisoning request */
5333 res->poison = true;
5334 err = 0;
5335 } else if (err == -EOPNOTSUPP) {
5336 /* EOPNOTSUPP means unknown/unsupported relocation */
5337 pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
5338 prog->name, relo_idx, core_relo_kind_str(relo->kind),
5339 relo->kind, relo->insn_off / 8);
5340 }
5341
5342 return err;
5343 }
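/* Editor's note: a concrete mem-size-adjustment case for the logic above.
 * If a field is __u32 in local BTF but __u64 in the target kernel, then
 * orig_sz = 4 and new_sz = 8; both are unsigned ints, so fail_memsz_adjust
 * stays false and bpf_core_patch_insn() below rewrites the access width
 * (offsets here are illustrative):
 *
 *	r2 = *(u32 *)(r1 + 104);	before CO-RE patching
 *	r2 = *(u64 *)(r1 + 112);	after (offset and size patched)
 *
 * Had either side been a signed int, fail_memsz_adjust would be set and the
 * LDX/ST/STX instruction poisoned instead, since a width change would
 * corrupt sign handling.
 */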
5344
5345 /*
5346 * Turn an instruction for which CO-RE relocation failed into an invalid one
5347 * with a distinct signature.
5348 */
5349 static void bpf_core_poison_insn(struct bpf_program *prog, int relo_idx,
5350 int insn_idx, struct bpf_insn *insn)
5351 {
5352 pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
5353 prog->name, relo_idx, insn_idx);
5354 insn->code = BPF_JMP | BPF_CALL;
5355 insn->dst_reg = 0;
5356 insn->src_reg = 0;
5357 insn->off = 0;
5358 /* if this instruction is reachable (not dead code),
5359 * verifier will complain with the following message:
5360 * invalid func unknown#195896080
5361 */
5362 insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
5363 }
5364
5365 static bool is_ldimm64(struct bpf_insn *insn)
5366 {
5367 return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
5368 }
5369
5370 static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
5371 {
5372 switch (BPF_SIZE(insn->code)) {
5373 case BPF_DW: return 8;
5374 case BPF_W: return 4;
5375 case BPF_H: return 2;
5376 case BPF_B: return 1;
5377 default: return -1;
5378 }
5379 }
5380
5381 static int insn_bytes_to_bpf_size(__u32 sz)
5382 {
5383 switch (sz) {
5384 case 8: return BPF_DW;
5385 case 4: return BPF_W;
5386 case 2: return BPF_H;
5387 case 1: return BPF_B;
5388 default: return -1;
5389 }
5390 }
5391
5392 /*
5393 * Patch relocatable BPF instruction.
5394 *
5395 * Patched value is determined by relocation kind and target specification.
5396 * For existence relocations, target spec will be NULL if the field/type is not found.
5397 * Expected insn->imm value is determined using relocation kind and local
5398 * spec, and is checked before patching instruction. If actual insn->imm value
5399 * is wrong, bail out with error.
5400 *
5401 * Currently supported classes of BPF instruction are:
5402 * 1. rX = <imm> (assignment with immediate operand);
5403 * 2. rX += <imm> (arithmetic operations with immediate operand);
5404 * 3. rX = <imm64> (load with 64-bit immediate value);
5405 * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64};
5406 * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64};
5407 * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
5408 */
5409 static int bpf_core_patch_insn(struct bpf_program *prog,
5410 const struct bpf_core_relo *relo,
5411 int relo_idx,
5412 const struct bpf_core_relo_res *res)
5413 {
5414 __u32 orig_val, new_val;
5415 struct bpf_insn *insn;
5416 int insn_idx;
5417 __u8 class;
5418
5419 if (relo->insn_off % BPF_INSN_SZ)
5420 return -EINVAL;
5421 insn_idx = relo->insn_off / BPF_INSN_SZ;
5422 /* adjust insn_idx from section frame of reference to the local
5423 * program's frame of reference; (sub-)program code is not yet
5424 * relocated, so it's enough to just subtract in-section offset
5425 */
5426 insn_idx = insn_idx - prog->sec_insn_off;
5427 insn = &prog->insns[insn_idx];
5428 class = BPF_CLASS(insn->code);
5429
5430 if (res->poison) {
5431 poison:
5432 /* poison second part of ldimm64 to avoid confusing error from
5433 * verifier about "unknown opcode 00"
5434 */
5435 if (is_ldimm64(insn))
5436 bpf_core_poison_insn(prog, relo_idx, insn_idx + 1, insn + 1);
5437 bpf_core_poison_insn(prog, relo_idx, insn_idx, insn);
5438 return 0;
5439 }
5440
5441 orig_val = res->orig_val;
5442 new_val = res->new_val;
5443
5444 switch (class) {
5445 case BPF_ALU:
5446 case BPF_ALU64:
5447 if (BPF_SRC(insn->code) != BPF_K)
5448 return -EINVAL;
5449 if (res->validate && insn->imm != orig_val) {
5450 pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
5451 prog->name, relo_idx,
5452 insn_idx, insn->imm, orig_val, new_val);
5453 return -EINVAL;
5454 }
5455 orig_val = insn->imm;
5456 insn->imm = new_val;
5457 pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
5458 prog->name, relo_idx, insn_idx,
5459 orig_val, new_val);
5460 break;
5461 case BPF_LDX:
5462 case BPF_ST:
5463 case BPF_STX:
5464 if (res->validate && insn->off != orig_val) {
5465 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
5466 prog->name, relo_idx, insn_idx, insn->off, orig_val, new_val);
5467 return -EINVAL;
5468 }
5469 if (new_val > SHRT_MAX) {
5470 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
5471 prog->name, relo_idx, insn_idx, new_val);
5472 return -ERANGE;
5473 }
5474 if (res->fail_memsz_adjust) {
5475 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
5476 "Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
5477 prog->name, relo_idx, insn_idx);
5478 goto poison;
5479 }
5480
5481 orig_val = insn->off;
5482 insn->off = new_val;
5483 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
5484 prog->name, relo_idx, insn_idx, orig_val, new_val);
5485
5486 if (res->new_sz != res->orig_sz) {
5487 int insn_bytes_sz, insn_bpf_sz;
5488
5489 insn_bytes_sz = insn_bpf_size_to_bytes(insn);
5490 if (insn_bytes_sz != res->orig_sz) {
5491 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
5492 prog->name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
5493 return -EINVAL;
5494 }
5495
5496 insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
5497 if (insn_bpf_sz < 0) {
5498 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
5499 prog->name, relo_idx, insn_idx, res->new_sz);
5500 return -EINVAL;
5501 }
5502
5503 insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
5504 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
5505 prog->name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
5506 }
5507 break;
5508 case BPF_LD: {
5509 __u64 imm;
5510
5511 if (!is_ldimm64(insn) ||
5512 insn[0].src_reg != 0 || insn[0].off != 0 ||
5513 insn_idx + 1 >= prog->insns_cnt ||
5514 insn[1].code != 0 || insn[1].dst_reg != 0 ||
5515 insn[1].src_reg != 0 || insn[1].off != 0) {
5516 pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
5517 prog->name, relo_idx, insn_idx);
5518 return -EINVAL;
5519 }
5520
5521 imm = insn[0].imm + ((__u64)insn[1].imm << 32);
5522 if (res->validate && imm != orig_val) {
5523 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
5524 prog->name, relo_idx,
5525 insn_idx, (unsigned long long)imm,
5526 orig_val, new_val);
5527 return -EINVAL;
5528 }
5529
5530 insn[0].imm = new_val;
5531 insn[1].imm = 0; /* currently only 32-bit values are supported */
5532 pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
5533 prog->name, relo_idx, insn_idx,
5534 (unsigned long long)imm, new_val);
5535 break;
5536 }
5537 default:
5538 pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
5539 prog->name, relo_idx, insn_idx, insn->code,
5540 insn->src_reg, insn->dst_reg, insn->off, insn->imm);
5541 return -EINVAL;
5542 }
5543
5544 return 0;
5545 }
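
/* Illustrative sketch (not part of libbpf; all offsets are made up): with
 * direct memory access (e.g. fentry programs, where vmlinux.h types carry
 * preserve_access_index), a field read like `pid = task->pid;` compiles to
 * roughly
 *
 *   r1 = *(u32 *)(r2 + 1256)   ; 1256 = field offset per *local* BTF
 *
 * i.e. case 4 above. If the running kernel's BTF places the field at
 * offset 2312 instead, bpf_core_patch_insn() rewrites insn->off from 1256
 * to 2312; if the field's size changed as well, the size bits of
 * insn->code are rewritten via insn_bytes_to_bpf_size().
 */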
5546
5547 /* Output spec definition in the format:
5548 * [<type-id>] <kind> <type-name><spec> (<raw-spec> @ offset <bytes>[.<bits>]),
5549 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
5550 */
5551 static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
5552 {
5553 const struct btf_type *t;
5554 const struct btf_enum *e;
5555 const char *s;
5556 __u32 type_id;
5557 int i;
5558
5559 type_id = spec->root_type_id;
5560 t = btf__type_by_id(spec->btf, type_id);
5561 s = btf__name_by_offset(spec->btf, t->name_off);
5562
5563 libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
5564
5565 if (core_relo_is_type_based(spec->relo_kind))
5566 return;
5567
5568 if (core_relo_is_enumval_based(spec->relo_kind)) {
5569 t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
5570 e = btf_enum(t) + spec->raw_spec[0];
5571 s = btf__name_by_offset(spec->btf, e->name_off);
5572
5573 libbpf_print(level, "::%s = %u", s, e->val);
5574 return;
5575 }
5576
5577 if (core_relo_is_field_based(spec->relo_kind)) {
5578 for (i = 0; i < spec->len; i++) {
5579 if (spec->spec[i].name)
5580 libbpf_print(level, ".%s", spec->spec[i].name);
5581 else if (i > 0 || spec->spec[i].idx > 0)
5582 libbpf_print(level, "[%u]", spec->spec[i].idx);
5583 }
5584
5585 libbpf_print(level, " (");
5586 for (i = 0; i < spec->raw_len; i++)
5587 libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);
5588
5589 if (spec->bit_offset % 8)
5590 libbpf_print(level, " @ offset %u.%u)",
5591 spec->bit_offset / 8, spec->bit_offset % 8);
5592 else
5593 libbpf_print(level, " @ offset %u)", spec->bit_offset / 8);
5594 return;
5595 }
5596 }
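
/* Example output (type ID, raw spec, and offset are made up): for the
 * x.a[3].b access mentioned above this would print something like:
 *
 *   [7] struct sample.a[3].b (0:1:3:2 @ offset 16)
 */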
5597
5598 static size_t bpf_core_hash_fn(const void *key, void *ctx)
5599 {
5600 return (size_t)key;
5601 }
5602
5603 static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
5604 {
5605 return k1 == k2;
5606 }
5607
5608 static void *u32_as_hash_key(__u32 x)
5609 {
5610 return (void *)(uintptr_t)x;
5611 }
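
/* Usage sketch (illustrative only) for the three helpers above: the CO-RE
 * candidate cache keys its hashmap by local BTF type ID, smuggled through
 * the void * key slot, so hashing is the identity function and equality is
 * plain pointer comparison. This mirrors what bpf_core_apply_relo() below
 * does; type_id, local_btf and targ_btf come from the relocation context,
 * and error handling is elided.
 */
#if 0
struct hashmap *cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
struct ids_vec *cands;

if (!hashmap__find(cache, u32_as_hash_key(type_id), (void **)&cands)) {
	/* miss: compute candidate list once, then cache it */
	cands = bpf_core_find_cands(local_btf, type_id, targ_btf);
	hashmap__set(cache, u32_as_hash_key(type_id), cands, NULL, NULL);
}
#endif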
5612
5613 /*
5614 * CO-RE relocate single instruction.
5615 *
5616 * The outline and important points of the algorithm:
5617 * 1. For given local type, find corresponding candidate target types.
5618 * Candidate type is a type with the same "essential" name, ignoring
5619 * everything after last triple underscore (___). E.g., `sample`,
5620 * `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
5621 * for each other. Names with triple underscore are referred to as
5622 * "flavors" and are useful, among other things, to allow to
5623 * specify/support incompatible variations of the same kernel struct, which
5624 * might differ between different kernel versions and/or build
5625 * configurations.
5626 *
5627 * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
5628 * converter, when deduplicated BTF of a kernel still contains more than
5629 * one distinct type with the same name. In that case, ___2, ___3, etc.,
5630 * are appended starting from the second name conflict. But struct flavors
5631 * are also useful when defined "locally", in a BPF program, to extract the
5632 * same data from incompatible changes between different kernel
5633 * versions/configurations. For instance, to handle field renames between
5634 * kernel versions, one can define two flavors sharing the same common name
5635 * and use conditional relocations to extract that field, depending on the
5636 * target kernel version.
5637 * 2. For each candidate type, try to match local specification to this
5638 * candidate target type. Matching involves finding corresponding
5639 * high-level spec accessors, meaning that all named fields should match,
5640 * as well as all array accesses should be within the actual bounds. Also,
5641 * types should be compatible (see bpf_core_fields_are_compat for details).
5642 * 3. It is supported and expected that there might be multiple flavors
5643 * matching the spec. As long as all the specs resolve to the same set of
5644 * offsets across all candidates, there is no error. If there is any
5645 * ambiguity, CO-RE relocation will fail. This is necessary to accommodate
5646 * imperfection of BTF deduplication, which can cause slight duplication of
5647 * the same BTF type, if some directly or indirectly referenced (by
5648 * pointer) type gets resolved to different actual types in different
5649 * object files. If such situation occurs, deduplicated BTF will end up
5650 * with two (or more) structurally identical types, which differ only in
5651 * types they refer to through pointer. This should be OK in most cases and
5652 * is not an error.
5653 * 4. Candidate types search is performed by linearly scanning through all
5654 * types in target BTF. It is anticipated that this is overall more
5655 * efficient memory-wise and not significantly worse (if not better)
5656 * CPU-wise compared to prebuilding a map from all local type names to
5657 * a list of candidate type names. It's also sped up by caching resolved
5658 * list of matching candidates per each local "root" type ID, that has at
5659 * least one bpf_core_relo associated with it. This list is shared
5660 * between multiple relocations for the same type ID and is updated as some
5661 * of the candidates are pruned due to structural incompatibility.
5662 */
5663 static int bpf_core_apply_relo(struct bpf_program *prog,
5664 const struct bpf_core_relo *relo,
5665 int relo_idx,
5666 const struct btf *local_btf,
5667 const struct btf *targ_btf,
5668 struct hashmap *cand_cache)
5669 {
5670 struct bpf_core_spec local_spec, cand_spec, targ_spec = {};
5671 const void *type_key = u32_as_hash_key(relo->type_id);
5672 struct bpf_core_relo_res cand_res, targ_res;
5673 const struct btf_type *local_type;
5674 const char *local_name;
5675 struct ids_vec *cand_ids;
5676 __u32 local_id, cand_id;
5677 const char *spec_str;
5678 int i, j, err;
5679
5680 local_id = relo->type_id;
5681 local_type = btf__type_by_id(local_btf, local_id);
5682 if (!local_type)
5683 return -EINVAL;
5684
5685 local_name = btf__name_by_offset(local_btf, local_type->name_off);
5686 if (!local_name)
5687 return -EINVAL;
5688
5689 spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
5690 if (str_is_empty(spec_str))
5691 return -EINVAL;
5692
5693 err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec);
5694 if (err) {
5695 pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
5696 prog->name, relo_idx, local_id, btf_kind_str(local_type),
5697 str_is_empty(local_name) ? "<anon>" : local_name,
5698 spec_str, err);
5699 return -EINVAL;
5700 }
5701
5702 pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog->name,
5703 relo_idx, core_relo_kind_str(relo->kind), relo->kind);
5704 bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
5705 libbpf_print(LIBBPF_DEBUG, "\n");
5706
5707 /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
5708 if (relo->kind == BPF_TYPE_ID_LOCAL) {
5709 targ_res.validate = true;
5710 targ_res.poison = false;
5711 targ_res.orig_val = local_spec.root_type_id;
5712 targ_res.new_val = local_spec.root_type_id;
5713 goto patch_insn;
5714 }
5715
5716 /* libbpf doesn't support candidate search for anonymous types */
5717 if (str_is_empty(local_name)) {
5718 pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
5719 prog->name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
5720 return -EOPNOTSUPP;
5721 }
5722
5723 if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
5724 cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
5725 if (IS_ERR(cand_ids)) {
5726 pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld",
5727 prog->name, relo_idx, local_id, btf_kind_str(local_type),
5728 local_name, PTR_ERR(cand_ids));
5729 return PTR_ERR(cand_ids);
5730 }
5731 err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
5732 if (err) {
5733 bpf_core_free_cands(cand_ids);
5734 return err;
5735 }
5736 }
5737
5738 for (i = 0, j = 0; i < cand_ids->len; i++) {
5739 cand_id = cand_ids->data[i];
5740 err = bpf_core_spec_match(&local_spec, targ_btf, cand_id, &cand_spec);
5741 if (err < 0) {
5742 pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
5743 prog->name, relo_idx, i);
5744 bpf_core_dump_spec(LIBBPF_WARN, &cand_spec);
5745 libbpf_print(LIBBPF_WARN, ": %d\n", err);
5746 return err;
5747 }
5748
5749 pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog->name,
5750 relo_idx, err == 0 ? "non-matching" : "matching", i);
5751 bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
5752 libbpf_print(LIBBPF_DEBUG, "\n");
5753
5754 if (err == 0)
5755 continue;
5756
5757 err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, &cand_spec, &cand_res);
5758 if (err)
5759 return err;
5760
5761 if (j == 0) {
5762 targ_res = cand_res;
5763 targ_spec = cand_spec;
5764 } else if (cand_spec.bit_offset != targ_spec.bit_offset) {
5765 /* if there are many field relo candidates, they
5766 * should all resolve to the same bit offset
5767 */
5768 pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
5769 prog->name, relo_idx, cand_spec.bit_offset,
5770 targ_spec.bit_offset);
5771 return -EINVAL;
5772 } else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) {
5773 /* all candidates should result in the same relocation
5774 * decision and value, otherwise it's dangerous to
5775 * proceed due to ambiguity
5776 */
5777 pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
5778 prog->name, relo_idx,
5779 cand_res.poison ? "failure" : "success", cand_res.new_val,
5780 targ_res.poison ? "failure" : "success", targ_res.new_val);
5781 return -EINVAL;
5782 }
5783
5784 cand_ids->data[j++] = cand_spec.root_type_id;
5785 }
5786
5787 /*
5788 * For a BPF_FIELD_EXISTS relo, or when the BPF program uses field
5789 * existence checks or kernel version/config checks, it's expected
5790 * that we might not find any candidates. In this case, if the field
5791 * wasn't found in any candidate, the list of candidates shouldn't
5792 * change at all; we'll just handle the relocation appropriately,
5793 * depending on the relo's kind.
5794 */
5795 if (j > 0)
5796 cand_ids->len = j;
5797
5798 /*
5799 * If no candidates were found, it might be either a programmer error
5800 * or an expected case, depending on whether the instruction with the
5801 * relocation is guarded in some way that makes it unreachable (dead
5802 * code) if relocation can't be resolved. This is handled in
5803 * bpf_core_patch_insn() uniformly by replacing that instruction with
5804 * BPF helper call insn (using invalid helper ID). If that instruction
5805 * is indeed unreachable, then it will be ignored and eliminated by
5806 * verifier. If it was an error, then verifier will complain and point
5807 * to a specific instruction number in its log.
5808 */
5809 if (j == 0) {
5810 pr_debug("prog '%s': relo #%d: no matching targets found\n",
5811 prog->name, relo_idx);
5812
5813 /* calculate single target relo result explicitly */
5814 err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, NULL, &targ_res);
5815 if (err)
5816 return err;
5817 }
5818
5819 patch_insn:
5820 /* bpf_core_patch_insn() should know how to handle missing targ_spec */
5821 err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res);
5822 if (err) {
5823 pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
5824 prog->name, relo_idx, relo->insn_off, err);
5825 return -EINVAL;
5826 }
5827
5828 return 0;
5829 }
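
/* Sketch of the "flavor" mechanism from the algorithm comment above
 * (illustrative only: the struct and field names are assumptions, and
 * bpf_core_field_exists()/BPF_CORE_READ() come from bpf_core_read.h on
 * the BPF program side):
 */
#if 0
/* two local flavors sharing the essential name "kernel_sock"; only one
 * of them will match the running kernel's BTF
 */
struct kernel_sock___old { int state; };
struct kernel_sock___new { short state; };

/* guarded relocations: the non-matching branch is poisoned by
 * bpf_core_patch_insn() and eliminated as dead code by the verifier
 */
if (bpf_core_field_exists(((struct kernel_sock___new *)sk)->state))
	state = BPF_CORE_READ((struct kernel_sock___new *)sk, state);
else
	state = BPF_CORE_READ((struct kernel_sock___old *)sk, state);
#endif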
5830
5831 static int
5832 bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
5833 {
5834 const struct btf_ext_info_sec *sec;
5835 const struct bpf_core_relo *rec;
5836 const struct btf_ext_info *seg;
5837 struct hashmap_entry *entry;
5838 struct hashmap *cand_cache = NULL;
5839 struct bpf_program *prog;
5840 struct btf *targ_btf;
5841 const char *sec_name;
5842 int i, err = 0, insn_idx, sec_idx;
5843
5844 if (obj->btf_ext->core_relo_info.len == 0)
5845 return 0;
5846
5847 if (targ_btf_path)
5848 targ_btf = btf__parse(targ_btf_path, NULL);
5849 else
5850 targ_btf = obj->btf_vmlinux;
5851 if (IS_ERR_OR_NULL(targ_btf)) {
5852 pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf));
5853 return PTR_ERR(targ_btf);
5854 }
5855
5856 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
5857 if (IS_ERR(cand_cache)) {
5858 err = PTR_ERR(cand_cache);
5859 goto out;
5860 }
5861
5862 seg = &obj->btf_ext->core_relo_info;
5863 for_each_btf_ext_sec(seg, sec) {
5864 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
5865 if (str_is_empty(sec_name)) {
5866 err = -EINVAL;
5867 goto out;
5868 }
5869 /* bpf_object's ELF is gone by now so it's not easy to find
5870 * section index by section name, but we can find *any*
5871 * bpf_program within the desired section and use its
5872 * prog->sec_idx to do a proper search by section index and
5873 * instruction offset
5874 */
5875 prog = NULL;
5876 for (i = 0; i < obj->nr_programs; i++)
5877 if (strcmp(obj->programs[i].sec_name, sec_name) == 0) {
5878 prog = &obj->programs[i];
5879 break;
5880 }
5881 if (!prog) {
5882 pr_warn("sec '%s': failed to find a BPF program\n", sec_name);
5883 return -ENOENT;
5884 }
5885 sec_idx = prog->sec_idx;
5886
5887 pr_debug("sec '%s': found %d CO-RE relocations\n",
5888 sec_name, sec->num_info);
5889
5890 for_each_btf_ext_rec(seg, sec, i, rec) {
5891 insn_idx = rec->insn_off / BPF_INSN_SZ;
5892 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
5893 if (!prog) {
5894 pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n",
5895 sec_name, insn_idx, i);
5896 err = -EINVAL;
5897 goto out;
5898 }
5899 /* no need to apply CO-RE relocation if the program is
5900 * not going to be loaded
5901 */
5902 if (!prog->load)
5903 continue;
5904
5905 err = bpf_core_apply_relo(prog, rec, i, obj->btf,
5906 targ_btf, cand_cache);
5907 if (err) {
5908 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
5909 prog->name, i, err);
5910 goto out;
5911 }
5912 }
5913 }
5914
5915 out:
5916 /* obj->btf_vmlinux is freed at the end of object load phase */
5917 if (targ_btf != obj->btf_vmlinux)
5918 btf__free(targ_btf);
5919 if (!IS_ERR_OR_NULL(cand_cache)) {
5920 hashmap__for_each_entry(cand_cache, entry, i) {
5921 bpf_core_free_cands(entry->value);
5922 }
5923 hashmap__free(cand_cache);
5924 }
5925 return err;
5926 }
5927
5928 /* Relocate data references within program code:
5929 * - map references;
5930 * - global variable references;
5931 * - extern references.
5932 */
5933 static int
5934 bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
5935 {
5936 int i;
5937
5938 for (i = 0; i < prog->nr_reloc; i++) {
5939 struct reloc_desc *relo = &prog->reloc_desc[i];
5940 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
5941 struct extern_desc *ext;
5942
5943 switch (relo->type) {
5944 case RELO_LD64:
5945 insn[0].src_reg = BPF_PSEUDO_MAP_FD;
5946 insn[0].imm = obj->maps[relo->map_idx].fd;
5947 relo->processed = true;
5948 break;
5949 case RELO_DATA:
5950 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5951 insn[1].imm = insn[0].imm + relo->sym_off;
5952 insn[0].imm = obj->maps[relo->map_idx].fd;
5953 relo->processed = true;
5954 break;
5955 case RELO_EXTERN:
5956 ext = &obj->externs[relo->sym_off];
5957 if (ext->type == EXT_KCFG) {
5958 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5959 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
5960 insn[1].imm = ext->kcfg.data_off;
5961 } else /* EXT_KSYM */ {
5962 if (ext->ksym.type_id) { /* typed ksyms */
5963 insn[0].src_reg = BPF_PSEUDO_BTF_ID;
5964 insn[0].imm = ext->ksym.vmlinux_btf_id;
5965 } else { /* typeless ksyms */
5966 insn[0].imm = (__u32)ext->ksym.addr;
5967 insn[1].imm = ext->ksym.addr >> 32;
5968 }
5969 }
5970 relo->processed = true;
5971 break;
5972 case RELO_CALL:
5973 /* will be handled as a follow up pass */
5974 break;
5975 default:
5976 pr_warn("prog '%s': relo #%d: bad relo type %d\n",
5977 prog->name, i, relo->type);
5978 return -EINVAL;
5979 }
5980 }
5981
5982 return 0;
5983 }
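
/* Illustrative example for RELO_DATA above (fd and offsets are made up):
 * a global variable access is a two-part ldimm64. Before relocation,
 * insn[0].imm carries the variable's offset within its ELF data section;
 * after relocation the pair encodes "map value + offset":
 *
 *   insn[0]: src_reg=BPF_PSEUDO_MAP_VALUE, imm=5   ; fd of the .data map
 *   insn[1]: imm=16                                ; offset within map value
 */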
5984
5985 static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
5986 const struct bpf_program *prog,
5987 const struct btf_ext_info *ext_info,
5988 void **prog_info, __u32 *prog_rec_cnt,
5989 __u32 *prog_rec_sz)
5990 {
5991 void *copy_start = NULL, *copy_end = NULL;
5992 void *rec, *rec_end, *new_prog_info;
5993 const struct btf_ext_info_sec *sec;
5994 size_t old_sz, new_sz;
5995 const char *sec_name;
5996 int i, off_adj;
5997
5998 for_each_btf_ext_sec(ext_info, sec) {
5999 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
6000 if (!sec_name)
6001 return -EINVAL;
6002 if (strcmp(sec_name, prog->sec_name) != 0)
6003 continue;
6004
6005 for_each_btf_ext_rec(ext_info, sec, i, rec) {
6006 __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
6007
6008 if (insn_off < prog->sec_insn_off)
6009 continue;
6010 if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
6011 break;
6012
6013 if (!copy_start)
6014 copy_start = rec;
6015 copy_end = rec + ext_info->rec_size;
6016 }
6017
6018 if (!copy_start)
6019 return -ENOENT;
6020
6021 /* append func/line info of a given (sub-)program to the main
6022 * program func/line info
6023 */
6024 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
6025 new_sz = old_sz + (copy_end - copy_start);
6026 new_prog_info = realloc(*prog_info, new_sz);
6027 if (!new_prog_info)
6028 return -ENOMEM;
6029 *prog_info = new_prog_info;
6030 *prog_rec_cnt = new_sz / ext_info->rec_size;
6031 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
6032
6033 /* Kernel instruction offsets are in units of 8-byte
6034 * instructions, while .BTF.ext instruction offsets generated
6035 * by Clang are in units of bytes. So convert Clang offsets
6036 * into kernel offsets and adjust offset according to program
6037 * relocated position.
6038 */
6039 off_adj = prog->sub_insn_off - prog->sec_insn_off;
6040 rec = new_prog_info + old_sz;
6041 rec_end = new_prog_info + new_sz;
6042 for (; rec < rec_end; rec += ext_info->rec_size) {
6043 __u32 *insn_off = rec;
6044
6045 *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
6046 }
6047 *prog_rec_sz = ext_info->rec_size;
6048 return 0;
6049 }
6050
6051 return -ENOENT;
6052 }
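
/* Worked example of the conversion above (numbers are made up): a record
 * at byte offset 48 within its section is instruction 48 / 8 = 6; if the
 * subprog originally started at section instruction #4 and was appended at
 * main-prog instruction #20, then off_adj = 20 - 4 = 16 and the record's
 * final, kernel-facing instruction offset is 6 + 16 = 22.
 */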
6053
6054 static int
6055 reloc_prog_func_and_line_info(const struct bpf_object *obj,
6056 struct bpf_program *main_prog,
6057 const struct bpf_program *prog)
6058 {
6059 int err;
6060
6061 /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
6062 * support func/line info
6063 */
6064 if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC))
6065 return 0;
6066
6067 /* only attempt func info relocation if main program's func_info
6068 * relocation was successful
6069 */
6070 if (main_prog != prog && !main_prog->func_info)
6071 goto line_info;
6072
6073 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
6074 &main_prog->func_info,
6075 &main_prog->func_info_cnt,
6076 &main_prog->func_info_rec_size);
6077 if (err) {
6078 if (err != -ENOENT) {
6079 pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
6080 prog->name, err);
6081 return err;
6082 }
6083 if (main_prog->func_info) {
6084 /*
6085 * Some info has already been found, but the last
6086 * btf_ext reloc failed. We must error out.
6087 */
6088 pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
6089 return err;
6090 }
6091 /* Failed to load the very first info. Ignore the rest. */
6092 pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
6093 prog->name);
6094 }
6095
6096 line_info:
6097 /* don't relocate line info if main program's relocation failed */
6098 if (main_prog != prog && !main_prog->line_info)
6099 return 0;
6100
6101 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
6102 &main_prog->line_info,
6103 &main_prog->line_info_cnt,
6104 &main_prog->line_info_rec_size);
6105 if (err) {
6106 if (err != -ENOENT) {
6107 pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
6108 prog->name, err);
6109 return err;
6110 }
6111 if (main_prog->line_info) {
6112 /*
6113 * Some info has already been found, but the last
6114 * btf_ext reloc failed. We must error out.
6115 */
6116 pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
6117 return err;
6118 }
6119 /* Failed to load the very first info. Ignore the rest. */
6120 pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
6121 prog->name);
6122 }
6123 return 0;
6124 }
6125
6126 static int cmp_relo_by_insn_idx(const void *key, const void *elem)
6127 {
6128 size_t insn_idx = *(const size_t *)key;
6129 const struct reloc_desc *relo = elem;
6130
6131 if (insn_idx == relo->insn_idx)
6132 return 0;
6133 return insn_idx < relo->insn_idx ? -1 : 1;
6134 }
6135
6136 static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
6137 {
6138 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
6139 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
6140 }
6141
6142 static int
6143 bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
6144 struct bpf_program *prog)
6145 {
6146 size_t sub_insn_idx, insn_idx, new_cnt;
6147 struct bpf_program *subprog;
6148 struct bpf_insn *insns, *insn;
6149 struct reloc_desc *relo;
6150 int err;
6151
6152 err = reloc_prog_func_and_line_info(obj, main_prog, prog);
6153 if (err)
6154 return err;
6155
6156 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
6157 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6158 if (!insn_is_subprog_call(insn))
6159 continue;
6160
6161 relo = find_prog_insn_relo(prog, insn_idx);
6162 if (relo && relo->type != RELO_CALL) {
6163 pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
6164 prog->name, insn_idx, relo->type);
6165 return -LIBBPF_ERRNO__RELOC;
6166 }
6167 if (relo) {
6168 /* sub-program instruction index is a combination of
6169 * an offset of a symbol pointed to by relocation and
6170 * call instruction's imm field; for global functions,
6171 * call always has imm = -1, but for static functions
6172 * relocation is against STT_SECTION and insn->imm
6173 * points to a start of a static function
6174 */
6175 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
6176 } else {
6177 /* if subprogram call is to a static function within
6178 * the same ELF section, there won't be any relocation
6179 * emitted, but it also means there is no additional
6180 * offset necessary, insns->imm is relative to
6181 * instruction's original position within the section
6182 */
6183 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6184 }
6185
6186 /* we enforce that sub-programs should be in .text section */
6187 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6188 if (!subprog) {
6189 pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6190 prog->name);
6191 return -LIBBPF_ERRNO__RELOC;
6192 }
6193
6194 /* if it's the first call instruction calling into this
6195 * subprogram (meaning this subprog hasn't been processed
6196 * yet) within the context of current main program:
6197 * - append it at the end of main program's instruction block;
6198 * - process it recursively, while current program is put on hold;
6199 * - if that subprogram calls some other not yet processed
6200 * subprogram, same thing will happen recursively until
6201 * there are no more unprocessed subprograms left to append
6202 * and relocate.
6203 */
6204 if (subprog->sub_insn_off == 0) {
6205 subprog->sub_insn_off = main_prog->insns_cnt;
6206
6207 new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6208 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6209 if (!insns) {
6210 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6211 return -ENOMEM;
6212 }
6213 main_prog->insns = insns;
6214 main_prog->insns_cnt = new_cnt;
6215
6216 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6217 subprog->insns_cnt * sizeof(*insns));
6218
6219 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6220 main_prog->name, subprog->insns_cnt, subprog->name);
6221
6222 err = bpf_object__reloc_code(obj, main_prog, subprog);
6223 if (err)
6224 return err;
6225 }
6226
6227 /* main_prog->insns memory could have been re-allocated, so
6228 * calculate pointer again
6229 */
6230 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6231 /* calculate correct instruction position within current main
6232 * prog; each main prog can have a different set of
6233 * subprograms appended (potentially in different order as
6234 * well), so position of any subprog can be different for
6235 * different main programs */
6236 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6237
6238 if (relo)
6239 relo->processed = true;
6240
6241 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
6242 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6243 }
6244
6245 return 0;
6246 }
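
/* Worked example for the imm computation above (positions are made up):
 * if a subprog was appended at main-prog instruction #100 and the call
 * sits at main-prog instruction #10, then imm = 100 - 10 - 1 = 89; the
 * kernel resolves the target as pc + imm + 1 = 10 + 89 + 1 = 100, i.e.
 * the subprog's first instruction.
 */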
6247
6248 /*
6249 * Relocate sub-program calls.
6250 *
6251 * Algorithm operates as follows. Each entry-point BPF program (referred to as
6252 * main prog) is processed separately. Each subprog (a non-entry function
6253 * that can be called from either entry progs or other subprogs) gets its
6254 * sub_insn_off reset to zero. This serves as an indicator that this subprogram
6255 * hasn't been appended and relocated within the current main prog yet. Once it's
6256 * relocated, sub_insn_off will point at the position within current main prog
6257 * where given subprog was appended. This will further be used to relocate all
6258 * the call instructions jumping into this subprog.
6259 *
6260 * We start with main program and process all call instructions. If the call
6261 * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6262 * is zero), subprog instructions are appended at the end of main program's
6263 * instruction array. Then main program is "put on hold" while we recursively
6264 * process newly appended subprogram. If that subprogram calls into another
6265 * subprogram that hasn't been appended, new subprogram is appended again to
6266 * the *main* prog's instructions (subprog's instructions are always left
6267 * untouched, as they need to be in unmodified state for subsequent main progs
6268 * and subprog instructions are always used only as part of a main prog) and
6269 * the process continues recursively. Once all the subprogs called from a main
6270 * prog or any of its subprogs are appended (and relocated), all their
6271 * positions within finalized instructions array are known, so it's easy to
6272 * rewrite call instructions with correct relative offsets, corresponding to
6273 * desired target subprog.
6274 *
6275 * It's important to realize that some subprogs might not be called from a
6276 * given main prog or any of its called/used subprogs. Those will keep their
6277 * subprog->sub_insn_off as zero at all times and won't be appended to current
6278 * main prog and won't be relocated within the context of current main prog.
6279 * They might still be used from other main progs later.
6280 *
6281 * Visually this process can be shown as below. Suppose we have two main
6282 * programs mainA and mainB and BPF object contains three subprogs: subA,
6283 * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
6284 * subC both call subB:
6285 *
6286 * +--------+ +-------+
6287 * | v v |
6288 * +--+---+ +--+-+-+ +---+--+
6289 * | subA | | subB | | subC |
6290 * +--+---+ +------+ +---+--+
6291 * ^ ^
6292 * | |
6293 * +---+-------+ +------+----+
6294 * | mainA | | mainB |
6295 * +-----------+ +-----------+
6296 *
6297 * We'll start relocating mainA, will find subA, append it and start
6298 * processing sub A recursively:
6299 *
6300 * +-----------+------+
6301 * | mainA | subA |
6302 * +-----------+------+
6303 *
6304 * At this point we notice that subB is used from subA, so we append it and
6305 * relocate (there are no further subcalls from subB):
6306 *
6307 * +-----------+------+------+
6308 * | mainA | subA | subB |
6309 * +-----------+------+------+
6310 *
6311 * At this point, we relocate subA calls, then go one level up and finish with
6312 * relocating mainA calls. mainA is done.
6313 *
6314 * For mainB, the process is similar but results in a different order. We start
6315 * with mainB and skip subA and subB, as mainB never calls them (at least not
6316 * directly), but we see subC is needed, so we append and start processing it:
6317 *
6318 * +-----------+------+
6319 * | mainB | subC |
6320 * +-----------+------+
6321 * Now we see subC needs subB, so we go back to it, append and relocate it:
6322 *
6323 * +-----------+------+------+
6324 * | mainB | subC | subB |
6325 * +-----------+------+------+
6326 *
6327 * At this point we unwind recursion, relocate calls in subC, then in mainB.
6328 */
6329 static int
6330 bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6331 {
6332 struct bpf_program *subprog;
6333 int i, j, err;
6334
6335 /* mark all subprogs as not relocated (yet) within the context of
6336 * current main program
6337 */
6338 for (i = 0; i < obj->nr_programs; i++) {
6339 subprog = &obj->programs[i];
6340 if (!prog_is_subprog(obj, subprog))
6341 continue;
6342
6343 subprog->sub_insn_off = 0;
6344 for (j = 0; j < subprog->nr_reloc; j++)
6345 if (subprog->reloc_desc[j].type == RELO_CALL)
6346 subprog->reloc_desc[j].processed = false;
6347 }
6348
6349 err = bpf_object__reloc_code(obj, prog, prog);
6350 if (err)
6351 return err;
6352
6353
6354 return 0;
6355 }
6356
6357 static int
6358 bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
6359 {
6360 struct bpf_program *prog;
6361 size_t i;
6362 int err;
6363
6364 if (obj->btf_ext) {
6365 err = bpf_object__relocate_core(obj, targ_btf_path);
6366 if (err) {
6367 pr_warn("failed to perform CO-RE relocations: %d\n",
6368 err);
6369 return err;
6370 }
6371 }
6372 /* relocate data references first for all programs and sub-programs,
6373 * as they don't change relative to code locations, so subsequent
6374 * subprogram processing won't need to re-calculate any of them
6375 */
6376 for (i = 0; i < obj->nr_programs; i++) {
6377 prog = &obj->programs[i];
6378 err = bpf_object__relocate_data(obj, prog);
6379 if (err) {
6380 pr_warn("prog '%s': failed to relocate data references: %d\n",
6381 prog->name, err);
6382 return err;
6383 }
6384 }
6385 /* now relocate subprogram calls and append used subprograms to main
6386 * programs; each copy of subprogram code needs to be relocated
6387 * differently for each main program, because its code location might
6388 * have changed
6389 */
6390 for (i = 0; i < obj->nr_programs; i++) {
6391 prog = &obj->programs[i];
6392 /* sub-program's sub-calls are relocated within the context of
6393 * its main program only
6394 */
6395 if (prog_is_subprog(obj, prog))
6396 continue;
6397
6398 err = bpf_object__relocate_calls(obj, prog);
6399 if (err) {
6400 pr_warn("prog '%s': failed to relocate calls: %d\n",
6401 prog->name, err);
6402 return err;
6403 }
6404 }
6405 /* free up relocation descriptors */
6406 for (i = 0; i < obj->nr_programs; i++) {
6407 prog = &obj->programs[i];
6408 zfree(&prog->reloc_desc);
6409 prog->nr_reloc = 0;
6410 }
6411 return 0;
6412 }
6413
6414 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
6415 GElf_Shdr *shdr, Elf_Data *data);
6416
6417 static int bpf_object__collect_map_relos(struct bpf_object *obj,
6418 GElf_Shdr *shdr, Elf_Data *data)
6419 {
6420 const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
6421 int i, j, nrels, new_sz;
6422 const struct btf_var_secinfo *vi = NULL;
6423 const struct btf_type *sec, *var, *def;
6424 struct bpf_map *map = NULL, *targ_map;
6425 const struct btf_member *member;
6426 const char *name, *mname;
6427 Elf_Data *symbols;
6428 unsigned int moff;
6429 GElf_Sym sym;
6430 GElf_Rel rel;
6431 void *tmp;
6432
6433 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
6434 return -EINVAL;
6435 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
6436 if (!sec)
6437 return -EINVAL;
6438
6439 symbols = obj->efile.symbols;
6440 nrels = shdr->sh_size / shdr->sh_entsize;
6441 for (i = 0; i < nrels; i++) {
6442 if (!gelf_getrel(data, i, &rel)) {
6443 pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
6444 return -LIBBPF_ERRNO__FORMAT;
6445 }
6446 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
6447 pr_warn(".maps relo #%d: symbol %zx not found\n",
6448 i, (size_t)GELF_R_SYM(rel.r_info));
6449 return -LIBBPF_ERRNO__FORMAT;
6450 }
6451 name = elf_sym_str(obj, sym.st_name) ?: "<?>";
6452 if (sym.st_shndx != obj->efile.btf_maps_shndx) {
6453 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
6454 i, name);
6455 return -LIBBPF_ERRNO__RELOC;
6456 }
6457
6458 pr_debug(".maps relo #%d: for %zd value %zd rel.r_offset %zu name %d ('%s')\n",
6459 i, (ssize_t)(rel.r_info >> 32), (size_t)sym.st_value,
6460 (size_t)rel.r_offset, sym.st_name, name);
6461
6462 for (j = 0; j < obj->nr_maps; j++) {
6463 map = &obj->maps[j];
6464 if (map->sec_idx != obj->efile.btf_maps_shndx)
6465 continue;
6466
6467 vi = btf_var_secinfos(sec) + map->btf_var_idx;
6468 if (vi->offset <= rel.r_offset &&
6469 rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
6470 break;
6471 }
6472 if (j == obj->nr_maps) {
6473 pr_warn(".maps relo #%d: cannot find map '%s' at rel.r_offset %zu\n",
6474 i, name, (size_t)rel.r_offset);
6475 return -EINVAL;
6476 }
6477
6478 if (!bpf_map_type__is_map_in_map(map->def.type))
6479 return -EINVAL;
6480 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
6481 map->def.key_size != sizeof(int)) {
6482 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
6483 i, map->name, sizeof(int));
6484 return -EINVAL;
6485 }
6486
6487 targ_map = bpf_object__find_map_by_name(obj, name);
6488 if (!targ_map)
6489 return -ESRCH;
6490
6491 var = btf__type_by_id(obj->btf, vi->type);
6492 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
6493 if (btf_vlen(def) == 0)
6494 return -EINVAL;
6495 member = btf_members(def) + btf_vlen(def) - 1;
6496 mname = btf__name_by_offset(obj->btf, member->name_off);
6497 if (strcmp(mname, "values"))
6498 return -EINVAL;
6499
6500 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
6501 if (rel.r_offset - vi->offset < moff)
6502 return -EINVAL;
6503
6504 moff = rel.r_offset - vi->offset - moff;
6505 /* here we use BPF pointer size, which is always 64 bit, as we
6506 * are parsing ELF that was built for BPF target
6507 */
6508 if (moff % bpf_ptr_sz)
6509 return -EINVAL;
6510 moff /= bpf_ptr_sz;
6511 if (moff >= map->init_slots_sz) {
6512 new_sz = moff + 1;
6513 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
6514 if (!tmp)
6515 return -ENOMEM;
6516 map->init_slots = tmp;
6517 memset(map->init_slots + map->init_slots_sz, 0,
6518 (new_sz - map->init_slots_sz) * host_ptr_sz);
6519 map->init_slots_sz = new_sz;
6520 }
6521 map->init_slots[moff] = targ_map;
6522
6523 pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n",
6524 i, map->name, moff, name);
6525 }
6526
6527 return 0;
6528 }
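
/* Sketch of the BPF-side declaration that produces the .maps relocations
 * handled above (illustrative; the map names are assumptions, while the
 * __uint/__type/__array macros and SEC() come from bpf_helpers.h). Each
 * initialized slot in .values emits one ELF relocation against the inner
 * map's symbol, which ends up in map->init_slots[]:
 */
#if 0
struct inner_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} inner_map_a SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 4);
	__type(key, int);
	__array(values, struct inner_map);
} outer_map SEC(".maps") = {
	.values = { [0] = &inner_map_a },	/* one relocation per slot */
};
#endif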
6529
6530 static int cmp_relocs(const void *_a, const void *_b)
6531 {
6532 const struct reloc_desc *a = _a;
6533 const struct reloc_desc *b = _b;
6534
6535 if (a->insn_idx != b->insn_idx)
6536 return a->insn_idx < b->insn_idx ? -1 : 1;
6537
6538 /* no two relocations should have the same insn_idx, but ... */
6539 if (a->type != b->type)
6540 return a->type < b->type ? -1 : 1;
6541
6542 return 0;
6543 }
6544
6545 static int bpf_object__collect_relos(struct bpf_object *obj)
6546 {
6547 int i, err;
6548
6549 for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
6550 GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
6551 Elf_Data *data = obj->efile.reloc_sects[i].data;
6552 int idx = shdr->sh_info;
6553
6554 if (shdr->sh_type != SHT_REL) {
6555 pr_warn("internal error at %d\n", __LINE__);
6556 return -LIBBPF_ERRNO__INTERNAL;
6557 }
6558
6559 if (idx == obj->efile.st_ops_shndx)
6560 err = bpf_object__collect_st_ops_relos(obj, shdr, data);
6561 else if (idx == obj->efile.btf_maps_shndx)
6562 err = bpf_object__collect_map_relos(obj, shdr, data);
6563 else
6564 err = bpf_object__collect_prog_relos(obj, shdr, data);
6565 if (err)
6566 return err;
6567 }
6568
6569 for (i = 0; i < obj->nr_programs; i++) {
6570 struct bpf_program *p = &obj->programs[i];
6571
6572 if (!p->nr_reloc)
6573 continue;
6574
6575 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
6576 }
6577 return 0;
6578 }
6579
6580 static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
6581 {
6582 if (BPF_CLASS(insn->code) == BPF_JMP &&
6583 BPF_OP(insn->code) == BPF_CALL &&
6584 BPF_SRC(insn->code) == BPF_K &&
6585 insn->src_reg == 0 &&
6586 insn->dst_reg == 0) {
6587 *func_id = insn->imm;
6588 return true;
6589 }
6590 return false;
6591 }
6592
6593 static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
6594 {
6595 struct bpf_insn *insn = prog->insns;
6596 enum bpf_func_id func_id;
6597 int i;
6598
6599 for (i = 0; i < prog->insns_cnt; i++, insn++) {
6600 if (!insn_is_helper_call(insn, &func_id))
6601 continue;
6602
6603 /* on kernels that don't yet support
6604 * bpf_probe_read_{kernel,user}[_str] helpers, fall back
6605 * to bpf_probe_read() which works well for old kernels
6606 */
6607 switch (func_id) {
6608 case BPF_FUNC_probe_read_kernel:
6609 case BPF_FUNC_probe_read_user:
6610 if (!kernel_supports(FEAT_PROBE_READ_KERN))
6611 insn->imm = BPF_FUNC_probe_read;
6612 break;
6613 case BPF_FUNC_probe_read_kernel_str:
6614 case BPF_FUNC_probe_read_user_str:
6615 if (!kernel_supports(FEAT_PROBE_READ_KERN))
6616 insn->imm = BPF_FUNC_probe_read_str;
6617 break;
6618 default:
6619 break;
6620 }
6621 }
6622 return 0;
6623 }
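
/* Example of the fallback above: a program calling
 * bpf_probe_read_kernel(&dst, sizeof(dst), src) keeps working on kernels
 * that predate the split helpers (added around v5.5); the call's imm is
 * silently rewritten to bpf_probe_read, which on such kernels has the
 * same semantics.
 */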
6624
6625 static int
6626 load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
6627 char *license, __u32 kern_version, int *pfd)
6628 {
6629 struct bpf_load_program_attr load_attr;
6630 char *cp, errmsg[STRERR_BUFSIZE];
6631 size_t log_buf_size = 0;
6632 char *log_buf = NULL;
6633 int btf_fd, ret;
6634
6635 if (!insns || !insns_cnt)
6636 return -EINVAL;
6637
6638 memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
6639 load_attr.prog_type = prog->type;
6640 /* old kernels might not support specifying expected_attach_type */
6641 if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
6642 prog->sec_def->is_exp_attach_type_optional)
6643 load_attr.expected_attach_type = 0;
6644 else
6645 load_attr.expected_attach_type = prog->expected_attach_type;
6646 if (kernel_supports(FEAT_PROG_NAME))
6647 load_attr.name = prog->name;
6648 load_attr.insns = insns;
6649 load_attr.insns_cnt = insns_cnt;
6650 load_attr.license = license;
6651 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
6652 prog->type == BPF_PROG_TYPE_LSM) {
6653 load_attr.attach_btf_id = prog->attach_btf_id;
6654 } else if (prog->type == BPF_PROG_TYPE_TRACING ||
6655 prog->type == BPF_PROG_TYPE_EXT) {
6656 load_attr.attach_prog_fd = prog->attach_prog_fd;
6657 load_attr.attach_btf_id = prog->attach_btf_id;
6658 } else {
6659 load_attr.kern_version = kern_version;
6660 load_attr.prog_ifindex = prog->prog_ifindex;
6661 }
6662 /* specify func_info/line_info only if kernel supports them */
6663 btf_fd = bpf_object__btf_fd(prog->obj);
6664 if (btf_fd >= 0 && kernel_supports(FEAT_BTF_FUNC)) {
6665 load_attr.prog_btf_fd = btf_fd;
6666 load_attr.func_info = prog->func_info;
6667 load_attr.func_info_rec_size = prog->func_info_rec_size;
6668 load_attr.func_info_cnt = prog->func_info_cnt;
6669 load_attr.line_info = prog->line_info;
6670 load_attr.line_info_rec_size = prog->line_info_rec_size;
6671 load_attr.line_info_cnt = prog->line_info_cnt;
6672 }
6673 load_attr.log_level = prog->log_level;
6674 load_attr.prog_flags = prog->prog_flags;
6675
6676 retry_load:
6677 if (log_buf_size) {
6678 log_buf = malloc(log_buf_size);
6679 if (!log_buf)
6680 return -ENOMEM;
6681
6682 *log_buf = 0;
6683 }
6684
6685 ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
6686
6687 if (ret >= 0) {
6688 if (log_buf && load_attr.log_level)
6689 pr_debug("verifier log:\n%s", log_buf);
6690
6691 if (prog->obj->rodata_map_idx >= 0 &&
6692 kernel_supports(FEAT_PROG_BIND_MAP)) {
6693 struct bpf_map *rodata_map =
6694 &prog->obj->maps[prog->obj->rodata_map_idx];
6695
6696 if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) {
6697 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6698 pr_warn("prog '%s': failed to bind .rodata map: %s\n",
6699 prog->name, cp);
6700 /* Don't fail hard if can't bind rodata. */
6701 }
6702 }
6703
6704 *pfd = ret;
6705 ret = 0;
6706 goto out;
6707 }
6708
6709 if (!log_buf || errno == ENOSPC) {
6710 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE,
6711 log_buf_size << 1);
6712
6713 free(log_buf);
6714 goto retry_load;
6715 }
6716 ret = errno ? -errno : -LIBBPF_ERRNO__LOAD;
6717 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6718 pr_warn("load bpf program failed: %s\n", cp);
6719 pr_perm_msg(ret);
6720
6721 if (log_buf && log_buf[0] != '\0') {
6722 ret = -LIBBPF_ERRNO__VERIFY;
6723 pr_warn("-- BEGIN DUMP LOG ---\n");
6724 pr_warn("\n%s\n", log_buf);
6725 pr_warn("-- END LOG --\n");
6726 } else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
6727 pr_warn("Program too large (%zu insns), at most %d insns\n",
6728 load_attr.insns_cnt, BPF_MAXINSNS);
6729 ret = -LIBBPF_ERRNO__PROG2BIG;
6730 } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
6731 /* Wrong program type? */
6732 int fd;
6733
6734 load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
6735 load_attr.expected_attach_type = 0;
6736 fd = bpf_load_program_xattr(&load_attr, NULL, 0);
6737 if (fd >= 0) {
6738 close(fd);
6739 ret = -LIBBPF_ERRNO__PROGTYPE;
6740 goto out;
6741 }
6742 }
6743
6744 out:
6745 free(log_buf);
6746 return ret;
6747 }
6748
6749 static int libbpf_find_attach_btf_id(struct bpf_program *prog);
6750
6751 int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
6752 {
6753 int err = 0, fd, i, btf_id;
6754
6755 if (prog->obj->loaded) {
6756 pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
6757 return -EINVAL;
6758 }
6759
6760 if ((prog->type == BPF_PROG_TYPE_TRACING ||
6761 prog->type == BPF_PROG_TYPE_LSM ||
6762 prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
6763 btf_id = libbpf_find_attach_btf_id(prog);
6764 if (btf_id <= 0)
6765 return btf_id;
6766 prog->attach_btf_id = btf_id;
6767 }
6768
6769 if (prog->instances.nr < 0 || !prog->instances.fds) {
6770 if (prog->preprocessor) {
6771 pr_warn("Internal error: can't load program '%s'\n",
6772 prog->name);
6773 return -LIBBPF_ERRNO__INTERNAL;
6774 }
6775
6776 prog->instances.fds = malloc(sizeof(int));
6777 if (!prog->instances.fds) {
6778 pr_warn("Not enough memory for BPF fds\n");
6779 return -ENOMEM;
6780 }
6781 prog->instances.nr = 1;
6782 prog->instances.fds[0] = -1;
6783 }
6784
6785 if (!prog->preprocessor) {
6786 if (prog->instances.nr != 1) {
6787 pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
6788 prog->name, prog->instances.nr);
6789 }
6790 err = load_program(prog, prog->insns, prog->insns_cnt,
6791 license, kern_ver, &fd);
6792 if (!err)
6793 prog->instances.fds[0] = fd;
6794 goto out;
6795 }
6796
6797 for (i = 0; i < prog->instances.nr; i++) {
6798 struct bpf_prog_prep_result result;
6799 bpf_program_prep_t preprocessor = prog->preprocessor;
6800
6801 memset(&result, 0, sizeof(result));
6802 err = preprocessor(prog, i, prog->insns,
6803 prog->insns_cnt, &result);
6804 if (err) {
6805 pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
6806 i, prog->name);
6807 goto out;
6808 }
6809
6810 if (!result.new_insn_ptr || !result.new_insn_cnt) {
6811 pr_debug("Skip loading the %dth instance of program '%s'\n",
6812 i, prog->name);
6813 prog->instances.fds[i] = -1;
6814 if (result.pfd)
6815 *result.pfd = -1;
6816 continue;
6817 }
6818
6819 err = load_program(prog, result.new_insn_ptr,
6820 result.new_insn_cnt, license, kern_ver, &fd);
6821 if (err) {
6822 pr_warn("Loading the %dth instance of program '%s' failed\n",
6823 i, prog->name);
6824 goto out;
6825 }
6826
6827 if (result.pfd)
6828 *result.pfd = fd;
6829 prog->instances.fds[i] = fd;
6830 }
6831 out:
6832 if (err)
6833 pr_warn("failed to load program '%s'\n", prog->name);
6834 zfree(&prog->insns);
6835 prog->insns_cnt = 0;
6836 return err;
6837 }
6838
6839 static int
6840 bpf_object__load_progs(struct bpf_object *obj, int log_level)
6841 {
6842 struct bpf_program *prog;
6843 size_t i;
6844 int err;
6845
6846 for (i = 0; i < obj->nr_programs; i++) {
6847 prog = &obj->programs[i];
6848 err = bpf_object__sanitize_prog(obj, prog);
6849 if (err)
6850 return err;
6851 }
6852
6853 for (i = 0; i < obj->nr_programs; i++) {
6854 prog = &obj->programs[i];
6855 if (prog_is_subprog(obj, prog))
6856 continue;
6857 if (!prog->load) {
6858 pr_debug("prog '%s': skipped loading\n", prog->name);
6859 continue;
6860 }
6861 prog->log_level |= log_level;
6862 err = bpf_program__load(prog, obj->license, obj->kern_version);
6863 if (err)
6864 return err;
6865 }
6866 return 0;
6867 }
6868
6869 static const struct bpf_sec_def *find_sec_def(const char *sec_name);
6870
6871 static struct bpf_object *
6872 __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
6873 const struct bpf_object_open_opts *opts)
6874 {
6875 const char *obj_name, *kconfig;
6876 struct bpf_program *prog;
6877 struct bpf_object *obj;
6878 char tmp_name[64];
6879 int err;
6880
6881 if (elf_version(EV_CURRENT) == EV_NONE) {
6882 pr_warn("failed to init libelf for %s\n",
6883 path ? : "(mem buf)");
6884 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
6885 }
6886
6887 if (!OPTS_VALID(opts, bpf_object_open_opts))
6888 return ERR_PTR(-EINVAL);
6889
6890 obj_name = OPTS_GET(opts, object_name, NULL);
6891 if (obj_buf) {
6892 if (!obj_name) {
6893 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
6894 (unsigned long)obj_buf,
6895 (unsigned long)obj_buf_sz);
6896 obj_name = tmp_name;
6897 }
6898 path = obj_name;
6899 pr_debug("loading object '%s' from buffer\n", obj_name);
6900 }
6901
6902 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
6903 if (IS_ERR(obj))
6904 return obj;
6905
6906 kconfig = OPTS_GET(opts, kconfig, NULL);
6907 if (kconfig) {
6908 obj->kconfig = strdup(kconfig);
6909 if (!obj->kconfig)
6910 return ERR_PTR(-ENOMEM);
6911 }
6912
6913 err = bpf_object__elf_init(obj);
6914 err = err ? : bpf_object__check_endianness(obj);
6915 err = err ? : bpf_object__elf_collect(obj);
6916 err = err ? : bpf_object__collect_externs(obj);
6917 err = err ? : bpf_object__finalize_btf(obj);
6918 err = err ? : bpf_object__init_maps(obj, opts);
6919 err = err ? : bpf_object__collect_relos(obj);
6920 if (err)
6921 goto out;
6922 bpf_object__elf_finish(obj);
6923
6924 bpf_object__for_each_program(prog, obj) {
6925 prog->sec_def = find_sec_def(prog->sec_name);
6926 if (!prog->sec_def)
6927 /* couldn't guess, but user might manually specify */
6928 continue;
6929
6930 if (prog->sec_def->is_sleepable)
6931 prog->prog_flags |= BPF_F_SLEEPABLE;
6932 bpf_program__set_type(prog, prog->sec_def->prog_type);
6933 bpf_program__set_expected_attach_type(prog,
6934 prog->sec_def->expected_attach_type);
6935
6936 if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
6937 prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
6938 prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
6939 }
6940
6941 return obj;
6942 out:
6943 bpf_object__close(obj);
6944 return ERR_PTR(err);
6945 }
6946
6947 static struct bpf_object *
6948 __bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
6949 {
6950 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
6951 .relaxed_maps = flags & MAPS_RELAX_COMPAT,
6952 );
6953
6954 /* param validation */
6955 if (!attr->file)
6956 return NULL;
6957
6958 pr_debug("loading %s\n", attr->file);
6959 return __bpf_object__open(attr->file, NULL, 0, &opts);
6960 }
6961
6962 struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
6963 {
6964 return __bpf_object__open_xattr(attr, 0);
6965 }
6966
6967 struct bpf_object *bpf_object__open(const char *path)
6968 {
6969 struct bpf_object_open_attr attr = {
6970 .file = path,
6971 .prog_type = BPF_PROG_TYPE_UNSPEC,
6972 };
6973
6974 return bpf_object__open_xattr(&attr);
6975 }
6976
6977 struct bpf_object *
6978 bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
6979 {
6980 if (!path)
6981 return ERR_PTR(-EINVAL);
6982
6983 pr_debug("loading %s\n", path);
6984
6985 return __bpf_object__open(path, NULL, 0, opts);
6986 }
6987
6988 struct bpf_object *
6989 bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
6990 const struct bpf_object_open_opts *opts)
6991 {
6992 if (!obj_buf || obj_buf_sz == 0)
6993 return ERR_PTR(-EINVAL);
6994
6995 return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts);
6996 }
6997
6998 struct bpf_object *
6999 bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
7000 const char *name)
7001 {
7002 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7003 .object_name = name,
7004 /* wrong default, but backwards-compatible */
7005 .relaxed_maps = true,
7006 );
7007
7008 /* returning NULL is wrong, but backwards-compatible */
7009 if (!obj_buf || obj_buf_sz == 0)
7010 return NULL;
7011
7012 return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
7013 }
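
/* Usage sketch for the opts-based open API above (illustrative only; the
 * file name, object name, and kconfig override are assumptions):
 */
#if 0
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
	.object_name = "my_obj",
	.kconfig = "CONFIG_MY_FEATURE=y",
);
struct bpf_object *obj;
int err;

obj = bpf_object__open_file("prog.bpf.o", &opts);
if (IS_ERR(obj))
	return PTR_ERR(obj);
err = bpf_object__load(obj);
if (err)
	bpf_object__close(obj);
#endif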
7014
7015 int bpf_object__unload(struct bpf_object *obj)
7016 {
7017 size_t i;
7018
7019 if (!obj)
7020 return -EINVAL;
7021
7022 for (i = 0; i < obj->nr_maps; i++) {
7023 zclose(obj->maps[i].fd);
7024 if (obj->maps[i].st_ops)
7025 zfree(&obj->maps[i].st_ops->kern_vdata);
7026 }
7027
7028 for (i = 0; i < obj->nr_programs; i++)
7029 bpf_program__unload(&obj->programs[i]);
7030
7031 return 0;
7032 }
7033
7034 static int bpf_object__sanitize_maps(struct bpf_object *obj)
7035 {
7036 struct bpf_map *m;
7037
7038 bpf_object__for_each_map(m, obj) {
7039 if (!bpf_map__is_internal(m))
7040 continue;
7041 if (!kernel_supports(FEAT_GLOBAL_DATA)) {
7042 pr_warn("kernel doesn't support global data\n");
7043 return -ENOTSUP;
7044 }
7045 if (!kernel_supports(FEAT_ARRAY_MMAP))
7046 m->def.map_flags ^= BPF_F_MMAPABLE;
7047 }
7048
7049 return 0;
7050 }
7051
7052 static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
7053 {
7054 char sym_type, sym_name[500];
7055 unsigned long long sym_addr;
7056 struct extern_desc *ext;
7057 int ret, err = 0;
7058 FILE *f;
7059
7060 f = fopen("/proc/kallsyms", "r");
7061 if (!f) {
7062 err = -errno;
7063 pr_warn("failed to open /proc/kallsyms: %d\n", err);
7064 return err;
7065 }
7066
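	/* each /proc/kallsyms line has the form
	 *   <hex address> <type letter> <symbol name>[ <module>]
	 * e.g. "ffffffff81000000 T _text"; the scanf format below parses
	 * the first three fields and discards the rest of the line
	 */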
7067 while (true) {
7068 ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
7069 &sym_addr, &sym_type, sym_name);
7070 if (ret == EOF && feof(f))
7071 break;
7072 if (ret != 3) {
7073 pr_warn("failed to read kallsyms entry: %d\n", ret);
7074 err = -EINVAL;
7075 goto out;
7076 }
7077
7078 ext = find_extern_by_name(obj, sym_name);
7079 if (!ext || ext->type != EXT_KSYM)
7080 continue;
7081
7082 if (ext->is_set && ext->ksym.addr != sym_addr) {
7083 pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
7084 sym_name, ext->ksym.addr, sym_addr);
7085 err = -EINVAL;
7086 goto out;
7087 }
7088 if (!ext->is_set) {
7089 ext->is_set = true;
7090 ext->ksym.addr = sym_addr;
7091 pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr);
7092 }
7093 }
7094
7095 out:
7096 fclose(f);
7097 return err;
7098 }
7099
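/* The loop above parses /proc/kallsyms lines of the form
 * "<addr> <type> <name>", e.g.:
 *
 *	ffffffff81000000 T _text
 *
 * and matches the names against __ksym externs declared on the BPF
 * side. A hypothetical BPF-side declaration resolved this way:
 *
 *	extern const void bpf_prog_active __ksym;
 */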
7100 static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
7101 {
7102 struct extern_desc *ext;
7103 int i, id;
7104
7105 for (i = 0; i < obj->nr_extern; i++) {
7106 const struct btf_type *targ_var, *targ_type;
7107 __u32 targ_type_id, local_type_id;
7108 const char *targ_var_name;
7109 int ret;
7110
7111 ext = &obj->externs[i];
7112 if (ext->type != EXT_KSYM || !ext->ksym.type_id)
7113 continue;
7114
7115 id = btf__find_by_name_kind(obj->btf_vmlinux, ext->name,
7116 BTF_KIND_VAR);
7117 if (id <= 0) {
7118 pr_warn("extern (ksym) '%s': failed to find BTF ID in vmlinux BTF.\n",
7119 ext->name);
7120 return -ESRCH;
7121 }
7122
7123 /* find local type_id */
7124 local_type_id = ext->ksym.type_id;
7125
7126 /* find target type_id */
7127 targ_var = btf__type_by_id(obj->btf_vmlinux, id);
7128 targ_var_name = btf__name_by_offset(obj->btf_vmlinux,
7129 targ_var->name_off);
7130 targ_type = skip_mods_and_typedefs(obj->btf_vmlinux,
7131 targ_var->type,
7132 &targ_type_id);
7133
7134 ret = bpf_core_types_are_compat(obj->btf, local_type_id,
7135 obj->btf_vmlinux, targ_type_id);
7136 if (ret <= 0) {
7137 const struct btf_type *local_type;
7138 const char *targ_name, *local_name;
7139
7140 local_type = btf__type_by_id(obj->btf, local_type_id);
7141 local_name = btf__name_by_offset(obj->btf,
7142 local_type->name_off);
7143 targ_name = btf__name_by_offset(obj->btf_vmlinux,
7144 targ_type->name_off);
7145
7146 pr_warn("extern (ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
7147 ext->name, local_type_id,
7148 btf_kind_str(local_type), local_name, targ_type_id,
7149 btf_kind_str(targ_type), targ_name);
7150 return -EINVAL;
7151 }
7152
7153 ext->is_set = true;
7154 ext->ksym.vmlinux_btf_id = id;
7155 pr_debug("extern (ksym) '%s': resolved to [%d] %s %s\n",
7156 ext->name, id, btf_kind_str(targ_var), targ_var_name);
7157 }
7158 return 0;
7159 }
7160
7161 static int bpf_object__resolve_externs(struct bpf_object *obj,
7162 const char *extra_kconfig)
7163 {
7164 bool need_config = false, need_kallsyms = false;
7165 bool need_vmlinux_btf = false;
7166 struct extern_desc *ext;
7167 void *kcfg_data = NULL;
7168 int err, i;
7169
7170 if (obj->nr_extern == 0)
7171 return 0;
7172
7173 if (obj->kconfig_map_idx >= 0)
7174 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
7175
7176 for (i = 0; i < obj->nr_extern; i++) {
7177 ext = &obj->externs[i];
7178
7179 if (ext->type == EXT_KCFG &&
7180 strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
7181 void *ext_val = kcfg_data + ext->kcfg.data_off;
7182 __u32 kver = get_kernel_version();
7183
7184 if (!kver) {
7185 pr_warn("failed to get kernel version\n");
7186 return -EINVAL;
7187 }
7188 err = set_kcfg_value_num(ext, ext_val, kver);
7189 if (err)
7190 return err;
7191 pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver);
7192 } else if (ext->type == EXT_KCFG &&
7193 strncmp(ext->name, "CONFIG_", 7) == 0) {
7194 need_config = true;
7195 } else if (ext->type == EXT_KSYM) {
7196 if (ext->ksym.type_id)
7197 need_vmlinux_btf = true;
7198 else
7199 need_kallsyms = true;
7200 } else {
7201 pr_warn("unrecognized extern '%s'\n", ext->name);
7202 return -EINVAL;
7203 }
7204 }
7205 if (need_config && extra_kconfig) {
7206 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
7207 if (err)
7208 return -EINVAL;
7209 need_config = false;
7210 for (i = 0; i < obj->nr_extern; i++) {
7211 ext = &obj->externs[i];
7212 if (ext->type == EXT_KCFG && !ext->is_set) {
7213 need_config = true;
7214 break;
7215 }
7216 }
7217 }
7218 if (need_config) {
7219 err = bpf_object__read_kconfig_file(obj, kcfg_data);
7220 if (err)
7221 return -EINVAL;
7222 }
7223 if (need_kallsyms) {
7224 err = bpf_object__read_kallsyms_file(obj);
7225 if (err)
7226 return -EINVAL;
7227 }
7228 if (need_vmlinux_btf) {
7229 err = bpf_object__resolve_ksyms_btf_id(obj);
7230 if (err)
7231 return -EINVAL;
7232 }
7233 for (i = 0; i < obj->nr_extern; i++) {
7234 ext = &obj->externs[i];
7235
7236 if (!ext->is_set && !ext->is_weak) {
7237 pr_warn("extern %s (strong) not resolved\n", ext->name);
7238 return -ESRCH;
7239 } else if (!ext->is_set) {
7240 pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
7241 ext->name);
7242 }
7243 }
7244
7245 return 0;
7246 }
7247
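/* Sketch of the BPF-side extern declarations that the resolution logic
 * above serves; the names here are illustrative:
 *
 *	extern unsigned int LINUX_KERNEL_VERSION __kconfig;
 *	extern _Bool CONFIG_BPF_JIT __kconfig __weak;
 *
 * LINUX_KERNEL_VERSION is filled from get_kernel_version(), CONFIG_*
 * values come from the extra kconfig string or the system kconfig, and
 * a weak extern that stays unresolved defaults to zero instead of
 * failing the load.
 */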
7248 int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
7249 {
7250 struct bpf_object *obj;
7251 int err, i;
7252
7253 if (!attr)
7254 return -EINVAL;
7255 obj = attr->obj;
7256 if (!obj)
7257 return -EINVAL;
7258
7259 if (obj->loaded) {
7260 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
7261 return -EINVAL;
7262 }
7263
7264 err = bpf_object__probe_loading(obj);
7265 err = err ? : bpf_object__load_vmlinux_btf(obj);
7266 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
7267 err = err ? : bpf_object__sanitize_and_load_btf(obj);
7268 err = err ? : bpf_object__sanitize_maps(obj);
7269 err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
7270 err = err ? : bpf_object__create_maps(obj);
7271 err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
7272 err = err ? : bpf_object__load_progs(obj, attr->log_level);
7273
7274 btf__free(obj->btf_vmlinux);
7275 obj->btf_vmlinux = NULL;
7276
7277 obj->loaded = true; /* doesn't matter if successfully or not */
7278
7279 if (err)
7280 goto out;
7281
7282 return 0;
7283 out:
7284 /* unpin any maps that were auto-pinned during load */
7285 for (i = 0; i < obj->nr_maps; i++)
7286 if (obj->maps[i].pinned && !obj->maps[i].reused)
7287 bpf_map__unpin(&obj->maps[i], NULL);
7288
7289 bpf_object__unload(obj);
7290 pr_warn("failed to load object '%s'\n", obj->path);
7291 return err;
7292 }
7293
7294 int bpf_object__load(struct bpf_object *obj)
7295 {
7296 struct bpf_object_load_attr attr = {
7297 .obj = obj,
7298 };
7299
7300 return bpf_object__load_xattr(&attr);
7301 }
7302
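/* Example: the canonical open/load/close flow built from the entry
 * points above (file name hypothetical):
 *
 *	struct bpf_object *obj;
 *	int err;
 *
 *	obj = bpf_object__open("prog.bpf.o");
 *	err = libbpf_get_error(obj);
 *	if (err)
 *		return err;
 *
 *	err = bpf_object__load(obj);
 *	if (err) {
 *		bpf_object__close(obj);
 *		return err;
 *	}
 */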
7303 static int make_parent_dir(const char *path)
7304 {
7305 char *cp, errmsg[STRERR_BUFSIZE];
7306 char *dname, *dir;
7307 int err = 0;
7308
7309 dname = strdup(path);
7310 if (dname == NULL)
7311 return -ENOMEM;
7312
7313 dir = dirname(dname);
7314 if (mkdir(dir, 0700) && errno != EEXIST)
7315 err = -errno;
7316
7317 free(dname);
7318 if (err) {
7319 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7320 pr_warn("failed to mkdir %s: %s\n", path, cp);
7321 }
7322 return err;
7323 }
7324
7325 static int check_path(const char *path)
7326 {
7327 char *cp, errmsg[STRERR_BUFSIZE];
7328 struct statfs st_fs;
7329 char *dname, *dir;
7330 int err = 0;
7331
7332 if (path == NULL)
7333 return -EINVAL;
7334
7335 dname = strdup(path);
7336 if (dname == NULL)
7337 return -ENOMEM;
7338
7339 dir = dirname(dname);
7340 if (statfs(dir, &st_fs)) {
7341 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7342 pr_warn("failed to statfs %s: %s\n", dir, cp);
7343 err = -errno;
7344 }
7345 free(dname);
7346
7347 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
7348 pr_warn("specified path %s is not on BPF FS\n", path);
7349 err = -EINVAL;
7350 }
7351
7352 return err;
7353 }
7354
7355 int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
7356 int instance)
7357 {
7358 char *cp, errmsg[STRERR_BUFSIZE];
7359 int err;
7360
7361 err = make_parent_dir(path);
7362 if (err)
7363 return err;
7364
7365 err = check_path(path);
7366 if (err)
7367 return err;
7368
7369 if (prog == NULL) {
7370 pr_warn("invalid program pointer\n");
7371 return -EINVAL;
7372 }
7373
7374 if (instance < 0 || instance >= prog->instances.nr) {
7375 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
7376 instance, prog->name, prog->instances.nr);
7377 return -EINVAL;
7378 }
7379
7380 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
7381 err = -errno;
7382 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
7383 pr_warn("failed to pin program: %s\n", cp);
7384 return err;
7385 }
7386 pr_debug("pinned program '%s'\n", path);
7387
7388 return 0;
7389 }
7390
7391 int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
7392 int instance)
7393 {
7394 int err;
7395
7396 err = check_path(path);
7397 if (err)
7398 return err;
7399
7400 if (prog == NULL) {
7401 pr_warn("invalid program pointer\n");
7402 return -EINVAL;
7403 }
7404
7405 if (instance < 0 || instance >= prog->instances.nr) {
7406 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
7407 instance, prog->name, prog->instances.nr);
7408 return -EINVAL;
7409 }
7410
7411 err = unlink(path);
7412 if (err != 0)
7413 return -errno;
7414 pr_debug("unpinned program '%s'\n", path);
7415
7416 return 0;
7417 }
7418
7419 int bpf_program__pin(struct bpf_program *prog, const char *path)
7420 {
7421 int i, err;
7422
7423 err = make_parent_dir(path);
7424 if (err)
7425 return err;
7426
7427 err = check_path(path);
7428 if (err)
7429 return err;
7430
7431 if (prog == NULL) {
7432 pr_warn("invalid program pointer\n");
7433 return -EINVAL;
7434 }
7435
7436 if (prog->instances.nr <= 0) {
7437 pr_warn("no instances of prog %s to pin\n", prog->name);
7438 return -EINVAL;
7439 }
7440
7441 if (prog->instances.nr == 1) {
7442 /* don't create subdirs when pinning single instance */
7443 return bpf_program__pin_instance(prog, path, 0);
7444 }
7445
7446 for (i = 0; i < prog->instances.nr; i++) {
7447 char buf[PATH_MAX];
7448 int len;
7449
7450 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7451 if (len < 0) {
7452 err = -EINVAL;
7453 goto err_unpin;
7454 } else if (len >= PATH_MAX) {
7455 err = -ENAMETOOLONG;
7456 goto err_unpin;
7457 }
7458
7459 err = bpf_program__pin_instance(prog, buf, i);
7460 if (err)
7461 goto err_unpin;
7462 }
7463
7464 return 0;
7465
7466 err_unpin:
7467 for (i = i - 1; i >= 0; i--) {
7468 char buf[PATH_MAX];
7469 int len;
7470
7471 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7472 if (len < 0)
7473 continue;
7474 else if (len >= PATH_MAX)
7475 continue;
7476
7477 bpf_program__unpin_instance(prog, buf, i);
7478 }
7479
7480 rmdir(path);
7481
7482 return err;
7483 }
7484
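/* Example: pinning a single-instance program (pin path hypothetical;
 * /sys/fs/bpf must be mounted as bpffs):
 *
 *	err = bpf_program__pin(prog, "/sys/fs/bpf/my_prog");
 *
 * With multiple instances, per-instance pins are created at
 * "<path>/0", "<path>/1", etc., and all of them are unwound on error.
 */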
7485 int bpf_program__unpin(struct bpf_program *prog, const char *path)
7486 {
7487 int i, err;
7488
7489 err = check_path(path);
7490 if (err)
7491 return err;
7492
7493 if (prog == NULL) {
7494 pr_warn("invalid program pointer\n");
7495 return -EINVAL;
7496 }
7497
7498 if (prog->instances.nr <= 0) {
7499 pr_warn("no instances of prog %s to pin\n", prog->name);
7500 return -EINVAL;
7501 }
7502
7503 if (prog->instances.nr == 1) {
7504 /* single instance was pinned at path itself, no subdirs */
7505 return bpf_program__unpin_instance(prog, path, 0);
7506 }
7507
7508 for (i = 0; i < prog->instances.nr; i++) {
7509 char buf[PATH_MAX];
7510 int len;
7511
7512 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7513 if (len < 0)
7514 return -EINVAL;
7515 else if (len >= PATH_MAX)
7516 return -ENAMETOOLONG;
7517
7518 err = bpf_program__unpin_instance(prog, buf, i);
7519 if (err)
7520 return err;
7521 }
7522
7523 err = rmdir(path);
7524 if (err)
7525 return -errno;
7526
7527 return 0;
7528 }
7529
7530 int bpf_map__pin(struct bpf_map *map, const char *path)
7531 {
7532 char *cp, errmsg[STRERR_BUFSIZE];
7533 int err;
7534
7535 if (map == NULL) {
7536 pr_warn("invalid map pointer\n");
7537 return -EINVAL;
7538 }
7539
7540 if (map->pin_path) {
7541 if (path && strcmp(path, map->pin_path)) {
7542 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
7543 bpf_map__name(map), map->pin_path, path);
7544 return -EINVAL;
7545 } else if (map->pinned) {
7546 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
7547 bpf_map__name(map), map->pin_path);
7548 return 0;
7549 }
7550 } else {
7551 if (!path) {
7552 pr_warn("missing a path to pin map '%s' at\n",
7553 bpf_map__name(map));
7554 return -EINVAL;
7555 } else if (map->pinned) {
7556 pr_warn("map '%s' already pinned\n", bpf_map__name(map));
7557 return -EEXIST;
7558 }
7559
7560 map->pin_path = strdup(path);
7561 if (!map->pin_path) {
7562 err = -errno;
7563 goto out_err;
7564 }
7565 }
7566
7567 err = make_parent_dir(map->pin_path);
7568 if (err)
7569 return err;
7570
7571 err = check_path(map->pin_path);
7572 if (err)
7573 return err;
7574
7575 if (bpf_obj_pin(map->fd, map->pin_path)) {
7576 err = -errno;
7577 goto out_err;
7578 }
7579
7580 map->pinned = true;
7581 pr_debug("pinned map '%s'\n", map->pin_path);
7582
7583 return 0;
7584
7585 out_err:
7586 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7587 pr_warn("failed to pin map: %s\n", cp);
7588 return err;
7589 }
7590
7591 int bpf_map__unpin(struct bpf_map *map, const char *path)
7592 {
7593 int err;
7594
7595 if (map == NULL) {
7596 pr_warn("invalid map pointer\n");
7597 return -EINVAL;
7598 }
7599
7600 if (map->pin_path) {
7601 if (path && strcmp(path, map->pin_path)) {
7602 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
7603 bpf_map__name(map), map->pin_path, path);
7604 return -EINVAL;
7605 }
7606 path = map->pin_path;
7607 } else if (!path) {
7608 pr_warn("no path to unpin map '%s' from\n",
7609 bpf_map__name(map));
7610 return -EINVAL;
7611 }
7612
7613 err = check_path(path);
7614 if (err)
7615 return err;
7616
7617 err = unlink(path);
7618 if (err != 0)
7619 return -errno;
7620
7621 map->pinned = false;
7622 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
7623
7624 return 0;
7625 }
7626
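/* Example: pinning a map at a stored pin path so that later runs can
 * reuse it (path hypothetical):
 *
 *	err = bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");
 *	if (!err)
 *		err = bpf_map__pin(map, NULL);
 *
 * Passing NULL reuses the stored pin_path; bpf_map__unpin(map, NULL)
 * removes the pin the same way.
 */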
7627 int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
7628 {
7629 char *new = NULL;
7630
7631 if (path) {
7632 new = strdup(path);
7633 if (!new)
7634 return -errno;
7635 }
7636
7637 free(map->pin_path);
7638 map->pin_path = new;
7639 return 0;
7640 }
7641
7642 const char *bpf_map__get_pin_path(const struct bpf_map *map)
7643 {
7644 return map->pin_path;
7645 }
7646
7647 bool bpf_map__is_pinned(const struct bpf_map *map)
7648 {
7649 return map->pinned;
7650 }
7651
7652 int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
7653 {
7654 struct bpf_map *map;
7655 int err;
7656
7657 if (!obj)
7658 return -ENOENT;
7659
7660 if (!obj->loaded) {
7661 pr_warn("object not yet loaded; load it first\n");
7662 return -ENOENT;
7663 }
7664
7665 bpf_object__for_each_map(map, obj) {
7666 char *pin_path = NULL;
7667 char buf[PATH_MAX];
7668
7669 if (path) {
7670 int len;
7671
7672 len = snprintf(buf, PATH_MAX, "%s/%s", path,
7673 bpf_map__name(map));
7674 if (len < 0) {
7675 err = -EINVAL;
7676 goto err_unpin_maps;
7677 } else if (len >= PATH_MAX) {
7678 err = -ENAMETOOLONG;
7679 goto err_unpin_maps;
7680 }
7681 pin_path = buf;
7682 } else if (!map->pin_path) {
7683 continue;
7684 }
7685
7686 err = bpf_map__pin(map, pin_path);
7687 if (err)
7688 goto err_unpin_maps;
7689 }
7690
7691 return 0;
7692
7693 err_unpin_maps:
7694 while ((map = bpf_map__prev(map, obj))) {
7695 if (!map->pin_path)
7696 continue;
7697
7698 bpf_map__unpin(map, NULL);
7699 }
7700
7701 return err;
7702 }
7703
7704 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
7705 {
7706 struct bpf_map *map;
7707 int err;
7708
7709 if (!obj)
7710 return -ENOENT;
7711
7712 bpf_object__for_each_map(map, obj) {
7713 char *pin_path = NULL;
7714 char buf[PATH_MAX];
7715
7716 if (path) {
7717 int len;
7718
7719 len = snprintf(buf, PATH_MAX, "%s/%s", path,
7720 bpf_map__name(map));
7721 if (len < 0)
7722 return -EINVAL;
7723 else if (len >= PATH_MAX)
7724 return -ENAMETOOLONG;
7725 pin_path = buf;
7726 } else if (!map->pin_path) {
7727 continue;
7728 }
7729
7730 err = bpf_map__unpin(map, pin_path);
7731 if (err)
7732 return err;
7733 }
7734
7735 return 0;
7736 }
7737
7738 int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
7739 {
7740 struct bpf_program *prog;
7741 int err;
7742
7743 if (!obj)
7744 return -ENOENT;
7745
7746 if (!obj->loaded) {
7747 pr_warn("object not yet loaded; load it first\n");
7748 return -ENOENT;
7749 }
7750
7751 bpf_object__for_each_program(prog, obj) {
7752 char buf[PATH_MAX];
7753 int len;
7754
7755 len = snprintf(buf, PATH_MAX, "%s/%s", path,
7756 prog->pin_name);
7757 if (len < 0) {
7758 err = -EINVAL;
7759 goto err_unpin_programs;
7760 } else if (len >= PATH_MAX) {
7761 err = -ENAMETOOLONG;
7762 goto err_unpin_programs;
7763 }
7764
7765 err = bpf_program__pin(prog, buf);
7766 if (err)
7767 goto err_unpin_programs;
7768 }
7769
7770 return 0;
7771
7772 err_unpin_programs:
7773 while ((prog = bpf_program__prev(prog, obj))) {
7774 char buf[PATH_MAX];
7775 int len;
7776
7777 len = snprintf(buf, PATH_MAX, "%s/%s", path,
7778 prog->pin_name);
7779 if (len < 0)
7780 continue;
7781 else if (len >= PATH_MAX)
7782 continue;
7783
7784 bpf_program__unpin(prog, buf);
7785 }
7786
7787 return err;
7788 }
7789
7790 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
7791 {
7792 struct bpf_program *prog;
7793 int err;
7794
7795 if (!obj)
7796 return -ENOENT;
7797
7798 bpf_object__for_each_program(prog, obj) {
7799 char buf[PATH_MAX];
7800 int len;
7801
7802 len = snprintf(buf, PATH_MAX, "%s/%s", path,
7803 prog->pin_name);
7804 if (len < 0)
7805 return -EINVAL;
7806 else if (len >= PATH_MAX)
7807 return -ENAMETOOLONG;
7808
7809 err = bpf_program__unpin(prog, buf);
7810 if (err)
7811 return err;
7812 }
7813
7814 return 0;
7815 }
7816
7817 int bpf_object__pin(struct bpf_object *obj, const char *path)
7818 {
7819 int err;
7820
7821 err = bpf_object__pin_maps(obj, path);
7822 if (err)
7823 return err;
7824
7825 err = bpf_object__pin_programs(obj, path);
7826 if (err) {
7827 bpf_object__unpin_maps(obj, path);
7828 return err;
7829 }
7830
7831 return 0;
7832 }
7833
7834 static void bpf_map__destroy(struct bpf_map *map)
7835 {
7836 if (map->clear_priv)
7837 map->clear_priv(map, map->priv);
7838 map->priv = NULL;
7839 map->clear_priv = NULL;
7840
7841 if (map->inner_map) {
7842 bpf_map__destroy(map->inner_map);
7843 zfree(&map->inner_map);
7844 }
7845
7846 zfree(&map->init_slots);
7847 map->init_slots_sz = 0;
7848
7849 if (map->mmaped) {
7850 munmap(map->mmaped, bpf_map_mmap_sz(map));
7851 map->mmaped = NULL;
7852 }
7853
7854 if (map->st_ops) {
7855 zfree(&map->st_ops->data);
7856 zfree(&map->st_ops->progs);
7857 zfree(&map->st_ops->kern_func_off);
7858 zfree(&map->st_ops);
7859 }
7860
7861 zfree(&map->name);
7862 zfree(&map->pin_path);
7863
7864 if (map->fd >= 0)
7865 zclose(map->fd);
7866 }
7867
7868 void bpf_object__close(struct bpf_object *obj)
7869 {
7870 size_t i;
7871
7872 if (IS_ERR_OR_NULL(obj))
7873 return;
7874
7875 if (obj->clear_priv)
7876 obj->clear_priv(obj, obj->priv);
7877
7878 bpf_object__elf_finish(obj);
7879 bpf_object__unload(obj);
7880 btf__free(obj->btf);
7881 btf_ext__free(obj->btf_ext);
7882
7883 for (i = 0; i < obj->nr_maps; i++)
7884 bpf_map__destroy(&obj->maps[i]);
7885
7886 zfree(&obj->kconfig);
7887 zfree(&obj->externs);
7888 obj->nr_extern = 0;
7889
7890 zfree(&obj->maps);
7891 obj->nr_maps = 0;
7892
7893 if (obj->programs && obj->nr_programs) {
7894 for (i = 0; i < obj->nr_programs; i++)
7895 bpf_program__exit(&obj->programs[i]);
7896 }
7897 zfree(&obj->programs);
7898
7899 list_del(&obj->list);
7900 free(obj);
7901 }
7902
7903 struct bpf_object *
7904 bpf_object__next(struct bpf_object *prev)
7905 {
7906 struct bpf_object *next;
7907
7908 if (!prev)
7909 next = list_first_entry(&bpf_objects_list,
7910 struct bpf_object,
7911 list);
7912 else
7913 next = list_next_entry(prev, list);
7914
7915 /* Empty list is detected here, so no check is needed on entry. */
7916 if (&next->list == &bpf_objects_list)
7917 return NULL;
7918
7919 return next;
7920 }
7921
7922 const char *bpf_object__name(const struct bpf_object *obj)
7923 {
7924 return obj ? obj->name : ERR_PTR(-EINVAL);
7925 }
7926
7927 unsigned int bpf_object__kversion(const struct bpf_object *obj)
7928 {
7929 return obj ? obj->kern_version : 0;
7930 }
7931
7932 struct btf *bpf_object__btf(const struct bpf_object *obj)
7933 {
7934 return obj ? obj->btf : NULL;
7935 }
7936
7937 int bpf_object__btf_fd(const struct bpf_object *obj)
7938 {
7939 return obj->btf ? btf__fd(obj->btf) : -1;
7940 }
7941
7942 int bpf_object__set_priv(struct bpf_object *obj, void *priv,
7943 bpf_object_clear_priv_t clear_priv)
7944 {
7945 if (obj->priv && obj->clear_priv)
7946 obj->clear_priv(obj, obj->priv);
7947
7948 obj->priv = priv;
7949 obj->clear_priv = clear_priv;
7950 return 0;
7951 }
7952
7953 void *bpf_object__priv(const struct bpf_object *obj)
7954 {
7955 return obj ? obj->priv : ERR_PTR(-EINVAL);
7956 }
7957
7958 static struct bpf_program *
7959 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
7960 bool forward)
7961 {
7962 size_t nr_programs = obj->nr_programs;
7963 ssize_t idx;
7964
7965 if (!nr_programs)
7966 return NULL;
7967
7968 if (!p)
7969 /* Iter from the beginning */
7970 return forward ? &obj->programs[0] :
7971 &obj->programs[nr_programs - 1];
7972
7973 if (p->obj != obj) {
7974 pr_warn("error: program handler doesn't match object\n");
7975 return NULL;
7976 }
7977
7978 idx = (p - obj->programs) + (forward ? 1 : -1);
7979 if (idx >= obj->nr_programs || idx < 0)
7980 return NULL;
7981 return &obj->programs[idx];
7982 }
7983
7984 struct bpf_program *
7985 bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
7986 {
7987 struct bpf_program *prog = prev;
7988
7989 do {
7990 prog = __bpf_program__iter(prog, obj, true);
7991 } while (prog && prog_is_subprog(obj, prog));
7992
7993 return prog;
7994 }
7995
7996 struct bpf_program *
7997 bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
7998 {
7999 struct bpf_program *prog = next;
8000
8001 do {
8002 prog = __bpf_program__iter(prog, obj, false);
8003 } while (prog && prog_is_subprog(obj, prog));
8004
8005 return prog;
8006 }
8007
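/* Example: walking all entry-point programs of an object with the
 * iterators above (subprograms are skipped):
 *
 *	struct bpf_program *prog;
 *
 *	bpf_object__for_each_program(prog, obj)
 *		printf("found program: %s\n", bpf_program__name(prog));
 */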
8008 int bpf_program__set_priv(struct bpf_program *prog, void *priv,
8009 bpf_program_clear_priv_t clear_priv)
8010 {
8011 if (prog->priv && prog->clear_priv)
8012 prog->clear_priv(prog, prog->priv);
8013
8014 prog->priv = priv;
8015 prog->clear_priv = clear_priv;
8016 return 0;
8017 }
8018
8019 void *bpf_program__priv(const struct bpf_program *prog)
8020 {
8021 return prog ? prog->priv : ERR_PTR(-EINVAL);
8022 }
8023
8024 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
8025 {
8026 prog->prog_ifindex = ifindex;
8027 }
8028
8029 const char *bpf_program__name(const struct bpf_program *prog)
8030 {
8031 return prog->name;
8032 }
8033
8034 const char *bpf_program__section_name(const struct bpf_program *prog)
8035 {
8036 return prog->sec_name;
8037 }
8038
8039 const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
8040 {
8041 const char *title;
8042
8043 title = prog->sec_name;
8044 if (needs_copy) {
8045 title = strdup(title);
8046 if (!title) {
8047 pr_warn("failed to strdup program title\n");
8048 return ERR_PTR(-ENOMEM);
8049 }
8050 }
8051
8052 return title;
8053 }
8054
8055 bool bpf_program__autoload(const struct bpf_program *prog)
8056 {
8057 return prog->load;
8058 }
8059
8060 int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
8061 {
8062 if (prog->obj->loaded)
8063 return -EINVAL;
8064
8065 prog->load = autoload;
8066 return 0;
8067 }
8068
8069 int bpf_program__fd(const struct bpf_program *prog)
8070 {
8071 return bpf_program__nth_fd(prog, 0);
8072 }
8073
8074 size_t bpf_program__size(const struct bpf_program *prog)
8075 {
8076 return prog->insns_cnt * BPF_INSN_SZ;
8077 }
8078
8079 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
8080 bpf_program_prep_t prep)
8081 {
8082 int *instances_fds;
8083
8084 if (nr_instances <= 0 || !prep)
8085 return -EINVAL;
8086
8087 if (prog->instances.nr > 0 || prog->instances.fds) {
8088 pr_warn("Can't set pre-processor after loading\n");
8089 return -EINVAL;
8090 }
8091
8092 instances_fds = malloc(sizeof(int) * nr_instances);
8093 if (!instances_fds) {
8094 pr_warn("alloc memory failed for fds\n");
8095 return -ENOMEM;
8096 }
8097
8098 /* fill all fds with -1 */
8099 memset(instances_fds, -1, sizeof(int) * nr_instances);
8100
8101 prog->instances.nr = nr_instances;
8102 prog->instances.fds = instances_fds;
8103 prog->preprocessor = prep;
8104 return 0;
8105 }
8106
8107 int bpf_program__nth_fd(const struct bpf_program *prog, int n)
8108 {
8109 int fd;
8110
8111 if (!prog)
8112 return -EINVAL;
8113
8114 if (n >= prog->instances.nr || n < 0) {
8115 pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
8116 n, prog->name, prog->instances.nr);
8117 return -EINVAL;
8118 }
8119
8120 fd = prog->instances.fds[n];
8121 if (fd < 0) {
8122 pr_warn("%dth instance of program '%s' is invalid\n",
8123 n, prog->name);
8124 return -ENOENT;
8125 }
8126
8127 return fd;
8128 }
8129
8130 enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog)
8131 {
8132 return prog->type;
8133 }
8134
8135 void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
8136 {
8137 prog->type = type;
8138 }
8139
8140 static bool bpf_program__is_type(const struct bpf_program *prog,
8141 enum bpf_prog_type type)
8142 {
8143 return prog ? (prog->type == type) : false;
8144 }
8145
8146 #define BPF_PROG_TYPE_FNS(NAME, TYPE) \
8147 int bpf_program__set_##NAME(struct bpf_program *prog) \
8148 { \
8149 if (!prog) \
8150 return -EINVAL; \
8151 bpf_program__set_type(prog, TYPE); \
8152 return 0; \
8153 } \
8154 \
8155 bool bpf_program__is_##NAME(const struct bpf_program *prog) \
8156 { \
8157 return bpf_program__is_type(prog, TYPE); \
8158 } \
8159
8160 BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
8161 BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
8162 BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
8163 BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
8164 BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
8165 BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
8166 BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
8167 BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
8168 BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
8169 BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
8170 BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
8171 BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
8172 BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP);
8173
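/* Example: the generated helpers in use, forcing a program's type when
 * section-name guessing is not desired:
 *
 *	if (!bpf_program__is_xdp(prog))
 *		err = bpf_program__set_xdp(prog);
 */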
8174 enum bpf_attach_type
8175 bpf_program__get_expected_attach_type(struct bpf_program *prog)
8176 {
8177 return prog->expected_attach_type;
8178 }
8179
8180 void bpf_program__set_expected_attach_type(struct bpf_program *prog,
8181 enum bpf_attach_type type)
8182 {
8183 prog->expected_attach_type = type;
8184 }
8185
8186 #define BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype_optional, \
8187 attachable, attach_btf) \
8188 { \
8189 .sec = string, \
8190 .len = sizeof(string) - 1, \
8191 .prog_type = ptype, \
8192 .expected_attach_type = eatype, \
8193 .is_exp_attach_type_optional = eatype_optional, \
8194 .is_attachable = attachable, \
8195 .is_attach_btf = attach_btf, \
8196 }
8197
8198 /* Programs that can NOT be attached. */
8199 #define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
8200
8201 /* Programs that can be attached. */
8202 #define BPF_APROG_SEC(string, ptype, atype) \
8203 BPF_PROG_SEC_IMPL(string, ptype, atype, true, 1, 0)
8204
8205 /* Programs that must specify expected attach type at load time. */
8206 #define BPF_EAPROG_SEC(string, ptype, eatype) \
8207 BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 1, 0)
8208
8209 /* Programs that use BTF to identify attach point */
8210 #define BPF_PROG_BTF(string, ptype, eatype) \
8211 BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 0, 1)
8212
8213 /* Programs that can be attached but attach type can't be identified by section
8214 * name. Kept for backward compatibility.
8215 */
8216 #define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
8217
8218 #define SEC_DEF(sec_pfx, ptype, ...) { \
8219 .sec = sec_pfx, \
8220 .len = sizeof(sec_pfx) - 1, \
8221 .prog_type = BPF_PROG_TYPE_##ptype, \
8222 __VA_ARGS__ \
8223 }
8224
8225 static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
8226 struct bpf_program *prog);
8227 static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
8228 struct bpf_program *prog);
8229 static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
8230 struct bpf_program *prog);
8231 static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
8232 struct bpf_program *prog);
8233 static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
8234 struct bpf_program *prog);
8235 static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
8236 struct bpf_program *prog);
8237
8238 static const struct bpf_sec_def section_defs[] = {
8239 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
8240 BPF_PROG_SEC("sk_reuseport", BPF_PROG_TYPE_SK_REUSEPORT),
8241 SEC_DEF("kprobe/", KPROBE,
8242 .attach_fn = attach_kprobe),
8243 BPF_PROG_SEC("uprobe/", BPF_PROG_TYPE_KPROBE),
8244 SEC_DEF("kretprobe/", KPROBE,
8245 .attach_fn = attach_kprobe),
8246 BPF_PROG_SEC("uretprobe/", BPF_PROG_TYPE_KPROBE),
8247 BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
8248 BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
8249 SEC_DEF("tracepoint/", TRACEPOINT,
8250 .attach_fn = attach_tp),
8251 SEC_DEF("tp/", TRACEPOINT,
8252 .attach_fn = attach_tp),
8253 SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT,
8254 .attach_fn = attach_raw_tp),
8255 SEC_DEF("raw_tp/", RAW_TRACEPOINT,
8256 .attach_fn = attach_raw_tp),
8257 SEC_DEF("tp_btf/", TRACING,
8258 .expected_attach_type = BPF_TRACE_RAW_TP,
8259 .is_attach_btf = true,
8260 .attach_fn = attach_trace),
8261 SEC_DEF("fentry/", TRACING,
8262 .expected_attach_type = BPF_TRACE_FENTRY,
8263 .is_attach_btf = true,
8264 .attach_fn = attach_trace),
8265 SEC_DEF("fmod_ret/", TRACING,
8266 .expected_attach_type = BPF_MODIFY_RETURN,
8267 .is_attach_btf = true,
8268 .attach_fn = attach_trace),
8269 SEC_DEF("fexit/", TRACING,
8270 .expected_attach_type = BPF_TRACE_FEXIT,
8271 .is_attach_btf = true,
8272 .attach_fn = attach_trace),
8273 SEC_DEF("fentry.s/", TRACING,
8274 .expected_attach_type = BPF_TRACE_FENTRY,
8275 .is_attach_btf = true,
8276 .is_sleepable = true,
8277 .attach_fn = attach_trace),
8278 SEC_DEF("fmod_ret.s/", TRACING,
8279 .expected_attach_type = BPF_MODIFY_RETURN,
8280 .is_attach_btf = true,
8281 .is_sleepable = true,
8282 .attach_fn = attach_trace),
8283 SEC_DEF("fexit.s/", TRACING,
8284 .expected_attach_type = BPF_TRACE_FEXIT,
8285 .is_attach_btf = true,
8286 .is_sleepable = true,
8287 .attach_fn = attach_trace),
8288 SEC_DEF("freplace/", EXT,
8289 .is_attach_btf = true,
8290 .attach_fn = attach_trace),
8291 SEC_DEF("lsm/", LSM,
8292 .is_attach_btf = true,
8293 .expected_attach_type = BPF_LSM_MAC,
8294 .attach_fn = attach_lsm),
8295 SEC_DEF("lsm.s/", LSM,
8296 .is_attach_btf = true,
8297 .is_sleepable = true,
8298 .expected_attach_type = BPF_LSM_MAC,
8299 .attach_fn = attach_lsm),
8300 SEC_DEF("iter/", TRACING,
8301 .expected_attach_type = BPF_TRACE_ITER,
8302 .is_attach_btf = true,
8303 .attach_fn = attach_iter),
8304 BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP,
8305 BPF_XDP_DEVMAP),
8306 BPF_EAPROG_SEC("xdp_cpumap/", BPF_PROG_TYPE_XDP,
8307 BPF_XDP_CPUMAP),
8308 BPF_APROG_SEC("xdp", BPF_PROG_TYPE_XDP,
8309 BPF_XDP),
8310 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
8311 BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
8312 BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
8313 BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
8314 BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
8315 BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
8316 BPF_CGROUP_INET_INGRESS),
8317 BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
8318 BPF_CGROUP_INET_EGRESS),
8319 BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
8320 BPF_EAPROG_SEC("cgroup/sock_create", BPF_PROG_TYPE_CGROUP_SOCK,
8321 BPF_CGROUP_INET_SOCK_CREATE),
8322 BPF_EAPROG_SEC("cgroup/sock_release", BPF_PROG_TYPE_CGROUP_SOCK,
8323 BPF_CGROUP_INET_SOCK_RELEASE),
8324 BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
8325 BPF_CGROUP_INET_SOCK_CREATE),
8326 BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
8327 BPF_CGROUP_INET4_POST_BIND),
8328 BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
8329 BPF_CGROUP_INET6_POST_BIND),
8330 BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
8331 BPF_CGROUP_DEVICE),
8332 BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
8333 BPF_CGROUP_SOCK_OPS),
8334 BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
8335 BPF_SK_SKB_STREAM_PARSER),
8336 BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
8337 BPF_SK_SKB_STREAM_VERDICT),
8338 BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
8339 BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
8340 BPF_SK_MSG_VERDICT),
8341 BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
8342 BPF_LIRC_MODE2),
8343 BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
8344 BPF_FLOW_DISSECTOR),
8345 BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8346 BPF_CGROUP_INET4_BIND),
8347 BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8348 BPF_CGROUP_INET6_BIND),
8349 BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8350 BPF_CGROUP_INET4_CONNECT),
8351 BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8352 BPF_CGROUP_INET6_CONNECT),
8353 BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8354 BPF_CGROUP_UDP4_SENDMSG),
8355 BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8356 BPF_CGROUP_UDP6_SENDMSG),
8357 BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8358 BPF_CGROUP_UDP4_RECVMSG),
8359 BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8360 BPF_CGROUP_UDP6_RECVMSG),
8361 BPF_EAPROG_SEC("cgroup/getpeername4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8362 BPF_CGROUP_INET4_GETPEERNAME),
8363 BPF_EAPROG_SEC("cgroup/getpeername6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8364 BPF_CGROUP_INET6_GETPEERNAME),
8365 BPF_EAPROG_SEC("cgroup/getsockname4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8366 BPF_CGROUP_INET4_GETSOCKNAME),
8367 BPF_EAPROG_SEC("cgroup/getsockname6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8368 BPF_CGROUP_INET6_GETSOCKNAME),
8369 BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL,
8370 BPF_CGROUP_SYSCTL),
8371 BPF_EAPROG_SEC("cgroup/getsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
8372 BPF_CGROUP_GETSOCKOPT),
8373 BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
8374 BPF_CGROUP_SETSOCKOPT),
8375 BPF_PROG_SEC("struct_ops", BPF_PROG_TYPE_STRUCT_OPS),
8376 BPF_EAPROG_SEC("sk_lookup/", BPF_PROG_TYPE_SK_LOOKUP,
8377 BPF_SK_LOOKUP),
8378 };
8379
8380 #undef BPF_PROG_SEC_IMPL
8381 #undef BPF_PROG_SEC
8382 #undef BPF_APROG_SEC
8383 #undef BPF_EAPROG_SEC
8384 #undef BPF_APROG_COMPAT
8385 #undef SEC_DEF
8386
8387 #define MAX_TYPE_NAME_SIZE 32
8388
8389 static const struct bpf_sec_def *find_sec_def(const char *sec_name)
8390 {
8391 int i, n = ARRAY_SIZE(section_defs);
8392
8393 for (i = 0; i < n; i++) {
8394 if (strncmp(sec_name,
8395 section_defs[i].sec, section_defs[i].len))
8396 continue;
8397 return &section_defs[i];
8398 }
8399 return NULL;
8400 }
8401
8402 static char *libbpf_get_type_names(bool attach_type)
8403 {
8404 int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
8405 char *buf;
8406
8407 buf = malloc(len);
8408 if (!buf)
8409 return NULL;
8410
8411 buf[0] = '\0';
8412 /* Build string buf with all available names */
8413 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
8414 if (attach_type && !section_defs[i].is_attachable)
8415 continue;
8416
8417 if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
8418 free(buf);
8419 return NULL;
8420 }
8421 strcat(buf, " ");
8422 strcat(buf, section_defs[i].sec);
8423 }
8424
8425 return buf;
8426 }
8427
8428 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
8429 enum bpf_attach_type *expected_attach_type)
8430 {
8431 const struct bpf_sec_def *sec_def;
8432 char *type_names;
8433
8434 if (!name)
8435 return -EINVAL;
8436
8437 sec_def = find_sec_def(name);
8438 if (sec_def) {
8439 *prog_type = sec_def->prog_type;
8440 *expected_attach_type = sec_def->expected_attach_type;
8441 return 0;
8442 }
8443
8444 pr_debug("failed to guess program type from ELF section '%s'\n", name);
8445 type_names = libbpf_get_type_names(false);
8446 if (type_names != NULL) {
8447 pr_debug("supported section(type) names are:%s\n", type_names);
8448 free(type_names);
8449 }
8450
8451 return -ESRCH;
8452 }
8453
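/* Example: resolving both types from a section name (the section name
 * here is hypothetical):
 *
 *	enum bpf_prog_type ptype;
 *	enum bpf_attach_type atype;
 *	int err;
 *
 *	err = libbpf_prog_type_by_name("tracepoint/sched/sched_switch",
 *				       &ptype, &atype);
 *
 * On success, ptype is BPF_PROG_TYPE_TRACEPOINT in this case.
 */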
8454 static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
8455 size_t offset)
8456 {
8457 struct bpf_map *map;
8458 size_t i;
8459
8460 for (i = 0; i < obj->nr_maps; i++) {
8461 map = &obj->maps[i];
8462 if (!bpf_map__is_struct_ops(map))
8463 continue;
8464 if (map->sec_offset <= offset &&
8465 offset - map->sec_offset < map->def.value_size)
8466 return map;
8467 }
8468
8469 return NULL;
8470 }
8471
8472 /* Collect the reloc from ELF and populate the st_ops->progs[] */
8473 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
8474 GElf_Shdr *shdr, Elf_Data *data)
8475 {
8476 const struct btf_member *member;
8477 struct bpf_struct_ops *st_ops;
8478 struct bpf_program *prog;
8479 unsigned int shdr_idx;
8480 const struct btf *btf;
8481 struct bpf_map *map;
8482 Elf_Data *symbols;
8483 unsigned int moff, insn_idx;
8484 const char *name;
8485 __u32 member_idx;
8486 GElf_Sym sym;
8487 GElf_Rel rel;
8488 int i, nrels;
8489
8490 symbols = obj->efile.symbols;
8491 btf = obj->btf;
8492 nrels = shdr->sh_size / shdr->sh_entsize;
8493 for (i = 0; i < nrels; i++) {
8494 if (!gelf_getrel(data, i, &rel)) {
8495 pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
8496 return -LIBBPF_ERRNO__FORMAT;
8497 }
8498
8499 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
8500 pr_warn("struct_ops reloc: symbol %zx not found\n",
8501 (size_t)GELF_R_SYM(rel.r_info));
8502 return -LIBBPF_ERRNO__FORMAT;
8503 }
8504
8505 name = elf_sym_str(obj, sym.st_name) ?: "<?>";
8506 map = find_struct_ops_map_by_offset(obj, rel.r_offset);
8507 if (!map) {
8508 pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n",
8509 (size_t)rel.r_offset);
8510 return -EINVAL;
8511 }
8512
8513 moff = rel.r_offset - map->sec_offset;
8514 shdr_idx = sym.st_shndx;
8515 st_ops = map->st_ops;
8516 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
8517 map->name,
8518 (long long)(rel.r_info >> 32),
8519 (long long)sym.st_value,
8520 shdr_idx, (size_t)rel.r_offset,
8521 map->sec_offset, sym.st_name, name);
8522
8523 if (shdr_idx >= SHN_LORESERVE) {
8524 pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n",
8525 map->name, (size_t)rel.r_offset, shdr_idx);
8526 return -LIBBPF_ERRNO__RELOC;
8527 }
8528 if (sym.st_value % BPF_INSN_SZ) {
8529 pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
8530 map->name, (unsigned long long)sym.st_value);
8531 return -LIBBPF_ERRNO__FORMAT;
8532 }
8533 insn_idx = sym.st_value / BPF_INSN_SZ;
8534
8535 member = find_member_by_offset(st_ops->type, moff * 8);
8536 if (!member) {
8537 pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
8538 map->name, moff);
8539 return -EINVAL;
8540 }
8541 member_idx = member - btf_members(st_ops->type);
8542 name = btf__name_by_offset(btf, member->name_off);
8543
8544 if (!resolve_func_ptr(btf, member->type, NULL)) {
8545 pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
8546 map->name, name);
8547 return -EINVAL;
8548 }
8549
8550 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
8551 if (!prog) {
8552 pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
8553 map->name, shdr_idx, name);
8554 return -EINVAL;
8555 }
8556
8557 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
8558 const struct bpf_sec_def *sec_def;
8559
8560 sec_def = find_sec_def(prog->sec_name);
8561 if (sec_def &&
8562 sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) {
8563 /* for pr_warn */
8564 prog->type = sec_def->prog_type;
8565 goto invalid_prog;
8566 }
8567
8568 prog->type = BPF_PROG_TYPE_STRUCT_OPS;
8569 prog->attach_btf_id = st_ops->type_id;
8570 prog->expected_attach_type = member_idx;
8571 } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
8572 prog->attach_btf_id != st_ops->type_id ||
8573 prog->expected_attach_type != member_idx) {
8574 goto invalid_prog;
8575 }
8576 st_ops->progs[member_idx] = prog;
8577 }
8578
8579 return 0;
8580
8581 invalid_prog:
8582 pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
8583 map->name, prog->name, prog->sec_name, prog->type,
8584 prog->attach_btf_id, prog->expected_attach_type, name);
8585 return -EINVAL;
8586 }
8587
8588 #define BTF_TRACE_PREFIX "btf_trace_"
8589 #define BTF_LSM_PREFIX "bpf_lsm_"
8590 #define BTF_ITER_PREFIX "bpf_iter_"
8591 #define BTF_MAX_NAME_SIZE 128
8592
8593 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
8594 const char *name, __u32 kind)
8595 {
8596 char btf_type_name[BTF_MAX_NAME_SIZE];
8597 int ret;
8598
8599 ret = snprintf(btf_type_name, sizeof(btf_type_name),
8600 "%s%s", prefix, name);
8601 /* snprintf returns the number of characters written excluding the
8602 * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it
8603 * indicates truncation.
8604 */
8605 if (ret < 0 || ret >= sizeof(btf_type_name))
8606 return -ENAMETOOLONG;
8607 return btf__find_by_name_kind(btf, btf_type_name, kind);
8608 }
8609
8610 static inline int __find_vmlinux_btf_id(struct btf *btf, const char *name,
8611 enum bpf_attach_type attach_type)
8612 {
8613 int err;
8614
8615 if (attach_type == BPF_TRACE_RAW_TP)
8616 err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name,
8617 BTF_KIND_TYPEDEF);
8618 else if (attach_type == BPF_LSM_MAC)
8619 err = find_btf_by_prefix_kind(btf, BTF_LSM_PREFIX, name,
8620 BTF_KIND_FUNC);
8621 else if (attach_type == BPF_TRACE_ITER)
8622 err = find_btf_by_prefix_kind(btf, BTF_ITER_PREFIX, name,
8623 BTF_KIND_FUNC);
8624 else
8625 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
8626
8627 if (err <= 0)
8628 pr_warn("%s is not found in vmlinux BTF\n", name);
8629
8630 return err;
8631 }
8632
8633 int libbpf_find_vmlinux_btf_id(const char *name,
8634 enum bpf_attach_type attach_type)
8635 {
8636 struct btf *btf;
8637 int err;
8638
8639 btf = libbpf_find_kernel_btf();
8640 if (IS_ERR(btf)) {
8641 pr_warn("vmlinux BTF is not found\n");
8642 return -EINVAL;
8643 }
8644
8645 err = __find_vmlinux_btf_id(btf, name, attach_type);
8646 btf__free(btf);
8647 return err;
8648 }
8649
8650 static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
8651 {
8652 struct bpf_prog_info_linear *info_linear;
8653 struct bpf_prog_info *info;
8654 struct btf *btf = NULL;
8655 int err = -EINVAL;
8656
8657 info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
8658 if (IS_ERR_OR_NULL(info_linear)) {
8659 pr_warn("failed get_prog_info_linear for FD %d\n",
8660 attach_prog_fd);
8661 return -EINVAL;
8662 }
8663 info = &info_linear->info;
8664 if (!info->btf_id) {
8665 pr_warn("The target program doesn't have BTF\n");
8666 goto out;
8667 }
8668 if (btf__get_from_id(info->btf_id, &btf)) {
8669 pr_warn("Failed to get BTF of the program\n");
8670 goto out;
8671 }
8672 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
8673 btf__free(btf);
8674 if (err <= 0) {
8675 pr_warn("%s is not found in prog's BTF\n", name);
8676 goto out;
8677 }
8678 out:
8679 free(info_linear);
8680 return err;
8681 }
8682
8683 static int libbpf_find_attach_btf_id(struct bpf_program *prog)
8684 {
8685 enum bpf_attach_type attach_type = prog->expected_attach_type;
8686 __u32 attach_prog_fd = prog->attach_prog_fd;
8687 const char *name = prog->sec_name;
8688 int i, err;
8689
8690 if (!name)
8691 return -EINVAL;
8692
8693 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
8694 if (!section_defs[i].is_attach_btf)
8695 continue;
8696 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
8697 continue;
8698 if (attach_prog_fd)
8699 err = libbpf_find_prog_btf_id(name + section_defs[i].len,
8700 attach_prog_fd);
8701 else
8702 err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
8703 name + section_defs[i].len,
8704 attach_type);
8705 return err;
8706 }
8707 pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name);
8708 return -ESRCH;
8709 }
8710
8711 int libbpf_attach_type_by_name(const char *name,
8712 enum bpf_attach_type *attach_type)
8713 {
8714 char *type_names;
8715 int i;
8716
8717 if (!name)
8718 return -EINVAL;
8719
8720 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
8721 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
8722 continue;
8723 if (!section_defs[i].is_attachable)
8724 return -EINVAL;
8725 *attach_type = section_defs[i].expected_attach_type;
8726 return 0;
8727 }
8728 pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
8729 type_names = libbpf_get_type_names(true);
8730 if (type_names != NULL) {
8731 pr_debug("attachable section(type) names are:%s\n", type_names);
8732 free(type_names);
8733 }
8734
8735 return -EINVAL;
8736 }
8737
8738 int bpf_map__fd(const struct bpf_map *map)
8739 {
8740 return map ? map->fd : -EINVAL;
8741 }
8742
8743 const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
8744 {
8745 return map ? &map->def : ERR_PTR(-EINVAL);
8746 }
8747
8748 const char *bpf_map__name(const struct bpf_map *map)
8749 {
8750 return map ? map->name : NULL;
8751 }
8752
8753 enum bpf_map_type bpf_map__type(const struct bpf_map *map)
8754 {
8755 return map->def.type;
8756 }
8757
8758 int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
8759 {
8760 if (map->fd >= 0)
8761 return -EBUSY;
8762 map->def.type = type;
8763 return 0;
8764 }
8765
8766 __u32 bpf_map__map_flags(const struct bpf_map *map)
8767 {
8768 return map->def.map_flags;
8769 }
8770
8771 int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
8772 {
8773 if (map->fd >= 0)
8774 return -EBUSY;
8775 map->def.map_flags = flags;
8776 return 0;
8777 }
8778
8779 __u32 bpf_map__numa_node(const struct bpf_map *map)
8780 {
8781 return map->numa_node;
8782 }
8783
8784 int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
8785 {
8786 if (map->fd >= 0)
8787 return -EBUSY;
8788 map->numa_node = numa_node;
8789 return 0;
8790 }
8791
8792 __u32 bpf_map__key_size(const struct bpf_map *map)
8793 {
8794 return map->def.key_size;
8795 }
8796
8797 int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
8798 {
8799 if (map->fd >= 0)
8800 return -EBUSY;
8801 map->def.key_size = size;
8802 return 0;
8803 }
8804
8805 __u32 bpf_map__value_size(const struct bpf_map *map)
8806 {
8807 return map->def.value_size;
8808 }
8809
8810 int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
8811 {
8812 if (map->fd >= 0)
8813 return -EBUSY;
8814 map->def.value_size = size;
8815 return 0;
8816 }
8817
8818 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
8819 {
8820 return map ? map->btf_key_type_id : 0;
8821 }
8822
8823 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
8824 {
8825 return map ? map->btf_value_type_id : 0;
8826 }
8827
8828 int bpf_map__set_priv(struct bpf_map *map, void *priv,
8829 bpf_map_clear_priv_t clear_priv)
8830 {
8831 if (!map)
8832 return -EINVAL;
8833
8834 if (map->priv) {
8835 if (map->clear_priv)
8836 map->clear_priv(map, map->priv);
8837 }
8838
8839 map->priv = priv;
8840 map->clear_priv = clear_priv;
8841 return 0;
8842 }
8843
8844 void *bpf_map__priv(const struct bpf_map *map)
8845 {
8846 return map ? map->priv : ERR_PTR(-EINVAL);
8847 }
8848
8849 int bpf_map__set_initial_value(struct bpf_map *map,
8850 const void *data, size_t size)
8851 {
8852 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
8853 size != map->def.value_size || map->fd >= 0)
8854 return -EINVAL;
8855
8856 memcpy(map->mmaped, data, size);
8857 return 0;
8858 }
8859
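/* Example: overriding an internal (e.g. .rodata) map's contents before
 * load; the struct layout here is hypothetical and must match the one
 * in the BPF object:
 *
 *	struct my_rodata { int debug_level; } init = { .debug_level = 2 };
 *
 *	err = bpf_map__set_initial_value(rodata_map, &init, sizeof(init));
 *
 * This only works between open and load, while map->fd is still < 0.
 */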
8860 bool bpf_map__is_offload_neutral(const struct bpf_map *map)
8861 {
8862 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
8863 }
8864
8865 bool bpf_map__is_internal(const struct bpf_map *map)
8866 {
8867 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
8868 }
8869
8870 __u32 bpf_map__ifindex(const struct bpf_map *map)
8871 {
8872 return map->map_ifindex;
8873 }
8874
8875 int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
8876 {
8877 if (map->fd >= 0)
8878 return -EBUSY;
8879 map->map_ifindex = ifindex;
8880 return 0;
8881 }
8882
8883 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
8884 {
8885 if (!bpf_map_type__is_map_in_map(map->def.type)) {
8886 pr_warn("error: unsupported map type\n");
8887 return -EINVAL;
8888 }
8889 if (map->inner_map_fd != -1) {
8890 pr_warn("error: inner_map_fd already specified\n");
8891 return -EINVAL;
8892 }
8893 map->inner_map_fd = fd;
8894 return 0;
8895 }
8896
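/* Example: supplying an inner map "template" FD for a map-in-map
 * before load (map parameters hypothetical):
 *
 *	int inner_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(__u32),
 *				      sizeof(__u32), 1, 0);
 *
 *	err = bpf_map__set_inner_map_fd(outer_map, inner_fd);
 */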
8897 static struct bpf_map *
8898 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
8899 {
8900 ssize_t idx;
8901 struct bpf_map *s, *e;
8902
8903 if (!obj || !obj->maps)
8904 return NULL;
8905
8906 s = obj->maps;
8907 e = obj->maps + obj->nr_maps;
8908
8909 if ((m < s) || (m >= e)) {
8910 pr_warn("error in %s: map handler doesn't belong to object\n",
8911 __func__);
8912 return NULL;
8913 }
8914
8915 idx = (m - obj->maps) + i;
8916 if (idx >= obj->nr_maps || idx < 0)
8917 return NULL;
8918 return &obj->maps[idx];
8919 }
8920
8921 struct bpf_map *
8922 bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
8923 {
8924 if (prev == NULL)
8925 return obj->maps;
8926
8927 return __bpf_map__iter(prev, obj, 1);
8928 }
8929
8930 struct bpf_map *
8931 bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
8932 {
8933 if (next == NULL) {
8934 if (!obj->nr_maps)
8935 return NULL;
8936 return obj->maps + obj->nr_maps - 1;
8937 }
8938
8939 return __bpf_map__iter(next, obj, -1);
8940 }
8941
8942 struct bpf_map *
8943 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
8944 {
8945 struct bpf_map *pos;
8946
8947 bpf_object__for_each_map(pos, obj) {
8948 if (pos->name && !strcmp(pos->name, name))
8949 return pos;
8950 }
8951 return NULL;
8952 }
8953
8954 int
8955 bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
8956 {
8957 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
8958 }
8959
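/* Example: looking up a map by name after load and updating an element
 * (map name and key/value types hypothetical):
 *
 *	struct bpf_map *map;
 *	__u32 key = 0, val = 42;
 *
 *	map = bpf_object__find_map_by_name(obj, "my_map");
 *	if (map)
 *		err = bpf_map_update_elem(bpf_map__fd(map), &key, &val,
 *					  BPF_ANY);
 */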
8960 struct bpf_map *
8961 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
8962 {
8963 return ERR_PTR(-ENOTSUP);
8964 }
8965
8966 long libbpf_get_error(const void *ptr)
8967 {
8968 return PTR_ERR_OR_ZERO(ptr);
8969 }
8970
8971 int bpf_prog_load(const char *file, enum bpf_prog_type type,
8972 struct bpf_object **pobj, int *prog_fd)
8973 {
8974 struct bpf_prog_load_attr attr;
8975
8976 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
8977 attr.file = file;
8978 attr.prog_type = type;
8979 attr.expected_attach_type = 0;
8980
8981 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
8982 }
8983
bpf_prog_load_xattr(const struct bpf_prog_load_attr * attr,struct bpf_object ** pobj,int * prog_fd)8984 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
8985 struct bpf_object **pobj, int *prog_fd)
8986 {
8987 struct bpf_object_open_attr open_attr = {};
8988 struct bpf_program *prog, *first_prog = NULL;
8989 struct bpf_object *obj;
8990 struct bpf_map *map;
8991 int err;
8992
8993 if (!attr)
8994 return -EINVAL;
8995 if (!attr->file)
8996 return -EINVAL;
8997
8998 open_attr.file = attr->file;
8999 open_attr.prog_type = attr->prog_type;
9000
9001 obj = bpf_object__open_xattr(&open_attr);
9002 if (IS_ERR_OR_NULL(obj))
9003 return -ENOENT;
9004
9005 bpf_object__for_each_program(prog, obj) {
9006 enum bpf_attach_type attach_type = attr->expected_attach_type;
9007 /*
9008 * to preserve backwards compatibility, bpf_prog_load treats
9009 * attr->prog_type, if specified, as an override to whatever
9010 * bpf_object__open guessed
9011 */
9012 if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
9013 bpf_program__set_type(prog, attr->prog_type);
9014 bpf_program__set_expected_attach_type(prog,
9015 attach_type);
9016 }
9017 if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
9018 /*
9019 * we haven't guessed from section name and user
9020 * didn't provide a fallback type, too bad...
9021 */
9022 bpf_object__close(obj);
9023 return -EINVAL;
9024 }
9025
9026 prog->prog_ifindex = attr->ifindex;
9027 prog->log_level = attr->log_level;
9028 prog->prog_flags |= attr->prog_flags;
9029 if (!first_prog)
9030 first_prog = prog;
9031 }
9032
9033 bpf_object__for_each_map(map, obj) {
9034 if (!bpf_map__is_offload_neutral(map))
9035 map->map_ifindex = attr->ifindex;
9036 }
9037
9038 if (!first_prog) {
9039 pr_warn("object file doesn't contain bpf program\n");
9040 bpf_object__close(obj);
9041 return -ENOENT;
9042 }
9043
9044 err = bpf_object__load(obj);
9045 if (err) {
9046 bpf_object__close(obj);
9047 return err;
9048 }
9049
9050 *pobj = obj;
9051 *prog_fd = bpf_program__fd(first_prog);
9052 return 0;
9053 }
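
/* Usage sketch (illustrative only; "prog.o" and the XDP type are
 * assumptions, not part of this file): load an object file and get the FD
 * of its first program.
 *
 *	struct bpf_object *obj;
 *	int prog_fd;
 *
 *	if (bpf_prog_load("prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd))
 *		return -1;
 *	// ... use prog_fd, e.g. attach it somewhere ...
 *	bpf_object__close(obj);
 */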

struct bpf_link {
	int (*detach)(struct bpf_link *link);
	int (*destroy)(struct bpf_link *link);
	char *pin_path;		/* NULL, if not pinned */
	int fd;			/* hook FD, -1 if not applicable */
	bool disconnected;
};

/* Replace link's underlying BPF program with the new one */
int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
{
	return bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
}

/* Release "ownership" of the underlying BPF resource (typically, a BPF
 * program attached to some BPF hook, e.g., tracepoint, kprobe, etc). A
 * disconnected link, when destructed through bpf_link__destroy(), won't
 * attempt to detach/unregister that BPF resource. This is useful in
 * situations where, say, an attached BPF program has to outlive the
 * userspace program that attached it in the system. Depending on the type
 * of BPF program, though, there might be additional steps (like pinning
 * BPF program in BPF FS) necessary to ensure that exit of the userspace
 * program doesn't trigger automatic detachment and clean up inside the
 * kernel.
 */
void bpf_link__disconnect(struct bpf_link *link)
{
	link->disconnected = true;
}
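
/* Usage sketch (illustrative; the pin path is an assumption): for link
 * types backed by a kernel bpf_link FD (e.g., raw tracepoints), keep the
 * program attached after process exit by pinning the link and then
 * disconnecting the userspace handle before destroying it.
 *
 *	if (bpf_link__pin(link, "/sys/fs/bpf/my_link"))
 *		return -1;
 *	bpf_link__disconnect(link);
 *	bpf_link__destroy(link);	// frees memory, leaves hook attached
 */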

int bpf_link__destroy(struct bpf_link *link)
{
	int err = 0;

	if (IS_ERR_OR_NULL(link))
		return 0;

	if (!link->disconnected && link->detach)
		err = link->detach(link);
	if (link->destroy)
		link->destroy(link);
	if (link->pin_path)
		free(link->pin_path);
	free(link);

	return err;
}

int bpf_link__fd(const struct bpf_link *link)
{
	return link->fd;
}

const char *bpf_link__pin_path(const struct bpf_link *link)
{
	return link->pin_path;
}

static int bpf_link__detach_fd(struct bpf_link *link)
{
	return close(link->fd);
}

struct bpf_link *bpf_link__open(const char *path)
{
	struct bpf_link *link;
	int fd;

	fd = bpf_obj_get(path);
	if (fd < 0) {
		fd = -errno;
		pr_warn("failed to open link at %s: %d\n", path, fd);
		return ERR_PTR(fd);
	}

	link = calloc(1, sizeof(*link));
	if (!link) {
		close(fd);
		return ERR_PTR(-ENOMEM);
	}
	link->detach = &bpf_link__detach_fd;
	link->fd = fd;

	link->pin_path = strdup(path);
	if (!link->pin_path) {
		bpf_link__destroy(link);
		return ERR_PTR(-ENOMEM);
	}

	return link;
}

int bpf_link__detach(struct bpf_link *link)
{
	return bpf_link_detach(link->fd) ? -errno : 0;
}

int bpf_link__pin(struct bpf_link *link, const char *path)
{
	int err;

	if (link->pin_path)
		return -EBUSY;
	err = make_parent_dir(path);
	if (err)
		return err;
	err = check_path(path);
	if (err)
		return err;

	link->pin_path = strdup(path);
	if (!link->pin_path)
		return -ENOMEM;

	if (bpf_obj_pin(link->fd, link->pin_path)) {
		err = -errno;
		zfree(&link->pin_path);
		return err;
	}

	pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
	return 0;
}

int bpf_link__unpin(struct bpf_link *link)
{
	int err;

	if (!link->pin_path)
		return -EINVAL;

	err = unlink(link->pin_path);
	if (err != 0)
		return -errno;

	pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
	zfree(&link->pin_path);
	return 0;
}

static int bpf_link__detach_perf_event(struct bpf_link *link)
{
	int err;

	err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0);
	if (err)
		err = -errno;

	close(link->fd);
	return err;
}

struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
						int pfd)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int prog_fd, err;

	if (pfd < 0) {
		pr_warn("prog '%s': invalid perf event FD %d\n",
			prog->name, pfd);
		return ERR_PTR(-EINVAL);
	}
	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
			prog->name);
		return ERR_PTR(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);
	link->detach = &bpf_link__detach_perf_event;
	link->fd = pfd;

	if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
		err = -errno;
		free(link);
		pr_warn("prog '%s': failed to attach to pfd %d: %s\n",
			prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		if (err == -EPROTO)
			pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
				prog->name, pfd);
		return ERR_PTR(err);
	}
	if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
		err = -errno;
		free(link);
		pr_warn("prog '%s': failed to enable pfd %d: %s\n",
			prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return ERR_PTR(err);
	}
	return link;
}
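
/* Usage sketch (illustrative; the attr setup is an assumption): attach a
 * loaded program to a perf event FD obtained from perf_event_open(2).
 *
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_HARDWARE,
 *		.size = sizeof(attr),
 *		.config = PERF_COUNT_HW_CPU_CYCLES,
 *		.sample_freq = 99,
 *		.freq = 1,
 *	};
 *	int pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1,
 *			  PERF_FLAG_FD_CLOEXEC);
 *	struct bpf_link *link = bpf_program__attach_perf_event(prog, pfd);
 *
 *	if (libbpf_get_error(link))
 *		close(pfd);
 */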

/*
 * Parse an integer in the range [0, 2^31-1] from the given file, using
 * scanf format string fmt. If the actual parsed value is negative, the
 * result is indistinguishable from an error.
 */
static int parse_uint_from_file(const char *file, const char *fmt)
{
	char buf[STRERR_BUFSIZE];
	int err, ret;
	FILE *f;

	f = fopen(file, "r");
	if (!f) {
		err = -errno;
		pr_debug("failed to open '%s': %s\n", file,
			 libbpf_strerror_r(err, buf, sizeof(buf)));
		return err;
	}
	err = fscanf(f, fmt, &ret);
	if (err != 1) {
		err = err == EOF ? -EIO : -errno;
		pr_debug("failed to parse '%s': %s\n", file,
			 libbpf_strerror_r(err, buf, sizeof(buf)));
		fclose(f);
		return err;
	}
	fclose(f);
	return ret;
}

static int determine_kprobe_perf_type(void)
{
	const char *file = "/sys/bus/event_source/devices/kprobe/type";

	return parse_uint_from_file(file, "%d\n");
}

static int determine_uprobe_perf_type(void)
{
	const char *file = "/sys/bus/event_source/devices/uprobe/type";

	return parse_uint_from_file(file, "%d\n");
}

static int determine_kprobe_retprobe_bit(void)
{
	const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";

	return parse_uint_from_file(file, "config:%d\n");
}

static int determine_uprobe_retprobe_bit(void)
{
	const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";

	return parse_uint_from_file(file, "config:%d\n");
}

static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
				 uint64_t offset, int pid)
{
	struct perf_event_attr attr = {};
	char errmsg[STRERR_BUFSIZE];
	int type, pfd, err;

	type = uprobe ? determine_uprobe_perf_type()
		      : determine_kprobe_perf_type();
	if (type < 0) {
		pr_warn("failed to determine %s perf type: %s\n",
			uprobe ? "uprobe" : "kprobe",
			libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
		return type;
	}
	if (retprobe) {
		int bit = uprobe ? determine_uprobe_retprobe_bit()
				 : determine_kprobe_retprobe_bit();

		if (bit < 0) {
			pr_warn("failed to determine %s retprobe bit: %s\n",
				uprobe ? "uprobe" : "kprobe",
				libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
			return bit;
		}
		attr.config |= 1 << bit;
	}
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
	attr.config2 = offset;		 /* kprobe_addr or probe_offset */

	/* pid filter is meaningful only for uprobes */
	pfd = syscall(__NR_perf_event_open, &attr,
		      pid < 0 ? -1 : pid /* pid */,
		      pid == -1 ? 0 : -1 /* cpu */,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0) {
		err = -errno;
		pr_warn("%s perf_event_open() failed: %s\n",
			uprobe ? "uprobe" : "kprobe",
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return err;
	}
	return pfd;
}

struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
					    bool retprobe,
					    const char *func_name)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int pfd, err;

	pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
				    0 /* offset */, -1 /* pid */);
	if (pfd < 0) {
		pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
			prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link = bpf_program__attach_perf_event(prog, pfd);
	if (IS_ERR(link)) {
		close(pfd);
		err = PTR_ERR(link);
		pr_warn("prog '%s': failed to attach to %s '%s': %s\n",
			prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return link;
	}
	return link;
}
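
/* Usage sketch (illustrative; the probed kernel symbol is an assumption):
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe(prog, false, "do_sys_openat2");
 *	if (libbpf_get_error(link))
 *		return -1;
 *
 * Passing true instead of false requests a kretprobe on the same symbol.
 */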

static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
				      struct bpf_program *prog)
{
	const char *func_name;
	bool retprobe;

	func_name = prog->sec_name + sec->len;
	retprobe = strcmp(sec->sec, "kretprobe/") == 0;

	return bpf_program__attach_kprobe(prog, retprobe, func_name);
}

struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
					    bool retprobe, pid_t pid,
					    const char *binary_path,
					    size_t func_offset)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int pfd, err;

	pfd = perf_event_open_probe(true /* uprobe */, retprobe,
				    binary_path, func_offset, pid);
	if (pfd < 0) {
		pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
			prog->name, retprobe ? "uretprobe" : "uprobe",
			binary_path, func_offset,
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link = bpf_program__attach_perf_event(prog, pfd);
	if (IS_ERR(link)) {
		close(pfd);
		err = PTR_ERR(link);
		pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
			prog->name, retprobe ? "uretprobe" : "uprobe",
			binary_path, func_offset,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return link;
	}
	return link;
}
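
/* Usage sketch (illustrative; the binary path, the 0x1234 offset and the
 * pid of -1 are assumptions): attach a uprobe at a fixed offset in libc
 * for all processes.
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_uprobe(prog, false, -1,
 *					  "/lib/x86_64-linux-gnu/libc.so.6",
 *					  0x1234);
 *	if (libbpf_get_error(link))
 *		return -1;
 */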

static int determine_tracepoint_id(const char *tp_category,
				   const char *tp_name)
{
	char file[PATH_MAX];
	int ret;

	ret = snprintf(file, sizeof(file),
		       "/sys/kernel/debug/tracing/events/%s/%s/id",
		       tp_category, tp_name);
	if (ret < 0)
		return -errno;
	if (ret >= sizeof(file)) {
		pr_debug("tracepoint %s/%s path is too long\n",
			 tp_category, tp_name);
		return -E2BIG;
	}
	return parse_uint_from_file(file, "%d\n");
}

static int perf_event_open_tracepoint(const char *tp_category,
				      const char *tp_name)
{
	struct perf_event_attr attr = {};
	char errmsg[STRERR_BUFSIZE];
	int tp_id, pfd, err;

	tp_id = determine_tracepoint_id(tp_category, tp_name);
	if (tp_id < 0) {
		pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
			tp_category, tp_name,
			libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
		return tp_id;
	}

	attr.type = PERF_TYPE_TRACEPOINT;
	attr.size = sizeof(attr);
	attr.config = tp_id;

	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0) {
		err = -errno;
		pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
			tp_category, tp_name,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return err;
	}
	return pfd;
}

struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
						const char *tp_category,
						const char *tp_name)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int pfd, err;

	pfd = perf_event_open_tracepoint(tp_category, tp_name);
	if (pfd < 0) {
		pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
			prog->name, tp_category, tp_name,
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link = bpf_program__attach_perf_event(prog, pfd);
	if (IS_ERR(link)) {
		close(pfd);
		err = PTR_ERR(link);
		pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
			prog->name, tp_category, tp_name,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return link;
	}
	return link;
}
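
/* Usage sketch (illustrative; the tracepoint category/name pair is an
 * assumption):
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_tracepoint(prog, "syscalls",
 *					      "sys_enter_openat");
 *	if (libbpf_get_error(link))
 *		return -1;
 */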

static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
				  struct bpf_program *prog)
{
	char *sec_name, *tp_cat, *tp_name;
	struct bpf_link *link;

	sec_name = strdup(prog->sec_name);
	if (!sec_name)
		return ERR_PTR(-ENOMEM);

	/* extract "tp/<category>/<name>" */
	tp_cat = sec_name + sec->len;
	tp_name = strchr(tp_cat, '/');
	if (!tp_name) {
		link = ERR_PTR(-EINVAL);
		goto out;
	}
	*tp_name = '\0';
	tp_name++;

	link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
out:
	free(sec_name);
	return link;
}

struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
						    const char *tp_name)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int prog_fd, pfd;

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
		return ERR_PTR(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);
	link->detach = &bpf_link__detach_fd;

	pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
	if (pfd < 0) {
		pfd = -errno;
		free(link);
		pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
			prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link->fd = pfd;
	return link;
}

static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
				      struct bpf_program *prog)
{
	const char *tp_name = prog->sec_name + sec->len;

	return bpf_program__attach_raw_tracepoint(prog, tp_name);
}

/* Common logic for all BPF program types that attach to a btf_id */
static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int prog_fd, pfd;

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
		return ERR_PTR(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);
	link->detach = &bpf_link__detach_fd;

	pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
	if (pfd < 0) {
		pfd = -errno;
		free(link);
		pr_warn("prog '%s': failed to attach: %s\n",
			prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return ERR_PTR(pfd);
	}
	link->fd = pfd;
	return (struct bpf_link *)link;
}

struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
{
	return bpf_program__attach_btf_id(prog);
}

struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog)
{
	return bpf_program__attach_btf_id(prog);
}

static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
				     struct bpf_program *prog)
{
	return bpf_program__attach_trace(prog);
}

static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
				   struct bpf_program *prog)
{
	return bpf_program__attach_lsm(prog);
}

static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
				    struct bpf_program *prog)
{
	return bpf_program__attach_iter(prog, NULL);
}

static struct bpf_link *
bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id,
		       const char *target_name)
{
	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
			    .target_btf_id = btf_id);
	enum bpf_attach_type attach_type;
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int prog_fd, link_fd;

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
		return ERR_PTR(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);
	link->detach = &bpf_link__detach_fd;

	attach_type = bpf_program__get_expected_attach_type(prog);
	link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts);
	if (link_fd < 0) {
		link_fd = -errno;
		free(link);
		pr_warn("prog '%s': failed to attach to %s: %s\n",
			prog->name, target_name,
			libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
		return ERR_PTR(link_fd);
	}
	link->fd = link_fd;
	return link;
}

struct bpf_link *
bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
{
	return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
}

struct bpf_link *
bpf_program__attach_netns(struct bpf_program *prog, int netns_fd)
{
	return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
}

struct bpf_link *bpf_program__attach_xdp(struct bpf_program *prog, int ifindex)
{
	/* target_fd/target_ifindex use the same field in LINK_CREATE */
	return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
}

struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog,
					      int target_fd,
					      const char *attach_func_name)
{
	int btf_id;

	if (!!target_fd != !!attach_func_name) {
		pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
			prog->name);
		return ERR_PTR(-EINVAL);
	}

	if (prog->type != BPF_PROG_TYPE_EXT) {
		pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
			prog->name);
		return ERR_PTR(-EINVAL);
	}

	if (target_fd) {
		btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
		if (btf_id < 0)
			return ERR_PTR(btf_id);

		return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace");
	} else {
		/* no target, so use raw_tracepoint_open for compatibility
		 * with old kernels
		 */
		return bpf_program__attach_trace(prog);
	}
}
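
/* Usage sketch (illustrative; the target program FD and the function name
 * "xdp_dispatcher_fn" are assumptions): replace one function of an
 * already-loaded target program with a BPF_PROG_TYPE_EXT program.
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_freplace(prog, target_prog_fd,
 *					    "xdp_dispatcher_fn");
 *	if (libbpf_get_error(link))
 *		return -1;
 */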

struct bpf_link *
bpf_program__attach_iter(struct bpf_program *prog,
			 const struct bpf_iter_attach_opts *opts)
{
	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int prog_fd, link_fd;
	__u32 target_fd = 0;

	if (!OPTS_VALID(opts, bpf_iter_attach_opts))
		return ERR_PTR(-EINVAL);

	link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
	link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
		return ERR_PTR(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return ERR_PTR(-ENOMEM);
	link->detach = &bpf_link__detach_fd;

	link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
				  &link_create_opts);
	if (link_fd < 0) {
		link_fd = -errno;
		free(link);
		pr_warn("prog '%s': failed to attach to iterator: %s\n",
			prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
		return ERR_PTR(link_fd);
	}
	link->fd = link_fd;
	return link;
}

struct bpf_link *bpf_program__attach(struct bpf_program *prog)
{
	const struct bpf_sec_def *sec_def;

	sec_def = find_sec_def(prog->sec_name);
	if (!sec_def || !sec_def->attach_fn)
		return ERR_PTR(-ESRCH);

	return sec_def->attach_fn(sec_def, prog);
}
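
/* Usage sketch (illustrative; the section name is an assumption):
 * auto-attach based on the program's SEC() name, e.g. for a program
 * defined with SEC("kprobe/do_sys_openat2") in BPF source.
 *
 *	struct bpf_link *link = bpf_program__attach(prog);
 *
 *	if (libbpf_get_error(link))
 *		return -1;
 */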

static int bpf_link__detach_struct_ops(struct bpf_link *link)
{
	__u32 zero = 0;

	if (bpf_map_delete_elem(link->fd, &zero))
		return -errno;

	return 0;
}

struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
{
	struct bpf_struct_ops *st_ops;
	struct bpf_link *link;
	__u32 i, zero = 0;
	int err;

	if (!bpf_map__is_struct_ops(map) || map->fd == -1)
		return ERR_PTR(-EINVAL);

	link = calloc(1, sizeof(*link));
	if (!link)
		return ERR_PTR(-EINVAL);

	st_ops = map->st_ops;
	for (i = 0; i < btf_vlen(st_ops->type); i++) {
		struct bpf_program *prog = st_ops->progs[i];
		void *kern_data;
		int prog_fd;

		if (!prog)
			continue;

		prog_fd = bpf_program__fd(prog);
		kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
		*(unsigned long *)kern_data = prog_fd;
	}

	err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
	if (err) {
		err = -errno;
		free(link);
		return ERR_PTR(err);
	}

	link->detach = bpf_link__detach_struct_ops;
	link->fd = map->fd;

	return link;
}
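
/* Usage sketch (illustrative; the "dctcp" map name is an assumption):
 * register a struct_ops map, e.g. a TCP congestion control
 * implementation, after the object has been loaded.
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "dctcp");
 *	struct bpf_link *link = bpf_map__attach_struct_ops(map);
 *
 *	if (libbpf_get_error(link))
 *		return -1;
 */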

enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	while (data_head != data_tail) {
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	ring_buffer_write_tail(header, data_tail);
	return ret;
}
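
/* Usage sketch (illustrative): a minimal bpf_perf_event_print_t callback.
 * Records that wrap around the end of the ring are linearized into
 * *copy_mem by the function above, so the callback always sees a
 * contiguous record.
 *
 *	static enum bpf_perf_event_ret
 *	print_event(struct perf_event_header *hdr, void *private_data)
 *	{
 *		if (hdr->type == PERF_RECORD_SAMPLE)
 *			... process the sample ...
 *		return LIBBPF_PERF_EVENT_CONT;
 *	}
 */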

struct perf_buffer;

struct perf_buffer_params {
	struct perf_event_attr *attr;
	/* if event_cb is specified, it takes precedence */
	perf_buffer_event_fn event_cb;
	/* sample_cb and lost_cb are higher-level common-case callbacks */
	perf_buffer_sample_fn sample_cb;
	perf_buffer_lost_fn lost_cb;
	void *ctx;
	int cpu_cnt;
	int *cpus;
	int *map_keys;
};

struct perf_cpu_buf {
	struct perf_buffer *pb;
	void *base; /* mmap()'ed memory */
	void *buf; /* for reconstructing segmented data */
	size_t buf_size;
	int fd;
	int cpu;
	int map_key;
};

struct perf_buffer {
	perf_buffer_event_fn event_cb;
	perf_buffer_sample_fn sample_cb;
	perf_buffer_lost_fn lost_cb;
	void *ctx; /* passed into callbacks */

	size_t page_size;
	size_t mmap_size;
	struct perf_cpu_buf **cpu_bufs;
	struct epoll_event *events;
	int cpu_cnt; /* number of allocated CPU buffers */
	int epoll_fd; /* epoll instance FD */
	int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
};

static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
				      struct perf_cpu_buf *cpu_buf)
{
	if (!cpu_buf)
		return;
	if (cpu_buf->base &&
	    munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
		pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
	if (cpu_buf->fd >= 0) {
		ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
		close(cpu_buf->fd);
	}
	free(cpu_buf->buf);
	free(cpu_buf);
}

void perf_buffer__free(struct perf_buffer *pb)
{
	int i;

	if (IS_ERR_OR_NULL(pb))
		return;
	if (pb->cpu_bufs) {
		for (i = 0; i < pb->cpu_cnt; i++) {
			struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];

			if (!cpu_buf)
				continue;

			bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
			perf_buffer__free_cpu_buf(pb, cpu_buf);
		}
		free(pb->cpu_bufs);
	}
	if (pb->epoll_fd >= 0)
		close(pb->epoll_fd);
	free(pb->events);
	free(pb);
}

static struct perf_cpu_buf *
perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
			  int cpu, int map_key)
{
	struct perf_cpu_buf *cpu_buf;
	char msg[STRERR_BUFSIZE];
	int err;

	cpu_buf = calloc(1, sizeof(*cpu_buf));
	if (!cpu_buf)
		return ERR_PTR(-ENOMEM);

	cpu_buf->pb = pb;
	cpu_buf->cpu = cpu;
	cpu_buf->map_key = map_key;

	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
			      -1, PERF_FLAG_FD_CLOEXEC);
	if (cpu_buf->fd < 0) {
		err = -errno;
		pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
			     PROT_READ | PROT_WRITE, MAP_SHARED,
			     cpu_buf->fd, 0);
	if (cpu_buf->base == MAP_FAILED) {
		cpu_buf->base = NULL;
		err = -errno;
		pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
		err = -errno;
		pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	return cpu_buf;

error:
	perf_buffer__free_cpu_buf(pb, cpu_buf);
	return (struct perf_cpu_buf *)ERR_PTR(err);
}

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p);

struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
				     const struct perf_buffer_opts *opts)
{
	struct perf_buffer_params p = {};
	struct perf_event_attr attr = { 0, };

	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
	attr.type = PERF_TYPE_SOFTWARE;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	p.attr = &attr;
	p.sample_cb = opts ? opts->sample_cb : NULL;
	p.lost_cb = opts ? opts->lost_cb : NULL;
	p.ctx = opts ? opts->ctx : NULL;

	return __perf_buffer__new(map_fd, page_cnt, &p);
}
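
/* Usage sketch (illustrative; handle_event and the 8-page ring size are
 * assumptions): create a perf buffer on a BPF_MAP_TYPE_PERF_EVENT_ARRAY
 * map and register a per-sample callback.
 *
 *	struct perf_buffer_opts pb_opts = {
 *		.sample_cb = handle_event,
 *	};
 *	struct perf_buffer *pb;
 *
 *	pb = perf_buffer__new(bpf_map__fd(map), 8, &pb_opts);
 *	if (libbpf_get_error(pb))
 *		return -1;
 */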

struct perf_buffer *
perf_buffer__new_raw(int map_fd, size_t page_cnt,
		     const struct perf_buffer_raw_opts *opts)
{
	struct perf_buffer_params p = {};

	p.attr = opts->attr;
	p.event_cb = opts->event_cb;
	p.ctx = opts->ctx;
	p.cpu_cnt = opts->cpu_cnt;
	p.cpus = opts->cpus;
	p.map_keys = opts->map_keys;

	return __perf_buffer__new(map_fd, page_cnt, &p);
}

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p)
{
	const char *online_cpus_file = "/sys/devices/system/cpu/online";
	struct bpf_map_info map;
	char msg[STRERR_BUFSIZE];
	struct perf_buffer *pb;
	bool *online = NULL;
	__u32 map_info_len;
	int err, i, j, n;

	if (page_cnt & (page_cnt - 1)) {
		pr_warn("page count should be power of two, but is %zu\n",
			page_cnt);
		return ERR_PTR(-EINVAL);
	}

	/* best-effort sanity checks */
	memset(&map, 0, sizeof(map));
	map_info_len = sizeof(map);
	err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
	if (err) {
		err = -errno;
		/* if BPF_OBJ_GET_INFO_BY_FD is supported, will return
		 * -EBADFD, -EFAULT, or -E2BIG on real error
		 */
		if (err != -EINVAL) {
			pr_warn("failed to get map info for map FD %d: %s\n",
				map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
			return ERR_PTR(err);
		}
		pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
			 map_fd);
	} else {
		if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
			pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
				map.name);
			return ERR_PTR(-EINVAL);
		}
	}

	pb = calloc(1, sizeof(*pb));
	if (!pb)
		return ERR_PTR(-ENOMEM);

	pb->event_cb = p->event_cb;
	pb->sample_cb = p->sample_cb;
	pb->lost_cb = p->lost_cb;
	pb->ctx = p->ctx;

	pb->page_size = getpagesize();
	pb->mmap_size = pb->page_size * page_cnt;
	pb->map_fd = map_fd;

	pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
	if (pb->epoll_fd < 0) {
		err = -errno;
		pr_warn("failed to create epoll instance: %s\n",
			libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (p->cpu_cnt > 0) {
		pb->cpu_cnt = p->cpu_cnt;
	} else {
		pb->cpu_cnt = libbpf_num_possible_cpus();
		if (pb->cpu_cnt < 0) {
			err = pb->cpu_cnt;
			goto error;
		}
		if (map.max_entries && map.max_entries < pb->cpu_cnt)
			pb->cpu_cnt = map.max_entries;
	}

	pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
	if (!pb->events) {
		err = -ENOMEM;
		pr_warn("failed to allocate events: out of memory\n");
		goto error;
	}
	pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
	if (!pb->cpu_bufs) {
		err = -ENOMEM;
		pr_warn("failed to allocate buffers: out of memory\n");
		goto error;
	}

	err = parse_cpu_mask_file(online_cpus_file, &online, &n);
	if (err) {
		pr_warn("failed to get online CPU mask: %d\n", err);
		goto error;
	}

	for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
		struct perf_cpu_buf *cpu_buf;
		int cpu, map_key;

		cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
		map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;

		/* in case user didn't explicitly request particular CPUs to
		 * be attached to, skip offline/not present CPUs
		 */
		if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
			continue;

		cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
		if (IS_ERR(cpu_buf)) {
			err = PTR_ERR(cpu_buf);
			goto error;
		}

		pb->cpu_bufs[j] = cpu_buf;

		err = bpf_map_update_elem(pb->map_fd, &map_key,
					  &cpu_buf->fd, 0);
		if (err) {
			err = -errno;
			pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
				cpu, map_key, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}

		pb->events[j].events = EPOLLIN;
		pb->events[j].data.ptr = cpu_buf;
		if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
			      &pb->events[j]) < 0) {
			err = -errno;
			pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
				cpu, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}
		j++;
	}
	pb->cpu_cnt = j;
	free(online);

	return pb;

error:
	free(online);
	if (pb)
		perf_buffer__free(pb);
	return ERR_PTR(err);
}

struct perf_sample_raw {
	struct perf_event_header header;
	uint32_t size;
	char data[];
};

struct perf_sample_lost {
	struct perf_event_header header;
	uint64_t id;
	uint64_t lost;
	uint64_t sample_id;
};

static enum bpf_perf_event_ret
perf_buffer__process_record(struct perf_event_header *e, void *ctx)
{
	struct perf_cpu_buf *cpu_buf = ctx;
	struct perf_buffer *pb = cpu_buf->pb;
	void *data = e;

	/* user wants full control over parsing perf event */
	if (pb->event_cb)
		return pb->event_cb(pb->ctx, cpu_buf->cpu, e);

	switch (e->type) {
	case PERF_RECORD_SAMPLE: {
		struct perf_sample_raw *s = data;

		if (pb->sample_cb)
			pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
		break;
	}
	case PERF_RECORD_LOST: {
		struct perf_sample_lost *s = data;

		if (pb->lost_cb)
			pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
		break;
	}
	default:
		pr_warn("unknown perf sample type %d\n", e->type);
		return LIBBPF_PERF_EVENT_ERROR;
	}
	return LIBBPF_PERF_EVENT_CONT;
}

static int perf_buffer__process_records(struct perf_buffer *pb,
					struct perf_cpu_buf *cpu_buf)
{
	enum bpf_perf_event_ret ret;

	ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
					 pb->page_size, &cpu_buf->buf,
					 &cpu_buf->buf_size,
					 perf_buffer__process_record, cpu_buf);
	if (ret != LIBBPF_PERF_EVENT_CONT)
		return ret;
	return 0;
}

int perf_buffer__epoll_fd(const struct perf_buffer *pb)
{
	return pb->epoll_fd;
}

int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
{
	int i, cnt, err;

	cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
	for (i = 0; i < cnt; i++) {
		struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;

		err = perf_buffer__process_records(pb, cpu_buf);
		if (err) {
			pr_warn("error while processing records: %d\n", err);
			return err;
		}
	}
	return cnt < 0 ? -errno : cnt;
}
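
/* Usage sketch (illustrative): a typical consumption loop, polling with a
 * 100 ms timeout until an error (other than interruption) occurs.
 *
 *	while (!exiting) {
 *		int n = perf_buffer__poll(pb, 100);
 *
 *		if (n < 0 && n != -EINTR)
 *			break;
 *	}
 *	perf_buffer__free(pb);
 */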

/* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
 * manager.
 */
size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
{
	return pb->cpu_cnt;
}

/*
 * Return perf_event FD of a ring buffer in *buf_idx* slot of
 * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
 * select()/poll()/epoll() Linux syscalls.
 */
int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
{
	struct perf_cpu_buf *cpu_buf;

	if (buf_idx >= pb->cpu_cnt)
		return -EINVAL;

	cpu_buf = pb->cpu_bufs[buf_idx];
	if (!cpu_buf)
		return -ENOENT;

	return cpu_buf->fd;
}

/*
 * Consume data from perf ring buffer corresponding to slot *buf_idx* in
 * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
 * consume, do nothing and return success.
 * Returns:
 *   - 0 on success;
 *   - <0 on failure.
 */
int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
{
	struct perf_cpu_buf *cpu_buf;

	if (buf_idx >= pb->cpu_cnt)
		return -EINVAL;

	cpu_buf = pb->cpu_bufs[buf_idx];
	if (!cpu_buf)
		return -ENOENT;

	return perf_buffer__process_records(pb, cpu_buf);
}

int perf_buffer__consume(struct perf_buffer *pb)
{
	int i, err;

	for (i = 0; i < pb->cpu_cnt; i++) {
		struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];

		if (!cpu_buf)
			continue;

		err = perf_buffer__process_records(pb, cpu_buf);
		if (err) {
			pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
			return err;
		}
	}
	return 0;
}

struct bpf_prog_info_array_desc {
	int	array_offset;	/* e.g. offset of jited_prog_insns */
	int	count_offset;	/* e.g. offset of jited_prog_len */
	int	size_offset;	/* > 0: offset of rec size,
				 * < 0: fixed size of -size_offset
				 */
};

static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
	[BPF_PROG_INFO_JITED_INSNS] = {
		offsetof(struct bpf_prog_info, jited_prog_insns),
		offsetof(struct bpf_prog_info, jited_prog_len),
		-1,
	},
	[BPF_PROG_INFO_XLATED_INSNS] = {
		offsetof(struct bpf_prog_info, xlated_prog_insns),
		offsetof(struct bpf_prog_info, xlated_prog_len),
		-1,
	},
	[BPF_PROG_INFO_MAP_IDS] = {
		offsetof(struct bpf_prog_info, map_ids),
		offsetof(struct bpf_prog_info, nr_map_ids),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_JITED_KSYMS] = {
		offsetof(struct bpf_prog_info, jited_ksyms),
		offsetof(struct bpf_prog_info, nr_jited_ksyms),
		-(int)sizeof(__u64),
	},
	[BPF_PROG_INFO_JITED_FUNC_LENS] = {
		offsetof(struct bpf_prog_info, jited_func_lens),
		offsetof(struct bpf_prog_info, nr_jited_func_lens),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_FUNC_INFO] = {
		offsetof(struct bpf_prog_info, func_info),
		offsetof(struct bpf_prog_info, nr_func_info),
		offsetof(struct bpf_prog_info, func_info_rec_size),
	},
	[BPF_PROG_INFO_LINE_INFO] = {
		offsetof(struct bpf_prog_info, line_info),
		offsetof(struct bpf_prog_info, nr_line_info),
		offsetof(struct bpf_prog_info, line_info_rec_size),
	},
	[BPF_PROG_INFO_JITED_LINE_INFO] = {
		offsetof(struct bpf_prog_info, jited_line_info),
		offsetof(struct bpf_prog_info, nr_jited_line_info),
		offsetof(struct bpf_prog_info, jited_line_info_rec_size),
	},
	[BPF_PROG_INFO_PROG_TAGS] = {
		offsetof(struct bpf_prog_info, prog_tags),
		offsetof(struct bpf_prog_info, nr_prog_tags),
		-(int)sizeof(__u8) * BPF_TAG_SIZE,
	},

};

static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
					   int offset)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u32)];
	return -(int)offset;
}

static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
					   int offset)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u64)];
	return -(int)offset;
}

static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
					 __u32 val)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u32)] = val;
}

static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
					 __u64 val)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u64)] = val;
}

struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 data_len = 0;
	int i, err;
	void *ptr;

	if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
		return ERR_PTR(-EINVAL);

	/* step 1: get array dimensions */
	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		return ERR_PTR(-EFAULT);
	}

	/* step 2: calculate total size of all arrays */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		bool include_array = (arrays & (1UL << i)) > 0;
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		desc = bpf_prog_info_array_desc + i;

		/* kernel is too old to support this field */
		if (info_len < desc->array_offset + sizeof(__u32) ||
		    info_len < desc->count_offset + sizeof(__u32) ||
		    (desc->size_offset > 0 && info_len < desc->size_offset))
			include_array = false;

		if (!include_array) {
			arrays &= ~(1UL << i);	/* clear the bit */
			continue;
		}

		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);

		data_len += count * size;
	}

	/* step 3: allocate continuous memory */
	data_len = roundup(data_len, sizeof(__u64));
	info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
	if (!info_linear)
		return ERR_PTR(-ENOMEM);

	/* step 4: fill data to info_linear->info */
	info_linear->arrays = arrays;
	memset(&info_linear->info, 0, sizeof(info));
	ptr = info_linear->data;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->count_offset, count);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->size_offset, size);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset,
					     ptr_to_u64(ptr));
		ptr += count * size;
	}

	/* step 5: call syscall again to get required arrays */
	err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		free(info_linear);
		return ERR_PTR(-EFAULT);
	}

	/* step 6: verify the data */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 v1, v2;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->count_offset);
		if (v1 != v2)
			pr_warn("%s: mismatch in element count\n", __func__);

		v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->size_offset);
		if (v1 != v2)
			pr_warn("%s: mismatch in rec size\n", __func__);
	}

	/* step 7: update info_len and data_len */
	info_linear->info_len = sizeof(struct bpf_prog_info);
	info_linear->data_len = data_len;

	return info_linear;
}
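
/* Usage sketch (illustrative): fetch prog info together with its map IDs
 * in one contiguous allocation.
 *
 *	struct bpf_prog_info_linear *info;
 *
 *	info = bpf_program__get_prog_info_linear(prog_fd,
 *						 1UL << BPF_PROG_INFO_MAP_IDS);
 *	if (libbpf_get_error(info))
 *		return -1;
 *	... read info->info.map_ids ...
 *	free(info);
 */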

void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		addr = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		offs = addr - ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, offs);
	}
}

void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		offs = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		addr = offs + ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, addr);
	}
}

int bpf_program__set_attach_target(struct bpf_program *prog,
				   int attach_prog_fd,
				   const char *attach_func_name)
{
	int btf_id;

	if (!prog || attach_prog_fd < 0 || !attach_func_name)
		return -EINVAL;

	if (attach_prog_fd)
		btf_id = libbpf_find_prog_btf_id(attach_func_name,
						 attach_prog_fd);
	else
		btf_id = libbpf_find_vmlinux_btf_id(attach_func_name,
						    prog->expected_attach_type);

	if (btf_id < 0)
		return btf_id;

	prog->attach_btf_id = btf_id;
	prog->attach_prog_fd = attach_prog_fd;
	return 0;
}
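
/* Usage sketch (illustrative; the vmlinux function name is an
 * assumption): retarget an fentry/fexit program to a different kernel
 * function before loading. Passing 0 as attach_prog_fd resolves the name
 * against vmlinux BTF.
 *
 *	if (bpf_program__set_attach_target(prog, 0, "tcp_v4_connect"))
 *		return -1;
 *	if (bpf_object__load(obj))
 *		return -1;
 */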

int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
{
	int err = 0, n, len, start, end = -1;
	bool *tmp;

	*mask = NULL;
	*mask_sz = 0;

	/* Each sub string separated by ',' has format \d+-\d+ or \d+ */
	while (*s) {
		if (*s == ',' || *s == '\n') {
			s++;
			continue;
		}
		n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
		if (n <= 0 || n > 2) {
			pr_warn("Failed to get CPU range %s: %d\n", s, n);
			err = -EINVAL;
			goto cleanup;
		} else if (n == 1) {
			end = start;
		}
		if (start < 0 || start > end) {
			pr_warn("Invalid CPU range [%d,%d] in %s\n",
				start, end, s);
			err = -EINVAL;
			goto cleanup;
		}
		tmp = realloc(*mask, end + 1);
		if (!tmp) {
			err = -ENOMEM;
			goto cleanup;
		}
		*mask = tmp;
		memset(tmp + *mask_sz, 0, start - *mask_sz);
		memset(tmp + start, 1, end - start + 1);
		*mask_sz = end + 1;
		s += len;
	}
	if (!*mask_sz) {
		pr_warn("Empty CPU range\n");
		return -EINVAL;
	}
	return 0;
cleanup:
	free(*mask);
	*mask = NULL;
	return err;
}

int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
{
	int fd, err = 0, len;
	char buf[128];

	fd = open(fcpu, O_RDONLY);
	if (fd < 0) {
		err = -errno;
		pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
		return err;
	}
	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len <= 0) {
		err = len ? -errno : -EINVAL;
		pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
		return err;
	}
	if (len >= sizeof(buf)) {
		pr_warn("CPU mask is too big in file %s\n", fcpu);
		return -E2BIG;
	}
	buf[len] = '\0';

	return parse_cpu_mask_str(buf, mask, mask_sz);
}

int libbpf_num_possible_cpus(void)
{
	static const char *fcpu = "/sys/devices/system/cpu/possible";
	static int cpus;
	int err, n, i, tmp_cpus;
	bool *mask;

	tmp_cpus = READ_ONCE(cpus);
	if (tmp_cpus > 0)
		return tmp_cpus;

	err = parse_cpu_mask_file(fcpu, &mask, &n);
	if (err)
		return err;

	tmp_cpus = 0;
	for (i = 0; i < n; i++) {
		if (mask[i])
			tmp_cpus++;
	}
	free(mask);

	WRITE_ONCE(cpus, tmp_cpus);
	return tmp_cpus;
}

int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
			      const struct bpf_object_open_opts *opts)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
		.object_name = s->name,
	);
	struct bpf_object *obj;
	int i;

	/* Attempt to preserve opts->object_name, unless overridden by user
	 * explicitly. Overwriting object name for skeletons is discouraged,
	 * as it breaks global data maps, because they contain object name
	 * prefix as their own map name prefix. When skeleton is generated,
	 * bpftool is making an assumption that this name will stay the same.
	 */
	if (opts) {
		memcpy(&skel_opts, opts, sizeof(*opts));
		if (!opts->object_name)
			skel_opts.object_name = s->name;
	}

	obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
	if (IS_ERR(obj)) {
		pr_warn("failed to initialize skeleton BPF object '%s': %ld\n",
			s->name, PTR_ERR(obj));
		return PTR_ERR(obj);
	}

	*s->obj = obj;

	for (i = 0; i < s->map_cnt; i++) {
		struct bpf_map **map = s->maps[i].map;
		const char *name = s->maps[i].name;
		void **mmaped = s->maps[i].mmaped;

		*map = bpf_object__find_map_by_name(obj, name);
		if (!*map) {
			pr_warn("failed to find skeleton map '%s'\n", name);
			return -ESRCH;
		}

		/* externs shouldn't be pre-setup from user code */
		if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
			*mmaped = (*map)->mmaped;
	}

	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_program **prog = s->progs[i].prog;
		const char *name = s->progs[i].name;

		*prog = bpf_object__find_program_by_name(obj, name);
		if (!*prog) {
			pr_warn("failed to find skeleton program '%s'\n", name);
			return -ESRCH;
		}
	}

	return 0;
}

int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
{
	int i, err;

	err = bpf_object__load(*s->obj);
	if (err) {
		pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
		return err;
	}

	for (i = 0; i < s->map_cnt; i++) {
		struct bpf_map *map = *s->maps[i].map;
		size_t mmap_sz = bpf_map_mmap_sz(map);
		int prot, map_fd = bpf_map__fd(map);
		void **mmaped = s->maps[i].mmaped;

		if (!mmaped)
			continue;

		if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
			*mmaped = NULL;
			continue;
		}

		if (map->def.map_flags & BPF_F_RDONLY_PROG)
			prot = PROT_READ;
		else
			prot = PROT_READ | PROT_WRITE;

		/* Remap anonymous mmap()-ed "map initialization image" as
		 * a BPF map-backed mmap()-ed memory, but preserving the same
		 * memory address. This will cause the kernel to change the
		 * process's page table to point to a different piece of
		 * kernel memory, but from the userspace point of view the
		 * memory address (and its contents, being identical at this
		 * point) will stay the same. This mapping will be released
		 * by bpf_object__close() as per normal clean up procedure,
		 * so we don't need to worry about it from skeleton's clean
		 * up perspective.
		 */
		*mmaped = mmap(map->mmaped, mmap_sz, prot,
			       MAP_SHARED | MAP_FIXED, map_fd, 0);
		if (*mmaped == MAP_FAILED) {
			err = -errno;
			*mmaped = NULL;
			pr_warn("failed to re-mmap() map '%s': %d\n",
				bpf_map__name(map), err);
			return err;
		}
	}

	return 0;
}

int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
{
	int i;

	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_program *prog = *s->progs[i].prog;
		struct bpf_link **link = s->progs[i].link;
		const struct bpf_sec_def *sec_def;

		if (!prog->load)
			continue;

		sec_def = find_sec_def(prog->sec_name);
		if (!sec_def || !sec_def->attach_fn)
			continue;

		*link = sec_def->attach_fn(sec_def, prog);
		if (IS_ERR(*link)) {
			pr_warn("failed to auto-attach program '%s': %ld\n",
				bpf_program__name(prog), PTR_ERR(*link));
			return PTR_ERR(*link);
		}
	}

	return 0;
}
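
/* Usage sketch (illustrative; "myprog" is an assumed bpftool-generated
 * skeleton name): the typical skeleton lifecycle, where the generated
 * wrappers call the bpf_object__*_skeleton() functions in this file.
 *
 *	struct myprog_bpf *skel = myprog_bpf__open_and_load();
 *
 *	if (!skel)
 *		return -1;
 *	if (myprog_bpf__attach(skel))
 *		goto cleanup;
 *	... run ...
 * cleanup:
 *	myprog_bpf__destroy(skel);
 */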

void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
{
	int i;

	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_link **link = s->progs[i].link;

		bpf_link__destroy(*link);
		*link = NULL;
	}
}

void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
{
	if (s->progs)
		bpf_object__detach_skeleton(s);
	if (s->obj)
		bpf_object__close(*s->obj);
	free(s->maps);
	free(s->progs);
	free(s);
}
