1 // SPDX-License-Identifier: LGPL-2.1
2
3 /*
4 * Common eBPF ELF object loading operations.
5 *
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8 * Copyright (C) 2015 Huawei Inc.
9 * Copyright (C) 2017 Nicira, Inc.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation;
14 * version 2.1 of the License (not later!)
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU Lesser General Public License for more details.
20 *
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with this program; if not, see <http://www.gnu.org/licenses>
23 */
24
25 #define _GNU_SOURCE
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdarg.h>
29 #include <libgen.h>
30 #include <inttypes.h>
31 #include <string.h>
32 #include <unistd.h>
33 #include <fcntl.h>
34 #include <errno.h>
35 #include <perf-sys.h>
36 #include <asm/unistd.h>
37 #include <linux/err.h>
38 #include <linux/kernel.h>
39 #include <linux/bpf.h>
40 #include <linux/btf.h>
41 #include <linux/list.h>
42 #include <linux/limits.h>
43 #include <sys/stat.h>
44 #include <sys/types.h>
45 #include <sys/vfs.h>
46 #include <tools/libc_compat.h>
47 #include <libelf.h>
48 #include <gelf.h>
49
50 #include "libbpf.h"
51 #include "bpf.h"
52 #include "btf.h"
53 #include "str_error.h"
54
55 #ifndef EM_BPF
56 #define EM_BPF 247
57 #endif
58
59 #ifndef BPF_FS_MAGIC
60 #define BPF_FS_MAGIC 0xcafe4a11
61 #endif
62
#define __printf(a, b) __attribute__((format(printf, a, b)))

/* Default print handler: forward the formatted message to stderr and
 * return vfprintf()'s result (chars written, or negative on error). */
__printf(1, 2)
static int __base_pr(const char *format, ...)
{
	int ret;
	va_list ap;

	va_start(ap, format);
	ret = vfprintf(stderr, format, ap);
	va_end(ap);

	return ret;
}
76
/* Per-level print callbacks; a NULL callback silences that level.
 * Warnings and info default to stderr via __base_pr; debug is off. */
static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

/* Emit through @func only when a callback is installed, always with a
 * "libbpf: " prefix so library output is attributable. */
#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)

/*
 * Install caller-provided print callbacks for the warn/info/debug
 * levels.  Passing NULL for a level suppresses that level's output.
 */
void libbpf_set_print(libbpf_print_fn_t warn,
		      libbpf_print_fn_t info,
		      libbpf_print_fn_t debug)
{
	__pr_warning = warn;
	__pr_info = info;
	__pr_debug = debug;
}
99
/* Scratch buffer size for str_error() messages. */
#define STRERR_BUFSIZE  128

/* Run @action, stash its result in @err, and jump to @out on failure. */
#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while(0)


/* Copied from tools/perf/util/util.h */
/* Free *ptr and NULL it out, guarding against double-free. */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

/* Close @fd only if it is valid (>= 0), reset it to -1, and evaluate to
 * close()'s return value (0 when fd was already invalid). */
#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

/* Prefer mmap-based ELF reading when libelf supports it. */
#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
128
/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *name;			/* symbol name (owned, strdup'ed) */
	int prog_ifindex;
	char *section_name;		/* ELF section it came from (owned) */
	struct bpf_insn *insns;		/* owned copy of the instructions */
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	/* One entry per ELF relocation applied to this program. */
	struct reloc_desc {
		enum {
			RELO_LD64,	/* map reference */
			RELO_CALL,	/* bpf-to-bpf call into .text */
		} type;
		int insn_idx;		/* instruction to patch */
		union {
			int map_idx;	/* RELO_LD64: index into obj->maps */
			int text_off;	/* RELO_CALL: symbol value in .text */
		};
	} *reloc_desc;
	int nr_reloc;

	/* Loaded program instances; nr == -1 means "never loaded". */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;		/* back-pointer to owning object */
	void *priv;			/* caller-private data */
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
};
168
/* In-memory state for one map defined in the "maps" ELF section. */
struct bpf_map {
	int fd;				/* kernel fd; -1 until created */
	char *name;			/* map symbol name (owned) */
	size_t offset;			/* symbol offset within "maps" section */
	int map_ifindex;
	struct bpf_map_def def;
	__u32 btf_key_type_id;		/* BTF type ids; 0 when no BTF info */
	__u32 btf_value_type_id;
	void *priv;			/* caller-private data */
	bpf_map_clear_priv_t clear_priv;
};
180
/* Global list linking every live bpf_object (see bpf_object::list). */
static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char license[64];		/* license string from "license" section */
	u32 kern_version;		/* from 4-byte "version" section */

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;
	bool has_pseudo_calls;		/* any RELO_CALL relocation seen */

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;		/* caller-supplied ELF image, if any */
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;	/* SYMTAB section data */
		size_t strtabidx;	/* section index of the string table */
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;		/* collected SHT_REL sections */
		int nr_reloc;
		int maps_shndx;		/* "maps" section index; -1 if absent */
		int text_shndx;		/* ".text" section index */
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;		/* parsed .BTF section, or NULL */

	void *priv;			/* caller-private data */
	bpf_object_clear_priv_t clear_priv;

	char path[];			/* object path (flexible array member) */
};
/* True while the ELF handle is open, i.e. between elf_init/elf_finish. */
#define obj_elf_valid(o)	((o)->efile.elf)
230
bpf_program__unload(struct bpf_program * prog)231 static void bpf_program__unload(struct bpf_program *prog)
232 {
233 int i;
234
235 if (!prog)
236 return;
237
238 /*
239 * If the object is opened but the program was never loaded,
240 * it is possible that prog->instances.nr == -1.
241 */
242 if (prog->instances.nr > 0) {
243 for (i = 0; i < prog->instances.nr; i++)
244 zclose(prog->instances.fds[i]);
245 } else if (prog->instances.nr != -1) {
246 pr_warning("Internal error: instances.nr is %d\n",
247 prog->instances.nr);
248 }
249
250 prog->instances.nr = -1;
251 zfree(&prog->instances.fds);
252 }
253
bpf_program__exit(struct bpf_program * prog)254 static void bpf_program__exit(struct bpf_program *prog)
255 {
256 if (!prog)
257 return;
258
259 if (prog->clear_priv)
260 prog->clear_priv(prog, prog->priv);
261
262 prog->priv = NULL;
263 prog->clear_priv = NULL;
264
265 bpf_program__unload(prog);
266 zfree(&prog->name);
267 zfree(&prog->section_name);
268 zfree(&prog->insns);
269 zfree(&prog->reloc_desc);
270
271 prog->nr_reloc = 0;
272 prog->insns_cnt = 0;
273 prog->idx = -1;
274 }
275
276 static int
bpf_program__init(void * data,size_t size,char * section_name,int idx,struct bpf_program * prog)277 bpf_program__init(void *data, size_t size, char *section_name, int idx,
278 struct bpf_program *prog)
279 {
280 if (size < sizeof(struct bpf_insn)) {
281 pr_warning("corrupted section '%s'\n", section_name);
282 return -EINVAL;
283 }
284
285 bzero(prog, sizeof(*prog));
286
287 prog->section_name = strdup(section_name);
288 if (!prog->section_name) {
289 pr_warning("failed to alloc name for prog under section(%d) %s\n",
290 idx, section_name);
291 goto errout;
292 }
293
294 prog->insns = malloc(size);
295 if (!prog->insns) {
296 pr_warning("failed to alloc insns for prog under section %s\n",
297 section_name);
298 goto errout;
299 }
300 prog->insns_cnt = size / sizeof(struct bpf_insn);
301 memcpy(prog->insns, data,
302 prog->insns_cnt * sizeof(struct bpf_insn));
303 prog->idx = idx;
304 prog->instances.fds = NULL;
305 prog->instances.nr = -1;
306 prog->type = BPF_PROG_TYPE_KPROBE;
307
308 return 0;
309 errout:
310 bpf_program__exit(prog);
311 return -ENOMEM;
312 }
313
314 static int
bpf_object__add_program(struct bpf_object * obj,void * data,size_t size,char * section_name,int idx)315 bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
316 char *section_name, int idx)
317 {
318 struct bpf_program prog, *progs;
319 int nr_progs, err;
320
321 err = bpf_program__init(data, size, section_name, idx, &prog);
322 if (err)
323 return err;
324
325 progs = obj->programs;
326 nr_progs = obj->nr_programs;
327
328 progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
329 if (!progs) {
330 /*
331 * In this case the original obj->programs
332 * is still valid, so don't need special treat for
333 * bpf_close_object().
334 */
335 pr_warning("failed to alloc a new program under section '%s'\n",
336 section_name);
337 bpf_program__exit(&prog);
338 return -ENOMEM;
339 }
340
341 pr_debug("found program %s\n", prog.section_name);
342 obj->programs = progs;
343 obj->nr_programs = nr_progs + 1;
344 prog.obj = obj;
345 progs[nr_progs] = prog;
346 return 0;
347 }
348
349 static int
bpf_object__init_prog_names(struct bpf_object * obj)350 bpf_object__init_prog_names(struct bpf_object *obj)
351 {
352 Elf_Data *symbols = obj->efile.symbols;
353 struct bpf_program *prog;
354 size_t pi, si;
355
356 for (pi = 0; pi < obj->nr_programs; pi++) {
357 const char *name = NULL;
358
359 prog = &obj->programs[pi];
360
361 for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
362 si++) {
363 GElf_Sym sym;
364
365 if (!gelf_getsym(symbols, si, &sym))
366 continue;
367 if (sym.st_shndx != prog->idx)
368 continue;
369 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
370 continue;
371
372 name = elf_strptr(obj->efile.elf,
373 obj->efile.strtabidx,
374 sym.st_name);
375 if (!name) {
376 pr_warning("failed to get sym name string for prog %s\n",
377 prog->section_name);
378 return -LIBBPF_ERRNO__LIBELF;
379 }
380 }
381
382 if (!name && prog->idx == obj->efile.text_shndx)
383 name = ".text";
384
385 if (!name) {
386 pr_warning("failed to find sym for prog %s\n",
387 prog->section_name);
388 return -EINVAL;
389 }
390
391 prog->name = strdup(name);
392 if (!prog->name) {
393 pr_warning("failed to allocate memory for prog sym %s\n",
394 name);
395 return -ENOMEM;
396 }
397 }
398
399 return 0;
400 }
401
bpf_object__new(const char * path,void * obj_buf,size_t obj_buf_sz)402 static struct bpf_object *bpf_object__new(const char *path,
403 void *obj_buf,
404 size_t obj_buf_sz)
405 {
406 struct bpf_object *obj;
407
408 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
409 if (!obj) {
410 pr_warning("alloc memory failed for %s\n", path);
411 return ERR_PTR(-ENOMEM);
412 }
413
414 strcpy(obj->path, path);
415 obj->efile.fd = -1;
416
417 /*
418 * Caller of this function should also calls
419 * bpf_object__elf_finish() after data collection to return
420 * obj_buf to user. If not, we should duplicate the buffer to
421 * avoid user freeing them before elf finish.
422 */
423 obj->efile.obj_buf = obj_buf;
424 obj->efile.obj_buf_sz = obj_buf_sz;
425 obj->efile.maps_shndx = -1;
426
427 obj->loaded = false;
428
429 INIT_LIST_HEAD(&obj->list);
430 list_add(&obj->list, &bpf_objects_list);
431 return obj;
432 }
433
bpf_object__elf_finish(struct bpf_object * obj)434 static void bpf_object__elf_finish(struct bpf_object *obj)
435 {
436 if (!obj_elf_valid(obj))
437 return;
438
439 if (obj->efile.elf) {
440 elf_end(obj->efile.elf);
441 obj->efile.elf = NULL;
442 }
443 obj->efile.symbols = NULL;
444
445 zfree(&obj->efile.reloc);
446 obj->efile.nr_reloc = 0;
447 zclose(obj->efile.fd);
448 obj->efile.obj_buf = NULL;
449 obj->efile.obj_buf_sz = 0;
450 }
451
bpf_object__elf_init(struct bpf_object * obj)452 static int bpf_object__elf_init(struct bpf_object *obj)
453 {
454 int err = 0;
455 GElf_Ehdr *ep;
456
457 if (obj_elf_valid(obj)) {
458 pr_warning("elf init: internal error\n");
459 return -LIBBPF_ERRNO__LIBELF;
460 }
461
462 if (obj->efile.obj_buf_sz > 0) {
463 /*
464 * obj_buf should have been validated by
465 * bpf_object__open_buffer().
466 */
467 obj->efile.elf = elf_memory(obj->efile.obj_buf,
468 obj->efile.obj_buf_sz);
469 } else {
470 obj->efile.fd = open(obj->path, O_RDONLY);
471 if (obj->efile.fd < 0) {
472 char errmsg[STRERR_BUFSIZE];
473 char *cp = str_error(errno, errmsg, sizeof(errmsg));
474
475 pr_warning("failed to open %s: %s\n", obj->path, cp);
476 return -errno;
477 }
478
479 obj->efile.elf = elf_begin(obj->efile.fd,
480 LIBBPF_ELF_C_READ_MMAP,
481 NULL);
482 }
483
484 if (!obj->efile.elf) {
485 pr_warning("failed to open %s as ELF file\n",
486 obj->path);
487 err = -LIBBPF_ERRNO__LIBELF;
488 goto errout;
489 }
490
491 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
492 pr_warning("failed to get EHDR from %s\n",
493 obj->path);
494 err = -LIBBPF_ERRNO__FORMAT;
495 goto errout;
496 }
497 ep = &obj->efile.ehdr;
498
499 /* Old LLVM set e_machine to EM_NONE */
500 if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
501 pr_warning("%s is not an eBPF object file\n",
502 obj->path);
503 err = -LIBBPF_ERRNO__FORMAT;
504 goto errout;
505 }
506
507 return 0;
508 errout:
509 bpf_object__elf_finish(obj);
510 return err;
511 }
512
513 static int
bpf_object__check_endianness(struct bpf_object * obj)514 bpf_object__check_endianness(struct bpf_object *obj)
515 {
516 static unsigned int const endian = 1;
517
518 switch (obj->efile.ehdr.e_ident[EI_DATA]) {
519 case ELFDATA2LSB:
520 /* We are big endian, BPF obj is little endian. */
521 if (*(unsigned char const *)&endian != 1)
522 goto mismatch;
523 break;
524
525 case ELFDATA2MSB:
526 /* We are little endian, BPF obj is big endian. */
527 if (*(unsigned char const *)&endian != 0)
528 goto mismatch;
529 break;
530 default:
531 return -LIBBPF_ERRNO__ENDIAN;
532 }
533
534 return 0;
535
536 mismatch:
537 pr_warning("Error: endianness mismatch.\n");
538 return -LIBBPF_ERRNO__ENDIAN;
539 }
540
541 static int
bpf_object__init_license(struct bpf_object * obj,void * data,size_t size)542 bpf_object__init_license(struct bpf_object *obj,
543 void *data, size_t size)
544 {
545 memcpy(obj->license, data,
546 min(size, sizeof(obj->license) - 1));
547 pr_debug("license of %s is %s\n", obj->path, obj->license);
548 return 0;
549 }
550
551 static int
bpf_object__init_kversion(struct bpf_object * obj,void * data,size_t size)552 bpf_object__init_kversion(struct bpf_object *obj,
553 void *data, size_t size)
554 {
555 u32 kver;
556
557 if (size != sizeof(kver)) {
558 pr_warning("invalid kver section in %s\n", obj->path);
559 return -LIBBPF_ERRNO__FORMAT;
560 }
561 memcpy(&kver, data, sizeof(kver));
562 obj->kern_version = kver;
563 pr_debug("kernel version of %s is %x\n", obj->path,
564 obj->kern_version);
565 return 0;
566 }
567
compare_bpf_map(const void * _a,const void * _b)568 static int compare_bpf_map(const void *_a, const void *_b)
569 {
570 const struct bpf_map *a = _a;
571 const struct bpf_map *b = _b;
572
573 return a->offset - b->offset;
574 }
575
576 static int
bpf_object__init_maps(struct bpf_object * obj)577 bpf_object__init_maps(struct bpf_object *obj)
578 {
579 int i, map_idx, map_def_sz, nr_maps = 0;
580 Elf_Scn *scn;
581 Elf_Data *data;
582 Elf_Data *symbols = obj->efile.symbols;
583
584 if (obj->efile.maps_shndx < 0)
585 return -EINVAL;
586 if (!symbols)
587 return -EINVAL;
588
589 scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
590 if (scn)
591 data = elf_getdata(scn, NULL);
592 if (!scn || !data) {
593 pr_warning("failed to get Elf_Data from map section %d\n",
594 obj->efile.maps_shndx);
595 return -EINVAL;
596 }
597
598 /*
599 * Count number of maps. Each map has a name.
600 * Array of maps is not supported: only the first element is
601 * considered.
602 *
603 * TODO: Detect array of map and report error.
604 */
605 for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
606 GElf_Sym sym;
607
608 if (!gelf_getsym(symbols, i, &sym))
609 continue;
610 if (sym.st_shndx != obj->efile.maps_shndx)
611 continue;
612 nr_maps++;
613 }
614
615 /* Alloc obj->maps and fill nr_maps. */
616 pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
617 nr_maps, data->d_size);
618
619 if (!nr_maps)
620 return 0;
621
622 /* Assume equally sized map definitions */
623 map_def_sz = data->d_size / nr_maps;
624 if (!data->d_size || (data->d_size % nr_maps) != 0) {
625 pr_warning("unable to determine map definition size "
626 "section %s, %d maps in %zd bytes\n",
627 obj->path, nr_maps, data->d_size);
628 return -EINVAL;
629 }
630
631 obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
632 if (!obj->maps) {
633 pr_warning("alloc maps for object failed\n");
634 return -ENOMEM;
635 }
636 obj->nr_maps = nr_maps;
637
638 /*
639 * fill all fd with -1 so won't close incorrect
640 * fd (fd=0 is stdin) when failure (zclose won't close
641 * negative fd)).
642 */
643 for (i = 0; i < nr_maps; i++)
644 obj->maps[i].fd = -1;
645
646 /*
647 * Fill obj->maps using data in "maps" section.
648 */
649 for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
650 GElf_Sym sym;
651 const char *map_name;
652 struct bpf_map_def *def;
653
654 if (!gelf_getsym(symbols, i, &sym))
655 continue;
656 if (sym.st_shndx != obj->efile.maps_shndx)
657 continue;
658
659 map_name = elf_strptr(obj->efile.elf,
660 obj->efile.strtabidx,
661 sym.st_name);
662 obj->maps[map_idx].offset = sym.st_value;
663 if (sym.st_value + map_def_sz > data->d_size) {
664 pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
665 obj->path, map_name);
666 return -EINVAL;
667 }
668
669 obj->maps[map_idx].name = strdup(map_name);
670 if (!obj->maps[map_idx].name) {
671 pr_warning("failed to alloc map name\n");
672 return -ENOMEM;
673 }
674 pr_debug("map %d is \"%s\"\n", map_idx,
675 obj->maps[map_idx].name);
676 def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
677 /*
678 * If the definition of the map in the object file fits in
679 * bpf_map_def, copy it. Any extra fields in our version
680 * of bpf_map_def will default to zero as a result of the
681 * calloc above.
682 */
683 if (map_def_sz <= sizeof(struct bpf_map_def)) {
684 memcpy(&obj->maps[map_idx].def, def, map_def_sz);
685 } else {
686 /*
687 * Here the map structure being read is bigger than what
688 * we expect, truncate if the excess bits are all zero.
689 * If they are not zero, reject this map as
690 * incompatible.
691 */
692 char *b;
693 for (b = ((char *)def) + sizeof(struct bpf_map_def);
694 b < ((char *)def) + map_def_sz; b++) {
695 if (*b != 0) {
696 pr_warning("maps section in %s: \"%s\" "
697 "has unrecognized, non-zero "
698 "options\n",
699 obj->path, map_name);
700 return -EINVAL;
701 }
702 }
703 memcpy(&obj->maps[map_idx].def, def,
704 sizeof(struct bpf_map_def));
705 }
706 map_idx++;
707 }
708
709 qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
710 return 0;
711 }
712
section_have_execinstr(struct bpf_object * obj,int idx)713 static bool section_have_execinstr(struct bpf_object *obj, int idx)
714 {
715 Elf_Scn *scn;
716 GElf_Shdr sh;
717
718 scn = elf_getscn(obj->efile.elf, idx);
719 if (!scn)
720 return false;
721
722 if (gelf_getshdr(scn, &sh) != &sh)
723 return false;
724
725 if (sh.sh_flags & SHF_EXECINSTR)
726 return true;
727
728 return false;
729 }
730
bpf_object__elf_collect(struct bpf_object * obj)731 static int bpf_object__elf_collect(struct bpf_object *obj)
732 {
733 Elf *elf = obj->efile.elf;
734 GElf_Ehdr *ep = &obj->efile.ehdr;
735 Elf_Scn *scn = NULL;
736 int idx = 0, err = 0;
737
738 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
739 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
740 pr_warning("failed to get e_shstrndx from %s\n",
741 obj->path);
742 return -LIBBPF_ERRNO__FORMAT;
743 }
744
745 while ((scn = elf_nextscn(elf, scn)) != NULL) {
746 char *name;
747 GElf_Shdr sh;
748 Elf_Data *data;
749
750 idx++;
751 if (gelf_getshdr(scn, &sh) != &sh) {
752 pr_warning("failed to get section(%d) header from %s\n",
753 idx, obj->path);
754 err = -LIBBPF_ERRNO__FORMAT;
755 goto out;
756 }
757
758 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
759 if (!name) {
760 pr_warning("failed to get section(%d) name from %s\n",
761 idx, obj->path);
762 err = -LIBBPF_ERRNO__FORMAT;
763 goto out;
764 }
765
766 data = elf_getdata(scn, 0);
767 if (!data) {
768 pr_warning("failed to get section(%d) data from %s(%s)\n",
769 idx, name, obj->path);
770 err = -LIBBPF_ERRNO__FORMAT;
771 goto out;
772 }
773 pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
774 idx, name, (unsigned long)data->d_size,
775 (int)sh.sh_link, (unsigned long)sh.sh_flags,
776 (int)sh.sh_type);
777
778 if (strcmp(name, "license") == 0)
779 err = bpf_object__init_license(obj,
780 data->d_buf,
781 data->d_size);
782 else if (strcmp(name, "version") == 0)
783 err = bpf_object__init_kversion(obj,
784 data->d_buf,
785 data->d_size);
786 else if (strcmp(name, "maps") == 0)
787 obj->efile.maps_shndx = idx;
788 else if (strcmp(name, BTF_ELF_SEC) == 0) {
789 obj->btf = btf__new(data->d_buf, data->d_size,
790 __pr_debug);
791 if (IS_ERR(obj->btf)) {
792 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
793 BTF_ELF_SEC, PTR_ERR(obj->btf));
794 obj->btf = NULL;
795 }
796 } else if (sh.sh_type == SHT_SYMTAB) {
797 if (obj->efile.symbols) {
798 pr_warning("bpf: multiple SYMTAB in %s\n",
799 obj->path);
800 err = -LIBBPF_ERRNO__FORMAT;
801 } else {
802 obj->efile.symbols = data;
803 obj->efile.strtabidx = sh.sh_link;
804 }
805 } else if ((sh.sh_type == SHT_PROGBITS) &&
806 (sh.sh_flags & SHF_EXECINSTR) &&
807 (data->d_size > 0)) {
808 if (strcmp(name, ".text") == 0)
809 obj->efile.text_shndx = idx;
810 err = bpf_object__add_program(obj, data->d_buf,
811 data->d_size, name, idx);
812 if (err) {
813 char errmsg[STRERR_BUFSIZE];
814 char *cp = str_error(-err, errmsg, sizeof(errmsg));
815
816 pr_warning("failed to alloc program %s (%s): %s",
817 name, obj->path, cp);
818 }
819 } else if (sh.sh_type == SHT_REL) {
820 void *reloc = obj->efile.reloc;
821 int nr_reloc = obj->efile.nr_reloc + 1;
822 int sec = sh.sh_info; /* points to other section */
823
824 /* Only do relo for section with exec instructions */
825 if (!section_have_execinstr(obj, sec)) {
826 pr_debug("skip relo %s(%d) for section(%d)\n",
827 name, idx, sec);
828 continue;
829 }
830
831 reloc = reallocarray(reloc, nr_reloc,
832 sizeof(*obj->efile.reloc));
833 if (!reloc) {
834 pr_warning("realloc failed\n");
835 err = -ENOMEM;
836 } else {
837 int n = nr_reloc - 1;
838
839 obj->efile.reloc = reloc;
840 obj->efile.nr_reloc = nr_reloc;
841
842 obj->efile.reloc[n].shdr = sh;
843 obj->efile.reloc[n].data = data;
844 }
845 } else {
846 pr_debug("skip section(%d) %s\n", idx, name);
847 }
848 if (err)
849 goto out;
850 }
851
852 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
853 pr_warning("Corrupted ELF file: index of strtab invalid\n");
854 return LIBBPF_ERRNO__FORMAT;
855 }
856 if (obj->efile.maps_shndx >= 0) {
857 err = bpf_object__init_maps(obj);
858 if (err)
859 goto out;
860 }
861 err = bpf_object__init_prog_names(obj);
862 out:
863 return err;
864 }
865
866 static struct bpf_program *
bpf_object__find_prog_by_idx(struct bpf_object * obj,int idx)867 bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
868 {
869 struct bpf_program *prog;
870 size_t i;
871
872 for (i = 0; i < obj->nr_programs; i++) {
873 prog = &obj->programs[i];
874 if (prog->idx == idx)
875 return prog;
876 }
877 return NULL;
878 }
879
880 struct bpf_program *
bpf_object__find_program_by_title(struct bpf_object * obj,const char * title)881 bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
882 {
883 struct bpf_program *pos;
884
885 bpf_object__for_each_program(pos, obj) {
886 if (pos->section_name && !strcmp(pos->section_name, title))
887 return pos;
888 }
889 return NULL;
890 }
891
892 static int
bpf_program__collect_reloc(struct bpf_program * prog,GElf_Shdr * shdr,Elf_Data * data,struct bpf_object * obj)893 bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
894 Elf_Data *data, struct bpf_object *obj)
895 {
896 Elf_Data *symbols = obj->efile.symbols;
897 int text_shndx = obj->efile.text_shndx;
898 int maps_shndx = obj->efile.maps_shndx;
899 struct bpf_map *maps = obj->maps;
900 size_t nr_maps = obj->nr_maps;
901 int i, nrels;
902
903 pr_debug("collecting relocating info for: '%s'\n",
904 prog->section_name);
905 nrels = shdr->sh_size / shdr->sh_entsize;
906
907 prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
908 if (!prog->reloc_desc) {
909 pr_warning("failed to alloc memory in relocation\n");
910 return -ENOMEM;
911 }
912 prog->nr_reloc = nrels;
913
914 for (i = 0; i < nrels; i++) {
915 GElf_Sym sym;
916 GElf_Rel rel;
917 unsigned int insn_idx;
918 struct bpf_insn *insns = prog->insns;
919 size_t map_idx;
920
921 if (!gelf_getrel(data, i, &rel)) {
922 pr_warning("relocation: failed to get %d reloc\n", i);
923 return -LIBBPF_ERRNO__FORMAT;
924 }
925
926 if (!gelf_getsym(symbols,
927 GELF_R_SYM(rel.r_info),
928 &sym)) {
929 pr_warning("relocation: symbol %"PRIx64" not found\n",
930 GELF_R_SYM(rel.r_info));
931 return -LIBBPF_ERRNO__FORMAT;
932 }
933 pr_debug("relo for %lld value %lld name %d\n",
934 (long long) (rel.r_info >> 32),
935 (long long) sym.st_value, sym.st_name);
936
937 if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
938 pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
939 prog->section_name, sym.st_shndx);
940 return -LIBBPF_ERRNO__RELOC;
941 }
942
943 insn_idx = rel.r_offset / sizeof(struct bpf_insn);
944 pr_debug("relocation: insn_idx=%u\n", insn_idx);
945
946 if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
947 if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
948 pr_warning("incorrect bpf_call opcode\n");
949 return -LIBBPF_ERRNO__RELOC;
950 }
951 prog->reloc_desc[i].type = RELO_CALL;
952 prog->reloc_desc[i].insn_idx = insn_idx;
953 prog->reloc_desc[i].text_off = sym.st_value;
954 obj->has_pseudo_calls = true;
955 continue;
956 }
957
958 if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
959 pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
960 insn_idx, insns[insn_idx].code);
961 return -LIBBPF_ERRNO__RELOC;
962 }
963
964 /* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
965 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
966 if (maps[map_idx].offset == sym.st_value) {
967 pr_debug("relocation: find map %zd (%s) for insn %u\n",
968 map_idx, maps[map_idx].name, insn_idx);
969 break;
970 }
971 }
972
973 if (map_idx >= nr_maps) {
974 pr_warning("bpf relocation: map_idx %d large than %d\n",
975 (int)map_idx, (int)nr_maps - 1);
976 return -LIBBPF_ERRNO__RELOC;
977 }
978
979 prog->reloc_desc[i].type = RELO_LD64;
980 prog->reloc_desc[i].insn_idx = insn_idx;
981 prog->reloc_desc[i].map_idx = map_idx;
982 }
983 return 0;
984 }
985
bpf_map_find_btf_info(struct bpf_map * map,const struct btf * btf)986 static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
987 {
988 const struct btf_type *container_type;
989 const struct btf_member *key, *value;
990 struct bpf_map_def *def = &map->def;
991 const size_t max_name = 256;
992 char container_name[max_name];
993 __s64 key_size, value_size;
994 __s32 container_id;
995
996 if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
997 max_name) {
998 pr_warning("map:%s length of '____btf_map_%s' is too long\n",
999 map->name, map->name);
1000 return -EINVAL;
1001 }
1002
1003 container_id = btf__find_by_name(btf, container_name);
1004 if (container_id < 0) {
1005 pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
1006 map->name, container_name);
1007 return container_id;
1008 }
1009
1010 container_type = btf__type_by_id(btf, container_id);
1011 if (!container_type) {
1012 pr_warning("map:%s cannot find BTF type for container_id:%u\n",
1013 map->name, container_id);
1014 return -EINVAL;
1015 }
1016
1017 if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
1018 BTF_INFO_VLEN(container_type->info) < 2) {
1019 pr_warning("map:%s container_name:%s is an invalid container struct\n",
1020 map->name, container_name);
1021 return -EINVAL;
1022 }
1023
1024 key = (struct btf_member *)(container_type + 1);
1025 value = key + 1;
1026
1027 key_size = btf__resolve_size(btf, key->type);
1028 if (key_size < 0) {
1029 pr_warning("map:%s invalid BTF key_type_size\n",
1030 map->name);
1031 return key_size;
1032 }
1033
1034 if (def->key_size != key_size) {
1035 pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
1036 map->name, (__u32)key_size, def->key_size);
1037 return -EINVAL;
1038 }
1039
1040 value_size = btf__resolve_size(btf, value->type);
1041 if (value_size < 0) {
1042 pr_warning("map:%s invalid BTF value_type_size\n", map->name);
1043 return value_size;
1044 }
1045
1046 if (def->value_size != value_size) {
1047 pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
1048 map->name, (__u32)value_size, def->value_size);
1049 return -EINVAL;
1050 }
1051
1052 map->btf_key_type_id = key->type;
1053 map->btf_value_type_id = value->type;
1054
1055 return 0;
1056 }
1057
bpf_map__reuse_fd(struct bpf_map * map,int fd)1058 int bpf_map__reuse_fd(struct bpf_map *map, int fd)
1059 {
1060 struct bpf_map_info info = {};
1061 __u32 len = sizeof(info);
1062 int new_fd, err;
1063 char *new_name;
1064
1065 err = bpf_obj_get_info_by_fd(fd, &info, &len);
1066 if (err)
1067 return err;
1068
1069 new_name = strdup(info.name);
1070 if (!new_name)
1071 return -errno;
1072
1073 new_fd = open("/", O_RDONLY | O_CLOEXEC);
1074 if (new_fd < 0)
1075 goto err_free_new_name;
1076
1077 new_fd = dup3(fd, new_fd, O_CLOEXEC);
1078 if (new_fd < 0)
1079 goto err_close_new_fd;
1080
1081 err = zclose(map->fd);
1082 if (err)
1083 goto err_close_new_fd;
1084 free(map->name);
1085
1086 map->fd = new_fd;
1087 map->name = new_name;
1088 map->def.type = info.type;
1089 map->def.key_size = info.key_size;
1090 map->def.value_size = info.value_size;
1091 map->def.max_entries = info.max_entries;
1092 map->def.map_flags = info.map_flags;
1093 map->btf_key_type_id = info.btf_key_type_id;
1094 map->btf_value_type_id = info.btf_value_type_id;
1095
1096 return 0;
1097
1098 err_close_new_fd:
1099 close(new_fd);
1100 err_free_new_name:
1101 free(new_name);
1102 return -errno;
1103 }
1104
/*
 * Create a kernel map for every entry in obj->maps that does not already
 * have an fd (pre-set fds, e.g. from bpf_map__reuse_fd(), are skipped).
 * BTF key/value info is attached when available; if the kernel rejects a
 * BTF-annotated create, the map is retried once without BTF.
 * On failure, every fd created so far is closed before returning.
 */
static int
bpf_object__create_maps(struct bpf_object *obj)
{
	struct bpf_create_map_attr create_attr = {};
	unsigned int i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		struct bpf_map *map = &obj->maps[i];
		struct bpf_map_def *def = &map->def;
		char *cp, errmsg[STRERR_BUFSIZE];
		int *pfd = &map->fd;

		/* fd already set (reused map): nothing to create. */
		if (map->fd >= 0) {
			pr_debug("skip map create (preset) %s: fd=%d\n",
				 map->name, map->fd);
			continue;
		}

		create_attr.name = map->name;
		create_attr.map_ifindex = map->map_ifindex;
		create_attr.map_type = def->type;
		create_attr.map_flags = def->map_flags;
		create_attr.key_size = def->key_size;
		create_attr.value_size = def->value_size;
		create_attr.max_entries = def->max_entries;
		/* Reset BTF fields: create_attr is reused across iterations. */
		create_attr.btf_fd = 0;
		create_attr.btf_key_type_id = 0;
		create_attr.btf_value_type_id = 0;

		if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
			create_attr.btf_fd = btf__fd(obj->btf);
			create_attr.btf_key_type_id = map->btf_key_type_id;
			create_attr.btf_value_type_id = map->btf_value_type_id;
		}

		*pfd = bpf_create_map_xattr(&create_attr);
		if (*pfd < 0 && create_attr.btf_key_type_id) {
			/* Kernel may lack BTF support: retry without it. */
			cp = str_error(errno, errmsg, sizeof(errmsg));
			pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
				   map->name, cp, errno);
			create_attr.btf_fd = 0;
			create_attr.btf_key_type_id = 0;
			create_attr.btf_value_type_id = 0;
			map->btf_key_type_id = 0;
			map->btf_value_type_id = 0;
			*pfd = bpf_create_map_xattr(&create_attr);
		}

		if (*pfd < 0) {
			size_t j;

			/*
			 * NOTE(review): this returns the raw create result
			 * (likely -1 with errno set) rather than -errno;
			 * consider returning -errno — verify what callers
			 * expect before changing.
			 */
			err = *pfd;
			cp = str_error(errno, errmsg, sizeof(errmsg));
			pr_warning("failed to create map (name: '%s'): %s\n",
				   map->name, cp);
			/* Unwind: close everything created so far. */
			for (j = 0; j < i; j++)
				zclose(obj->maps[j].fd);
			return err;
		}
		pr_debug("create map %s: fd=%d\n", map->name, *pfd);
	}

	return 0;
}
1170
/*
 * Resolve a BPF-to-BPF (pseudo) call relocation in @prog.
 * On the first call relocation seen for this program, the whole .text
 * section (holding all subprograms) is appended to prog->insns;
 * afterwards the call instruction's immediate is adjusted to target the
 * appended copy. Returns 0 on success, negative libbpf error otherwise.
 */
static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
			struct reloc_desc *relo)
{
	struct bpf_insn *insn, *new_insn;
	struct bpf_program *text;
	size_t new_cnt;

	if (relo->type != RELO_CALL)
		return -LIBBPF_ERRNO__RELOC;

	/* .text itself must not carry call relocations into .text. */
	if (prog->idx == obj->efile.text_shndx) {
		pr_warning("relo in .text insn %d into off %d\n",
			   relo->insn_idx, relo->text_off);
		return -LIBBPF_ERRNO__RELOC;
	}

	/* main_prog_cnt != 0 means .text was already appended. */
	if (prog->main_prog_cnt == 0) {
		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
		if (!text) {
			pr_warning("no .text section found yet relo into text exist\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		new_cnt = prog->insns_cnt + text->insns_cnt;
		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
		if (!new_insn) {
			pr_warning("oom in prog realloc\n");
			return -ENOMEM;
		}
		/* Append the .text instructions after the main program. */
		memcpy(new_insn + prog->insns_cnt, text->insns,
		       text->insns_cnt * sizeof(*insn));
		prog->insns = new_insn;
		prog->main_prog_cnt = prog->insns_cnt;
		prog->insns_cnt = new_cnt;
		pr_debug("added %zd insn from %s to prog %s\n",
			 text->insns_cnt, text->section_name,
			 prog->section_name);
	}
	/* Rebase the call's imm so it is relative to the appended copy. */
	insn = &prog->insns[relo->insn_idx];
	insn->imm += prog->main_prog_cnt - relo->insn_idx;
	return 0;
}
1213
1214 static int
bpf_program__relocate(struct bpf_program * prog,struct bpf_object * obj)1215 bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
1216 {
1217 int i, err;
1218
1219 if (!prog || !prog->reloc_desc)
1220 return 0;
1221
1222 for (i = 0; i < prog->nr_reloc; i++) {
1223 if (prog->reloc_desc[i].type == RELO_LD64) {
1224 struct bpf_insn *insns = prog->insns;
1225 int insn_idx, map_idx;
1226
1227 insn_idx = prog->reloc_desc[i].insn_idx;
1228 map_idx = prog->reloc_desc[i].map_idx;
1229
1230 if (insn_idx >= (int)prog->insns_cnt) {
1231 pr_warning("relocation out of range: '%s'\n",
1232 prog->section_name);
1233 return -LIBBPF_ERRNO__RELOC;
1234 }
1235 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
1236 insns[insn_idx].imm = obj->maps[map_idx].fd;
1237 } else {
1238 err = bpf_program__reloc_text(prog, obj,
1239 &prog->reloc_desc[i]);
1240 if (err)
1241 return err;
1242 }
1243 }
1244
1245 zfree(&prog->reloc_desc);
1246 prog->nr_reloc = 0;
1247 return 0;
1248 }
1249
1250
1251 static int
bpf_object__relocate(struct bpf_object * obj)1252 bpf_object__relocate(struct bpf_object *obj)
1253 {
1254 struct bpf_program *prog;
1255 size_t i;
1256 int err;
1257
1258 for (i = 0; i < obj->nr_programs; i++) {
1259 prog = &obj->programs[i];
1260
1261 err = bpf_program__relocate(prog, obj);
1262 if (err) {
1263 pr_warning("failed to relocate '%s'\n",
1264 prog->section_name);
1265 return err;
1266 }
1267 }
1268 return 0;
1269 }
1270
bpf_object__collect_reloc(struct bpf_object * obj)1271 static int bpf_object__collect_reloc(struct bpf_object *obj)
1272 {
1273 int i, err;
1274
1275 if (!obj_elf_valid(obj)) {
1276 pr_warning("Internal error: elf object is closed\n");
1277 return -LIBBPF_ERRNO__INTERNAL;
1278 }
1279
1280 for (i = 0; i < obj->efile.nr_reloc; i++) {
1281 GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
1282 Elf_Data *data = obj->efile.reloc[i].data;
1283 int idx = shdr->sh_info;
1284 struct bpf_program *prog;
1285
1286 if (shdr->sh_type != SHT_REL) {
1287 pr_warning("internal error at %d\n", __LINE__);
1288 return -LIBBPF_ERRNO__INTERNAL;
1289 }
1290
1291 prog = bpf_object__find_prog_by_idx(obj, idx);
1292 if (!prog) {
1293 pr_warning("relocation failed: no section(%d)\n", idx);
1294 return -LIBBPF_ERRNO__RELOC;
1295 }
1296
1297 err = bpf_program__collect_reloc(prog,
1298 shdr, data,
1299 obj);
1300 if (err)
1301 return err;
1302 }
1303 return 0;
1304 }
1305
/*
 * Load a single BPF program into the kernel.
 * On success *pfd receives the new program fd and 0 is returned.
 * On failure a negative libbpf error code is returned; the verifier log
 * (if any) is dumped, and a few heuristics classify the failure:
 * verifier rejection, program too big, wrong program type, or (likely)
 * kernel version mismatch.
 */
static int
load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
	     const char *name, struct bpf_insn *insns, int insns_cnt,
	     char *license, u32 kern_version, int *pfd, int prog_ifindex)
{
	struct bpf_load_program_attr load_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = type;
	load_attr.expected_attach_type = expected_attach_type;
	load_attr.name = name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog_ifindex;

	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

	/* The log buffer is best-effort: loading proceeds without it. */
	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	cp = str_error(errno, errmsg, sizeof(errmsg));
	pr_warning("load bpf program failed: %s\n", cp);

	if (log_buf && log_buf[0] != '\0') {
		/* A non-empty log means the verifier rejected the program. */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? Probe by retrying as a kprobe. */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		/* Otherwise: most likely a kernel version mismatch. */
		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
1377
/*
 * Load all instances of @prog into the kernel.
 * Without a preprocessor there is exactly one instance, loaded from
 * prog->insns as-is. With a preprocessor, the instructions of each
 * instance come from the prep callback; instances it leaves empty are
 * skipped and get fd -1. prog->insns is freed on return either way.
 */
static int
bpf_program__load(struct bpf_program *prog,
		  char *license, u32 kern_version)
{
	int err = 0, fd, i;

	/* Lazily set up the single-instance bookkeeping. */
	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			/* set_prep() should have allocated the fds array. */
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* The preprocessor may skip an instance by emitting nothing. */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* Instructions are no longer needed once loaded. */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
1460
bpf_program__is_function_storage(struct bpf_program * prog,struct bpf_object * obj)1461 static bool bpf_program__is_function_storage(struct bpf_program *prog,
1462 struct bpf_object *obj)
1463 {
1464 return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
1465 }
1466
1467 static int
bpf_object__load_progs(struct bpf_object * obj)1468 bpf_object__load_progs(struct bpf_object *obj)
1469 {
1470 size_t i;
1471 int err;
1472
1473 for (i = 0; i < obj->nr_programs; i++) {
1474 if (bpf_program__is_function_storage(&obj->programs[i], obj))
1475 continue;
1476 err = bpf_program__load(&obj->programs[i],
1477 obj->license,
1478 obj->kern_version);
1479 if (err)
1480 return err;
1481 }
1482 return 0;
1483 }
1484
/*
 * Return true when objects holding programs of @type must provide a
 * kernel version (enforced by bpf_object__validate()).
 */
static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
{
	switch (type) {
	/* Networking/cgroup types do not need a kernel version. */
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
		return false;
	/* Tracing types — and anything unknown — still require one. */
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_KPROBE:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	default:
		return true;
	}
}
1515
/* Check that @obj carries a kernel version when the prog type needs one. */
static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
{
	if (!needs_kver || obj->kern_version != 0)
		return 0;

	pr_warning("%s doesn't provide kernel version\n", obj->path);
	return -LIBBPF_ERRNO__KVERSION;
}
1525
1526 static struct bpf_object *
__bpf_object__open(const char * path,void * obj_buf,size_t obj_buf_sz,bool needs_kver)1527 __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
1528 bool needs_kver)
1529 {
1530 struct bpf_object *obj;
1531 int err;
1532
1533 if (elf_version(EV_CURRENT) == EV_NONE) {
1534 pr_warning("failed to init libelf for %s\n", path);
1535 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
1536 }
1537
1538 obj = bpf_object__new(path, obj_buf, obj_buf_sz);
1539 if (IS_ERR(obj))
1540 return obj;
1541
1542 CHECK_ERR(bpf_object__elf_init(obj), err, out);
1543 CHECK_ERR(bpf_object__check_endianness(obj), err, out);
1544 CHECK_ERR(bpf_object__elf_collect(obj), err, out);
1545 CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
1546 CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);
1547
1548 bpf_object__elf_finish(obj);
1549 return obj;
1550 out:
1551 bpf_object__close(obj);
1552 return ERR_PTR(err);
1553 }
1554
bpf_object__open_xattr(struct bpf_object_open_attr * attr)1555 struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
1556 {
1557 /* param validation */
1558 if (!attr->file)
1559 return NULL;
1560
1561 pr_debug("loading %s\n", attr->file);
1562
1563 return __bpf_object__open(attr->file, NULL, 0,
1564 bpf_prog_type__needs_kver(attr->prog_type));
1565 }
1566
bpf_object__open(const char * path)1567 struct bpf_object *bpf_object__open(const char *path)
1568 {
1569 struct bpf_object_open_attr attr = {
1570 .file = path,
1571 .prog_type = BPF_PROG_TYPE_UNSPEC,
1572 };
1573
1574 return bpf_object__open_xattr(&attr);
1575 }
1576
bpf_object__open_buffer(void * obj_buf,size_t obj_buf_sz,const char * name)1577 struct bpf_object *bpf_object__open_buffer(void *obj_buf,
1578 size_t obj_buf_sz,
1579 const char *name)
1580 {
1581 char tmp_name[64];
1582
1583 /* param validation */
1584 if (!obj_buf || obj_buf_sz <= 0)
1585 return NULL;
1586
1587 if (!name) {
1588 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
1589 (unsigned long)obj_buf,
1590 (unsigned long)obj_buf_sz);
1591 tmp_name[sizeof(tmp_name) - 1] = '\0';
1592 name = tmp_name;
1593 }
1594 pr_debug("loading object '%s' from buffer\n",
1595 name);
1596
1597 return __bpf_object__open(name, obj_buf, obj_buf_sz, true);
1598 }
1599
bpf_object__unload(struct bpf_object * obj)1600 int bpf_object__unload(struct bpf_object *obj)
1601 {
1602 size_t i;
1603
1604 if (!obj)
1605 return -EINVAL;
1606
1607 for (i = 0; i < obj->nr_maps; i++)
1608 zclose(obj->maps[i].fd);
1609
1610 for (i = 0; i < obj->nr_programs; i++)
1611 bpf_program__unload(&obj->programs[i]);
1612
1613 return 0;
1614 }
1615
/*
 * Load all maps and programs of an opened object into the kernel.
 * May only be called once per object; on failure everything loaded so
 * far is torn down via bpf_object__unload().
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	/* Maps must exist before relocation patches their fds in. */
	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
1640
check_path(const char * path)1641 static int check_path(const char *path)
1642 {
1643 char *cp, errmsg[STRERR_BUFSIZE];
1644 struct statfs st_fs;
1645 char *dname, *dir;
1646 int err = 0;
1647
1648 if (path == NULL)
1649 return -EINVAL;
1650
1651 dname = strdup(path);
1652 if (dname == NULL)
1653 return -ENOMEM;
1654
1655 dir = dirname(dname);
1656 if (statfs(dir, &st_fs)) {
1657 cp = str_error(errno, errmsg, sizeof(errmsg));
1658 pr_warning("failed to statfs %s: %s\n", dir, cp);
1659 err = -errno;
1660 }
1661 free(dname);
1662
1663 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
1664 pr_warning("specified path %s is not on BPF FS\n", path);
1665 err = -EINVAL;
1666 }
1667
1668 return err;
1669 }
1670
bpf_program__pin_instance(struct bpf_program * prog,const char * path,int instance)1671 int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1672 int instance)
1673 {
1674 char *cp, errmsg[STRERR_BUFSIZE];
1675 int err;
1676
1677 err = check_path(path);
1678 if (err)
1679 return err;
1680
1681 if (prog == NULL) {
1682 pr_warning("invalid program pointer\n");
1683 return -EINVAL;
1684 }
1685
1686 if (instance < 0 || instance >= prog->instances.nr) {
1687 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1688 instance, prog->section_name, prog->instances.nr);
1689 return -EINVAL;
1690 }
1691
1692 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
1693 cp = str_error(errno, errmsg, sizeof(errmsg));
1694 pr_warning("failed to pin program: %s\n", cp);
1695 return -errno;
1696 }
1697 pr_debug("pinned program '%s'\n", path);
1698
1699 return 0;
1700 }
1701
make_dir(const char * path)1702 static int make_dir(const char *path)
1703 {
1704 char *cp, errmsg[STRERR_BUFSIZE];
1705 int err = 0;
1706
1707 if (mkdir(path, 0700) && errno != EEXIST)
1708 err = -errno;
1709
1710 if (err) {
1711 cp = str_error(-err, errmsg, sizeof(errmsg));
1712 pr_warning("failed to mkdir %s: %s\n", path, cp);
1713 }
1714 return err;
1715 }
1716
bpf_program__pin(struct bpf_program * prog,const char * path)1717 int bpf_program__pin(struct bpf_program *prog, const char *path)
1718 {
1719 int i, err;
1720
1721 err = check_path(path);
1722 if (err)
1723 return err;
1724
1725 if (prog == NULL) {
1726 pr_warning("invalid program pointer\n");
1727 return -EINVAL;
1728 }
1729
1730 if (prog->instances.nr <= 0) {
1731 pr_warning("no instances of prog %s to pin\n",
1732 prog->section_name);
1733 return -EINVAL;
1734 }
1735
1736 err = make_dir(path);
1737 if (err)
1738 return err;
1739
1740 for (i = 0; i < prog->instances.nr; i++) {
1741 char buf[PATH_MAX];
1742 int len;
1743
1744 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
1745 if (len < 0)
1746 return -EINVAL;
1747 else if (len >= PATH_MAX)
1748 return -ENAMETOOLONG;
1749
1750 err = bpf_program__pin_instance(prog, buf, i);
1751 if (err)
1752 return err;
1753 }
1754
1755 return 0;
1756 }
1757
bpf_map__pin(struct bpf_map * map,const char * path)1758 int bpf_map__pin(struct bpf_map *map, const char *path)
1759 {
1760 char *cp, errmsg[STRERR_BUFSIZE];
1761 int err;
1762
1763 err = check_path(path);
1764 if (err)
1765 return err;
1766
1767 if (map == NULL) {
1768 pr_warning("invalid map pointer\n");
1769 return -EINVAL;
1770 }
1771
1772 if (bpf_obj_pin(map->fd, path)) {
1773 cp = str_error(errno, errmsg, sizeof(errmsg));
1774 pr_warning("failed to pin map: %s\n", cp);
1775 return -errno;
1776 }
1777
1778 pr_debug("pinned map '%s'\n", path);
1779 return 0;
1780 }
1781
bpf_object__pin(struct bpf_object * obj,const char * path)1782 int bpf_object__pin(struct bpf_object *obj, const char *path)
1783 {
1784 struct bpf_program *prog;
1785 struct bpf_map *map;
1786 int err;
1787
1788 if (!obj)
1789 return -ENOENT;
1790
1791 if (!obj->loaded) {
1792 pr_warning("object not yet loaded; load it first\n");
1793 return -ENOENT;
1794 }
1795
1796 err = make_dir(path);
1797 if (err)
1798 return err;
1799
1800 bpf_map__for_each(map, obj) {
1801 char buf[PATH_MAX];
1802 int len;
1803
1804 len = snprintf(buf, PATH_MAX, "%s/%s", path,
1805 bpf_map__name(map));
1806 if (len < 0)
1807 return -EINVAL;
1808 else if (len >= PATH_MAX)
1809 return -ENAMETOOLONG;
1810
1811 err = bpf_map__pin(map, buf);
1812 if (err)
1813 return err;
1814 }
1815
1816 bpf_object__for_each_program(prog, obj) {
1817 char buf[PATH_MAX];
1818 int len;
1819
1820 len = snprintf(buf, PATH_MAX, "%s/%s", path,
1821 prog->section_name);
1822 if (len < 0)
1823 return -EINVAL;
1824 else if (len >= PATH_MAX)
1825 return -ENAMETOOLONG;
1826
1827 err = bpf_program__pin(prog, buf);
1828 if (err)
1829 return err;
1830 }
1831
1832 return 0;
1833 }
1834
/*
 * Free an object and everything it owns: private data, ELF state,
 * kernel-side maps/programs (via unload), BTF data, and per-map /
 * per-program allocations. Safe to call with NULL.
 */
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	/* Give the owner a chance to release its private data first. */
	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	/* Unlink from the global bpf_objects_list before freeing. */
	list_del(&obj->list);
	free(obj);
}
1869
1870 struct bpf_object *
bpf_object__next(struct bpf_object * prev)1871 bpf_object__next(struct bpf_object *prev)
1872 {
1873 struct bpf_object *next;
1874
1875 if (!prev)
1876 next = list_first_entry(&bpf_objects_list,
1877 struct bpf_object,
1878 list);
1879 else
1880 next = list_next_entry(prev, list);
1881
1882 /* Empty list is noticed here so don't need checking on entry. */
1883 if (&next->list == &bpf_objects_list)
1884 return NULL;
1885
1886 return next;
1887 }
1888
bpf_object__name(struct bpf_object * obj)1889 const char *bpf_object__name(struct bpf_object *obj)
1890 {
1891 return obj ? obj->path : ERR_PTR(-EINVAL);
1892 }
1893
bpf_object__kversion(struct bpf_object * obj)1894 unsigned int bpf_object__kversion(struct bpf_object *obj)
1895 {
1896 return obj ? obj->kern_version : 0;
1897 }
1898
bpf_object__btf_fd(const struct bpf_object * obj)1899 int bpf_object__btf_fd(const struct bpf_object *obj)
1900 {
1901 return obj->btf ? btf__fd(obj->btf) : -1;
1902 }
1903
/*
 * Attach caller private data to @obj; any previously attached data is
 * released through its clear_priv callback first.
 */
int bpf_object__set_priv(struct bpf_object *obj, void *priv,
			 bpf_object_clear_priv_t clear_priv)
{
	if (obj->priv && obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	obj->priv = priv;
	obj->clear_priv = clear_priv;
	return 0;
}
1914
bpf_object__priv(struct bpf_object * obj)1915 void *bpf_object__priv(struct bpf_object *obj)
1916 {
1917 return obj ? obj->priv : ERR_PTR(-EINVAL);
1918 }
1919
1920 static struct bpf_program *
__bpf_program__next(struct bpf_program * prev,struct bpf_object * obj)1921 __bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
1922 {
1923 size_t idx;
1924
1925 if (!obj->programs)
1926 return NULL;
1927 /* First handler */
1928 if (prev == NULL)
1929 return &obj->programs[0];
1930
1931 if (prev->obj != obj) {
1932 pr_warning("error: program handler doesn't match object\n");
1933 return NULL;
1934 }
1935
1936 idx = (prev - obj->programs) + 1;
1937 if (idx >= obj->nr_programs)
1938 return NULL;
1939 return &obj->programs[idx];
1940 }
1941
/*
 * Public program iterator: like __bpf_program__next() but skips
 * function-storage entries (.text holding subprograms).
 */
struct bpf_program *
bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
	struct bpf_program *prog;

	for (prog = __bpf_program__next(prev, obj);
	     prog && bpf_program__is_function_storage(prog, obj);
	     prog = __bpf_program__next(prog, obj))
		;

	return prog;
}
1953
/*
 * Attach caller private data to @prog; previously attached data is
 * released through its clear_priv callback first.
 */
int bpf_program__set_priv(struct bpf_program *prog, void *priv,
			  bpf_program_clear_priv_t clear_priv)
{
	if (prog->priv && prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = priv;
	prog->clear_priv = clear_priv;
	return 0;
}
1964
bpf_program__priv(struct bpf_program * prog)1965 void *bpf_program__priv(struct bpf_program *prog)
1966 {
1967 return prog ? prog->priv : ERR_PTR(-EINVAL);
1968 }
1969
/*
 * Set the ifindex passed as prog_ifindex in the load attributes when
 * the program is loaded (see load_program()).
 */
void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
	prog->prog_ifindex = ifindex;
}
1974
/*
 * Return the program's section name. With @needs_copy the caller gets
 * (and owns) a strdup'ed copy; ERR_PTR(-ENOMEM) on allocation failure.
 */
const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
{
	const char *title = prog->section_name;

	if (!needs_copy)
		return title;

	title = strdup(title);
	if (!title) {
		pr_warning("failed to strdup program title\n");
		return ERR_PTR(-ENOMEM);
	}
	return title;
}
1990
/* The fd of a program is the fd of its first (0th) instance. */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
1995
/*
 * Install a preprocessor callback producing @nr_instances variants of
 * @prog. Must be called before the program is loaded; allocates the
 * per-instance fd array, initialized to -1 ("not loaded").
 */
int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
			  bpf_program_prep_t prep)
{
	int *instances_fds;
	int i;

	if (nr_instances <= 0 || !prep)
		return -EINVAL;

	if (prog->instances.nr > 0 || prog->instances.fds) {
		pr_warning("Can't set pre-processor after loading\n");
		return -EINVAL;
	}

	instances_fds = malloc(sizeof(int) * nr_instances);
	if (!instances_fds) {
		pr_warning("alloc memory failed for fds\n");
		return -ENOMEM;
	}

	/* Mark every instance as not-yet-loaded. */
	for (i = 0; i < nr_instances; i++)
		instances_fds[i] = -1;

	prog->instances.nr = nr_instances;
	prog->instances.fds = instances_fds;
	prog->preprocessor = prep;
	return 0;
}
2023
bpf_program__nth_fd(struct bpf_program * prog,int n)2024 int bpf_program__nth_fd(struct bpf_program *prog, int n)
2025 {
2026 int fd;
2027
2028 if (!prog)
2029 return -EINVAL;
2030
2031 if (n >= prog->instances.nr || n < 0) {
2032 pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
2033 n, prog->section_name, prog->instances.nr);
2034 return -EINVAL;
2035 }
2036
2037 fd = prog->instances.fds[n];
2038 if (fd < 0) {
2039 pr_warning("%dth instance of program '%s' is invalid\n",
2040 n, prog->section_name);
2041 return -ENOENT;
2042 }
2043
2044 return fd;
2045 }
2046
/* Set the BPF program type used when @prog is loaded into the kernel. */
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}
2051
bpf_program__is_type(struct bpf_program * prog,enum bpf_prog_type type)2052 static bool bpf_program__is_type(struct bpf_program *prog,
2053 enum bpf_prog_type type)
2054 {
2055 return prog ? (prog->type == type) : false;
2056 }
2057
/*
 * Generate the bpf_program__set_<NAME>() / bpf_program__is_<NAME>()
 * accessor pairs for the program types instantiated below.
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}							\

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
2080
/* Set the expected attach type passed in the program load attributes. */
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}
2086
/* Table-entry helpers: (section prefix, prog type, expected attach type). */
#define BPF_PROG_SEC_FULL(string, ptype, atype) \
	{ string, sizeof(string) - 1, ptype, atype }

#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_FULL(string, ptype, 0)

#define BPF_S_PROG_SEC(string, ptype) \
	BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK, ptype)

#define BPF_SA_PROG_SEC(string, ptype) \
	BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, ptype)

/*
 * Mapping from ELF section-name prefix to program type and expected
 * attach type; consumed by libbpf_prog_type_by_name() via prefix match.
 */
static const struct {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
	BPF_PROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK),
	BPF_PROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE),
	BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_PROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS),
	BPF_PROG_SEC("sk_skb", BPF_PROG_TYPE_SK_SKB),
	BPF_PROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG),
	BPF_PROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2),
	BPF_SA_PROG_SEC("cgroup/bind4", BPF_CGROUP_INET4_BIND),
	BPF_SA_PROG_SEC("cgroup/bind6", BPF_CGROUP_INET6_BIND),
	BPF_SA_PROG_SEC("cgroup/connect4", BPF_CGROUP_INET4_CONNECT),
	BPF_SA_PROG_SEC("cgroup/connect6", BPF_CGROUP_INET6_CONNECT),
	BPF_SA_PROG_SEC("cgroup/sendmsg4", BPF_CGROUP_UDP4_SENDMSG),
	BPF_SA_PROG_SEC("cgroup/sendmsg6", BPF_CGROUP_UDP6_SENDMSG),
	BPF_S_PROG_SEC("cgroup/post_bind4", BPF_CGROUP_INET4_POST_BIND),
	BPF_S_PROG_SEC("cgroup/post_bind6", BPF_CGROUP_INET6_POST_BIND),
};

/* The helpers are only needed to build the table above. */
#undef BPF_PROG_SEC
#undef BPF_PROG_SEC_FULL
#undef BPF_S_PROG_SEC
#undef BPF_SA_PROG_SEC
2138
libbpf_prog_type_by_name(const char * name,enum bpf_prog_type * prog_type,enum bpf_attach_type * expected_attach_type)2139 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
2140 enum bpf_attach_type *expected_attach_type)
2141 {
2142 int i;
2143
2144 if (!name)
2145 return -EINVAL;
2146
2147 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2148 if (strncmp(name, section_names[i].sec, section_names[i].len))
2149 continue;
2150 *prog_type = section_names[i].prog_type;
2151 *expected_attach_type = section_names[i].expected_attach_type;
2152 return 0;
2153 }
2154 return -EINVAL;
2155 }
2156
/*
 * Derive @prog's program type and expected attach type from its ELF
 * section name (see the section_names table).
 */
static int
bpf_program__identify_section(struct bpf_program *prog,
			      enum bpf_prog_type *prog_type,
			      enum bpf_attach_type *expected_attach_type)
{
	return libbpf_prog_type_by_name(prog->section_name, prog_type,
					expected_attach_type);
}
2165
bpf_map__fd(struct bpf_map * map)2166 int bpf_map__fd(struct bpf_map *map)
2167 {
2168 return map ? map->fd : -EINVAL;
2169 }
2170
bpf_map__def(struct bpf_map * map)2171 const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
2172 {
2173 return map ? &map->def : ERR_PTR(-EINVAL);
2174 }
2175
bpf_map__name(struct bpf_map * map)2176 const char *bpf_map__name(struct bpf_map *map)
2177 {
2178 return map ? map->name : NULL;
2179 }
2180
bpf_map__btf_key_type_id(const struct bpf_map * map)2181 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
2182 {
2183 return map ? map->btf_key_type_id : 0;
2184 }
2185
bpf_map__btf_value_type_id(const struct bpf_map * map)2186 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
2187 {
2188 return map ? map->btf_value_type_id : 0;
2189 }
2190
/*
 * Attach caller private data to @map; previously attached data is
 * released through its clear_priv callback first.
 */
int bpf_map__set_priv(struct bpf_map *map, void *priv,
		      bpf_map_clear_priv_t clear_priv)
{
	if (!map)
		return -EINVAL;

	if (map->priv && map->clear_priv)
		map->clear_priv(map, map->priv);

	map->priv = priv;
	map->clear_priv = clear_priv;
	return 0;
}
2206
bpf_map__priv(struct bpf_map * map)2207 void *bpf_map__priv(struct bpf_map *map)
2208 {
2209 return map ? map->priv : ERR_PTR(-EINVAL);
2210 }
2211
/* A map is "offload neutral" when it is a perf event array. */
bool bpf_map__is_offload_neutral(struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}
2216
/*
 * Set the ifindex passed as map_ifindex in the map-create attributes
 * (see bpf_object__create_maps()).
 */
void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	map->map_ifindex = ifindex;
}
2221
2222 struct bpf_map *
bpf_map__next(struct bpf_map * prev,struct bpf_object * obj)2223 bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
2224 {
2225 size_t idx;
2226 struct bpf_map *s, *e;
2227
2228 if (!obj || !obj->maps)
2229 return NULL;
2230
2231 s = obj->maps;
2232 e = obj->maps + obj->nr_maps;
2233
2234 if (prev == NULL)
2235 return s;
2236
2237 if ((prev < s) || (prev >= e)) {
2238 pr_warning("error in %s: map handler doesn't belong to object\n",
2239 __func__);
2240 return NULL;
2241 }
2242
2243 idx = (prev - obj->maps) + 1;
2244 if (idx >= obj->nr_maps)
2245 return NULL;
2246 return &obj->maps[idx];
2247 }
2248
2249 struct bpf_map *
bpf_object__find_map_by_name(struct bpf_object * obj,const char * name)2250 bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
2251 {
2252 struct bpf_map *pos;
2253
2254 bpf_map__for_each(pos, obj) {
2255 if (pos->name && !strcmp(pos->name, name))
2256 return pos;
2257 }
2258 return NULL;
2259 }
2260
2261 struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object * obj,size_t offset)2262 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
2263 {
2264 int i;
2265
2266 for (i = 0; i < obj->nr_maps; i++) {
2267 if (obj->maps[i].offset == offset)
2268 return &obj->maps[i];
2269 }
2270 return ERR_PTR(-ENOENT);
2271 }
2272
/* Decode a libbpf pointer-or-error return value: a negative errno if
 * @ptr encodes an error, 0 for a valid pointer (including NULL).
 */
long libbpf_get_error(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}
2279
bpf_prog_load(const char * file,enum bpf_prog_type type,struct bpf_object ** pobj,int * prog_fd)2280 int bpf_prog_load(const char *file, enum bpf_prog_type type,
2281 struct bpf_object **pobj, int *prog_fd)
2282 {
2283 struct bpf_prog_load_attr attr;
2284
2285 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2286 attr.file = file;
2287 attr.prog_type = type;
2288 attr.expected_attach_type = 0;
2289
2290 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
2291 }
2292
bpf_prog_load_xattr(const struct bpf_prog_load_attr * attr,struct bpf_object ** pobj,int * prog_fd)2293 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
2294 struct bpf_object **pobj, int *prog_fd)
2295 {
2296 struct bpf_object_open_attr open_attr = {
2297 .file = attr->file,
2298 .prog_type = attr->prog_type,
2299 };
2300 struct bpf_program *prog, *first_prog = NULL;
2301 enum bpf_attach_type expected_attach_type;
2302 enum bpf_prog_type prog_type;
2303 struct bpf_object *obj;
2304 struct bpf_map *map;
2305 int err;
2306
2307 if (!attr)
2308 return -EINVAL;
2309 if (!attr->file)
2310 return -EINVAL;
2311
2312 obj = bpf_object__open_xattr(&open_attr);
2313 if (IS_ERR_OR_NULL(obj))
2314 return -ENOENT;
2315
2316 bpf_object__for_each_program(prog, obj) {
2317 /*
2318 * If type is not specified, try to guess it based on
2319 * section name.
2320 */
2321 prog_type = attr->prog_type;
2322 prog->prog_ifindex = attr->ifindex;
2323 expected_attach_type = attr->expected_attach_type;
2324 if (prog_type == BPF_PROG_TYPE_UNSPEC) {
2325 err = bpf_program__identify_section(prog, &prog_type,
2326 &expected_attach_type);
2327 if (err < 0) {
2328 pr_warning("failed to guess program type based on section name %s\n",
2329 prog->section_name);
2330 bpf_object__close(obj);
2331 return -EINVAL;
2332 }
2333 }
2334
2335 bpf_program__set_type(prog, prog_type);
2336 bpf_program__set_expected_attach_type(prog,
2337 expected_attach_type);
2338
2339 if (!bpf_program__is_function_storage(prog, obj) && !first_prog)
2340 first_prog = prog;
2341 }
2342
2343 bpf_map__for_each(map, obj) {
2344 if (!bpf_map__is_offload_neutral(map))
2345 map->map_ifindex = attr->ifindex;
2346 }
2347
2348 if (!first_prog) {
2349 pr_warning("object file doesn't contain bpf program\n");
2350 bpf_object__close(obj);
2351 return -ENOENT;
2352 }
2353
2354 err = bpf_object__load(obj);
2355 if (err) {
2356 bpf_object__close(obj);
2357 return -EINVAL;
2358 }
2359
2360 *pobj = obj;
2361 *prog_fd = bpf_program__fd(first_prog);
2362 return 0;
2363 }
2364
/*
 * Consume records from a perf event mmap ring buffer, invoking @fn on
 * each record between the kernel's data_head and our data_tail.
 *
 * @mem:      base of the perf mmap region; the first @page_size bytes
 *            are the control page (struct perf_event_mmap_page), data
 *            follows
 * @size:     size of the data area in bytes
 * @page_size: system page size (offset from @mem to the data area)
 * @buf, @buf_len: caller-owned scratch buffer, reallocated on demand to
 *            reassemble records that wrap past the end of the ring;
 *            caller frees *buf
 * @fn:       callback per record; return LIBBPF_PERF_EVENT_CONT to keep
 *            consuming, anything else stops the loop
 * @priv:     opaque cookie passed through to @fn
 *
 * Returns the last @fn verdict, LIBBPF_PERF_EVENT_CONT if the ring was
 * empty, or LIBBPF_PERF_EVENT_ERROR on allocation failure.
 */
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mem, unsigned long size,
			   unsigned long page_size, void **buf, size_t *buf_len,
			   bpf_perf_event_print_t fn, void *priv)
{
	/* Control page is volatile: the kernel updates data_head
	 * concurrently.
	 */
	volatile struct perf_event_mmap_page *header = mem;
	__u64 data_tail = header->data_tail;
	__u64 data_head = header->data_head;
	int ret = LIBBPF_PERF_EVENT_ERROR;
	void *base, *begin, *end;

	asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
	if (data_head == data_tail)
		return LIBBPF_PERF_EVENT_CONT;

	/* Data area starts one page after the control page. */
	base = ((char *)header) + page_size;

	/* head/tail are free-running counters; modulo maps into the ring. */
	begin = base + data_tail % size;
	end = base + data_head % size;

	while (begin != end) {
		struct perf_event_header *ehdr;

		ehdr = begin;
		if (begin + ehdr->size > base + size) {
			/* Record wraps around the ring end: copy the two
			 * halves into *buf so @fn sees one contiguous record.
			 */
			long len = base + size - begin;

			if (*buf_len < ehdr->size) {
				free(*buf);
				*buf = malloc(ehdr->size);
				if (!*buf) {
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*buf_len = ehdr->size;
			}

			memcpy(*buf, begin, len);
			memcpy(*buf + len, base, ehdr->size - len);
			ehdr = (void *)*buf;
			begin = base + ehdr->size - len;
		} else if (begin + ehdr->size == base + size) {
			/* Record ends exactly at the ring boundary. */
			begin = base;
		} else {
			begin += ehdr->size;
		}

		ret = fn(ehdr, priv);
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;

		data_tail += ehdr->size;
	}

	/* Full barrier before publishing the new tail, so the kernel only
	 * reuses space we have finished reading.
	 */
	__sync_synchronize(); /* smp_mb() */
	header->data_tail = data_tail;

	return ret;
}
2424