1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Linux Socket Filter - Kernel level socket filtering
4 *
5 * Based on the design of the Berkeley Packet Filter. The new
6 * internal format has been designed by PLUMgrid:
7 *
8 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
9 *
10 * Authors:
11 *
12 * Jay Schulist <jschlst@samba.org>
13 * Alexei Starovoitov <ast@plumgrid.com>
14 * Daniel Borkmann <dborkman@redhat.com>
15 *
16 * Andi Kleen - Fix a few bad bugs and races.
17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
18 */
19
20 #include <uapi/linux/btf.h>
21 #include <linux/filter.h>
22 #include <linux/skbuff.h>
23 #include <linux/vmalloc.h>
24 #include <linux/random.h>
25 #include <linux/moduleloader.h>
26 #include <linux/bpf.h>
27 #include <linux/btf.h>
28 #include <linux/objtool.h>
29 #include <linux/rbtree_latch.h>
30 #include <linux/kallsyms.h>
31 #include <linux/rcupdate.h>
32 #include <linux/perf_event.h>
33 #include <linux/extable.h>
34 #include <linux/log2.h>
35
36 #include <asm/barrier.h>
37 #include <asm/unaligned.h>
38
39 /* Registers */
40 #define BPF_R0 regs[BPF_REG_0]
41 #define BPF_R1 regs[BPF_REG_1]
42 #define BPF_R2 regs[BPF_REG_2]
43 #define BPF_R3 regs[BPF_REG_3]
44 #define BPF_R4 regs[BPF_REG_4]
45 #define BPF_R5 regs[BPF_REG_5]
46 #define BPF_R6 regs[BPF_REG_6]
47 #define BPF_R7 regs[BPF_REG_7]
48 #define BPF_R8 regs[BPF_REG_8]
49 #define BPF_R9 regs[BPF_REG_9]
50 #define BPF_R10 regs[BPF_REG_10]
51
52 /* Named registers */
53 #define DST regs[insn->dst_reg]
54 #define SRC regs[insn->src_reg]
55 #define FP regs[BPF_REG_FP]
56 #define AX regs[BPF_REG_AX]
57 #define ARG1 regs[BPF_REG_ARG1]
58 #define CTX regs[BPF_REG_CTX]
59 #define IMM insn->imm
60
61 /* No hurry in this branch
62 *
63 * Exported for the bpf jit load helper.
64 */
65 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
66 {
67 u8 *ptr = NULL;
68
69 if (k >= SKF_NET_OFF)
70 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
71 else if (k >= SKF_LL_OFF)
72 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
73
74 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
75 return ptr;
76
77 return NULL;
78 }
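/* Illustrative sketch (not part of the kernel sources; offsets are
 * hypothetical): a classic BPF filter that wants the first byte of the
 * IP header ends up doing the equivalent of
 *
 *	u8 *p = bpf_internal_load_pointer_neg_helper(skb, SKF_NET_OFF + 0, 1);
 *	if (p)
 *		first_byte = *p;
 *
 * i.e. the SKF_NET_OFF/SKF_LL_OFF "negative" offsets select the network
 * or link-layer header as the base instead of the packet payload.
 */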
79
80 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
81 {
82 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
83 struct bpf_prog_aux *aux;
84 struct bpf_prog *fp;
85
86 size = round_up(size, PAGE_SIZE);
87 fp = __vmalloc(size, gfp_flags);
88 if (fp == NULL)
89 return NULL;
90
91 aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
92 if (aux == NULL) {
93 vfree(fp);
94 return NULL;
95 }
96 fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
97 if (!fp->active) {
98 vfree(fp);
99 kfree(aux);
100 return NULL;
101 }
102
103 fp->pages = size / PAGE_SIZE;
104 fp->aux = aux;
105 fp->aux->prog = fp;
106 fp->jit_requested = ebpf_jit_enabled();
107
108 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
109 mutex_init(&fp->aux->used_maps_mutex);
110 mutex_init(&fp->aux->dst_mutex);
111
112 return fp;
113 }
114
115 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
116 {
117 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
118 struct bpf_prog *prog;
119 int cpu;
120
121 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
122 if (!prog)
123 return NULL;
124
125 prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
126 if (!prog->stats) {
127 free_percpu(prog->active);
128 kfree(prog->aux);
129 vfree(prog);
130 return NULL;
131 }
132
133 for_each_possible_cpu(cpu) {
134 struct bpf_prog_stats *pstats;
135
136 pstats = per_cpu_ptr(prog->stats, cpu);
137 u64_stats_init(&pstats->syncp);
138 }
139 return prog;
140 }
141 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
142
143 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
144 {
145 if (!prog->aux->nr_linfo || !prog->jit_requested)
146 return 0;
147
148 prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
149 sizeof(*prog->aux->jited_linfo),
150 GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
151 if (!prog->aux->jited_linfo)
152 return -ENOMEM;
153
154 return 0;
155 }
156
157 void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
158 {
159 if (prog->aux->jited_linfo &&
160 (!prog->jited || !prog->aux->jited_linfo[0])) {
161 kvfree(prog->aux->jited_linfo);
162 prog->aux->jited_linfo = NULL;
163 }
164
165 kfree(prog->aux->kfunc_tab);
166 prog->aux->kfunc_tab = NULL;
167 }
168
169 /* The JIT engine is responsible for providing an array
170 * for the insn_off to jited_off mapping (insn_to_jit_off).
171 *
172 * The idx to this array is the insn_off. Hence, the insn_off
173 * here is relative to the prog itself instead of the main prog.
174 * This array has one entry for each xlated bpf insn.
175 *
176 * jited_off is the byte off to the last byte of the jited insn.
177 *
178 * Hence, with
179 * insn_start:
180 * The first bpf insn off of the prog. The insn off
181 * here is relative to the main prog.
182 * e.g. if prog is a subprog, insn_start > 0
183 * linfo_idx:
184 * The prog's idx to prog->aux->linfo and jited_linfo
185 *
186 * jited_linfo[linfo_idx] = prog->bpf_func
187 *
188 * For i > linfo_idx,
189 *
190 * jited_linfo[i] = prog->bpf_func +
191 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
192 */
193 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
194 const u32 *insn_to_jit_off)
195 {
196 u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
197 const struct bpf_line_info *linfo;
198 void **jited_linfo;
199
200 if (!prog->aux->jited_linfo)
201 /* Userspace did not provide linfo */
202 return;
203
204 linfo_idx = prog->aux->linfo_idx;
205 linfo = &prog->aux->linfo[linfo_idx];
206 insn_start = linfo[0].insn_off;
207 insn_end = insn_start + prog->len;
208
209 jited_linfo = &prog->aux->jited_linfo[linfo_idx];
210 jited_linfo[0] = prog->bpf_func;
211
212 nr_linfo = prog->aux->nr_linfo - linfo_idx;
213
214 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
215 /* The verifier ensures that linfo[i].insn_off is
216 * strictly increasing
217 */
218 jited_linfo[i] = prog->bpf_func +
219 insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
220 }
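/* Worked example (hypothetical numbers, for illustration only): assume a
 * subprog with insn_start = 10 and line info entries at insn_off 10, 12
 * and 15, with insn_to_jit_off[] indexed relative to the subprog. Then:
 *
 *	jited_linfo[0] = prog->bpf_func
 *	jited_linfo[1] = prog->bpf_func + insn_to_jit_off[12 - 10 - 1]
 *	jited_linfo[2] = prog->bpf_func + insn_to_jit_off[15 - 10 - 1]
 *
 * i.e. each entry points just past the jited image of the xlated insn
 * preceding its insn_off, which is where the insn at insn_off begins.
 */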
221
222 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
223 gfp_t gfp_extra_flags)
224 {
225 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
226 struct bpf_prog *fp;
227 u32 pages;
228
229 size = round_up(size, PAGE_SIZE);
230 pages = size / PAGE_SIZE;
231 if (pages <= fp_old->pages)
232 return fp_old;
233
234 fp = __vmalloc(size, gfp_flags);
235 if (fp) {
236 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
237 fp->pages = pages;
238 fp->aux->prog = fp;
239
240 /* We keep fp->aux from fp_old around in the new
241 * reallocated structure.
242 */
243 fp_old->aux = NULL;
244 fp_old->stats = NULL;
245 fp_old->active = NULL;
246 __bpf_prog_free(fp_old);
247 }
248
249 return fp;
250 }
251
252 void __bpf_prog_free(struct bpf_prog *fp)
253 {
254 if (fp->aux) {
255 mutex_destroy(&fp->aux->used_maps_mutex);
256 mutex_destroy(&fp->aux->dst_mutex);
257 kfree(fp->aux->poke_tab);
258 kfree(fp->aux);
259 }
260 free_percpu(fp->stats);
261 free_percpu(fp->active);
262 vfree(fp);
263 }
264
265 int bpf_prog_calc_tag(struct bpf_prog *fp)
266 {
267 const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
268 u32 raw_size = bpf_prog_tag_scratch_size(fp);
269 u32 digest[SHA1_DIGEST_WORDS];
270 u32 ws[SHA1_WORKSPACE_WORDS];
271 u32 i, bsize, psize, blocks;
272 struct bpf_insn *dst;
273 bool was_ld_map;
274 u8 *raw, *todo;
275 __be32 *result;
276 __be64 *bits;
277
278 raw = vmalloc(raw_size);
279 if (!raw)
280 return -ENOMEM;
281
282 sha1_init(digest);
283 memset(ws, 0, sizeof(ws));
284
285 /* We need to take out the map fd for the digest calculation
286 * since map fds are unstable from the user space side.
287 */
288 dst = (void *)raw;
289 for (i = 0, was_ld_map = false; i < fp->len; i++) {
290 dst[i] = fp->insnsi[i];
291 if (!was_ld_map &&
292 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
293 (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
294 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
295 was_ld_map = true;
296 dst[i].imm = 0;
297 } else if (was_ld_map &&
298 dst[i].code == 0 &&
299 dst[i].dst_reg == 0 &&
300 dst[i].src_reg == 0 &&
301 dst[i].off == 0) {
302 was_ld_map = false;
303 dst[i].imm = 0;
304 } else {
305 was_ld_map = false;
306 }
307 }
308
309 psize = bpf_prog_insn_size(fp);
310 memset(&raw[psize], 0, raw_size - psize);
311 raw[psize++] = 0x80;
312
313 bsize = round_up(psize, SHA1_BLOCK_SIZE);
314 blocks = bsize / SHA1_BLOCK_SIZE;
315 todo = raw;
316 if (bsize - psize >= sizeof(__be64)) {
317 bits = (__be64 *)(todo + bsize - sizeof(__be64));
318 } else {
319 bits = (__be64 *)(todo + bsize + bits_offset);
320 blocks++;
321 }
322 *bits = cpu_to_be64((psize - 1) << 3);
323
324 while (blocks--) {
325 sha1_transform(digest, todo, ws);
326 todo += SHA1_BLOCK_SIZE;
327 }
328
329 result = (__force __be32 *)digest;
330 for (i = 0; i < SHA1_DIGEST_WORDS; i++)
331 result[i] = cpu_to_be32(digest[i]);
332 memcpy(fp->tag, result, sizeof(fp->tag));
333
334 vfree(raw);
335 return 0;
336 }
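/* Illustrative sketch of the padding math above (hypothetical sizes, not
 * part of the kernel sources): for a 2-insn program, psize starts at
 * 2 * sizeof(struct bpf_insn) = 16, becomes 17 after the 0x80 marker, and
 * bsize = round_up(17, SHA1_BLOCK_SIZE) = 64. Since 64 - 17 >= 8, the
 * 64-bit bit length (16 * 8 = 128, counting only the pre-marker bytes)
 * fits into the same block and a single sha1_transform() round digests
 * the whole program.
 */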
337
338 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
339 s32 end_new, s32 curr, const bool probe_pass)
340 {
341 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
342 s32 delta = end_new - end_old;
343 s64 imm = insn->imm;
344
345 if (curr < pos && curr + imm + 1 >= end_old)
346 imm += delta;
347 else if (curr >= end_new && curr + imm + 1 < end_new)
348 imm -= delta;
349 if (imm < imm_min || imm > imm_max)
350 return -ERANGE;
351 if (!probe_pass)
352 insn->imm = imm;
353 return 0;
354 }
355
356 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
357 s32 end_new, s32 curr, const bool probe_pass)
358 {
359 const s32 off_min = S16_MIN, off_max = S16_MAX;
360 s32 delta = end_new - end_old;
361 s32 off = insn->off;
362
363 if (curr < pos && curr + off + 1 >= end_old)
364 off += delta;
365 else if (curr >= end_new && curr + off + 1 < end_new)
366 off -= delta;
367 if (off < off_min || off > off_max)
368 return -ERANGE;
369 if (!probe_pass)
370 insn->off = off;
371 return 0;
372 }
373
374 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
375 s32 end_new, const bool probe_pass)
376 {
377 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
378 struct bpf_insn *insn = prog->insnsi;
379 int ret = 0;
380
381 for (i = 0; i < insn_cnt; i++, insn++) {
382 u8 code;
383
384 /* In the probing pass we still operate on the original,
385 * unpatched image in order to check overflows before we
386 * do any other adjustments. Therefore skip the patchlet.
387 */
388 if (probe_pass && i == pos) {
389 i = end_new;
390 insn = prog->insnsi + end_old;
391 }
392 code = insn->code;
393 if ((BPF_CLASS(code) != BPF_JMP &&
394 BPF_CLASS(code) != BPF_JMP32) ||
395 BPF_OP(code) == BPF_EXIT)
396 continue;
397 /* Adjust offset of jmps if we cross patch boundaries. */
398 if (BPF_OP(code) == BPF_CALL) {
399 if (insn->src_reg != BPF_PSEUDO_CALL)
400 continue;
401 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
402 end_new, i, probe_pass);
403 } else {
404 ret = bpf_adj_delta_to_off(insn, pos, end_old,
405 end_new, i, probe_pass);
406 }
407 if (ret)
408 break;
409 }
410
411 return ret;
412 }
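/* Worked example (hypothetical, for illustration only): patching the insn
 * at pos = 5 with a 3-insn patchlet gives end_old = 6 and end_new = 8, so
 * delta = 2. A "goto +4" at insn 3 that used to land on insn 8 (>= end_old)
 * now needs off += 2 to keep targeting the same logical insn, while a jump
 * located after the patchlet that targets an insn before it gets off -= 2.
 */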
413
414 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
415 {
416 struct bpf_line_info *linfo;
417 u32 i, nr_linfo;
418
419 nr_linfo = prog->aux->nr_linfo;
420 if (!nr_linfo || !delta)
421 return;
422
423 linfo = prog->aux->linfo;
424
425 for (i = 0; i < nr_linfo; i++)
426 if (off < linfo[i].insn_off)
427 break;
428
429 /* Push all off < linfo[i].insn_off by delta */
430 for (; i < nr_linfo; i++)
431 linfo[i].insn_off += delta;
432 }
433
434 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
435 const struct bpf_insn *patch, u32 len)
436 {
437 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
438 const u32 cnt_max = S16_MAX;
439 struct bpf_prog *prog_adj;
440 int err;
441
442 /* Since our patchlet doesn't expand the image, we're done. */
443 if (insn_delta == 0) {
444 memcpy(prog->insnsi + off, patch, sizeof(*patch));
445 return prog;
446 }
447
448 insn_adj_cnt = prog->len + insn_delta;
449
450 /* Reject anything that would potentially let the insn->off
451 * target overflow when we have excessive program expansions.
452 * We need to probe here before we do any reallocation where
453 * we afterwards may not fail anymore.
454 */
455 if (insn_adj_cnt > cnt_max &&
456 (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
457 return ERR_PTR(err);
458
459 /* Several new instructions need to be inserted. Make room
460 * for them. Likely, there's no need for a new allocation as
461 * the last page could have large enough tailroom.
462 */
463 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
464 GFP_USER);
465 if (!prog_adj)
466 return ERR_PTR(-ENOMEM);
467
468 prog_adj->len = insn_adj_cnt;
469
470 /* Patching happens in 3 steps:
471 *
472 * 1) Move over tail of insnsi from next instruction onwards,
473 * so we can patch the single target insn with one or more
474 * new ones (patching is always from 1 to n insns, n > 0).
475 * 2) Inject new instructions at the target location.
476 * 3) Adjust branch offsets if necessary.
477 */
478 insn_rest = insn_adj_cnt - off - len;
479
480 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
481 sizeof(*patch) * insn_rest);
482 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
483
484 /* We are guaranteed not to fail at this point, otherwise it would
485 * be too late to revert to the original state. An overflow cannot
486 * happen at this point.
487 */
488 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
489
490 bpf_adj_linfo(prog_adj, off, insn_delta);
491
492 return prog_adj;
493 }
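/* Usage sketch (assumption about the caller, for illustration only): the
 * verifier typically rewrites one insn into several like this, always
 * re-reading the returned program since the realloc may have moved it:
 *
 *	struct bpf_insn patch[] = { ... };	// n replacement insns
 *	new_prog = bpf_patch_insn_single(env->prog, i, patch, ARRAY_SIZE(patch));
 *	if (IS_ERR(new_prog))
 *		return PTR_ERR(new_prog);
 *	env->prog = new_prog;
 */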
494
495 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
496 {
497 /* Branch offsets can't overflow when program is shrinking, no need
498 * to call bpf_adj_branches(..., true) here
499 */
500 memmove(prog->insnsi + off, prog->insnsi + off + cnt,
501 sizeof(struct bpf_insn) * (prog->len - off - cnt));
502 prog->len -= cnt;
503
504 return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
505 }
506
507 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
508 {
509 int i;
510
511 for (i = 0; i < fp->aux->func_cnt; i++)
512 bpf_prog_kallsyms_del(fp->aux->func[i]);
513 }
514
515 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
516 {
517 bpf_prog_kallsyms_del_subprogs(fp);
518 bpf_prog_kallsyms_del(fp);
519 }
520
521 #ifdef CONFIG_BPF_JIT
522 /* All BPF JIT sysctl knobs here. */
523 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
524 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
525 int bpf_jit_harden __read_mostly;
526 long bpf_jit_limit __read_mostly;
527 long bpf_jit_limit_max __read_mostly;
528
529 static void
530 bpf_prog_ksym_set_addr(struct bpf_prog *prog)
531 {
532 const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
533 unsigned long addr = (unsigned long)hdr;
534
535 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
536
537 prog->aux->ksym.start = (unsigned long) prog->bpf_func;
538 prog->aux->ksym.end = addr + hdr->pages * PAGE_SIZE;
539 }
540
541 static void
542 bpf_prog_ksym_set_name(struct bpf_prog *prog)
543 {
544 char *sym = prog->aux->ksym.name;
545 const char *end = sym + KSYM_NAME_LEN;
546 const struct btf_type *type;
547 const char *func_name;
548
549 BUILD_BUG_ON(sizeof("bpf_prog_") +
550 sizeof(prog->tag) * 2 +
551 /* name has been null terminated.
552 * We would need +1 for the '_' preceding
553 * the name. However, the null character
554 * is double counted between the name and the
555 * sizeof("bpf_prog_") above, so we omit
556 * the +1 here.
557 */
558 sizeof(prog->aux->name) > KSYM_NAME_LEN);
559
560 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
561 sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
562
563 /* prog->aux->name will be ignored if full btf name is available */
564 if (prog->aux->func_info_cnt) {
565 type = btf_type_by_id(prog->aux->btf,
566 prog->aux->func_info[prog->aux->func_idx].type_id);
567 func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
568 snprintf(sym, (size_t)(end - sym), "_%s", func_name);
569 return;
570 }
571
572 if (prog->aux->name[0])
573 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
574 else
575 *sym = 0;
576 }
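/* Example of the resulting symbol (hypothetical tag and name, for
 * illustration only):
 *
 *	bpf_prog_8937c9e909f77a4d_my_prog
 *
 * i.e. "bpf_prog_", the 8-byte tag as hex, then '_' and either the BTF
 * function name or prog->aux->name; the suffix is dropped entirely when
 * neither is set.
 */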
577
578 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
579 {
580 return container_of(n, struct bpf_ksym, tnode)->start;
581 }
582
583 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
584 struct latch_tree_node *b)
585 {
586 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
587 }
588
589 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
590 {
591 unsigned long val = (unsigned long)key;
592 const struct bpf_ksym *ksym;
593
594 ksym = container_of(n, struct bpf_ksym, tnode);
595
596 if (val < ksym->start)
597 return -1;
598 if (val >= ksym->end)
599 return 1;
600
601 return 0;
602 }
603
604 static const struct latch_tree_ops bpf_tree_ops = {
605 .less = bpf_tree_less,
606 .comp = bpf_tree_comp,
607 };
608
609 static DEFINE_SPINLOCK(bpf_lock);
610 static LIST_HEAD(bpf_kallsyms);
611 static struct latch_tree_root bpf_tree __cacheline_aligned;
612
613 void bpf_ksym_add(struct bpf_ksym *ksym)
614 {
615 spin_lock_bh(&bpf_lock);
616 WARN_ON_ONCE(!list_empty(&ksym->lnode));
617 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
618 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
619 spin_unlock_bh(&bpf_lock);
620 }
621
622 static void __bpf_ksym_del(struct bpf_ksym *ksym)
623 {
624 if (list_empty(&ksym->lnode))
625 return;
626
627 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
628 list_del_rcu(&ksym->lnode);
629 }
630
631 void bpf_ksym_del(struct bpf_ksym *ksym)
632 {
633 spin_lock_bh(&bpf_lock);
634 __bpf_ksym_del(ksym);
635 spin_unlock_bh(&bpf_lock);
636 }
637
638 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
639 {
640 return fp->jited && !bpf_prog_was_classic(fp);
641 }
642
643 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
644 {
645 return list_empty(&fp->aux->ksym.lnode) ||
646 fp->aux->ksym.lnode.prev == LIST_POISON2;
647 }
648
649 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
650 {
651 if (!bpf_prog_kallsyms_candidate(fp) ||
652 !bpf_capable())
653 return;
654
655 bpf_prog_ksym_set_addr(fp);
656 bpf_prog_ksym_set_name(fp);
657 fp->aux->ksym.prog = true;
658
659 bpf_ksym_add(&fp->aux->ksym);
660 }
661
662 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
663 {
664 if (!bpf_prog_kallsyms_candidate(fp))
665 return;
666
667 bpf_ksym_del(&fp->aux->ksym);
668 }
669
670 static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
671 {
672 struct latch_tree_node *n;
673
674 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
675 return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
676 }
677
678 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
679 unsigned long *off, char *sym)
680 {
681 struct bpf_ksym *ksym;
682 char *ret = NULL;
683
684 rcu_read_lock();
685 ksym = bpf_ksym_find(addr);
686 if (ksym) {
687 unsigned long symbol_start = ksym->start;
688 unsigned long symbol_end = ksym->end;
689
690 strncpy(sym, ksym->name, KSYM_NAME_LEN);
691
692 ret = sym;
693 if (size)
694 *size = symbol_end - symbol_start;
695 if (off)
696 *off = addr - symbol_start;
697 }
698 rcu_read_unlock();
699
700 return ret;
701 }
702
703 bool is_bpf_text_address(unsigned long addr)
704 {
705 bool ret;
706
707 rcu_read_lock();
708 ret = bpf_ksym_find(addr) != NULL;
709 rcu_read_unlock();
710
711 return ret;
712 }
713
714 static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
715 {
716 struct bpf_ksym *ksym = bpf_ksym_find(addr);
717
718 return ksym && ksym->prog ?
719 container_of(ksym, struct bpf_prog_aux, ksym)->prog :
720 NULL;
721 }
722
723 const struct exception_table_entry *search_bpf_extables(unsigned long addr)
724 {
725 const struct exception_table_entry *e = NULL;
726 struct bpf_prog *prog;
727
728 rcu_read_lock();
729 prog = bpf_prog_ksym_find(addr);
730 if (!prog)
731 goto out;
732 if (!prog->aux->num_exentries)
733 goto out;
734
735 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
736 out:
737 rcu_read_unlock();
738 return e;
739 }
740
741 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
742 char *sym)
743 {
744 struct bpf_ksym *ksym;
745 unsigned int it = 0;
746 int ret = -ERANGE;
747
748 if (!bpf_jit_kallsyms_enabled())
749 return ret;
750
751 rcu_read_lock();
752 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
753 if (it++ != symnum)
754 continue;
755
756 strncpy(sym, ksym->name, KSYM_NAME_LEN);
757
758 *value = ksym->start;
759 *type = BPF_SYM_ELF_TYPE;
760
761 ret = 0;
762 break;
763 }
764 rcu_read_unlock();
765
766 return ret;
767 }
768
769 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
770 struct bpf_jit_poke_descriptor *poke)
771 {
772 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
773 static const u32 poke_tab_max = 1024;
774 u32 slot = prog->aux->size_poke_tab;
775 u32 size = slot + 1;
776
777 if (size > poke_tab_max)
778 return -ENOSPC;
779 if (poke->tailcall_target || poke->tailcall_target_stable ||
780 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
781 return -EINVAL;
782
783 switch (poke->reason) {
784 case BPF_POKE_REASON_TAIL_CALL:
785 if (!poke->tail_call.map)
786 return -EINVAL;
787 break;
788 default:
789 return -EINVAL;
790 }
791
792 tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
793 if (!tab)
794 return -ENOMEM;
795
796 memcpy(&tab[slot], poke, sizeof(*poke));
797 prog->aux->size_poke_tab = size;
798 prog->aux->poke_tab = tab;
799
800 return slot;
801 }
802
803 static atomic_long_t bpf_jit_current;
804
805 /* Can be overridden by an arch's JIT compiler if it has a custom,
806 * dedicated BPF backend memory area, or if neither of the two
807 * below apply.
808 */
809 u64 __weak bpf_jit_alloc_exec_limit(void)
810 {
811 #if defined(MODULES_VADDR)
812 return MODULES_END - MODULES_VADDR;
813 #else
814 return VMALLOC_END - VMALLOC_START;
815 #endif
816 }
817
818 static int __init bpf_jit_charge_init(void)
819 {
820 /* Only used as heuristic here to derive limit. */
821 bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
822 bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
823 PAGE_SIZE), LONG_MAX);
824 return 0;
825 }
826 pure_initcall(bpf_jit_charge_init);
827
828 int bpf_jit_charge_modmem(u32 pages)
829 {
830 if (atomic_long_add_return(pages, &bpf_jit_current) >
831 (bpf_jit_limit >> PAGE_SHIFT)) {
832 if (!bpf_capable()) {
833 atomic_long_sub(pages, &bpf_jit_current);
834 return -EPERM;
835 }
836 }
837
838 return 0;
839 }
840
841 void bpf_jit_uncharge_modmem(u32 pages)
842 {
843 atomic_long_sub(pages, &bpf_jit_current);
844 }
845
846 void *__weak bpf_jit_alloc_exec(unsigned long size)
847 {
848 return module_alloc(size);
849 }
850
851 void __weak bpf_jit_free_exec(void *addr)
852 {
853 module_memfree(addr);
854 }
855
856 struct bpf_binary_header *
857 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
858 unsigned int alignment,
859 bpf_jit_fill_hole_t bpf_fill_ill_insns)
860 {
861 struct bpf_binary_header *hdr;
862 u32 size, hole, start, pages;
863
864 WARN_ON_ONCE(!is_power_of_2(alignment) ||
865 alignment > BPF_IMAGE_ALIGNMENT);
866
867 /* Most BPF filters are really small, but if some of them
868 * fill a page, allow at least 128 extra bytes to insert a
869 * random section of illegal instructions.
870 */
871 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
872 pages = size / PAGE_SIZE;
873
874 if (bpf_jit_charge_modmem(pages))
875 return NULL;
876 hdr = bpf_jit_alloc_exec(size);
877 if (!hdr) {
878 bpf_jit_uncharge_modmem(pages);
879 return NULL;
880 }
881
882 /* Fill space with illegal/arch-dep instructions. */
883 bpf_fill_ill_insns(hdr, size);
884
885 hdr->pages = pages;
886 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
887 PAGE_SIZE - sizeof(*hdr));
888 start = (get_random_int() % hole) & ~(alignment - 1);
889
890 /* Leave a random number of instructions before BPF code. */
891 *image_ptr = &hdr->image[start];
892
893 return hdr;
894 }
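/* Usage sketch for an arch JIT backend (names are assumptions, for
 * illustration only): a backend typically charges and allocates the image,
 * emits code at the randomized start and locks it read-only before use:
 *
 *	header = bpf_jit_binary_alloc(proglen, &image, 4, jit_fill_hole);
 *	if (!header)
 *		return orig_prog;
 *	// ... emit instructions into image ...
 *	bpf_jit_binary_lock_ro(header);
 *	prog->bpf_func = (void *)image;
 *	prog->jited = 1;
 */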
895
896 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
897 {
898 u32 pages = hdr->pages;
899
900 bpf_jit_free_exec(hdr);
901 bpf_jit_uncharge_modmem(pages);
902 }
903
904 /* This symbol is only overridden by archs that have different
905 * requirements than the usual eBPF JITs, f.e. when they only
906 * implement cBPF JIT, do not set images read-only, etc.
907 */
908 void __weak bpf_jit_free(struct bpf_prog *fp)
909 {
910 if (fp->jited) {
911 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
912
913 bpf_jit_binary_free(hdr);
914
915 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
916 }
917
918 bpf_prog_unlock_free(fp);
919 }
920
921 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
922 const struct bpf_insn *insn, bool extra_pass,
923 u64 *func_addr, bool *func_addr_fixed)
924 {
925 s16 off = insn->off;
926 s32 imm = insn->imm;
927 u8 *addr;
928
929 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
930 if (!*func_addr_fixed) {
931 /* Place-holder address till the last pass has collected
932 * all addresses for JITed subprograms in which case we
933 * can pick them up from prog->aux.
934 */
935 if (!extra_pass)
936 addr = NULL;
937 else if (prog->aux->func &&
938 off >= 0 && off < prog->aux->func_cnt)
939 addr = (u8 *)prog->aux->func[off]->bpf_func;
940 else
941 return -EINVAL;
942 } else {
943 /* Address of a BPF helper call. Since part of the core
944 * kernel, it's always at a fixed location. __bpf_call_base
945 * and the helper with imm relative to it are both in core
946 * kernel.
947 */
948 addr = (u8 *)__bpf_call_base + imm;
949 }
950
951 *func_addr = (unsigned long)addr;
952 return 0;
953 }
954
955 static int bpf_jit_blind_insn(const struct bpf_insn *from,
956 const struct bpf_insn *aux,
957 struct bpf_insn *to_buff,
958 bool emit_zext)
959 {
960 struct bpf_insn *to = to_buff;
961 u32 imm_rnd = get_random_int();
962 s16 off;
963
964 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
965 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
966
967 /* Constraints on AX register:
968 *
969 * AX register is inaccessible from user space. It is mapped in
970 * all JITs, and used here for constant blinding rewrites. It is
971 * typically "stateless" meaning its contents are only valid within
972 * the executed instruction, but not across several instructions.
973 * There are a few exceptions however which are further detailed
974 * below.
975 *
976 * Constant blinding is only used by JITs, not in the interpreter.
977 * The interpreter uses AX in some occasions as a local temporary
978 * register e.g. in DIV or MOD instructions.
979 *
980 * In restricted circumstances, the verifier can also use the AX
981 * register for rewrites as long as they do not interfere with
982 * the above cases!
983 */
984 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
985 goto out;
986
987 if (from->imm == 0 &&
988 (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
989 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
990 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
991 goto out;
992 }
993
994 switch (from->code) {
995 case BPF_ALU | BPF_ADD | BPF_K:
996 case BPF_ALU | BPF_SUB | BPF_K:
997 case BPF_ALU | BPF_AND | BPF_K:
998 case BPF_ALU | BPF_OR | BPF_K:
999 case BPF_ALU | BPF_XOR | BPF_K:
1000 case BPF_ALU | BPF_MUL | BPF_K:
1001 case BPF_ALU | BPF_MOV | BPF_K:
1002 case BPF_ALU | BPF_DIV | BPF_K:
1003 case BPF_ALU | BPF_MOD | BPF_K:
1004 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1005 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1006 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
1007 break;
1008
1009 case BPF_ALU64 | BPF_ADD | BPF_K:
1010 case BPF_ALU64 | BPF_SUB | BPF_K:
1011 case BPF_ALU64 | BPF_AND | BPF_K:
1012 case BPF_ALU64 | BPF_OR | BPF_K:
1013 case BPF_ALU64 | BPF_XOR | BPF_K:
1014 case BPF_ALU64 | BPF_MUL | BPF_K:
1015 case BPF_ALU64 | BPF_MOV | BPF_K:
1016 case BPF_ALU64 | BPF_DIV | BPF_K:
1017 case BPF_ALU64 | BPF_MOD | BPF_K:
1018 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1019 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1020 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
1021 break;
1022
1023 case BPF_JMP | BPF_JEQ | BPF_K:
1024 case BPF_JMP | BPF_JNE | BPF_K:
1025 case BPF_JMP | BPF_JGT | BPF_K:
1026 case BPF_JMP | BPF_JLT | BPF_K:
1027 case BPF_JMP | BPF_JGE | BPF_K:
1028 case BPF_JMP | BPF_JLE | BPF_K:
1029 case BPF_JMP | BPF_JSGT | BPF_K:
1030 case BPF_JMP | BPF_JSLT | BPF_K:
1031 case BPF_JMP | BPF_JSGE | BPF_K:
1032 case BPF_JMP | BPF_JSLE | BPF_K:
1033 case BPF_JMP | BPF_JSET | BPF_K:
1034 /* Accommodate for extra offset in case of a backjump. */
1035 off = from->off;
1036 if (off < 0)
1037 off -= 2;
1038 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1039 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1040 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1041 break;
1042
1043 case BPF_JMP32 | BPF_JEQ | BPF_K:
1044 case BPF_JMP32 | BPF_JNE | BPF_K:
1045 case BPF_JMP32 | BPF_JGT | BPF_K:
1046 case BPF_JMP32 | BPF_JLT | BPF_K:
1047 case BPF_JMP32 | BPF_JGE | BPF_K:
1048 case BPF_JMP32 | BPF_JLE | BPF_K:
1049 case BPF_JMP32 | BPF_JSGT | BPF_K:
1050 case BPF_JMP32 | BPF_JSLT | BPF_K:
1051 case BPF_JMP32 | BPF_JSGE | BPF_K:
1052 case BPF_JMP32 | BPF_JSLE | BPF_K:
1053 case BPF_JMP32 | BPF_JSET | BPF_K:
1054 /* Accommodate for extra offset in case of a backjump. */
1055 off = from->off;
1056 if (off < 0)
1057 off -= 2;
1058 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1059 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1060 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1061 off);
1062 break;
1063
1064 case BPF_LD | BPF_IMM | BPF_DW:
1065 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1066 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1067 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1068 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1069 break;
1070 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1071 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1072 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1073 if (emit_zext)
1074 *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1075 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
1076 break;
1077
1078 case BPF_ST | BPF_MEM | BPF_DW:
1079 case BPF_ST | BPF_MEM | BPF_W:
1080 case BPF_ST | BPF_MEM | BPF_H:
1081 case BPF_ST | BPF_MEM | BPF_B:
1082 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1083 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1084 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1085 break;
1086 }
1087 out:
1088 return to - to_buff;
1089 }
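/* Worked example (hypothetical random value, for illustration only): with
 * imm_rnd = 0x12345678, a single
 *
 *	BPF_ALU64_IMM(BPF_ADD, R2, 42)
 *
 * is rewritten into
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x12345678 ^ 42)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0x12345678)
 *	BPF_ALU64_REG(BPF_ADD, R2, BPF_REG_AX)
 *
 * so the attacker-controlled constant 42 never appears verbatim in the
 * JITed image.
 */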
1090
1091 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1092 gfp_t gfp_extra_flags)
1093 {
1094 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1095 struct bpf_prog *fp;
1096
1097 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1098 if (fp != NULL) {
1099 /* aux->prog still points to the fp_other one, so
1100 * when promoting the clone to the real program,
1101 * this still needs to be adapted.
1102 */
1103 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1104 }
1105
1106 return fp;
1107 }
1108
1109 static void bpf_prog_clone_free(struct bpf_prog *fp)
1110 {
1111 /* aux was stolen by the other clone, so we cannot free
1112 * it from this path! It will be freed eventually by the
1113 * other program on release.
1114 *
1115 * At this point, we don't need a deferred release since
1116 * clone is guaranteed to not be locked.
1117 */
1118 fp->aux = NULL;
1119 fp->stats = NULL;
1120 fp->active = NULL;
1121 __bpf_prog_free(fp);
1122 }
1123
1124 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1125 {
1126 /* We have to repoint aux->prog to self, as we don't
1127 * know whether fp here is the clone or the original.
1128 */
1129 fp->aux->prog = fp;
1130 bpf_prog_clone_free(fp_other);
1131 }
1132
1133 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1134 {
1135 struct bpf_insn insn_buff[16], aux[2];
1136 struct bpf_prog *clone, *tmp;
1137 int insn_delta, insn_cnt;
1138 struct bpf_insn *insn;
1139 int i, rewritten;
1140
1141 if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
1142 return prog;
1143
1144 clone = bpf_prog_clone_create(prog, GFP_USER);
1145 if (!clone)
1146 return ERR_PTR(-ENOMEM);
1147
1148 insn_cnt = clone->len;
1149 insn = clone->insnsi;
1150
1151 for (i = 0; i < insn_cnt; i++, insn++) {
1152 /* We temporarily need to hold the original ld64 insn
1153 * so that we can still access the first part in the
1154 * second blinding run.
1155 */
1156 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1157 insn[1].code == 0)
1158 memcpy(aux, insn, sizeof(aux));
1159
1160 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1161 clone->aux->verifier_zext);
1162 if (!rewritten)
1163 continue;
1164
1165 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1166 if (IS_ERR(tmp)) {
1167 /* Patching may have repointed aux->prog during
1168 * realloc from the original one, so we need to
1169 * fix it up here on error.
1170 */
1171 bpf_jit_prog_release_other(prog, clone);
1172 return tmp;
1173 }
1174
1175 clone = tmp;
1176 insn_delta = rewritten - 1;
1177
1178 /* Walk new program and skip insns we just inserted. */
1179 insn = clone->insnsi + i + insn_delta;
1180 insn_cnt += insn_delta;
1181 i += insn_delta;
1182 }
1183
1184 clone->blinded = 1;
1185 return clone;
1186 }
1187 #endif /* CONFIG_BPF_JIT */
1188
1189 /* Base function for offset calculation. Needs to go into .text section,
1190 * therefore keeping it non-static as well; will also be used by JITs
1191 * anyway later on, so do not let the compiler omit it. This also needs
1192 * to go into kallsyms for correlation from e.g. bpftool, so naming
1193 * must not change.
1194 */
1195 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1196 {
1197 return 0;
1198 }
1199 EXPORT_SYMBOL_GPL(__bpf_call_base);
1200
1201 /* All UAPI available opcodes. */
1202 #define BPF_INSN_MAP(INSN_2, INSN_3) \
1203 /* 32 bit ALU operations. */ \
1204 /* Register based. */ \
1205 INSN_3(ALU, ADD, X), \
1206 INSN_3(ALU, SUB, X), \
1207 INSN_3(ALU, AND, X), \
1208 INSN_3(ALU, OR, X), \
1209 INSN_3(ALU, LSH, X), \
1210 INSN_3(ALU, RSH, X), \
1211 INSN_3(ALU, XOR, X), \
1212 INSN_3(ALU, MUL, X), \
1213 INSN_3(ALU, MOV, X), \
1214 INSN_3(ALU, ARSH, X), \
1215 INSN_3(ALU, DIV, X), \
1216 INSN_3(ALU, MOD, X), \
1217 INSN_2(ALU, NEG), \
1218 INSN_3(ALU, END, TO_BE), \
1219 INSN_3(ALU, END, TO_LE), \
1220 /* Immediate based. */ \
1221 INSN_3(ALU, ADD, K), \
1222 INSN_3(ALU, SUB, K), \
1223 INSN_3(ALU, AND, K), \
1224 INSN_3(ALU, OR, K), \
1225 INSN_3(ALU, LSH, K), \
1226 INSN_3(ALU, RSH, K), \
1227 INSN_3(ALU, XOR, K), \
1228 INSN_3(ALU, MUL, K), \
1229 INSN_3(ALU, MOV, K), \
1230 INSN_3(ALU, ARSH, K), \
1231 INSN_3(ALU, DIV, K), \
1232 INSN_3(ALU, MOD, K), \
1233 /* 64 bit ALU operations. */ \
1234 /* Register based. */ \
1235 INSN_3(ALU64, ADD, X), \
1236 INSN_3(ALU64, SUB, X), \
1237 INSN_3(ALU64, AND, X), \
1238 INSN_3(ALU64, OR, X), \
1239 INSN_3(ALU64, LSH, X), \
1240 INSN_3(ALU64, RSH, X), \
1241 INSN_3(ALU64, XOR, X), \
1242 INSN_3(ALU64, MUL, X), \
1243 INSN_3(ALU64, MOV, X), \
1244 INSN_3(ALU64, ARSH, X), \
1245 INSN_3(ALU64, DIV, X), \
1246 INSN_3(ALU64, MOD, X), \
1247 INSN_2(ALU64, NEG), \
1248 /* Immediate based. */ \
1249 INSN_3(ALU64, ADD, K), \
1250 INSN_3(ALU64, SUB, K), \
1251 INSN_3(ALU64, AND, K), \
1252 INSN_3(ALU64, OR, K), \
1253 INSN_3(ALU64, LSH, K), \
1254 INSN_3(ALU64, RSH, K), \
1255 INSN_3(ALU64, XOR, K), \
1256 INSN_3(ALU64, MUL, K), \
1257 INSN_3(ALU64, MOV, K), \
1258 INSN_3(ALU64, ARSH, K), \
1259 INSN_3(ALU64, DIV, K), \
1260 INSN_3(ALU64, MOD, K), \
1261 /* Call instruction. */ \
1262 INSN_2(JMP, CALL), \
1263 /* Exit instruction. */ \
1264 INSN_2(JMP, EXIT), \
1265 /* 32-bit Jump instructions. */ \
1266 /* Register based. */ \
1267 INSN_3(JMP32, JEQ, X), \
1268 INSN_3(JMP32, JNE, X), \
1269 INSN_3(JMP32, JGT, X), \
1270 INSN_3(JMP32, JLT, X), \
1271 INSN_3(JMP32, JGE, X), \
1272 INSN_3(JMP32, JLE, X), \
1273 INSN_3(JMP32, JSGT, X), \
1274 INSN_3(JMP32, JSLT, X), \
1275 INSN_3(JMP32, JSGE, X), \
1276 INSN_3(JMP32, JSLE, X), \
1277 INSN_3(JMP32, JSET, X), \
1278 /* Immediate based. */ \
1279 INSN_3(JMP32, JEQ, K), \
1280 INSN_3(JMP32, JNE, K), \
1281 INSN_3(JMP32, JGT, K), \
1282 INSN_3(JMP32, JLT, K), \
1283 INSN_3(JMP32, JGE, K), \
1284 INSN_3(JMP32, JLE, K), \
1285 INSN_3(JMP32, JSGT, K), \
1286 INSN_3(JMP32, JSLT, K), \
1287 INSN_3(JMP32, JSGE, K), \
1288 INSN_3(JMP32, JSLE, K), \
1289 INSN_3(JMP32, JSET, K), \
1290 /* Jump instructions. */ \
1291 /* Register based. */ \
1292 INSN_3(JMP, JEQ, X), \
1293 INSN_3(JMP, JNE, X), \
1294 INSN_3(JMP, JGT, X), \
1295 INSN_3(JMP, JLT, X), \
1296 INSN_3(JMP, JGE, X), \
1297 INSN_3(JMP, JLE, X), \
1298 INSN_3(JMP, JSGT, X), \
1299 INSN_3(JMP, JSLT, X), \
1300 INSN_3(JMP, JSGE, X), \
1301 INSN_3(JMP, JSLE, X), \
1302 INSN_3(JMP, JSET, X), \
1303 /* Immediate based. */ \
1304 INSN_3(JMP, JEQ, K), \
1305 INSN_3(JMP, JNE, K), \
1306 INSN_3(JMP, JGT, K), \
1307 INSN_3(JMP, JLT, K), \
1308 INSN_3(JMP, JGE, K), \
1309 INSN_3(JMP, JLE, K), \
1310 INSN_3(JMP, JSGT, K), \
1311 INSN_3(JMP, JSLT, K), \
1312 INSN_3(JMP, JSGE, K), \
1313 INSN_3(JMP, JSLE, K), \
1314 INSN_3(JMP, JSET, K), \
1315 INSN_2(JMP, JA), \
1316 /* Store instructions. */ \
1317 /* Register based. */ \
1318 INSN_3(STX, MEM, B), \
1319 INSN_3(STX, MEM, H), \
1320 INSN_3(STX, MEM, W), \
1321 INSN_3(STX, MEM, DW), \
1322 INSN_3(STX, ATOMIC, W), \
1323 INSN_3(STX, ATOMIC, DW), \
1324 /* Immediate based. */ \
1325 INSN_3(ST, MEM, B), \
1326 INSN_3(ST, MEM, H), \
1327 INSN_3(ST, MEM, W), \
1328 INSN_3(ST, MEM, DW), \
1329 /* Load instructions. */ \
1330 /* Register based. */ \
1331 INSN_3(LDX, MEM, B), \
1332 INSN_3(LDX, MEM, H), \
1333 INSN_3(LDX, MEM, W), \
1334 INSN_3(LDX, MEM, DW), \
1335 /* Immediate based. */ \
1336 INSN_3(LD, IMM, DW)
1337
1338 bool bpf_opcode_in_insntable(u8 code)
1339 {
1340 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
1341 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1342 static const bool public_insntable[256] = {
1343 [0 ... 255] = false,
1344 /* Now overwrite non-defaults ... */
1345 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1346 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1347 [BPF_LD | BPF_ABS | BPF_B] = true,
1348 [BPF_LD | BPF_ABS | BPF_H] = true,
1349 [BPF_LD | BPF_ABS | BPF_W] = true,
1350 [BPF_LD | BPF_IND | BPF_B] = true,
1351 [BPF_LD | BPF_IND | BPF_H] = true,
1352 [BPF_LD | BPF_IND | BPF_W] = true,
1353 };
1354 #undef BPF_INSN_3_TBL
1355 #undef BPF_INSN_2_TBL
1356 return public_insntable[code];
1357 }
1358
1359 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1360 u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
1361 {
1362 memset(dst, 0, size);
1363 return -EFAULT;
1364 }
1365
1366 /**
1367 * ___bpf_prog_run - run eBPF program on a given context
1368 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1369 * @insn: is the array of eBPF instructions
1370 *
1371 * Decode and execute eBPF instructions.
1372 *
1373 * Return: whatever value is in %BPF_R0 at program exit
1374 */
1375 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1376 {
1377 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1378 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1379 static const void * const jumptable[256] __annotate_jump_table = {
1380 [0 ... 255] = &&default_label,
1381 /* Now overwrite non-defaults ... */
1382 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1383 /* Non-UAPI available opcodes. */
1384 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1385 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1386 [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
1387 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1388 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1389 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1390 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1391 };
1392 #undef BPF_INSN_3_LBL
1393 #undef BPF_INSN_2_LBL
1394 u32 tail_call_cnt = 0;
1395
1396 #define CONT ({ insn++; goto select_insn; })
1397 #define CONT_JMP ({ insn++; goto select_insn; })
1398
1399 select_insn:
1400 goto *jumptable[insn->code];
1401
1402 /* Explicitly mask the register-based shift amounts with 63 or 31
1403 * to avoid undefined behavior. Normally this won't affect the
1404 * generated code, for example, in case of native 64 bit archs such
1405 * as x86-64 or arm64, the compiler is optimizing the AND away for
1406 * the interpreter. In case of JITs, each of the JIT backends compiles
1407 * the BPF shift operations to machine instructions which produce
1408 * implementation-defined results in such a case; the resulting
1409 * contents of the register may be arbitrary, but program behaviour
1410 * as a whole remains defined. In other words, in case of JIT backends,
1411 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
1412 */
1413 /* ALU (shifts) */
1414 #define SHT(OPCODE, OP) \
1415 ALU64_##OPCODE##_X: \
1416 DST = DST OP (SRC & 63); \
1417 CONT; \
1418 ALU_##OPCODE##_X: \
1419 DST = (u32) DST OP ((u32) SRC & 31); \
1420 CONT; \
1421 ALU64_##OPCODE##_K: \
1422 DST = DST OP IMM; \
1423 CONT; \
1424 ALU_##OPCODE##_K: \
1425 DST = (u32) DST OP (u32) IMM; \
1426 CONT;
1427 /* ALU (rest) */
1428 #define ALU(OPCODE, OP) \
1429 ALU64_##OPCODE##_X: \
1430 DST = DST OP SRC; \
1431 CONT; \
1432 ALU_##OPCODE##_X: \
1433 DST = (u32) DST OP (u32) SRC; \
1434 CONT; \
1435 ALU64_##OPCODE##_K: \
1436 DST = DST OP IMM; \
1437 CONT; \
1438 ALU_##OPCODE##_K: \
1439 DST = (u32) DST OP (u32) IMM; \
1440 CONT;
1441 ALU(ADD, +)
1442 ALU(SUB, -)
1443 ALU(AND, &)
1444 ALU(OR, |)
1445 ALU(XOR, ^)
1446 ALU(MUL, *)
1447 SHT(LSH, <<)
1448 SHT(RSH, >>)
1449 #undef SHT
1450 #undef ALU
1451 ALU_NEG:
1452 DST = (u32) -DST;
1453 CONT;
1454 ALU64_NEG:
1455 DST = -DST;
1456 CONT;
1457 ALU_MOV_X:
1458 DST = (u32) SRC;
1459 CONT;
1460 ALU_MOV_K:
1461 DST = (u32) IMM;
1462 CONT;
1463 ALU64_MOV_X:
1464 DST = SRC;
1465 CONT;
1466 ALU64_MOV_K:
1467 DST = IMM;
1468 CONT;
1469 LD_IMM_DW:
1470 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1471 insn++;
1472 CONT;
1473 ALU_ARSH_X:
1474 DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1475 CONT;
1476 ALU_ARSH_K:
1477 DST = (u64) (u32) (((s32) DST) >> IMM);
1478 CONT;
1479 ALU64_ARSH_X:
1480 (*(s64 *) &DST) >>= (SRC & 63);
1481 CONT;
1482 ALU64_ARSH_K:
1483 (*(s64 *) &DST) >>= IMM;
1484 CONT;
1485 ALU64_MOD_X:
1486 div64_u64_rem(DST, SRC, &AX);
1487 DST = AX;
1488 CONT;
1489 ALU_MOD_X:
1490 AX = (u32) DST;
1491 DST = do_div(AX, (u32) SRC);
1492 CONT;
1493 ALU64_MOD_K:
1494 div64_u64_rem(DST, IMM, &AX);
1495 DST = AX;
1496 CONT;
1497 ALU_MOD_K:
1498 AX = (u32) DST;
1499 DST = do_div(AX, (u32) IMM);
1500 CONT;
1501 ALU64_DIV_X:
1502 DST = div64_u64(DST, SRC);
1503 CONT;
1504 ALU_DIV_X:
1505 AX = (u32) DST;
1506 do_div(AX, (u32) SRC);
1507 DST = (u32) AX;
1508 CONT;
1509 ALU64_DIV_K:
1510 DST = div64_u64(DST, IMM);
1511 CONT;
1512 ALU_DIV_K:
1513 AX = (u32) DST;
1514 do_div(AX, (u32) IMM);
1515 DST = (u32) AX;
1516 CONT;
1517 ALU_END_TO_BE:
1518 switch (IMM) {
1519 case 16:
1520 DST = (__force u16) cpu_to_be16(DST);
1521 break;
1522 case 32:
1523 DST = (__force u32) cpu_to_be32(DST);
1524 break;
1525 case 64:
1526 DST = (__force u64) cpu_to_be64(DST);
1527 break;
1528 }
1529 CONT;
1530 ALU_END_TO_LE:
1531 switch (IMM) {
1532 case 16:
1533 DST = (__force u16) cpu_to_le16(DST);
1534 break;
1535 case 32:
1536 DST = (__force u32) cpu_to_le32(DST);
1537 break;
1538 case 64:
1539 DST = (__force u64) cpu_to_le64(DST);
1540 break;
1541 }
1542 CONT;
1543
1544 /* CALL */
1545 JMP_CALL:
1546 /* Function call scratches BPF_R1-BPF_R5 registers,
1547 * preserves BPF_R6-BPF_R9, and stores return value
1548 * into BPF_R0.
1549 */
1550 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1551 BPF_R4, BPF_R5);
1552 CONT;
1553
1554 JMP_CALL_ARGS:
1555 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1556 BPF_R3, BPF_R4,
1557 BPF_R5,
1558 insn + insn->off + 1);
1559 CONT;
1560
1561 JMP_TAIL_CALL: {
1562 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1563 struct bpf_array *array = container_of(map, struct bpf_array, map);
1564 struct bpf_prog *prog;
1565 u32 index = BPF_R3;
1566
1567 if (unlikely(index >= array->map.max_entries))
1568 goto out;
1569 if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1570 goto out;
1571
1572 tail_call_cnt++;
1573
1574 prog = READ_ONCE(array->ptrs[index]);
1575 if (!prog)
1576 goto out;
1577
1578 /* ARG1 at this point is guaranteed to point to CTX from
1579 * the verifier side due to the fact that the tail call is
1580 * handled like a helper, that is, bpf_tail_call_proto,
1581 * where arg1_type is ARG_PTR_TO_CTX.
1582 */
1583 insn = prog->insnsi;
1584 goto select_insn;
1585 out:
1586 CONT;
1587 }
1588 JMP_JA:
1589 insn += insn->off;
1590 CONT;
1591 JMP_EXIT:
1592 return BPF_R0;
1593 /* JMP */
1594 #define COND_JMP(SIGN, OPCODE, CMP_OP) \
1595 JMP_##OPCODE##_X: \
1596 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
1597 insn += insn->off; \
1598 CONT_JMP; \
1599 } \
1600 CONT; \
1601 JMP32_##OPCODE##_X: \
1602 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
1603 insn += insn->off; \
1604 CONT_JMP; \
1605 } \
1606 CONT; \
1607 JMP_##OPCODE##_K: \
1608 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
1609 insn += insn->off; \
1610 CONT_JMP; \
1611 } \
1612 CONT; \
1613 JMP32_##OPCODE##_K: \
1614 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
1615 insn += insn->off; \
1616 CONT_JMP; \
1617 } \
1618 CONT;
1619 COND_JMP(u, JEQ, ==)
1620 COND_JMP(u, JNE, !=)
1621 COND_JMP(u, JGT, >)
1622 COND_JMP(u, JLT, <)
1623 COND_JMP(u, JGE, >=)
1624 COND_JMP(u, JLE, <=)
1625 COND_JMP(u, JSET, &)
1626 COND_JMP(s, JSGT, >)
1627 COND_JMP(s, JSLT, <)
1628 COND_JMP(s, JSGE, >=)
1629 COND_JMP(s, JSLE, <=)
1630 #undef COND_JMP
1631 /* ST, STX and LDX */
1632 ST_NOSPEC:
1633 /* Speculation barrier for mitigating Speculative Store Bypass.
1634 * In case of arm64, we rely on the firmware mitigation as
1635 * controlled via the ssbd kernel parameter. Whenever the
1636 * mitigation is enabled, it works for all of the kernel code
1637 * with no need to provide any additional instructions here.
1638 * In case of x86, we use 'lfence' insn for mitigation. We
1639 * reuse preexisting logic from Spectre v1 mitigation that
1640 * happens to produce the required code on x86 for v4 as well.
1641 */
1642 #ifdef CONFIG_X86
1643 barrier_nospec();
1644 #endif
1645 CONT;
1646 #define LDST(SIZEOP, SIZE) \
1647 STX_MEM_##SIZEOP: \
1648 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
1649 CONT; \
1650 ST_MEM_##SIZEOP: \
1651 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
1652 CONT; \
1653 LDX_MEM_##SIZEOP: \
1654 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
1655 CONT;
1656
1657 LDST(B, u8)
1658 LDST(H, u16)
1659 LDST(W, u32)
1660 LDST(DW, u64)
1661 #undef LDST
1662 #define LDX_PROBE(SIZEOP, SIZE) \
1663 LDX_PROBE_MEM_##SIZEOP: \
1664 bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off)); \
1665 CONT;
1666 LDX_PROBE(B, 1)
1667 LDX_PROBE(H, 2)
1668 LDX_PROBE(W, 4)
1669 LDX_PROBE(DW, 8)
1670 #undef LDX_PROBE
1671
1672 #define ATOMIC_ALU_OP(BOP, KOP) \
1673 case BOP: \
1674 if (BPF_SIZE(insn->code) == BPF_W) \
1675 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
1676 (DST + insn->off)); \
1677 else \
1678 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
1679 (DST + insn->off)); \
1680 break; \
1681 case BOP | BPF_FETCH: \
1682 if (BPF_SIZE(insn->code) == BPF_W) \
1683 SRC = (u32) atomic_fetch_##KOP( \
1684 (u32) SRC, \
1685 (atomic_t *)(unsigned long) (DST + insn->off)); \
1686 else \
1687 SRC = (u64) atomic64_fetch_##KOP( \
1688 (u64) SRC, \
1689 (atomic64_t *)(unsigned long) (DST + insn->off)); \
1690 break;
1691
1692 STX_ATOMIC_DW:
1693 STX_ATOMIC_W:
1694 switch (IMM) {
1695 ATOMIC_ALU_OP(BPF_ADD, add)
1696 ATOMIC_ALU_OP(BPF_AND, and)
1697 ATOMIC_ALU_OP(BPF_OR, or)
1698 ATOMIC_ALU_OP(BPF_XOR, xor)
1699 #undef ATOMIC_ALU_OP
1700
1701 case BPF_XCHG:
1702 if (BPF_SIZE(insn->code) == BPF_W)
1703 SRC = (u32) atomic_xchg(
1704 (atomic_t *)(unsigned long) (DST + insn->off),
1705 (u32) SRC);
1706 else
1707 SRC = (u64) atomic64_xchg(
1708 (atomic64_t *)(unsigned long) (DST + insn->off),
1709 (u64) SRC);
1710 break;
1711 case BPF_CMPXCHG:
1712 if (BPF_SIZE(insn->code) == BPF_W)
1713 BPF_R0 = (u32) atomic_cmpxchg(
1714 (atomic_t *)(unsigned long) (DST + insn->off),
1715 (u32) BPF_R0, (u32) SRC);
1716 else
1717 BPF_R0 = (u64) atomic64_cmpxchg(
1718 (atomic64_t *)(unsigned long) (DST + insn->off),
1719 (u64) BPF_R0, (u64) SRC);
1720 break;
1721
1722 default:
1723 goto default_label;
1724 }
1725 CONT;
1726
1727 default_label:
1728 /* If we ever reach this, we have a bug somewhere. Die hard here
1729 * instead of just returning 0; we could be somewhere in a subprog,
1730 * so execution could continue otherwise which we do /not/ want.
1731 *
1732 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1733 */
1734 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
1735 insn->code, insn->imm);
1736 BUG_ON(1);
1737 return 0;
1738 }
1739
1740 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1741 #define DEFINE_BPF_PROG_RUN(stack_size) \
1742 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1743 { \
1744 u64 stack[stack_size / sizeof(u64)]; \
1745 u64 regs[MAX_BPF_EXT_REG]; \
1746 \
1747 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1748 ARG1 = (u64) (unsigned long) ctx; \
1749 return ___bpf_prog_run(regs, insn); \
1750 }
1751
1752 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1753 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1754 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1755 const struct bpf_insn *insn) \
1756 { \
1757 u64 stack[stack_size / sizeof(u64)]; \
1758 u64 regs[MAX_BPF_EXT_REG]; \
1759 \
1760 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1761 BPF_R1 = r1; \
1762 BPF_R2 = r2; \
1763 BPF_R3 = r3; \
1764 BPF_R4 = r4; \
1765 BPF_R5 = r5; \
1766 return ___bpf_prog_run(regs, insn); \
1767 }
1768
1769 #define EVAL1(FN, X) FN(X)
1770 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1771 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1772 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1773 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1774 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1775
1776 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1777 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1778 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1779
1780 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1781 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1782 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1783
1784 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1785
1786 static unsigned int (*interpreters[])(const void *ctx,
1787 const struct bpf_insn *insn) = {
1788 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1789 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1790 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1791 };
1792 #undef PROG_NAME_LIST
1793 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1794 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1795 const struct bpf_insn *insn) = {
1796 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1797 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1798 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1799 };
1800 #undef PROG_NAME_LIST
1801
1802 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1803 {
1804 stack_depth = max_t(u32, stack_depth, 1);
1805 insn->off = (s16) insn->imm;
1806 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1807 __bpf_call_base_args;
1808 insn->code = BPF_JMP | BPF_CALL_ARGS;
1809 }
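/* Worked example of the stack-size bucketing used above and in
 * bpf_prog_select_func() (hypothetical depth, for illustration only):
 * a stack_depth of 100 bytes rounds up to 128, so index
 * (128 / 32) - 1 = 3 selects __bpf_prog_run_args128 / __bpf_prog_run128,
 * whose on-stack scratch area is the smallest 32-byte multiple that fits.
 */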
1810
1811 #else
1812 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1813 const struct bpf_insn *insn)
1814 {
1815 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1816 * is not working properly, so warn about it!
1817 */
1818 WARN_ON_ONCE(1);
1819 return 0;
1820 }
1821 #endif
1822
1823 bool bpf_prog_array_compatible(struct bpf_array *array,
1824 const struct bpf_prog *fp)
1825 {
1826 bool ret;
1827
1828 if (fp->kprobe_override)
1829 return false;
1830
1831 spin_lock(&array->aux->owner.lock);
1832
1833 if (!array->aux->owner.type) {
1834 /* There's no owner yet where we could check for
1835 * compatibility.
1836 */
1837 array->aux->owner.type = fp->type;
1838 array->aux->owner.jited = fp->jited;
1839 ret = true;
1840 } else {
1841 ret = array->aux->owner.type == fp->type &&
1842 array->aux->owner.jited == fp->jited;
1843 }
1844 spin_unlock(&array->aux->owner.lock);
1845 return ret;
1846 }
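/* Note: the first program checked against a given prog array thus "claims"
 * it by recording its type and JITed-ness as the owner; every later program
 * must match both, which is what keeps tail calls from jumping between JITed
 * and interpreted (or differently typed) programs. The check is driven from
 * bpf_check_tail_call() below for every PROG_ARRAY map a program uses.
 */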
1847
1848 static int bpf_check_tail_call(const struct bpf_prog *fp)
1849 {
1850 struct bpf_prog_aux *aux = fp->aux;
1851 int i, ret = 0;
1852
1853 mutex_lock(&aux->used_maps_mutex);
1854 for (i = 0; i < aux->used_map_cnt; i++) {
1855 struct bpf_map *map = aux->used_maps[i];
1856 struct bpf_array *array;
1857
1858 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1859 continue;
1860
1861 array = container_of(map, struct bpf_array, map);
1862 if (!bpf_prog_array_compatible(array, fp)) {
1863 ret = -EINVAL;
1864 goto out;
1865 }
1866 }
1867
1868 out:
1869 mutex_unlock(&aux->used_maps_mutex);
1870 return ret;
1871 }
1872
1873 static void bpf_prog_select_func(struct bpf_prog *fp)
1874 {
1875 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1876 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1877
1878 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1879 #else
1880 fp->bpf_func = __bpf_prog_ret0_warn;
1881 #endif
1882 }
1883
1884 /**
1885 * bpf_prog_select_runtime - select exec runtime for BPF program
1886 * @fp: bpf_prog populated with internal BPF program
1887 * @err: pointer to error variable
1888 *
1889 * Try to JIT the eBPF program; if the JIT is not available, use the interpreter.
1890 * The BPF program will be executed via the bpf_prog_run() function.
1891 *
1892 * Return: the &fp argument along with &err set to 0 for success or
1893 * a negative errno code on failure
1894 */
1895 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1896 {
1897 /* In case of BPF to BPF calls, the verifier has already done
1898 * all the prep work with regard to JITing, etc.
1899 */
1900 bool jit_needed = false;
1901
1902 if (fp->bpf_func)
1903 goto finalize;
1904
1905 if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
1906 bpf_prog_has_kfunc_call(fp))
1907 jit_needed = true;
1908
1909 bpf_prog_select_func(fp);
1910
1911 /* eBPF JITs can rewrite the program in case constant
1912 * blinding is active. However, in case of error during
1913 * blinding, bpf_int_jit_compile() must always return a
1914 * valid program, which in this case would simply not
1915 * be JITed and would instead fall back to the interpreter.
1916 */
1917 if (!bpf_prog_is_dev_bound(fp->aux)) {
1918 *err = bpf_prog_alloc_jited_linfo(fp);
1919 if (*err)
1920 return fp;
1921
1922 fp = bpf_int_jit_compile(fp);
1923 bpf_prog_jit_attempt_done(fp);
1924 if (!fp->jited && jit_needed) {
1925 *err = -ENOTSUPP;
1926 return fp;
1927 }
1928 } else {
1929 *err = bpf_prog_offload_compile(fp);
1930 if (*err)
1931 return fp;
1932 }
1933
1934 finalize:
1935 bpf_prog_lock_ro(fp);
1936
1937 /* The tail call compatibility check can only be done at
1938 * this late stage, as we need to determine whether we deal
1939 * with JITed or non-JITed program concatenations, and not
1940 * all eBPF JITs might immediately support all features.
1941 */
1942 *err = bpf_check_tail_call(fp);
1943
1944 return fp;
1945 }
1946 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
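/* A minimal usage sketch (caller-side names are illustrative only):
 *
 *	int err;
 *
 *	prog = bpf_prog_select_runtime(prog, &err);
 *	if (err < 0)
 *		goto free_prog;
 *
 * The returned program must be used in place of the one passed in, since
 * bpf_int_jit_compile() may have replaced it, and success or failure is
 * reported exclusively through *err.
 */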
1947
1948 static unsigned int __bpf_prog_ret1(const void *ctx,
1949 const struct bpf_insn *insn)
1950 {
1951 return 1;
1952 }
1953
1954 static struct bpf_prog_dummy {
1955 struct bpf_prog prog;
1956 } dummy_bpf_prog = {
1957 .prog = {
1958 .bpf_func = __bpf_prog_ret1,
1959 },
1960 };
1961
1962 /* To avoid allocating an empty bpf_prog_array for cgroups that have no
1963 * bpf program attached, use one global 'empty_prog_array'. It will not
1964 * be modified by the caller of bpf_prog_array_alloc() (since the caller
1965 * requested prog_cnt == 0), and that pointer should still be 'freed'
1966 * via bpf_prog_array_free(), which recognizes it and does nothing.
1967 */
1968 static struct {
1969 struct bpf_prog_array hdr;
1970 struct bpf_prog *null_prog;
1971 } empty_prog_array = {
1972 .null_prog = NULL,
1973 };
1974
1975 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1976 {
1977 if (prog_cnt)
1978 return kzalloc(sizeof(struct bpf_prog_array) +
1979 sizeof(struct bpf_prog_array_item) *
1980 (prog_cnt + 1),
1981 flags);
1982
1983 return &empty_prog_array.hdr;
1984 }
1985
1986 void bpf_prog_array_free(struct bpf_prog_array *progs)
1987 {
1988 if (!progs || progs == &empty_prog_array.hdr)
1989 return;
1990 kfree_rcu(progs, rcu);
1991 }
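/* For illustration, a sketch of the zero-count path described above:
 *
 *	struct bpf_prog_array *arr = bpf_prog_array_alloc(0, GFP_KERNEL);
 *
 * returns &empty_prog_array.hdr without allocating anything, and a later
 * bpf_prog_array_free(arr) recognizes that pointer and skips kfree_rcu(),
 * so callers never need a special case for "no programs attached".
 */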
1992
1993 int bpf_prog_array_length(struct bpf_prog_array *array)
1994 {
1995 struct bpf_prog_array_item *item;
1996 u32 cnt = 0;
1997
1998 for (item = array->items; item->prog; item++)
1999 if (item->prog != &dummy_bpf_prog.prog)
2000 cnt++;
2001 return cnt;
2002 }
2003
2004 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
2005 {
2006 struct bpf_prog_array_item *item;
2007
2008 for (item = array->items; item->prog; item++)
2009 if (item->prog != &dummy_bpf_prog.prog)
2010 return false;
2011 return true;
2012 }
2013
2014 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2015 u32 *prog_ids,
2016 u32 request_cnt)
2017 {
2018 struct bpf_prog_array_item *item;
2019 int i = 0;
2020
2021 for (item = array->items; item->prog; item++) {
2022 if (item->prog == &dummy_bpf_prog.prog)
2023 continue;
2024 prog_ids[i] = item->prog->aux->id;
2025 if (++i == request_cnt) {
2026 item++;
2027 break;
2028 }
2029 }
2030
2031 return !!(item->prog);
2032 }
2033
2034 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2035 __u32 __user *prog_ids, u32 cnt)
2036 {
2037 unsigned long err = 0;
2038 bool nospc;
2039 u32 *ids;
2040
2041 /* users of this function are doing:
2042 * cnt = bpf_prog_array_length();
2043 * if (cnt > 0)
2044 * bpf_prog_array_copy_to_user(..., cnt);
2045 * so below kcalloc doesn't need extra cnt > 0 check.
2046 */
2047 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2048 if (!ids)
2049 return -ENOMEM;
2050 nospc = bpf_prog_array_copy_core(array, ids, cnt);
2051 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2052 kfree(ids);
2053 if (err)
2054 return -EFAULT;
2055 if (nospc)
2056 return -ENOSPC;
2057 return 0;
2058 }
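/* Sketch of the calling convention described in the comment above (the
 * user-space pointer name is illustrative):
 *
 *	u32 cnt = bpf_prog_array_length(array);
 *	int err = 0;
 *
 *	if (cnt > 0)
 *		err = bpf_prog_array_copy_to_user(array, uprog_ids, cnt);
 *
 * err is then 0, -EFAULT if the copy to user space failed, or -ENOSPC if
 * the array held more real programs than the cnt slots provided.
 */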
2059
2060 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2061 struct bpf_prog *old_prog)
2062 {
2063 struct bpf_prog_array_item *item;
2064
2065 for (item = array->items; item->prog; item++)
2066 if (item->prog == old_prog) {
2067 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2068 break;
2069 }
2070 }
2071
2072 /**
2073 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2074 * index into the program array with
2075 * a dummy no-op program.
2076 * @array: a bpf_prog_array
2077 * @index: the index of the program to replace
2078 *
2079 * Skips over dummy programs, by not counting them, when calculating
2080 * the position of the program to replace.
2081 *
2082 * Return:
2083 * * 0 - Success
2084 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2085 * * -ENOENT - Index out of range
2086 */
2087 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2088 {
2089 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2090 }
2091
2092 /**
2093 * bpf_prog_array_update_at() - Updates the program at the given index
2094 * into the program array.
2095 * @array: a bpf_prog_array
2096 * @index: the index of the program to update
2097 * @prog: the program to insert into the array
2098 *
2099 * Skips over dummy programs, by not counting them, when calculating
2100 * the position of the program to update.
2101 *
2102 * Return:
2103 * * 0 - Success
2104 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2105 * * -ENOENT - Index out of range
2106 */
2107 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2108 struct bpf_prog *prog)
2109 {
2110 struct bpf_prog_array_item *item;
2111
2112 if (unlikely(index < 0))
2113 return -EINVAL;
2114
2115 for (item = array->items; item->prog; item++) {
2116 if (item->prog == &dummy_bpf_prog.prog)
2117 continue;
2118 if (!index) {
2119 WRITE_ONCE(item->prog, prog);
2120 return 0;
2121 }
2122 index--;
2123 }
2124 return -ENOENT;
2125 }
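/* Worked example (array contents are hypothetical): with items
 *
 *	{ progA, dummy, progB, NULL }
 *
 * bpf_prog_array_update_at(array, 1, progC) skips the dummy entry and
 * replaces progB, bpf_prog_array_delete_safe_at(array, 1) swaps progB for
 * the dummy no-op program instead, and index 2 yields -ENOENT because only
 * two real programs are present.
 */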
2126
2127 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2128 struct bpf_prog *exclude_prog,
2129 struct bpf_prog *include_prog,
2130 u64 bpf_cookie,
2131 struct bpf_prog_array **new_array)
2132 {
2133 int new_prog_cnt, carry_prog_cnt = 0;
2134 struct bpf_prog_array_item *existing, *new;
2135 struct bpf_prog_array *array;
2136 bool found_exclude = false;
2137
2138 /* Figure out how many existing progs we need to carry over to
2139 * the new array.
2140 */
2141 if (old_array) {
2142 existing = old_array->items;
2143 for (; existing->prog; existing++) {
2144 if (existing->prog == exclude_prog) {
2145 found_exclude = true;
2146 continue;
2147 }
2148 if (existing->prog != &dummy_bpf_prog.prog)
2149 carry_prog_cnt++;
2150 if (existing->prog == include_prog)
2151 return -EEXIST;
2152 }
2153 }
2154
2155 if (exclude_prog && !found_exclude)
2156 return -ENOENT;
2157
2158 /* How many progs (not NULL) will be in the new array? */
2159 new_prog_cnt = carry_prog_cnt;
2160 if (include_prog)
2161 new_prog_cnt += 1;
2162
2163 /* Do we have any prog (not NULL) in the new array? */
2164 if (!new_prog_cnt) {
2165 *new_array = NULL;
2166 return 0;
2167 }
2168
2169 /* +1 as the end of prog_array is marked with NULL */
2170 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2171 if (!array)
2172 return -ENOMEM;
2173 new = array->items;
2174
2175 /* Fill in the new prog array */
2176 if (carry_prog_cnt) {
2177 existing = old_array->items;
2178 for (; existing->prog; existing++) {
2179 if (existing->prog == exclude_prog ||
2180 existing->prog == &dummy_bpf_prog.prog)
2181 continue;
2182
2183 new->prog = existing->prog;
2184 new->bpf_cookie = existing->bpf_cookie;
2185 new++;
2186 }
2187 }
2188 if (include_prog) {
2189 new->prog = include_prog;
2190 new->bpf_cookie = bpf_cookie;
2191 new++;
2192 }
2193 new->prog = NULL;
2194 *new_array = array;
2195 return 0;
2196 }
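/* Hedged usage sketch (local names are illustrative): attaching one more
 * program while readers keep using the old array might look like
 *
 *	struct bpf_prog_array *new_array;
 *	int err;
 *
 *	err = bpf_prog_array_copy(old_array, NULL, new_prog, cookie,
 *				  &new_array);
 *	if (!err)
 *		publish new_array (e.g. via rcu_assign_pointer()) and free
 *		old_array once readers are done;
 *
 * passing an exclude_prog instead performs the matching detach, with
 * -ENOENT for a missing exclude and -EEXIST for an already attached include.
 */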
2197
2198 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2199 u32 *prog_ids, u32 request_cnt,
2200 u32 *prog_cnt)
2201 {
2202 u32 cnt = 0;
2203
2204 if (array)
2205 cnt = bpf_prog_array_length(array);
2206
2207 *prog_cnt = cnt;
2208
2209 /* return early if user requested only program count or nothing to copy */
2210 if (!request_cnt || !cnt)
2211 return 0;
2212
2213 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2214 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2215 : 0;
2216 }
2217
2218 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2219 struct bpf_map **used_maps, u32 len)
2220 {
2221 struct bpf_map *map;
2222 u32 i;
2223
2224 for (i = 0; i < len; i++) {
2225 map = used_maps[i];
2226 if (map->ops->map_poke_untrack)
2227 map->ops->map_poke_untrack(map, aux);
2228 bpf_map_put(map);
2229 }
2230 }
2231
2232 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2233 {
2234 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2235 kfree(aux->used_maps);
2236 }
2237
2238 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2239 struct btf_mod_pair *used_btfs, u32 len)
2240 {
2241 #ifdef CONFIG_BPF_SYSCALL
2242 struct btf_mod_pair *btf_mod;
2243 u32 i;
2244
2245 for (i = 0; i < len; i++) {
2246 btf_mod = &used_btfs[i];
2247 if (btf_mod->module)
2248 module_put(btf_mod->module);
2249 btf_put(btf_mod->btf);
2250 }
2251 #endif
2252 }
2253
2254 static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2255 {
2256 __bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
2257 kfree(aux->used_btfs);
2258 }
2259
2260 static void bpf_prog_free_deferred(struct work_struct *work)
2261 {
2262 struct bpf_prog_aux *aux;
2263 int i;
2264
2265 aux = container_of(work, struct bpf_prog_aux, work);
2266 bpf_free_used_maps(aux);
2267 bpf_free_used_btfs(aux);
2268 if (bpf_prog_is_dev_bound(aux))
2269 bpf_prog_offload_destroy(aux->prog);
2270 #ifdef CONFIG_PERF_EVENTS
2271 if (aux->prog->has_callchain_buf)
2272 put_callchain_buffers();
2273 #endif
2274 if (aux->dst_trampoline)
2275 bpf_trampoline_put(aux->dst_trampoline);
2276 for (i = 0; i < aux->func_cnt; i++) {
2277 /* We can just unlink the subprog poke descriptor table as
2278 * it was originally linked to the main program and is also
2279 * released along with it.
2280 */
2281 aux->func[i]->aux->poke_tab = NULL;
2282 bpf_jit_free(aux->func[i]);
2283 }
2284 if (aux->func_cnt) {
2285 kfree(aux->func);
2286 bpf_prog_unlock_free(aux->prog);
2287 } else {
2288 bpf_jit_free(aux->prog);
2289 }
2290 }
2291
2292 /* Free internal BPF program */
2293 void bpf_prog_free(struct bpf_prog *fp)
2294 {
2295 struct bpf_prog_aux *aux = fp->aux;
2296
2297 if (aux->dst_prog)
2298 bpf_prog_put(aux->dst_prog);
2299 INIT_WORK(&aux->work, bpf_prog_free_deferred);
2300 schedule_work(&aux->work);
2301 }
2302 EXPORT_SYMBOL_GPL(bpf_prog_free);
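/* Presumably the reason for the detour through a workqueue above: parts of
 * the teardown may sleep (e.g. put_callchain_buffers() and
 * bpf_trampoline_put() take mutexes, and freeing the insns/image ends up in
 * vfree()), while bpf_prog_free() itself should stay callable from contexts
 * that cannot block.
 */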
2303
2304 /* RNG for unprivileged user space with separated state from prandom_u32(). */
2305 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2306
2307 void bpf_user_rnd_init_once(void)
2308 {
2309 prandom_init_once(&bpf_user_rnd_state);
2310 }
2311
2312 BPF_CALL_0(bpf_user_rnd_u32)
2313 {
2314 /* Should someone ever have the rather unwise idea to use some
2315 * of the registers passed into this function, then note that
2316 * this function is called from native eBPF and classic-to-eBPF
2317 * transformations. Register assignments from both sides are
2318 * different, f.e. classic always sets fn(ctx, A, X) here.
2319 */
2320 struct rnd_state *state;
2321 u32 res;
2322
2323 state = &get_cpu_var(bpf_user_rnd_state);
2324 res = prandom_u32_state(state);
2325 put_cpu_var(bpf_user_rnd_state);
2326
2327 return res;
2328 }
2329
2330 BPF_CALL_0(bpf_get_raw_cpu_id)
2331 {
2332 return raw_smp_processor_id();
2333 }
2334
2335 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2336 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2337 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2338 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2339 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2340 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2341 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2342 const struct bpf_func_proto bpf_spin_lock_proto __weak;
2343 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2344 const struct bpf_func_proto bpf_jiffies64_proto __weak;
2345
2346 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2347 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2348 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2349 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2350 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2351 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2352
2353 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2354 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2355 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2356 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2357 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2358 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2359 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2360 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2361 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2362
2363 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2364 {
2365 return NULL;
2366 }
2367
2368 u64 __weak
2369 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2370 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2371 {
2372 return -ENOTSUPP;
2373 }
2374 EXPORT_SYMBOL_GPL(bpf_event_output);
2375
2376 /* Always built-in helper functions. */
2377 const struct bpf_func_proto bpf_tail_call_proto = {
2378 .func = NULL,
2379 .gpl_only = false,
2380 .ret_type = RET_VOID,
2381 .arg1_type = ARG_PTR_TO_CTX,
2382 .arg2_type = ARG_CONST_MAP_PTR,
2383 .arg3_type = ARG_ANYTHING,
2384 };
2385
2386 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2387 * It is encouraged to implement bpf_int_jit_compile() instead, so that
2388 * eBPF and implicitly also cBPF can get JITed!
2389 */
2390 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2391 {
2392 return prog;
2393 }
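/* A rough sketch (architecture details omitted) of what a real
 * bpf_int_jit_compile() override does instead of the stub above:
 *
 *	struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 *	{
 *		emit native code for prog->insnsi into an image;
 *		prog->bpf_func = (void *)image;
 *		prog->jited = 1;
 *		return prog;
 *	}
 *
 * On failure it must still return a valid program, which then simply keeps
 * the interpreter entry point chosen by bpf_prog_select_func().
 */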
2394
2395 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
2396 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2397 */
2398 void __weak bpf_jit_compile(struct bpf_prog *prog)
2399 {
2400 }
2401
2402 bool __weak bpf_helper_changes_pkt_data(void *func)
2403 {
2404 return false;
2405 }
2406
2407 /* Return TRUE if the JIT backend wants verifier to enable sub-register usage
2408 * analysis code and wants explicit zero extension inserted by verifier.
2409 * Otherwise, return FALSE.
2410 *
2411 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
2412 * you don't override this. JITs that don't want these extra insns can detect
2413 * them using insn_is_zext.
2414 */
2415 bool __weak bpf_jit_needs_zext(void)
2416 {
2417 return false;
2418 }
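/* Sketch of the detection mentioned above: the verifier-inserted zero
 * extension is a 32-bit mov with imm == 1, so a JIT walking the insn stream
 * can elide or specially handle it with
 *
 *	if (insn_is_zext(&insn[1]))
 *		handle the explicit zero extension of insn[0]'s dst;
 *
 * using insn_is_zext() from include/linux/filter.h.
 */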
2419
2420 bool __weak bpf_jit_supports_kfunc_call(void)
2421 {
2422 return false;
2423 }
2424
2425 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2426 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2427 */
2428 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2429 int len)
2430 {
2431 return -EFAULT;
2432 }
2433
2434 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2435 void *addr1, void *addr2)
2436 {
2437 return -ENOTSUPP;
2438 }
2439
2440 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2441 EXPORT_SYMBOL(bpf_stats_enabled_key);
2442
2443 /* All definitions of tracepoints related to BPF. */
2444 #define CREATE_TRACE_POINTS
2445 #include <linux/bpf_trace.h>
2446
2447 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2448 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
2449