1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Linux Socket Filter Data Structures
4  */
5 #ifndef __LINUX_FILTER_H__
6 #define __LINUX_FILTER_H__
7 
8 #include <linux/atomic.h>
9 #include <linux/refcount.h>
10 #include <linux/compat.h>
11 #include <linux/skbuff.h>
12 #include <linux/linkage.h>
13 #include <linux/printk.h>
14 #include <linux/workqueue.h>
15 #include <linux/sched.h>
16 #include <linux/capability.h>
17 #include <linux/set_memory.h>
18 #include <linux/kallsyms.h>
19 #include <linux/if_vlan.h>
20 #include <linux/vmalloc.h>
21 #include <linux/sockptr.h>
22 #include <crypto/sha1.h>
23 #include <linux/u64_stats_sync.h>
24 
25 #include <net/sch_generic.h>
26 
27 #include <asm/byteorder.h>
28 #include <uapi/linux/filter.h>
29 #include <uapi/linux/bpf.h>
30 
31 struct sk_buff;
32 struct sock;
33 struct seccomp_data;
34 struct bpf_prog_aux;
35 struct xdp_rxq_info;
36 struct xdp_buff;
37 struct sock_reuseport;
38 struct ctl_table;
39 struct ctl_table_header;
40 
41 /* ArgX, context and stack frame pointer register positions. Note,
42  * Arg1, Arg2, Arg3, etc. are used as argument mappings of function
43  * calls in the BPF_CALL instruction.
44  */
45 #define BPF_REG_ARG1	BPF_REG_1
46 #define BPF_REG_ARG2	BPF_REG_2
47 #define BPF_REG_ARG3	BPF_REG_3
48 #define BPF_REG_ARG4	BPF_REG_4
49 #define BPF_REG_ARG5	BPF_REG_5
50 #define BPF_REG_CTX	BPF_REG_6
51 #define BPF_REG_FP	BPF_REG_10
52 
53 /* Additional register mappings for converted user programs. */
54 #define BPF_REG_A	BPF_REG_0
55 #define BPF_REG_X	BPF_REG_7
56 #define BPF_REG_TMP	BPF_REG_2	/* scratch reg */
57 #define BPF_REG_D	BPF_REG_8	/* data, callee-saved */
58 #define BPF_REG_H	BPF_REG_9	/* hlen, callee-saved */
59 
60 /* Kernel hidden auxiliary/helper register. */
61 #define BPF_REG_AX		MAX_BPF_REG
62 #define MAX_BPF_EXT_REG		(MAX_BPF_REG + 1)
63 #define MAX_BPF_JIT_REG		MAX_BPF_EXT_REG
64 
65 /* unused opcode to mark special call to bpf_tail_call() helper */
66 #define BPF_TAIL_CALL	0xf0
67 
68 /* unused opcode to mark special load instruction. Same as BPF_ABS */
69 #define BPF_PROBE_MEM	0x20
70 
71 /* unused opcode to mark call to interpreter with arguments */
72 #define BPF_CALL_ARGS	0xe0
73 
74 /* unused opcode to mark speculation barrier for mitigating
75  * Speculative Store Bypass
76  */
77 #define BPF_NOSPEC	0xc0
78 
79 /* As per nm, we expose JITed images as text (code) section for
80  * kallsyms. That way, tools like perf can find it to match
81  * addresses.
82  */
83 #define BPF_SYM_ELF_TYPE	't'
84 
85 /* BPF program can access up to 512 bytes of stack space. */
86 #define MAX_BPF_STACK	512
87 
88 /* Helper macros for filter block array initializers. */
89 
90 /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
91 
92 #define BPF_ALU64_REG(OP, DST, SRC)				\
93 	((struct bpf_insn) {					\
94 		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
95 		.dst_reg = DST,					\
96 		.src_reg = SRC,					\
97 		.off   = 0,					\
98 		.imm   = 0 })
99 
100 #define BPF_ALU32_REG(OP, DST, SRC)				\
101 	((struct bpf_insn) {					\
102 		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
103 		.dst_reg = DST,					\
104 		.src_reg = SRC,					\
105 		.off   = 0,					\
106 		.imm   = 0 })
107 
108 /* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
109 
110 #define BPF_ALU64_IMM(OP, DST, IMM)				\
111 	((struct bpf_insn) {					\
112 		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
113 		.dst_reg = DST,					\
114 		.src_reg = 0,					\
115 		.off   = 0,					\
116 		.imm   = IMM })
117 
118 #define BPF_ALU32_IMM(OP, DST, IMM)				\
119 	((struct bpf_insn) {					\
120 		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
121 		.dst_reg = DST,					\
122 		.src_reg = 0,					\
123 		.off   = 0,					\
124 		.imm   = IMM })
125 
126 /* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
127 
128 #define BPF_ENDIAN(TYPE, DST, LEN)				\
129 	((struct bpf_insn) {					\
130 		.code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),	\
131 		.dst_reg = DST,					\
132 		.src_reg = 0,					\
133 		.off   = 0,					\
134 		.imm   = LEN })
135 
136 /* Short form of mov, dst_reg = src_reg */
137 
138 #define BPF_MOV64_REG(DST, SRC)					\
139 	((struct bpf_insn) {					\
140 		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
141 		.dst_reg = DST,					\
142 		.src_reg = SRC,					\
143 		.off   = 0,					\
144 		.imm   = 0 })
145 
146 #define BPF_MOV32_REG(DST, SRC)					\
147 	((struct bpf_insn) {					\
148 		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
149 		.dst_reg = DST,					\
150 		.src_reg = SRC,					\
151 		.off   = 0,					\
152 		.imm   = 0 })
153 
154 /* Short form of mov, dst_reg = imm32 */
155 
156 #define BPF_MOV64_IMM(DST, IMM)					\
157 	((struct bpf_insn) {					\
158 		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
159 		.dst_reg = DST,					\
160 		.src_reg = 0,					\
161 		.off   = 0,					\
162 		.imm   = IMM })
163 
164 #define BPF_MOV32_IMM(DST, IMM)					\
165 	((struct bpf_insn) {					\
166 		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
167 		.dst_reg = DST,					\
168 		.src_reg = 0,					\
169 		.off   = 0,					\
170 		.imm   = IMM })
171 
172 /* Special form of mov32, used for doing explicit zero extension on dst. */
173 #define BPF_ZEXT_REG(DST)					\
174 	((struct bpf_insn) {					\
175 		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
176 		.dst_reg = DST,					\
177 		.src_reg = DST,					\
178 		.off   = 0,					\
179 		.imm   = 1 })
180 
181 static inline bool insn_is_zext(const struct bpf_insn *insn)
182 {
183 	return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1;
184 }
185 
186 /* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
187 #define BPF_LD_IMM64(DST, IMM)					\
188 	BPF_LD_IMM64_RAW(DST, 0, IMM)
189 
190 #define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
191 	((struct bpf_insn) {					\
192 		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
193 		.dst_reg = DST,					\
194 		.src_reg = SRC,					\
195 		.off   = 0,					\
196 		.imm   = (__u32) (IMM) }),			\
197 	((struct bpf_insn) {					\
198 		.code  = 0, /* zero is reserved opcode */	\
199 		.dst_reg = 0,					\
200 		.src_reg = 0,					\
201 		.off   = 0,					\
202 		.imm   = ((__u64) (IMM)) >> 32 })
203 
204 /* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
205 #define BPF_LD_MAP_FD(DST, MAP_FD)				\
206 	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
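/* A minimal sketch of how the 64-bit immediate load is meant to be used
 * (values purely illustrative): BPF_LD_IMM64() expands to two struct
 * bpf_insn initializers, so it consumes two slots in an instruction array.
 *
 *	struct bpf_insn insns[] = {
 *		BPF_LD_IMM64(BPF_REG_1, 0x1122334455667788ULL),
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *
 * ARRAY_SIZE(insns) is 4 here: two slots for the immediate load, one for
 * the mov and one for the exit.
 */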
207 
208 /* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
209 
210 #define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)			\
211 	((struct bpf_insn) {					\
212 		.code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),	\
213 		.dst_reg = DST,					\
214 		.src_reg = SRC,					\
215 		.off   = 0,					\
216 		.imm   = IMM })
217 
218 #define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)			\
219 	((struct bpf_insn) {					\
220 		.code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),	\
221 		.dst_reg = DST,					\
222 		.src_reg = SRC,					\
223 		.off   = 0,					\
224 		.imm   = IMM })
225 
226 /* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
227 
228 #define BPF_LD_ABS(SIZE, IMM)					\
229 	((struct bpf_insn) {					\
230 		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
231 		.dst_reg = 0,					\
232 		.src_reg = 0,					\
233 		.off   = 0,					\
234 		.imm   = IMM })
235 
236 /* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */
237 
238 #define BPF_LD_IND(SIZE, SRC, IMM)				\
239 	((struct bpf_insn) {					\
240 		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,	\
241 		.dst_reg = 0,					\
242 		.src_reg = SRC,					\
243 		.off   = 0,					\
244 		.imm   = IMM })
245 
246 /* Memory load, dst_reg = *(uint *) (src_reg + off16) */
247 
248 #define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
249 	((struct bpf_insn) {					\
250 		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
251 		.dst_reg = DST,					\
252 		.src_reg = SRC,					\
253 		.off   = OFF,					\
254 		.imm   = 0 })
255 
256 /* Memory store, *(uint *) (dst_reg + off16) = src_reg */
257 
258 #define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
259 	((struct bpf_insn) {					\
260 		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
261 		.dst_reg = DST,					\
262 		.src_reg = SRC,					\
263 		.off   = OFF,					\
264 		.imm   = 0 })
265 
266 
267 /*
268  * Atomic operations:
269  *
270  *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
271  *   BPF_AND                  *(uint *) (dst_reg + off16) &= src_reg
272  *   BPF_OR                   *(uint *) (dst_reg + off16) |= src_reg
273  *   BPF_XOR                  *(uint *) (dst_reg + off16) ^= src_reg
274  *   BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
275  *   BPF_AND | BPF_FETCH      src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
276  *   BPF_OR | BPF_FETCH       src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
277  *   BPF_XOR | BPF_FETCH      src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
278  *   BPF_XCHG                 src_reg = atomic_xchg(dst_reg + off16, src_reg)
279  *   BPF_CMPXCHG              r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
280  */
281 
282 #define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)			\
283 	((struct bpf_insn) {					\
284 		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
285 		.dst_reg = DST,					\
286 		.src_reg = SRC,					\
287 		.off   = OFF,					\
288 		.imm   = OP })
289 
290 /* Legacy alias */
291 #define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
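/* For example, following the table above, a 64-bit atomic fetch-and-add
 * that returns the old value in src_reg can be emitted as
 *
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_2, BPF_REG_3, 0)
 *
 * i.e. R3 = atomic_fetch_add((u64 *)(R2 + 0), R3). The legacy
 * BPF_STX_XADD() form only covers the plain BPF_ADD case without the
 * BPF_FETCH return semantics.
 */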
292 
293 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
294 
295 #define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
296 	((struct bpf_insn) {					\
297 		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
298 		.dst_reg = DST,					\
299 		.src_reg = 0,					\
300 		.off   = OFF,					\
301 		.imm   = IMM })
302 
303 /* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
304 
305 #define BPF_JMP_REG(OP, DST, SRC, OFF)				\
306 	((struct bpf_insn) {					\
307 		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
308 		.dst_reg = DST,					\
309 		.src_reg = SRC,					\
310 		.off   = OFF,					\
311 		.imm   = 0 })
312 
313 /* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
314 
315 #define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
316 	((struct bpf_insn) {					\
317 		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
318 		.dst_reg = DST,					\
319 		.src_reg = 0,					\
320 		.off   = OFF,					\
321 		.imm   = IMM })
322 
323 /* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
324 
325 #define BPF_JMP32_REG(OP, DST, SRC, OFF)			\
326 	((struct bpf_insn) {					\
327 		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_X,	\
328 		.dst_reg = DST,					\
329 		.src_reg = SRC,					\
330 		.off   = OFF,					\
331 		.imm   = 0 })
332 
333 /* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
334 
335 #define BPF_JMP32_IMM(OP, DST, IMM, OFF)			\
336 	((struct bpf_insn) {					\
337 		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_K,	\
338 		.dst_reg = DST,					\
339 		.src_reg = 0,					\
340 		.off   = OFF,					\
341 		.imm   = IMM })
342 
343 /* Unconditional jumps, goto pc + off16 */
344 
345 #define BPF_JMP_A(OFF)						\
346 	((struct bpf_insn) {					\
347 		.code  = BPF_JMP | BPF_JA,			\
348 		.dst_reg = 0,					\
349 		.src_reg = 0,					\
350 		.off   = OFF,					\
351 		.imm   = 0 })
352 
353 /* Relative call */
354 
355 #define BPF_CALL_REL(TGT)					\
356 	((struct bpf_insn) {					\
357 		.code  = BPF_JMP | BPF_CALL,			\
358 		.dst_reg = 0,					\
359 		.src_reg = BPF_PSEUDO_CALL,			\
360 		.off   = 0,					\
361 		.imm   = TGT })
362 
363 /* Function call */
364 
365 #define BPF_CAST_CALL(x)					\
366 		((u64 (*)(u64, u64, u64, u64, u64))(x))
367 
368 #define BPF_EMIT_CALL(FUNC)					\
369 	((struct bpf_insn) {					\
370 		.code  = BPF_JMP | BPF_CALL,			\
371 		.dst_reg = 0,					\
372 		.src_reg = 0,					\
373 		.off   = 0,					\
374 		.imm   = ((FUNC) - __bpf_call_base) })
375 
376 /* Raw code statement block */
377 
378 #define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
379 	((struct bpf_insn) {					\
380 		.code  = CODE,					\
381 		.dst_reg = DST,					\
382 		.src_reg = SRC,					\
383 		.off   = OFF,					\
384 		.imm   = IMM })
385 
386 /* Program exit */
387 
388 #define BPF_EXIT_INSN()						\
389 	((struct bpf_insn) {					\
390 		.code  = BPF_JMP | BPF_EXIT,			\
391 		.dst_reg = 0,					\
392 		.src_reg = 0,					\
393 		.off   = 0,					\
394 		.imm   = 0 })
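/* Taken together, the initializers above are enough to spell out small
 * programs by hand; a minimal sketch (purely illustrative) that returns
 * the constant 3 would be:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 1),
 *		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
 *		BPF_EXIT_INSN(),
 *	};
 *
 * R0 carries the return value at BPF_EXIT, so running this program
 * yields 3.
 */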
395 
396 /* Speculation barrier */
397 
398 #define BPF_ST_NOSPEC()						\
399 	((struct bpf_insn) {					\
400 		.code  = BPF_ST | BPF_NOSPEC,			\
401 		.dst_reg = 0,					\
402 		.src_reg = 0,					\
403 		.off   = 0,					\
404 		.imm   = 0 })
405 
406 /* Internal classic blocks for direct assignment */
407 
408 #define __BPF_STMT(CODE, K)					\
409 	((struct sock_filter) BPF_STMT(CODE, K))
410 
411 #define __BPF_JUMP(CODE, K, JT, JF)				\
412 	((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))
413 
414 #define bytes_to_bpf_size(bytes)				\
415 ({								\
416 	int bpf_size = -EINVAL;					\
417 								\
418 	if (bytes == sizeof(u8))				\
419 		bpf_size = BPF_B;				\
420 	else if (bytes == sizeof(u16))				\
421 		bpf_size = BPF_H;				\
422 	else if (bytes == sizeof(u32))				\
423 		bpf_size = BPF_W;				\
424 	else if (bytes == sizeof(u64))				\
425 		bpf_size = BPF_DW;				\
426 								\
427 	bpf_size;						\
428 })
429 
430 #define bpf_size_to_bytes(bpf_size)				\
431 ({								\
432 	int bytes = -EINVAL;					\
433 								\
434 	if (bpf_size == BPF_B)					\
435 		bytes = sizeof(u8);				\
436 	else if (bpf_size == BPF_H)				\
437 		bytes = sizeof(u16);				\
438 	else if (bpf_size == BPF_W)				\
439 		bytes = sizeof(u32);				\
440 	else if (bpf_size == BPF_DW)				\
441 		bytes = sizeof(u64);				\
442 								\
443 	bytes;							\
444 })
445 
446 #define BPF_SIZEOF(type)					\
447 	({							\
448 		const int __size = bytes_to_bpf_size(sizeof(type)); \
449 		BUILD_BUG_ON(__size < 0);			\
450 		__size;						\
451 	})
452 
453 #define BPF_FIELD_SIZEOF(type, field)				\
454 	({							\
455 		const int __size = bytes_to_bpf_size(sizeof_field(type, field)); \
456 		BUILD_BUG_ON(__size < 0);			\
457 		__size;						\
458 	})
459 
460 #define BPF_LDST_BYTES(insn)					\
461 	({							\
462 		const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \
463 		WARN_ON(__size < 0);				\
464 		__size;						\
465 	})
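/* The size helpers above are typically paired with the load/store
 * initializers; e.g. a context conversion could emit (a sketch, field and
 * registers chosen only for illustration):
 *
 *	BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, len),
 *		    BPF_REG_0, BPF_REG_1, offsetof(struct sk_buff, len))
 *
 * which loads skb->len into R0 with the access width derived via
 * bytes_to_bpf_size(), and fails the build should the field ever get a
 * size that has no BPF equivalent.
 */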
466 
467 #define __BPF_MAP_0(m, v, ...) v
468 #define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
469 #define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
470 #define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
471 #define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
472 #define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)
473 
474 #define __BPF_REG_0(...) __BPF_PAD(5)
475 #define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
476 #define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
477 #define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
478 #define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
479 #define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)
480 
481 #define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
482 #define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)
483 
484 #define __BPF_CAST(t, a)						       \
485 	(__force t)							       \
486 	(__force							       \
487 	 typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long),      \
488 				      (unsigned long)0, (t)0))) a
489 #define __BPF_V void
490 #define __BPF_N
491 
492 #define __BPF_DECL_ARGS(t, a) t   a
493 #define __BPF_DECL_REGS(t, a) u64 a
494 
495 #define __BPF_PAD(n)							       \
496 	__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2,       \
497 		  u64, __ur_3, u64, __ur_4, u64, __ur_5)
498 
499 #define BPF_CALL_x(x, name, ...)					       \
500 	static __always_inline						       \
501 	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__));   \
502 	typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
503 	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__));	       \
504 	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))	       \
505 	{								       \
506 		return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
507 	}								       \
508 	static __always_inline						       \
509 	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
510 
511 #define BPF_CALL_0(name, ...)	BPF_CALL_x(0, name, __VA_ARGS__)
512 #define BPF_CALL_1(name, ...)	BPF_CALL_x(1, name, __VA_ARGS__)
513 #define BPF_CALL_2(name, ...)	BPF_CALL_x(2, name, __VA_ARGS__)
514 #define BPF_CALL_3(name, ...)	BPF_CALL_x(3, name, __VA_ARGS__)
515 #define BPF_CALL_4(name, ...)	BPF_CALL_x(4, name, __VA_ARGS__)
516 #define BPF_CALL_5(name, ...)	BPF_CALL_x(5, name, __VA_ARGS__)
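/* Helper functions are defined through these wrappers so that the u64
 * register calling convention gets cast back to the helper's natural
 * argument types. A hypothetical two-argument helper (name and body made
 * up for illustration) would look like:
 *
 *	BPF_CALL_2(bpf_example_add, u32, a, u32, b)
 *	{
 *		return a + b;
 *	}
 *
 * and could then be referenced from instructions via
 * BPF_EMIT_CALL(bpf_example_add).
 */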
517 
518 #define bpf_ctx_range(TYPE, MEMBER)						\
519 	offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
520 #define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2)				\
521 	offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
522 #if BITS_PER_LONG == 64
523 # define bpf_ctx_range_ptr(TYPE, MEMBER)					\
524 	offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
525 #else
526 # define bpf_ctx_range_ptr(TYPE, MEMBER)					\
527 	offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
528 #endif /* BITS_PER_LONG == 64 */
529 
530 #define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE)				\
531 	({									\
532 		BUILD_BUG_ON(sizeof_field(TYPE, MEMBER) != (SIZE));		\
533 		*(PTR_SIZE) = (SIZE);						\
534 		offsetof(TYPE, MEMBER);						\
535 	})
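/* The range macros above are intended for case labels in
 * ->is_valid_access() style switches, relying on the compiler's case
 * range extension; a sketch (members chosen only for illustration):
 *
 *	switch (off) {
 *	case bpf_ctx_range(struct __sk_buff, data):
 *	case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
 *		return true;
 *	default:
 *		return false;
 *	}
 *
 * where each label matches any byte offset that falls inside the named
 * member(s).
 */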
536 
537 /* A struct sock_filter is architecture independent. */
538 struct compat_sock_fprog {
539 	u16		len;
540 	compat_uptr_t	filter;	/* struct sock_filter * */
541 };
542 
543 struct sock_fprog_kern {
544 	u16			len;
545 	struct sock_filter	*filter;
546 };
547 
548 /* Some arches need doubleword alignment for their instructions and/or data */
549 #define BPF_IMAGE_ALIGNMENT 8
550 
551 struct bpf_binary_header {
552 	u32 pages;
553 	u8 image[] __aligned(BPF_IMAGE_ALIGNMENT);
554 };
555 
556 struct bpf_prog_stats {
557 	u64 cnt;
558 	u64 nsecs;
559 	u64 misses;
560 	struct u64_stats_sync syncp;
561 } __aligned(2 * sizeof(u64));
562 
563 struct bpf_prog {
564 	u16			pages;		/* Number of allocated pages */
565 	u16			jited:1,	/* Is our filter JIT'ed? */
566 				jit_requested:1,/* archs need to JIT the prog */
567 				gpl_compatible:1, /* Is filter GPL compatible? */
568 				cb_access:1,	/* Is control block accessed? */
569 				dst_needed:1,	/* Do we need dst entry? */
570 				blinded:1,	/* Was blinded */
571 				is_func:1,	/* program is a bpf function */
572 				kprobe_override:1, /* Do we override a kprobe? */
573 				has_callchain_buf:1, /* callchain buffer allocated? */
574 				enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
575 				call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
576 				call_get_func_ip:1; /* Do we call get_func_ip() */
577 	enum bpf_prog_type	type;		/* Type of BPF program */
578 	enum bpf_attach_type	expected_attach_type; /* For some prog types */
579 	u32			len;		/* Number of filter blocks */
580 	u32			jited_len;	/* Size of jited insns in bytes */
581 	u8			tag[BPF_TAG_SIZE];
582 	struct bpf_prog_stats __percpu *stats;
583 	int __percpu		*active;
584 	unsigned int		(*bpf_func)(const void *ctx,
585 					    const struct bpf_insn *insn);
586 	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
587 	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
588 	/* Instructions for interpreter */
589 	struct sock_filter	insns[0];
590 	struct bpf_insn		insnsi[];
591 };
592 
593 struct sk_filter {
594 	refcount_t	refcnt;
595 	struct rcu_head	rcu;
596 	struct bpf_prog	*prog;
597 };
598 
599 DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
600 
601 typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
602 					  const struct bpf_insn *insnsi,
603 					  unsigned int (*bpf_func)(const void *,
604 								   const struct bpf_insn *));
605 
606 static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
607 					  const void *ctx,
608 					  bpf_dispatcher_fn dfunc)
609 {
610 	u32 ret;
611 
612 	cant_migrate();
613 	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
614 		struct bpf_prog_stats *stats;
615 		u64 start = sched_clock();
616 
617 		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
618 		stats = this_cpu_ptr(prog->stats);
619 		u64_stats_update_begin(&stats->syncp);
620 		stats->cnt++;
621 		stats->nsecs += sched_clock() - start;
622 		u64_stats_update_end(&stats->syncp);
623 	} else {
624 		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
625 	}
626 	return ret;
627 }
628 
629 static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
630 {
631 	return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
632 }
633 
634 /*
635  * Use in preemptible and therefore migratable context to make sure that
636  * the execution of the BPF program runs on one CPU.
637  *
638  * This uses migrate_disable/enable() explicitly to document that the
639  * invocation of a BPF program does not require reentrancy protection
640  * against a BPF program which is invoked from a preempting task.
641  *
642  * For non-RT enabled kernels migrate_disable/enable() maps to
643  * preempt_disable/enable(), i.e. it also disables preemption.
644  */
645 static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
646 					  const void *ctx)
647 {
648 	u32 ret;
649 
650 	migrate_disable();
651 	ret = bpf_prog_run(prog, ctx);
652 	migrate_enable();
653 	return ret;
654 }
655 
656 #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
657 
658 struct bpf_skb_data_end {
659 	struct qdisc_skb_cb qdisc_cb;
660 	void *data_meta;
661 	void *data_end;
662 };
663 
664 struct bpf_nh_params {
665 	u32 nh_family;
666 	union {
667 		u32 ipv4_nh;
668 		struct in6_addr ipv6_nh;
669 	};
670 };
671 
672 struct bpf_redirect_info {
673 	u32 flags;
674 	u32 tgt_index;
675 	void *tgt_value;
676 	struct bpf_map *map;
677 	u32 map_id;
678 	enum bpf_map_type map_type;
679 	u32 kern_flags;
680 	struct bpf_nh_params nh;
681 };
682 
683 DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
684 
685 /* flags for bpf_redirect_info kern_flags */
686 #define BPF_RI_F_RF_NO_DIRECT	BIT(0)	/* no napi_direct on return_frame */
687 
688 /* Compute the linear packet data range [data, data_end) which
689  * will be accessed by various program types (cls_bpf, act_bpf,
690  * lwt, ...). Subsystems allowing direct data access must (!)
691  * ensure that cb[] area can be written to when BPF program is
692  * invoked (otherwise cb[] save/restore is necessary).
693  */
694 static inline void bpf_compute_data_pointers(struct sk_buff *skb)
695 {
696 	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
697 
698 	BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
699 	cb->data_meta = skb->data - skb_metadata_len(skb);
700 	cb->data_end  = skb->data + skb_headlen(skb);
701 }
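/* A tc-like caller is expected to refresh the pointers right before the
 * program runs, e.g. (a sketch, assuming a context where migration is
 * already disabled, such as softirq):
 *
 *	bpf_compute_data_pointers(skb);
 *	ret = bpf_prog_run(prog, skb);
 *
 * so that the program's data/data_end view matches the current linear
 * head of the skb.
 */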
702 
703 /* Similar to bpf_compute_data_pointers(), except that the original
704  * cb->data_end is saved in *saved_data_end for later restore.
705  */
706 static inline void bpf_compute_and_save_data_end(
707 	struct sk_buff *skb, void **saved_data_end)
708 {
709 	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
710 
711 	*saved_data_end = cb->data_end;
712 	cb->data_end  = skb->data + skb_headlen(skb);
713 }
714 
715 /* Restore the data_end saved by bpf_compute_and_save_data_end(). */
716 static inline void bpf_restore_data_end(
717 	struct sk_buff *skb, void *saved_data_end)
718 {
719 	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
720 
721 	cb->data_end = saved_data_end;
722 }
723 
724 static inline u8 *bpf_skb_cb(const struct sk_buff *skb)
725 {
726 	/* eBPF programs may read/write skb->cb[] area to transfer meta
727 	 * data between tail calls. Since this also needs to work with
728 	 * tc, that scratch memory is mapped to qdisc_skb_cb's data area.
729 	 *
730 	 * In some socket filter cases, the cb unfortunately needs to be
731 	 * saved/restored so that protocol specific skb->cb[] data won't
732 	 * be lost. In any case, due to unprivileged eBPF programs
733 	 * attached to sockets, we need to clear the bpf_skb_cb() area
734 	 * to not leak previous contents to user space.
735 	 */
736 	BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
737 	BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) !=
738 		     sizeof_field(struct qdisc_skb_cb, data));
739 
740 	return qdisc_skb_cb(skb)->data;
741 }
742 
743 /* Must be invoked with migration disabled */
744 static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
745 					 const void *ctx)
746 {
747 	const struct sk_buff *skb = ctx;
748 	u8 *cb_data = bpf_skb_cb(skb);
749 	u8 cb_saved[BPF_SKB_CB_LEN];
750 	u32 res;
751 
752 	if (unlikely(prog->cb_access)) {
753 		memcpy(cb_saved, cb_data, sizeof(cb_saved));
754 		memset(cb_data, 0, sizeof(cb_saved));
755 	}
756 
757 	res = bpf_prog_run(prog, skb);
758 
759 	if (unlikely(prog->cb_access))
760 		memcpy(cb_data, cb_saved, sizeof(cb_saved));
761 
762 	return res;
763 }
764 
765 static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
766 				       struct sk_buff *skb)
767 {
768 	u32 res;
769 
770 	migrate_disable();
771 	res = __bpf_prog_run_save_cb(prog, skb);
772 	migrate_enable();
773 	return res;
774 }
775 
776 static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
777 					struct sk_buff *skb)
778 {
779 	u8 *cb_data = bpf_skb_cb(skb);
780 	u32 res;
781 
782 	if (unlikely(prog->cb_access))
783 		memset(cb_data, 0, BPF_SKB_CB_LEN);
784 
785 	res = bpf_prog_run_pin_on_cpu(prog, skb);
786 	return res;
787 }
788 
789 DECLARE_BPF_DISPATCHER(xdp)
790 
791 DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
792 
793 u32 xdp_master_redirect(struct xdp_buff *xdp);
794 
795 static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
796 					    struct xdp_buff *xdp)
797 {
798 	/* Driver XDP hooks are invoked within a single NAPI poll cycle and thus
799 	 * under local_bh_disable(), which provides the needed RCU protection
800 	 * for accessing map entries.
801 	 */
802 	u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
803 
804 	if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) {
805 		if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev))
806 			act = xdp_master_redirect(xdp);
807 	}
808 
809 	return act;
810 }
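/* A driver's NAPI poll loop typically runs the attached program once per
 * received frame and dispatches on the verdict; a rough sketch (frame
 * handling details are driver specific and omitted):
 *
 *	u32 act = bpf_prog_run_xdp(prog, &xdp);
 *
 *	switch (act) {
 *	case XDP_PASS:
 *		break;			(build an skb, hand it to the stack)
 *	case XDP_TX:
 *	case XDP_REDIRECT:
 *		break;			(transmit, or call xdp_do_redirect())
 *	default:
 *		bpf_warn_invalid_xdp_action(act);
 *		fallthrough;
 *	case XDP_ABORTED:
 *	case XDP_DROP:
 *		break;			(drop the frame)
 *	}
 */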
811 
812 void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
813 
814 static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
815 {
816 	return prog->len * sizeof(struct bpf_insn);
817 }
818 
819 static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
820 {
821 	return round_up(bpf_prog_insn_size(prog) +
822 			sizeof(__be64) + 1, SHA1_BLOCK_SIZE);
823 }
824 
825 static inline unsigned int bpf_prog_size(unsigned int proglen)
826 {
827 	return max(sizeof(struct bpf_prog),
828 		   offsetof(struct bpf_prog, insns[proglen]));
829 }
830 
831 static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
832 {
833 	/* When classic BPF programs have been loaded and the arch
834 	 * does not have a classic BPF JIT (anymore), they have been
835 	 * converted via bpf_migrate_filter() to eBPF and thus always
836 	 * have an unspec program type.
837 	 */
838 	return prog->type == BPF_PROG_TYPE_UNSPEC;
839 }
840 
841 static inline u32 bpf_ctx_off_adjust_machine(u32 size)
842 {
843 	const u32 size_machine = sizeof(unsigned long);
844 
845 	if (size > size_machine && size % size_machine == 0)
846 		size = size_machine;
847 
848 	return size;
849 }
850 
851 static inline bool
852 bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
853 {
854 	return size <= size_default && (size & (size - 1)) == 0;
855 }
856 
857 static inline u8
858 bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
859 {
860 	u8 access_off = off & (size_default - 1);
861 
862 #ifdef __LITTLE_ENDIAN
863 	return access_off;
864 #else
865 	return size_default - (access_off + size);
866 #endif
867 }
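/* Worked example: a 2-byte narrow load at context offset 6 within a field
 * whose size_default is 4 gives access_off = 6 & 3 = 2. On little endian
 * the returned shift is 2 bytes from the start of the field; on big
 * endian it is 4 - (2 + 2) = 0, since the most significant bytes are
 * stored first.
 */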
868 
869 #define bpf_ctx_wide_access_ok(off, size, type, field)			\
870 	(size == sizeof(__u64) &&					\
871 	off >= offsetof(type, field) &&					\
872 	off + sizeof(__u64) <= offsetofend(type, field) &&		\
873 	off % sizeof(__u64) == 0)
874 
875 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
876 
877 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
878 {
879 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
880 	if (!fp->jited) {
881 		set_vm_flush_reset_perms(fp);
882 		set_memory_ro((unsigned long)fp, fp->pages);
883 	}
884 #endif
885 }
886 
887 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
888 {
889 	set_vm_flush_reset_perms(hdr);
890 	set_memory_ro((unsigned long)hdr, hdr->pages);
891 	set_memory_x((unsigned long)hdr, hdr->pages);
892 }
893 
894 static inline struct bpf_binary_header *
895 bpf_jit_binary_hdr(const struct bpf_prog *fp)
896 {
897 	unsigned long real_start = (unsigned long)fp->bpf_func;
898 	unsigned long addr = real_start & PAGE_MASK;
899 
900 	return (void *)addr;
901 }
902 
903 int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
904 static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
905 {
906 	return sk_filter_trim_cap(sk, skb, 1);
907 }
908 
909 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
910 void bpf_prog_free(struct bpf_prog *fp);
911 
912 bool bpf_opcode_in_insntable(u8 code);
913 
914 void bpf_prog_free_linfo(struct bpf_prog *prog);
915 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
916 			       const u32 *insn_to_jit_off);
917 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
918 void bpf_prog_jit_attempt_done(struct bpf_prog *prog);
919 
920 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
921 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags);
922 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
923 				  gfp_t gfp_extra_flags);
924 void __bpf_prog_free(struct bpf_prog *fp);
925 
926 static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
927 {
928 	__bpf_prog_free(fp);
929 }
930 
931 typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
932 				       unsigned int flen);
933 
934 int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
935 int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
936 			      bpf_aux_classic_check_t trans, bool save_orig);
937 void bpf_prog_destroy(struct bpf_prog *fp);
938 
939 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
940 int sk_attach_bpf(u32 ufd, struct sock *sk);
941 int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
942 int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
943 void sk_reuseport_prog_free(struct bpf_prog *prog);
944 int sk_detach_filter(struct sock *sk);
945 int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
946 		  unsigned int len);
947 
948 bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
949 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
950 
951 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
952 #define __bpf_call_base_args \
953 	((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
954 	 (void *)__bpf_call_base)
955 
956 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
957 void bpf_jit_compile(struct bpf_prog *prog);
958 bool bpf_jit_needs_zext(void);
959 bool bpf_jit_supports_kfunc_call(void);
960 bool bpf_helper_changes_pkt_data(void *func);
961 
962 static inline bool bpf_dump_raw_ok(const struct cred *cred)
963 {
964 	/* Reconstruction of call-sites is dependent on kallsyms,
965 	 * thus apply the same restriction to the dump.
966 	 */
967 	return kallsyms_show_value(cred);
968 }
969 
970 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
971 				       const struct bpf_insn *patch, u32 len);
972 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);
973 
974 void bpf_clear_redirect_map(struct bpf_map *map);
975 
976 static inline bool xdp_return_frame_no_direct(void)
977 {
978 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
979 
980 	return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
981 }
982 
983 static inline void xdp_set_return_frame_no_direct(void)
984 {
985 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
986 
987 	ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
988 }
989 
990 static inline void xdp_clear_return_frame_no_direct(void)
991 {
992 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
993 
994 	ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
995 }
996 
997 static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
998 				 unsigned int pktlen)
999 {
1000 	unsigned int len;
1001 
1002 	if (unlikely(!(fwd->flags & IFF_UP)))
1003 		return -ENETDOWN;
1004 
1005 	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
1006 	if (pktlen > len)
1007 		return -EMSGSIZE;
1008 
1009 	return 0;
1010 }
1011 
1012 /* The pair of xdp_do_redirect and xdp_do_flush MUST be called in the
1013  * same CPU context. Further, for best results, no more than a single map
1014  * for the do_redirect/do_flush pair should be used. This limitation is
1015  * because we only track one map and force a flush when the map changes.
1016  * This does not appear to be a real limitation for existing software.
1017  */
1018 int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
1019 			    struct xdp_buff *xdp, struct bpf_prog *prog);
1020 int xdp_do_redirect(struct net_device *dev,
1021 		    struct xdp_buff *xdp,
1022 		    struct bpf_prog *prog);
1023 void xdp_do_flush(void);
1024 
1025 /* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
1026  * it is no longer only flushing maps. Keep this define for compatibility
1027  * until all drivers are updated - do not use xdp_do_flush_map() in new code!
1028  */
1029 #define xdp_do_flush_map xdp_do_flush
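/* In driver terms the pairing described above boils down to (a sketch;
 * frames_left() is a hypothetical helper and the XDP run itself is
 * driver specific):
 *
 *	while (frames_left(rxq)) {
 *		...
 *		if (act == XDP_REDIRECT)
 *			xdp_do_redirect(dev, &xdp, prog);
 *	}
 *	xdp_do_flush();		(exactly once, at the end of the NAPI cycle)
 *
 * with xdp_do_flush() issued on the same CPU that performed the
 * redirects.
 */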
1030 
1031 void bpf_warn_invalid_xdp_action(u32 act);
1032 
1033 #ifdef CONFIG_INET
1034 struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
1035 				  struct bpf_prog *prog, struct sk_buff *skb,
1036 				  struct sock *migrating_sk,
1037 				  u32 hash);
1038 #else
1039 static inline struct sock *
1040 bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
1041 		     struct bpf_prog *prog, struct sk_buff *skb,
1042 		     struct sock *migrating_sk,
1043 		     u32 hash)
1044 {
1045 	return NULL;
1046 }
1047 #endif
1048 
1049 #ifdef CONFIG_BPF_JIT
1050 extern int bpf_jit_enable;
1051 extern int bpf_jit_harden;
1052 extern int bpf_jit_kallsyms;
1053 extern long bpf_jit_limit;
1054 extern long bpf_jit_limit_max;
1055 
1056 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
1057 
1058 struct bpf_binary_header *
1059 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1060 		     unsigned int alignment,
1061 		     bpf_jit_fill_hole_t bpf_fill_ill_insns);
1062 void bpf_jit_binary_free(struct bpf_binary_header *hdr);
1063 u64 bpf_jit_alloc_exec_limit(void);
1064 void *bpf_jit_alloc_exec(unsigned long size);
1065 void bpf_jit_free_exec(void *addr);
1066 void bpf_jit_free(struct bpf_prog *fp);
1067 
1068 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
1069 				struct bpf_jit_poke_descriptor *poke);
1070 
1071 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1072 			  const struct bpf_insn *insn, bool extra_pass,
1073 			  u64 *func_addr, bool *func_addr_fixed);
1074 
1075 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
1076 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
1077 
1078 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
1079 				u32 pass, void *image)
1080 {
1081 	pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
1082 	       proglen, pass, image, current->comm, task_pid_nr(current));
1083 
1084 	if (image)
1085 		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
1086 			       16, 1, image, proglen, false);
1087 }
1088 
1089 static inline bool bpf_jit_is_ebpf(void)
1090 {
1091 # ifdef CONFIG_HAVE_EBPF_JIT
1092 	return true;
1093 # else
1094 	return false;
1095 # endif
1096 }
1097 
1098 static inline bool ebpf_jit_enabled(void)
1099 {
1100 	return bpf_jit_enable && bpf_jit_is_ebpf();
1101 }
1102 
1103 static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
1104 {
1105 	return fp->jited && bpf_jit_is_ebpf();
1106 }
1107 
1108 static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
1109 {
1110 	/* These are the prerequisites; should someone ever have the
1111 	 * idea to call blinding outside of them, we make sure to
1112 	 * bail out.
1113 	 */
1114 	if (!bpf_jit_is_ebpf())
1115 		return false;
1116 	if (!prog->jit_requested)
1117 		return false;
1118 	if (!bpf_jit_harden)
1119 		return false;
1120 	if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
1121 		return false;
1122 
1123 	return true;
1124 }
1125 
1126 static inline bool bpf_jit_kallsyms_enabled(void)
1127 {
1128 	/* There are a couple of corner cases where kallsyms should
1129 	 * not be enabled, e.g. when JIT hardening is active.
1130 	 */
1131 	if (bpf_jit_harden)
1132 		return false;
1133 	if (!bpf_jit_kallsyms)
1134 		return false;
1135 	if (bpf_jit_kallsyms == 1)
1136 		return true;
1137 
1138 	return false;
1139 }
1140 
1141 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
1142 				 unsigned long *off, char *sym);
1143 bool is_bpf_text_address(unsigned long addr);
1144 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
1145 		    char *sym);
1146 
1147 static inline const char *
1148 bpf_address_lookup(unsigned long addr, unsigned long *size,
1149 		   unsigned long *off, char **modname, char *sym)
1150 {
1151 	const char *ret = __bpf_address_lookup(addr, size, off, sym);
1152 
1153 	if (ret && modname)
1154 		*modname = NULL;
1155 	return ret;
1156 }
1157 
1158 void bpf_prog_kallsyms_add(struct bpf_prog *fp);
1159 void bpf_prog_kallsyms_del(struct bpf_prog *fp);
1160 
1161 #else /* CONFIG_BPF_JIT */
1162 
1163 static inline bool ebpf_jit_enabled(void)
1164 {
1165 	return false;
1166 }
1167 
1168 static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
1169 {
1170 	return false;
1171 }
1172 
1173 static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
1174 {
1175 	return false;
1176 }
1177 
1178 static inline int
1179 bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
1180 			    struct bpf_jit_poke_descriptor *poke)
1181 {
1182 	return -ENOTSUPP;
1183 }
1184 
1185 static inline void bpf_jit_free(struct bpf_prog *fp)
1186 {
1187 	bpf_prog_unlock_free(fp);
1188 }
1189 
1190 static inline bool bpf_jit_kallsyms_enabled(void)
1191 {
1192 	return false;
1193 }
1194 
1195 static inline const char *
1196 __bpf_address_lookup(unsigned long addr, unsigned long *size,
1197 		     unsigned long *off, char *sym)
1198 {
1199 	return NULL;
1200 }
1201 
1202 static inline bool is_bpf_text_address(unsigned long addr)
1203 {
1204 	return false;
1205 }
1206 
1207 static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
1208 				  char *type, char *sym)
1209 {
1210 	return -ERANGE;
1211 }
1212 
1213 static inline const char *
1214 bpf_address_lookup(unsigned long addr, unsigned long *size,
1215 		   unsigned long *off, char **modname, char *sym)
1216 {
1217 	return NULL;
1218 }
1219 
1220 static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
1221 {
1222 }
1223 
1224 static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
1225 {
1226 }
1227 
1228 #endif /* CONFIG_BPF_JIT */
1229 
1230 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
1231 
1232 #define BPF_ANC		BIT(15)
1233 
1234 static inline bool bpf_needs_clear_a(const struct sock_filter *first)
1235 {
1236 	switch (first->code) {
1237 	case BPF_RET | BPF_K:
1238 	case BPF_LD | BPF_W | BPF_LEN:
1239 		return false;
1240 
1241 	case BPF_LD | BPF_W | BPF_ABS:
1242 	case BPF_LD | BPF_H | BPF_ABS:
1243 	case BPF_LD | BPF_B | BPF_ABS:
1244 		if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
1245 			return true;
1246 		return false;
1247 
1248 	default:
1249 		return true;
1250 	}
1251 }
1252 
1253 static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
1254 {
1255 	BUG_ON(ftest->code & BPF_ANC);
1256 
1257 	switch (ftest->code) {
1258 	case BPF_LD | BPF_W | BPF_ABS:
1259 	case BPF_LD | BPF_H | BPF_ABS:
1260 	case BPF_LD | BPF_B | BPF_ABS:
1261 #define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE:	\
1262 				return BPF_ANC | SKF_AD_##CODE
1263 		switch (ftest->k) {
1264 		BPF_ANCILLARY(PROTOCOL);
1265 		BPF_ANCILLARY(PKTTYPE);
1266 		BPF_ANCILLARY(IFINDEX);
1267 		BPF_ANCILLARY(NLATTR);
1268 		BPF_ANCILLARY(NLATTR_NEST);
1269 		BPF_ANCILLARY(MARK);
1270 		BPF_ANCILLARY(QUEUE);
1271 		BPF_ANCILLARY(HATYPE);
1272 		BPF_ANCILLARY(RXHASH);
1273 		BPF_ANCILLARY(CPU);
1274 		BPF_ANCILLARY(ALU_XOR_X);
1275 		BPF_ANCILLARY(VLAN_TAG);
1276 		BPF_ANCILLARY(VLAN_TAG_PRESENT);
1277 		BPF_ANCILLARY(PAY_OFFSET);
1278 		BPF_ANCILLARY(RANDOM);
1279 		BPF_ANCILLARY(VLAN_TPID);
1280 		}
1281 		fallthrough;
1282 	default:
1283 		return ftest->code;
1284 	}
1285 }
1286 
1287 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
1288 					   int k, unsigned int size);
1289 
1290 static inline int bpf_tell_extensions(void)
1291 {
1292 	return SKF_AD_MAX;
1293 }
1294 
1295 struct bpf_sock_addr_kern {
1296 	struct sock *sk;
1297 	struct sockaddr *uaddr;
1298 	/* Temporary "register" to make indirect stores to nested structures
1299 	 * defined above. We need three registers to make such a store, but
1300 	 * only two (src and dst) are available at convert_ctx_access time
1301 	 */
1302 	u64 tmp_reg;
1303 	void *t_ctx;	/* Attach type specific context. */
1304 };
1305 
1306 struct bpf_sock_ops_kern {
1307 	struct	sock *sk;
1308 	union {
1309 		u32 args[4];
1310 		u32 reply;
1311 		u32 replylong[4];
1312 	};
1313 	struct sk_buff	*syn_skb;
1314 	struct sk_buff	*skb;
1315 	void	*skb_data_end;
1316 	u8	op;
1317 	u8	is_fullsock;
1318 	u8	remaining_opt_len;
1319 	u64	temp;			/* temp and everything after is not
1320 					 * initialized to 0 before calling
1321 					 * the BPF program. New fields that
1322 					 * should be initialized to 0 should
1323 					 * be inserted before temp.
1324 					 * temp is scratch storage used by
1325 					 * sock_ops_convert_ctx_access
1326 					 * as temporary storage of a register.
1327 					 */
1328 };
1329 
1330 struct bpf_sysctl_kern {
1331 	struct ctl_table_header *head;
1332 	struct ctl_table *table;
1333 	void *cur_val;
1334 	size_t cur_len;
1335 	void *new_val;
1336 	size_t new_len;
1337 	int new_updated;
1338 	int write;
1339 	loff_t *ppos;
1340 	/* Temporary "register" for indirect stores to ppos. */
1341 	u64 tmp_reg;
1342 };
1343 
1344 #define BPF_SOCKOPT_KERN_BUF_SIZE	32
1345 struct bpf_sockopt_buf {
1346 	u8		data[BPF_SOCKOPT_KERN_BUF_SIZE];
1347 };
1348 
1349 struct bpf_sockopt_kern {
1350 	struct sock	*sk;
1351 	u8		*optval;
1352 	u8		*optval_end;
1353 	s32		level;
1354 	s32		optname;
1355 	s32		optlen;
1356 	s32		retval;
1357 };
1358 
1359 int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);
1360 
1361 struct bpf_sk_lookup_kern {
1362 	u16		family;
1363 	u16		protocol;
1364 	__be16		sport;
1365 	u16		dport;
1366 	struct {
1367 		__be32 saddr;
1368 		__be32 daddr;
1369 	} v4;
1370 	struct {
1371 		const struct in6_addr *saddr;
1372 		const struct in6_addr *daddr;
1373 	} v6;
1374 	struct sock	*selected_sk;
1375 	bool		no_reuseport;
1376 };
1377 
1378 extern struct static_key_false bpf_sk_lookup_enabled;
1379 
1380 /* Runners for BPF_SK_LOOKUP programs to invoke on socket lookup.
1381  *
1382  * Allowed return values for a BPF SK_LOOKUP program are SK_PASS and
1383  * SK_DROP. Their meaning is as follows:
1384  *
1385  *  SK_PASS && ctx.selected_sk != NULL: use selected_sk as lookup result
1386  *  SK_PASS && ctx.selected_sk == NULL: continue to htable-based socket lookup
1387  *  SK_DROP                           : terminate lookup with -ECONNREFUSED
1388  *
1389  * This macro aggregates return values and selected sockets from
1390  * multiple BPF programs according to following rules in order:
1391  *
1392  *  1. If any program returned SK_PASS and a non-NULL ctx.selected_sk,
1393  *     macro result is SK_PASS and last ctx.selected_sk is used.
1394  *  2. If any program returned SK_DROP return value,
1395  *     macro result is SK_DROP.
1396  *  3. Otherwise result is SK_PASS and ctx.selected_sk is NULL.
1397  *
1398  * Caller must ensure that the prog array is non-NULL, and that the
1399  * array as well as the programs it contains remain valid.
1400  */
1401 #define BPF_PROG_SK_LOOKUP_RUN_ARRAY(array, ctx, func)			\
1402 	({								\
1403 		struct bpf_sk_lookup_kern *_ctx = &(ctx);		\
1404 		struct bpf_prog_array_item *_item;			\
1405 		struct sock *_selected_sk = NULL;			\
1406 		bool _no_reuseport = false;				\
1407 		struct bpf_prog *_prog;					\
1408 		bool _all_pass = true;					\
1409 		u32 _ret;						\
1410 									\
1411 		migrate_disable();					\
1412 		_item = &(array)->items[0];				\
1413 		while ((_prog = READ_ONCE(_item->prog))) {		\
1414 			/* restore most recent selection */		\
1415 			_ctx->selected_sk = _selected_sk;		\
1416 			_ctx->no_reuseport = _no_reuseport;		\
1417 									\
1418 			_ret = func(_prog, _ctx);			\
1419 			if (_ret == SK_PASS && _ctx->selected_sk) {	\
1420 				/* remember last non-NULL socket */	\
1421 				_selected_sk = _ctx->selected_sk;	\
1422 				_no_reuseport = _ctx->no_reuseport;	\
1423 			} else if (_ret == SK_DROP && _all_pass) {	\
1424 				_all_pass = false;			\
1425 			}						\
1426 			_item++;					\
1427 		}							\
1428 		_ctx->selected_sk = _selected_sk;			\
1429 		_ctx->no_reuseport = _no_reuseport;			\
1430 		migrate_enable();					\
1431 		_all_pass || _selected_sk ? SK_PASS : SK_DROP;		\
1432 	 })
1433 
1434 static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
1435 					const __be32 saddr, const __be16 sport,
1436 					const __be32 daddr, const u16 dport,
1437 					struct sock **psk)
1438 {
1439 	struct bpf_prog_array *run_array;
1440 	struct sock *selected_sk = NULL;
1441 	bool no_reuseport = false;
1442 
1443 	rcu_read_lock();
1444 	run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
1445 	if (run_array) {
1446 		struct bpf_sk_lookup_kern ctx = {
1447 			.family		= AF_INET,
1448 			.protocol	= protocol,
1449 			.v4.saddr	= saddr,
1450 			.v4.daddr	= daddr,
1451 			.sport		= sport,
1452 			.dport		= dport,
1453 		};
1454 		u32 act;
1455 
1456 		act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
1457 		if (act == SK_PASS) {
1458 			selected_sk = ctx.selected_sk;
1459 			no_reuseport = ctx.no_reuseport;
1460 		} else {
1461 			selected_sk = ERR_PTR(-ECONNREFUSED);
1462 		}
1463 	}
1464 	rcu_read_unlock();
1465 	*psk = selected_sk;
1466 	return no_reuseport;
1467 }
1468 
1469 #if IS_ENABLED(CONFIG_IPV6)
1470 static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
1471 					const struct in6_addr *saddr,
1472 					const __be16 sport,
1473 					const struct in6_addr *daddr,
1474 					const u16 dport,
1475 					struct sock **psk)
1476 {
1477 	struct bpf_prog_array *run_array;
1478 	struct sock *selected_sk = NULL;
1479 	bool no_reuseport = false;
1480 
1481 	rcu_read_lock();
1482 	run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
1483 	if (run_array) {
1484 		struct bpf_sk_lookup_kern ctx = {
1485 			.family		= AF_INET6,
1486 			.protocol	= protocol,
1487 			.v6.saddr	= saddr,
1488 			.v6.daddr	= daddr,
1489 			.sport		= sport,
1490 			.dport		= dport,
1491 		};
1492 		u32 act;
1493 
1494 		act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
1495 		if (act == SK_PASS) {
1496 			selected_sk = ctx.selected_sk;
1497 			no_reuseport = ctx.no_reuseport;
1498 		} else {
1499 			selected_sk = ERR_PTR(-ECONNREFUSED);
1500 		}
1501 	}
1502 	rcu_read_unlock();
1503 	*psk = selected_sk;
1504 	return no_reuseport;
1505 }
1506 #endif /* IS_ENABLED(CONFIG_IPV6) */
1507 
1508 static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifindex,
1509 						  u64 flags, const u64 flag_mask,
1510 						  void *lookup_elem(struct bpf_map *map, u32 key))
1511 {
1512 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
1513 	const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;
1514 
1515 	/* Lower bits of the flags are used as return code on lookup failure */
1516 	if (unlikely(flags & ~(action_mask | flag_mask)))
1517 		return XDP_ABORTED;
1518 
1519 	ri->tgt_value = lookup_elem(map, ifindex);
1520 	if (unlikely(!ri->tgt_value) && !(flags & BPF_F_BROADCAST)) {
1521 		/* If the lookup fails we want to clear out the state in the
1522 		 * redirect_info struct completely, so that if an eBPF program
1523 		 * performs multiple lookups, the last one always takes
1524 		 * precedence.
1525 		 */
1526 		ri->map_id = INT_MAX; /* Valid map id idr range: [1,INT_MAX[ */
1527 		ri->map_type = BPF_MAP_TYPE_UNSPEC;
1528 		return flags & action_mask;
1529 	}
1530 
1531 	ri->tgt_index = ifindex;
1532 	ri->map_id = map->id;
1533 	ri->map_type = map->map_type;
1534 
1535 	if (flags & BPF_F_BROADCAST) {
1536 		WRITE_ONCE(ri->map, map);
1537 		ri->flags = flags;
1538 	} else {
1539 		WRITE_ONCE(ri->map, NULL);
1540 		ri->flags = 0;
1541 	}
1542 
1543 	return XDP_REDIRECT;
1544 }
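/* Map implementations wrap this generic helper with their own element
 * lookup; a devmap-style redirect helper could be shaped roughly like
 * this (names below are made up for illustration):
 *
 *	static int example_map_redirect(struct bpf_map *map, u32 ifindex,
 *					u64 flags)
 *	{
 *		return __bpf_xdp_redirect_map(map, ifindex, flags,
 *					      BPF_F_BROADCAST |
 *					      BPF_F_EXCLUDE_INGRESS,
 *					      example_lookup_elem);
 *	}
 *
 * where example_lookup_elem() is that map type's element lookup and the
 * flag mask lists the redirect flags the map supports.
 */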
1545 
1546 #endif /* __LINUX_FILTER_H__ */
1547