/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if the total number
 * of insns is less than 4K but there are too many branches that change
 * stack/regs. The number of 'branches to be analyzed' is limited to 1k.
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer
 * types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either a pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 */
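
/* A typical program (illustrative sketch) tests the lookup result before
 * using it:
 *
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),  // if R0 == NULL skip the store
 *    BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),   // R0 is PTR_TO_MAP_VALUE here
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),
 *
 * In the fall-through branch R0 has type PTR_TO_MAP_VALUE, so the 8-byte
 * store at offset 0 is checked against the map's value_size.
 */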

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
};

#define BPF_COMPLEXITY_LIMIT_INSNS	131072
#define BPF_COMPLEXITY_LIMIT_STACK	1024

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_state = (unsigned long)map |
			 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}
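
/* map_state packs the map pointer and an 'unpriv' flag into a single word:
 * map pointers are pointer-aligned, so bit 0 is free to carry the flag, and
 * the BUILD_BUG_ON above checks that the poison value doesn't collide with
 * it. A minimal usage sketch (illustrative; 'aux', 'map' and 'unpriv' are
 * assumed to come from the caller):
 *
 *	bpf_map_ptr_store(aux, map, unpriv);
 *	if (!bpf_map_ptr_poisoned(aux) && !bpf_map_ptr_unpriv(aux))
 *		map = BPF_MAP_PTR(aux->map_state);	// recover the pointer
 */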

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
	s64 msize_smax_value;
	u64 msize_umax_value;
};

static DEFINE_MUTEX(bpf_verifier_lock);

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}

/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[SCALAR_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[PTR_TO_STACK]		= "fp",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_META]	= "pkt_meta",
	[PTR_TO_PACKET_END]	= "pkt_end",
};

static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
}

static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}

static void print_verifier_state(struct bpf_verifier_env *env,
				 const struct bpf_func_state *state)
{
	const struct bpf_reg_state *reg;
	enum bpf_reg_type t;
	int i;

	if (state->frameno)
		verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=%s", reg_type_str[t]);
		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
		    tnum_is_const(reg->var_off)) {
			/* reg->off should be 0 for SCALAR_VALUE */
			verbose(env, "%lld", reg->var_off.value + reg->off);
			if (t == PTR_TO_STACK)
				verbose(env, ",call_%d", func(env, reg)->callsite);
		} else {
			verbose(env, "(id=%d", reg->id);
			if (t != SCALAR_VALUE)
				verbose(env, ",off=%d", reg->off);
			if (type_is_pkt_pointer(t))
				verbose(env, ",r=%d", reg->range);
			else if (t == CONST_PTR_TO_MAP ||
				 t == PTR_TO_MAP_VALUE ||
				 t == PTR_TO_MAP_VALUE_OR_NULL)
				verbose(env, ",ks=%d,vs=%d",
					reg->map_ptr->key_size,
					reg->map_ptr->value_size);
			if (tnum_is_const(reg->var_off)) {
				/* Typically an immediate SCALAR_VALUE, but
				 * could be a pointer whose offset is too big
				 * for reg->off
				 */
				verbose(env, ",imm=%llx", reg->var_off.value);
			} else {
				if (reg->smin_value != reg->umin_value &&
				    reg->smin_value != S64_MIN)
					verbose(env, ",smin_value=%lld",
						(long long)reg->smin_value);
				if (reg->smax_value != reg->umax_value &&
				    reg->smax_value != S64_MAX)
					verbose(env, ",smax_value=%lld",
						(long long)reg->smax_value);
				if (reg->umin_value != 0)
					verbose(env, ",umin_value=%llu",
						(unsigned long long)reg->umin_value);
				if (reg->umax_value != U64_MAX)
					verbose(env, ",umax_value=%llu",
						(unsigned long long)reg->umax_value);
				if (!tnum_is_unknown(reg->var_off)) {
					char tn_buf[48];

					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
					verbose(env, ",var_off=%s", tn_buf);
				}
			}
			verbose(env, ")");
		}
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		if (state->stack[i].slot_type[0] == STACK_SPILL) {
			verbose(env, " fp%d",
				(-i - 1) * BPF_REG_SIZE);
			print_liveness(env, state->stack[i].spilled_ptr.live);
			verbose(env, "=%s",
				reg_type_str[state->stack[i].spilled_ptr.type]);
		}
		if (state->stack[i].slot_type[0] == STACK_ZERO)
			verbose(env, " fp%d=0", (-i - 1) * BPF_REG_SIZE);
	}
	verbose(env, "\n");
}

static int copy_stack_state(struct bpf_func_state *dst,
			    const struct bpf_func_state *src)
{
	if (!src->stack)
		return 0;
	if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) {
		/* internal bug, make state invalid to reject the program */
		memset(dst, 0, sizeof(*dst));
		return -EFAULT;
	}
	memcpy(dst->stack, src->stack,
	       sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE));
	return 0;
}

/* do_check() starts with a zero-sized stack in struct bpf_verifier_state to
 * make it consume a minimal amount of memory. When the program performs a
 * stack write, check_stack_write() calls into realloc_func_state() to grow
 * the stack size.
 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
 * which this function copies over. It points to the previous
 * bpf_verifier_state, which is never reallocated.
 */
static int realloc_func_state(struct bpf_func_state *state, int size,
			      bool copy_old)
{
	u32 old_size = state->allocated_stack;
	struct bpf_stack_state *new_stack;
	int slot = size / BPF_REG_SIZE;

	if (size <= old_size || !size) {
		if (copy_old)
			return 0;
		state->allocated_stack = slot * BPF_REG_SIZE;
		if (!size && old_size) {
			kfree(state->stack);
			state->stack = NULL;
		}
		return 0;
	}
	new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state),
				  GFP_KERNEL);
	if (!new_stack)
		return -ENOMEM;
	if (copy_old) {
		if (state->stack)
			memcpy(new_stack, state->stack,
			       sizeof(*new_stack) * (old_size / BPF_REG_SIZE));
		memset(new_stack + old_size / BPF_REG_SIZE, 0,
		       sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE);
	}
	state->allocated_stack = slot * BPF_REG_SIZE;
	kfree(state->stack);
	state->stack = new_stack;
	return 0;
}

static void free_func_state(struct bpf_func_state *state)
{
	if (!state)
		return;
	kfree(state->stack);
	kfree(state);
}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	int i;

	for (i = 0; i <= state->curframe; i++) {
		free_func_state(state->frame[i]);
		state->frame[i] = NULL;
	}
	if (free_self)
		kfree(state);
}

/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int err;

	err = realloc_func_state(dst, src->allocated_stack, false);
	if (err)
		return err;
	memcpy(dst, src, offsetof(struct bpf_func_state, allocated_stack));
	return copy_stack_state(dst, src);
}

static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{
	struct bpf_func_state *dst;
	int i, err;

	/* if dst has more stack frames than src, free them */
	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
		free_func_state(dst_state->frame[i]);
		dst_state->frame[i] = NULL;
	}
	dst_state->curframe = src->curframe;
	dst_state->parent = src->parent;
	for (i = 0; i <= src->curframe; i++) {
		dst = dst_state->frame[i];
		if (!dst) {
			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
			if (!dst)
				return -ENOMEM;
			dst_state->frame[i] = dst;
		}
		err = copy_func_state(dst, src->frame[i]);
		if (err)
			return err;
	}
	return 0;
}

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}

static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem;
	int err;

	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	env->head = elem;
	env->stack_size++;
	err = copy_verifier_state(&elem->st, cur);
	if (err)
		goto err;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
		verbose(env, "BPF program is too complex\n");
		goto err;
	}
	return &elem->st;
err:
	free_verifier_state(env->cur_state, true);
	env->cur_state = NULL;
	/* pop all elements and return */
	while (!pop_stack(env, NULL, NULL));
	return NULL;
}

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void __mark_reg_not_init(struct bpf_reg_state *reg);

/* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
 */
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	reg->id = 0;
	reg->var_off = tnum_const(imm);
	reg->smin_value = (s64)imm;
	reg->smax_value = (s64)imm;
	reg->umin_value = imm;
	reg->umax_value = imm;
}

/* Mark the 'variable offset' part of a register as zero.  This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
}

static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
	reg->off = 0;
	reg->type = SCALAR_VALUE;
}

static void mark_reg_known_zero(struct bpf_verifier_env *env,
				struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_known_zero(regs + regno);
}

static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
	return type_is_pkt_pointer(reg->type);
}

static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
	return reg_is_pkt_pointer(reg) ||
	       reg->type == PTR_TO_PACKET_END;
}

/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
				    enum bpf_reg_type which)
{
	/* The register can already have a range from prior markings.
	 * This is fine as long as it hasn't been advanced from its
	 * origin.
	 */
	return reg->type == which &&
	       reg->id == 0 &&
	       reg->off == 0 &&
	       tnum_equals_const(reg->var_off, 0);
}

/* Attempts to improve min/max values based on var_off information */
static void __update_reg_bounds(struct bpf_reg_state *reg)
{
	/* min signed is max(sign bit) | min(other bits) */
	reg->smin_value = max_t(s64, reg->smin_value,
				reg->var_off.value | (reg->var_off.mask & S64_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->smax_value = min_t(s64, reg->smax_value,
				reg->var_off.value | (reg->var_off.mask & S64_MAX));
	reg->umin_value = max(reg->umin_value, reg->var_off.value);
	reg->umax_value = min(reg->umax_value,
			      reg->var_off.value | reg->var_off.mask);
}
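
/* A worked example (illustrative): if var_off has value 0x10 and mask 0xf
 * (bit 4 known set, the low four bits unknown, all higher bits known zero),
 * the known bits alone imply 0x10 <= reg <= 0x1f, so umin_value is raised
 * to at least 0x10 and umax_value lowered to at most 0x1f.
 */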

/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine.  This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->smin_value >= 0 || reg->smax_value < 0) {
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
		return;
	}
	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s64)reg->umax_value >= 0) {
		/* Positive.  We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->smin_value = reg->umin_value;
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
	} else if ((s64)reg->umin_value < 0) {
		/* Negative.  We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value;
	}
}

/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
	reg->var_off = tnum_intersect(reg->var_off,
				      tnum_range(reg->umin_value,
						 reg->umax_value));
}
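
/* E.g. (illustrative): with umin_value = 16 and umax_value = 31,
 * tnum_range() yields {value = 0x10, mask = 0xf} (bit 4 known set, the low
 * four bits unknown), and tnum_intersect() folds those known bits into
 * var_off.
 */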

/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;
}

/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(struct bpf_reg_state *reg)
{
	reg->type = SCALAR_VALUE;
	reg->id = 0;
	reg->off = 0;
	reg->var_off = tnum_unknown;
	reg->frameno = 0;
	__mark_reg_unbounded(reg);
}

static void mark_reg_unknown(struct bpf_verifier_env *env,
			     struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_unknown(regs + regno);
}

static void __mark_reg_not_init(struct bpf_reg_state *reg)
{
	__mark_reg_unknown(reg);
	reg->type = NOT_INIT;
}

static void mark_reg_not_init(struct bpf_verifier_env *env,
			      struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_not_init(regs + regno);
}

static void init_reg_state(struct bpf_verifier_env *env,
			   struct bpf_func_state *state)
{
	struct bpf_reg_state *regs = state->regs;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		mark_reg_not_init(env, regs, i);
		regs[i].live = REG_LIVE_NONE;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = PTR_TO_STACK;
	mark_reg_known_zero(env, regs, BPF_REG_FP);
	regs[BPF_REG_FP].frameno = state->frameno;

	/* 1st arg to a function */
	regs[BPF_REG_1].type = PTR_TO_CTX;
	mark_reg_known_zero(env, regs, BPF_REG_1);
}

#define BPF_MAIN_FUNC (-1)
static void init_func_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *state,
			    int callsite, int frameno, int subprogno)
{
	state->callsite = callsite;
	state->frameno = frameno;
	state->subprogno = subprogno;
	init_reg_state(env, state);
}

enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};

static int cmp_subprogs(const void *a, const void *b)
{
	return ((struct bpf_subprog_info *)a)->start -
	       ((struct bpf_subprog_info *)b)->start;
}

static int find_subprog(struct bpf_verifier_env *env, int off)
{
	struct bpf_subprog_info *p;

	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
		    sizeof(env->subprog_info[0]), cmp_subprogs);
	if (!p)
		return -ENOENT;
	return p - env->subprog_info;

}

static int add_subprog(struct bpf_verifier_env *env, int off)
{
	int insn_cnt = env->prog->len;
	int ret;

	if (off >= insn_cnt || off < 0) {
		verbose(env, "call to invalid destination\n");
		return -EINVAL;
	}
	ret = find_subprog(env, off);
	if (ret >= 0)
		return 0;
	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
		verbose(env, "too many subprograms\n");
		return -E2BIG;
	}
	env->subprog_info[env->subprog_cnt++].start = off;
	sort(env->subprog_info, env->subprog_cnt,
	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
	return 0;
}

static int check_subprogs(struct bpf_verifier_env *env)
{
	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
	struct bpf_subprog_info *subprog = env->subprog_info;
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;

	/* Add entry function. */
	ret = add_subprog(env, 0);
	if (ret < 0)
		return ret;

	/* determine subprog starts. The end is one before the next starts */
	for (i = 0; i < insn_cnt; i++) {
		if (insn[i].code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn[i].src_reg != BPF_PSEUDO_CALL)
			continue;
		if (!env->allow_ptr_leaks) {
			verbose(env, "function calls to other bpf functions are allowed for root only\n");
			return -EPERM;
		}
		if (bpf_prog_is_dev_bound(env->prog->aux)) {
			verbose(env, "function calls in offloaded programs are not supported yet\n");
			return -EINVAL;
		}
		ret = add_subprog(env, i + insn[i].imm + 1);
		if (ret < 0)
			return ret;
	}

	/* Add a fake 'exit' subprog which could simplify subprog iteration
	 * logic. 'subprog_cnt' should not be increased.
	 */
	subprog[env->subprog_cnt].start = insn_cnt;

	if (env->log.level > 1)
		for (i = 0; i < env->subprog_cnt; i++)
			verbose(env, "func#%d @%d\n", i, subprog[i].start);

	/* now check that all jumps are within the same subprog */
	subprog_start = subprog[cur_subprog].start;
	subprog_end = subprog[cur_subprog + 1].start;
	for (i = 0; i < insn_cnt; i++) {
		u8 code = insn[i].code;

		if (BPF_CLASS(code) != BPF_JMP)
			goto next;
		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
			goto next;
		off = i + insn[i].off + 1;
		if (off < subprog_start || off >= subprog_end) {
			verbose(env, "jump out of range from insn %d to %d\n", i, off);
			return -EINVAL;
		}
next:
		if (i == subprog_end - 1) {
			/* to avoid fall-through from one subprog into another
			 * the last insn of the subprog should be either exit
			 * or unconditional jump back
			 */
			if (code != (BPF_JMP | BPF_EXIT) &&
			    code != (BPF_JMP | BPF_JA)) {
				verbose(env, "last insn is not an exit or jmp\n");
				return -EINVAL;
			}
			subprog_start = subprog_end;
			cur_subprog++;
			if (cur_subprog < env->subprog_cnt)
				subprog_end = subprog[cur_subprog + 1].start;
		}
	}
	return 0;
}
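
/* An illustrative layout: for a program
 *
 *   0: r1 = 1
 *   1: call pc+1        // BPF_PSEUDO_CALL, imm = 1, target = insn 3
 *   2: exit             // last insn of subprog 0
 *   3: r0 = r1          // start of subprog 1
 *   4: exit
 *
 * check_subprogs() records starts {0, 3}, adds the fake end at insn 5, and
 * verifies that no jump crosses the boundary between insns 2 and 3.
 */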

static
struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env,
				       const struct bpf_verifier_state *state,
				       struct bpf_verifier_state *parent,
				       u32 regno)
{
	struct bpf_verifier_state *tmp = NULL;

	/* 'parent' could be a state of caller and
	 * 'state' could be a state of callee. In such case
	 * parent->curframe < state->curframe
	 * and it's ok for r1 - r5 registers
	 *
	 * 'parent' could be a callee's state after it bpf_exit-ed.
	 * In such case parent->curframe > state->curframe
	 * and it's ok for r0 only
	 */
	if (parent->curframe == state->curframe ||
	    (parent->curframe < state->curframe &&
	     regno >= BPF_REG_1 && regno <= BPF_REG_5) ||
	    (parent->curframe > state->curframe &&
	       regno == BPF_REG_0))
		return parent;

	if (parent->curframe > state->curframe &&
	    regno >= BPF_REG_6) {
		/* for callee saved regs we have to skip the whole chain
		 * of states that belong to callee and mark as LIVE_READ
		 * the registers before the call
		 */
		tmp = parent;
		while (tmp && tmp->curframe != state->curframe) {
			tmp = tmp->parent;
		}
		if (!tmp)
			goto bug;
		parent = tmp;
	} else {
		goto bug;
	}
	return parent;
bug:
	verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp);
	verbose(env, "regno %d parent frame %d current frame %d\n",
		regno, parent->curframe, state->curframe);
	return NULL;
}

static int mark_reg_read(struct bpf_verifier_env *env,
			 const struct bpf_verifier_state *state,
			 struct bpf_verifier_state *parent,
			 u32 regno)
{
	bool writes = parent == state->parent; /* Observe write marks */

	if (regno == BPF_REG_FP)
		/* We don't need to worry about FP liveness because it's read-only */
		return 0;

	while (parent) {
		/* if read wasn't screened by an earlier write ... */
		if (writes && state->frame[state->curframe]->regs[regno].live & REG_LIVE_WRITTEN)
			break;
		parent = skip_callee(env, state, parent, regno);
		if (!parent)
			return -EFAULT;
		/* ... then we depend on parent's value */
		parent->frame[parent->curframe]->regs[regno].live |= REG_LIVE_READ;
		state = parent;
		parent = state->parent;
		writes = true;
	}
	return 0;
}

static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
			 enum reg_arg_type t)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *regs = state->regs;

	if (regno >= MAX_BPF_REG) {
		verbose(env, "R%d is invalid\n", regno);
		return -EINVAL;
	}

	if (t == SRC_OP) {
		/* check whether register used as source operand can be read */
		if (regs[regno].type == NOT_INIT) {
			verbose(env, "R%d !read_ok\n", regno);
			return -EACCES;
		}
		return mark_reg_read(env, vstate, vstate->parent, regno);
	} else {
		/* check whether register used as dest operand can be written to */
		if (regno == BPF_REG_FP) {
			verbose(env, "frame pointer is read only\n");
			return -EACCES;
		}
		regs[regno].live |= REG_LIVE_WRITTEN;
		if (t == DST_OP)
			mark_reg_unknown(env, regs, regno);
	}
	return 0;
}

static bool is_spillable_regtype(enum bpf_reg_type type)
{
	switch (type) {
	case PTR_TO_MAP_VALUE:
	case PTR_TO_MAP_VALUE_OR_NULL:
	case PTR_TO_STACK:
	case PTR_TO_CTX:
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_META:
	case PTR_TO_PACKET_END:
	case CONST_PTR_TO_MAP:
		return true;
	default:
		return false;
	}
}

/* Does this register contain a constant zero? */
static bool register_is_null(struct bpf_reg_state *reg)
{
	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
}

/* check_stack_read/write functions track spill/fill of registers,
 * stack boundary and alignment are checked in check_mem_access()
 */
static int check_stack_write(struct bpf_verifier_env *env,
			     struct bpf_func_state *state, /* func where register points to */
			     int off, int size, int value_regno, int insn_idx)
{
	struct bpf_func_state *cur; /* state of the current function */
	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
	enum bpf_reg_type type;

	err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
				 true);
	if (err)
		return err;
	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
	 * so it's aligned access and [off, off + size) are within stack limits
	 */
	if (!env->allow_ptr_leaks &&
	    state->stack[spi].slot_type[0] == STACK_SPILL &&
	    size != BPF_REG_SIZE) {
		verbose(env, "attempt to corrupt spilled pointer on stack\n");
		return -EACCES;
	}

	cur = env->cur_state->frame[env->cur_state->curframe];
	if (value_regno >= 0 &&
	    is_spillable_regtype((type = cur->regs[value_regno].type))) {

		/* register containing pointer is being spilled into stack */
		if (size != BPF_REG_SIZE) {
			verbose(env, "invalid size of register spill\n");
			return -EACCES;
		}

		if (state != cur && type == PTR_TO_STACK) {
			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
			return -EINVAL;
		}

		/* save register state */
		state->stack[spi].spilled_ptr = cur->regs[value_regno];
		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;

		for (i = 0; i < BPF_REG_SIZE; i++) {
			if (state->stack[spi].slot_type[i] == STACK_MISC &&
			    !env->allow_ptr_leaks) {
				int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
				int soff = (-spi - 1) * BPF_REG_SIZE;

				/* detected reuse of integer stack slot with a pointer
				 * which means either llvm is reusing stack slot or
				 * an attacker is trying to exploit CVE-2018-3639
				 * (speculative store bypass)
				 * Have to sanitize that slot with preemptive
				 * store of zero.
				 */
				if (*poff && *poff != soff) {
					/* disallow programs where single insn stores
					 * into two different stack slots, since verifier
					 * cannot sanitize them
					 */
					verbose(env,
						"insn %d cannot access two stack slots fp%d and fp%d",
						insn_idx, *poff, soff);
					return -EINVAL;
				}
				*poff = soff;
			}
			state->stack[spi].slot_type[i] = STACK_SPILL;
		}
	} else {
		u8 type = STACK_MISC;

		/* regular write of data into stack */
		state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};

		/* only mark the slot as written if all 8 bytes were written,
		 * otherwise read propagation may incorrectly stop too soon
		 * when stack slots are partially written.
		 * This heuristic means that read propagation will be
		 * conservative, since it will add reg_live_read marks
		 * to stack slots all the way to the first state when a
		 * program writes+reads less than 8 bytes
		 */
		if (size == BPF_REG_SIZE)
			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;

		/* when we zero initialize stack slots mark them as such */
		if (value_regno >= 0 &&
		    register_is_null(&cur->regs[value_regno]))
			type = STACK_ZERO;

		for (i = 0; i < size; i++)
			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
				type;
	}
	return 0;
}
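
/* A minimal spill/fill sketch (illustrative): assuming R1 holds PTR_TO_CTX,
 *
 *    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), // spill R1 to fp-8
 *    BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8), // fill fp-8 into R2
 *
 * the 8-byte store marks all eight slot_type[] bytes STACK_SPILL and saves
 * R1's register state in spilled_ptr; check_stack_read() later restores that
 * state into R2. For unprivileged programs a narrower store over the same
 * slot is rejected by the size != BPF_REG_SIZE check in check_stack_write().
 */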

/* registers of every function are unique and mark_reg_read() propagates
 * the liveness in the following cases:
 * - from callee into caller for R1 - R5 that were used as arguments
 * - from caller into callee for R0 that used as result of the call
 * - from caller to the same caller skipping states of the callee for R6 - R9,
 *   since R6 - R9 are callee saved by implicit function prologue and
 *   caller's R6 != callee's R6, so when we propagate liveness up to
 *   parent states we need to skip callee states for R6 - R9.
 *
 * stack slot marking is different, since stacks of caller and callee are
 * accessible in both (since caller can pass a pointer to caller's stack to
 * callee which can pass it to another function), hence mark_stack_slot_read()
 * has to propagate the stack liveness to all parent states at given frame number.
 * Consider code:
 * f1() {
 *   ptr = fp - 8;
 *   *ptr = ctx;
 *   call f2 {
 *      .. = *ptr;
 *   }
 *   .. = *ptr;
 * }
 * First *ptr is reading from f1's stack and mark_stack_slot_read() has
 * to mark liveness at the f1's frame and not f2's frame.
 * Second *ptr is also reading from f1's stack and mark_stack_slot_read() has
 * to propagate liveness to f2 states at f1's frame level and further into
 * f1 states at f1's frame level until write into that stack slot
 */
static void mark_stack_slot_read(struct bpf_verifier_env *env,
				 const struct bpf_verifier_state *state,
				 struct bpf_verifier_state *parent,
				 int slot, int frameno)
{
	bool writes = parent == state->parent; /* Observe write marks */

	while (parent) {
		if (parent->frame[frameno]->allocated_stack <= slot * BPF_REG_SIZE)
			/* since LIVE_WRITTEN mark is only done for full 8-byte
			 * write the read marks are conservative and parent
			 * state may not even have the stack allocated. In such case
			 * end the propagation, since the loop reached beginning
			 * of the function
			 */
			break;
		/* if read wasn't screened by an earlier write ... */
		if (writes && state->frame[frameno]->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
			break;
		/* ... then we depend on parent's value */
		parent->frame[frameno]->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
		state = parent;
		parent = state->parent;
		writes = true;
	}
}

static int check_stack_read(struct bpf_verifier_env *env,
			    struct bpf_func_state *reg_state /* func where register points to */,
			    int off, int size, int value_regno)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
	u8 *stype;

	if (reg_state->allocated_stack <= slot) {
		verbose(env, "invalid read from stack off %d+0 size %d\n",
			off, size);
		return -EACCES;
	}
	stype = reg_state->stack[spi].slot_type;

	if (stype[0] == STACK_SPILL) {
		if (size != BPF_REG_SIZE) {
			verbose(env, "invalid size of register spill\n");
			return -EACCES;
		}
		for (i = 1; i < BPF_REG_SIZE; i++) {
			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
				verbose(env, "corrupted spill memory\n");
				return -EACCES;
			}
		}

		if (value_regno >= 0) {
			/* restore register state from stack */
			state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
			/* mark reg as written since spilled pointer state likely
			 * has its liveness marks cleared by is_state_visited()
			 * which resets stack/reg liveness for state transitions
			 */
			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
		}
		mark_stack_slot_read(env, vstate, vstate->parent, spi,
				     reg_state->frameno);
		return 0;
	} else {
		int zeros = 0;

		for (i = 0; i < size; i++) {
			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
				continue;
			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
				zeros++;
				continue;
			}
			verbose(env, "invalid read from stack off %d+%d size %d\n",
				off, i, size);
			return -EACCES;
		}
		mark_stack_slot_read(env, vstate, vstate->parent, spi,
				     reg_state->frameno);
		if (value_regno >= 0) {
			if (zeros == size) {
				/* any size read into register is zero extended,
				 * so the whole register == const_zero
				 */
				__mark_reg_const_zero(&state->regs[value_regno]);
			} else {
				/* have read misc data from the stack */
				mark_reg_unknown(env, state->regs, value_regno);
			}
			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
		}
		return 0;
	}
}

/* check read/write into map element returned by bpf_map_lookup_elem() */
static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
			      int size, bool zero_size_allowed)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_map *map = regs[regno].map_ptr;

	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
	    off + size > map->value_size) {
		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
			map->value_size, off, size);
		return -EACCES;
	}
	return 0;
}

/* check read/write into a map element with possible variable offset */
static int check_map_access(struct bpf_verifier_env *env, u32 regno,
			    int off, int size, bool zero_size_allowed)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *reg = &state->regs[regno];
	int err;

	/* We may have adjusted the register to this map value, so we
	 * need to try adding each of min_value and max_value to off
	 * to make sure our theoretical access will be safe.
	 */
	if (env->log.level)
		print_verifier_state(env, state);
	/* The minimum value is only important with signed
	 * comparisons where we can't assume the floor of a
	 * value is 0.  If we are using signed variables for our
	 * indexes we need to make sure that whatever we use
	 * will have a set floor within our range.
	 */
	if (reg->smin_value < 0) {
		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
			regno);
		return -EACCES;
	}
	err = __check_map_access(env, regno, reg->smin_value + off, size,
				 zero_size_allowed);
	if (err) {
		verbose(env, "R%d min value is outside of the array range\n",
			regno);
		return err;
	}

	/* If we haven't set a max value then we need to bail since we can't be
	 * sure we won't do bad things.
	 * If reg->umax_value + off could overflow, treat that as unbounded too.
	 */
	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
		verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
			regno);
		return -EACCES;
	}
	err = __check_map_access(env, regno, reg->umax_value + off, size,
				 zero_size_allowed);
	if (err)
		verbose(env, "R%d max value is outside of the array range\n",
			regno);
	return err;
}

#define MAX_PACKET_OFF 0xffff

static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
				       const struct bpf_call_arg_meta *meta,
				       enum bpf_access_type t)
{
	switch (env->prog->type) {
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SK_REUSEPORT:
		/* dst_input() and dst_output() can't write for now */
		if (t == BPF_WRITE)
			return false;
		/* fallthrough */
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_SK_MSG:
		if (meta)
			return meta->pkt_access;

		env->seen_direct_write = true;
		return true;
	default:
		return false;
	}
}

static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
				 int off, int size, bool zero_size_allowed)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[regno];

	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
	    (u64)off + size > reg->range) {
		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
			off, size, regno, reg->id, reg->off, reg->range);
		return -EACCES;
	}
	return 0;
}

static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
			       int size, bool zero_size_allowed)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[regno];
	int err;

	/* We may have added a variable offset to the packet pointer; but any
	 * reg->range we have comes after that.  We are only checking the fixed
	 * offset.
	 */

	/* We don't allow negative numbers, because we aren't tracking enough
	 * detail to prove they're safe.
	 */
	if (reg->smin_value < 0) {
		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
			regno);
		return -EACCES;
	}
	err = __check_packet_access(env, regno, off, size, zero_size_allowed);
	if (err) {
		verbose(env, "R%d offset is outside of the packet\n", regno);
		return err;
	}
	return err;
}

/* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
{
	struct bpf_insn_access_aux info = {
		.reg_type = *reg_type,
	};

	if (env->ops->is_valid_access &&
	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
		/* A non zero info.ctx_field_size indicates that this field is a
		 * candidate for later verifier transformation to load the whole
		 * field and then apply a mask when accessed with a narrower
		 * access than actual ctx access size. A zero info.ctx_field_size
		 * will only allow for whole field access and rejects any other
		 * type of narrower access.
		 */
		*reg_type = info.reg_type;

		env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
		/* remember the offset of last byte accessed in ctx */
		if (env->prog->aux->max_ctx_offset < off + size)
			env->prog->aux->max_ctx_offset = off + size;
		return 0;
	}

	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
	return -EACCES;
}

static bool __is_pointer_value(bool allow_ptr_leaks,
			       const struct bpf_reg_state *reg)
{
	if (allow_ptr_leaks)
		return false;

	return reg->type != SCALAR_VALUE;
}

static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
{
	return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
}

static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = cur_regs(env) + regno;

	return reg->type == PTR_TO_CTX;
}

static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = cur_regs(env) + regno;

	return type_is_pkt_pointer(reg->type);
}

static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg,
				   int off, int size, bool strict)
{
	struct tnum reg_off;
	int ip_align;

	/* Byte size accesses are always allowed. */
	if (!strict || size == 1)
		return 0;

	/* For platforms that do not have a Kconfig enabling
	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
	 * to this code only in strict mode where we want to emulate
	 * the NET_IP_ALIGN==2 checking.  Therefore use an
	 * unconditional IP align value of '2'.
	 */
	ip_align = 2;

	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
	if (!tnum_is_aligned(reg_off, size)) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env,
			"misaligned packet access off %d+%s+%d+%d size %d\n",
			ip_align, tn_buf, reg->off, off, size);
		return -EACCES;
	}

	return 0;
}
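
/* E.g. (illustrative): under strict alignment a 4-byte load at pkt + 14
 * (the IP header following an Ethernet header) is accepted, since
 * ip_align + reg->off + off = 2 + 0 + 14 = 16 is 4-byte aligned, while the
 * same load at pkt + 12 is rejected (2 + 12 = 14 is not).
 */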
1469 
check_generic_ptr_alignment(struct bpf_verifier_env * env,const struct bpf_reg_state * reg,const char * pointer_desc,int off,int size,bool strict)1470 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
1471 				       const struct bpf_reg_state *reg,
1472 				       const char *pointer_desc,
1473 				       int off, int size, bool strict)
1474 {
1475 	struct tnum reg_off;
1476 
1477 	/* Byte size accesses are always allowed. */
1478 	if (!strict || size == 1)
1479 		return 0;
1480 
1481 	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
1482 	if (!tnum_is_aligned(reg_off, size)) {
1483 		char tn_buf[48];
1484 
1485 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1486 		verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
1487 			pointer_desc, tn_buf, reg->off, off, size);
1488 		return -EACCES;
1489 	}
1490 
1491 	return 0;
1492 }
1493 
check_ptr_alignment(struct bpf_verifier_env * env,const struct bpf_reg_state * reg,int off,int size,bool strict_alignment_once)1494 static int check_ptr_alignment(struct bpf_verifier_env *env,
1495 			       const struct bpf_reg_state *reg, int off,
1496 			       int size, bool strict_alignment_once)
1497 {
1498 	bool strict = env->strict_alignment || strict_alignment_once;
1499 	const char *pointer_desc = "";
1500 
1501 	switch (reg->type) {
1502 	case PTR_TO_PACKET:
1503 	case PTR_TO_PACKET_META:
1504 		/* Special case, because of NET_IP_ALIGN. Given metadata sits
1505 		 * right in front, treat it the very same way.
1506 		 */
1507 		return check_pkt_ptr_alignment(env, reg, off, size, strict);
1508 	case PTR_TO_MAP_VALUE:
1509 		pointer_desc = "value ";
1510 		break;
1511 	case PTR_TO_CTX:
1512 		pointer_desc = "context ";
1513 		break;
1514 	case PTR_TO_STACK:
1515 		pointer_desc = "stack ";
1516 		/* The stack spill tracking logic in check_stack_write()
1517 		 * and check_stack_read() relies on stack accesses being
1518 		 * aligned.
1519 		 */
1520 		strict = true;
1521 		break;
1522 	default:
1523 		break;
1524 	}
1525 	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
1526 					   strict);
1527 }
1528 
1529 static int update_stack_depth(struct bpf_verifier_env *env,
1530 			      const struct bpf_func_state *func,
1531 			      int off)
1532 {
1533 	u16 stack = env->subprog_info[func->subprogno].stack_depth;
1534 
1535 	if (stack >= -off)
1536 		return 0;
1537 
1538 	/* update known max for given subprogram */
1539 	env->subprog_info[func->subprogno].stack_depth = -off;
1540 	return 0;
1541 }
1542 
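/* Editor's note: stack offsets are fp-relative and negative, so e.g. a
 * write at fp-24 reaches update_stack_depth() with off == -24 and grows
 * the subprogram's recorded stack_depth to 24 if the previous maximum
 * was smaller; shallower accesses leave the known maximum untouched.
 */
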
1543 /* starting from main bpf function walk all instructions of the function
1544  * and recursively walk all callees that given function can call.
1545  * Ignore jump and exit insns.
1546  * Since recursion is prevented by check_cfg() this algorithm
1547  * only needs a local stack of MAX_CALL_FRAMES to remember callsites
1548  */
1549 static int check_max_stack_depth(struct bpf_verifier_env *env)
1550 {
1551 	int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
1552 	struct bpf_subprog_info *subprog = env->subprog_info;
1553 	struct bpf_insn *insn = env->prog->insnsi;
1554 	int ret_insn[MAX_CALL_FRAMES];
1555 	int ret_prog[MAX_CALL_FRAMES];
1556 
1557 process_func:
1558 	/* round up to 32 bytes, since this is the granularity
1559 	 * of the interpreter stack size
1560 	 */
1561 	depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
1562 	if (depth > MAX_BPF_STACK) {
1563 		verbose(env, "combined stack size of %d calls is %d. Too large\n",
1564 			frame + 1, depth);
1565 		return -EACCES;
1566 	}
1567 continue_func:
1568 	subprog_end = subprog[idx + 1].start;
1569 	for (; i < subprog_end; i++) {
1570 		if (insn[i].code != (BPF_JMP | BPF_CALL))
1571 			continue;
1572 		if (insn[i].src_reg != BPF_PSEUDO_CALL)
1573 			continue;
1574 		/* remember insn and function to return to */
1575 		ret_insn[frame] = i + 1;
1576 		ret_prog[frame] = idx;
1577 
1578 		/* find the callee */
1579 		i = i + insn[i].imm + 1;
1580 		idx = find_subprog(env, i);
1581 		if (idx < 0) {
1582 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
1583 				  i);
1584 			return -EFAULT;
1585 		}
1586 		frame++;
1587 		if (frame >= MAX_CALL_FRAMES) {
1588 			WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
1589 			return -EFAULT;
1590 		}
1591 		goto process_func;
1592 	}
1593 	/* end of for() loop means the last insn of the 'subprog'
1594 	 * was reached. Doesn't matter whether it was JA or EXIT
1595 	 */
1596 	if (frame == 0)
1597 		return 0;
1598 	depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
1599 	frame--;
1600 	i = ret_insn[frame];
1601 	idx = ret_prog[frame];
1602 	goto continue_func;
1603 }
1604 
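/* Editor's illustration with assumed frame sizes: if main() uses 100
 * bytes and calls subprog A (200 bytes), which calls subprog B (50
 * bytes), the accumulated depth is round_up(100, 32) + round_up(200, 32)
 * + round_up(50, 32) = 128 + 224 + 64 = 416, within MAX_BPF_STACK (512).
 * One more 100-byte frame on top would exceed the limit and fail with
 * -EACCES.
 */
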
1605 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1606 static int get_callee_stack_depth(struct bpf_verifier_env *env,
1607 				  const struct bpf_insn *insn, int idx)
1608 {
1609 	int start = idx + insn->imm + 1, subprog;
1610 
1611 	subprog = find_subprog(env, start);
1612 	if (subprog < 0) {
1613 		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
1614 			  start);
1615 		return -EFAULT;
1616 	}
1617 	return env->subprog_info[subprog].stack_depth;
1618 }
1619 #endif
1620 
1621 static int check_ctx_reg(struct bpf_verifier_env *env,
1622 			 const struct bpf_reg_state *reg, int regno)
1623 {
1624 	/* Access to ctx or passing it to a helper is only allowed in
1625 	 * its original, unmodified form.
1626 	 */
1627 
1628 	if (reg->off) {
1629 		verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
1630 			regno, reg->off);
1631 		return -EACCES;
1632 	}
1633 
1634 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
1635 		char tn_buf[48];
1636 
1637 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1638 		verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
1639 		return -EACCES;
1640 	}
1641 
1642 	return 0;
1643 }
1644 
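/* Editor's illustration: a hypothetical program that offsets the ctx
 * pointer before dereferencing it is rejected here:
 *
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
 *    BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
 *
 * fails with "dereference of modified ctx ptr R1 off=8 disallowed",
 * whereas BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 8) keeps reg->off
 * zero and is validated through the normal ctx access checks.
 */
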
1645 /* truncate register to smaller size (in bytes)
1646  * must be called with size < BPF_REG_SIZE
1647  */
1648 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
1649 {
1650 	u64 mask;
1651 
1652 	/* clear high bits in bit representation */
1653 	reg->var_off = tnum_cast(reg->var_off, size);
1654 
1655 	/* fix arithmetic bounds */
1656 	mask = ((u64)1 << (size * 8)) - 1;
1657 	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
1658 		reg->umin_value &= mask;
1659 		reg->umax_value &= mask;
1660 	} else {
1661 		reg->umin_value = 0;
1662 		reg->umax_value = mask;
1663 	}
1664 	reg->smin_value = reg->umin_value;
1665 	reg->smax_value = reg->umax_value;
1666 }
1667 
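/* Editor's worked example: truncating a register known to lie in
 * [0x105, 0x1a0] to one byte keeps useful bounds, since both bounds
 * share the same high bits (0x100) and the result is [0x05, 0xa0].
 * If the high bits differ, e.g. [0xff, 0x101], wrap-around is possible
 * and the bounds collapse to the conservative [0, 0xff].
 */
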
1668 /* check whether memory at (regno + off) is accessible for t = (read | write)
1669  * if t==write, value_regno is a register whose value is stored into memory
1670  * if t==read, value_regno is a register which will receive the value from memory
1671  * if t==write && value_regno==-1, some unknown value is stored into memory
1672  * if t==read && value_regno==-1, don't care what we read from memory
1673  */
1674 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
1675 			    int off, int bpf_size, enum bpf_access_type t,
1676 			    int value_regno, bool strict_alignment_once)
1677 {
1678 	struct bpf_reg_state *regs = cur_regs(env);
1679 	struct bpf_reg_state *reg = regs + regno;
1680 	struct bpf_func_state *state;
1681 	int size, err = 0;
1682 
1683 	size = bpf_size_to_bytes(bpf_size);
1684 	if (size < 0)
1685 		return size;
1686 
1687 	/* alignment checks will add in reg->off themselves */
1688 	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
1689 	if (err)
1690 		return err;
1691 
1692 	/* for access checks, reg->off is just part of off */
1693 	off += reg->off;
1694 
1695 	if (reg->type == PTR_TO_MAP_VALUE) {
1696 		if (t == BPF_WRITE && value_regno >= 0 &&
1697 		    is_pointer_value(env, value_regno)) {
1698 			verbose(env, "R%d leaks addr into map\n", value_regno);
1699 			return -EACCES;
1700 		}
1701 
1702 		err = check_map_access(env, regno, off, size, false);
1703 		if (!err && t == BPF_READ && value_regno >= 0)
1704 			mark_reg_unknown(env, regs, value_regno);
1705 
1706 	} else if (reg->type == PTR_TO_CTX) {
1707 		enum bpf_reg_type reg_type = SCALAR_VALUE;
1708 
1709 		if (t == BPF_WRITE && value_regno >= 0 &&
1710 		    is_pointer_value(env, value_regno)) {
1711 			verbose(env, "R%d leaks addr into ctx\n", value_regno);
1712 			return -EACCES;
1713 		}
1714 
1715 		err = check_ctx_reg(env, reg, regno);
1716 		if (err < 0)
1717 			return err;
1718 
1719 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
1720 		if (!err && t == BPF_READ && value_regno >= 0) {
1721 			/* ctx access returns either a scalar, or a
1722 			 * PTR_TO_PACKET[_META,_END]. In the latter
1723 			 * case, we know the offset is zero.
1724 			 */
1725 			if (reg_type == SCALAR_VALUE)
1726 				mark_reg_unknown(env, regs, value_regno);
1727 			else
1728 				mark_reg_known_zero(env, regs,
1729 						    value_regno);
1730 			regs[value_regno].id = 0;
1731 			regs[value_regno].off = 0;
1732 			regs[value_regno].range = 0;
1733 			regs[value_regno].type = reg_type;
1734 		}
1735 
1736 	} else if (reg->type == PTR_TO_STACK) {
1737 		/* stack accesses must be at a fixed offset, so that we can
1738 		 * determine what type of data were returned.
1739 		 * See check_stack_read().
1740 		 */
1741 		if (!tnum_is_const(reg->var_off)) {
1742 			char tn_buf[48];
1743 
1744 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1745 			verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
1746 				tn_buf, off, size);
1747 			return -EACCES;
1748 		}
1749 		off += reg->var_off.value;
1750 		if (off >= 0 || off < -MAX_BPF_STACK) {
1751 			verbose(env, "invalid stack off=%d size=%d\n", off,
1752 				size);
1753 			return -EACCES;
1754 		}
1755 
1756 		state = func(env, reg);
1757 		err = update_stack_depth(env, state, off);
1758 		if (err)
1759 			return err;
1760 
1761 		if (t == BPF_WRITE)
1762 			err = check_stack_write(env, state, off, size,
1763 						value_regno, insn_idx);
1764 		else
1765 			err = check_stack_read(env, state, off, size,
1766 					       value_regno);
1767 	} else if (reg_is_pkt_pointer(reg)) {
1768 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
1769 			verbose(env, "cannot write into packet\n");
1770 			return -EACCES;
1771 		}
1772 		if (t == BPF_WRITE && value_regno >= 0 &&
1773 		    is_pointer_value(env, value_regno)) {
1774 			verbose(env, "R%d leaks addr into packet\n",
1775 				value_regno);
1776 			return -EACCES;
1777 		}
1778 		err = check_packet_access(env, regno, off, size, false);
1779 		if (!err && t == BPF_READ && value_regno >= 0)
1780 			mark_reg_unknown(env, regs, value_regno);
1781 	} else {
1782 		verbose(env, "R%d invalid mem access '%s'\n", regno,
1783 			reg_type_str[reg->type]);
1784 		return -EACCES;
1785 	}
1786 
1787 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
1788 	    regs[value_regno].type == SCALAR_VALUE) {
1789 		/* b/h/w load zero-extends, mark upper bits as known 0 */
1790 		coerce_reg_to_size(&regs[value_regno], size);
1791 	}
1792 	return err;
1793 }
1794 
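/* Editor's illustration: after a single-byte load such as
 *
 *    BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
 *
 * the destination is a SCALAR_VALUE and the coerce_reg_to_size() call
 * above records umin = 0, umax = 0xff, matching the zero-extension
 * that the interpreter and JITs perform for b/h/w loads.
 */
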
1795 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
1796 {
1797 	int err;
1798 
1799 	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
1800 	    insn->imm != 0) {
1801 		verbose(env, "BPF_XADD uses reserved fields\n");
1802 		return -EINVAL;
1803 	}
1804 
1805 	/* check src1 operand */
1806 	err = check_reg_arg(env, insn->src_reg, SRC_OP);
1807 	if (err)
1808 		return err;
1809 
1810 	/* check src2 operand */
1811 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
1812 	if (err)
1813 		return err;
1814 
1815 	if (is_pointer_value(env, insn->src_reg)) {
1816 		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
1817 		return -EACCES;
1818 	}
1819 
1820 	if (is_ctx_reg(env, insn->dst_reg) ||
1821 	    is_pkt_reg(env, insn->dst_reg)) {
1822 		verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
1823 			insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ?
1824 			"context" : "packet");
1825 		return -EACCES;
1826 	}
1827 
1828 	/* check whether atomic_add can read the memory */
1829 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1830 			       BPF_SIZE(insn->code), BPF_READ, -1, true);
1831 	if (err)
1832 		return err;
1833 
1834 	/* check whether atomic_add can write into the same memory */
1835 	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1836 				BPF_SIZE(insn->code), BPF_WRITE, -1, true);
1837 }
1838 
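/* Editor's illustration: a hypothetical atomic increment of a map value
 * passes both checks above, assuming R0 holds a null-checked
 * PTR_TO_MAP_VALUE:
 *
 *    BPF_MOV64_IMM(BPF_REG_2, 1),
 *    BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
 *
 * The same BPF_STX_XADD with R0 pointing to ctx or packet data is
 * rejected with "BPF_XADD stores into R0 ... is not allowed".
 */
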
1839 /* when register 'regno' is passed into function that will read 'access_size'
1840  * bytes from that pointer, make sure that it's within stack boundary
1841  * and all elements of stack are initialized.
1842  * Unlike most pointer bounds-checking functions, this one doesn't take an
1843  * 'off' argument, so it has to add in reg->off itself.
1844  */
1845 static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
1846 				int access_size, bool zero_size_allowed,
1847 				struct bpf_call_arg_meta *meta)
1848 {
1849 	struct bpf_reg_state *reg = cur_regs(env) + regno;
1850 	struct bpf_func_state *state = func(env, reg);
1851 	int off, i, slot, spi;
1852 
1853 	if (reg->type != PTR_TO_STACK) {
1854 		/* Allow zero-byte read from NULL, regardless of pointer type */
1855 		if (zero_size_allowed && access_size == 0 &&
1856 		    register_is_null(reg))
1857 			return 0;
1858 
1859 		verbose(env, "R%d type=%s expected=%s\n", regno,
1860 			reg_type_str[reg->type],
1861 			reg_type_str[PTR_TO_STACK]);
1862 		return -EACCES;
1863 	}
1864 
1865 	/* Only allow fixed-offset stack reads */
1866 	if (!tnum_is_const(reg->var_off)) {
1867 		char tn_buf[48];
1868 
1869 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1870 		verbose(env, "invalid variable stack read R%d var_off=%s\n",
1871 			regno, tn_buf);
1872 		return -EACCES;
1873 	}
1874 	off = reg->off + reg->var_off.value;
1875 	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
1876 	    access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
1877 		verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
1878 			regno, off, access_size);
1879 		return -EACCES;
1880 	}
1881 
1882 	if (meta && meta->raw_mode) {
1883 		meta->access_size = access_size;
1884 		meta->regno = regno;
1885 		return 0;
1886 	}
1887 
1888 	for (i = 0; i < access_size; i++) {
1889 		u8 *stype;
1890 
1891 		slot = -(off + i) - 1;
1892 		spi = slot / BPF_REG_SIZE;
1893 		if (state->allocated_stack <= slot)
1894 			goto err;
1895 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
1896 		if (*stype == STACK_MISC)
1897 			goto mark;
1898 		if (*stype == STACK_ZERO) {
1899 			/* helper can write anything into the stack */
1900 			*stype = STACK_MISC;
1901 			goto mark;
1902 		}
1903 err:
1904 		verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
1905 			off, i, access_size);
1906 		return -EACCES;
1907 mark:
1908 		/* reading any byte out of 8-byte 'spill_slot' will cause
1909 		 * the whole slot to be marked as 'read'
1910 		 */
1911 		mark_stack_slot_read(env, env->cur_state, env->cur_state->parent,
1912 				     spi, state->frameno);
1913 	}
1914 	return update_stack_depth(env, state, off);
1915 }
1916 
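/* Editor's illustration: a helper taking (buf, len) with buf = fp-8 and
 * len = 16 fails the boundary check above, since off = -8 plus
 * access_size = 16 reaches past the top of the frame (off + access_size
 * > 0). With len = 8 the access fits, but each of the 8 bytes must have
 * been written (STACK_MISC or STACK_ZERO) unless the helper runs in raw
 * mode.
 */
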
1917 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
1918 				   int access_size, bool zero_size_allowed,
1919 				   struct bpf_call_arg_meta *meta)
1920 {
1921 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
1922 
1923 	switch (reg->type) {
1924 	case PTR_TO_PACKET:
1925 	case PTR_TO_PACKET_META:
1926 		return check_packet_access(env, regno, reg->off, access_size,
1927 					   zero_size_allowed);
1928 	case PTR_TO_MAP_VALUE:
1929 		return check_map_access(env, regno, reg->off, access_size,
1930 					zero_size_allowed);
1931 	default: /* scalar_value|ptr_to_stack or invalid ptr */
1932 		return check_stack_boundary(env, regno, access_size,
1933 					    zero_size_allowed, meta);
1934 	}
1935 }
1936 
1937 static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
1938 {
1939 	return type == ARG_PTR_TO_MEM ||
1940 	       type == ARG_PTR_TO_MEM_OR_NULL ||
1941 	       type == ARG_PTR_TO_UNINIT_MEM;
1942 }
1943 
1944 static bool arg_type_is_mem_size(enum bpf_arg_type type)
1945 {
1946 	return type == ARG_CONST_SIZE ||
1947 	       type == ARG_CONST_SIZE_OR_ZERO;
1948 }
1949 
1950 static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
1951 			  enum bpf_arg_type arg_type,
1952 			  struct bpf_call_arg_meta *meta)
1953 {
1954 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
1955 	enum bpf_reg_type expected_type, type = reg->type;
1956 	int err = 0;
1957 
1958 	if (arg_type == ARG_DONTCARE)
1959 		return 0;
1960 
1961 	err = check_reg_arg(env, regno, SRC_OP);
1962 	if (err)
1963 		return err;
1964 
1965 	if (arg_type == ARG_ANYTHING) {
1966 		if (is_pointer_value(env, regno)) {
1967 			verbose(env, "R%d leaks addr into helper function\n",
1968 				regno);
1969 			return -EACCES;
1970 		}
1971 		return 0;
1972 	}
1973 
1974 	if (type_is_pkt_pointer(type) &&
1975 	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
1976 		verbose(env, "helper access to the packet is not allowed\n");
1977 		return -EACCES;
1978 	}
1979 
1980 	if (arg_type == ARG_PTR_TO_MAP_KEY ||
1981 	    arg_type == ARG_PTR_TO_MAP_VALUE) {
1982 		expected_type = PTR_TO_STACK;
1983 		if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE &&
1984 		    type != expected_type)
1985 			goto err_type;
1986 	} else if (arg_type == ARG_CONST_SIZE ||
1987 		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
1988 		expected_type = SCALAR_VALUE;
1989 		if (type != expected_type)
1990 			goto err_type;
1991 	} else if (arg_type == ARG_CONST_MAP_PTR) {
1992 		expected_type = CONST_PTR_TO_MAP;
1993 		if (type != expected_type)
1994 			goto err_type;
1995 	} else if (arg_type == ARG_PTR_TO_CTX) {
1996 		expected_type = PTR_TO_CTX;
1997 		if (type != expected_type)
1998 			goto err_type;
1999 		err = check_ctx_reg(env, reg, regno);
2000 		if (err < 0)
2001 			return err;
2002 	} else if (arg_type_is_mem_ptr(arg_type)) {
2003 		expected_type = PTR_TO_STACK;
2004 		/* One exception here. In case function allows for NULL to be
2005 		 * passed in as argument, it's a SCALAR_VALUE type. Final test
2006 		 * happens during stack boundary checking.
2007 		 */
2008 		if (register_is_null(reg) &&
2009 		    arg_type == ARG_PTR_TO_MEM_OR_NULL)
2010 			/* final test in check_stack_boundary() */;
2011 		else if (!type_is_pkt_pointer(type) &&
2012 			 type != PTR_TO_MAP_VALUE &&
2013 			 type != expected_type)
2014 			goto err_type;
2015 		meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
2016 	} else {
2017 		verbose(env, "unsupported arg_type %d\n", arg_type);
2018 		return -EFAULT;
2019 	}
2020 
2021 	if (arg_type == ARG_CONST_MAP_PTR) {
2022 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
2023 		meta->map_ptr = reg->map_ptr;
2024 	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
2025 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
2026 		 * check that [key, key + map->key_size) are within
2027 		 * stack limits and initialized
2028 		 */
2029 		if (!meta->map_ptr) {
2030 			/* in function declaration map_ptr must come before
2031 			 * map_key, so that it's verified and known before
2032 			 * we have to check map_key here. Otherwise it means the
2033 			 * kernel subsystem misconfigured the verifier
2034 			 */
2035 			verbose(env, "invalid map_ptr to access map->key\n");
2036 			return -EACCES;
2037 		}
2038 		err = check_helper_mem_access(env, regno,
2039 					      meta->map_ptr->key_size, false,
2040 					      NULL);
2041 	} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
2042 		/* bpf_map_xxx(..., map_ptr, ..., value) call:
2043 		 * check [value, value + map->value_size) validity
2044 		 */
2045 		if (!meta->map_ptr) {
2046 			/* kernel subsystem misconfigured verifier */
2047 			verbose(env, "invalid map_ptr to access map->value\n");
2048 			return -EACCES;
2049 		}
2050 		err = check_helper_mem_access(env, regno,
2051 					      meta->map_ptr->value_size, false,
2052 					      NULL);
2053 	} else if (arg_type_is_mem_size(arg_type)) {
2054 		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
2055 
2056 		/* remember the mem_size which may be used later
2057 		 * to refine return values.
2058 		 */
2059 		meta->msize_smax_value = reg->smax_value;
2060 		meta->msize_umax_value = reg->umax_value;
2061 
2062 		/* The register is SCALAR_VALUE; the access check
2063 		 * happens using its boundaries.
2064 		 */
2065 		if (!tnum_is_const(reg->var_off))
2066 			/* For unprivileged variable accesses, disable raw
2067 			 * mode so that the program is required to
2068 			 * initialize all the memory that the helper could
2069 			 * just partially fill up.
2070 			 */
2071 			meta = NULL;
2072 
2073 		if (reg->smin_value < 0) {
2074 			verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
2075 				regno);
2076 			return -EACCES;
2077 		}
2078 
2079 		if (reg->umin_value == 0) {
2080 			err = check_helper_mem_access(env, regno - 1, 0,
2081 						      zero_size_allowed,
2082 						      meta);
2083 			if (err)
2084 				return err;
2085 		}
2086 
2087 		if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
2088 			verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
2089 				regno);
2090 			return -EACCES;
2091 		}
2092 		err = check_helper_mem_access(env, regno - 1,
2093 					      reg->umax_value,
2094 					      zero_size_allowed, meta);
2095 	}
2096 
2097 	return err;
2098 err_type:
2099 	verbose(env, "R%d type=%s expected=%s\n", regno,
2100 		reg_type_str[type], reg_type_str[expected_type]);
2101 	return -EACCES;
2102 }
2103 
2104 static int check_map_func_compatibility(struct bpf_verifier_env *env,
2105 					struct bpf_map *map, int func_id)
2106 {
2107 	if (!map)
2108 		return 0;
2109 
2110 	/* We need a two way check, first is from map perspective ... */
2111 	switch (map->map_type) {
2112 	case BPF_MAP_TYPE_PROG_ARRAY:
2113 		if (func_id != BPF_FUNC_tail_call)
2114 			goto error;
2115 		break;
2116 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
2117 		if (func_id != BPF_FUNC_perf_event_read &&
2118 		    func_id != BPF_FUNC_perf_event_output &&
2119 		    func_id != BPF_FUNC_perf_event_read_value)
2120 			goto error;
2121 		break;
2122 	case BPF_MAP_TYPE_STACK_TRACE:
2123 		if (func_id != BPF_FUNC_get_stackid)
2124 			goto error;
2125 		break;
2126 	case BPF_MAP_TYPE_CGROUP_ARRAY:
2127 		if (func_id != BPF_FUNC_skb_under_cgroup &&
2128 		    func_id != BPF_FUNC_current_task_under_cgroup)
2129 			goto error;
2130 		break;
2131 	case BPF_MAP_TYPE_CGROUP_STORAGE:
2132 		if (func_id != BPF_FUNC_get_local_storage)
2133 			goto error;
2134 		break;
2135 	/* a devmap lookup would return a pointer to live net_device state
2136 	 * that we cannot allow to be modified from the bpf side. So do not
2137 	 * allow element lookups for now.
2138 	 */
2139 	case BPF_MAP_TYPE_DEVMAP:
2140 		if (func_id != BPF_FUNC_redirect_map)
2141 			goto error;
2142 		break;
2143 	/* Restrict bpf side of cpumap and xskmap, open when use-cases
2144 	 * appear.
2145 	 */
2146 	case BPF_MAP_TYPE_CPUMAP:
2147 	case BPF_MAP_TYPE_XSKMAP:
2148 		if (func_id != BPF_FUNC_redirect_map)
2149 			goto error;
2150 		break;
2151 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
2152 	case BPF_MAP_TYPE_HASH_OF_MAPS:
2153 		if (func_id != BPF_FUNC_map_lookup_elem)
2154 			goto error;
2155 		break;
2156 	case BPF_MAP_TYPE_SOCKMAP:
2157 		if (func_id != BPF_FUNC_sk_redirect_map &&
2158 		    func_id != BPF_FUNC_sock_map_update &&
2159 		    func_id != BPF_FUNC_map_delete_elem &&
2160 		    func_id != BPF_FUNC_msg_redirect_map)
2161 			goto error;
2162 		break;
2163 	case BPF_MAP_TYPE_SOCKHASH:
2164 		if (func_id != BPF_FUNC_sk_redirect_hash &&
2165 		    func_id != BPF_FUNC_sock_hash_update &&
2166 		    func_id != BPF_FUNC_map_delete_elem &&
2167 		    func_id != BPF_FUNC_msg_redirect_hash)
2168 			goto error;
2169 		break;
2170 	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
2171 		if (func_id != BPF_FUNC_sk_select_reuseport)
2172 			goto error;
2173 		break;
2174 	default:
2175 		break;
2176 	}
2177 
2178 	/* ... and second from the function itself. */
2179 	switch (func_id) {
2180 	case BPF_FUNC_tail_call:
2181 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
2182 			goto error;
2183 		if (env->subprog_cnt > 1) {
2184 			verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
2185 			return -EINVAL;
2186 		}
2187 		break;
2188 	case BPF_FUNC_perf_event_read:
2189 	case BPF_FUNC_perf_event_output:
2190 	case BPF_FUNC_perf_event_read_value:
2191 		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
2192 			goto error;
2193 		break;
2194 	case BPF_FUNC_get_stackid:
2195 		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
2196 			goto error;
2197 		break;
2198 	case BPF_FUNC_current_task_under_cgroup:
2199 	case BPF_FUNC_skb_under_cgroup:
2200 		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
2201 			goto error;
2202 		break;
2203 	case BPF_FUNC_redirect_map:
2204 		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
2205 		    map->map_type != BPF_MAP_TYPE_CPUMAP &&
2206 		    map->map_type != BPF_MAP_TYPE_XSKMAP)
2207 			goto error;
2208 		break;
2209 	case BPF_FUNC_sk_redirect_map:
2210 	case BPF_FUNC_msg_redirect_map:
2211 	case BPF_FUNC_sock_map_update:
2212 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
2213 			goto error;
2214 		break;
2215 	case BPF_FUNC_sk_redirect_hash:
2216 	case BPF_FUNC_msg_redirect_hash:
2217 	case BPF_FUNC_sock_hash_update:
2218 		if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
2219 			goto error;
2220 		break;
2221 	case BPF_FUNC_get_local_storage:
2222 		if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE)
2223 			goto error;
2224 		break;
2225 	case BPF_FUNC_sk_select_reuseport:
2226 		if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
2227 			goto error;
2228 		break;
2229 	default:
2230 		break;
2231 	}
2232 
2233 	return 0;
2234 error:
2235 	verbose(env, "cannot pass map_type %d into func %s#%d\n",
2236 		map->map_type, func_id_name(func_id), func_id);
2237 	return -EINVAL;
2238 }
2239 
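/* Editor's illustration: the two-way check above rejects, for example,
 * both bpf_tail_call() on anything other than a BPF_MAP_TYPE_PROG_ARRAY
 * and bpf_map_lookup_elem() on a BPF_MAP_TYPE_PROG_ARRAY, each with
 * "cannot pass map_type ... into func ...".
 */
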
2240 static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
2241 {
2242 	int count = 0;
2243 
2244 	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
2245 		count++;
2246 	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
2247 		count++;
2248 	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
2249 		count++;
2250 	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
2251 		count++;
2252 	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
2253 		count++;
2254 
2255 	/* We only support one arg being in raw mode at the moment,
2256 	 * which is sufficient for the helper functions we have
2257 	 * right now.
2258 	 */
2259 	return count <= 1;
2260 }
2261 
2262 static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
2263 				    enum bpf_arg_type arg_next)
2264 {
2265 	return (arg_type_is_mem_ptr(arg_curr) &&
2266 	        !arg_type_is_mem_size(arg_next)) ||
2267 	       (!arg_type_is_mem_ptr(arg_curr) &&
2268 		arg_type_is_mem_size(arg_next));
2269 }
2270 
2271 static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
2272 {
2273 	/* bpf_xxx(..., buf, len) call will access 'len'
2274 	 * bytes from memory 'buf'. Both arg types need
2275 	 * to be paired, so make sure there's no buggy
2276 	 * helper function specification.
2277 	 */
2278 	if (arg_type_is_mem_size(fn->arg1_type) ||
2279 	    arg_type_is_mem_ptr(fn->arg5_type)  ||
2280 	    check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
2281 	    check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
2282 	    check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
2283 	    check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
2284 		return false;
2285 
2286 	return true;
2287 }
2288 
2289 static int check_func_proto(const struct bpf_func_proto *fn)
2290 {
2291 	return check_raw_mode_ok(fn) &&
2292 	       check_arg_pair_ok(fn) ? 0 : -EINVAL;
2293 }
2294 
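/* Editor's sketch of a well-formed prototype (names are hypothetical):
 *
 *	static const struct bpf_func_proto example_proto = {
 *		.func		= example_helper,
 *		.gpl_only	= false,
 *		.ret_type	= RET_INTEGER,
 *		.arg1_type	= ARG_PTR_TO_MEM,
 *		.arg2_type	= ARG_CONST_SIZE,
 *	};
 *
 * arg1 (a mem pointer) is immediately followed by arg2 (its size), so
 * check_arg_pair_ok() accepts it; swapping the two, or omitting the
 * size argument, would make check_func_proto() return -EINVAL.
 */
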
2295 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
2296  * are now invalid, so turn them into unknown SCALAR_VALUE.
2297  */
2298 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
2299 				     struct bpf_func_state *state)
2300 {
2301 	struct bpf_reg_state *regs = state->regs, *reg;
2302 	int i;
2303 
2304 	for (i = 0; i < MAX_BPF_REG; i++)
2305 		if (reg_is_pkt_pointer_any(&regs[i]))
2306 			mark_reg_unknown(env, regs, i);
2307 
2308 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
2309 		if (state->stack[i].slot_type[0] != STACK_SPILL)
2310 			continue;
2311 		reg = &state->stack[i].spilled_ptr;
2312 		if (reg_is_pkt_pointer_any(reg))
2313 			__mark_reg_unknown(reg);
2314 	}
2315 }
2316 
2317 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
2318 {
2319 	struct bpf_verifier_state *vstate = env->cur_state;
2320 	int i;
2321 
2322 	for (i = 0; i <= vstate->curframe; i++)
2323 		__clear_all_pkt_pointers(env, vstate->frame[i]);
2324 }
2325 
2326 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
2327 			   int *insn_idx)
2328 {
2329 	struct bpf_verifier_state *state = env->cur_state;
2330 	struct bpf_func_state *caller, *callee;
2331 	int i, subprog, target_insn;
2332 
2333 	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
2334 		verbose(env, "the call stack of %d frames is too deep\n",
2335 			state->curframe + 2);
2336 		return -E2BIG;
2337 	}
2338 
2339 	target_insn = *insn_idx + insn->imm;
2340 	subprog = find_subprog(env, target_insn + 1);
2341 	if (subprog < 0) {
2342 		verbose(env, "verifier bug. No program starts at insn %d\n",
2343 			target_insn + 1);
2344 		return -EFAULT;
2345 	}
2346 
2347 	caller = state->frame[state->curframe];
2348 	if (state->frame[state->curframe + 1]) {
2349 		verbose(env, "verifier bug. Frame %d already allocated\n",
2350 			state->curframe + 1);
2351 		return -EFAULT;
2352 	}
2353 
2354 	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
2355 	if (!callee)
2356 		return -ENOMEM;
2357 	state->frame[state->curframe + 1] = callee;
2358 
2359 	/* callee cannot access r0, r6 - r9 for reading and has to write
2360 	 * into its own stack before reading from it.
2361 	 * callee can read/write into caller's stack
2362 	 */
2363 	init_func_state(env, callee,
2364 			/* remember the callsite, it will be used by bpf_exit */
2365 			*insn_idx /* callsite */,
2366 			state->curframe + 1 /* frameno within this callchain */,
2367 			subprog /* subprog number within this prog */);
2368 
2369 	/* copy r1 - r5 args that callee can access */
2370 	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
2371 		callee->regs[i] = caller->regs[i];
2372 
2373 	/* after the call registers r0 - r5 were scratched */
2374 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
2375 		mark_reg_not_init(env, caller->regs, caller_saved[i]);
2376 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
2377 	}
2378 
2379 	/* only increment it after check_reg_arg() finished */
2380 	state->curframe++;
2381 
2382 	/* and go analyze first insn of the callee */
2383 	*insn_idx = target_insn;
2384 
2385 	if (env->log.level) {
2386 		verbose(env, "caller:\n");
2387 		print_verifier_state(env, caller);
2388 		verbose(env, "callee:\n");
2389 		print_verifier_state(env, callee);
2390 	}
2391 	return 0;
2392 }
2393 
2394 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
2395 {
2396 	struct bpf_verifier_state *state = env->cur_state;
2397 	struct bpf_func_state *caller, *callee;
2398 	struct bpf_reg_state *r0;
2399 
2400 	callee = state->frame[state->curframe];
2401 	r0 = &callee->regs[BPF_REG_0];
2402 	if (r0->type == PTR_TO_STACK) {
2403 		/* technically it's ok to return caller's stack pointer
2404 		 * (or caller's caller's pointer) back to the caller,
2405 		 * since these pointers are valid. Only the current stack
2406 		 * pointer will become invalid as soon as the function exits,
2407 		 * but let's be conservative
2408 		 */
2409 		verbose(env, "cannot return stack pointer to the caller\n");
2410 		return -EINVAL;
2411 	}
2412 
2413 	state->curframe--;
2414 	caller = state->frame[state->curframe];
2415 	/* return to the caller whatever r0 had in the callee */
2416 	caller->regs[BPF_REG_0] = *r0;
2417 
2418 	*insn_idx = callee->callsite + 1;
2419 	if (env->log.level) {
2420 		verbose(env, "returning from callee:\n");
2421 		print_verifier_state(env, callee);
2422 		verbose(env, "to caller at %d:\n", *insn_idx);
2423 		print_verifier_state(env, caller);
2424 	}
2425 	/* clear everything in the callee */
2426 	free_func_state(callee);
2427 	state->frame[state->curframe + 1] = NULL;
2428 	return 0;
2429 }
2430 
2431 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
2432 				   int func_id,
2433 				   struct bpf_call_arg_meta *meta)
2434 {
2435 	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
2436 
2437 	if (ret_type != RET_INTEGER ||
2438 	    (func_id != BPF_FUNC_get_stack &&
2439 	     func_id != BPF_FUNC_probe_read_str))
2440 		return;
2441 
2442 	ret_reg->smax_value = meta->msize_smax_value;
2443 	ret_reg->umax_value = meta->msize_umax_value;
2444 	__reg_deduce_bounds(ret_reg);
2445 	__reg_bound_offset(ret_reg);
2446 }
2447 
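/* Editor's illustration: for a call like bpf_probe_read_str(buf, 64,
 * ptr), the size argument stored msize_smax_value/msize_umax_value = 64
 * in the meta, so the helper's RET_INTEGER result is clamped here to at
 * most 64 and later bounds checks can rely on R0 <= 64.
 */
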
2448 static int
2449 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
2450 		int func_id, int insn_idx)
2451 {
2452 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
2453 
2454 	if (func_id != BPF_FUNC_tail_call &&
2455 	    func_id != BPF_FUNC_map_lookup_elem &&
2456 	    func_id != BPF_FUNC_map_update_elem &&
2457 	    func_id != BPF_FUNC_map_delete_elem)
2458 		return 0;
2459 
2460 	if (meta->map_ptr == NULL) {
2461 		verbose(env, "kernel subsystem misconfigured verifier\n");
2462 		return -EINVAL;
2463 	}
2464 
2465 	if (!BPF_MAP_PTR(aux->map_state))
2466 		bpf_map_ptr_store(aux, meta->map_ptr,
2467 				  meta->map_ptr->unpriv_array);
2468 	else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr)
2469 		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
2470 				  meta->map_ptr->unpriv_array);
2471 	return 0;
2472 }
2473 
2474 static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
2475 {
2476 	const struct bpf_func_proto *fn = NULL;
2477 	struct bpf_reg_state *regs;
2478 	struct bpf_call_arg_meta meta;
2479 	bool changes_data;
2480 	int i, err;
2481 
2482 	/* find function prototype */
2483 	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
2484 		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
2485 			func_id);
2486 		return -EINVAL;
2487 	}
2488 
2489 	if (env->ops->get_func_proto)
2490 		fn = env->ops->get_func_proto(func_id, env->prog);
2491 	if (!fn) {
2492 		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
2493 			func_id);
2494 		return -EINVAL;
2495 	}
2496 
2497 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
2498 	if (!env->prog->gpl_compatible && fn->gpl_only) {
2499 		verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
2500 		return -EINVAL;
2501 	}
2502 
2503 	/* With LD_ABS/IND some JITs save/restore skb from r1. */
2504 	changes_data = bpf_helper_changes_pkt_data(fn->func);
2505 	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
2506 		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
2507 			func_id_name(func_id), func_id);
2508 		return -EINVAL;
2509 	}
2510 
2511 	memset(&meta, 0, sizeof(meta));
2512 	meta.pkt_access = fn->pkt_access;
2513 
2514 	err = check_func_proto(fn);
2515 	if (err) {
2516 		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
2517 			func_id_name(func_id), func_id);
2518 		return err;
2519 	}
2520 
2521 	/* check args */
2522 	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
2523 	if (err)
2524 		return err;
2525 	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
2526 	if (err)
2527 		return err;
2528 	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
2529 	if (err)
2530 		return err;
2531 	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
2532 	if (err)
2533 		return err;
2534 	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
2535 	if (err)
2536 		return err;
2537 
2538 	err = record_func_map(env, &meta, func_id, insn_idx);
2539 	if (err)
2540 		return err;
2541 
2542 	/* Mark slots with STACK_MISC in case of raw mode, stack offset
2543 	 * is inferred from register state.
2544 	 */
2545 	for (i = 0; i < meta.access_size; i++) {
2546 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
2547 				       BPF_WRITE, -1, false);
2548 		if (err)
2549 			return err;
2550 	}
2551 
2552 	regs = cur_regs(env);
2553 
2554 	/* check that flags argument in get_local_storage(map, flags) is 0,
2555 	 * this is required because get_local_storage() can't return an error.
2556 	 */
2557 	if (func_id == BPF_FUNC_get_local_storage &&
2558 	    !register_is_null(&regs[BPF_REG_2])) {
2559 		verbose(env, "get_local_storage() doesn't support non-zero flags\n");
2560 		return -EINVAL;
2561 	}
2562 
2563 	/* reset caller saved regs */
2564 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
2565 		mark_reg_not_init(env, regs, caller_saved[i]);
2566 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
2567 	}
2568 
2569 	/* update return register (already marked as written above) */
2570 	if (fn->ret_type == RET_INTEGER) {
2571 		/* sets type to SCALAR_VALUE */
2572 		mark_reg_unknown(env, regs, BPF_REG_0);
2573 	} else if (fn->ret_type == RET_VOID) {
2574 		regs[BPF_REG_0].type = NOT_INIT;
2575 	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
2576 		   fn->ret_type == RET_PTR_TO_MAP_VALUE) {
2577 		if (fn->ret_type == RET_PTR_TO_MAP_VALUE)
2578 			regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
2579 		else
2580 			regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
2581 		/* There is no offset yet applied, variable or fixed */
2582 		mark_reg_known_zero(env, regs, BPF_REG_0);
2583 		regs[BPF_REG_0].off = 0;
2584 		/* remember map_ptr, so that check_map_access()
2585 		 * can check 'value_size' boundary of memory access
2586 		 * to map element returned from bpf_map_lookup_elem()
2587 		 */
2588 		if (meta.map_ptr == NULL) {
2589 			verbose(env,
2590 				"kernel subsystem misconfigured verifier\n");
2591 			return -EINVAL;
2592 		}
2593 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
2594 		regs[BPF_REG_0].id = ++env->id_gen;
2595 	} else {
2596 		verbose(env, "unknown return type %d of func %s#%d\n",
2597 			fn->ret_type, func_id_name(func_id), func_id);
2598 		return -EINVAL;
2599 	}
2600 
2601 	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
2602 
2603 	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
2604 	if (err)
2605 		return err;
2606 
2607 	if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
2608 		const char *err_str;
2609 
2610 #ifdef CONFIG_PERF_EVENTS
2611 		err = get_callchain_buffers(sysctl_perf_event_max_stack);
2612 		err_str = "cannot get callchain buffer for func %s#%d\n";
2613 #else
2614 		err = -ENOTSUPP;
2615 		err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
2616 #endif
2617 		if (err) {
2618 			verbose(env, err_str, func_id_name(func_id), func_id);
2619 			return err;
2620 		}
2621 
2622 		env->prog->has_callchain_buf = true;
2623 	}
2624 
2625 	if (changes_data)
2626 		clear_all_pkt_pointers(env);
2627 	return 0;
2628 }
2629 
2630 static bool signed_add_overflows(s64 a, s64 b)
2631 {
2632 	/* Do the add in u64, where overflow is well-defined */
2633 	s64 res = (s64)((u64)a + (u64)b);
2634 
2635 	if (b < 0)
2636 		return res > a;
2637 	return res < a;
2638 }
2639 
2640 static bool signed_sub_overflows(s64 a, s64 b)
2641 {
2642 	/* Do the sub in u64, where overflow is well-defined */
2643 	s64 res = (s64)((u64)a - (u64)b);
2644 
2645 	if (b < 0)
2646 		return res < a;
2647 	return res > a;
2648 }
2649 
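/* Editor's worked example: signed_add_overflows(S64_MAX, 1) computes
 * res = S64_MIN via well-defined u64 wrap-around and, since b > 0 and
 * res < a, reports overflow. Likewise signed_sub_overflows(S64_MIN, 1)
 * wraps to S64_MAX and, with b > 0 and res > a, reports overflow.
 */
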
2650 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
2651 				  const struct bpf_reg_state *reg,
2652 				  enum bpf_reg_type type)
2653 {
2654 	bool known = tnum_is_const(reg->var_off);
2655 	s64 val = reg->var_off.value;
2656 	s64 smin = reg->smin_value;
2657 
2658 	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
2659 		verbose(env, "math between %s pointer and %lld is not allowed\n",
2660 			reg_type_str[type], val);
2661 		return false;
2662 	}
2663 
2664 	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
2665 		verbose(env, "%s pointer offset %d is not allowed\n",
2666 			reg_type_str[type], reg->off);
2667 		return false;
2668 	}
2669 
2670 	if (smin == S64_MIN) {
2671 		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
2672 			reg_type_str[type]);
2673 		return false;
2674 	}
2675 
2676 	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
2677 		verbose(env, "value %lld makes %s pointer be out of bounds\n",
2678 			smin, reg_type_str[type]);
2679 		return false;
2680 	}
2681 
2682 	return true;
2683 }
2684 
2685 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
2686  * Caller should also handle BPF_MOV case separately.
2687  * If we return -EACCES, caller may want to try again treating pointer as a
2688  * scalar.  So we only emit a diagnostic if !env->allow_ptr_leaks.
2689  */
2690 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
2691 				   struct bpf_insn *insn,
2692 				   const struct bpf_reg_state *ptr_reg,
2693 				   const struct bpf_reg_state *off_reg)
2694 {
2695 	struct bpf_verifier_state *vstate = env->cur_state;
2696 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
2697 	struct bpf_reg_state *regs = state->regs, *dst_reg;
2698 	bool known = tnum_is_const(off_reg->var_off);
2699 	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
2700 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
2701 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
2702 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
2703 	u8 opcode = BPF_OP(insn->code);
2704 	u32 dst = insn->dst_reg;
2705 
2706 	dst_reg = &regs[dst];
2707 
2708 	if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
2709 	    smin_val > smax_val || umin_val > umax_val) {
2710 		/* Taint dst register if offset had invalid bounds derived from
2711 		 * e.g. dead branches.
2712 		 */
2713 		__mark_reg_unknown(dst_reg);
2714 		return 0;
2715 	}
2716 
2717 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
2718 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
2719 		verbose(env,
2720 			"R%d 32-bit pointer arithmetic prohibited\n",
2721 			dst);
2722 		return -EACCES;
2723 	}
2724 
2725 	if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
2726 		verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
2727 			dst);
2728 		return -EACCES;
2729 	}
2730 	if (ptr_reg->type == CONST_PTR_TO_MAP) {
2731 		verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
2732 			dst);
2733 		return -EACCES;
2734 	}
2735 	if (ptr_reg->type == PTR_TO_PACKET_END) {
2736 		verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
2737 			dst);
2738 		return -EACCES;
2739 	}
2740 
2741 	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
2742 	 * The id may be overwritten later if we create a new variable offset.
2743 	 */
2744 	dst_reg->type = ptr_reg->type;
2745 	dst_reg->id = ptr_reg->id;
2746 
2747 	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
2748 	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
2749 		return -EINVAL;
2750 
2751 	switch (opcode) {
2752 	case BPF_ADD:
2753 		/* We can take a fixed offset as long as it doesn't overflow
2754 		 * the s32 'off' field
2755 		 */
2756 		if (known && (ptr_reg->off + smin_val ==
2757 			      (s64)(s32)(ptr_reg->off + smin_val))) {
2758 			/* pointer += K.  Accumulate it into fixed offset */
2759 			dst_reg->smin_value = smin_ptr;
2760 			dst_reg->smax_value = smax_ptr;
2761 			dst_reg->umin_value = umin_ptr;
2762 			dst_reg->umax_value = umax_ptr;
2763 			dst_reg->var_off = ptr_reg->var_off;
2764 			dst_reg->off = ptr_reg->off + smin_val;
2765 			dst_reg->range = ptr_reg->range;
2766 			break;
2767 		}
2768 		/* A new variable offset is created.  Note that off_reg->off
2769 		 * == 0, since it's a scalar.
2770 		 * dst_reg gets the pointer type and since some positive
2771 		 * integer value was added to the pointer, give it a new 'id'
2772 		 * if it's a PTR_TO_PACKET.
2773 		 * this creates a new 'base' pointer, off_reg (variable) gets
2774 		 * added into the variable offset, and we copy the fixed offset
2775 		 * from ptr_reg.
2776 		 */
2777 		if (signed_add_overflows(smin_ptr, smin_val) ||
2778 		    signed_add_overflows(smax_ptr, smax_val)) {
2779 			dst_reg->smin_value = S64_MIN;
2780 			dst_reg->smax_value = S64_MAX;
2781 		} else {
2782 			dst_reg->smin_value = smin_ptr + smin_val;
2783 			dst_reg->smax_value = smax_ptr + smax_val;
2784 		}
2785 		if (umin_ptr + umin_val < umin_ptr ||
2786 		    umax_ptr + umax_val < umax_ptr) {
2787 			dst_reg->umin_value = 0;
2788 			dst_reg->umax_value = U64_MAX;
2789 		} else {
2790 			dst_reg->umin_value = umin_ptr + umin_val;
2791 			dst_reg->umax_value = umax_ptr + umax_val;
2792 		}
2793 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
2794 		dst_reg->off = ptr_reg->off;
2795 		if (reg_is_pkt_pointer(ptr_reg)) {
2796 			dst_reg->id = ++env->id_gen;
2797 			/* something was added to pkt_ptr, set range to zero */
2798 			dst_reg->range = 0;
2799 		}
2800 		break;
2801 	case BPF_SUB:
2802 		if (dst_reg == off_reg) {
2803 			/* scalar -= pointer.  Creates an unknown scalar */
2804 			verbose(env, "R%d tried to subtract pointer from scalar\n",
2805 				dst);
2806 			return -EACCES;
2807 		}
2808 		/* We don't allow subtraction from FP, because (according to
2809 		 * test_verifier.c test "invalid fp arithmetic", JITs might not
2810 		 * be able to deal with it.
2811 		 */
2812 		if (ptr_reg->type == PTR_TO_STACK) {
2813 			verbose(env, "R%d subtraction from stack pointer prohibited\n",
2814 				dst);
2815 			return -EACCES;
2816 		}
2817 		if (known && (ptr_reg->off - smin_val ==
2818 			      (s64)(s32)(ptr_reg->off - smin_val))) {
2819 			/* pointer -= K.  Subtract it from fixed offset */
2820 			dst_reg->smin_value = smin_ptr;
2821 			dst_reg->smax_value = smax_ptr;
2822 			dst_reg->umin_value = umin_ptr;
2823 			dst_reg->umax_value = umax_ptr;
2824 			dst_reg->var_off = ptr_reg->var_off;
2825 			dst_reg->id = ptr_reg->id;
2826 			dst_reg->off = ptr_reg->off - smin_val;
2827 			dst_reg->range = ptr_reg->range;
2828 			break;
2829 		}
2830 		/* A new variable offset is created.  If the subtrahend is known
2831 		 * nonnegative, then any reg->range we had before is still good.
2832 		 */
2833 		if (signed_sub_overflows(smin_ptr, smax_val) ||
2834 		    signed_sub_overflows(smax_ptr, smin_val)) {
2835 			/* Overflow possible, we know nothing */
2836 			dst_reg->smin_value = S64_MIN;
2837 			dst_reg->smax_value = S64_MAX;
2838 		} else {
2839 			dst_reg->smin_value = smin_ptr - smax_val;
2840 			dst_reg->smax_value = smax_ptr - smin_val;
2841 		}
2842 		if (umin_ptr < umax_val) {
2843 			/* Overflow possible, we know nothing */
2844 			dst_reg->umin_value = 0;
2845 			dst_reg->umax_value = U64_MAX;
2846 		} else {
2847 			/* Cannot overflow (as long as bounds are consistent) */
2848 			dst_reg->umin_value = umin_ptr - umax_val;
2849 			dst_reg->umax_value = umax_ptr - umin_val;
2850 		}
2851 		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
2852 		dst_reg->off = ptr_reg->off;
2853 		if (reg_is_pkt_pointer(ptr_reg)) {
2854 			dst_reg->id = ++env->id_gen;
2855 			/* pkt_ptr may have advanced if smin_val < 0, so reset range */
2856 			if (smin_val < 0)
2857 				dst_reg->range = 0;
2858 		}
2859 		break;
2860 	case BPF_AND:
2861 	case BPF_OR:
2862 	case BPF_XOR:
2863 		/* bitwise ops on pointers are troublesome, prohibit. */
2864 		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
2865 			dst, bpf_alu_string[opcode >> 4]);
2866 		return -EACCES;
2867 	default:
2868 		/* other operators (e.g. MUL,LSH) produce non-pointer results */
2869 		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
2870 			dst, bpf_alu_string[opcode >> 4]);
2871 		return -EACCES;
2872 	}
2873 
2874 	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
2875 		return -EINVAL;
2876 
2877 	__update_reg_bounds(dst_reg);
2878 	__reg_deduce_bounds(dst_reg);
2879 	__reg_bound_offset(dst_reg);
2880 	return 0;
2881 }
2882 
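/* Editor's illustration: for 'pkt_ptr += r2' with r2 a scalar known to
 * lie in [0, 64], the BPF_ADD case above keeps dst_reg a packet
 * pointer, folds [0, 64] into its bounds and var_off, assigns a fresh
 * id, and resets range to 0, so the program must re-compare the new
 * pointer against data_end before dereferencing it.
 */
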
2883 /* WARNING: This function does calculations on 64-bit values, but the actual
2884  * execution may occur on 32-bit values. Therefore, things like bitshifts
2885  * need extra checks in the 32-bit case.
2886  */
2887 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2888 				      struct bpf_insn *insn,
2889 				      struct bpf_reg_state *dst_reg,
2890 				      struct bpf_reg_state src_reg)
2891 {
2892 	struct bpf_reg_state *regs = cur_regs(env);
2893 	u8 opcode = BPF_OP(insn->code);
2894 	bool src_known, dst_known;
2895 	s64 smin_val, smax_val;
2896 	u64 umin_val, umax_val;
2897 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
2898 
2899 	if (insn_bitness == 32) {
2900 		/* Relevant for 32-bit RSH: Information can propagate towards
2901 		 * LSB, so it isn't sufficient to only truncate the output to
2902 		 * 32 bits.
2903 		 */
2904 		coerce_reg_to_size(dst_reg, 4);
2905 		coerce_reg_to_size(&src_reg, 4);
2906 	}
2907 
2908 	smin_val = src_reg.smin_value;
2909 	smax_val = src_reg.smax_value;
2910 	umin_val = src_reg.umin_value;
2911 	umax_val = src_reg.umax_value;
2912 	src_known = tnum_is_const(src_reg.var_off);
2913 	dst_known = tnum_is_const(dst_reg->var_off);
2914 
2915 	if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
2916 	    smin_val > smax_val || umin_val > umax_val) {
2917 		/* Taint dst register if offset had invalid bounds derived from
2918 		 * e.g. dead branches.
2919 		 */
2920 		__mark_reg_unknown(dst_reg);
2921 		return 0;
2922 	}
2923 
2924 	if (!src_known &&
2925 	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
2926 		__mark_reg_unknown(dst_reg);
2927 		return 0;
2928 	}
2929 
2930 	switch (opcode) {
2931 	case BPF_ADD:
2932 		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
2933 		    signed_add_overflows(dst_reg->smax_value, smax_val)) {
2934 			dst_reg->smin_value = S64_MIN;
2935 			dst_reg->smax_value = S64_MAX;
2936 		} else {
2937 			dst_reg->smin_value += smin_val;
2938 			dst_reg->smax_value += smax_val;
2939 		}
2940 		if (dst_reg->umin_value + umin_val < umin_val ||
2941 		    dst_reg->umax_value + umax_val < umax_val) {
2942 			dst_reg->umin_value = 0;
2943 			dst_reg->umax_value = U64_MAX;
2944 		} else {
2945 			dst_reg->umin_value += umin_val;
2946 			dst_reg->umax_value += umax_val;
2947 		}
2948 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
2949 		break;
2950 	case BPF_SUB:
2951 		if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
2952 		    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
2953 			/* Overflow possible, we know nothing */
2954 			dst_reg->smin_value = S64_MIN;
2955 			dst_reg->smax_value = S64_MAX;
2956 		} else {
2957 			dst_reg->smin_value -= smax_val;
2958 			dst_reg->smax_value -= smin_val;
2959 		}
2960 		if (dst_reg->umin_value < umax_val) {
2961 			/* Overflow possible, we know nothing */
2962 			dst_reg->umin_value = 0;
2963 			dst_reg->umax_value = U64_MAX;
2964 		} else {
2965 			/* Cannot overflow (as long as bounds are consistent) */
2966 			dst_reg->umin_value -= umax_val;
2967 			dst_reg->umax_value -= umin_val;
2968 		}
2969 		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
2970 		break;
2971 	case BPF_MUL:
2972 		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
2973 		if (smin_val < 0 || dst_reg->smin_value < 0) {
2974 			/* Ain't nobody got time to multiply that sign */
2975 			__mark_reg_unbounded(dst_reg);
2976 			__update_reg_bounds(dst_reg);
2977 			break;
2978 		}
2979 		/* Both values are positive, so we can work with unsigned and
2980 		 * copy the result to signed (unless it exceeds S64_MAX).
2981 		 */
2982 		if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
2983 			/* Potential overflow, we know nothing */
2984 			__mark_reg_unbounded(dst_reg);
2985 			/* (except what we can learn from the var_off) */
2986 			__update_reg_bounds(dst_reg);
2987 			break;
2988 		}
2989 		dst_reg->umin_value *= umin_val;
2990 		dst_reg->umax_value *= umax_val;
2991 		if (dst_reg->umax_value > S64_MAX) {
2992 			/* Overflow possible, we know nothing */
2993 			dst_reg->smin_value = S64_MIN;
2994 			dst_reg->smax_value = S64_MAX;
2995 		} else {
2996 			dst_reg->smin_value = dst_reg->umin_value;
2997 			dst_reg->smax_value = dst_reg->umax_value;
2998 		}
2999 		break;
3000 	case BPF_AND:
3001 		if (src_known && dst_known) {
3002 			__mark_reg_known(dst_reg, dst_reg->var_off.value &
3003 						  src_reg.var_off.value);
3004 			break;
3005 		}
3006 		/* We get our minimum from the var_off, since that's inherently
3007 		 * bitwise.  Our maximum is the minimum of the operands' maxima.
3008 		 */
3009 		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
3010 		dst_reg->umin_value = dst_reg->var_off.value;
3011 		dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
3012 		if (dst_reg->smin_value < 0 || smin_val < 0) {
3013 			/* Lose signed bounds when ANDing negative numbers,
3014 			 * ain't nobody got time for that.
3015 			 */
3016 			dst_reg->smin_value = S64_MIN;
3017 			dst_reg->smax_value = S64_MAX;
3018 		} else {
3019 			/* ANDing two positives gives a positive, so safe to
3020 			 * cast result into s64.
3021 			 */
3022 			dst_reg->smin_value = dst_reg->umin_value;
3023 			dst_reg->smax_value = dst_reg->umax_value;
3024 		}
3025 		/* We may learn something more from the var_off */
3026 		__update_reg_bounds(dst_reg);
3027 		break;
3028 	case BPF_OR:
3029 		if (src_known && dst_known) {
3030 			__mark_reg_known(dst_reg, dst_reg->var_off.value |
3031 						  src_reg.var_off.value);
3032 			break;
3033 		}
3034 		/* We get our maximum from the var_off, and our minimum is the
3035 		 * maximum of the operands' minima
3036 		 */
3037 		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
3038 		dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
3039 		dst_reg->umax_value = dst_reg->var_off.value |
3040 				      dst_reg->var_off.mask;
3041 		if (dst_reg->smin_value < 0 || smin_val < 0) {
3042 			/* Lose signed bounds when ORing negative numbers,
3043 			 * ain't nobody got time for that.
3044 			 */
3045 			dst_reg->smin_value = S64_MIN;
3046 			dst_reg->smax_value = S64_MAX;
3047 		} else {
3048 			/* ORing two positives gives a positive, so safe to
3049 			 * cast result into s64.
3050 			 */
3051 			dst_reg->smin_value = dst_reg->umin_value;
3052 			dst_reg->smax_value = dst_reg->umax_value;
3053 		}
3054 		/* We may learn something more from the var_off */
3055 		__update_reg_bounds(dst_reg);
3056 		break;
3057 	case BPF_LSH:
3058 		if (umax_val >= insn_bitness) {
3059 			/* Shifts greater than 31 or 63 are undefined.
3060 			 * This includes shifts by a negative number.
3061 			 */
3062 			mark_reg_unknown(env, regs, insn->dst_reg);
3063 			break;
3064 		}
3065 		/* We lose all sign bit information (except what we can pick
3066 		 * up from var_off)
3067 		 */
3068 		dst_reg->smin_value = S64_MIN;
3069 		dst_reg->smax_value = S64_MAX;
3070 		/* If we might shift our top bit out, then we know nothing */
3071 		if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
3072 			dst_reg->umin_value = 0;
3073 			dst_reg->umax_value = U64_MAX;
3074 		} else {
3075 			dst_reg->umin_value <<= umin_val;
3076 			dst_reg->umax_value <<= umax_val;
3077 		}
3078 		dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
3079 		/* We may learn something more from the var_off */
3080 		__update_reg_bounds(dst_reg);
3081 		break;
3082 	case BPF_RSH:
3083 		if (umax_val >= insn_bitness) {
3084 			/* Shifts greater than 31 or 63 are undefined.
3085 			 * This includes shifts by a negative number.
3086 			 */
3087 			mark_reg_unknown(env, regs, insn->dst_reg);
3088 			break;
3089 		}
3090 		/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
3091 		 * be negative, then either:
3092 		 * 1) src_reg might be zero, so the sign bit of the result is
3093 		 *    unknown, so we lose our signed bounds
3094 		 * 2) it's known negative, thus the unsigned bounds capture the
3095 		 *    signed bounds
3096 		 * 3) the signed bounds cross zero, so they tell us nothing
3097 		 *    about the result
3098 		 * If the value in dst_reg is known nonnegative, then again the
3099 		 * unsigned bounds capture the signed bounds.
3100 		 * Thus, in all cases it suffices to blow away our signed bounds
3101 		 * and rely on inferring new ones from the unsigned bounds and
3102 		 * var_off of the result.
3103 		 */
3104 		dst_reg->smin_value = S64_MIN;
3105 		dst_reg->smax_value = S64_MAX;
3106 		dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
3107 		dst_reg->umin_value >>= umax_val;
3108 		dst_reg->umax_value >>= umin_val;
3109 		/* We may learn something more from the var_off */
3110 		__update_reg_bounds(dst_reg);
3111 		break;
3112 	case BPF_ARSH:
3113 		if (umax_val >= insn_bitness) {
3114 			/* Shifts greater than 31 or 63 are undefined.
3115 			 * This includes shifts by a negative number.
3116 			 */
3117 			mark_reg_unknown(env, regs, insn->dst_reg);
3118 			break;
3119 		}
3120 
3121 		/* Upon reaching here, src_known is true and
3122 		 * umax_val is equal to umin_val.
3123 		 */
3124 		dst_reg->smin_value >>= umin_val;
3125 		dst_reg->smax_value >>= umin_val;
3126 		dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
3127 
3128 		/* blow away the dst_reg umin_value/umax_value and rely on
3129 		 * dst_reg var_off to refine the result.
3130 		 */
3131 		dst_reg->umin_value = 0;
3132 		dst_reg->umax_value = U64_MAX;
3133 		__update_reg_bounds(dst_reg);
3134 		break;
3135 	default:
3136 		mark_reg_unknown(env, regs, insn->dst_reg);
3137 		break;
3138 	}
3139 
3140 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
3141 		/* 32-bit ALU ops are (32,32)->32 */
3142 		coerce_reg_to_size(dst_reg, 4);
3143 	}
3144 
3145 	__reg_deduce_bounds(dst_reg);
3146 	__reg_bound_offset(dst_reg);
3147 	return 0;
3148 }
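
/* Worked example for the BPF_AND case above (illustrative, not part of
 * the original source): suppose dst_reg is a scalar in [0, 255] with
 * var_off = (value=0x0; mask=0xff) and src_reg is the known constant
 * 0x0f.  Then:
 *   tnum_and((0x0; 0xff), (0x0f; 0x0)) = (0x0; 0x0f)
 *   umin_value = var_off.value         = 0
 *   umax_value = min(255, 15)          = 15
 * and since both smin_values are >= 0, the signed bounds become
 * [0, 15] as well.
 */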
3149 
3150 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
3151  * and var_off.
3152  */
3153 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
3154 				   struct bpf_insn *insn)
3155 {
3156 	struct bpf_verifier_state *vstate = env->cur_state;
3157 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3158 	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
3159 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
3160 	u8 opcode = BPF_OP(insn->code);
3161 
3162 	dst_reg = &regs[insn->dst_reg];
3163 	src_reg = NULL;
3164 	if (dst_reg->type != SCALAR_VALUE)
3165 		ptr_reg = dst_reg;
3166 	if (BPF_SRC(insn->code) == BPF_X) {
3167 		src_reg = &regs[insn->src_reg];
3168 		if (src_reg->type != SCALAR_VALUE) {
3169 			if (dst_reg->type != SCALAR_VALUE) {
3170 				/* Combining two pointers by any ALU op yields
3171 				 * an arbitrary scalar. Disallow all math except
3172 				 * pointer subtraction
3173 				 */
3174 				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
3175 					mark_reg_unknown(env, regs, insn->dst_reg);
3176 					return 0;
3177 				}
3178 				verbose(env, "R%d pointer %s pointer prohibited\n",
3179 					insn->dst_reg,
3180 					bpf_alu_string[opcode >> 4]);
3181 				return -EACCES;
3182 			} else {
3183 				/* scalar += pointer
3184 				 * This is legal, but we have to reverse our
3185 				 * src/dest handling in computing the range
3186 				 */
3187 				return adjust_ptr_min_max_vals(env, insn,
3188 							       src_reg, dst_reg);
3189 			}
3190 		} else if (ptr_reg) {
3191 			/* pointer += scalar */
3192 			return adjust_ptr_min_max_vals(env, insn,
3193 						       dst_reg, src_reg);
3194 		}
3195 	} else {
3196 		/* Pretend the src is a reg with a known value, since we only
3197 		 * need to be able to read from this state.
3198 		 */
3199 		off_reg.type = SCALAR_VALUE;
3200 		__mark_reg_known(&off_reg, insn->imm);
3201 		src_reg = &off_reg;
3202 		if (ptr_reg) /* pointer += K */
3203 			return adjust_ptr_min_max_vals(env, insn,
3204 						       ptr_reg, src_reg);
3205 	}
3206 
3207 	/* Got here implies adding two SCALAR_VALUEs */
3208 	if (WARN_ON_ONCE(ptr_reg)) {
3209 		print_verifier_state(env, state);
3210 		verbose(env, "verifier internal error: unexpected ptr_reg\n");
3211 		return -EINVAL;
3212 	}
3213 	if (WARN_ON(!src_reg)) {
3214 		print_verifier_state(env, state);
3215 		verbose(env, "verifier internal error: no src_reg\n");
3216 		return -EINVAL;
3217 	}
3218 	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
3219 }
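
/* Dispatch sketch for adjust_reg_min_max_vals() (illustrative): given
 *   BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),   // R1 = fp, a pointer
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),  // pointer += K
 * the second insn takes the "pointer += K" path into
 * adjust_ptr_min_max_vals(), while
 *   BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3)
 * with two SCALAR_VALUE operands falls through to
 * adjust_scalar_min_max_vals().
 */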
3220 
3221 /* check validity of 32-bit and 64-bit arithmetic operations */
3222 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
3223 {
3224 	struct bpf_reg_state *regs = cur_regs(env);
3225 	u8 opcode = BPF_OP(insn->code);
3226 	int err;
3227 
3228 	if (opcode == BPF_END || opcode == BPF_NEG) {
3229 		if (opcode == BPF_NEG) {
3230 			if (BPF_SRC(insn->code) != 0 ||
3231 			    insn->src_reg != BPF_REG_0 ||
3232 			    insn->off != 0 || insn->imm != 0) {
3233 				verbose(env, "BPF_NEG uses reserved fields\n");
3234 				return -EINVAL;
3235 			}
3236 		} else {
3237 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
3238 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
3239 			    BPF_CLASS(insn->code) == BPF_ALU64) {
3240 				verbose(env, "BPF_END uses reserved fields\n");
3241 				return -EINVAL;
3242 			}
3243 		}
3244 
3245 		/* check src operand */
3246 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3247 		if (err)
3248 			return err;
3249 
3250 		if (is_pointer_value(env, insn->dst_reg)) {
3251 			verbose(env, "R%d pointer arithmetic prohibited\n",
3252 				insn->dst_reg);
3253 			return -EACCES;
3254 		}
3255 
3256 		/* check dest operand */
3257 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
3258 		if (err)
3259 			return err;
3260 
3261 	} else if (opcode == BPF_MOV) {
3262 
3263 		if (BPF_SRC(insn->code) == BPF_X) {
3264 			if (insn->imm != 0 || insn->off != 0) {
3265 				verbose(env, "BPF_MOV uses reserved fields\n");
3266 				return -EINVAL;
3267 			}
3268 
3269 			/* check src operand */
3270 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
3271 			if (err)
3272 				return err;
3273 		} else {
3274 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
3275 				verbose(env, "BPF_MOV uses reserved fields\n");
3276 				return -EINVAL;
3277 			}
3278 		}
3279 
3280 		/* check dest operand, mark as required later */
3281 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
3282 		if (err)
3283 			return err;
3284 
3285 		if (BPF_SRC(insn->code) == BPF_X) {
3286 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
3287 				/* case: R1 = R2
3288 				 * copy register state to dest reg
3289 				 */
3290 				regs[insn->dst_reg] = regs[insn->src_reg];
3291 				regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
3292 			} else {
3293 				/* R1 = (u32) R2 */
3294 				if (is_pointer_value(env, insn->src_reg)) {
3295 					verbose(env,
3296 						"R%d partial copy of pointer\n",
3297 						insn->src_reg);
3298 					return -EACCES;
3299 				}
3300 				mark_reg_unknown(env, regs, insn->dst_reg);
3301 				coerce_reg_to_size(&regs[insn->dst_reg], 4);
3302 			}
3303 		} else {
3304 			/* case: R = imm
3305 			 * remember the value we stored into this reg
3306 			 */
3307 			/* clear any state __mark_reg_known doesn't set */
3308 			mark_reg_unknown(env, regs, insn->dst_reg);
3309 			regs[insn->dst_reg].type = SCALAR_VALUE;
3310 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
3311 				__mark_reg_known(regs + insn->dst_reg,
3312 						 insn->imm);
3313 			} else {
3314 				__mark_reg_known(regs + insn->dst_reg,
3315 						 (u32)insn->imm);
3316 			}
3317 		}
3318 
3319 	} else if (opcode > BPF_END) {
3320 		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
3321 		return -EINVAL;
3322 
3323 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
3324 
3325 		if (BPF_SRC(insn->code) == BPF_X) {
3326 			if (insn->imm != 0 || insn->off != 0) {
3327 				verbose(env, "BPF_ALU uses reserved fields\n");
3328 				return -EINVAL;
3329 			}
3330 			/* check src1 operand */
3331 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
3332 			if (err)
3333 				return err;
3334 		} else {
3335 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
3336 				verbose(env, "BPF_ALU uses reserved fields\n");
3337 				return -EINVAL;
3338 			}
3339 		}
3340 
3341 		/* check src2 operand */
3342 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3343 		if (err)
3344 			return err;
3345 
3346 		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
3347 		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
3348 			verbose(env, "div by zero\n");
3349 			return -EINVAL;
3350 		}
3351 
3352 		if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
3353 			verbose(env, "BPF_ARSH not supported for 32 bit ALU\n");
3354 			return -EINVAL;
3355 		}
3356 
3357 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
3358 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
3359 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
3360 
3361 			if (insn->imm < 0 || insn->imm >= size) {
3362 				verbose(env, "invalid shift %d\n", insn->imm);
3363 				return -EINVAL;
3364 			}
3365 		}
3366 
3367 		/* check dest operand */
3368 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
3369 		if (err)
3370 			return err;
3371 
3372 		return adjust_reg_min_max_vals(env, insn);
3373 	}
3374 
3375 	return 0;
3376 }
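
/* Example rejections from the checks above (illustrative):
 *   BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 64)  -> "invalid shift 64"
 *   BPF_ALU32_IMM(BPF_ARSH, BPF_REG_1, 2)  -> "BPF_ARSH not supported
 *                                              for 32 bit ALU"
 *   BPF_ALU64_IMM(BPF_DIV, BPF_REG_1, 0)   -> "div by zero"
 */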
3377 
3378 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
3379 				   struct bpf_reg_state *dst_reg,
3380 				   enum bpf_reg_type type,
3381 				   bool range_right_open)
3382 {
3383 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3384 	struct bpf_reg_state *regs = state->regs, *reg;
3385 	u16 new_range;
3386 	int i, j;
3387 
3388 	if (dst_reg->off < 0 ||
3389 	    (dst_reg->off == 0 && range_right_open))
3390 		/* This doesn't give us any range */
3391 		return;
3392 
3393 	if (dst_reg->umax_value > MAX_PACKET_OFF ||
3394 	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
3395 		/* Risk of overflow.  For instance, ptr + (1<<63) may be less
3396 		 * than pkt_end, but that's because it's also less than pkt.
3397 		 */
3398 		return;
3399 
3400 	new_range = dst_reg->off;
3401 	if (range_right_open)
3402 		new_range--;
3403 
3404 	/* Examples for register markings:
3405 	 *
3406 	 * pkt_data in dst register:
3407 	 *
3408 	 *   r2 = r3;
3409 	 *   r2 += 8;
3410 	 *   if (r2 > pkt_end) goto <handle exception>
3411 	 *   <access okay>
3412 	 *
3413 	 *   r2 = r3;
3414 	 *   r2 += 8;
3415 	 *   if (r2 < pkt_end) goto <access okay>
3416 	 *   <handle exception>
3417 	 *
3418 	 *   Where:
3419 	 *     r2 == dst_reg, pkt_end == src_reg
3420 	 *     r2=pkt(id=n,off=8,r=0)
3421 	 *     r3=pkt(id=n,off=0,r=0)
3422 	 *
3423 	 * pkt_data in src register:
3424 	 *
3425 	 *   r2 = r3;
3426 	 *   r2 += 8;
3427 	 *   if (pkt_end >= r2) goto <access okay>
3428 	 *   <handle exception>
3429 	 *
3430 	 *   r2 = r3;
3431 	 *   r2 += 8;
3432 	 *   if (pkt_end <= r2) goto <handle exception>
3433 	 *   <access okay>
3434 	 *
3435 	 *   Where:
3436 	 *     pkt_end == dst_reg, r2 == src_reg
3437 	 *     r2=pkt(id=n,off=8,r=0)
3438 	 *     r3=pkt(id=n,off=0,r=0)
3439 	 *
3440 	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
3441 	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
3442 	 * and [r3, r3 + 8-1) respectively is safe to access depending on
3443 	 * the check.
3444 	 */
3445 
3446 	/* If our ids match, then we must have the same max_value.  And we
3447 	 * don't care about the other reg's fixed offset, since if it's too big
3448 	 * the range won't allow anything.
3449 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
3450 	 */
3451 	for (i = 0; i < MAX_BPF_REG; i++)
3452 		if (regs[i].type == type && regs[i].id == dst_reg->id)
3453 			/* keep the maximum range already checked */
3454 			regs[i].range = max(regs[i].range, new_range);
3455 
3456 	for (j = 0; j <= vstate->curframe; j++) {
3457 		state = vstate->frame[j];
3458 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
3459 			if (state->stack[i].slot_type[0] != STACK_SPILL)
3460 				continue;
3461 			reg = &state->stack[i].spilled_ptr;
3462 			if (reg->type == type && reg->id == dst_reg->id)
3463 				reg->range = max(reg->range, new_range);
3464 		}
3465 	}
3466 }
3467 
3468 /* Adjusts the register min/max values in the case that the dst_reg is the
3469  * variable register that we are working on, and src_reg is a constant or we're
3470  * simply doing a BPF_K check.
3471  * In JEQ/JNE cases we also adjust the var_off values.
3472  */
3473 static void reg_set_min_max(struct bpf_reg_state *true_reg,
3474 			    struct bpf_reg_state *false_reg, u64 val,
3475 			    u8 opcode)
3476 {
3477 	/* If the dst_reg is a pointer, we can't learn anything about its
3478 	 * variable offset from the compare (unless src_reg were a pointer into
3479 	 * the same object, but we don't bother with that).
3480 	 * Since false_reg and true_reg have the same type by construction, we
3481 	 * only need to check one of them for pointerness.
3482 	 */
3483 	if (__is_pointer_value(false, false_reg))
3484 		return;
3485 
3486 	switch (opcode) {
3487 	case BPF_JEQ:
3488 		/* If this is false then we know nothing Jon Snow, but if it is
3489 		 * true then we know for sure.
3490 		 */
3491 		__mark_reg_known(true_reg, val);
3492 		break;
3493 	case BPF_JNE:
3494 		/* If this is true we know nothing Jon Snow, but if it is false
3495 		 * we know the value for sure.
3496 		 */
3497 		__mark_reg_known(false_reg, val);
3498 		break;
3499 	case BPF_JGT:
3500 		false_reg->umax_value = min(false_reg->umax_value, val);
3501 		true_reg->umin_value = max(true_reg->umin_value, val + 1);
3502 		break;
3503 	case BPF_JSGT:
3504 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
3505 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
3506 		break;
3507 	case BPF_JLT:
3508 		false_reg->umin_value = max(false_reg->umin_value, val);
3509 		true_reg->umax_value = min(true_reg->umax_value, val - 1);
3510 		break;
3511 	case BPF_JSLT:
3512 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
3513 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
3514 		break;
3515 	case BPF_JGE:
3516 		false_reg->umax_value = min(false_reg->umax_value, val - 1);
3517 		true_reg->umin_value = max(true_reg->umin_value, val);
3518 		break;
3519 	case BPF_JSGE:
3520 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
3521 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
3522 		break;
3523 	case BPF_JLE:
3524 		false_reg->umin_value = max(false_reg->umin_value, val + 1);
3525 		true_reg->umax_value = min(true_reg->umax_value, val);
3526 		break;
3527 	case BPF_JSLE:
3528 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
3529 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
3530 		break;
3531 	default:
3532 		break;
3533 	}
3534 
3535 	__reg_deduce_bounds(false_reg);
3536 	__reg_deduce_bounds(true_reg);
3537 	/* We might have learned some bits from the bounds. */
3538 	__reg_bound_offset(false_reg);
3539 	__reg_bound_offset(true_reg);
3540 	/* Intersecting with the old var_off might have improved our bounds
3541 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
3542 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
3543 	 */
3544 	__update_reg_bounds(false_reg);
3545 	__update_reg_bounds(true_reg);
3546 }
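
/* Worked example (illustrative): for "if (R1 > 10) goto ..." with R1 an
 * unbounded scalar, the BPF_JGT case above leaves the false branch with
 * umax_value = 10 and gives the true branch umin_value = 11; the final
 * __update_reg_bounds() calls may then tighten either branch further
 * using whatever bits var_off already pinned down.
 */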
3547 
3548 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
3549  * the variable reg.
3550  */
3551 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
3552 				struct bpf_reg_state *false_reg, u64 val,
3553 				u8 opcode)
3554 {
3555 	if (__is_pointer_value(false, false_reg))
3556 		return;
3557 
3558 	switch (opcode) {
3559 	case BPF_JEQ:
3560 		/* If this is false then we know nothing Jon Snow, but if it is
3561 		 * true then we know for sure.
3562 		 */
3563 		__mark_reg_known(true_reg, val);
3564 		break;
3565 	case BPF_JNE:
3566 		/* If this is true we know nothing Jon Snow, but if it is false
3567 		 * we know the value for sure.
3568 		 */
3569 		__mark_reg_known(false_reg, val);
3570 		break;
3571 	case BPF_JGT:
3572 		true_reg->umax_value = min(true_reg->umax_value, val - 1);
3573 		false_reg->umin_value = max(false_reg->umin_value, val);
3574 		break;
3575 	case BPF_JSGT:
3576 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
3577 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
3578 		break;
3579 	case BPF_JLT:
3580 		true_reg->umin_value = max(true_reg->umin_value, val + 1);
3581 		false_reg->umax_value = min(false_reg->umax_value, val);
3582 		break;
3583 	case BPF_JSLT:
3584 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
3585 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
3586 		break;
3587 	case BPF_JGE:
3588 		true_reg->umax_value = min(true_reg->umax_value, val);
3589 		false_reg->umin_value = max(false_reg->umin_value, val + 1);
3590 		break;
3591 	case BPF_JSGE:
3592 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
3593 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
3594 		break;
3595 	case BPF_JLE:
3596 		true_reg->umin_value = max(true_reg->umin_value, val);
3597 		false_reg->umax_value = min(false_reg->umax_value, val - 1);
3598 		break;
3599 	case BPF_JSLE:
3600 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
3601 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
3602 		break;
3603 	default:
3604 		break;
3605 	}
3606 
3607 	__reg_deduce_bounds(false_reg);
3608 	__reg_deduce_bounds(true_reg);
3609 	/* We might have learned some bits from the bounds. */
3610 	__reg_bound_offset(false_reg);
3611 	__reg_bound_offset(true_reg);
3612 	/* Intersecting with the old var_off might have improved our bounds
3613 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
3614 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
3615 	 */
3616 	__update_reg_bounds(false_reg);
3617 	__update_reg_bounds(true_reg);
3618 }
3619 
3620 /* Regs are known to be equal, so intersect their min/max/var_off */
3621 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
3622 				  struct bpf_reg_state *dst_reg)
3623 {
3624 	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
3625 							dst_reg->umin_value);
3626 	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
3627 							dst_reg->umax_value);
3628 	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
3629 							dst_reg->smin_value);
3630 	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
3631 							dst_reg->smax_value);
3632 	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
3633 							     dst_reg->var_off);
3634 	/* We might have learned new bounds from the var_off. */
3635 	__update_reg_bounds(src_reg);
3636 	__update_reg_bounds(dst_reg);
3637 	/* We might have learned something about the sign bit. */
3638 	__reg_deduce_bounds(src_reg);
3639 	__reg_deduce_bounds(dst_reg);
3640 	/* We might have learned some bits from the bounds. */
3641 	__reg_bound_offset(src_reg);
3642 	__reg_bound_offset(dst_reg);
3643 	/* Intersecting with the old var_off might have improved our bounds
3644 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
3645 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
3646 	 */
3647 	__update_reg_bounds(src_reg);
3648 	__update_reg_bounds(dst_reg);
3649 }
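
/* Worked example (illustrative): if src_reg is known to lie in [0, 100]
 * and dst_reg in [50, 200], a taken JEQ lets both registers adopt the
 * intersection [50, 100], and tnum_intersect() likewise merges the
 * known bits of the two var_offs.
 */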
3650 
3651 static void reg_combine_min_max(struct bpf_reg_state *true_src,
3652 				struct bpf_reg_state *true_dst,
3653 				struct bpf_reg_state *false_src,
3654 				struct bpf_reg_state *false_dst,
3655 				u8 opcode)
3656 {
3657 	switch (opcode) {
3658 	case BPF_JEQ:
3659 		__reg_combine_min_max(true_src, true_dst);
3660 		break;
3661 	case BPF_JNE:
3662 		__reg_combine_min_max(false_src, false_dst);
3663 		break;
3664 	}
3665 }
3666 
3667 static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
3668 			 bool is_null)
3669 {
3670 	struct bpf_reg_state *reg = &regs[regno];
3671 
3672 	if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
3673 		/* Old offset (both fixed and variable parts) should
3674 		 * have been known-zero, because we don't allow pointer
3675 		 * arithmetic on pointers that might be NULL.
3676 		 */
3677 		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
3678 				 !tnum_equals_const(reg->var_off, 0) ||
3679 				 reg->off)) {
3680 			__mark_reg_known_zero(reg);
3681 			reg->off = 0;
3682 		}
3683 		if (is_null) {
3684 			reg->type = SCALAR_VALUE;
3685 		} else if (reg->map_ptr->inner_map_meta) {
3686 			reg->type = CONST_PTR_TO_MAP;
3687 			reg->map_ptr = reg->map_ptr->inner_map_meta;
3688 		} else {
3689 			reg->type = PTR_TO_MAP_VALUE;
3690 		}
3691 		/* We don't need id from this point onwards anymore, thus we
3692 		 * should better reset it, so that state pruning has chances
3693 		 * to take effect.
3694 		 */
3695 		reg->id = 0;
3696 	}
3697 }
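
/* Typical guarded-lookup pattern handled above (illustrative sketch):
 *   R0 = bpf_map_lookup_elem(map, &key); // PTR_TO_MAP_VALUE_OR_NULL, id=n
 *   if (R0 == 0) goto out;               // taken branch: R0 -> SCALAR_VALUE
 *   *(u64 *)(R0 + 0) = 1;                // here: R0 -> PTR_TO_MAP_VALUE
 * Every register carrying the same id is retyped in both branches.
 */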
3698 
3699 /* The logic is similar to find_good_pkt_pointers(), both could eventually
3700  * be folded together at some point.
3701  */
3702 static void mark_map_regs(struct bpf_verifier_state *vstate, u32 regno,
3703 			  bool is_null)
3704 {
3705 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3706 	struct bpf_reg_state *regs = state->regs;
3707 	u32 id = regs[regno].id;
3708 	int i, j;
3709 
3710 	for (i = 0; i < MAX_BPF_REG; i++)
3711 		mark_map_reg(regs, i, id, is_null);
3712 
3713 	for (j = 0; j <= vstate->curframe; j++) {
3714 		state = vstate->frame[j];
3715 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
3716 			if (state->stack[i].slot_type[0] != STACK_SPILL)
3717 				continue;
3718 			mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null);
3719 		}
3720 	}
3721 }
3722 
3723 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
3724 				   struct bpf_reg_state *dst_reg,
3725 				   struct bpf_reg_state *src_reg,
3726 				   struct bpf_verifier_state *this_branch,
3727 				   struct bpf_verifier_state *other_branch)
3728 {
3729 	if (BPF_SRC(insn->code) != BPF_X)
3730 		return false;
3731 
3732 	switch (BPF_OP(insn->code)) {
3733 	case BPF_JGT:
3734 		if ((dst_reg->type == PTR_TO_PACKET &&
3735 		     src_reg->type == PTR_TO_PACKET_END) ||
3736 		    (dst_reg->type == PTR_TO_PACKET_META &&
3737 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
3738 			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
3739 			find_good_pkt_pointers(this_branch, dst_reg,
3740 					       dst_reg->type, false);
3741 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
3742 			    src_reg->type == PTR_TO_PACKET) ||
3743 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
3744 			    src_reg->type == PTR_TO_PACKET_META)) {
3745 			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
3746 			find_good_pkt_pointers(other_branch, src_reg,
3747 					       src_reg->type, true);
3748 		} else {
3749 			return false;
3750 		}
3751 		break;
3752 	case BPF_JLT:
3753 		if ((dst_reg->type == PTR_TO_PACKET &&
3754 		     src_reg->type == PTR_TO_PACKET_END) ||
3755 		    (dst_reg->type == PTR_TO_PACKET_META &&
3756 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
3757 			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
3758 			find_good_pkt_pointers(other_branch, dst_reg,
3759 					       dst_reg->type, true);
3760 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
3761 			    src_reg->type == PTR_TO_PACKET) ||
3762 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
3763 			    src_reg->type == PTR_TO_PACKET_META)) {
3764 			/* pkt_end < pkt_data', pkt_data < pkt_meta' */
3765 			find_good_pkt_pointers(this_branch, src_reg,
3766 					       src_reg->type, false);
3767 		} else {
3768 			return false;
3769 		}
3770 		break;
3771 	case BPF_JGE:
3772 		if ((dst_reg->type == PTR_TO_PACKET &&
3773 		     src_reg->type == PTR_TO_PACKET_END) ||
3774 		    (dst_reg->type == PTR_TO_PACKET_META &&
3775 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
3776 			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
3777 			find_good_pkt_pointers(this_branch, dst_reg,
3778 					       dst_reg->type, true);
3779 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
3780 			    src_reg->type == PTR_TO_PACKET) ||
3781 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
3782 			    src_reg->type == PTR_TO_PACKET_META)) {
3783 			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
3784 			find_good_pkt_pointers(other_branch, src_reg,
3785 					       src_reg->type, false);
3786 		} else {
3787 			return false;
3788 		}
3789 		break;
3790 	case BPF_JLE:
3791 		if ((dst_reg->type == PTR_TO_PACKET &&
3792 		     src_reg->type == PTR_TO_PACKET_END) ||
3793 		    (dst_reg->type == PTR_TO_PACKET_META &&
3794 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
3795 			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
3796 			find_good_pkt_pointers(other_branch, dst_reg,
3797 					       dst_reg->type, false);
3798 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
3799 			    src_reg->type == PTR_TO_PACKET) ||
3800 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
3801 			    src_reg->type == PTR_TO_PACKET_META)) {
3802 			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
3803 			find_good_pkt_pointers(this_branch, src_reg,
3804 					       src_reg->type, true);
3805 		} else {
3806 			return false;
3807 		}
3808 		break;
3809 	default:
3810 		return false;
3811 	}
3812 
3813 	return true;
3814 }
3815 
3816 static int check_cond_jmp_op(struct bpf_verifier_env *env,
3817 			     struct bpf_insn *insn, int *insn_idx)
3818 {
3819 	struct bpf_verifier_state *this_branch = env->cur_state;
3820 	struct bpf_verifier_state *other_branch;
3821 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
3822 	struct bpf_reg_state *dst_reg, *other_branch_regs;
3823 	u8 opcode = BPF_OP(insn->code);
3824 	int err;
3825 
3826 	if (opcode > BPF_JSLE) {
3827 		verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
3828 		return -EINVAL;
3829 	}
3830 
3831 	if (BPF_SRC(insn->code) == BPF_X) {
3832 		if (insn->imm != 0) {
3833 			verbose(env, "BPF_JMP uses reserved fields\n");
3834 			return -EINVAL;
3835 		}
3836 
3837 		/* check src1 operand */
3838 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
3839 		if (err)
3840 			return err;
3841 
3842 		if (is_pointer_value(env, insn->src_reg)) {
3843 			verbose(env, "R%d pointer comparison prohibited\n",
3844 				insn->src_reg);
3845 			return -EACCES;
3846 		}
3847 	} else {
3848 		if (insn->src_reg != BPF_REG_0) {
3849 			verbose(env, "BPF_JMP uses reserved fields\n");
3850 			return -EINVAL;
3851 		}
3852 	}
3853 
3854 	/* check src2 operand */
3855 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3856 	if (err)
3857 		return err;
3858 
3859 	dst_reg = &regs[insn->dst_reg];
3860 
3861 	/* detect if R == 0 where R was initialized to zero earlier */
3862 	if (BPF_SRC(insn->code) == BPF_K &&
3863 	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
3864 	    dst_reg->type == SCALAR_VALUE &&
3865 	    tnum_is_const(dst_reg->var_off)) {
3866 		if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) ||
3867 		    (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) {
3868 			/* if (imm == imm) goto pc+off;
3869 			 * only follow the goto, ignore fall-through
3870 			 */
3871 			*insn_idx += insn->off;
3872 			return 0;
3873 		} else {
3874 			/* if (imm != imm) goto pc+off;
3875 			 * only follow fall-through branch, since
3876 			 * that's where the program will go
3877 			 */
3878 			return 0;
3879 		}
3880 	}
3881 
3882 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
3883 	if (!other_branch)
3884 		return -EFAULT;
3885 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
3886 
3887 	/* detect if we are comparing against a constant value so we can adjust
3888 	 * our min/max values for our dst register.
3889 	 * this is only legit if both are scalars (or pointers to the same
3890 	 * object, I suppose, but we don't support that right now), because
3891 	 * otherwise the different base pointers mean the offsets aren't
3892 	 * comparable.
3893 	 */
3894 	if (BPF_SRC(insn->code) == BPF_X) {
3895 		if (dst_reg->type == SCALAR_VALUE &&
3896 		    regs[insn->src_reg].type == SCALAR_VALUE) {
3897 			if (tnum_is_const(regs[insn->src_reg].var_off))
3898 				reg_set_min_max(&other_branch_regs[insn->dst_reg],
3899 						dst_reg, regs[insn->src_reg].var_off.value,
3900 						opcode);
3901 			else if (tnum_is_const(dst_reg->var_off))
3902 				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
3903 						    &regs[insn->src_reg],
3904 						    dst_reg->var_off.value, opcode);
3905 			else if (opcode == BPF_JEQ || opcode == BPF_JNE)
3906 				/* Comparing for equality, we can combine knowledge */
3907 				reg_combine_min_max(&other_branch_regs[insn->src_reg],
3908 						    &other_branch_regs[insn->dst_reg],
3909 						    &regs[insn->src_reg],
3910 						    &regs[insn->dst_reg], opcode);
3911 		}
3912 	} else if (dst_reg->type == SCALAR_VALUE) {
3913 		reg_set_min_max(&other_branch_regs[insn->dst_reg],
3914 					dst_reg, insn->imm, opcode);
3915 	}
3916 
3917 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
3918 	if (BPF_SRC(insn->code) == BPF_K &&
3919 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
3920 	    dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
3921 		/* Mark all identical map registers in each branch as either
3922 		 * safe or unknown depending on the R == 0 or R != 0 conditional.
3923 		 */
3924 		mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE);
3925 		mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ);
3926 	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
3927 					   this_branch, other_branch) &&
3928 		   is_pointer_value(env, insn->dst_reg)) {
3929 		verbose(env, "R%d pointer comparison prohibited\n",
3930 			insn->dst_reg);
3931 		return -EACCES;
3932 	}
3933 	if (env->log.level)
3934 		print_verifier_state(env, this_branch->frame[this_branch->curframe]);
3935 	return 0;
3936 }
3937 
3938 /* return the map pointer stored inside BPF_LD_IMM64 instruction */
3939 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
3940 {
3941 	u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
3942 
3943 	return (struct bpf_map *) (unsigned long) imm64;
3944 }
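
/* Encoding sketch (illustrative): BPF_LD_IMM64 occupies two struct
 * bpf_insn slots, with the low 32 bits of the constant in insn[0].imm
 * and the high 32 bits in insn[1].imm, which is why the helper above
 * stitches imm64 together from both halves before casting it back to
 * the map pointer stashed by replace_map_fd_with_map_ptr().
 */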
3945 
3946 /* verify BPF_LD_IMM64 instruction */
3947 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
3948 {
3949 	struct bpf_reg_state *regs = cur_regs(env);
3950 	int err;
3951 
3952 	if (BPF_SIZE(insn->code) != BPF_DW) {
3953 		verbose(env, "invalid BPF_LD_IMM insn\n");
3954 		return -EINVAL;
3955 	}
3956 	if (insn->off != 0) {
3957 		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
3958 		return -EINVAL;
3959 	}
3960 
3961 	err = check_reg_arg(env, insn->dst_reg, DST_OP);
3962 	if (err)
3963 		return err;
3964 
3965 	if (insn->src_reg == 0) {
3966 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
3967 
3968 		regs[insn->dst_reg].type = SCALAR_VALUE;
3969 		__mark_reg_known(&regs[insn->dst_reg], imm);
3970 		return 0;
3971 	}
3972 
3973 	/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
3974 	BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
3975 
3976 	regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
3977 	regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
3978 	return 0;
3979 }
3980 
3981 static bool may_access_skb(enum bpf_prog_type type)
3982 {
3983 	switch (type) {
3984 	case BPF_PROG_TYPE_SOCKET_FILTER:
3985 	case BPF_PROG_TYPE_SCHED_CLS:
3986 	case BPF_PROG_TYPE_SCHED_ACT:
3987 		return true;
3988 	default:
3989 		return false;
3990 	}
3991 }
3992 
3993 /* verify safety of LD_ABS|LD_IND instructions:
3994  * - they can only appear in the programs where ctx == skb
3995  * - since they are wrappers of function calls, they scratch R1-R5 registers,
3996  *   preserve R6-R9, and store return value into R0
3997  *
3998  * Implicit input:
3999  *   ctx == skb == R6 == CTX
4000  *
4001  * Explicit input:
4002  *   SRC == any register
4003  *   IMM == 32-bit immediate
4004  *
4005  * Output:
4006  *   R0 - 8/16/32-bit skb data converted to cpu endianness
4007  */
4008 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
4009 {
4010 	struct bpf_reg_state *regs = cur_regs(env);
4011 	u8 mode = BPF_MODE(insn->code);
4012 	int i, err;
4013 
4014 	if (!may_access_skb(env->prog->type)) {
4015 		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
4016 		return -EINVAL;
4017 	}
4018 
4019 	if (!env->ops->gen_ld_abs) {
4020 		verbose(env, "bpf verifier is misconfigured\n");
4021 		return -EINVAL;
4022 	}
4023 
4024 	if (env->subprog_cnt > 1) {
4025 		/* when program has LD_ABS insn JITs and interpreter assume
4026 		 * that r1 == ctx == skb which is not the case for callees
4027 		 * that can have arbitrary arguments. It's problematic
4028 		 * for main prog as well since JITs would need to analyze
4029 		 * all functions in order to make proper register save/restore
4030 		 * decisions in the main prog. Hence disallow LD_ABS with calls
4031 		 */
4032 		verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
4033 		return -EINVAL;
4034 	}
4035 
4036 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
4037 	    BPF_SIZE(insn->code) == BPF_DW ||
4038 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
4039 		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
4040 		return -EINVAL;
4041 	}
4042 
4043 	/* check whether implicit source operand (register R6) is readable */
4044 	err = check_reg_arg(env, BPF_REG_6, SRC_OP);
4045 	if (err)
4046 		return err;
4047 
4048 	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
4049 		verbose(env,
4050 			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
4051 		return -EINVAL;
4052 	}
4053 
4054 	if (mode == BPF_IND) {
4055 		/* check explicit source operand */
4056 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
4057 		if (err)
4058 			return err;
4059 	}
4060 
4061 	/* reset caller saved regs to unreadable */
4062 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
4063 		mark_reg_not_init(env, regs, caller_saved[i]);
4064 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4065 	}
4066 
4067 	/* mark destination R0 register as readable, since it contains
4068 	 * the value fetched from the packet.
4069 	 * Already marked as written above.
4070 	 */
4071 	mark_reg_unknown(env, regs, BPF_REG_0);
4072 	return 0;
4073 }
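
/* Example of an accepted sequence (illustrative): in a socket-filter
 * program where R6 still holds the ctx/skb,
 *   BPF_LD_ABS(BPF_H, 12)  // R0 = 16-bit load from skb data + 12
 * fetches the EtherType field; R1-R5 are scratched as for a helper
 * call and only R0 is readable afterwards.
 */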
4074 
4075 static int check_return_code(struct bpf_verifier_env *env)
4076 {
4077 	struct bpf_reg_state *reg;
4078 	struct tnum range = tnum_range(0, 1);
4079 
4080 	switch (env->prog->type) {
4081 	case BPF_PROG_TYPE_CGROUP_SKB:
4082 	case BPF_PROG_TYPE_CGROUP_SOCK:
4083 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4084 	case BPF_PROG_TYPE_SOCK_OPS:
4085 	case BPF_PROG_TYPE_CGROUP_DEVICE:
4086 		break;
4087 	default:
4088 		return 0;
4089 	}
4090 
4091 	reg = cur_regs(env) + BPF_REG_0;
4092 	if (reg->type != SCALAR_VALUE) {
4093 		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
4094 			reg_type_str[reg->type]);
4095 		return -EINVAL;
4096 	}
4097 
4098 	if (!tnum_in(range, reg->var_off)) {
4099 		verbose(env, "At program exit the register R0 ");
4100 		if (!tnum_is_unknown(reg->var_off)) {
4101 			char tn_buf[48];
4102 
4103 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4104 			verbose(env, "has value %s", tn_buf);
4105 		} else {
4106 			verbose(env, "has unknown scalar value");
4107 		}
4108 		verbose(env, " should have been 0 or 1\n");
4109 		return -EINVAL;
4110 	}
4111 	return 0;
4112 }
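
/* Example rejection (illustrative): for a BPF_PROG_TYPE_CGROUP_SKB
 * program,
 *   BPF_MOV64_IMM(BPF_REG_0, 2),
 *   BPF_EXIT_INSN(),
 * fails the tnum_in(tnum_range(0, 1), ...) test above with
 * "At program exit the register R0 has value (0x2; 0x0)".
 */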
4113 
4114 /* non-recursive DFS pseudo code
4115  * 1  procedure DFS-iterative(G,v):
4116  * 2      label v as discovered
4117  * 3      let S be a stack
4118  * 4      S.push(v)
4119  * 5      while S is not empty
4120  * 6            t <- S.pop()
4121  * 7            if t is what we're looking for:
4122  * 8                return t
4123  * 9            for all edges e in G.adjacentEdges(t) do
4124  * 10               if edge e is already labelled
4125  * 11                   continue with the next edge
4126  * 12               w <- G.adjacentVertex(t,e)
4127  * 13               if vertex w is not discovered and not explored
4128  * 14                   label e as tree-edge
4129  * 15                   label w as discovered
4130  * 16                   S.push(w)
4131  * 17                   continue at 5
4132  * 18               else if vertex w is discovered
4133  * 19                   label e as back-edge
4134  * 20               else
4135  * 21                   // vertex w is explored
4136  * 22                   label e as forward- or cross-edge
4137  * 23           label t as explored
4138  * 24           S.pop()
4139  *
4140  * convention:
4141  * 0x10 - discovered
4142  * 0x11 - discovered and fall-through edge labelled
4143  * 0x12 - discovered and fall-through and branch edges labelled
4144  * 0x20 - explored
4145  */
4146 
4147 enum {
4148 	DISCOVERED = 0x10,
4149 	EXPLORED = 0x20,
4150 	FALLTHROUGH = 1,
4151 	BRANCH = 2,
4152 };
4153 
4154 #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
4155 
4156 static int *insn_stack;	/* stack of insns to process */
4157 static int cur_stack;	/* current stack index */
4158 static int *insn_state;
4159 
4160 /* t, w, e - match pseudo-code above:
4161  * t - index of current instruction
4162  * w - next instruction
4163  * e - edge
4164  */
4165 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
4166 {
4167 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
4168 		return 0;
4169 
4170 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
4171 		return 0;
4172 
4173 	if (w < 0 || w >= env->prog->len) {
4174 		verbose(env, "jump out of range from insn %d to %d\n", t, w);
4175 		return -EINVAL;
4176 	}
4177 
4178 	if (e == BRANCH)
4179 		/* mark branch target for state pruning */
4180 		env->explored_states[w] = STATE_LIST_MARK;
4181 
4182 	if (insn_state[w] == 0) {
4183 		/* tree-edge */
4184 		insn_state[t] = DISCOVERED | e;
4185 		insn_state[w] = DISCOVERED;
4186 		if (cur_stack >= env->prog->len)
4187 			return -E2BIG;
4188 		insn_stack[cur_stack++] = w;
4189 		return 1;
4190 	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
4191 		verbose(env, "back-edge from insn %d to %d\n", t, w);
4192 		return -EINVAL;
4193 	} else if (insn_state[w] == EXPLORED) {
4194 		/* forward- or cross-edge */
4195 		insn_state[t] = DISCOVERED | e;
4196 	} else {
4197 		verbose(env, "insn state internal bug\n");
4198 		return -EFAULT;
4199 	}
4200 	return 0;
4201 }
4202 
4203 /* non-recursive depth-first-search to detect loops in BPF program
4204  * loop == back-edge in directed graph
4205  */
4206 static int check_cfg(struct bpf_verifier_env *env)
4207 {
4208 	struct bpf_insn *insns = env->prog->insnsi;
4209 	int insn_cnt = env->prog->len;
4210 	int ret = 0;
4211 	int i, t;
4212 
4213 	ret = check_subprogs(env);
4214 	if (ret < 0)
4215 		return ret;
4216 
4217 	insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
4218 	if (!insn_state)
4219 		return -ENOMEM;
4220 
4221 	insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
4222 	if (!insn_stack) {
4223 		kfree(insn_state);
4224 		return -ENOMEM;
4225 	}
4226 
4227 	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
4228 	insn_stack[0] = 0; /* 0 is the first instruction */
4229 	cur_stack = 1;
4230 
4231 peek_stack:
4232 	if (cur_stack == 0)
4233 		goto check_state;
4234 	t = insn_stack[cur_stack - 1];
4235 
4236 	if (BPF_CLASS(insns[t].code) == BPF_JMP) {
4237 		u8 opcode = BPF_OP(insns[t].code);
4238 
4239 		if (opcode == BPF_EXIT) {
4240 			goto mark_explored;
4241 		} else if (opcode == BPF_CALL) {
4242 			ret = push_insn(t, t + 1, FALLTHROUGH, env);
4243 			if (ret == 1)
4244 				goto peek_stack;
4245 			else if (ret < 0)
4246 				goto err_free;
4247 			if (t + 1 < insn_cnt)
4248 				env->explored_states[t + 1] = STATE_LIST_MARK;
4249 			if (insns[t].src_reg == BPF_PSEUDO_CALL) {
4250 				env->explored_states[t] = STATE_LIST_MARK;
4251 				ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
4252 				if (ret == 1)
4253 					goto peek_stack;
4254 				else if (ret < 0)
4255 					goto err_free;
4256 			}
4257 		} else if (opcode == BPF_JA) {
4258 			if (BPF_SRC(insns[t].code) != BPF_K) {
4259 				ret = -EINVAL;
4260 				goto err_free;
4261 			}
4262 			/* unconditional jump with single edge */
4263 			ret = push_insn(t, t + insns[t].off + 1,
4264 					FALLTHROUGH, env);
4265 			if (ret == 1)
4266 				goto peek_stack;
4267 			else if (ret < 0)
4268 				goto err_free;
4269 			/* tell verifier to check for equivalent states
4270 			 * after every call and jump
4271 			 */
4272 			if (t + 1 < insn_cnt)
4273 				env->explored_states[t + 1] = STATE_LIST_MARK;
4274 		} else {
4275 			/* conditional jump with two edges */
4276 			env->explored_states[t] = STATE_LIST_MARK;
4277 			ret = push_insn(t, t + 1, FALLTHROUGH, env);
4278 			if (ret == 1)
4279 				goto peek_stack;
4280 			else if (ret < 0)
4281 				goto err_free;
4282 
4283 			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
4284 			if (ret == 1)
4285 				goto peek_stack;
4286 			else if (ret < 0)
4287 				goto err_free;
4288 		}
4289 	} else {
4290 		/* all other non-branch instructions with single
4291 		 * fall-through edge
4292 		 */
4293 		ret = push_insn(t, t + 1, FALLTHROUGH, env);
4294 		if (ret == 1)
4295 			goto peek_stack;
4296 		else if (ret < 0)
4297 			goto err_free;
4298 	}
4299 
4300 mark_explored:
4301 	insn_state[t] = EXPLORED;
4302 	if (cur_stack-- <= 0) {
4303 		verbose(env, "pop stack internal bug\n");
4304 		ret = -EFAULT;
4305 		goto err_free;
4306 	}
4307 	goto peek_stack;
4308 
4309 check_state:
4310 	for (i = 0; i < insn_cnt; i++) {
4311 		if (insn_state[i] != EXPLORED) {
4312 			verbose(env, "unreachable insn %d\n", i);
4313 			ret = -EINVAL;
4314 			goto err_free;
4315 		}
4316 	}
4317 	ret = 0; /* cfg looks good */
4318 
4319 err_free:
4320 	kfree(insn_state);
4321 	kfree(insn_stack);
4322 	return ret;
4323 }
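
/* Example rejection (illustrative): the self-loop
 *   0: BPF_JMP_A(-1)  // jumps back to insn 0
 * revisits insn 0 while it is still DISCOVERED, so push_insn()
 * reports "back-edge from insn 0 to 0" and the program is rejected.
 */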
4324 
4325 /* check %cur's range satisfies %old's */
4326 static bool range_within(struct bpf_reg_state *old,
4327 			 struct bpf_reg_state *cur)
4328 {
4329 	return old->umin_value <= cur->umin_value &&
4330 	       old->umax_value >= cur->umax_value &&
4331 	       old->smin_value <= cur->smin_value &&
4332 	       old->smax_value >= cur->smax_value;
4333 }
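
/* Example (illustrative): old bounds u[0, 100]/s[0, 100] admit current
 * bounds u[10, 50]/s[10, 50], but not u[0, 200]: a wider current range
 * could reach values the already-validated path was never checked
 * against.
 */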
4334 
4335 /* Maximum number of register states that can exist at once */
4336 #define ID_MAP_SIZE	(MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
4337 struct idpair {
4338 	u32 old;
4339 	u32 cur;
4340 };
4341 
4342 /* If in the old state two registers had the same id, then they need to have
4343  * the same id in the new state as well.  But that id could be different from
4344  * the old state, so we need to track the mapping from old to new ids.
4345  * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
4346  * regs with old id 5 must also have new id 9 for the new state to be safe.  But
4347  * regs with a different old id could still have new id 9, we don't care about
4348  * that.
4349  * So we look through our idmap to see if this old id has been seen before.  If
4350  * so, we require the new id to match; otherwise, we add the id pair to the map.
4351  */
4352 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
4353 {
4354 	unsigned int i;
4355 
4356 	for (i = 0; i < ID_MAP_SIZE; i++) {
4357 		if (!idmap[i].old) {
4358 			/* Reached an empty slot; haven't seen this id before */
4359 			idmap[i].old = old_id;
4360 			idmap[i].cur = cur_id;
4361 			return true;
4362 		}
4363 		if (idmap[i].old == old_id)
4364 			return idmap[i].cur == cur_id;
4365 	}
4366 	/* We ran out of idmap slots, which should be impossible */
4367 	WARN_ON_ONCE(1);
4368 	return false;
4369 }
4370 
4371 /* Returns true if (rold safe implies rcur safe) */
4372 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
4373 		    struct idpair *idmap)
4374 {
4375 	bool equal;
4376 
4377 	if (!(rold->live & REG_LIVE_READ))
4378 		/* explored state didn't use this */
4379 		return true;
4380 
4381 	equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, frameno)) == 0;
4382 
4383 	if (rold->type == PTR_TO_STACK)
4384 		/* two stack pointers are equal only if they're pointing to
4385 		 * the same stack frame, since fp-8 in foo != fp-8 in bar
4386 		 */
4387 		return equal && rold->frameno == rcur->frameno;
4388 
4389 	if (equal)
4390 		return true;
4391 
4392 	if (rold->type == NOT_INIT)
4393 		/* explored state can't have used this */
4394 		return true;
4395 	if (rcur->type == NOT_INIT)
4396 		return false;
4397 	switch (rold->type) {
4398 	case SCALAR_VALUE:
4399 		if (rcur->type == SCALAR_VALUE) {
4400 			/* new val must satisfy old val knowledge */
4401 			return range_within(rold, rcur) &&
4402 			       tnum_in(rold->var_off, rcur->var_off);
4403 		} else {
4404 			/* We're trying to use a pointer in place of a scalar.
4405 			 * Even if the scalar was unbounded, this could lead to
4406 			 * pointer leaks because scalars are allowed to leak
4407 			 * while pointers are not. We could make this safe in
4408 			 * special cases if root is calling us, but it's
4409 			 * probably not worth the hassle.
4410 			 */
4411 			return false;
4412 		}
4413 	case PTR_TO_MAP_VALUE:
4414 		/* If the new min/max/var_off satisfy the old ones and
4415 		 * everything else matches, we are OK.
4416 		 * We don't care about the 'id' value, because nothing
4417 		 * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
4418 		 */
4419 		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
4420 		       range_within(rold, rcur) &&
4421 		       tnum_in(rold->var_off, rcur->var_off);
4422 	case PTR_TO_MAP_VALUE_OR_NULL:
4423 		/* a PTR_TO_MAP_VALUE could be safe to use as a
4424 		 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
4425 		 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
4426 		 * checked, doing so could have affected others with the same
4427 		 * id, and we can't check for that because we lost the id when
4428 		 * we converted to a PTR_TO_MAP_VALUE.
4429 		 */
4430 		if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
4431 			return false;
4432 		if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
4433 			return false;
4434 		/* Check our ids match any regs they're supposed to */
4435 		return check_ids(rold->id, rcur->id, idmap);
4436 	case PTR_TO_PACKET_META:
4437 	case PTR_TO_PACKET:
4438 		if (rcur->type != rold->type)
4439 			return false;
4440 		/* We must have at least as much range as the old ptr
4441 		 * did, so that any accesses which were safe before are
4442 		 * still safe.  This is true even if old range < old off,
4443 		 * since someone could have accessed through (ptr - k), or
4444 		 * even done ptr -= k in a register, to get a safe access.
4445 		 */
4446 		if (rold->range > rcur->range)
4447 			return false;
4448 		/* If the offsets don't match, we can't trust our alignment;
4449 		 * nor can we be sure that we won't fall out of range.
4450 		 */
4451 		if (rold->off != rcur->off)
4452 			return false;
4453 		/* id relations must be preserved */
4454 		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
4455 			return false;
4456 		/* new val must satisfy old val knowledge */
4457 		return range_within(rold, rcur) &&
4458 		       tnum_in(rold->var_off, rcur->var_off);
4459 	case PTR_TO_CTX:
4460 	case CONST_PTR_TO_MAP:
4461 	case PTR_TO_PACKET_END:
4462 		/* Only valid matches are exact, which memcmp() above
4463 		 * would have accepted
4464 		 */
4465 	default:
4466 		/* Don't know what's going on, just say it's not safe */
4467 		return false;
4468 	}
4469 
4470 	/* Shouldn't get here; if we do, say it's not safe */
4471 	WARN_ON_ONCE(1);
4472 	return false;
4473 }
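
/* Example (illustrative): an explored SCALAR_VALUE spanning [0, 64]
 * prunes a current scalar spanning [8, 16] (range_within() and
 * tnum_in() both hold), whereas a current [0, 128] must keep being
 * verified, since it can reach values the old state never exercised.
 */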
4474 
4475 static bool stacksafe(struct bpf_func_state *old,
4476 		      struct bpf_func_state *cur,
4477 		      struct idpair *idmap)
4478 {
4479 	int i, spi;
4480 
4481 	/* if explored stack has more populated slots than current stack
4482 	 * such stacks are not equivalent
4483 	 */
4484 	if (old->allocated_stack > cur->allocated_stack)
4485 		return false;
4486 
4487 	/* walk slots of the explored stack and ignore any additional
4488 	 * slots in the current stack, since explored(safe) state
4489 	 * didn't use them
4490 	 */
4491 	for (i = 0; i < old->allocated_stack; i++) {
4492 		spi = i / BPF_REG_SIZE;
4493 
4494 		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ))
4495 			/* explored state didn't use this */
4496 			continue;
4497 
4498 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
4499 			continue;
4500 		/* if old state was safe with misc data in the stack
4501 		 * it will be safe with zero-initialized stack.
4502 		 * The opposite is not true
4503 		 */
4504 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
4505 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
4506 			continue;
4507 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
4508 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
4509 			/* Ex: old explored (safe) state has STACK_SPILL in
4510 			 * this stack slot, but current has STACK_MISC ->
4511 			 * these verifier states are not equivalent,
4512 			 * return false to continue verification of this path
4513 			 */
4514 			return false;
4515 		if (i % BPF_REG_SIZE)
4516 			continue;
4517 		if (old->stack[spi].slot_type[0] != STACK_SPILL)
4518 			continue;
4519 		if (!regsafe(&old->stack[spi].spilled_ptr,
4520 			     &cur->stack[spi].spilled_ptr,
4521 			     idmap))
4522 			/* when explored and current stack slot are both storing
4523 			 * spilled registers, check that the stored pointer types
4524 			 * are the same as well.
4525 			 * Ex: explored safe path could have stored
4526 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
4527 			 * but current path has stored:
4528 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
4529 			 * such verifier states are not equivalent.
4530 			 * return false to continue verification of this path
4531 			 */
4532 			return false;
4533 	}
4534 	return true;
4535 }
4536 
4537 /* compare two verifier states
4538  *
4539  * all states stored in state_list are known to be valid, since
4540  * verifier reached 'bpf_exit' instruction through them
4541  *
4542  * this function is called when verifier exploring different branches of
4543  * execution popped from the state stack. If it sees an old state that has
4544  * more strict register state and more strict stack state then this execution
4545  * branch doesn't need to be explored further, since verifier already
4546  * concluded that more strict state leads to valid finish.
4547  *
4548  * Therefore two states are equivalent if register state is more conservative
4549  * and explored stack state is more conservative than the current one.
4550  * Example:
4551  *       explored                   current
4552  * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
4553  * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
4554  *
4555  * In other words if current stack state (one being explored) has more
4556  * valid slots than old one that already passed validation, it means
4557  * the verifier can stop exploring and conclude that current state is valid too
4558  *
4559  * Similarly with registers. If explored state has register type as invalid
4560  * whereas register type in current state is meaningful, it means that
4561  * the current state will reach 'bpf_exit' instruction safely
4562  */
4563 static bool func_states_equal(struct bpf_func_state *old,
4564 			      struct bpf_func_state *cur)
4565 {
4566 	struct idpair *idmap;
4567 	bool ret = false;
4568 	int i;
4569 
4570 	idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
4571 	/* If we failed to allocate the idmap, just say it's not safe */
4572 	if (!idmap)
4573 		return false;
4574 
4575 	for (i = 0; i < MAX_BPF_REG; i++) {
4576 		if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
4577 			goto out_free;
4578 	}
4579 
4580 	if (!stacksafe(old, cur, idmap))
4581 		goto out_free;
4582 	ret = true;
4583 out_free:
4584 	kfree(idmap);
4585 	return ret;
4586 }
4587 
4588 static bool states_equal(struct bpf_verifier_env *env,
4589 			 struct bpf_verifier_state *old,
4590 			 struct bpf_verifier_state *cur)
4591 {
4592 	int i;
4593 
4594 	if (old->curframe != cur->curframe)
4595 		return false;
4596 
4597 	/* for states to be equal callsites have to be the same
4598 	 * and all frame states need to be equivalent
4599 	 */
4600 	for (i = 0; i <= old->curframe; i++) {
4601 		if (old->frame[i]->callsite != cur->frame[i]->callsite)
4602 			return false;
4603 		if (!func_states_equal(old->frame[i], cur->frame[i]))
4604 			return false;
4605 	}
4606 	return true;
4607 }
4608 
4609 /* A write screens off any subsequent reads; but write marks come from the
4610  * straight-line code between a state and its parent.  When we arrive at an
4611  * equivalent state (jump target or such) we didn't arrive by the straight-line
4612  * code, so read marks in the state must propagate to the parent regardless
4613  * of the state's write marks. That's what 'parent == state->parent' comparison
4614  * in mark_reg_read() and mark_stack_slot_read() is for.
4615  */
4616 static int propagate_liveness(struct bpf_verifier_env *env,
4617 			      const struct bpf_verifier_state *vstate,
4618 			      struct bpf_verifier_state *vparent)
4619 {
4620 	int i, frame, err = 0;
4621 	struct bpf_func_state *state, *parent;
4622 
4623 	if (vparent->curframe != vstate->curframe) {
4624 		WARN(1, "propagate_live: parent frame %d current frame %d\n",
4625 		     vparent->curframe, vstate->curframe);
4626 		return -EFAULT;
4627 	}
4628 	/* Propagate read liveness of registers... */
4629 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
4630 	/* We don't need to worry about FP liveness because it's read-only */
4631 	for (i = 0; i < BPF_REG_FP; i++) {
4632 		if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
4633 			continue;
4634 		if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
4635 			err = mark_reg_read(env, vstate, vparent, i);
4636 			if (err)
4637 				return err;
4638 		}
4639 	}
4640 
4641 	/* ... and stack slots */
4642 	for (frame = 0; frame <= vstate->curframe; frame++) {
4643 		state = vstate->frame[frame];
4644 		parent = vparent->frame[frame];
4645 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
4646 			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
4647 			if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
4648 				continue;
4649 			if (state->stack[i].spilled_ptr.live & REG_LIVE_READ)
4650 				mark_stack_slot_read(env, vstate, vparent, i, frame);
4651 		}
4652 	}
4653 	return err;
4654 }
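/* Illustrative effect (a sketch, not code from this file): suppose the
 * already-explored continuation read r6, so vstate carries REG_LIVE_READ
 * for r6, while the straight-line code leading to vparent never touched
 * r6.  The register loop above copies the read mark up via mark_reg_read(),
 * so pruning decisions taken at vparent's ancestors keep treating r6 as
 * live and won't prune against a state that clobbers it.
 */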
4655 
4656 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
4657 {
4658 	struct bpf_verifier_state_list *new_sl;
4659 	struct bpf_verifier_state_list *sl;
4660 	struct bpf_verifier_state *cur = env->cur_state;
4661 	int i, j, err;
4662 
4663 	sl = env->explored_states[insn_idx];
4664 	if (!sl)
4665 		/* this 'insn_idx' instruction wasn't marked, so we will not
4666 		 * be doing state search here
4667 		 */
4668 		return 0;
4669 
4670 	while (sl != STATE_LIST_MARK) {
4671 		if (states_equal(env, &sl->state, cur)) {
4672 			/* reached equivalent register/stack state,
4673 			 * prune the search.
4674 			 * Registers read by the continuation are read by us.
4675 			 * If we have any write marks in env->cur_state, they
4676 			 * will prevent corresponding reads in the continuation
4677 			 * from reaching our parent (an explored_state).  Our
4678 			 * own state will get the read marks recorded, but
4679 			 * they'll be immediately forgotten as we're pruning
4680 			 * this state and will pop a new one.
4681 			 */
4682 			err = propagate_liveness(env, &sl->state, cur);
4683 			if (err)
4684 				return err;
4685 			return 1;
4686 		}
4687 		sl = sl->next;
4688 	}
4689 
4690 	/* there were no equivalent states, remember current one.
4691 	 * technically the current state is not proven to be safe yet,
4692 	 * but it will either reach the outermost bpf_exit (which means it's safe)
4693 	 * or it will be rejected. Since there are no loops, we won't be
4694 	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
4695 	 * again on the way to bpf_exit
4696 	 */
4697 	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
4698 	if (!new_sl)
4699 		return -ENOMEM;
4700 
4701 	/* add new state to the head of linked list */
4702 	err = copy_verifier_state(&new_sl->state, cur);
4703 	if (err) {
4704 		free_verifier_state(&new_sl->state, false);
4705 		kfree(new_sl);
4706 		return err;
4707 	}
4708 	new_sl->next = env->explored_states[insn_idx];
4709 	env->explored_states[insn_idx] = new_sl;
4710 	/* connect new state to parentage chain */
4711 	cur->parent = &new_sl->state;
4712 	/* clear write marks in current state: the writes we did are not writes
4713 	 * our child did, so they don't screen off its reads from us.
4714 	 * (There are no read marks in current state, because reads always mark
4715 	 * their parent and current state never has children yet.  Only
4716 	 * explored_states can get read marks.)
4717 	 */
4718 	for (i = 0; i < BPF_REG_FP; i++)
4719 		cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;
4720 
4721 	/* all stack frames are accessible from callee, clear them all */
4722 	for (j = 0; j <= cur->curframe; j++) {
4723 		struct bpf_func_state *frame = cur->frame[j];
4724 
4725 		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++)
4726 			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
4727 	}
4728 	return 0;
4729 }
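/* Worked example (hypothetical indices, for illustration): if insn 20 is a
 * jump target, explored_states[20] starts out as STATE_LIST_MARK.  The first
 * path to reach insn 20 finds no equivalent state, snapshots env->cur_state
 * into a new list head and re-parents cur to that snapshot.  A later path
 * arriving at insn 20 with a state that states_equal() judges equivalent is
 * pruned right after propagate_liveness() runs.
 */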
4730 
4731 static int do_check(struct bpf_verifier_env *env)
4732 {
4733 	struct bpf_verifier_state *state;
4734 	struct bpf_insn *insns = env->prog->insnsi;
4735 	struct bpf_reg_state *regs;
4736 	int insn_cnt = env->prog->len, i;
4737 	int insn_idx, prev_insn_idx = 0;
4738 	int insn_processed = 0;
4739 	bool do_print_state = false;
4740 
4741 	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
4742 	if (!state)
4743 		return -ENOMEM;
4744 	state->curframe = 0;
4745 	state->parent = NULL;
4746 	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
4747 	if (!state->frame[0]) {
4748 		kfree(state);
4749 		return -ENOMEM;
4750 	}
4751 	env->cur_state = state;
4752 	init_func_state(env, state->frame[0],
4753 			BPF_MAIN_FUNC /* callsite */,
4754 			0 /* frameno */,
4755 			0 /* subprogno, zero == main subprog */);
4756 	insn_idx = 0;
4757 	for (;;) {
4758 		struct bpf_insn *insn;
4759 		u8 class;
4760 		int err;
4761 
4762 		if (insn_idx >= insn_cnt) {
4763 			verbose(env, "invalid insn idx %d insn_cnt %d\n",
4764 				insn_idx, insn_cnt);
4765 			return -EFAULT;
4766 		}
4767 
4768 		insn = &insns[insn_idx];
4769 		class = BPF_CLASS(insn->code);
4770 
4771 		if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
4772 			verbose(env,
4773 				"BPF program is too large. Processed %d insn\n",
4774 				insn_processed);
4775 			return -E2BIG;
4776 		}
4777 
4778 		err = is_state_visited(env, insn_idx);
4779 		if (err < 0)
4780 			return err;
4781 		if (err == 1) {
4782 			/* found equivalent state, can prune the search */
4783 			if (env->log.level) {
4784 				if (do_print_state)
4785 					verbose(env, "\nfrom %d to %d: safe\n",
4786 						prev_insn_idx, insn_idx);
4787 				else
4788 					verbose(env, "%d: safe\n", insn_idx);
4789 			}
4790 			goto process_bpf_exit;
4791 		}
4792 
4793 		if (need_resched())
4794 			cond_resched();
4795 
4796 		if (env->log.level > 1 || (env->log.level && do_print_state)) {
4797 			if (env->log.level > 1)
4798 				verbose(env, "%d:", insn_idx);
4799 			else
4800 				verbose(env, "\nfrom %d to %d:",
4801 					prev_insn_idx, insn_idx);
4802 			print_verifier_state(env, state->frame[state->curframe]);
4803 			do_print_state = false;
4804 		}
4805 
4806 		if (env->log.level) {
4807 			const struct bpf_insn_cbs cbs = {
4808 				.cb_print	= verbose,
4809 				.private_data	= env,
4810 			};
4811 
4812 			verbose(env, "%d: ", insn_idx);
4813 			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
4814 		}
4815 
4816 		if (bpf_prog_is_dev_bound(env->prog->aux)) {
4817 			err = bpf_prog_offload_verify_insn(env, insn_idx,
4818 							   prev_insn_idx);
4819 			if (err)
4820 				return err;
4821 		}
4822 
4823 		regs = cur_regs(env);
4824 		env->insn_aux_data[insn_idx].seen = true;
4825 		if (class == BPF_ALU || class == BPF_ALU64) {
4826 			err = check_alu_op(env, insn);
4827 			if (err)
4828 				return err;
4829 
4830 		} else if (class == BPF_LDX) {
4831 			enum bpf_reg_type *prev_src_type, src_reg_type;
4832 
4833 			/* check for reserved fields is already done */
4834 
4835 			/* check src operand */
4836 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
4837 			if (err)
4838 				return err;
4839 
4840 			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
4841 			if (err)
4842 				return err;
4843 
4844 			src_reg_type = regs[insn->src_reg].type;
4845 
4846 			/* check that memory (src_reg + off) is readable,
4847 			 * the state of dst_reg will be updated by this func
4848 			 */
4849 			err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
4850 					       BPF_SIZE(insn->code), BPF_READ,
4851 					       insn->dst_reg, false);
4852 			if (err)
4853 				return err;
4854 
4855 			prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
4856 
4857 			if (*prev_src_type == NOT_INIT) {
4858 				/* saw a valid insn
4859 				 * dst_reg = *(u32 *)(src_reg + off)
4860 				 * save type to validate intersecting paths
4861 				 */
4862 				*prev_src_type = src_reg_type;
4863 
4864 			} else if (src_reg_type != *prev_src_type &&
4865 				   (src_reg_type == PTR_TO_CTX ||
4866 				    *prev_src_type == PTR_TO_CTX)) {
4867 				/* An abuser program is trying to use the same insn
4868 				 * dst_reg = *(u32*) (src_reg + off)
4869 				 * with different pointer types:
4870 				 * src_reg == ctx in one branch and
4871 				 * src_reg == stack|map in some other branch.
4872 				 * Reject it.
4873 				 */
4874 				verbose(env, "same insn cannot be used with different pointers\n");
4875 				return -EINVAL;
4876 			}
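			/* For illustration (not a program from this file):
			 *	if (cond)
			 *		r1 = r10 - 8;	   (r1 = stack)
			 *	r0 = *(u32 *)(r1 + 0);	   (r1 = ctx on the
			 *				    other path)
			 * would be rejected above: ctx accesses are later
			 * rewritten per pointer type by convert_ctx_accesses(),
			 * so a single insn cannot serve two pointer types.
			 */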
4877 
4878 		} else if (class == BPF_STX) {
4879 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
4880 
4881 			if (BPF_MODE(insn->code) == BPF_XADD) {
4882 				err = check_xadd(env, insn_idx, insn);
4883 				if (err)
4884 					return err;
4885 				insn_idx++;
4886 				continue;
4887 			}
4888 
4889 			/* check src1 operand */
4890 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
4891 			if (err)
4892 				return err;
4893 			/* check src2 operand */
4894 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
4895 			if (err)
4896 				return err;
4897 
4898 			dst_reg_type = regs[insn->dst_reg].type;
4899 
4900 			/* check that memory (dst_reg + off) is writeable */
4901 			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4902 					       BPF_SIZE(insn->code), BPF_WRITE,
4903 					       insn->src_reg, false);
4904 			if (err)
4905 				return err;
4906 
4907 			prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
4908 
4909 			if (*prev_dst_type == NOT_INIT) {
4910 				*prev_dst_type = dst_reg_type;
4911 			} else if (dst_reg_type != *prev_dst_type &&
4912 				   (dst_reg_type == PTR_TO_CTX ||
4913 				    *prev_dst_type == PTR_TO_CTX)) {
4914 				verbose(env, "same insn cannot be used with different pointers\n");
4915 				return -EINVAL;
4916 			}
4917 
4918 		} else if (class == BPF_ST) {
4919 			if (BPF_MODE(insn->code) != BPF_MEM ||
4920 			    insn->src_reg != BPF_REG_0) {
4921 				verbose(env, "BPF_ST uses reserved fields\n");
4922 				return -EINVAL;
4923 			}
4924 			/* check src operand */
4925 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
4926 			if (err)
4927 				return err;
4928 
4929 			if (is_ctx_reg(env, insn->dst_reg)) {
4930 				verbose(env, "BPF_ST stores into R%d context is not allowed\n",
4931 					insn->dst_reg);
4932 				return -EACCES;
4933 			}
4934 
4935 			/* check that memory (dst_reg + off) is writeable */
4936 			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4937 					       BPF_SIZE(insn->code), BPF_WRITE,
4938 					       -1, false);
4939 			if (err)
4940 				return err;
4941 
4942 		} else if (class == BPF_JMP) {
4943 			u8 opcode = BPF_OP(insn->code);
4944 
4945 			if (opcode == BPF_CALL) {
4946 				if (BPF_SRC(insn->code) != BPF_K ||
4947 				    insn->off != 0 ||
4948 				    (insn->src_reg != BPF_REG_0 &&
4949 				     insn->src_reg != BPF_PSEUDO_CALL) ||
4950 				    insn->dst_reg != BPF_REG_0) {
4951 					verbose(env, "BPF_CALL uses reserved fields\n");
4952 					return -EINVAL;
4953 				}
4954 
4955 				if (insn->src_reg == BPF_PSEUDO_CALL)
4956 					err = check_func_call(env, insn, &insn_idx);
4957 				else
4958 					err = check_helper_call(env, insn->imm, insn_idx);
4959 				if (err)
4960 					return err;
4961 
4962 			} else if (opcode == BPF_JA) {
4963 				if (BPF_SRC(insn->code) != BPF_K ||
4964 				    insn->imm != 0 ||
4965 				    insn->src_reg != BPF_REG_0 ||
4966 				    insn->dst_reg != BPF_REG_0) {
4967 					verbose(env, "BPF_JA uses reserved fields\n");
4968 					return -EINVAL;
4969 				}
4970 
4971 				insn_idx += insn->off + 1;
4972 				continue;
4973 
4974 			} else if (opcode == BPF_EXIT) {
4975 				if (BPF_SRC(insn->code) != BPF_K ||
4976 				    insn->imm != 0 ||
4977 				    insn->src_reg != BPF_REG_0 ||
4978 				    insn->dst_reg != BPF_REG_0) {
4979 					verbose(env, "BPF_EXIT uses reserved fields\n");
4980 					return -EINVAL;
4981 				}
4982 
4983 				if (state->curframe) {
4984 					/* exit from nested function */
4985 					prev_insn_idx = insn_idx;
4986 					err = prepare_func_exit(env, &insn_idx);
4987 					if (err)
4988 						return err;
4989 					do_print_state = true;
4990 					continue;
4991 				}
4992 
4993 				/* eBPF calling convention is such that R0 is used
4994 				 * to return the value from eBPF program.
4995 				 * Make sure that it's readable at this time
4996 				 * of bpf_exit, which means that program wrote
4997 				 * something into it earlier
4998 				 */
4999 				err = check_reg_arg(env, BPF_REG_0, SRC_OP);
5000 				if (err)
5001 					return err;
5002 
5003 				if (is_pointer_value(env, BPF_REG_0)) {
5004 					verbose(env, "R0 leaks addr as return value\n");
5005 					return -EACCES;
5006 				}
5007 
5008 				err = check_return_code(env);
5009 				if (err)
5010 					return err;
5011 process_bpf_exit:
5012 				err = pop_stack(env, &prev_insn_idx, &insn_idx);
5013 				if (err < 0) {
5014 					if (err != -ENOENT)
5015 						return err;
5016 					break;
5017 				} else {
5018 					do_print_state = true;
5019 					continue;
5020 				}
5021 			} else {
5022 				err = check_cond_jmp_op(env, insn, &insn_idx);
5023 				if (err)
5024 					return err;
5025 			}
5026 		} else if (class == BPF_LD) {
5027 			u8 mode = BPF_MODE(insn->code);
5028 
5029 			if (mode == BPF_ABS || mode == BPF_IND) {
5030 				err = check_ld_abs(env, insn);
5031 				if (err)
5032 					return err;
5033 
5034 			} else if (mode == BPF_IMM) {
5035 				err = check_ld_imm(env, insn);
5036 				if (err)
5037 					return err;
5038 
5039 				insn_idx++;
5040 				env->insn_aux_data[insn_idx].seen = true;
5041 			} else {
5042 				verbose(env, "invalid BPF_LD mode\n");
5043 				return -EINVAL;
5044 			}
5045 		} else {
5046 			verbose(env, "unknown insn class %d\n", class);
5047 			return -EINVAL;
5048 		}
5049 
5050 		insn_idx++;
5051 	}
5052 
5053 	verbose(env, "processed %d insns (limit %d), stack depth ",
5054 		insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
5055 	for (i = 0; i < env->subprog_cnt; i++) {
5056 		u32 depth = env->subprog_info[i].stack_depth;
5057 
5058 		verbose(env, "%d", depth);
5059 		if (i + 1 < env->subprog_cnt)
5060 			verbose(env, "+");
5061 	}
5062 	verbose(env, "\n");
5063 	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
5064 	return 0;
5065 }
5066 
5067 static int check_map_prealloc(struct bpf_map *map)
5068 {
5069 	return (map->map_type != BPF_MAP_TYPE_HASH &&
5070 		map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
5071 		map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
5072 		!(map->map_flags & BPF_F_NO_PREALLOC);
5073 }
5074 
5075 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
5076 					struct bpf_map *map,
5077 					struct bpf_prog *prog)
5078 
5079 {
5080 	/* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
5081 	 * preallocated hash maps, since doing memory allocation
5082 	 * in overflow_handler can crash depending on where nmi got
5083 	 * triggered.
5084 	 */
5085 	if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
5086 		if (!check_map_prealloc(map)) {
5087 			verbose(env, "perf_event programs can only use preallocated hash map\n");
5088 			return -EINVAL;
5089 		}
5090 		if (map->inner_map_meta &&
5091 		    !check_map_prealloc(map->inner_map_meta)) {
5092 			verbose(env, "perf_event programs can only use preallocated inner hash map\n");
5093 			return -EINVAL;
5094 		}
5095 	}
5096 
5097 	if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
5098 	    !bpf_offload_prog_map_match(prog, map)) {
5099 		verbose(env, "offload device mismatch between prog and map\n");
5100 		return -EINVAL;
5101 	}
5102 
5103 	return 0;
5104 }
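/* Example (hypothetical userspace attr, for illustration): a map created with
 *	union bpf_attr attr = {
 *		.map_type  = BPF_MAP_TYPE_HASH,
 *		.map_flags = BPF_F_NO_PREALLOC,
 *	};
 * makes check_map_prealloc() return false, so a BPF_PROG_TYPE_PERF_EVENT
 * program referencing that map is rejected by the check above.
 */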
5105 
5106 /* look for pseudo eBPF instructions that access map FDs and
5107  * replace them with actual map pointers
5108  */
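/* Such a pseudo instruction is the two-part BPF_LD_IMM64 form, e.g. as built
 * by userspace loaders (a sketch using the kernel's insn macro):
 *	struct bpf_insn insns[] = {
 *		BPF_LD_IMM64_RAW(BPF_REG_1, BPF_PSEUDO_MAP_FD, map_fd),
 *	};
 * which expands to two 8-byte insns; the second half carries the upper 32
 * bits of the immediate (zero for an fd).  After this pass insn[0].imm and
 * insn[1].imm hold the low and high halves of the kernel 'struct bpf_map *'
 * instead of the user-visible fd.
 */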
5109 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
5110 {
5111 	struct bpf_insn *insn = env->prog->insnsi;
5112 	int insn_cnt = env->prog->len;
5113 	int i, j, err;
5114 
5115 	err = bpf_prog_calc_tag(env->prog);
5116 	if (err)
5117 		return err;
5118 
5119 	for (i = 0; i < insn_cnt; i++, insn++) {
5120 		if (BPF_CLASS(insn->code) == BPF_LDX &&
5121 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
5122 			verbose(env, "BPF_LDX uses reserved fields\n");
5123 			return -EINVAL;
5124 		}
5125 
5126 		if (BPF_CLASS(insn->code) == BPF_STX &&
5127 		    ((BPF_MODE(insn->code) != BPF_MEM &&
5128 		      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
5129 			verbose(env, "BPF_STX uses reserved fields\n");
5130 			return -EINVAL;
5131 		}
5132 
5133 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
5134 			struct bpf_map *map;
5135 			struct fd f;
5136 
5137 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
5138 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
5139 			    insn[1].off != 0) {
5140 				verbose(env, "invalid bpf_ld_imm64 insn\n");
5141 				return -EINVAL;
5142 			}
5143 
5144 			if (insn->src_reg == 0)
5145 				/* valid generic load 64-bit imm */
5146 				goto next_insn;
5147 
5148 			if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
5149 				verbose(env,
5150 					"unrecognized bpf_ld_imm64 insn\n");
5151 				return -EINVAL;
5152 			}
5153 
5154 			f = fdget(insn->imm);
5155 			map = __bpf_map_get(f);
5156 			if (IS_ERR(map)) {
5157 				verbose(env, "fd %d is not pointing to valid bpf_map\n",
5158 					insn->imm);
5159 				return PTR_ERR(map);
5160 			}
5161 
5162 			err = check_map_prog_compatibility(env, map, env->prog);
5163 			if (err) {
5164 				fdput(f);
5165 				return err;
5166 			}
5167 
5168 			/* store map pointer inside BPF_LD_IMM64 instruction */
5169 			insn[0].imm = (u32) (unsigned long) map;
5170 			insn[1].imm = ((u64) (unsigned long) map) >> 32;
5171 
5172 			/* check whether we recorded this map already */
5173 			for (j = 0; j < env->used_map_cnt; j++)
5174 				if (env->used_maps[j] == map) {
5175 					fdput(f);
5176 					goto next_insn;
5177 				}
5178 
5179 			if (env->used_map_cnt >= MAX_USED_MAPS) {
5180 				fdput(f);
5181 				return -E2BIG;
5182 			}
5183 
5184 			/* hold the map. If the program is rejected by verifier,
5185 			 * the map will be released by release_maps() or it
5186 			 * will be used by the valid program until it's unloaded
5187 			 * and all maps are released in free_used_maps()
5188 			 */
5189 			map = bpf_map_inc(map, false);
5190 			if (IS_ERR(map)) {
5191 				fdput(f);
5192 				return PTR_ERR(map);
5193 			}
5194 			env->used_maps[env->used_map_cnt++] = map;
5195 
5196 			if (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE &&
5197 			    bpf_cgroup_storage_assign(env->prog, map)) {
5198 				verbose(env,
5199 					"only one cgroup storage is allowed\n");
5200 				fdput(f);
5201 				return -EBUSY;
5202 			}
5203 
5204 			fdput(f);
5205 next_insn:
5206 			insn++;
5207 			i++;
5208 			continue;
5209 		}
5210 
5211 		/* Basic sanity check before we invest more work here. */
5212 		if (!bpf_opcode_in_insntable(insn->code)) {
5213 			verbose(env, "unknown opcode %02x\n", insn->code);
5214 			return -EINVAL;
5215 		}
5216 	}
5217 
5218 	/* now all pseudo BPF_LD_IMM64 instructions load valid
5219 	 * 'struct bpf_map *' into a register instead of user map_fd.
5220 	 * These pointers will be used later by verifier to validate map access.
5221 	 */
5222 	return 0;
5223 }
5224 
5225 /* drop refcnt of maps used by the rejected program */
5226 static void release_maps(struct bpf_verifier_env *env)
5227 {
5228 	int i;
5229 
5230 	if (env->prog->aux->cgroup_storage)
5231 		bpf_cgroup_storage_release(env->prog,
5232 					   env->prog->aux->cgroup_storage);
5233 
5234 	for (i = 0; i < env->used_map_cnt; i++)
5235 		bpf_map_put(env->used_maps[i]);
5236 }
5237 
5238 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
5239 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
5240 {
5241 	struct bpf_insn *insn = env->prog->insnsi;
5242 	int insn_cnt = env->prog->len;
5243 	int i;
5244 
5245 	for (i = 0; i < insn_cnt; i++, insn++)
5246 		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
5247 			insn->src_reg = 0;
5248 }
5249 
5250 /* single env->prog->insnsi[off] instruction was replaced with the range
5251  * insni[off, off + cnt).  Adjust corresponding insn_aux_data by copying
5252  * [0, off) and [off, end) to new locations, so the patched range stays zero
5253  */
5254 static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
5255 				u32 off, u32 cnt)
5256 {
5257 	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
5258 	int i;
5259 
5260 	if (cnt == 1)
5261 		return 0;
5262 	new_data = vzalloc(array_size(prog_len,
5263 				      sizeof(struct bpf_insn_aux_data)));
5264 	if (!new_data)
5265 		return -ENOMEM;
5266 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
5267 	memcpy(new_data + off + cnt - 1, old_data + off,
5268 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
5269 	for (i = off; i < off + cnt - 1; i++)
5270 		new_data[i].seen = true;
5271 	env->insn_aux_data = new_data;
5272 	vfree(old_data);
5273 	return 0;
5274 }
5275 
5276 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
5277 {
5278 	int i;
5279 
5280 	if (len == 1)
5281 		return;
5282 	/* NOTE: fake 'exit' subprog should be updated as well. */
5283 	for (i = 0; i <= env->subprog_cnt; i++) {
5284 		if (env->subprog_info[i].start < off)
5285 			continue;
5286 		env->subprog_info[i].start += len - 1;
5287 	}
5288 }
5289 
5290 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
5291 					    const struct bpf_insn *patch, u32 len)
5292 {
5293 	struct bpf_prog *new_prog;
5294 
5295 	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
5296 	if (!new_prog)
5297 		return NULL;
5298 	if (adjust_insn_aux_data(env, new_prog->len, off, len))
5299 		return NULL;
5300 	adjust_subprog_starts(env, off, len);
5301 	return new_prog;
5302 }
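/* Typical call pattern (as used by convert_ctx_accesses() and
 * fixup_bpf_calls() below): replace the single insn at i + delta with
 * 'cnt' insns, then rebase the iteration cursor past the insertion:
 *	new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 *	delta	 += cnt - 1;
 *	env->prog = new_prog;
 *	insn	  = new_prog->insnsi + i + delta;
 */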
5303 
5304 /* The verifier does more data flow analysis than llvm and will not
5305  * explore branches that are dead at run time. Malicious programs can
5306  * have dead code too. Therefore replace all dead at-run-time code
5307  * with 'ja -1'.
5308  *
5309  * Just nops are not optimal, e.g. if they would sit at the end of the
5310  * program and through another bug we would manage to jump there, then
5311  * we'd execute beyond program memory. Returning exception
5312  * code also wouldn't work since we can have subprogs where the dead
5313  * code could be located.
5314  */
5315 static void sanitize_dead_code(struct bpf_verifier_env *env)
5316 {
5317 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
5318 	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
5319 	struct bpf_insn *insn = env->prog->insnsi;
5320 	const int insn_cnt = env->prog->len;
5321 	int i;
5322 
5323 	for (i = 0; i < insn_cnt; i++) {
5324 		if (aux_data[i].seen)
5325 			continue;
5326 		memcpy(insn + i, &trap, sizeof(trap));
5327 	}
5328 }
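/* Effect on a dead region (illustrative indices): insns never marked 'seen',
 * say
 *	10: r0 = 1
 *	11: exit
 * become
 *	10: goto -1
 *	11: goto -1
 * so a buggy jump into the region can only spin on the trap insn instead of
 * running stale code or falling off the end of program memory.
 */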
5329 
5330 /* convert load instructions that access fields of 'struct __sk_buff'
5331  * into sequence of instructions that access fields of 'struct sk_buff'
5332  */
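/* Sketch of one such rewrite (the real offsets are prog-type specific and
 * come from ops->convert_ctx_access()):
 *	r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len))
 * becomes, roughly,
 *	r0 = *(u32 *)(r1 + offsetof(struct sk_buff, len))
 * i.e. the user-visible ctx offset is translated to the layout of the real
 * kernel object.
 */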
5333 static int convert_ctx_accesses(struct bpf_verifier_env *env)
5334 {
5335 	const struct bpf_verifier_ops *ops = env->ops;
5336 	int i, cnt, size, ctx_field_size, delta = 0;
5337 	const int insn_cnt = env->prog->len;
5338 	struct bpf_insn insn_buf[16], *insn;
5339 	struct bpf_prog *new_prog;
5340 	enum bpf_access_type type;
5341 	bool is_narrower_load;
5342 	u32 target_size;
5343 
5344 	if (ops->gen_prologue) {
5345 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
5346 					env->prog);
5347 		if (cnt >= ARRAY_SIZE(insn_buf)) {
5348 			verbose(env, "bpf verifier is misconfigured\n");
5349 			return -EINVAL;
5350 		} else if (cnt) {
5351 			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
5352 			if (!new_prog)
5353 				return -ENOMEM;
5354 
5355 			env->prog = new_prog;
5356 			delta += cnt - 1;
5357 		}
5358 	}
5359 
5360 	if (!ops->convert_ctx_access || bpf_prog_is_dev_bound(env->prog->aux))
5361 		return 0;
5362 
5363 	insn = env->prog->insnsi + delta;
5364 
5365 	for (i = 0; i < insn_cnt; i++, insn++) {
5366 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
5367 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
5368 		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
5369 		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
5370 			type = BPF_READ;
5371 		else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
5372 			 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
5373 			 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
5374 			 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
5375 			type = BPF_WRITE;
5376 		else
5377 			continue;
5378 
5379 		if (type == BPF_WRITE &&
5380 		    env->insn_aux_data[i + delta].sanitize_stack_off) {
5381 			struct bpf_insn patch[] = {
5382 				/* Sanitize suspicious stack slot with zero.
5383 				 * There are no memory dependencies for this store,
5384 				 * since it's only using frame pointer and immediate
5385 				 * constant of zero
5386 				 */
5387 				BPF_ST_MEM(BPF_DW, BPF_REG_FP,
5388 					   env->insn_aux_data[i + delta].sanitize_stack_off,
5389 					   0),
5390 				/* the original STX instruction will immediately
5391 				 * overwrite the same stack slot with appropriate value
5392 				 */
5393 				*insn,
5394 			};
5395 
5396 			cnt = ARRAY_SIZE(patch);
5397 			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
5398 			if (!new_prog)
5399 				return -ENOMEM;
5400 
5401 			delta    += cnt - 1;
5402 			env->prog = new_prog;
5403 			insn      = new_prog->insnsi + i + delta;
5404 			continue;
5405 		}
5406 
5407 		if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
5408 			continue;
5409 
5410 		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
5411 		size = BPF_LDST_BYTES(insn);
5412 
5413 		/* If the read access is a narrower load of the field,
5414 		 * convert to a 4/8-byte load, to minimize program type specific
5415 		 * convert_ctx_access changes. If conversion is successful,
5416 		 * we will apply proper mask to the result.
5417 		 */
5418 		is_narrower_load = size < ctx_field_size;
5419 		if (is_narrower_load) {
5420 			u32 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
5421 			u32 off = insn->off;
5422 			u8 size_code;
5423 
5424 			if (type == BPF_WRITE) {
5425 				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
5426 				return -EINVAL;
5427 			}
5428 
5429 			size_code = BPF_H;
5430 			if (ctx_field_size == 4)
5431 				size_code = BPF_W;
5432 			else if (ctx_field_size == 8)
5433 				size_code = BPF_DW;
5434 
5435 			insn->off = off & ~(size_default - 1);
5436 			insn->code = BPF_LDX | BPF_MEM | size_code;
5437 		}
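		/* Narrow-load sketch: a 1-byte read of the low byte of a
		 * 4-byte ctx field,
		 *	r0 = *(u8 *)(r1 + off)
		 * is widened here to
		 *	r0 = *(u32 *)(r1 + (off & ~3))
		 * and masked back down below with
		 *	BPF_ALU32_IMM(BPF_AND, dst_reg, 0xff)
		 * once convert_ctx_access() succeeds and size < target_size.
		 */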
5438 
5439 		target_size = 0;
5440 		cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog,
5441 					      &target_size);
5442 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
5443 		    (ctx_field_size && !target_size)) {
5444 			verbose(env, "bpf verifier is misconfigured\n");
5445 			return -EINVAL;
5446 		}
5447 
5448 		if (is_narrower_load && size < target_size) {
5449 			if (ctx_field_size <= 4)
5450 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
5451 								(1 << size * 8) - 1);
5452 			else
5453 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
5454 								(1 << size * 8) - 1);
5455 		}
5456 
5457 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
5458 		if (!new_prog)
5459 			return -ENOMEM;
5460 
5461 		delta += cnt - 1;
5462 
5463 		/* keep walking new program and skip insns we just inserted */
5464 		env->prog = new_prog;
5465 		insn      = new_prog->insnsi + i + delta;
5466 	}
5467 
5468 	return 0;
5469 }
5470 
5471 static int jit_subprogs(struct bpf_verifier_env *env)
5472 {
5473 	struct bpf_prog *prog = env->prog, **func, *tmp;
5474 	int i, j, subprog_start, subprog_end = 0, len, subprog;
5475 	struct bpf_insn *insn;
5476 	void *old_bpf_func;
5477 	int err = -ENOMEM;
5478 
5479 	if (env->subprog_cnt <= 1)
5480 		return 0;
5481 
5482 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
5483 		if (insn->code != (BPF_JMP | BPF_CALL) ||
5484 		    insn->src_reg != BPF_PSEUDO_CALL)
5485 			continue;
5486 		/* Upon error here we cannot fall back to interpreter but
5487 		 * need a hard reject of the program. Thus -EFAULT is
5488 		 * propagated in any case.
5489 		 */
5490 		subprog = find_subprog(env, i + insn->imm + 1);
5491 		if (subprog < 0) {
5492 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
5493 				  i + insn->imm + 1);
5494 			return -EFAULT;
5495 		}
5496 		/* temporarily remember subprog id inside insn instead of
5497 		 * aux_data, since next loop will split up all insns into funcs
5498 		 */
5499 		insn->off = subprog;
5500 		/* remember original imm in case JIT fails and fallback
5501 		 * to interpreter will be needed
5502 		 */
5503 		env->insn_aux_data[i].call_imm = insn->imm;
5504 		/* point imm to __bpf_call_base+1 from JITs point of view */
5505 		insn->imm = 1;
5506 	}
5507 
5508 	func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
5509 	if (!func)
5510 		goto out_undo_insn;
5511 
5512 	for (i = 0; i < env->subprog_cnt; i++) {
5513 		subprog_start = subprog_end;
5514 		subprog_end = env->subprog_info[i + 1].start;
5515 
5516 		len = subprog_end - subprog_start;
5517 		func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
5518 		if (!func[i])
5519 			goto out_free;
5520 		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
5521 		       len * sizeof(struct bpf_insn));
5522 		func[i]->type = prog->type;
5523 		func[i]->len = len;
5524 		if (bpf_prog_calc_tag(func[i]))
5525 			goto out_free;
5526 		func[i]->is_func = 1;
5527 		/* Use bpf_prog_F_tag to indicate functions in stack traces.
5528 		 * Long term would need debug info to populate names
5529 		 */
5530 		func[i]->aux->name[0] = 'F';
5531 		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
5532 		func[i]->jit_requested = 1;
5533 		func[i] = bpf_int_jit_compile(func[i]);
5534 		if (!func[i]->jited) {
5535 			err = -ENOTSUPP;
5536 			goto out_free;
5537 		}
5538 		cond_resched();
5539 	}
5540 	/* at this point all bpf functions were successfully JITed
5541 	 * now populate all bpf_calls with correct addresses and
5542 	 * run last pass of JIT
5543 	 */
5544 	for (i = 0; i < env->subprog_cnt; i++) {
5545 		insn = func[i]->insnsi;
5546 		for (j = 0; j < func[i]->len; j++, insn++) {
5547 			if (insn->code != (BPF_JMP | BPF_CALL) ||
5548 			    insn->src_reg != BPF_PSEUDO_CALL)
5549 				continue;
5550 			subprog = insn->off;
5551 			insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
5552 				func[subprog]->bpf_func -
5553 				__bpf_call_base;
5554 		}
5555 
5556 		/* we use the aux data to keep a list of the start addresses
5557 		 * of the JITed images for each function in the program
5558 		 *
5559 		 * for some architectures, such as powerpc64, the imm field
5560 		 * might not be large enough to hold the offset of the start
5561 		 * address of the callee's JITed image from __bpf_call_base
5562 		 *
5563 		 * in such cases, we can lookup the start address of a callee
5564 		 * by using its subprog id, available from the off field of
5565 		 * the call instruction, as an index for this list
5566 		 */
5567 		func[i]->aux->func = func;
5568 		func[i]->aux->func_cnt = env->subprog_cnt;
5569 	}
5570 	for (i = 0; i < env->subprog_cnt; i++) {
5571 		old_bpf_func = func[i]->bpf_func;
5572 		tmp = bpf_int_jit_compile(func[i]);
5573 		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
5574 			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
5575 			err = -ENOTSUPP;
5576 			goto out_free;
5577 		}
5578 		cond_resched();
5579 	}
5580 
5581 	/* finally lock prog and jit images for all functions and
5582 	 * populate kallsyms
5583 	 */
5584 	for (i = 0; i < env->subprog_cnt; i++) {
5585 		bpf_prog_lock_ro(func[i]);
5586 		bpf_prog_kallsyms_add(func[i]);
5587 	}
5588 
5589 	/* Last step: make now unused interpreter insns from main
5590 	 * prog consistent for later dump requests, so they can
5591 	 * later look the same as if they were interpreted only.
5592 	 */
5593 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
5594 		if (insn->code != (BPF_JMP | BPF_CALL) ||
5595 		    insn->src_reg != BPF_PSEUDO_CALL)
5596 			continue;
5597 		insn->off = env->insn_aux_data[i].call_imm;
5598 		subprog = find_subprog(env, i + insn->off + 1);
5599 		insn->imm = subprog;
5600 	}
5601 
5602 	prog->jited = 1;
5603 	prog->bpf_func = func[0]->bpf_func;
5604 	prog->aux->func = func;
5605 	prog->aux->func_cnt = env->subprog_cnt;
5606 	return 0;
5607 out_free:
5608 	for (i = 0; i < env->subprog_cnt; i++)
5609 		if (func[i])
5610 			bpf_jit_free(func[i]);
5611 	kfree(func);
5612 out_undo_insn:
5613 	/* cleanup main prog to be interpreted */
5614 	prog->jit_requested = 0;
5615 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
5616 		if (insn->code != (BPF_JMP | BPF_CALL) ||
5617 		    insn->src_reg != BPF_PSEUDO_CALL)
5618 			continue;
5619 		insn->off = 0;
5620 		insn->imm = env->insn_aux_data[i].call_imm;
5621 	}
5622 	return err;
5623 }
5624 
5625 static int fixup_call_args(struct bpf_verifier_env *env)
5626 {
5627 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
5628 	struct bpf_prog *prog = env->prog;
5629 	struct bpf_insn *insn = prog->insnsi;
5630 	int i, depth;
5631 #endif
5632 	int err;
5633 
5634 	err = 0;
5635 	if (env->prog->jit_requested) {
5636 		err = jit_subprogs(env);
5637 		if (err == 0)
5638 			return 0;
5639 		if (err == -EFAULT)
5640 			return err;
5641 	}
5642 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
5643 	for (i = 0; i < prog->len; i++, insn++) {
5644 		if (insn->code != (BPF_JMP | BPF_CALL) ||
5645 		    insn->src_reg != BPF_PSEUDO_CALL)
5646 			continue;
5647 		depth = get_callee_stack_depth(env, insn, i);
5648 		if (depth < 0)
5649 			return depth;
5650 		bpf_patch_call_args(insn, depth);
5651 	}
5652 	err = 0;
5653 #endif
5654 	return err;
5655 }
5656 
5657 /* fixup insn->imm field of bpf_call instructions
5658  * and inline eligible helpers as explicit sequence of BPF instructions
5659  *
5660  * this function is called after eBPF program passed verification
5661  */
5662 static int fixup_bpf_calls(struct bpf_verifier_env *env)
5663 {
5664 	struct bpf_prog *prog = env->prog;
5665 	struct bpf_insn *insn = prog->insnsi;
5666 	const struct bpf_func_proto *fn;
5667 	const int insn_cnt = prog->len;
5668 	const struct bpf_map_ops *ops;
5669 	struct bpf_insn_aux_data *aux;
5670 	struct bpf_insn insn_buf[16];
5671 	struct bpf_prog *new_prog;
5672 	struct bpf_map *map_ptr;
5673 	int i, cnt, delta = 0;
5674 
5675 	for (i = 0; i < insn_cnt; i++, insn++) {
5676 		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
5677 		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
5678 		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
5679 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
5680 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
5681 			struct bpf_insn mask_and_div[] = {
5682 				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
5683 				/* Rx div 0 -> 0 */
5684 				BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
5685 				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
5686 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
5687 				*insn,
5688 			};
5689 			struct bpf_insn mask_and_mod[] = {
5690 				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
5691 				/* Rx mod 0 -> Rx */
5692 				BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
5693 				*insn,
5694 			};
5695 			struct bpf_insn *patchlet;
5696 
5697 			if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
5698 			    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
5699 				patchlet = mask_and_div + (is64 ? 1 : 0);
5700 				cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
5701 			} else {
5702 				patchlet = mask_and_mod + (is64 ? 1 : 0);
5703 				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
5704 			}
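			/* For BPF_ALU64 the leading BPF_MOV32_REG() is
			 * skipped (patchlet starts one insn in): that 32-bit
			 * move only exists to truncate the divisor for
			 * 32-bit ops, so the zero test sees the same value
			 * the div/mod insn will actually use.
			 */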
5705 
5706 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
5707 			if (!new_prog)
5708 				return -ENOMEM;
5709 
5710 			delta    += cnt - 1;
5711 			env->prog = prog = new_prog;
5712 			insn      = new_prog->insnsi + i + delta;
5713 			continue;
5714 		}
5715 
5716 		if (BPF_CLASS(insn->code) == BPF_LD &&
5717 		    (BPF_MODE(insn->code) == BPF_ABS ||
5718 		     BPF_MODE(insn->code) == BPF_IND)) {
5719 			cnt = env->ops->gen_ld_abs(insn, insn_buf);
5720 			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
5721 				verbose(env, "bpf verifier is misconfigured\n");
5722 				return -EINVAL;
5723 			}
5724 
5725 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
5726 			if (!new_prog)
5727 				return -ENOMEM;
5728 
5729 			delta    += cnt - 1;
5730 			env->prog = prog = new_prog;
5731 			insn      = new_prog->insnsi + i + delta;
5732 			continue;
5733 		}
5734 
5735 		if (insn->code != (BPF_JMP | BPF_CALL))
5736 			continue;
5737 		if (insn->src_reg == BPF_PSEUDO_CALL)
5738 			continue;
5739 
5740 		if (insn->imm == BPF_FUNC_get_route_realm)
5741 			prog->dst_needed = 1;
5742 		if (insn->imm == BPF_FUNC_get_prandom_u32)
5743 			bpf_user_rnd_init_once();
5744 		if (insn->imm == BPF_FUNC_override_return)
5745 			prog->kprobe_override = 1;
5746 		if (insn->imm == BPF_FUNC_tail_call) {
5747 			/* If we tail call into other programs, we
5748 			 * cannot make any assumptions since they can
5749 			 * be replaced dynamically during runtime in
5750 			 * the program array.
5751 			 */
5752 			prog->cb_access = 1;
5753 			env->prog->aux->stack_depth = MAX_BPF_STACK;
5754 
5755 			/* mark bpf_tail_call as different opcode to avoid
5756 			 * conditional branch in the interpreter for every normal
5757 			 * call and to prevent accidental JITing by JIT compiler
5758 			 * that doesn't support bpf_tail_call yet
5759 			 */
5760 			insn->imm = 0;
5761 			insn->code = BPF_JMP | BPF_TAIL_CALL;
5762 
5763 			aux = &env->insn_aux_data[i + delta];
5764 			if (!bpf_map_ptr_unpriv(aux))
5765 				continue;
5766 
5767 			/* instead of changing every JIT dealing with tail_call
5768 			 * emit two extra insns:
5769 			 * if (index >= max_entries) goto out;
5770 			 * index &= array->index_mask;
5771 			 * to avoid out-of-bounds cpu speculation
5772 			 */
5773 			if (bpf_map_ptr_poisoned(aux)) {
5774 				verbose(env, "tail_call abusing map_ptr\n");
5775 				return -EINVAL;
5776 			}
5777 
5778 			map_ptr = BPF_MAP_PTR(aux->map_state);
5779 			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
5780 						  map_ptr->max_entries, 2);
5781 			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
5782 						    container_of(map_ptr,
5783 								 struct bpf_array,
5784 								 map)->index_mask);
5785 			insn_buf[2] = *insn;
5786 			cnt = 3;
5787 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
5788 			if (!new_prog)
5789 				return -ENOMEM;
5790 
5791 			delta    += cnt - 1;
5792 			env->prog = prog = new_prog;
5793 			insn      = new_prog->insnsi + i + delta;
5794 			continue;
5795 		}
5796 
5797 		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
5798 		 * and other inlining handlers are currently limited to 64 bit
5799 		 * only.
5800 		 */
5801 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
5802 		    (insn->imm == BPF_FUNC_map_lookup_elem ||
5803 		     insn->imm == BPF_FUNC_map_update_elem ||
5804 		     insn->imm == BPF_FUNC_map_delete_elem)) {
5805 			aux = &env->insn_aux_data[i + delta];
5806 			if (bpf_map_ptr_poisoned(aux))
5807 				goto patch_call_imm;
5808 
5809 			map_ptr = BPF_MAP_PTR(aux->map_state);
5810 			ops = map_ptr->ops;
5811 			if (insn->imm == BPF_FUNC_map_lookup_elem &&
5812 			    ops->map_gen_lookup) {
5813 				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
5814 				if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
5815 					verbose(env, "bpf verifier is misconfigured\n");
5816 					return -EINVAL;
5817 				}
5818 
5819 				new_prog = bpf_patch_insn_data(env, i + delta,
5820 							       insn_buf, cnt);
5821 				if (!new_prog)
5822 					return -ENOMEM;
5823 
5824 				delta    += cnt - 1;
5825 				env->prog = prog = new_prog;
5826 				insn      = new_prog->insnsi + i + delta;
5827 				continue;
5828 			}
5829 
5830 			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
5831 				     (void *(*)(struct bpf_map *map, void *key))NULL));
5832 			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
5833 				     (int (*)(struct bpf_map *map, void *key))NULL));
5834 			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
5835 				     (int (*)(struct bpf_map *map, void *key, void *value,
5836 					      u64 flags))NULL));
5837 			switch (insn->imm) {
5838 			case BPF_FUNC_map_lookup_elem:
5839 				insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
5840 					    __bpf_call_base;
5841 				continue;
5842 			case BPF_FUNC_map_update_elem:
5843 				insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
5844 					    __bpf_call_base;
5845 				continue;
5846 			case BPF_FUNC_map_delete_elem:
5847 				insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
5848 					    __bpf_call_base;
5849 				continue;
5850 			}
5851 
5852 			goto patch_call_imm;
5853 		}
5854 
5855 patch_call_imm:
5856 		fn = env->ops->get_func_proto(insn->imm, env->prog);
5857 		/* all functions that have a prototype and that the verifier
5858 		 * allowed programs to call must be real in-kernel functions
5859 		 */
5860 		if (!fn->func) {
5861 			verbose(env,
5862 				"kernel subsystem misconfigured func %s#%d\n",
5863 				func_id_name(insn->imm), insn->imm);
5864 			return -EFAULT;
5865 		}
5866 		insn->imm = fn->func - __bpf_call_base;
5867 	}
5868 
5869 	return 0;
5870 }
5871 
5872 static void free_states(struct bpf_verifier_env *env)
5873 {
5874 	struct bpf_verifier_state_list *sl, *sln;
5875 	int i;
5876 
5877 	if (!env->explored_states)
5878 		return;
5879 
5880 	for (i = 0; i < env->prog->len; i++) {
5881 		sl = env->explored_states[i];
5882 
5883 		if (sl)
5884 			while (sl != STATE_LIST_MARK) {
5885 				sln = sl->next;
5886 				free_verifier_state(&sl->state, false);
5887 				kfree(sl);
5888 				sl = sln;
5889 			}
5890 	}
5891 
5892 	kfree(env->explored_states);
5893 }
5894 
5895 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
5896 {
5897 	struct bpf_verifier_env *env;
5898 	struct bpf_verifier_log *log;
5899 	int ret = -EINVAL;
5900 
5901 	/* no program is valid */
5902 	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
5903 		return -EINVAL;
5904 
5905 	/* 'struct bpf_verifier_env' can be global, but since it's not small,
5906 	 * allocate/free it every time bpf_check() is called
5907 	 */
5908 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
5909 	if (!env)
5910 		return -ENOMEM;
5911 	log = &env->log;
5912 
5913 	env->insn_aux_data =
5914 		vzalloc(array_size(sizeof(struct bpf_insn_aux_data),
5915 				   (*prog)->len));
5916 	ret = -ENOMEM;
5917 	if (!env->insn_aux_data)
5918 		goto err_free_env;
5919 	env->prog = *prog;
5920 	env->ops = bpf_verifier_ops[env->prog->type];
5921 
5922 	/* grab the mutex to protect a few globals used by verifier */
5923 	mutex_lock(&bpf_verifier_lock);
5924 
5925 	if (attr->log_level || attr->log_buf || attr->log_size) {
5926 		/* user requested verbose verifier output
5927 		 * and supplied buffer to store the verification trace
5928 		 */
5929 		log->level = attr->log_level;
5930 		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
5931 		log->len_total = attr->log_size;
5932 
5933 		ret = -EINVAL;
5934 		/* log attributes have to be sane */
5935 		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
5936 		    !log->level || !log->ubuf)
5937 			goto err_unlock;
5938 	}
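	/* Userspace counterpart (illustrative): a loader typically sets
	 *	attr.log_level = 1;
	 *	attr.log_buf   = (__u64)(unsigned long)buf;
	 *	attr.log_size  = sizeof(buf);	(>= 128, < UINT_MAX >> 8)
	 * in its bpf(BPF_PROG_LOAD, ...) call to capture this trace.
	 */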
5939 
5940 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
5941 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
5942 		env->strict_alignment = true;
5943 
5944 	ret = replace_map_fd_with_map_ptr(env);
5945 	if (ret < 0)
5946 		goto skip_full_check;
5947 
5948 	if (bpf_prog_is_dev_bound(env->prog->aux)) {
5949 		ret = bpf_prog_offload_verifier_prep(env);
5950 		if (ret)
5951 			goto skip_full_check;
5952 	}
5953 
5954 	env->explored_states = kcalloc(env->prog->len,
5955 				       sizeof(struct bpf_verifier_state_list *),
5956 				       GFP_USER);
5957 	ret = -ENOMEM;
5958 	if (!env->explored_states)
5959 		goto skip_full_check;
5960 
5961 	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
5962 
5963 	ret = check_cfg(env);
5964 	if (ret < 0)
5965 		goto skip_full_check;
5966 
5967 	ret = do_check(env);
5968 	if (env->cur_state) {
5969 		free_verifier_state(env->cur_state, true);
5970 		env->cur_state = NULL;
5971 	}
5972 
5973 skip_full_check:
5974 	while (!pop_stack(env, NULL, NULL));
5975 	free_states(env);
5976 
5977 	if (ret == 0)
5978 		sanitize_dead_code(env);
5979 
5980 	if (ret == 0)
5981 		ret = check_max_stack_depth(env);
5982 
5983 	if (ret == 0)
5984 		/* program is valid, convert *(u32*)(ctx + off) accesses */
5985 		ret = convert_ctx_accesses(env);
5986 
5987 	if (ret == 0)
5988 		ret = fixup_bpf_calls(env);
5989 
5990 	if (ret == 0)
5991 		ret = fixup_call_args(env);
5992 
5993 	if (log->level && bpf_verifier_log_full(log))
5994 		ret = -ENOSPC;
5995 	if (log->level && !log->ubuf) {
5996 		ret = -EFAULT;
5997 		goto err_release_maps;
5998 	}
5999 
6000 	if (ret == 0 && env->used_map_cnt) {
6001 		/* if program passed verifier, update used_maps in bpf_prog_info */
6002 		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
6003 							  sizeof(env->used_maps[0]),
6004 							  GFP_KERNEL);
6005 
6006 		if (!env->prog->aux->used_maps) {
6007 			ret = -ENOMEM;
6008 			goto err_release_maps;
6009 		}
6010 
6011 		memcpy(env->prog->aux->used_maps, env->used_maps,
6012 		       sizeof(env->used_maps[0]) * env->used_map_cnt);
6013 		env->prog->aux->used_map_cnt = env->used_map_cnt;
6014 
6015 		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
6016 		 * bpf_ld_imm64 instructions
6017 		 */
6018 		convert_pseudo_ld_imm64(env);
6019 	}
6020 
6021 err_release_maps:
6022 	if (!env->prog->aux->used_maps)
6023 		/* if we didn't copy map pointers into bpf_prog_info, release
6024 		 * them now. Otherwise free_used_maps() will release them.
6025 		 */
6026 		release_maps(env);
6027 	*prog = env->prog;
6028 err_unlock:
6029 	mutex_unlock(&bpf_verifier_lock);
6030 	vfree(env->insn_aux_data);
6031 err_free_env:
6032 	kfree(env);
6033 	return ret;
6034 }
6035