Lines matching refs: env

213 find_linfo(const struct bpf_verifier_env *env, u32 insn_off)  in find_linfo()  argument
219 prog = env->prog; in find_linfo()
256 __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, in bpf_verifier_log_write() argument
261 if (!bpf_verifier_log_needed(&env->log)) in bpf_verifier_log_write()
265 bpf_verifier_vlog(&env->log, fmt, args); in bpf_verifier_log_write()
272 struct bpf_verifier_env *env = private_data; in verbose() local
275 if (!bpf_verifier_log_needed(&env->log)) in verbose()
279 bpf_verifier_vlog(&env->log, fmt, args); in verbose()
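The matches above come from the verifier's logging entry points, which all funnel into bpf_verifier_vlog() and return early when no log buffer was requested; verbose() recovers env from the print callback's private_data pointer. A minimal sketch reconstructed from the fragments listed above, assuming the verifier-internal types and helpers (bpf_verifier_log_needed(), bpf_verifier_vlog()) from the kernel headers:

```c
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	/* skip all formatting work when the loader asked for no log */
	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

/* print callback for instruction dumps; env rides in private_data */
__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
```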
291 __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env, in verbose_linfo() argument
297 if (!bpf_verifier_log_needed(&env->log)) in verbose_linfo()
300 linfo = find_linfo(env, insn_off); in verbose_linfo()
301 if (!linfo || linfo == env->prev_linfo) in verbose_linfo()
308 bpf_verifier_vlog(&env->log, prefix_fmt, args); in verbose_linfo()
312 verbose(env, "%s\n", in verbose_linfo()
313 ltrim(btf_name_by_offset(env->prog->aux->btf, in verbose_linfo()
316 env->prev_linfo = linfo; in verbose_linfo()
412 static void print_liveness(struct bpf_verifier_env *env, in print_liveness() argument
416 verbose(env, "_"); in print_liveness()
418 verbose(env, "r"); in print_liveness()
420 verbose(env, "w"); in print_liveness()
422 verbose(env, "D"); in print_liveness()
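print_liveness() turns a register's liveness bits into the one-letter suffixes seen in verifier state dumps ("_", "r", "w", "D"). A sketch of its shape, inferred from the four verbose() calls above; the REG_LIVE_* flags are assumed from the verifier headers:

```c
static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
	if (live & REG_LIVE_DONE)
		verbose(env, "D");
}
```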
425 static struct bpf_func_state *func(struct bpf_verifier_env *env, in func() argument
428 struct bpf_verifier_state *cur = env->cur_state; in func()
433 static void print_verifier_state(struct bpf_verifier_env *env, in print_verifier_state() argument
441 verbose(env, " frame%d:", state->frameno); in print_verifier_state()
447 verbose(env, " R%d", i); in print_verifier_state()
448 print_liveness(env, reg->live); in print_verifier_state()
449 verbose(env, "=%s", reg_type_str[t]); in print_verifier_state()
451 verbose(env, "P"); in print_verifier_state()
455 verbose(env, "%lld", reg->var_off.value + reg->off); in print_verifier_state()
457 verbose(env, "(id=%d", reg->id); in print_verifier_state()
459 verbose(env, ",ref_obj_id=%d", reg->ref_obj_id); in print_verifier_state()
461 verbose(env, ",off=%d", reg->off); in print_verifier_state()
463 verbose(env, ",r=%d", reg->range); in print_verifier_state()
467 verbose(env, ",ks=%d,vs=%d", in print_verifier_state()
475 verbose(env, ",imm=%llx", reg->var_off.value); in print_verifier_state()
479 verbose(env, ",smin_value=%lld", in print_verifier_state()
483 verbose(env, ",smax_value=%lld", in print_verifier_state()
486 verbose(env, ",umin_value=%llu", in print_verifier_state()
489 verbose(env, ",umax_value=%llu", in print_verifier_state()
495 verbose(env, ",var_off=%s", tn_buf); in print_verifier_state()
498 verbose(env, ")"); in print_verifier_state()
515 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); in print_verifier_state()
516 print_liveness(env, state->stack[i].spilled_ptr.live); in print_verifier_state()
520 verbose(env, "=%s", reg_type_str[t]); in print_verifier_state()
522 verbose(env, "P"); in print_verifier_state()
524 verbose(env, "%lld", reg->var_off.value + reg->off); in print_verifier_state()
526 verbose(env, "=%s", types_buf); in print_verifier_state()
530 verbose(env, " refs=%d", state->refs[0].id); in print_verifier_state()
533 verbose(env, ",%d", state->refs[i].id); in print_verifier_state()
535 verbose(env, "\n"); in print_verifier_state()
620 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx) in acquire_reference_state() argument
622 struct bpf_func_state *state = cur_func(env); in acquire_reference_state()
629 id = ++env->id_gen; in acquire_reference_state()
759 static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st) in update_branch_counts() argument
776 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, in pop_stack() argument
779 struct bpf_verifier_state *cur = env->cur_state; in pop_stack()
780 struct bpf_verifier_stack_elem *elem, *head = env->head; in pop_stack()
783 if (env->head == NULL) in pop_stack()
798 env->head = elem; in pop_stack()
799 env->stack_size--; in pop_stack()
803 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, in push_stack() argument
807 struct bpf_verifier_state *cur = env->cur_state; in push_stack()
817 elem->next = env->head; in push_stack()
818 env->head = elem; in push_stack()
819 env->stack_size++; in push_stack()
824 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { in push_stack()
825 verbose(env, "The sequence of %d jumps is too complex.\n", in push_stack()
826 env->stack_size); in push_stack()
843 free_verifier_state(env->cur_state, true); in push_stack()
844 env->cur_state = NULL; in push_stack()
846 while (!pop_stack(env, NULL, NULL)); in push_stack()
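push_stack() and pop_stack() maintain the verifier's explicit DFS worklist of branch states: each element is linked onto env->head, env->stack_size tracks the depth, and exceeding BPF_COMPLEXITY_LIMIT_JMP_SEQ aborts verification. A condensed sketch of push_stack() reconstructed from the lines above; error handling is abbreviated, and copy_verifier_state() plus the stack-element layout are assumed from the verifier internals:

```c
static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx,
					     bool speculative)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem;

	elem = kzalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	env->head = elem;
	env->stack_size++;

	if (copy_verifier_state(&elem->st, cur))
		goto err;
	elem->st.speculative |= speculative;

	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
		verbose(env, "The sequence of %d jumps is too complex.\n",
			env->stack_size);
		goto err;
	}
	return &elem->st;
err:
	free_verifier_state(env->cur_state, true);
	env->cur_state = NULL;
	/* pop all pending elements and signal failure to the caller */
	while (!pop_stack(env, NULL, NULL));
	return NULL;
}
```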
886 static void mark_reg_known_zero(struct bpf_verifier_env *env, in mark_reg_known_zero() argument
890 verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); in mark_reg_known_zero()
1004 static void mark_reg_unknown(struct bpf_verifier_env *env, in mark_reg_unknown() argument
1008 verbose(env, "mark_reg_unknown(regs, %u)\n", regno); in mark_reg_unknown()
1017 regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ? in mark_reg_unknown()
1027 static void mark_reg_not_init(struct bpf_verifier_env *env, in mark_reg_not_init() argument
1031 verbose(env, "mark_reg_not_init(regs, %u)\n", regno); in mark_reg_not_init()
1041 static void init_reg_state(struct bpf_verifier_env *env, in init_reg_state() argument
1048 mark_reg_not_init(env, regs, i); in init_reg_state()
1056 mark_reg_known_zero(env, regs, BPF_REG_FP); in init_reg_state()
1061 mark_reg_known_zero(env, regs, BPF_REG_1); in init_reg_state()
1065 static void init_func_state(struct bpf_verifier_env *env, in init_func_state() argument
1072 init_reg_state(env, state); in init_func_state()
1087 static int find_subprog(struct bpf_verifier_env *env, int off) in find_subprog() argument
1091 p = bsearch(&off, env->subprog_info, env->subprog_cnt, in find_subprog()
1092 sizeof(env->subprog_info[0]), cmp_subprogs); in find_subprog()
1095 return p - env->subprog_info; in find_subprog()
1099 static int add_subprog(struct bpf_verifier_env *env, int off) in add_subprog() argument
1101 int insn_cnt = env->prog->len; in add_subprog()
1105 verbose(env, "call to invalid destination\n"); in add_subprog()
1108 ret = find_subprog(env, off); in add_subprog()
1111 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { in add_subprog()
1112 verbose(env, "too many subprograms\n"); in add_subprog()
1115 env->subprog_info[env->subprog_cnt++].start = off; in add_subprog()
1116 sort(env->subprog_info, env->subprog_cnt, in add_subprog()
1117 sizeof(env->subprog_info[0]), cmp_subprogs, NULL); in add_subprog()
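find_subprog() and add_subprog() keep env->subprog_info sorted by start instruction so that subprogram lookup is a plain bsearch(). A sketch assembled from the fragments above; cmp_subprogs() is the comparator implied by the bsearch()/sort() calls and is not itself part of the listing:

```c
static int cmp_subprogs(const void *a, const void *b)
{
	return ((struct bpf_subprog_info *)a)->start -
	       ((struct bpf_subprog_info *)b)->start;
}

static int find_subprog(struct bpf_verifier_env *env, int off)
{
	struct bpf_subprog_info *p;

	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
		    sizeof(env->subprog_info[0]), cmp_subprogs);
	if (!p)
		return -ENOENT;
	return p - env->subprog_info;
}

static int add_subprog(struct bpf_verifier_env *env, int off)
{
	int insn_cnt = env->prog->len;

	if (off >= insn_cnt || off < 0) {
		verbose(env, "call to invalid destination\n");
		return -EINVAL;
	}
	if (find_subprog(env, off) >= 0)
		return 0;	/* already known */
	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
		verbose(env, "too many subprograms\n");
		return -E2BIG;
	}
	env->subprog_info[env->subprog_cnt++].start = off;
	sort(env->subprog_info, env->subprog_cnt,
	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
	return 0;
}
```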
1121 static int check_subprogs(struct bpf_verifier_env *env) in check_subprogs() argument
1124 struct bpf_subprog_info *subprog = env->subprog_info; in check_subprogs()
1125 struct bpf_insn *insn = env->prog->insnsi; in check_subprogs()
1126 int insn_cnt = env->prog->len; in check_subprogs()
1129 ret = add_subprog(env, 0); in check_subprogs()
1139 if (!env->allow_ptr_leaks) { in check_subprogs()
1140 verbose(env, "function calls to other bpf functions are allowed for root only\n"); in check_subprogs()
1143 ret = add_subprog(env, i + insn[i].imm + 1); in check_subprogs()
1151 subprog[env->subprog_cnt].start = insn_cnt; in check_subprogs()
1153 if (env->log.level & BPF_LOG_LEVEL2) in check_subprogs()
1154 for (i = 0; i < env->subprog_cnt; i++) in check_subprogs()
1155 verbose(env, "func#%d @%d\n", i, subprog[i].start); in check_subprogs()
1169 verbose(env, "jump out of range from insn %d to %d\n", i, off); in check_subprogs()
1180 verbose(env, "last insn is not an exit or jmp\n"); in check_subprogs()
1185 if (cur_subprog < env->subprog_cnt) in check_subprogs()
1195 static int mark_reg_read(struct bpf_verifier_env *env, in mark_reg_read() argument
1207 verbose(env, "verifier BUG type %s var_off %lld off %d\n", in mark_reg_read()
1238 if (env->longest_mark_read_walk < cnt) in mark_reg_read()
1239 env->longest_mark_read_walk = cnt; in mark_reg_read()
1247 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn, in is_reg64() argument
1337 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn) in insn_has_def32() argument
1342 return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP); in insn_has_def32()
1345 static void mark_insn_zext(struct bpf_verifier_env *env, in mark_insn_zext() argument
1353 env->insn_aux_data[def_idx - 1].zext_dst = true; in mark_insn_zext()
1358 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, in check_reg_arg() argument
1361 struct bpf_verifier_state *vstate = env->cur_state; in check_reg_arg()
1363 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; in check_reg_arg()
1368 verbose(env, "R%d is invalid\n", regno); in check_reg_arg()
1373 rw64 = is_reg64(env, insn, regno, reg, t); in check_reg_arg()
1377 verbose(env, "R%d !read_ok\n", regno); in check_reg_arg()
1385 mark_insn_zext(env, reg); in check_reg_arg()
1387 return mark_reg_read(env, reg, reg->parent, in check_reg_arg()
1392 verbose(env, "frame pointer is read only\n"); in check_reg_arg()
1396 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1; in check_reg_arg()
1398 mark_reg_unknown(env, regs, regno); in check_reg_arg()
1404 static int push_jmp_history(struct bpf_verifier_env *env, in push_jmp_history() argument
1414 p[cnt - 1].idx = env->insn_idx; in push_jmp_history()
1415 p[cnt - 1].prev_idx = env->prev_insn_idx; in push_jmp_history()
1442 static int backtrack_insn(struct bpf_verifier_env *env, int idx, in backtrack_insn() argument
1447 .private_data = env, in backtrack_insn()
1449 struct bpf_insn *insn = env->prog->insnsi + idx; in backtrack_insn()
1459 if (env->log.level & BPF_LOG_LEVEL) { in backtrack_insn()
1460 verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask); in backtrack_insn()
1461 verbose(env, "%d: ", idx); in backtrack_insn()
1462 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); in backtrack_insn()
1518 verbose(env, "BUG spi %d\n", spi); in backtrack_insn()
1537 verbose(env, "BUG spi %d\n", spi); in backtrack_insn()
1556 verbose(env, "BUG regs %x\n", *reg_mask); in backtrack_insn()
1630 static void mark_all_scalars_precise(struct bpf_verifier_env *env, in mark_all_scalars_precise() argument
1660 static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, in __mark_chain_precision() argument
1663 struct bpf_verifier_state *st = env->cur_state; in __mark_chain_precision()
1665 int last_idx = env->insn_idx; in __mark_chain_precision()
1674 if (!env->allow_ptr_leaks) in __mark_chain_precision()
1718 if (env->log.level & BPF_LOG_LEVEL) in __mark_chain_precision()
1719 verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx); in __mark_chain_precision()
1725 err = backtrack_insn(env, i, &reg_mask, &stack_mask); in __mark_chain_precision()
1728 mark_all_scalars_precise(env, st); in __mark_chain_precision()
1742 if (i >= env->prog->len) { in __mark_chain_precision()
1749 verbose(env, "BUG backtracking idx %d\n", i); in __mark_chain_precision()
1788 mark_all_scalars_precise(env, st); in __mark_chain_precision()
1805 if (env->log.level & BPF_LOG_LEVEL) { in __mark_chain_precision()
1806 print_verifier_state(env, func); in __mark_chain_precision()
1807 verbose(env, "parent %s regs=%x stack=%llx marks\n", in __mark_chain_precision()
1823 static int mark_chain_precision(struct bpf_verifier_env *env, int regno) in mark_chain_precision() argument
1825 return __mark_chain_precision(env, regno, -1); in mark_chain_precision()
1828 static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi) in mark_chain_precision_stack() argument
1830 return __mark_chain_precision(env, -1, spi); in mark_chain_precision_stack()
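__mark_chain_precision() walks the recorded jump history backwards to mark which scalars must keep precise bounds; the two wrappers above only select whether a register (regno) or a stack slot (spi) is the starting point, with -1 meaning "not used". Their shape, as listed:

```c
static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
{
	return __mark_chain_precision(env, regno, -1);	/* spi unused */
}

static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
{
	return __mark_chain_precision(env, -1, spi);	/* regno unused */
}
```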
1884 static int check_stack_write(struct bpf_verifier_env *env, in check_stack_write() argument
1890 u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg; in check_stack_write()
1900 if (!env->allow_ptr_leaks && in check_stack_write()
1903 verbose(env, "attempt to corrupt spilled pointer on stack\n"); in check_stack_write()
1907 cur = env->cur_state->frame[env->cur_state->curframe]; in check_stack_write()
1912 !register_is_null(reg) && env->allow_ptr_leaks) { in check_stack_write()
1920 err = mark_chain_precision(env, value_regno); in check_stack_write()
1928 verbose_linfo(env, insn_idx, "; "); in check_stack_write()
1929 verbose(env, "invalid size of register spill\n"); in check_stack_write()
1934 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); in check_stack_write()
1938 if (!env->allow_ptr_leaks) { in check_stack_write()
1950 int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off; in check_stack_write()
1965 verbose(env, in check_stack_write()
1998 err = mark_chain_precision(env, value_regno); in check_stack_write()
2012 static int check_stack_read(struct bpf_verifier_env *env, in check_stack_read() argument
2016 struct bpf_verifier_state *vstate = env->cur_state; in check_stack_read()
2023 verbose(env, "invalid read from stack off %d+0 size %d\n", in check_stack_read()
2033 verbose_linfo(env, env->insn_idx, "; "); in check_stack_read()
2034 verbose(env, "invalid size of register fill\n"); in check_stack_read()
2038 mark_reg_unknown(env, state->regs, value_regno); in check_stack_read()
2041 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in check_stack_read()
2046 verbose(env, "corrupted spill memory\n"); in check_stack_read()
2060 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in check_stack_read()
2071 verbose(env, "invalid read from stack off %d+%d size %d\n", in check_stack_read()
2075 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in check_stack_read()
2095 mark_reg_unknown(env, state->regs, value_regno); in check_stack_read()
2103 static int check_stack_access(struct bpf_verifier_env *env, in check_stack_access() argument
2115 verbose(env, "variable stack access var_off=%s off=%d size=%d\n", in check_stack_access()
2121 verbose(env, "invalid stack off=%d size=%d\n", off, size); in check_stack_access()
2128 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno, in check_map_access_type() argument
2131 struct bpf_reg_state *regs = cur_regs(env); in check_map_access_type()
2136 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n", in check_map_access_type()
2142 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n", in check_map_access_type()
2151 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, in __check_map_access() argument
2154 struct bpf_reg_state *regs = cur_regs(env); in __check_map_access()
2159 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", in __check_map_access()
2167 static int check_map_access(struct bpf_verifier_env *env, u32 regno, in check_map_access() argument
2170 struct bpf_verifier_state *vstate = env->cur_state; in check_map_access()
2179 if (env->log.level & BPF_LOG_LEVEL) in check_map_access()
2180 print_verifier_state(env, state); in check_map_access()
2192 …verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n… in check_map_access()
2196 err = __check_map_access(env, regno, reg->smin_value + off, size, in check_map_access()
2199 verbose(env, "R%d min value is outside of the array range\n", in check_map_access()
2209 …verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n… in check_map_access()
2213 err = __check_map_access(env, regno, reg->umax_value + off, size, in check_map_access()
2216 verbose(env, "R%d max value is outside of the array range\n", in check_map_access()
2229 verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n"); in check_map_access()
2238 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, in may_access_direct_pkt_data() argument
2242 switch (env->prog->type) { in may_access_direct_pkt_data()
2264 env->seen_direct_write = true; in may_access_direct_pkt_data()
2269 env->seen_direct_write = true; in may_access_direct_pkt_data()
2278 static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, in __check_packet_access() argument
2281 struct bpf_reg_state *regs = cur_regs(env); in __check_packet_access()
2286 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", in __check_packet_access()
2293 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, in check_packet_access() argument
2296 struct bpf_reg_state *regs = cur_regs(env); in check_packet_access()
2309 …verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n… in check_packet_access()
2313 err = __check_packet_access(env, regno, off, size, zero_size_allowed); in check_packet_access()
2315 verbose(env, "R%d offset is outside of the packet\n", regno); in check_packet_access()
2325 env->prog->aux->max_pkt_offset = in check_packet_access()
2326 max_t(u32, env->prog->aux->max_pkt_offset, in check_packet_access()
2333 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, in check_ctx_access() argument
2340 if (env->ops->is_valid_access && in check_ctx_access()
2341 env->ops->is_valid_access(off, size, t, env->prog, &info)) { in check_ctx_access()
2351 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; in check_ctx_access()
2353 if (env->prog->aux->max_ctx_offset < off + size) in check_ctx_access()
2354 env->prog->aux->max_ctx_offset = off + size; in check_ctx_access()
2358 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); in check_ctx_access()
2362 static int check_flow_keys_access(struct bpf_verifier_env *env, int off, in check_flow_keys_access() argument
2367 verbose(env, "invalid access to flow keys off=%d size=%d\n", in check_flow_keys_access()
2374 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, in check_sock_access() argument
2378 struct bpf_reg_state *regs = cur_regs(env); in check_sock_access()
2384 …verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n… in check_sock_access()
2408 env->insn_aux_data[insn_idx].ctx_field_size = in check_sock_access()
2413 verbose(env, "R%d invalid %s access off=%d size=%d\n", in check_sock_access()
2428 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) in reg_state() argument
2430 return cur_regs(env) + regno; in reg_state()
2433 static bool is_pointer_value(struct bpf_verifier_env *env, int regno) in is_pointer_value() argument
2435 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); in is_pointer_value()
2438 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) in is_ctx_reg() argument
2440 const struct bpf_reg_state *reg = reg_state(env, regno); in is_ctx_reg()
2445 static bool is_sk_reg(struct bpf_verifier_env *env, int regno) in is_sk_reg() argument
2447 const struct bpf_reg_state *reg = reg_state(env, regno); in is_sk_reg()
2452 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) in is_pkt_reg() argument
2454 const struct bpf_reg_state *reg = reg_state(env, regno); in is_pkt_reg()
2459 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno) in is_flow_key_reg() argument
2461 const struct bpf_reg_state *reg = reg_state(env, regno); in is_flow_key_reg()
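The small helpers above give check_mem_access() and check_xadd() a uniform way to ask what kind of pointer a register currently holds. A sketch of their shape following the reg_state() accessor listed above; the type_is_pkt_pointer() predicate is an assumption standing in for the verifier's own packet-pointer test:

```c
static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
{
	return cur_regs(env) + regno;
}

static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	return reg->type == PTR_TO_CTX;
}

static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	return type_is_pkt_pointer(reg->type);
}

static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	return reg->type == PTR_TO_FLOW_KEYS;
}
```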
2467 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, in check_pkt_ptr_alignment() argument
2493 verbose(env, in check_pkt_ptr_alignment()
2502 static int check_generic_ptr_alignment(struct bpf_verifier_env *env, in check_generic_ptr_alignment() argument
2518 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", in check_generic_ptr_alignment()
2526 static int check_ptr_alignment(struct bpf_verifier_env *env, in check_ptr_alignment() argument
2530 bool strict = env->strict_alignment || strict_alignment_once; in check_ptr_alignment()
2539 return check_pkt_ptr_alignment(env, reg, off, size, strict); in check_ptr_alignment()
2572 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, in check_ptr_alignment()
2576 static int update_stack_depth(struct bpf_verifier_env *env, in update_stack_depth() argument
2580 u16 stack = env->subprog_info[func->subprogno].stack_depth; in update_stack_depth()
2586 env->subprog_info[func->subprogno].stack_depth = -off; in update_stack_depth()
2596 static int check_max_stack_depth(struct bpf_verifier_env *env) in check_max_stack_depth() argument
2599 struct bpf_subprog_info *subprog = env->subprog_info; in check_max_stack_depth()
2600 struct bpf_insn *insn = env->prog->insnsi; in check_max_stack_depth()
2610 verbose(env, "combined stack size of %d calls is %d. Too large\n", in check_max_stack_depth()
2627 idx = find_subprog(env, i); in check_max_stack_depth()
2635 verbose(env, "the call stack of %d frames is too deep !\n", in check_max_stack_depth()
2654 static int get_callee_stack_depth(struct bpf_verifier_env *env, in get_callee_stack_depth() argument
2659 subprog = find_subprog(env, start); in get_callee_stack_depth()
2665 return env->subprog_info[subprog].stack_depth; in get_callee_stack_depth()
2669 static int check_ctx_reg(struct bpf_verifier_env *env, in check_ctx_reg() argument
2677 verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n", in check_ctx_reg()
2686 verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf); in check_ctx_reg()
2693 static int check_tp_buffer_access(struct bpf_verifier_env *env, in check_tp_buffer_access() argument
2698 verbose(env, in check_tp_buffer_access()
2707 verbose(env, in check_tp_buffer_access()
2712 if (off + size > env->prog->aux->max_tp_access) in check_tp_buffer_access()
2713 env->prog->aux->max_tp_access = off + size; in check_tp_buffer_access()
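check_tp_buffer_access() only needs to validate a constant, non-negative offset into the raw tracepoint buffer and remember the widest access in env->prog->aux->max_tp_access. A sketch based on the fragments above; the exact error strings are reconstructed and may differ in detail:

```c
static int check_tp_buffer_access(struct bpf_verifier_env *env,
				  const struct bpf_reg_state *reg,
				  int regno, int off, int size)
{
	if (off < 0) {
		verbose(env,
			"R%d invalid tracepoint buffer access: off=%d, size=%d",
			regno, off, size);
		return -EACCES;
	}
	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env,
			"R%d invalid variable buffer offset: off=%d, var_off=%s",
			regno, off, tn_buf);
		return -EACCES;
	}
	/* track the widest access so the attach path can size the buffer */
	if (off + size > env->prog->aux->max_tp_access)
		env->prog->aux->max_tp_access = off + size;

	return 0;
}
```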
2748 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, in check_mem_access() argument
2752 struct bpf_reg_state *regs = cur_regs(env); in check_mem_access()
2762 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); in check_mem_access()
2771 is_pointer_value(env, value_regno)) { in check_mem_access()
2772 verbose(env, "R%d leaks addr into map\n", value_regno); in check_mem_access()
2775 err = check_map_access_type(env, regno, off, size, t); in check_mem_access()
2778 err = check_map_access(env, regno, off, size, false); in check_mem_access()
2780 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
2786 is_pointer_value(env, value_regno)) { in check_mem_access()
2787 verbose(env, "R%d leaks addr into ctx\n", value_regno); in check_mem_access()
2791 err = check_ctx_reg(env, reg, regno); in check_mem_access()
2795 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type); in check_mem_access()
2802 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
2804 mark_reg_known_zero(env, regs, in check_mem_access()
2807 regs[value_regno].id = ++env->id_gen; in check_mem_access()
2820 err = check_stack_access(env, reg, off, size); in check_mem_access()
2824 state = func(env, reg); in check_mem_access()
2825 err = update_stack_depth(env, state, off); in check_mem_access()
2830 err = check_stack_write(env, state, off, size, in check_mem_access()
2833 err = check_stack_read(env, state, off, size, in check_mem_access()
2836 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { in check_mem_access()
2837 verbose(env, "cannot write into packet\n"); in check_mem_access()
2841 is_pointer_value(env, value_regno)) { in check_mem_access()
2842 verbose(env, "R%d leaks addr into packet\n", in check_mem_access()
2846 err = check_packet_access(env, regno, off, size, false); in check_mem_access()
2848 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
2851 is_pointer_value(env, value_regno)) { in check_mem_access()
2852 verbose(env, "R%d leaks addr into flow keys\n", in check_mem_access()
2857 err = check_flow_keys_access(env, off, size); in check_mem_access()
2859 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
2862 verbose(env, "R%d cannot write into %s\n", in check_mem_access()
2866 err = check_sock_access(env, insn_idx, regno, off, size, t); in check_mem_access()
2868 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
2870 err = check_tp_buffer_access(env, reg, regno, off, size); in check_mem_access()
2872 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
2874 verbose(env, "R%d invalid mem access '%s'\n", regno, in check_mem_access()
2887 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) in check_xadd() argument
2893 verbose(env, "BPF_XADD uses reserved fields\n"); in check_xadd()
2898 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_xadd()
2903 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_xadd()
2907 if (is_pointer_value(env, insn->src_reg)) { in check_xadd()
2908 verbose(env, "R%d leaks addr into mem\n", insn->src_reg); in check_xadd()
2912 if (is_ctx_reg(env, insn->dst_reg) || in check_xadd()
2913 is_pkt_reg(env, insn->dst_reg) || in check_xadd()
2914 is_flow_key_reg(env, insn->dst_reg) || in check_xadd()
2915 is_sk_reg(env, insn->dst_reg)) { in check_xadd()
2916 verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", in check_xadd()
2918 reg_type_str[reg_state(env, insn->dst_reg)->type]); in check_xadd()
2923 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_xadd()
2929 return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_xadd()
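check_xadd() models BPF_XADD as a read-modify-write: both registers must be initialized, the source must not leak a pointer value, the destination must be a plain memory pointer (not ctx, packet, flow keys, or a socket), and the target is then checked once as a load and once as a store. A condensed sketch from the lines above; the trailing value_regno/strict arguments to check_mem_access() are reconstructed and may differ in detail:

```c
static int check_xadd(struct bpf_verifier_env *env, int insn_idx,
		      struct bpf_insn *insn)
{
	int err;

	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
	    insn->imm != 0) {
		verbose(env, "BPF_XADD uses reserved fields\n");
		return -EINVAL;
	}

	/* both operands must already hold valid values */
	err = check_reg_arg(env, insn->src_reg, SRC_OP);
	if (err)
		return err;
	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	if (is_pointer_value(env, insn->src_reg)) {
		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
		return -EACCES;
	}

	if (is_ctx_reg(env, insn->dst_reg) ||
	    is_pkt_reg(env, insn->dst_reg) ||
	    is_flow_key_reg(env, insn->dst_reg) ||
	    is_sk_reg(env, insn->dst_reg)) {
		verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
			insn->dst_reg,
			reg_type_str[reg_state(env, insn->dst_reg)->type]);
		return -EACCES;
	}

	/* check whether the atomic add can read the memory ... */
	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
			       BPF_SIZE(insn->code), BPF_READ, -1, true);
	if (err)
		return err;

	/* ... and write the result back to the same location */
	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
				BPF_SIZE(insn->code), BPF_WRITE, -1, true);
}
```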
2933 static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno, in __check_stack_boundary() argument
2937 struct bpf_reg_state *reg = reg_state(env, regno); in __check_stack_boundary()
2942 verbose(env, "invalid stack type R%d off=%d access_size=%d\n", in __check_stack_boundary()
2948 verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n", in __check_stack_boundary()
2962 static int check_stack_boundary(struct bpf_verifier_env *env, int regno, in check_stack_boundary() argument
2966 struct bpf_reg_state *reg = reg_state(env, regno); in check_stack_boundary()
2967 struct bpf_func_state *state = func(env, reg); in check_stack_boundary()
2976 verbose(env, "R%d type=%s expected=%s\n", regno, in check_stack_boundary()
2984 err = __check_stack_boundary(env, regno, min_off, access_size, in check_stack_boundary()
2994 if (!env->allow_ptr_leaks) { in check_stack_boundary()
2998 verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n", in check_stack_boundary()
3013 verbose(env, "R%d unbounded indirect variable offset stack access\n", in check_stack_boundary()
3019 err = __check_stack_boundary(env, regno, min_off, access_size, in check_stack_boundary()
3022 verbose(env, "R%d min value is outside of stack bound\n", in check_stack_boundary()
3026 err = __check_stack_boundary(env, regno, max_off, access_size, in check_stack_boundary()
3029 verbose(env, "R%d max value is outside of stack bound\n", in check_stack_boundary()
3066 verbose(env, "invalid indirect read from stack off %d+%d size %d\n", in check_stack_boundary()
3072 verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n", in check_stack_boundary()
3080 mark_reg_read(env, &state->stack[spi].spilled_ptr, in check_stack_boundary()
3084 return update_stack_depth(env, state, min_off); in check_stack_boundary()
3087 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, in check_helper_mem_access() argument
3091 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in check_helper_mem_access()
3096 return check_packet_access(env, regno, reg->off, access_size, in check_helper_mem_access()
3099 if (check_map_access_type(env, regno, reg->off, access_size, in check_helper_mem_access()
3103 return check_map_access(env, regno, reg->off, access_size, in check_helper_mem_access()
3106 return check_stack_boundary(env, regno, access_size, in check_helper_mem_access()
3130 static int process_spin_lock(struct bpf_verifier_env *env, int regno, in process_spin_lock() argument
3133 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in process_spin_lock()
3134 struct bpf_verifier_state *cur = env->cur_state; in process_spin_lock()
3140 verbose(env, "R%d is not a pointer to map_value\n", regno); in process_spin_lock()
3144 verbose(env, in process_spin_lock()
3150 verbose(env, in process_spin_lock()
3157 verbose(env, in process_spin_lock()
3161 verbose(env, in process_spin_lock()
3165 verbose(env, in process_spin_lock()
3171 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n", in process_spin_lock()
3177 verbose(env, in process_spin_lock()
3184 verbose(env, "bpf_spin_unlock without taking a lock\n"); in process_spin_lock()
3188 verbose(env, "bpf_spin_unlock of different lock\n"); in process_spin_lock()
3225 static int check_func_arg(struct bpf_verifier_env *env, u32 regno, in check_func_arg() argument
3229 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in check_func_arg()
3236 err = check_reg_arg(env, regno, SRC_OP); in check_func_arg()
3241 if (is_pointer_value(env, regno)) { in check_func_arg()
3242 verbose(env, "R%d leaks addr into helper function\n", in check_func_arg()
3250 !may_access_direct_pkt_data(env, meta, BPF_READ)) { in check_func_arg()
3251 verbose(env, "helper access to the packet is not allowed\n"); in check_func_arg()
3280 err = check_ctx_reg(env, reg, regno); in check_func_arg()
3290 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", in check_func_arg()
3303 if (process_spin_lock(env, regno, true)) in check_func_arg()
3306 if (process_spin_lock(env, regno, false)) in check_func_arg()
3309 verbose(env, "verifier internal error\n"); in check_func_arg()
3333 verbose(env, "unsupported arg_type %d\n", arg_type); in check_func_arg()
3351 verbose(env, "invalid map_ptr to access map->key\n"); in check_func_arg()
3354 err = check_helper_mem_access(env, regno, in check_func_arg()
3366 verbose(env, "invalid map_ptr to access map->value\n"); in check_func_arg()
3370 err = check_helper_mem_access(env, regno, in check_func_arg()
3394 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", in check_func_arg()
3400 err = check_helper_mem_access(env, regno - 1, 0, in check_func_arg()
3408 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", in check_func_arg()
3412 err = check_helper_mem_access(env, regno - 1, in check_func_arg()
3416 err = mark_chain_precision(env, regno); in check_func_arg()
3420 err = check_helper_mem_access(env, regno, size, false, meta); in check_func_arg()
3423 err = check_ptr_alignment(env, reg, 0, size, true); in check_func_arg()
3428 verbose(env, "R%d type=%s expected=%s\n", regno, in check_func_arg()
3433 static int check_map_func_compatibility(struct bpf_verifier_env *env, in check_map_func_compatibility() argument
3527 if (env->subprog_cnt > 1) { in check_map_func_compatibility()
3528 verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n"); in check_map_func_compatibility()
3593 verbose(env, "cannot pass map_type %d into func %s#%d\n", in check_map_func_compatibility()
3684 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, in __clear_all_pkt_pointers() argument
3692 mark_reg_unknown(env, regs, i); in __clear_all_pkt_pointers()
3702 static void clear_all_pkt_pointers(struct bpf_verifier_env *env) in clear_all_pkt_pointers() argument
3704 struct bpf_verifier_state *vstate = env->cur_state; in clear_all_pkt_pointers()
3708 __clear_all_pkt_pointers(env, vstate->frame[i]); in clear_all_pkt_pointers()
3711 static void release_reg_references(struct bpf_verifier_env *env, in release_reg_references() argument
3720 mark_reg_unknown(env, regs, i); in release_reg_references()
3733 static int release_reference(struct bpf_verifier_env *env, in release_reference() argument
3736 struct bpf_verifier_state *vstate = env->cur_state; in release_reference()
3740 err = release_reference_state(cur_func(env), ref_obj_id); in release_reference()
3745 release_reg_references(env, vstate->frame[i], ref_obj_id); in release_reference()
3750 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, in check_func_call() argument
3753 struct bpf_verifier_state *state = env->cur_state; in check_func_call()
3758 verbose(env, "the call stack of %d frames is too deep\n", in check_func_call()
3764 subprog = find_subprog(env, target_insn + 1); in check_func_call()
3766 verbose(env, "verifier bug. No program starts at insn %d\n", in check_func_call()
3773 verbose(env, "verifier bug. Frame %d already allocated\n", in check_func_call()
3787 init_func_state(env, callee, in check_func_call()
3806 mark_reg_not_init(env, caller->regs, caller_saved[i]); in check_func_call()
3807 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); in check_func_call()
3816 if (env->log.level & BPF_LOG_LEVEL) { in check_func_call()
3817 verbose(env, "caller:\n"); in check_func_call()
3818 print_verifier_state(env, caller); in check_func_call()
3819 verbose(env, "callee:\n"); in check_func_call()
3820 print_verifier_state(env, callee); in check_func_call()
3825 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) in prepare_func_exit() argument
3827 struct bpf_verifier_state *state = env->cur_state; in prepare_func_exit()
3841 verbose(env, "cannot return stack pointer to the caller\n"); in prepare_func_exit()
3856 if (env->log.level & BPF_LOG_LEVEL) { in prepare_func_exit()
3857 verbose(env, "returning from callee:\n"); in prepare_func_exit()
3858 print_verifier_state(env, callee); in prepare_func_exit()
3859 verbose(env, "to caller at %d:\n", *insn_idx); in prepare_func_exit()
3860 print_verifier_state(env, caller); in prepare_func_exit()
3886 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, in record_func_map() argument
3889 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; in record_func_map()
3902 verbose(env, "kernel subsystem misconfigured verifier\n"); in record_func_map()
3915 verbose(env, "write into map forbidden\n"); in record_func_map()
3928 static int check_reference_leak(struct bpf_verifier_env *env) in check_reference_leak() argument
3930 struct bpf_func_state *state = cur_func(env); in check_reference_leak()
3934 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n", in check_reference_leak()
3940 static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) in check_helper_call() argument
3950 verbose(env, "invalid func %s#%d\n", func_id_name(func_id), in check_helper_call()
3955 if (env->ops->get_func_proto) in check_helper_call()
3956 fn = env->ops->get_func_proto(func_id, env->prog); in check_helper_call()
3958 verbose(env, "unknown func %s#%d\n", func_id_name(func_id), in check_helper_call()
3964 if (!env->prog->gpl_compatible && fn->gpl_only) { in check_helper_call()
3965 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); in check_helper_call()
3972 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", in check_helper_call()
3982 verbose(env, "kernel subsystem misconfigured func %s#%d\n", in check_helper_call()
3989 err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); in check_helper_call()
3992 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); in check_helper_call()
3995 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); in check_helper_call()
3998 err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); in check_helper_call()
4001 err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); in check_helper_call()
4005 err = record_func_map(env, &meta, func_id, insn_idx); in check_helper_call()
4013 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, in check_helper_call()
4020 err = check_reference_leak(env); in check_helper_call()
4022 verbose(env, "tail_call would lead to reference leak\n"); in check_helper_call()
4026 err = release_reference(env, meta.ref_obj_id); in check_helper_call()
4028 verbose(env, "func %s#%d reference has not been acquired before\n", in check_helper_call()
4034 regs = cur_regs(env); in check_helper_call()
4041 verbose(env, "get_local_storage() doesn't support non-zero flags\n"); in check_helper_call()
4047 mark_reg_not_init(env, regs, caller_saved[i]); in check_helper_call()
4048 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); in check_helper_call()
4057 mark_reg_unknown(env, regs, BPF_REG_0); in check_helper_call()
4063 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
4069 verbose(env, in check_helper_call()
4077 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
4080 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
4083 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
4085 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
4087 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
4089 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
4091 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
4093 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
4095 verbose(env, "unknown return type %d of func %s#%d\n", in check_helper_call()
4104 int id = acquire_reference_state(env, insn_idx); in check_helper_call()
4116 err = check_map_func_compatibility(env, meta.map_ptr, func_id); in check_helper_call()
4120 if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) { in check_helper_call()
4131 verbose(env, err_str, func_id_name(func_id), func_id); in check_helper_call()
4135 env->prog->has_callchain_buf = true; in check_helper_call()
4139 clear_all_pkt_pointers(env); in check_helper_call()
4163 static bool check_reg_sane_offset(struct bpf_verifier_env *env, in check_reg_sane_offset() argument
4172 verbose(env, "math between %s pointer and %lld is not allowed\n", in check_reg_sane_offset()
4178 verbose(env, "%s pointer offset %d is not allowed\n", in check_reg_sane_offset()
4184 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", in check_reg_sane_offset()
4190 verbose(env, "value %lld makes %s pointer be out of bounds\n", in check_reg_sane_offset()
4198 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env) in cur_aux() argument
4200 return &env->insn_aux_data[env->insn_idx]; in cur_aux()
4234 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, in can_skip_alu_sanitation() argument
4237 return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K; in can_skip_alu_sanitation()
4257 static int sanitize_val_alu(struct bpf_verifier_env *env, in sanitize_val_alu() argument
4260 struct bpf_insn_aux_data *aux = cur_aux(env); in sanitize_val_alu()
4262 if (can_skip_alu_sanitation(env, insn)) in sanitize_val_alu()
4268 static int sanitize_ptr_alu(struct bpf_verifier_env *env, in sanitize_ptr_alu() argument
4274 struct bpf_verifier_state *vstate = env->cur_state; in sanitize_ptr_alu()
4275 struct bpf_insn_aux_data *aux = cur_aux(env); in sanitize_ptr_alu()
4282 if (can_skip_alu_sanitation(env, insn)) in sanitize_ptr_alu()
4314 ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true); in sanitize_ptr_alu()
4325 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, in adjust_ptr_min_max_vals() argument
4330 struct bpf_verifier_state *vstate = env->cur_state; in adjust_ptr_min_max_vals()
4355 verbose(env, in adjust_ptr_min_max_vals()
4363 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", in adjust_ptr_min_max_vals()
4375 verbose(env, "R%d pointer arithmetic on %s prohibited\n", in adjust_ptr_min_max_vals()
4379 if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) { in adjust_ptr_min_max_vals()
4380 …verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibit… in adjust_ptr_min_max_vals()
4395 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || in adjust_ptr_min_max_vals()
4396 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) in adjust_ptr_min_max_vals()
4401 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); in adjust_ptr_min_max_vals()
4403 verbose(env, "R%d tried to add from different maps or paths\n", dst); in adjust_ptr_min_max_vals()
4450 dst_reg->id = ++env->id_gen; in adjust_ptr_min_max_vals()
4456 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); in adjust_ptr_min_max_vals()
4458 verbose(env, "R%d tried to sub from different maps or paths\n", dst); in adjust_ptr_min_max_vals()
4463 verbose(env, "R%d tried to subtract pointer from scalar\n", in adjust_ptr_min_max_vals()
4472 verbose(env, "R%d subtraction from stack pointer prohibited\n", in adjust_ptr_min_max_vals()
4514 dst_reg->id = ++env->id_gen; in adjust_ptr_min_max_vals()
4524 verbose(env, "R%d bitwise operator %s on pointer prohibited\n", in adjust_ptr_min_max_vals()
4529 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", in adjust_ptr_min_max_vals()
4534 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) in adjust_ptr_min_max_vals()
4544 if (!env->allow_ptr_leaks) { in adjust_ptr_min_max_vals()
4546 check_map_access(env, dst, dst_reg->off, 1, false)) { in adjust_ptr_min_max_vals()
4547 verbose(env, "R%d pointer arithmetic of map value goes out of range, " in adjust_ptr_min_max_vals()
4551 check_stack_access(env, dst_reg, dst_reg->off + in adjust_ptr_min_max_vals()
4553 verbose(env, "R%d stack pointer arithmetic goes out of range, " in adjust_ptr_min_max_vals()
4566 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, in adjust_scalar_min_max_vals() argument
4571 struct bpf_reg_state *regs = cur_regs(env); in adjust_scalar_min_max_vals()
4613 ret = sanitize_val_alu(env, insn); in adjust_scalar_min_max_vals()
4615 verbose(env, "R%d tried to add from different pointers or scalars\n", dst); in adjust_scalar_min_max_vals()
4637 ret = sanitize_val_alu(env, insn); in adjust_scalar_min_max_vals()
4639 verbose(env, "R%d tried to sub from different pointers or scalars\n", dst); in adjust_scalar_min_max_vals()
4753 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
4778 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
4808 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
4827 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
4844 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, in adjust_reg_min_max_vals() argument
4847 struct bpf_verifier_state *vstate = env->cur_state; in adjust_reg_min_max_vals()
4866 if (opcode == BPF_SUB && env->allow_ptr_leaks) { in adjust_reg_min_max_vals()
4867 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_reg_min_max_vals()
4870 verbose(env, "R%d pointer %s pointer prohibited\n", in adjust_reg_min_max_vals()
4879 err = mark_chain_precision(env, insn->dst_reg); in adjust_reg_min_max_vals()
4882 return adjust_ptr_min_max_vals(env, insn, in adjust_reg_min_max_vals()
4887 err = mark_chain_precision(env, insn->src_reg); in adjust_reg_min_max_vals()
4890 return adjust_ptr_min_max_vals(env, insn, in adjust_reg_min_max_vals()
4901 return adjust_ptr_min_max_vals(env, insn, in adjust_reg_min_max_vals()
4907 print_verifier_state(env, state); in adjust_reg_min_max_vals()
4908 verbose(env, "verifier internal error: unexpected ptr_reg\n"); in adjust_reg_min_max_vals()
4912 print_verifier_state(env, state); in adjust_reg_min_max_vals()
4913 verbose(env, "verifier internal error: no src_reg\n"); in adjust_reg_min_max_vals()
4916 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); in adjust_reg_min_max_vals()
4920 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) in check_alu_op() argument
4922 struct bpf_reg_state *regs = cur_regs(env); in check_alu_op()
4931 verbose(env, "BPF_NEG uses reserved fields\n"); in check_alu_op()
4938 verbose(env, "BPF_END uses reserved fields\n"); in check_alu_op()
4944 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_alu_op()
4948 if (is_pointer_value(env, insn->dst_reg)) { in check_alu_op()
4949 verbose(env, "R%d pointer arithmetic prohibited\n", in check_alu_op()
4955 err = check_reg_arg(env, insn->dst_reg, DST_OP); in check_alu_op()
4963 verbose(env, "BPF_MOV uses reserved fields\n"); in check_alu_op()
4968 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_alu_op()
4973 verbose(env, "BPF_MOV uses reserved fields\n"); in check_alu_op()
4979 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in check_alu_op()
4996 if (is_pointer_value(env, insn->src_reg)) { in check_alu_op()
4997 verbose(env, in check_alu_op()
5004 dst_reg->subreg_def = env->insn_idx + 1; in check_alu_op()
5006 mark_reg_unknown(env, regs, in check_alu_op()
5016 mark_reg_unknown(env, regs, insn->dst_reg); in check_alu_op()
5028 verbose(env, "invalid BPF_ALU opcode %x\n", opcode); in check_alu_op()
5035 verbose(env, "BPF_ALU uses reserved fields\n"); in check_alu_op()
5039 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_alu_op()
5044 verbose(env, "BPF_ALU uses reserved fields\n"); in check_alu_op()
5050 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_alu_op()
5056 verbose(env, "div by zero\n"); in check_alu_op()
5065 verbose(env, "invalid shift %d\n", insn->imm); in check_alu_op()
5071 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in check_alu_op()
5075 return adjust_reg_min_max_vals(env, insn); in check_alu_op()
5789 static int check_cond_jmp_op(struct bpf_verifier_env *env, in check_cond_jmp_op() argument
5792 struct bpf_verifier_state *this_branch = env->cur_state; in check_cond_jmp_op()
5803 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode); in check_cond_jmp_op()
5809 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); in check_cond_jmp_op()
5814 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_cond_jmp_op()
5818 if (is_pointer_value(env, insn->src_reg)) { in check_cond_jmp_op()
5819 verbose(env, "R%d pointer comparison prohibited\n", in check_cond_jmp_op()
5826 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); in check_cond_jmp_op()
5832 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_cond_jmp_op()
5847 err = mark_chain_precision(env, insn->dst_reg); in check_cond_jmp_op()
5849 err = mark_chain_precision(env, insn->src_reg); in check_cond_jmp_op()
5864 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, in check_cond_jmp_op()
5934 is_pointer_value(env, insn->dst_reg)) { in check_cond_jmp_op()
5935 verbose(env, "R%d pointer comparison prohibited\n", in check_cond_jmp_op()
5939 if (env->log.level & BPF_LOG_LEVEL) in check_cond_jmp_op()
5940 print_verifier_state(env, this_branch->frame[this_branch->curframe]); in check_cond_jmp_op()
5945 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) in check_ld_imm() argument
5947 struct bpf_insn_aux_data *aux = cur_aux(env); in check_ld_imm()
5948 struct bpf_reg_state *regs = cur_regs(env); in check_ld_imm()
5953 verbose(env, "invalid BPF_LD_IMM insn\n"); in check_ld_imm()
5957 verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); in check_ld_imm()
5961 err = check_reg_arg(env, insn->dst_reg, DST_OP); in check_ld_imm()
5973 map = env->used_maps[aux->map_index]; in check_ld_imm()
5974 mark_reg_known_zero(env, regs, insn->dst_reg); in check_ld_imm()
5981 regs[insn->dst_reg].id = ++env->id_gen; in check_ld_imm()
5985 verbose(env, "bpf verifier is misconfigured\n"); in check_ld_imm()
6019 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) in check_ld_abs() argument
6021 struct bpf_reg_state *regs = cur_regs(env); in check_ld_abs()
6025 if (!may_access_skb(env->prog->type)) { in check_ld_abs()
6026 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); in check_ld_abs()
6030 if (!env->ops->gen_ld_abs) { in check_ld_abs()
6031 verbose(env, "bpf verifier is misconfigured\n"); in check_ld_abs()
6035 if (env->subprog_cnt > 1) { in check_ld_abs()
6043 verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n"); in check_ld_abs()
6050 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); in check_ld_abs()
6055 err = check_reg_arg(env, BPF_REG_6, SRC_OP); in check_ld_abs()
6063 err = check_reference_leak(env); in check_ld_abs()
6065 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n"); in check_ld_abs()
6069 if (env->cur_state->active_spin_lock) { in check_ld_abs()
6070 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n"); in check_ld_abs()
6075 verbose(env, in check_ld_abs()
6082 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_ld_abs()
6089 mark_reg_not_init(env, regs, caller_saved[i]); in check_ld_abs()
6090 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); in check_ld_abs()
6097 mark_reg_unknown(env, regs, BPF_REG_0); in check_ld_abs()
6099 regs[BPF_REG_0].subreg_def = env->insn_idx + 1; in check_ld_abs()
6103 static int check_return_code(struct bpf_verifier_env *env) in check_return_code() argument
6109 switch (env->prog->type) { in check_return_code()
6111 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || in check_return_code()
6112 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG) in check_return_code()
6116 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { in check_return_code()
6131 reg = cur_regs(env) + BPF_REG_0; in check_return_code()
6133 verbose(env, "At program exit the register R0 is not a known value (%s)\n", in check_return_code()
6141 verbose(env, "At program exit the register R0 "); in check_return_code()
6144 verbose(env, "has value %s", tn_buf); in check_return_code()
6146 verbose(env, "has unknown scalar value"); in check_return_code()
6149 verbose(env, " should have been in %s\n", tn_buf); in check_return_code()
6155 env->prog->enforce_expected_attach_type = 1; in check_return_code()
6199 static u32 state_htab_size(struct bpf_verifier_env *env) in state_htab_size() argument
6201 return env->prog->len; in state_htab_size()
6205 struct bpf_verifier_env *env, in explored_state() argument
6208 struct bpf_verifier_state *cur = env->cur_state; in explored_state()
6211 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; in explored_state()
6214 static void init_explored_state(struct bpf_verifier_env *env, int idx) in init_explored_state() argument
6216 env->insn_aux_data[idx].prune_point = true; in init_explored_state()
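explored_state() is the bucket lookup for the state-pruning hash table: visited states are chained per bucket, and the bucket index mixes the instruction index with the current frame's callsite so the same instruction reached through different call chains hashes to different buckets. Reconstructed from the lines above:

```c
static u32 state_htab_size(struct bpf_verifier_env *env)
{
	return env->prog->len;
}

static struct bpf_verifier_state_list **explored_state(
					struct bpf_verifier_env *env,
					int idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_func_state *state = cur->frame[cur->curframe];

	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
}

/* mark an instruction as a candidate point for state pruning */
static void init_explored_state(struct bpf_verifier_env *env, int idx)
{
	env->insn_aux_data[idx].prune_point = true;
}
```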
6224 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env, in push_insn() argument
6227 int *insn_stack = env->cfg.insn_stack; in push_insn()
6228 int *insn_state = env->cfg.insn_state; in push_insn()
6236 if (w < 0 || w >= env->prog->len) { in push_insn()
6237 verbose_linfo(env, t, "%d: ", t); in push_insn()
6238 verbose(env, "jump out of range from insn %d to %d\n", t, w); in push_insn()
6244 init_explored_state(env, w); in push_insn()
6250 if (env->cfg.cur_stack >= env->prog->len) in push_insn()
6252 insn_stack[env->cfg.cur_stack++] = w; in push_insn()
6255 if (loop_ok && env->allow_ptr_leaks) in push_insn()
6257 verbose_linfo(env, t, "%d: ", t); in push_insn()
6258 verbose_linfo(env, w, "%d: ", w); in push_insn()
6259 verbose(env, "back-edge from insn %d to %d\n", t, w); in push_insn()
6265 verbose(env, "insn state internal bug\n"); in push_insn()
6274 static int check_cfg(struct bpf_verifier_env *env) in check_cfg() argument
6276 struct bpf_insn *insns = env->prog->insnsi; in check_cfg()
6277 int insn_cnt = env->prog->len; in check_cfg()
6282 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); in check_cfg()
6286 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); in check_cfg()
6294 env->cfg.cur_stack = 1; in check_cfg()
6297 if (env->cfg.cur_stack == 0) in check_cfg()
6299 t = insn_stack[env->cfg.cur_stack - 1]; in check_cfg()
6308 ret = push_insn(t, t + 1, FALLTHROUGH, env, false); in check_cfg()
6314 init_explored_state(env, t + 1); in check_cfg()
6316 init_explored_state(env, t); in check_cfg()
6318 env, false); in check_cfg()
6331 FALLTHROUGH, env, true); in check_cfg()
6340 init_explored_state(env, t + insns[t].off + 1); in check_cfg()
6345 init_explored_state(env, t + 1); in check_cfg()
6348 init_explored_state(env, t); in check_cfg()
6349 ret = push_insn(t, t + 1, FALLTHROUGH, env, true); in check_cfg()
6355 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true); in check_cfg()
6365 ret = push_insn(t, t + 1, FALLTHROUGH, env, false); in check_cfg()
6374 if (env->cfg.cur_stack-- <= 0) { in check_cfg()
6375 verbose(env, "pop stack internal bug\n"); in check_cfg()
6384 verbose(env, "unreachable insn %d\n", i); in check_cfg()
6394 env->cfg.insn_state = env->cfg.insn_stack = NULL; in check_cfg()
6402 static int check_btf_func(struct bpf_verifier_env *env, in check_btf_func() argument
6420 if (nfuncs != env->subprog_cnt) { in check_btf_func()
6421 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n"); in check_btf_func()
6429 verbose(env, "invalid func info rec size %u\n", urec_size); in check_btf_func()
6433 prog = env->prog; in check_btf_func()
6447 verbose(env, "nonzero tailing record in func info"); in check_btf_func()
6465 verbose(env, in check_btf_func()
6472 verbose(env, in check_btf_func()
6479 if (env->subprog_info[i].start != krecord[i].insn_off) { in check_btf_func()
6480 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n"); in check_btf_func()
6488 verbose(env, "invalid type id %d in func info", in check_btf_func()
6507 static void adjust_btf_func(struct bpf_verifier_env *env) in adjust_btf_func() argument
6511 if (!env->prog->aux->func_info) in adjust_btf_func()
6514 for (i = 0; i < env->subprog_cnt; i++) in adjust_btf_func()
6515 env->prog->aux->func_info[i].insn_off = env->subprog_info[i].start; in adjust_btf_func()
6522 static int check_btf_line(struct bpf_verifier_env *env, in check_btf_line() argument
6552 prog = env->prog; in check_btf_line()
6556 sub = env->subprog_info; in check_btf_line()
6564 verbose(env, "nonzero tailing record in line_info"); in check_btf_line()
6590 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", in check_btf_line()
6598 verbose(env, in check_btf_line()
6607 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i); in check_btf_line()
6612 if (s != env->subprog_cnt) { in check_btf_line()
6617 verbose(env, "missing bpf_line_info for func#%u\n", s); in check_btf_line()
6627 if (s != env->subprog_cnt) { in check_btf_line()
6628 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n", in check_btf_line()
6629 env->subprog_cnt - s, s); in check_btf_line()
6644 static int check_btf_info(struct bpf_verifier_env *env, in check_btf_info() argument
6657 env->prog->aux->btf = btf; in check_btf_info()
6659 err = check_btf_func(env, attr, uattr); in check_btf_info()
6663 err = check_btf_line(env, attr, uattr); in check_btf_info()
6716 static void clean_func_state(struct bpf_verifier_env *env, in clean_func_state() argument
6745 static void clean_verifier_state(struct bpf_verifier_env *env, in clean_verifier_state() argument
6755 clean_func_state(env, st->frame[i]); in clean_verifier_state()
6790 static void clean_live_states(struct bpf_verifier_env *env, int insn, in clean_live_states() argument
6796 sl = *explored_state(env, insn); in clean_live_states()
6806 clean_verifier_state(env, &sl->state); in clean_live_states()
7056 static bool states_equal(struct bpf_verifier_env *env, in states_equal() argument
7089 static int propagate_liveness_reg(struct bpf_verifier_env *env, in propagate_liveness_reg() argument
7108 err = mark_reg_read(env, reg, parent_reg, flag); in propagate_liveness_reg()
7122 static int propagate_liveness(struct bpf_verifier_env *env, in propagate_liveness() argument
7144 err = propagate_liveness_reg(env, &state_reg[i], in propagate_liveness()
7149 mark_insn_zext(env, &parent_reg[i]); in propagate_liveness()
7157 err = propagate_liveness_reg(env, state_reg, in propagate_liveness()
7169 static int propagate_precision(struct bpf_verifier_env *env, in propagate_precision() argument
7182 if (env->log.level & BPF_LOG_LEVEL2) in propagate_precision()
7183 verbose(env, "propagating r%d\n", i); in propagate_precision()
7184 err = mark_chain_precision(env, i); in propagate_precision()
7196 if (env->log.level & BPF_LOG_LEVEL2) in propagate_precision()
7197 verbose(env, "propagating fp%d\n", in propagate_precision()
7199 err = mark_chain_precision_stack(env, i); in propagate_precision()
7225 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) in is_state_visited() argument
7229 struct bpf_verifier_state *cur = env->cur_state, *new; in is_state_visited()
7231 bool add_new_state = env->test_state_freq ? true : false; in is_state_visited()
7233 cur->last_insn_idx = env->prev_insn_idx; in is_state_visited()
7234 if (!env->insn_aux_data[insn_idx].prune_point) in is_state_visited()
7248 if (env->jmps_processed - env->prev_jmps_processed >= 2 && in is_state_visited()
7249 env->insn_processed - env->prev_insn_processed >= 8) in is_state_visited()
7252 pprev = explored_state(env, insn_idx); in is_state_visited()
7255 clean_live_states(env, insn_idx, cur); in is_state_visited()
7263 states_equal(env, &sl->state, cur)) { in is_state_visited()
7264 verbose_linfo(env, insn_idx, "; "); in is_state_visited()
7265 verbose(env, "infinite loop detected at insn %d\n", insn_idx); in is_state_visited()
7280 if (env->jmps_processed - env->prev_jmps_processed < 20 && in is_state_visited()
7281 env->insn_processed - env->prev_insn_processed < 100) in is_state_visited()
7285 if (states_equal(env, &sl->state, cur)) { in is_state_visited()
7297 err = propagate_liveness(env, &sl->state, cur); in is_state_visited()
7304 err = err ? : push_jmp_history(env, cur); in is_state_visited()
7305 err = err ? : propagate_precision(env, &sl->state); in is_state_visited()
7337 env->peak_states--; in is_state_visited()
7343 sl->next = env->free_list; in is_state_visited()
7344 env->free_list = sl; in is_state_visited()
7354 if (env->max_states_per_insn < states_cnt) in is_state_visited()
7355 env->max_states_per_insn = states_cnt; in is_state_visited()
7357 if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) in is_state_visited()
7358 return push_jmp_history(env, cur); in is_state_visited()
7361 return push_jmp_history(env, cur); in is_state_visited()
7375 env->total_states++; in is_state_visited()
7376 env->peak_states++; in is_state_visited()
7377 env->prev_jmps_processed = env->jmps_processed; in is_state_visited()
7378 env->prev_insn_processed = env->insn_processed; in is_state_visited()
7395 new_sl->next = *explored_state(env, insn_idx); in is_state_visited()
7396 *explored_state(env, insn_idx) = new_sl; in is_state_visited()
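
The is_state_visited() references above implement state pruning. The sketch below models just the pruning decision: walk the list of states already explored at this instruction index; if an equivalent one is found, the current path is provably safe and can be cut short, otherwise remember the current state. The one-scalar-per-register state and the exact-equality check are toy stand-ins for the kernel's much richer states_equal().

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define NR_REGS 4

struct state {
	long regs[NR_REGS];
	struct state *next;		/* explored states at one insn index */
};

static bool states_equal(const struct state *a, const struct state *b)
{
	return memcmp(a->regs, b->regs, sizeof(a->regs)) == 0;
}

/* returns true if the current state is subsumed by an explored one */
static bool is_state_visited(struct state **explored, const struct state *cur)
{
	for (struct state *sl = *explored; sl; sl = sl->next)
		if (states_equal(sl, cur))
			return true;

	struct state *copy = malloc(sizeof(*copy));
	if (!copy)
		return false;
	*copy = *cur;
	copy->next = *explored;
	*explored = copy;
	return false;
}

int main(void)
{
	struct state *explored = NULL;
	struct state cur = { .regs = { 1, 2, 3, 4 } };

	printf("first visit pruned? %d\n", is_state_visited(&explored, &cur));
	printf("second visit pruned? %d\n", is_state_visited(&explored, &cur));
	free(explored);
	return 0;
}
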
7467 static int do_check(struct bpf_verifier_env *env) in do_check() argument
7470 struct bpf_insn *insns = env->prog->insnsi; in do_check()
7472 int insn_cnt = env->prog->len; in do_check()
7476 env->prev_linfo = NULL; in do_check()
7489 env->cur_state = state; in do_check()
7490 init_func_state(env, state->frame[0], in do_check()
7500 env->prev_insn_idx = prev_insn_idx; in do_check()
7501 if (env->insn_idx >= insn_cnt) { in do_check()
7502 verbose(env, "invalid insn idx %d insn_cnt %d\n", in do_check()
7503 env->insn_idx, insn_cnt); in do_check()
7507 insn = &insns[env->insn_idx]; in do_check()
7510 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { in do_check()
7511 verbose(env, in do_check()
7513 env->insn_processed); in do_check()
7517 err = is_state_visited(env, env->insn_idx); in do_check()
7522 if (env->log.level & BPF_LOG_LEVEL) { in do_check()
7524 verbose(env, "\nfrom %d to %d%s: safe\n", in do_check()
7525 env->prev_insn_idx, env->insn_idx, in do_check()
7526 env->cur_state->speculative ? in do_check()
7529 verbose(env, "%d: safe\n", env->insn_idx); in do_check()
7540 if (env->log.level & BPF_LOG_LEVEL2 || in do_check()
7541 (env->log.level & BPF_LOG_LEVEL && do_print_state)) { in do_check()
7542 if (env->log.level & BPF_LOG_LEVEL2) in do_check()
7543 verbose(env, "%d:", env->insn_idx); in do_check()
7545 verbose(env, "\nfrom %d to %d%s:", in do_check()
7546 env->prev_insn_idx, env->insn_idx, in do_check()
7547 env->cur_state->speculative ? in do_check()
7549 print_verifier_state(env, state->frame[state->curframe]); in do_check()
7553 if (env->log.level & BPF_LOG_LEVEL) { in do_check()
7556 .private_data = env, in do_check()
7559 verbose_linfo(env, env->insn_idx, "; "); in do_check()
7560 verbose(env, "%d: ", env->insn_idx); in do_check()
7561 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); in do_check()
7564 if (bpf_prog_is_dev_bound(env->prog->aux)) { in do_check()
7565 err = bpf_prog_offload_verify_insn(env, env->insn_idx, in do_check()
7566 env->prev_insn_idx); in do_check()
7571 regs = cur_regs(env); in do_check()
7572 env->insn_aux_data[env->insn_idx].seen = true; in do_check()
7573 prev_insn_idx = env->insn_idx; in do_check()
7576 err = check_alu_op(env, insn); in do_check()
7586 err = check_reg_arg(env, insn->src_reg, SRC_OP); in do_check()
7590 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in do_check()
7599 err = check_mem_access(env, env->insn_idx, insn->src_reg, in do_check()
7605 prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type; in do_check()
7622 verbose(env, "same insn cannot be used with different pointers\n"); in do_check()
7630 err = check_xadd(env, env->insn_idx, insn); in do_check()
7633 env->insn_idx++; in do_check()
7638 err = check_reg_arg(env, insn->src_reg, SRC_OP); in do_check()
7642 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in do_check()
7649 err = check_mem_access(env, env->insn_idx, insn->dst_reg, in do_check()
7655 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type; in do_check()
7660 verbose(env, "same insn cannot be used with different pointers\n"); in do_check()
7667 verbose(env, "BPF_ST uses reserved fields\n"); in do_check()
7671 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in do_check()
7675 if (is_ctx_reg(env, insn->dst_reg)) { in do_check()
7676 verbose(env, "BPF_ST stores into R%d %s is not allowed\n", in do_check()
7678 reg_type_str[reg_state(env, insn->dst_reg)->type]); in do_check()
7683 err = check_mem_access(env, env->insn_idx, insn->dst_reg, in do_check()
7692 env->jmps_processed++; in do_check()
7700 verbose(env, "BPF_CALL uses reserved fields\n"); in do_check()
7704 if (env->cur_state->active_spin_lock && in do_check()
7707 verbose(env, "function calls are not allowed while holding a lock\n"); in do_check()
7711 err = check_func_call(env, insn, &env->insn_idx); in do_check()
7713 err = check_helper_call(env, insn->imm, env->insn_idx); in do_check()
7723 verbose(env, "BPF_JA uses reserved fields\n"); in do_check()
7727 env->insn_idx += insn->off + 1; in do_check()
7736 verbose(env, "BPF_EXIT uses reserved fields\n"); in do_check()
7740 if (env->cur_state->active_spin_lock) { in do_check()
7741 verbose(env, "bpf_spin_unlock is missing\n"); in do_check()
7747 err = prepare_func_exit(env, &env->insn_idx); in do_check()
7754 err = check_reference_leak(env); in do_check()
7764 err = check_reg_arg(env, BPF_REG_0, SRC_OP); in do_check()
7768 if (is_pointer_value(env, BPF_REG_0)) { in do_check()
7769 verbose(env, "R0 leaks addr as return value\n"); in do_check()
7773 err = check_return_code(env); in do_check()
7777 update_branch_counts(env, env->cur_state); in do_check()
7778 err = pop_stack(env, &prev_insn_idx, in do_check()
7779 &env->insn_idx); in do_check()
7789 err = check_cond_jmp_op(env, insn, &env->insn_idx); in do_check()
7797 err = check_ld_abs(env, insn); in do_check()
7802 err = check_ld_imm(env, insn); in do_check()
7806 env->insn_idx++; in do_check()
7807 env->insn_aux_data[env->insn_idx].seen = true; in do_check()
7809 verbose(env, "invalid BPF_LD mode\n"); in do_check()
7813 verbose(env, "unknown insn class %d\n", class); in do_check()
7817 env->insn_idx++; in do_check()
7820 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; in do_check()
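
The do_check() references above form the main verification loop. Below is a heavily stripped-down skeleton showing only its shape: bound the total work, then dispatch on the instruction class and advance. The mock instruction encoding, the missing pruning, register/memory checks, and branch stack are all assumptions of this sketch, not the kernel's structure in detail.

#include <stdio.h>

#define COMPLEXITY_LIMIT 1000000

enum insn_class { CLS_ALU, CLS_LD, CLS_ST, CLS_JMP, CLS_EXIT };

struct insn { enum insn_class class; };

static int do_check(const struct insn *insns, int insn_cnt)
{
	int insn_idx = 0, insn_processed = 0;

	while (insn_idx < insn_cnt) {
		if (++insn_processed > COMPLEXITY_LIMIT)
			return -1;	/* program too complex */

		/* pruning and per-class register/memory checks omitted */
		switch (insns[insn_idx].class) {
		case CLS_EXIT:
			return 0;	/* the kernel would pop a pending branch instead */
		case CLS_JMP:
			/* conditional jumps push the other branch for later */
			/* fall through */
		default:
			insn_idx++;	/* fall-through execution */
		}
	}
	return -1;			/* fell off the end without an exit */
}

int main(void)
{
	struct insn prog[] = { { CLS_ALU }, { CLS_ST }, { CLS_EXIT } };

	printf("verdict: %d\n", do_check(prog, 3));
	return 0;
}
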
7845 static int check_map_prog_compatibility(struct bpf_verifier_env *env, in check_map_prog_compatibility() argument
7857 verbose(env, "perf_event programs can only use preallocated hash map\n"); in check_map_prog_compatibility()
7862 verbose(env, "perf_event programs can only use preallocated inner hash map\n"); in check_map_prog_compatibility()
7870 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n"); in check_map_prog_compatibility()
7876 verbose(env, "offload device mismatch between prog and map\n"); in check_map_prog_compatibility()
7892 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) in replace_map_fd_with_map_ptr() argument
7894 struct bpf_insn *insn = env->prog->insnsi; in replace_map_fd_with_map_ptr()
7895 int insn_cnt = env->prog->len; in replace_map_fd_with_map_ptr()
7898 err = bpf_prog_calc_tag(env->prog); in replace_map_fd_with_map_ptr()
7905 verbose(env, "BPF_LDX uses reserved fields\n"); in replace_map_fd_with_map_ptr()
7912 verbose(env, "BPF_STX uses reserved fields\n"); in replace_map_fd_with_map_ptr()
7925 verbose(env, "invalid bpf_ld_imm64 insn\n"); in replace_map_fd_with_map_ptr()
7940 verbose(env, in replace_map_fd_with_map_ptr()
7948 verbose(env, "fd %d is not pointing to valid bpf_map\n", in replace_map_fd_with_map_ptr()
7953 err = check_map_prog_compatibility(env, map, env->prog); in replace_map_fd_with_map_ptr()
7959 aux = &env->insn_aux_data[i]; in replace_map_fd_with_map_ptr()
7966 verbose(env, "direct value offset of %u is not allowed\n", off); in replace_map_fd_with_map_ptr()
7972 verbose(env, "no direct value access support for this map type\n"); in replace_map_fd_with_map_ptr()
7979 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n", in replace_map_fd_with_map_ptr()
7993 for (j = 0; j < env->used_map_cnt; j++) { in replace_map_fd_with_map_ptr()
7994 if (env->used_maps[j] == map) { in replace_map_fd_with_map_ptr()
8001 if (env->used_map_cnt >= MAX_USED_MAPS) { in replace_map_fd_with_map_ptr()
8017 aux->map_index = env->used_map_cnt; in replace_map_fd_with_map_ptr()
8018 env->used_maps[env->used_map_cnt++] = map; in replace_map_fd_with_map_ptr()
8021 bpf_cgroup_storage_assign(env->prog, map)) { in replace_map_fd_with_map_ptr()
8022 verbose(env, "only one cgroup storage of each type is allowed\n"); in replace_map_fd_with_map_ptr()
8036 verbose(env, "unknown opcode %02x\n", insn->code); in replace_map_fd_with_map_ptr()
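
The replace_map_fd_with_map_ptr() references above resolve a map file descriptor carried in a ld_imm64 instruction into a kernel map pointer. The toy below shows only the final rewriting step: the 64-bit address is split across the two 32-bit imm fields of the instruction pair. The fd-to-pointer lookup is stubbed out and the struct is reduced to the one field touched.

#include <stdint.h>
#include <stdio.h>

struct insn { int32_t imm; };		/* only the field we rewrite */

static void store_map_ptr(struct insn pair[2], const void *map)
{
	uint64_t addr = (uint64_t)(uintptr_t)map;

	pair[0].imm = (int32_t)(uint32_t)addr;		/* low 32 bits  */
	pair[1].imm = (int32_t)(uint32_t)(addr >> 32);	/* high 32 bits */
}

int main(void)
{
	static int fake_map;		/* stand-in for a resolved map */
	struct insn pair[2] = { { 42 /* was the map fd */ }, { 0 } };

	store_map_ptr(pair, &fake_map);
	printf("imm lo=%#x hi=%#x\n",
	       (unsigned int)pair[0].imm, (unsigned int)pair[1].imm);
	return 0;
}
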
8049 static void release_maps(struct bpf_verifier_env *env) in release_maps() argument
8055 if (!env->prog->aux->cgroup_storage[stype]) in release_maps()
8057 bpf_cgroup_storage_release(env->prog, in release_maps()
8058 env->prog->aux->cgroup_storage[stype]); in release_maps()
8061 for (i = 0; i < env->used_map_cnt; i++) in release_maps()
8062 bpf_map_put(env->used_maps[i]); in release_maps()
8066 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) in convert_pseudo_ld_imm64() argument
8068 struct bpf_insn *insn = env->prog->insnsi; in convert_pseudo_ld_imm64()
8069 int insn_cnt = env->prog->len; in convert_pseudo_ld_imm64()
8081 static int adjust_insn_aux_data(struct bpf_verifier_env *env, in adjust_insn_aux_data() argument
8084 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; in adjust_insn_aux_data()
8093 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); in adjust_insn_aux_data()
8107 new_data[i].zext_dst = insn_has_def32(env, insn + i); in adjust_insn_aux_data()
8109 env->insn_aux_data = new_data; in adjust_insn_aux_data()
8114 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) in adjust_subprog_starts() argument
8121 for (i = 0; i <= env->subprog_cnt; i++) { in adjust_subprog_starts()
8122 if (env->subprog_info[i].start <= off) in adjust_subprog_starts()
8124 env->subprog_info[i].start += len - 1; in adjust_subprog_starts()
8128 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, in bpf_patch_insn_data() argument
8133 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); in bpf_patch_insn_data()
8136 verbose(env, in bpf_patch_insn_data()
8138 env->insn_aux_data[off].orig_idx); in bpf_patch_insn_data()
8141 if (adjust_insn_aux_data(env, new_prog, off, len)) in bpf_patch_insn_data()
8143 adjust_subprog_starts(env, off, len); in bpf_patch_insn_data()
8147 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env, in adjust_subprog_starts_after_remove() argument
8153 for (i = 0; i < env->subprog_cnt; i++) in adjust_subprog_starts_after_remove()
8154 if (env->subprog_info[i].start >= off) in adjust_subprog_starts_after_remove()
8157 for (j = i; j < env->subprog_cnt; j++) in adjust_subprog_starts_after_remove()
8158 if (env->subprog_info[j].start >= off + cnt) in adjust_subprog_starts_after_remove()
8163 if (env->subprog_info[j].start != off + cnt) in adjust_subprog_starts_after_remove()
8167 struct bpf_prog_aux *aux = env->prog->aux; in adjust_subprog_starts_after_remove()
8171 move = env->subprog_cnt + 1 - j; in adjust_subprog_starts_after_remove()
8173 memmove(env->subprog_info + i, in adjust_subprog_starts_after_remove()
8174 env->subprog_info + j, in adjust_subprog_starts_after_remove()
8175 sizeof(*env->subprog_info) * move); in adjust_subprog_starts_after_remove()
8176 env->subprog_cnt -= j - i; in adjust_subprog_starts_after_remove()
8192 if (env->subprog_info[i].start == off) in adjust_subprog_starts_after_remove()
8197 for (; i <= env->subprog_cnt; i++) in adjust_subprog_starts_after_remove()
8198 env->subprog_info[i].start -= cnt; in adjust_subprog_starts_after_remove()
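
A minimal model of the adjustment referenced above: after cnt instructions at offset off are deleted, every subprogram start at or beyond off + cnt shifts down by cnt. Dropping subprograms that were entirely inside the removed range (the memmove in the listing) is left out of this sketch.

#include <stdio.h>

static void shift_starts(unsigned int *start, unsigned int nr,
			 unsigned int off, unsigned int cnt)
{
	for (unsigned int i = 0; i < nr; i++)
		if (start[i] >= off + cnt)
			start[i] -= cnt;
}

int main(void)
{
	unsigned int starts[] = { 0, 10, 25 };

	shift_starts(starts, 3, 12, 5);	/* delete insns [12, 17) */
	printf("%u %u %u\n", starts[0], starts[1], starts[2]);	/* 0 10 20 */
	return 0;
}
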
8203 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off, in bpf_adj_linfo_after_remove() argument
8206 struct bpf_prog *prog = env->prog; in bpf_adj_linfo_after_remove()
8253 for (i = 0; i <= env->subprog_cnt; i++) in bpf_adj_linfo_after_remove()
8254 if (env->subprog_info[i].linfo_idx > l_off) { in bpf_adj_linfo_after_remove()
8258 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) in bpf_adj_linfo_after_remove()
8259 env->subprog_info[i].linfo_idx -= l_cnt; in bpf_adj_linfo_after_remove()
8261 env->subprog_info[i].linfo_idx = l_off; in bpf_adj_linfo_after_remove()
8267 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) in verifier_remove_insns() argument
8269 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in verifier_remove_insns()
8270 unsigned int orig_prog_len = env->prog->len; in verifier_remove_insns()
8273 if (bpf_prog_is_dev_bound(env->prog->aux)) in verifier_remove_insns()
8274 bpf_prog_offload_remove_insns(env, off, cnt); in verifier_remove_insns()
8276 err = bpf_remove_insns(env->prog, off, cnt); in verifier_remove_insns()
8280 err = adjust_subprog_starts_after_remove(env, off, cnt); in verifier_remove_insns()
8284 err = bpf_adj_linfo_after_remove(env, off, cnt); in verifier_remove_insns()
8305 static void sanitize_dead_code(struct bpf_verifier_env *env) in sanitize_dead_code() argument
8307 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in sanitize_dead_code()
8309 struct bpf_insn *insn = env->prog->insnsi; in sanitize_dead_code()
8310 const int insn_cnt = env->prog->len; in sanitize_dead_code()
8334 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env) in opt_hard_wire_dead_code_branches() argument
8336 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in opt_hard_wire_dead_code_branches()
8338 struct bpf_insn *insn = env->prog->insnsi; in opt_hard_wire_dead_code_branches()
8339 const int insn_cnt = env->prog->len; in opt_hard_wire_dead_code_branches()
8353 if (bpf_prog_is_dev_bound(env->prog->aux)) in opt_hard_wire_dead_code_branches()
8354 bpf_prog_offload_replace_insn(env, i, &ja); in opt_hard_wire_dead_code_branches()
8360 static int opt_remove_dead_code(struct bpf_verifier_env *env) in opt_remove_dead_code() argument
8362 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in opt_remove_dead_code()
8363 int insn_cnt = env->prog->len; in opt_remove_dead_code()
8375 err = verifier_remove_insns(env, i, j); in opt_remove_dead_code()
8378 insn_cnt = env->prog->len; in opt_remove_dead_code()
8384 static int opt_remove_nops(struct bpf_verifier_env *env) in opt_remove_nops() argument
8387 struct bpf_insn *insn = env->prog->insnsi; in opt_remove_nops()
8388 int insn_cnt = env->prog->len; in opt_remove_nops()
8395 err = verifier_remove_insns(env, i, 1); in opt_remove_nops()
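
The opt_remove_nops() references above delete jump-always instructions with offset 0, which do nothing. The sketch below shows that scan-and-compact idea with a reduced instruction encoding; the opcode value is a stand-in, and the jump-offset fixups performed by the real removal path are omitted.

#include <stdio.h>
#include <string.h>

struct insn { unsigned char code; short off; };

#define JA_CODE 0x05		/* stand-in for BPF_JMP | BPF_JA */

static int remove_nops(struct insn *insns, int cnt)
{
	int i = 0;

	while (i < cnt) {
		if (insns[i].code == JA_CODE && insns[i].off == 0) {
			/* close the gap; jump-target fixups omitted */
			memmove(&insns[i], &insns[i + 1],
				(cnt - i - 1) * sizeof(*insns));
			cnt--;
			continue;	/* recheck the insn now at i */
		}
		i++;
	}
	return cnt;
}

int main(void)
{
	struct insn prog[] = { { 0x07, 0 }, { JA_CODE, 0 }, { 0x95, 0 } };

	printf("new length: %d\n", remove_nops(prog, 3));
	return 0;
}
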
8405 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, in opt_subreg_zext_lo32_rnd_hi32() argument
8409 struct bpf_insn_aux_data *aux = env->insn_aux_data; in opt_subreg_zext_lo32_rnd_hi32()
8410 int i, patch_len, delta = 0, len = env->prog->len; in opt_subreg_zext_lo32_rnd_hi32()
8411 struct bpf_insn *insns = env->prog->insnsi; in opt_subreg_zext_lo32_rnd_hi32()
8441 if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) { in opt_subreg_zext_lo32_rnd_hi32()
8471 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len); in opt_subreg_zext_lo32_rnd_hi32()
8474 env->prog = new_prog; in opt_subreg_zext_lo32_rnd_hi32()
8476 aux = env->insn_aux_data; in opt_subreg_zext_lo32_rnd_hi32()
8488 static int convert_ctx_accesses(struct bpf_verifier_env *env) in convert_ctx_accesses() argument
8490 const struct bpf_verifier_ops *ops = env->ops; in convert_ctx_accesses()
8492 const int insn_cnt = env->prog->len; in convert_ctx_accesses()
8499 if (ops->gen_prologue || env->seen_direct_write) { in convert_ctx_accesses()
8501 verbose(env, "bpf verifier is misconfigured\n"); in convert_ctx_accesses()
8504 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, in convert_ctx_accesses()
8505 env->prog); in convert_ctx_accesses()
8507 verbose(env, "bpf verifier is misconfigured\n"); in convert_ctx_accesses()
8510 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); in convert_ctx_accesses()
8514 env->prog = new_prog; in convert_ctx_accesses()
8519 if (bpf_prog_is_dev_bound(env->prog->aux)) in convert_ctx_accesses()
8522 insn = env->prog->insnsi + delta; in convert_ctx_accesses()
8541 env->insn_aux_data[i + delta].sanitize_stack_off) { in convert_ctx_accesses()
8549 env->insn_aux_data[i + delta].sanitize_stack_off, in convert_ctx_accesses()
8558 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt); in convert_ctx_accesses()
8563 env->prog = new_prog; in convert_ctx_accesses()
8568 switch (env->insn_aux_data[i + delta].ptr_type) { in convert_ctx_accesses()
8588 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; in convert_ctx_accesses()
8603 verbose(env, "bpf verifier narrow ctx access misconfigured\n"); in convert_ctx_accesses()
8618 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, in convert_ctx_accesses()
8622 verbose(env, "bpf verifier is misconfigured\n"); in convert_ctx_accesses()
8646 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); in convert_ctx_accesses()
8653 env->prog = new_prog; in convert_ctx_accesses()
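
The convert_ctx_accesses() references above, like the other rewriting passes, share one bookkeeping pattern: when a single instruction is replaced by cnt instructions, every later original index shifts by cnt - 1, tracked in a running delta. A toy version of that pattern, with a made-up expansion at index 2:

#include <stdio.h>

int main(void)
{
	int orig_len = 5, delta = 0;

	for (int i = 0; i < orig_len; i++) {
		int cnt = (i == 2) ? 3 : 1;	/* pretend insn 2 expands to 3 insns */

		/* patched position of original insn i in the new program */
		printf("orig %d -> new %d (expands to %d)\n", i, i + delta, cnt);
		delta += cnt - 1;
	}
	return 0;
}
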
8660 static int jit_subprogs(struct bpf_verifier_env *env) in jit_subprogs() argument
8662 struct bpf_prog *prog = env->prog, **func, *tmp; in jit_subprogs()
8668 if (env->subprog_cnt <= 1) in jit_subprogs()
8679 subprog = find_subprog(env, i + insn->imm + 1); in jit_subprogs()
8692 env->insn_aux_data[i].call_imm = insn->imm; in jit_subprogs()
8702 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); in jit_subprogs()
8706 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
8708 subprog_end = env->subprog_info[i + 1].start; in jit_subprogs()
8735 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; in jit_subprogs()
8740 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; in jit_subprogs()
8752 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
8775 func[i]->aux->func_cnt = env->subprog_cnt; in jit_subprogs()
8777 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
8781 verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); in jit_subprogs()
8791 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
8804 insn->off = env->insn_aux_data[i].call_imm; in jit_subprogs()
8805 subprog = find_subprog(env, i + insn->off + 1); in jit_subprogs()
8812 prog->aux->func_cnt = env->subprog_cnt; in jit_subprogs()
8816 for (i = 0; i < env->subprog_cnt; i++) in jit_subprogs()
8828 insn->imm = env->insn_aux_data[i].call_imm; in jit_subprogs()
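
The jit_subprogs() references above slice the single instruction stream into one program per subprogram using consecutive start offsets. The sketch below shows only that slicing; the per-subprog JIT step and the call-site rewriting are left out, and prog_len is passed explicitly here where the kernel keeps a sentinel subprog_info entry equal to the program length.

#include <stdio.h>

static void for_each_subprog(const unsigned int *start, unsigned int subprog_cnt,
			     unsigned int prog_len)
{
	for (unsigned int i = 0; i < subprog_cnt; i++) {
		unsigned int begin = start[i];
		unsigned int end = (i + 1 < subprog_cnt) ? start[i + 1] : prog_len;

		printf("subprog#%u: insns [%u, %u)\n", i, begin, end);
	}
}

int main(void)
{
	unsigned int starts[] = { 0, 12, 30 };

	for_each_subprog(starts, 3, 45);
	return 0;
}
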
8834 static int fixup_call_args(struct bpf_verifier_env *env) in fixup_call_args() argument
8837 struct bpf_prog *prog = env->prog; in fixup_call_args()
8843 if (env->prog->jit_requested && in fixup_call_args()
8844 !bpf_prog_is_dev_bound(env->prog->aux)) { in fixup_call_args()
8845 err = jit_subprogs(env); in fixup_call_args()
8856 depth = get_callee_stack_depth(env, insn, i); in fixup_call_args()
8871 static int fixup_bpf_calls(struct bpf_verifier_env *env) in fixup_bpf_calls() argument
8873 struct bpf_prog *prog = env->prog; in fixup_bpf_calls()
8915 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); in fixup_bpf_calls()
8920 env->prog = prog = new_prog; in fixup_bpf_calls()
8928 cnt = env->ops->gen_ld_abs(insn, insn_buf); in fixup_bpf_calls()
8930 verbose(env, "bpf verifier is misconfigured\n"); in fixup_bpf_calls()
8934 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); in fixup_bpf_calls()
8939 env->prog = prog = new_prog; in fixup_bpf_calls()
8953 aux = &env->insn_aux_data[i + delta]; in fixup_bpf_calls()
8986 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); in fixup_bpf_calls()
8991 env->prog = prog = new_prog; in fixup_bpf_calls()
9014 env->prog->aux->stack_depth = MAX_BPF_STACK; in fixup_bpf_calls()
9015 env->prog->aux->max_pkt_offset = MAX_PACKET_OFF; in fixup_bpf_calls()
9025 aux = &env->insn_aux_data[i + delta]; in fixup_bpf_calls()
9036 verbose(env, "tail_call abusing map_ptr\n"); in fixup_bpf_calls()
9049 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); in fixup_bpf_calls()
9054 env->prog = prog = new_prog; in fixup_bpf_calls()
9070 aux = &env->insn_aux_data[i + delta]; in fixup_bpf_calls()
9080 verbose(env, "bpf verifier is misconfigured\n"); in fixup_bpf_calls()
9084 new_prog = bpf_patch_insn_data(env, i + delta, in fixup_bpf_calls()
9090 env->prog = prog = new_prog; in fixup_bpf_calls()
9141 fn = env->ops->get_func_proto(insn->imm, env->prog); in fixup_bpf_calls()
9146 verbose(env, in fixup_bpf_calls()
9157 static void free_states(struct bpf_verifier_env *env) in free_states() argument
9162 sl = env->free_list; in free_states()
9170 if (!env->explored_states) in free_states()
9173 for (i = 0; i < state_htab_size(env); i++) { in free_states()
9174 sl = env->explored_states[i]; in free_states()
9184 kvfree(env->explored_states); in free_states()
9187 static void print_verification_stats(struct bpf_verifier_env *env) in print_verification_stats() argument
9191 if (env->log.level & BPF_LOG_STATS) { in print_verification_stats()
9192 verbose(env, "verification time %lld usec\n", in print_verification_stats()
9193 div_u64(env->verification_time, 1000)); in print_verification_stats()
9194 verbose(env, "stack depth "); in print_verification_stats()
9195 for (i = 0; i < env->subprog_cnt; i++) { in print_verification_stats()
9196 u32 depth = env->subprog_info[i].stack_depth; in print_verification_stats()
9198 verbose(env, "%d", depth); in print_verification_stats()
9199 if (i + 1 < env->subprog_cnt) in print_verification_stats()
9200 verbose(env, "+"); in print_verification_stats()
9202 verbose(env, "\n"); in print_verification_stats()
9204 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d " in print_verification_stats()
9206 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS, in print_verification_stats()
9207 env->max_states_per_insn, env->total_states, in print_verification_stats()
9208 env->peak_states, env->longest_mark_read_walk); in print_verification_stats()
9215 struct bpf_verifier_env *env; in bpf_check() local
9227 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); in bpf_check()
9228 if (!env) in bpf_check()
9230 log = &env->log; in bpf_check()
9233 env->insn_aux_data = in bpf_check()
9236 if (!env->insn_aux_data) in bpf_check()
9239 env->insn_aux_data[i].orig_idx = i; in bpf_check()
9240 env->prog = *prog; in bpf_check()
9241 env->ops = bpf_verifier_ops[env->prog->type]; in bpf_check()
9263 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); in bpf_check()
9265 env->strict_alignment = true; in bpf_check()
9267 env->strict_alignment = false; in bpf_check()
9269 env->allow_ptr_leaks = is_priv; in bpf_check()
9272 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; in bpf_check()
9274 ret = replace_map_fd_with_map_ptr(env); in bpf_check()
9278 if (bpf_prog_is_dev_bound(env->prog->aux)) { in bpf_check()
9279 ret = bpf_prog_offload_verifier_prep(env->prog); in bpf_check()
9284 env->explored_states = kvcalloc(state_htab_size(env), in bpf_check()
9288 if (!env->explored_states) in bpf_check()
9291 ret = check_subprogs(env); in bpf_check()
9295 ret = check_btf_info(env, attr, uattr); in bpf_check()
9299 ret = check_cfg(env); in bpf_check()
9303 ret = do_check(env); in bpf_check()
9304 if (env->cur_state) { in bpf_check()
9305 free_verifier_state(env->cur_state, true); in bpf_check()
9306 env->cur_state = NULL; in bpf_check()
9309 if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux)) in bpf_check()
9310 ret = bpf_prog_offload_finalize(env); in bpf_check()
9313 while (!pop_stack(env, NULL, NULL)); in bpf_check()
9314 free_states(env); in bpf_check()
9317 ret = check_max_stack_depth(env); in bpf_check()
9322 opt_hard_wire_dead_code_branches(env); in bpf_check()
9324 ret = opt_remove_dead_code(env); in bpf_check()
9326 ret = opt_remove_nops(env); in bpf_check()
9329 sanitize_dead_code(env); in bpf_check()
9334 ret = convert_ctx_accesses(env); in bpf_check()
9337 ret = fixup_bpf_calls(env); in bpf_check()
9342 if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) { in bpf_check()
9343 ret = opt_subreg_zext_lo32_rnd_hi32(env, attr); in bpf_check()
9344 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret in bpf_check()
9349 ret = fixup_call_args(env); in bpf_check()
9351 env->verification_time = ktime_get_ns() - start_time; in bpf_check()
9352 print_verification_stats(env); in bpf_check()
9361 if (ret == 0 && env->used_map_cnt) { in bpf_check()
9363 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, in bpf_check()
9364 sizeof(env->used_maps[0]), in bpf_check()
9367 if (!env->prog->aux->used_maps) { in bpf_check()
9372 memcpy(env->prog->aux->used_maps, env->used_maps, in bpf_check()
9373 sizeof(env->used_maps[0]) * env->used_map_cnt); in bpf_check()
9374 env->prog->aux->used_map_cnt = env->used_map_cnt; in bpf_check()
9379 convert_pseudo_ld_imm64(env); in bpf_check()
9383 adjust_btf_func(env); in bpf_check()
9386 if (!env->prog->aux->used_maps) in bpf_check()
9390 release_maps(env); in bpf_check()
9391 *prog = env->prog; in bpf_check()
9395 vfree(env->insn_aux_data); in bpf_check()
9397 kfree(env); in bpf_check()
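
To close, here is a condensed view of the phase ordering visible in the bpf_check() references above. Every phase is a stub that only prints its name; the point of this sketch is the sequencing and the stop-at-first-error pattern, not the phases' contents.

#include <stdio.h>

struct env;				/* opaque stand-in for bpf_verifier_env */

static int phase(const char *name) { printf("%s\n", name); return 0; }

static int bpf_check_sketch(struct env *env)
{
	int ret;

	(void)env;
	if ((ret = phase("replace_map_fd_with_map_ptr")))	return ret;
	if ((ret = phase("check_subprogs")))			return ret;
	if ((ret = phase("check_btf_info")))			return ret;
	if ((ret = phase("check_cfg")))				return ret;
	if ((ret = phase("do_check")))				return ret;
	if ((ret = phase("check_max_stack_depth")))		return ret;
	if ((ret = phase("dead code elimination")))		return ret;
	if ((ret = phase("convert_ctx_accesses")))		return ret;
	if ((ret = phase("fixup_bpf_calls")))			return ret;
	if ((ret = phase("fixup_call_args")))			return ret;
	return 0;
}

int main(void)
{
	return bpf_check_sketch(NULL);
}
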