Lines Matching refs:callee

8846 struct bpf_func_state *callee,
8851 struct bpf_func_state *callee, int insn_idx);
8858 struct bpf_func_state *caller, *callee; in __check_func_call() local
8927 callee = async_cb->frame[0]; in __check_func_call()
8928 callee->async_entry_cnt = caller->async_entry_cnt + 1; in __check_func_call()
8931 err = set_callee_state_cb(env, caller, callee, *insn_idx); in __check_func_call()
8942 callee = kzalloc(sizeof(*callee), GFP_KERNEL); in __check_func_call()
8943 if (!callee) in __check_func_call()
8945 state->frame[state->curframe + 1] = callee; in __check_func_call()
8951 init_func_state(env, callee, in __check_func_call()
8958 err = copy_reference_state(callee, caller); in __check_func_call()
8962 err = set_callee_state_cb(env, caller, callee, *insn_idx); in __check_func_call()
8978 print_verifier_state(env, callee, true); in __check_func_call()
8983 free_func_state(callee); in __check_func_call()
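
Taken together, the __check_func_call() hits above trace the frame push done for every call instruction: a fresh bpf_func_state is allocated for the callee (8942-8945), the caller's reference state is copied into it (8958), and the helper-specific set_callee_state_cb() seeds the callee's argument registers (8962). The following is a simplified reading aid with error paths and the async-callback branch (8927-8931) omitted; it is not the verbatim kernel code:

	/* Sketch of the frame push in __check_func_call(); simplified. */
	static int push_callee_frame_sketch(struct bpf_verifier_env *env,
					    struct bpf_verifier_state *state,
					    int *insn_idx, int subprog,
					    set_callee_state_fn set_callee_state_cb)
	{
		struct bpf_func_state *caller, *callee;
		int err;

		caller = state->frame[state->curframe];

		/* allocate and wire up the new frame (cf. 8942-8945) */
		callee = kzalloc(sizeof(*callee), GFP_KERNEL);
		if (!callee)
			return -ENOMEM;
		state->frame[state->curframe + 1] = callee;

		init_func_state(env, callee,
				*insn_idx /* callsite */,
				state->curframe + 1 /* frameno */,
				subprog);

		/* the callee inherits the caller's acquired references (8958) */
		err = copy_reference_state(callee, caller);
		if (err)
			return err;

		/* helper-specific seeding of callee->regs[BPF_REG_1..5] (8962) */
		err = set_callee_state_cb(env, caller, callee, *insn_idx);
		if (err)
			return err;

		state->curframe++;
		return 0;
	}
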
8990 struct bpf_func_state *callee) in map_set_for_each_callback_args() argument
8997 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; in map_set_for_each_callback_args()
8999 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; in map_set_for_each_callback_args()
9000 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in map_set_for_each_callback_args()
9001 callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr; in map_set_for_each_callback_args()
9003 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; in map_set_for_each_callback_args()
9004 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); in map_set_for_each_callback_args()
9005 callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr; in map_set_for_each_callback_args()
9008 callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3]; in map_set_for_each_callback_args()
9011 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in map_set_for_each_callback_args()
9017 struct bpf_func_state *callee, int insn_idx) in set_callee_state() argument
9025 callee->regs[i] = caller->regs[i]; in set_callee_state()
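
set_callee_state() covers the plain BPF-to-BPF call: the copy at 9025 moves R1-R5 from the caller into the callee frame unchanged. From the program side this is an ordinary static subprogram call; a minimal hypothetical example (function and section names are illustrative):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	/* args arrive in R1-R3; set_callee_state() copies them verbatim */
	static __noinline int clamp_val(int v, int lo, int hi)
	{
		return v < lo ? lo : v > hi ? hi : v;
	}

	SEC("tp/syscalls/sys_enter_write")
	int prog(void *ctx)
	{
		return clamp_val(42, 0, 10);	/* pushes a callee frame here */
	}

	char LICENSE[] SEC("license") = "GPL";
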
9047 struct bpf_func_state *callee, in set_map_elem_callback_state() argument
9066 err = map->ops->map_set_for_each_callback_args(env, caller, callee); in set_map_elem_callback_state()
9070 callee->in_callback_fn = true; in set_map_elem_callback_state()
9071 callee->callback_ret_range = tnum_range(0, 1); in set_map_elem_callback_state()
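
set_map_elem_callback_state(), via the map_set_for_each_callback_args() lines above, handles bpf_for_each_map_elem(): R1 = map, R2 = key, R3 = value, R4 = callback_ctx, R5 unset, and the callback may only return 0 (continue) or 1 (stop). A hypothetical program matching that register layout (map layout and names are illustrative):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, 16);
		__type(key, __u32);
		__type(value, __u64);
	} counters SEC(".maps");

	struct cb_ctx { __u64 sum; };

	/* R1 = map, R2 = key, R3 = value, R4 = callback_ctx */
	static __u64 sum_elem(struct bpf_map *map, __u32 *key, __u64 *val,
			      struct cb_ctx *ctx)
	{
		ctx->sum += *val;
		return 0;	/* must stay in callback_ret_range, i.e. [0, 1] */
	}

	SEC("tp/syscalls/sys_enter_getpid")
	int sum_counters(void *c)
	{
		struct cb_ctx ctx = { };

		bpf_for_each_map_elem(&counters, sum_elem, &ctx, 0);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";
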
9077 struct bpf_func_state *callee, in set_loop_callback_state() argument
9084 callee->regs[BPF_REG_1].type = SCALAR_VALUE; in set_loop_callback_state()
9085 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; in set_loop_callback_state()
9088 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_loop_callback_state()
9089 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_loop_callback_state()
9090 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_loop_callback_state()
9092 callee->in_callback_fn = true; in set_loop_callback_state()
9093 callee->callback_ret_range = tnum_range(0, 1); in set_loop_callback_state()
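
set_loop_callback_state() handles bpf_loop(): R1 is the scalar loop index, R2 is the callback_ctx taken from the caller's R3, and R3-R5 are not passed. A small hypothetical user:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct loop_ctx { __u64 acc; };

	/* R1 = index (scalar), R2 = callback_ctx */
	static int add_index(__u32 index, void *data)
	{
		struct loop_ctx *lc = data;

		lc->acc += index;
		return 0;	/* 0 = continue, 1 = break; only [0, 1] allowed */
	}

	SEC("tp/syscalls/sys_enter_getpid")
	int run_loop(void *ctx)
	{
		struct loop_ctx lc = { };

		bpf_loop(100, add_index, &lc, 0);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";
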
9099 struct bpf_func_state *callee, in set_timer_callback_state() argument
9107 callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP; in set_timer_callback_state()
9108 __mark_reg_known_zero(&callee->regs[BPF_REG_1]); in set_timer_callback_state()
9109 callee->regs[BPF_REG_1].map_ptr = map_ptr; in set_timer_callback_state()
9111 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; in set_timer_callback_state()
9112 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in set_timer_callback_state()
9113 callee->regs[BPF_REG_2].map_ptr = map_ptr; in set_timer_callback_state()
9115 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; in set_timer_callback_state()
9116 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); in set_timer_callback_state()
9117 callee->regs[BPF_REG_3].map_ptr = map_ptr; in set_timer_callback_state()
9120 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_timer_callback_state()
9121 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_timer_callback_state()
9122 callee->in_async_callback_fn = true; in set_timer_callback_state()
9123 callee->callback_ret_range = tnum_range(0, 1); in set_timer_callback_state()
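
set_timer_callback_state() seeds the bpf_timer_set_callback() callback: R1 = the timer's map, R2 = the element key, R3 = the map value embedding the struct bpf_timer; R4/R5 are unset and the frame is flagged in_async_callback_fn rather than in_callback_fn. A hypothetical sketch (map layout and section are illustrative):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	#define CLOCK_MONOTONIC 1

	struct elem {
		struct bpf_timer t;
		__u64 fired;
	};

	struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, 1);
		__type(key, int);
		__type(value, struct elem);
	} timers SEC(".maps");

	/* R1 = map, R2 = key, R3 = map value holding the timer */
	static int timer_cb(void *map, int *key, struct elem *val)
	{
		val->fired++;
		return 0;
	}

	SEC("tp/syscalls/sys_enter_getpid")
	int arm_timer(void *ctx)
	{
		int key = 0;
		struct elem *val = bpf_map_lookup_elem(&timers, &key);

		if (!val)
			return 0;
		bpf_timer_init(&val->t, &timers, CLOCK_MONOTONIC);
		bpf_timer_set_callback(&val->t, timer_cb);
		bpf_timer_start(&val->t, 1000000 /* ns */, 0);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";
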
9129 struct bpf_func_state *callee, in set_find_vma_callback_state() argument
9137 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; in set_find_vma_callback_state()
9139 callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID; in set_find_vma_callback_state()
9140 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in set_find_vma_callback_state()
9141 callee->regs[BPF_REG_2].btf = btf_vmlinux; in set_find_vma_callback_state()
9142 callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA], in set_find_vma_callback_state()
9145 callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4]; in set_find_vma_callback_state()
9148 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_find_vma_callback_state()
9149 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_find_vma_callback_state()
9150 callee->in_callback_fn = true; in set_find_vma_callback_state()
9151 callee->callback_ret_range = tnum_range(0, 1); in set_find_vma_callback_state()
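
set_find_vma_callback_state() handles bpf_find_vma(): R1 is the task from the caller's R1, R2 is a PTR_TO_BTF_ID vm_area_struct, R3 is the callback_ctx from the caller's R4. A hypothetical user, assuming a bpftool-generated vmlinux.h:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>

	struct vma_ctx { __u64 vm_start; };

	/* R1 = task, R2 = vma (PTR_TO_BTF_ID), R3 = callback_ctx */
	static long record_vma(struct task_struct *task,
			       struct vm_area_struct *vma,
			       struct vma_ctx *ctx)
	{
		ctx->vm_start = vma->vm_start;
		return 0;
	}

	SEC("raw_tp/sys_enter")
	int find_my_vma(void *ctx)
	{
		struct task_struct *task = bpf_get_current_task_btf();
		struct vma_ctx vctx = { };

		bpf_find_vma(task, 0x400000 /* example address */,
			     record_vma, &vctx, 0);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";
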
9157 struct bpf_func_state *callee, in set_user_ringbuf_callback_state() argument
9164 __mark_reg_not_init(env, &callee->regs[BPF_REG_0]); in set_user_ringbuf_callback_state()
9165 mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL); in set_user_ringbuf_callback_state()
9166 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; in set_user_ringbuf_callback_state()
9169 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_user_ringbuf_callback_state()
9170 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_user_ringbuf_callback_state()
9171 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_user_ringbuf_callback_state()
9173 callee->in_callback_fn = true; in set_user_ringbuf_callback_state()
9174 callee->callback_ret_range = tnum_range(0, 1); in set_user_ringbuf_callback_state()
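
set_user_ringbuf_callback_state() handles bpf_user_ringbuf_drain(): R0 starts uninitialized, R1 is a local dynptr over the current user-space sample, and R2 is the callback_ctx from the caller's R3. A hypothetical user (map sizing and names are illustrative):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_USER_RINGBUF);
		__uint(max_entries, 4096);
	} user_rb SEC(".maps");

	struct drain_ctx { long handled; };

	/* R1 = dynptr over the sample, R2 = callback_ctx */
	static long handle_sample(struct bpf_dynptr *dynptr, void *context)
	{
		struct drain_ctx *ctx = context;
		__u64 sample;

		if (bpf_dynptr_read(&sample, sizeof(sample), dynptr, 0, 0))
			return 1;	/* stop draining on a short sample */
		ctx->handled++;
		return 0;
	}

	SEC("tp/syscalls/sys_enter_getpid")
	int drain(void *c)
	{
		struct drain_ctx ctx = { };

		bpf_user_ringbuf_drain(&user_rb, handle_sample, &ctx, 0);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";
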
9180 struct bpf_func_state *callee, in set_rbtree_add_callback_state() argument
9197 mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root); in set_rbtree_add_callback_state()
9198 ref_set_non_owning(env, &callee->regs[BPF_REG_1]); in set_rbtree_add_callback_state()
9199 mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root); in set_rbtree_add_callback_state()
9200 ref_set_non_owning(env, &callee->regs[BPF_REG_2]); in set_rbtree_add_callback_state()
9202 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_rbtree_add_callback_state()
9203 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_rbtree_add_callback_state()
9204 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_rbtree_add_callback_state()
9205 callee->in_callback_fn = true; in set_rbtree_add_callback_state()
9206 callee->callback_ret_range = tnum_range(0, 1); in set_rbtree_add_callback_state()
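
set_rbtree_add_callback_state() seeds the bpf_rbtree_add() less() callback with two non-owning graph-node references in R1 and R2; in_rbtree_lock_required_cb() below additionally ties the callback to the held rbtree lock. A hypothetical sketch, assuming the bpf_experimental.h declarations shipped with the kernel selftests (bpf_obj_new, bpf_rbtree_add, __contains) and a bpftool-generated vmlinux.h:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include "bpf_experimental.h"

	#ifndef container_of
	#define container_of(ptr, type, member) \
		((type *)((void *)(ptr) - __builtin_offsetof(type, member)))
	#endif
	#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

	struct node_data {
		long key;
		struct bpf_rb_node node;
	};

	private(A) struct bpf_spin_lock glock;
	private(A) struct bpf_rb_root groot __contains(node_data, node);

	/* R1 and R2 are the two bpf_rb_node graph nodes being compared */
	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
	{
		struct node_data *na = container_of(a, struct node_data, node);
		struct node_data *nb = container_of(b, struct node_data, node);

		return na->key < nb->key;
	}

	SEC("tc")
	int add_node(void *ctx)
	{
		struct node_data *n = bpf_obj_new(typeof(*n));

		if (!n)
			return 0;
		n->key = 42;

		bpf_spin_lock(&glock);	/* less() runs with this lock held */
		bpf_rbtree_add(&groot, &n->node, less);
		bpf_spin_unlock(&glock);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";
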
9220 struct bpf_func_state *callee; in in_rbtree_lock_required_cb() local
9226 callee = state->frame[state->curframe]; in in_rbtree_lock_required_cb()
9228 if (!callee->in_callback_fn) in in_rbtree_lock_required_cb()
9231 kfunc_btf_id = insn[callee->callsite].imm; in in_rbtree_lock_required_cb()
9238 struct bpf_func_state *caller, *callee; in prepare_func_exit() local
9242 callee = state->frame[state->curframe]; in prepare_func_exit()
9243 r0 = &callee->regs[BPF_REG_0]; in prepare_func_exit()
9256 if (callee->in_callback_fn) { in prepare_func_exit()
9258 struct tnum range = callee->callback_ret_range; in prepare_func_exit()
9278 if (!callee->in_callback_fn) { in prepare_func_exit()
9280 err = copy_reference_state(caller, callee); in prepare_func_exit()
9285 *insn_idx = callee->callsite + 1; in prepare_func_exit()
9288 print_verifier_state(env, callee, true); in prepare_func_exit()
9293 free_func_state(callee); in prepare_func_exit()
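
The prepare_func_exit() hits show the matching frame pop: a callback frame's R0 must fall inside callee->callback_ret_range (tnum_range(0, 1) in every setup function above), while a real subprogram's R0 and reference state flow back to the caller, and verification resumes at callsite + 1. A simplified reading aid, again not the verbatim kernel code:

	/* Sketch of the frame pop in prepare_func_exit(); simplified. */
	static int pop_callee_frame_sketch(struct bpf_verifier_env *env,
					   struct bpf_verifier_state *state,
					   int *insn_idx)
	{
		struct bpf_func_state *caller, *callee;
		struct bpf_reg_state *r0;
		int err;

		callee = state->frame[state->curframe];
		r0 = &callee->regs[BPF_REG_0];

		if (callee->in_callback_fn) {
			/* callbacks may only return a value inside
			 * callback_ret_range, i.e. tnum_range(0, 1) above
			 */
			struct tnum range = callee->callback_ret_range;

			if (!tnum_in(range, r0->var_off))
				return -EINVAL;
		}

		state->curframe--;
		caller = state->frame[state->curframe];

		if (!callee->in_callback_fn) {
			/* a real subprogram returns r0 and its acquired
			 * references to the caller (9280)
			 */
			caller->regs[BPF_REG_0] = *r0;
			err = copy_reference_state(caller, callee);
			if (err)
				return err;
		}

		/* resume right after the call instruction (9285) */
		*insn_idx = callee->callsite + 1;

		state->frame[state->curframe + 1] = NULL;
		free_func_state(callee);
		return 0;
	}
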