Lines matching refs: BPF_REG_SIZE (kernel/bpf/verifier.c)

615 	return (-off - 1) / BPF_REG_SIZE;  in get_spi()
620 int allocated_slots = state->allocated_stack / BPF_REG_SIZE; in is_spi_bounds_valid()
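
Together, get_spi() (line 615) and is_spi_bounds_valid() (line 620) turn a negative frame-pointer offset into a zero-based slot index and bounds-check it against the slots covered by allocated_stack. A minimal standalone sketch of that arithmetic (BPF_REG_SIZE is sizeof(u64), i.e. 8; the simplified is_spi_bounds_valid() signature and the main() driver are illustrative, not the kernel's):

    #include <stdio.h>

    #define BPF_REG_SIZE 8  /* sizeof(u64) in the kernel */

    /* fp-1..fp-8 -> slot 0, fp-9..fp-16 -> slot 1, and so on */
    static int get_spi(int off)
    {
        return (-off - 1) / BPF_REG_SIZE;
    }

    /* a slot index is usable only if it falls inside allocated_stack */
    static int is_spi_bounds_valid(int spi, int allocated_stack)
    {
        int allocated_slots = allocated_stack / BPF_REG_SIZE;

        return spi < allocated_slots;
    }

    int main(void)
    {
        printf("%d %d %d\n", get_spi(-1), get_spi(-8), get_spi(-9)); /* 0 0 1 */
        printf("%d\n", is_spi_bounds_valid(get_spi(-16), 16));       /* 1 */
        return 0;
    }
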
712 for (i = 0; i < BPF_REG_SIZE; i++) { in mark_stack_slots_dynptr()
748 for (i = 0; i < BPF_REG_SIZE; i++) { in unmark_stack_slots_dynptr()
776 for (i = 0; i < BPF_REG_SIZE; i++) { in is_dynptr_reg_valid_uninit()
796 for (i = 0; i < BPF_REG_SIZE; i++) { in is_dynptr_reg_valid_init()
827 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; in is_spilled_reg()
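
The loops at lines 712-796 and the check at line 827 rest on the same layout: every 8-byte stack slot carries a slot_type[BPF_REG_SIZE] array with one type tag per byte, so the dynptr helpers walk all eight bytes while is_spilled_reg() only inspects the highest one. A hedged sketch (struct and enum names are trimmed stand-ins for the kernel's bpf_stack_state and bpf_stack_slot_type):

    #include <stdbool.h>

    #define BPF_REG_SIZE 8

    enum { STACK_INVALID, STACK_SPILL, STACK_MISC, STACK_ZERO, STACK_DYNPTR };

    struct stack_slot {
        unsigned char slot_type[BPF_REG_SIZE];  /* one tag per byte */
    };

    static bool is_spilled_reg(const struct stack_slot *stack)
    {
        /* a spill tags the slot from the top byte down, so the last
         * byte alone identifies a (possibly partial) register spill */
        return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
    }

    static void mark_dynptr_slot(struct stack_slot *stack)
    {
        int i;

        /* mark_stack_slots_dynptr()-style loop: claim every byte */
        for (i = 0; i < BPF_REG_SIZE; i++)
            stack->slot_type[i] = STACK_DYNPTR;
    }
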
931 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in print_verifier_state()
932 char types_buf[BPF_REG_SIZE + 1]; in print_verifier_state()
936 for (j = 0; j < BPF_REG_SIZE; j++) { in print_verifier_state()
942 types_buf[BPF_REG_SIZE] = 0; in print_verifier_state()
947 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); in print_verifier_state()
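
The print_verifier_state() lines above build one character per byte into types_buf and label each slot with its frame-pointer offset. A sketch, assuming the kernel's '?', 'r', 'm', '0' characters for the four base slot types:

    #include <stdio.h>

    #define BPF_REG_SIZE 8

    enum { STACK_INVALID, STACK_SPILL, STACK_MISC, STACK_ZERO };

    static const char slot_type_char[] = {
        [STACK_INVALID] = '?',
        [STACK_SPILL]   = 'r',
        [STACK_MISC]    = 'm',
        [STACK_ZERO]    = '0',
    };

    static void dump_stack(const unsigned char (*slot_type)[BPF_REG_SIZE],
                           int allocated_stack)
    {
        int i, j;

        for (i = 0; i < allocated_stack / BPF_REG_SIZE; i++) {
            char types_buf[BPF_REG_SIZE + 1];

            for (j = 0; j < BPF_REG_SIZE; j++)
                types_buf[j] = slot_type_char[slot_type[i][j]];
            types_buf[BPF_REG_SIZE] = 0;
            /* slot 0 prints as fp-8, slot 1 as fp-16, ... */
            printf(" fp%d=%s", (-i - 1) * BPF_REG_SIZE, types_buf);
        }
        printf("\n");
    }
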
1062 size_t n = src->allocated_stack / BPF_REG_SIZE; in copy_stack_state()
1086 size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE; in grow_stack_state()
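
copy_stack_state() and grow_stack_state() work in whole slots: byte counts are divided by BPF_REG_SIZE, and the callers at lines 3035 and 3174 first round the requested size up to a slot boundary. A small sketch of that arithmetic with the kernel's round_up() written out:

    #define BPF_REG_SIZE 8

    /* round a byte count up to the next slot boundary */
    static int round_up_to_slot(int bytes)
    {
        return (bytes + BPF_REG_SIZE - 1) / BPF_REG_SIZE * BPF_REG_SIZE;
    }

    /* a write ending at byte index 'slot' (0-based, counting down from
     * fp-1) needs round_up(slot + 1, BPF_REG_SIZE) bytes of tracked stack */
    static int slots_needed(int slot)
    {
        return round_up_to_slot(slot + 1) / BPF_REG_SIZE;
    }
    /* e.g. a store to fp-12 has slot = 11, needs 16 bytes -> 2 slots */
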
2629 spi = (-insn->off - 1) / BPF_REG_SIZE; in backtrack_insn()
2646 spi = (-insn->off - 1) / BPF_REG_SIZE; in backtrack_insn()
2760 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_precise()
2884 if (i >= func->allocated_stack / BPF_REG_SIZE) { in __mark_chain_precision()
3010 if (size == BPF_REG_SIZE) in save_register_state()
3013 for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) in save_register_state()
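
save_register_state() (lines 3010-3013) distinguishes full from narrow spills: only an 8-byte store keeps the spilled register state fully trusted, and the loop tags just the top 'size' bytes of the slot as STACK_SPILL. A sketch of the tagging, assuming the same top-down byte order:

    #define BPF_REG_SIZE 8

    enum { STACK_INVALID, STACK_SPILL };

    /* tag the top 'size' bytes of the slot as spill data; for
     * size == BPF_REG_SIZE the verifier additionally records the
     * complete spilled register state for the slot */
    static void tag_spill(unsigned char slot_type[BPF_REG_SIZE], int size)
    {
        int i;

        for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
            slot_type[i - 1] = STACK_SPILL;
    }
    /* tag_spill(st, 4) marks bytes 7, 6, 5, 4 and leaves 3..0 untouched */
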
3031 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; in check_stack_write_fixed_off()
3035 err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE)); in check_stack_write_fixed_off()
3043 size != BPF_REG_SIZE) { in check_stack_write_fixed_off()
3066 if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && in check_stack_write_fixed_off()
3082 if (size != BPF_REG_SIZE) { in check_stack_write_fixed_off()
3099 for (i = 0; i < BPF_REG_SIZE; i++) in check_stack_write_fixed_off()
3110 if (size == BPF_REG_SIZE) in check_stack_write_fixed_off()
3124 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = in check_stack_write_fixed_off()
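
check_stack_write_fixed_off() converts (fp + off) into slot coordinates: slot = -off - 1 is a byte index counting down from fp-1, spi = slot / BPF_REG_SIZE selects the slot, and (slot - i) % BPF_REG_SIZE selects the byte for each of the 'size' bytes written. A sketch for a write contained in one slot (STACK_MISC stands in for the type the verifier actually computes):

    #include <assert.h>

    #define BPF_REG_SIZE 8

    enum { STACK_INVALID, STACK_SPILL, STACK_MISC };

    static void write_bytes(unsigned char slot_type[BPF_REG_SIZE],
                            int off, int size)
    {
        int slot = -off - 1;    /* 0-based byte index below fp */
        int i;

        /* this sketch assumes the write does not straddle a slot */
        assert(off < 0 && size <= BPF_REG_SIZE);
        assert(slot % BPF_REG_SIZE >= size - 1);
        for (i = 0; i < size; i++)
            slot_type[(slot - i) % BPF_REG_SIZE] = STACK_MISC;
    }
    /* off = -8, size = 4: slot = 7, bytes 7, 6, 5, 4 of slot 0 become MISC */
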
3174 err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE)); in check_stack_write_var_off()
3185 spi = slot / BPF_REG_SIZE; in check_stack_write_var_off()
3186 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_write_var_off()
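
The variable-offset write path (lines 3185-3186) cannot assume a single slot, so it recomputes spi and the byte index for every byte in [min_off, max_off). A sketch of that per-byte walk (the stack is flattened to an array of slot_type arrays here):

    #define BPF_REG_SIZE 8

    enum { STACK_INVALID, STACK_MISC };

    /* mark every byte the variable-offset write might touch */
    static void write_var_off(unsigned char (*stack)[BPF_REG_SIZE],
                              int min_off, int max_off)
    {
        int i;

        for (i = min_off; i < max_off; i++) {
            int slot = -i - 1;
            int spi = slot / BPF_REG_SIZE;

            stack[spi][slot % BPF_REG_SIZE] = STACK_MISC;
        }
    }
    /* min_off = -12, max_off = -4 touches slot 1 bytes 3..0
     * and slot 0 bytes 7..4 */
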
3257 spi = slot / BPF_REG_SIZE; in mark_reg_stack_read()
3259 if (stype[slot % BPF_REG_SIZE] != STACK_ZERO) in mark_reg_stack_read()
3302 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; in check_stack_read_fixed_off()
3312 for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--) in check_stack_read_fixed_off()
3315 if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) { in check_stack_read_fixed_off()
3326 if (!(off % BPF_REG_SIZE) && size == spill_size) { in check_stack_read_fixed_off()
3336 type = stype[(slot - i) % BPF_REG_SIZE]; in check_stack_read_fixed_off()
3373 type = stype[(slot - i) % BPF_REG_SIZE]; in check_stack_read_fixed_off()
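
On the read side, check_stack_read_fixed_off() first measures the spill: starting from the top byte of the slot it counts contiguous STACK_SPILL tags downward (line 3312), then treats anything other than an 8-byte fill of an 8-byte spill as a narrow access (line 3315). A sketch of the measurement:

    #define BPF_REG_SIZE 8

    enum { STACK_INVALID, STACK_SPILL, STACK_MISC };

    /* number of contiguous STACK_SPILL bytes from the top of the slot
     * down; call only when stype[BPF_REG_SIZE - 1] == STACK_SPILL */
    static int spill_size(const unsigned char stype[BPF_REG_SIZE])
    {
        int i, n = 1;

        for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--)
            n++;
        return n;
    }
    /* {m,m,m,m,r,r,r,r} -> 4; a full spill {r,r,r,r,r,r,r,r} -> 8 */
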
4952 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && in check_mem_access()
5148 spi = slot / BPF_REG_SIZE; in check_stack_range_initialized()
5151 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_range_initialized()
5171 for (j = 0; j < BPF_REG_SIZE; j++) in check_stack_range_initialized()
5843 if (arg_type_is_dynptr(arg_type) && reg->off % BPF_REG_SIZE) { in check_func_arg_reg_off()
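
Line 5843 enforces that a stack-resident dynptr argument starts exactly on a slot boundary, since dynptr state occupies whole slots. The predicate, as a one-line sketch:

    #define BPF_REG_SIZE 8

    /* a stack dynptr must begin on an 8-byte slot boundary;
     * 'off' is the (negative) frame-pointer offset being checked */
    static int dynptr_off_misaligned(int off)
    {
        return off % BPF_REG_SIZE != 0;
    }
    /* off = -16 is accepted, off = -12 is rejected */
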
11417 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { in clean_func_state()
11423 for (j = 0; j < BPF_REG_SIZE; j++) in clean_func_state()
11625 spi = i / BPF_REG_SIZE; in stacksafe()
11628 i += BPF_REG_SIZE - 1; in stacksafe()
11633 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) in stacksafe()
11646 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && in stacksafe()
11647 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) in stacksafe()
11649 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != in stacksafe()
11650 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) in stacksafe()
11657 if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) in stacksafe()
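
stacksafe() (lines 11625-11657) compares the old and current stack byte by byte with two shortcuts: slots the old state never read are skipped in one 8-byte stride (line 11628), and the expensive spilled-register comparison runs only on a slot's last byte so each slot is checked once. A simplified sketch of the byte-tag comparison (liveness tracking and the register comparison itself are elided):

    #include <stdbool.h>

    #define BPF_REG_SIZE 8

    enum { STACK_INVALID, STACK_SPILL, STACK_MISC, STACK_ZERO };

    static bool stack_types_safe(const unsigned char *old_types,
                                 const unsigned char *cur_types,
                                 int allocated_stack)
    {
        int i;

        for (i = 0; i < allocated_stack; i++) {
            /* the kernel first skips whole slots the old state never
             * read, advancing i by BPF_REG_SIZE - 1 and continuing */
            if (old_types[i] == STACK_INVALID)
                continue;   /* old wrote nothing here: no constraint */
            if (old_types[i] == STACK_MISC && cur_types[i] == STACK_ZERO)
                continue;   /* a zeroed byte satisfies an old MISC byte */
            if (old_types[i] != cur_types[i])
                return false;
            if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
                continue;
            /* last byte of a slot: the kernel compares the spilled
             * register states of slot i / BPF_REG_SIZE here */
        }
        return true;
    }
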
11829 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && in propagate_liveness()
11830 i < parent->allocated_stack / BPF_REG_SIZE; i++) { in propagate_liveness()
11865 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in propagate_precision()
11874 (-i - 1) * BPF_REG_SIZE); in propagate_precision()
12114 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { in is_state_visited()
14433 s32 r6_offset = stack_base + 0 * BPF_REG_SIZE; in inline_bpf_loop()
14434 s32 r7_offset = stack_base + 1 * BPF_REG_SIZE; in inline_bpf_loop()
14435 s32 r8_offset = stack_base + 2 * BPF_REG_SIZE; in inline_bpf_loop()
14535 stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup; in optimize_bpf_loop()
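
inline_bpf_loop() (lines 14433-14435) spills r6, r7 and r8 to three consecutive scratch slots so it can reuse them for the inlined loop, and optimize_bpf_loop() (line 14535) grows the stack by exactly those three slots plus whatever padding keeps the depth 8-byte aligned. A sketch of the size computation:

    #define BPF_REG_SIZE 8
    #define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

    /* bytes to grow the stack by: pad the current depth to an 8-byte
     * boundary, then add three spill slots for r6, r7 and r8 */
    static int loop_inline_stack_extra(int stack_depth)
    {
        int stack_depth_roundup =
            round_up(stack_depth, BPF_REG_SIZE) - stack_depth;

        return BPF_REG_SIZE * 3 + stack_depth_roundup;
    }
    /* stack_depth = 20: 4 bytes of padding to reach 24, plus 24 bytes
     * for r6-r8, i.e. 28 extra; the scratch slots then sit at
     * stack_base + {0, 1, 2} * BPF_REG_SIZE */
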