Lines Matching refs:BPF_REG_SIZE (kernel/bpf/verifier.c)
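For context, BPF_REG_SIZE is the width of one eBPF register, and therefore of one
verifier stack slot, in bytes. It is defined in include/linux/bpf_verifier.h as:

        #define BPF_REG_SIZE 8  /* size of eBPF register in bytes */

Every division and modulo operation in the matches below converts between byte
offsets and these 8-byte slot indices; a worked sketch of that arithmetic follows
the list.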
704 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in print_verifier_state()
705 char types_buf[BPF_REG_SIZE + 1]; in print_verifier_state()
709 for (j = 0; j < BPF_REG_SIZE; j++) { in print_verifier_state()
715 types_buf[BPF_REG_SIZE] = 0; in print_verifier_state()
718 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); in print_verifier_state()
808 size_t n = src->allocated_stack / BPF_REG_SIZE; in copy_stack_state()
832 size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE; in grow_stack_state()
2242 spi = (-insn->off - 1) / BPF_REG_SIZE; in backtrack_insn()
2261 spi = (-insn->off - 1) / BPF_REG_SIZE; in backtrack_insn()
2375 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_precise()
2499 if (i >= func->allocated_stack / BPF_REG_SIZE) { in __mark_chain_precision()
2636 for (i = 0; i < BPF_REG_SIZE; i++) in save_register_state()
2650 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; in check_stack_write_fixed_off()
2654 err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE)); in check_stack_write_fixed_off()
2662 size != BPF_REG_SIZE) { in check_stack_write_fixed_off()
2684 if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) && in check_stack_write_fixed_off()
2700 if (size != BPF_REG_SIZE) { in check_stack_write_fixed_off()
2717 for (i = 0; i < BPF_REG_SIZE; i++) in check_stack_write_fixed_off()
2728 if (size == BPF_REG_SIZE) in check_stack_write_fixed_off()
2742 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = in check_stack_write_fixed_off()
2792 err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE)); in check_stack_write_var_off()
2803 spi = slot / BPF_REG_SIZE; in check_stack_write_var_off()
2804 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_write_var_off()
2874 spi = slot / BPF_REG_SIZE; in mark_reg_stack_read()
2876 if (stype[slot % BPF_REG_SIZE] != STACK_ZERO) in mark_reg_stack_read()
2919 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; in check_stack_read_fixed_off()
2927 if (size != BPF_REG_SIZE) { in check_stack_read_fixed_off()
2940 for (i = 1; i < BPF_REG_SIZE; i++) { in check_stack_read_fixed_off()
2941 if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { in check_stack_read_fixed_off()
2971 type = stype[(slot - i) % BPF_REG_SIZE]; in check_stack_read_fixed_off()
4320 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && in check_mem_access()
4503 spi = slot / BPF_REG_SIZE; in check_stack_range_initialized()
4506 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_range_initialized()
4526 for (j = 0; j < BPF_REG_SIZE; j++) in check_stack_range_initialized()
10118 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { in clean_func_state()
10124 for (j = 0; j < BPF_REG_SIZE; j++) in clean_func_state()
10327 spi = i / BPF_REG_SIZE; in stacksafe()
10330 i += BPF_REG_SIZE - 1; in stacksafe()
10335 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) in stacksafe()
10348 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && in stacksafe()
10349 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) in stacksafe()
10351 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != in stacksafe()
10352 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) in stacksafe()
10359 if (i % BPF_REG_SIZE) in stacksafe()
10531 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && in propagate_liveness()
10532 i < parent->allocated_stack / BPF_REG_SIZE; i++) { in propagate_liveness()
10567 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in propagate_precision()
10576 (-i - 1) * BPF_REG_SIZE); in propagate_precision()
10816 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { in is_state_visited()
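Nearly every match above performs one of three related calculations: turning a
negative frame-pointer offset into a stack-slot index (spi), finding a byte's
position inside a slot's slot_type[] array, or mapping a slot index back to an
fp-relative offset for printing. Below is a minimal standalone sketch of those
mappings, assuming BPF_REG_SIZE is 8 as above; the helper names spi_of(),
slot_byte(), and fp_off_of_spi() are hypothetical, chosen for illustration, and
are not kernel functions.

        #include <stdio.h>

        #define BPF_REG_SIZE 8

        /*
         * An access at fp+off (off < 0) touches 0-based byte `slot` below fp;
         * dividing by BPF_REG_SIZE yields the stack-slot index, as in
         * check_stack_write_fixed_off() and backtrack_insn() above.
         */
        static int spi_of(int off)
        {
                int slot = -off - 1;

                return slot / BPF_REG_SIZE;
        }

        /*
         * Byte position within the slot, as used to index slot_type[] in
         * check_stack_write_var_off() and check_stack_range_initialized().
         */
        static int slot_byte(int off)
        {
                return (-off - 1) % BPF_REG_SIZE;
        }

        /*
         * print_verifier_state() maps a slot index back to an fp-relative
         * offset with (-i - 1) * BPF_REG_SIZE: slot spi spans the bytes
         * from fp - 8 * (spi + 1) up to fp - 8 * spi - 1.
         */
        static int fp_off_of_spi(int spi)
        {
                return (-spi - 1) * BPF_REG_SIZE;
        }

        int main(void)
        {
                int off;

                for (off = -1; off >= -24; off -= 7)
                        printf("off=%-4d -> spi=%d byte=%d (slot prints as fp%d)\n",
                               off, spi_of(off), slot_byte(off),
                               fp_off_of_spi(spi_of(off)));
                return 0;
        }

The same arithmetic accounts for the rounding and skipping in the matches: a
write at fp-8 has slot = 7, so grow_stack_state(state, round_up(slot + 1,
BPF_REG_SIZE)) keeps allocated_stack a whole number of slots, and stacksafe()
can advance with i += BPF_REG_SIZE - 1 to step over an entire slot at once.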