Lines Matching refs:BPF_REG_SIZE
670 return (-off - 1) / BPF_REG_SIZE; in __get_spi()
683 int allocated_slots = state->allocated_stack / BPF_REG_SIZE; in is_spi_bounds_valid()
706 if (off % BPF_REG_SIZE) { in stack_slot_obj_get_spi()
910 for (i = 0; i < BPF_REG_SIZE; i++) { in mark_stack_slots_dynptr()
948 for (i = 0; i < BPF_REG_SIZE; i++) { in invalidate_dynptr()
1008 for (i = 1; i < state->allocated_stack / BPF_REG_SIZE; i++) { in unmark_stack_slots_dynptr()
1066 for (i = 0; i < BPF_REG_SIZE; i++) { in destroy_if_dynptr_stack_slot()
1142 for (i = 0; i < BPF_REG_SIZE; i++) { in is_dynptr_reg_valid_init()
1203 for (j = 0; j < BPF_REG_SIZE; j++) in mark_stack_slots_iter()
1234 for (j = 0; j < BPF_REG_SIZE; j++) in unmark_stack_slots_iter()
1262 for (j = 0; j < BPF_REG_SIZE; j++) in is_iter_reg_valid_uninit()
1292 for (j = 0; j < BPF_REG_SIZE; j++) in is_iter_reg_valid_init()
1307 enum bpf_stack_slot_type type = stack->slot_type[BPF_REG_SIZE - 1]; in is_stack_slot_special()
1329 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; in is_spilled_reg()
1334 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL && in is_spilled_scalar_reg()
1441 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in print_verifier_state()
1442 char types_buf[BPF_REG_SIZE + 1]; in print_verifier_state()
1446 for (j = 0; j < BPF_REG_SIZE; j++) { in print_verifier_state()
1451 types_buf[BPF_REG_SIZE] = 0; in print_verifier_state()
1456 switch (state->stack[i].slot_type[BPF_REG_SIZE - 1]) { in print_verifier_state()
1461 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); in print_verifier_state()
1473 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); in print_verifier_state()
1485 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); in print_verifier_state()
1497 for (j = 0; j < BPF_REG_SIZE; j++) in print_verifier_state()
1499 types_buf[BPF_REG_SIZE] = 0; in print_verifier_state()
1501 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); in print_verifier_state()
1612 size_t n = src->allocated_stack / BPF_REG_SIZE; in copy_stack_state()
1636 size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE; in grow_stack_state()
3475 spi = (-insn->off - 1) / BPF_REG_SIZE; in backtrack_insn()
3492 spi = (-insn->off - 1) / BPF_REG_SIZE; in backtrack_insn()
3749 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_precise()
3779 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_imprecise()
3842 if (i >= func->allocated_stack / BPF_REG_SIZE) in mark_precise_scalar_ids()
3865 for (i = 0; i < func->allocated_stack / BPF_REG_SIZE; ++i) { in mark_precise_scalar_ids()
4120 if (i >= func->allocated_stack / BPF_REG_SIZE) { in __mark_chain_precision()
4272 if (size == BPF_REG_SIZE) in save_register_state()
4275 for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) in save_register_state()
4298 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; in check_stack_write_fixed_off()
4303 err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE)); in check_stack_write_fixed_off()
4311 size != BPF_REG_SIZE) { in check_stack_write_fixed_off()
4340 if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && in check_stack_write_fixed_off()
4357 } else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) && in check_stack_write_fixed_off()
4366 if (size != BPF_REG_SIZE) { in check_stack_write_fixed_off()
4383 for (i = 0; i < BPF_REG_SIZE; i++) in check_stack_write_fixed_off()
4394 if (size == BPF_REG_SIZE) in check_stack_write_fixed_off()
4409 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = in check_stack_write_fixed_off()
4461 err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE)); in check_stack_write_var_off()
4480 spi = slot / BPF_REG_SIZE; in check_stack_write_var_off()
4481 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_write_var_off()
4555 spi = slot / BPF_REG_SIZE; in mark_reg_stack_read()
4558 if (stype[slot % BPF_REG_SIZE] != STACK_ZERO) in mark_reg_stack_read()
4601 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; in check_stack_read_fixed_off()
4613 for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--) in check_stack_read_fixed_off()
4616 if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) { in check_stack_read_fixed_off()
4627 if (!(off % BPF_REG_SIZE) && size == spill_size) { in check_stack_read_fixed_off()
4637 type = stype[(slot - i) % BPF_REG_SIZE]; in check_stack_read_fixed_off()
4676 type = stype[(slot - i) % BPF_REG_SIZE]; in check_stack_read_fixed_off()
6668 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && in check_mem_access()
6878 if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) { in check_stack_range_initialized()
6892 spi = slot / BPF_REG_SIZE; in check_stack_range_initialized()
6895 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_range_initialized()
6912 for (j = 0; j < BPF_REG_SIZE; j++) in check_stack_range_initialized()
7475 nr_slots = t->size / BPF_REG_SIZE; in process_iter_arg()
7485 for (i = 0; i < nr_slots * 8; i += BPF_REG_SIZE) { in process_iter_arg()
15436 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { in clean_func_state()
15442 for (j = 0; j < BPF_REG_SIZE; j++) in clean_func_state()
15655 spi = i / BPF_REG_SIZE; in stacksafe()
15658 i += BPF_REG_SIZE - 1; in stacksafe()
15663 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) in stacksafe()
15667 old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC) in stacksafe()
15680 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && in stacksafe()
15681 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) in stacksafe()
15683 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != in stacksafe()
15684 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) in stacksafe()
15691 if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) in stacksafe()
15694 switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) { in stacksafe()
15920 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && in propagate_liveness()
15921 i < parent->allocated_stack / BPF_REG_SIZE; i++) { in propagate_liveness()
15963 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in propagate_precision()
15974 fr, (-i - 1) * BPF_REG_SIZE); in propagate_precision()
15976 verbose(env, ",fp%d", (-i - 1) * BPF_REG_SIZE); in propagate_precision()
16081 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in iter_active_depths_differ()
16349 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { in is_state_visited()
18858 s32 r6_offset = stack_base + 0 * BPF_REG_SIZE; in inline_bpf_loop()
18859 s32 r7_offset = stack_base + 1 * BPF_REG_SIZE; in inline_bpf_loop()
18860 s32 r8_offset = stack_base + 2 * BPF_REG_SIZE; in inline_bpf_loop()
18960 stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup; in optimize_bpf_loop()
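Nearly every hit above reduces to the same slot arithmetic: BPF_REG_SIZE is 8 (the width of an eBPF register), the verifier tracks the program stack in 8-byte slots, a negative frame-pointer offset is mapped to a slot index ("spi") via (-off - 1) / BPF_REG_SIZE, per-byte state lives in slot_type[slot % BPF_REG_SIZE], and allocated_stack / BPF_REG_SIZE gives the number of tracked slots. The standalone sketch below only illustrates that arithmetic; the helper names are made up for the example and are not kernel functions.

/*
 * Minimal standalone sketch of the slot arithmetic recurring in the
 * listing above. BPF_REG_SIZE is 8; the helper names are hypothetical,
 * only the arithmetic mirrors kernel/bpf/verifier.c.
 */
#include <stdio.h>

#define BPF_REG_SIZE 8

/* Map a negative offset from the frame pointer (e.g. -8, -16) to a slot
 * index, as in __get_spi() and check_stack_write_fixed_off(): fp-1..fp-8
 * land in slot 0, fp-9..fp-16 in slot 1, and so on. */
static int off_to_spi(int off)
{
	return (-off - 1) / BPF_REG_SIZE;
}

/* Byte position inside the slot's slot_type[] array for a given offset;
 * byte BPF_REG_SIZE - 1 is the one tested by is_spilled_reg() and
 * stacksafe(). */
static int off_to_slot_byte(int off)
{
	int slot = -off - 1;

	return slot % BPF_REG_SIZE;
}

/* Number of tracked slots for a given allocated_stack size, as used by
 * the "allocated_stack / BPF_REG_SIZE" loops in the listing. */
static int nr_slots(int allocated_stack)
{
	return allocated_stack / BPF_REG_SIZE;
}

int main(void)
{
	int offs[] = { -1, -8, -9, -16, -24 };
	unsigned int i;

	for (i = 0; i < sizeof(offs) / sizeof(offs[0]); i++)
		printf("off=%4d -> spi=%d byte=%d\n",
		       offs[i], off_to_spi(offs[i]), off_to_slot_byte(offs[i]));

	printf("allocated_stack=40 -> %d slots\n", nr_slots(40));
	return 0;
}

The same per-slot layout is what inline_bpf_loop() relies on at the end of the listing: it spills three callee-saved registers at stack_base + 0/1/2 * BPF_REG_SIZE, so optimize_bpf_loop() reserves BPF_REG_SIZE * 3 extra bytes of stack (plus rounding).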