Lines matching full:array
21 static void bpf_array_free_percpu(struct bpf_array *array) in bpf_array_free_percpu() argument
25 for (i = 0; i < array->map.max_entries; i++) { in bpf_array_free_percpu()
26 free_percpu(array->pptrs[i]); in bpf_array_free_percpu()
31 static int bpf_array_alloc_percpu(struct bpf_array *array) in bpf_array_alloc_percpu() argument
36 for (i = 0; i < array->map.max_entries; i++) { in bpf_array_alloc_percpu()
37 ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8, in bpf_array_alloc_percpu()
40 bpf_array_free_percpu(array); in bpf_array_alloc_percpu()
43 array->pptrs[i] = ptr; in bpf_array_alloc_percpu()
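
bpf_array_alloc_percpu()/bpf_array_free_percpu() give each array slot its own per-CPU region, with array->pptrs[i] holding the per-CPU pointer for element i. From user space, a lookup on such a map returns one value per possible CPU. A minimal sketch, assuming libbpf v0.7+ ("pcpu_demo" is an illustrative name, not from the source):

    #include <stdio.h>
    #include <stdlib.h>
    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>

    int main(void)
    {
        int ncpus = libbpf_num_possible_cpus();
        int fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, "pcpu_demo",
                                sizeof(__u32), sizeof(__u64), 4, NULL);
        __u64 *vals = calloc(ncpus, sizeof(__u64));  /* one slot per CPU */
        __u32 key = 0;

        if (fd < 0 || !vals)
            return 1;
        /* user space sees the element as an array of ncpus values */
        if (!bpf_map_lookup_elem(fd, &key, vals))
            for (int cpu = 0; cpu < ncpus; cpu++)
                printf("cpu %d: %llu\n", cpu, (unsigned long long)vals[cpu]);
        free(vals);
        return 0;
    }
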
88 struct bpf_array *array; in array_map_alloc() local
104 /* round up array size to nearest power of 2, in array_map_alloc()
113 array_size = sizeof(*array); in array_map_alloc()
118 * ensure array->value is exactly page-aligned in array_map_alloc()
136 array = data + PAGE_ALIGN(sizeof(struct bpf_array)) in array_map_alloc()
139 array = bpf_map_area_alloc(array_size, numa_node); in array_map_alloc()
141 if (!array) in array_map_alloc()
143 array->index_mask = index_mask; in array_map_alloc()
144 array->map.bypass_spec_v1 = bypass_spec_v1; in array_map_alloc()
147 bpf_map_init_from_attr(&array->map, attr); in array_map_alloc()
148 array->elem_size = elem_size; in array_map_alloc()
150 if (percpu && bpf_array_alloc_percpu(array)) { in array_map_alloc()
151 bpf_map_area_free(array); in array_map_alloc()
155 return &array->map; in array_map_alloc()
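
array_map_alloc() sizes the allocation as sizeof(struct bpf_array) plus the element area, rounds max_entries up to the nearest power of two to derive index_mask, and for BPF_F_MMAPABLE maps positions the struct so that array->value starts exactly page-aligned. From user space the whole allocation is a single call; a minimal sketch assuming libbpf v0.7+ (map name illustrative):

    #include <bpf/bpf.h>

    /* 256 slots of 8 bytes; array map keys are always a u32 index */
    int create_array_map(void)
    {
        return bpf_map_create(BPF_MAP_TYPE_ARRAY, "demo_arr",
                              sizeof(__u32), sizeof(__u64), 256, NULL);
    }
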
161 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_lookup_elem() local
164 if (unlikely(index >= array->map.max_entries)) in array_map_lookup_elem()
167 return array->value + array->elem_size * (index & array->index_mask); in array_map_lookup_elem()
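
array_map_lookup_elem() is a bounds check plus pointer arithmetic. The AND with index_mask is the Spectre v1 mitigation: the element area was allocated for roundup_pow_of_two(max_entries) slots, so even a speculatively executed access past the bounds check stays inside the allocation. The same arithmetic in standalone C (an illustrative mirror, not kernel code):

    #include <stdint.h>
    #include <stddef.h>

    static uint32_t roundup_pow2(uint32_t n)
    {
        uint32_t p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    static void *array_lookup(char *value_area, uint32_t elem_size,
                              uint32_t max_entries, uint32_t index)
    {
        /* the element area holds mask + 1 slots, so index & mask can
         * never escape it, mispredicted branch or not */
        uint32_t mask = roundup_pow2(max_entries) - 1;

        if (index >= max_entries)
            return NULL;
        return value_area + (size_t)elem_size * (index & mask);
    }
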
173 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_direct_value_addr() local
180 *imm = (unsigned long)array->value; in array_map_direct_value_addr()
187 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_direct_value_meta() local
188 u64 base = (unsigned long)array->value; in array_map_direct_value_meta()
189 u64 range = array->elem_size; in array_map_direct_value_meta()
203 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_gen_lookup() local
217 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); in array_map_gen_lookup()
236 struct bpf_array *array = container_of(map, struct bpf_array, map); in percpu_array_map_lookup_elem() local
239 if (unlikely(index >= array->map.max_entries)) in percpu_array_map_lookup_elem()
242 return this_cpu_ptr(array->pptrs[index & array->index_mask]); in percpu_array_map_lookup_elem()
247 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_percpu_array_copy() local
253 if (unlikely(index >= array->map.max_entries)) in bpf_percpu_array_copy()
262 pptr = array->pptrs[index & array->index_mask]; in bpf_percpu_array_copy()
274 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_get_next_key() local
278 if (index >= array->map.max_entries) { in array_map_get_next_key()
283 if (index == array->map.max_entries - 1) in array_map_get_next_key()
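
Array keys are dense, so array_map_get_next_key() simply hands back index + 1, reports the end at max_entries - 1, and treats an out-of-range key as "restart at 0". User-space iteration is therefore a plain walk; a sketch assuming libbpf (4-byte values for brevity):

    #include <stdio.h>
    #include <bpf/bpf.h>

    void dump_array(int fd)
    {
        __u32 key, next, val;
        __u32 *prev = NULL;  /* NULL key yields the first index */

        while (!bpf_map_get_next_key(fd, prev, &next)) {
            if (!bpf_map_lookup_elem(fd, &next, &val))
                printf("%u: %u\n", next, val);
            key = next;
            prev = &key;
        }
    }
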
300 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_update_elem() local
308 if (unlikely(index >= array->map.max_entries)) in array_map_update_elem()
320 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in array_map_update_elem()
321 memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]), in array_map_update_elem()
324 val = array->value + in array_map_update_elem()
325 array->elem_size * (index & array->index_mask); in array_map_update_elem()
330 check_and_free_timer_in_array(array, val); in array_map_update_elem()
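
array_map_update_elem() copies the new value in place: into this CPU's copy for BPF_MAP_TYPE_PERCPU_ARRAY, into the flat value area otherwise, then releases any timer held by the old value. Every index of an array map always exists, so an update with BPF_NOEXIST can never succeed. From user space:

    #include <bpf/bpf.h>

    /* overwrite slot 3; BPF_NOEXIST would fail with EEXIST on arrays */
    int set_slot(int fd)
    {
        __u32 key = 3;
        __u64 val = 42;

        return bpf_map_update_elem(fd, &key, &val, BPF_ANY);
    }
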
338 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_percpu_array_update() local
348 if (unlikely(index >= array->map.max_entries)) in bpf_percpu_array_update()
364 pptr = array->pptrs[index & array->index_mask]; in bpf_percpu_array_update()
379 static void *array_map_vmalloc_addr(struct bpf_array *array) in array_map_vmalloc_addr() argument
381 return (void *)round_down((unsigned long)array, PAGE_SIZE); in array_map_vmalloc_addr()
386 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_free_timers() local
392 for (i = 0; i < array->map.max_entries; i++) in array_map_free_timers()
393 bpf_timer_cancel_and_free(array->value + array->elem_size * i + in array_map_free_timers()
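
array_map_free_timers() walks every element and cancels the struct bpf_timer embedded at the recorded offset inside the value. On the BPF side the value type simply declares that field; a sketch in libbpf-style eBPF C (assumes BTF and a bpf_timer-capable kernel, 5.15+; names illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct elem {
        __u64 counter;
        struct bpf_timer timer;  /* the kernel records this offset */
    };

    struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, struct elem);
    } timer_arr SEC(".maps");
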
400 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_free() local
402 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) in array_map_free()
403 bpf_array_free_percpu(array); in array_map_free()
405 if (array->map.map_flags & BPF_F_MMAPABLE) in array_map_free()
406 bpf_map_area_free(array_map_vmalloc_addr(array)); in array_map_free()
408 bpf_map_area_free(array); in array_map_free()
435 struct bpf_array *array = container_of(map, struct bpf_array, map); in percpu_array_map_seq_show_elem() local
443 pptr = array->pptrs[index & array->index_mask]; in percpu_array_map_seq_show_elem()
478 /* bpf array can only take a u32 key. This check makes sure in array_map_check_btf()
489 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_mmap() local
490 pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT; in array_map_mmap()
496 PAGE_ALIGN((u64)array->map.max_entries * array->elem_size)) in array_map_mmap()
499 return remap_vmalloc_range(vma, array_map_vmalloc_addr(array), in array_map_mmap()
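
array_map_mmap() exposes the vmalloc'ed element area to user space: array_map_vmalloc_addr() rounds the struct pointer down to the start of the vmalloc region, and pgoff skips past the struct bpf_array header, so offset 0 of the mapping is the first element. A sketch assuming libbpf; the map must be created with BPF_F_MMAPABLE:

    #include <sys/mman.h>
    #include <bpf/bpf.h>

    int create_and_mmap(void **out, __u32 entries)
    {
        LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
        int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "mmap_arr",
                                sizeof(__u32), sizeof(__u64), entries, &opts);
        void *p;

        if (fd < 0)
            return fd;
        /* elements are now readable/writable without syscalls */
        p = mmap(NULL, (size_t)entries * sizeof(__u64),
                 PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
            return -1;
        *out = p;
        return fd;
    }
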
522 struct bpf_array *array; in bpf_array_map_seq_start() local
530 array = container_of(map, struct bpf_array, map); in bpf_array_map_seq_start()
531 index = info->index & array->index_mask; in bpf_array_map_seq_start()
533 return array->pptrs[index]; in bpf_array_map_seq_start()
534 return array->value + array->elem_size * index; in bpf_array_map_seq_start()
541 struct bpf_array *array; in bpf_array_map_seq_next() local
549 array = container_of(map, struct bpf_array, map); in bpf_array_map_seq_next()
550 index = info->index & array->index_mask; in bpf_array_map_seq_next()
552 return array->pptrs[index]; in bpf_array_map_seq_next()
553 return array->value + array->elem_size * index; in bpf_array_map_seq_next()
652 struct bpf_array *array; in bpf_for_each_array_elem() local
661 array = container_of(map, struct bpf_array, map); in bpf_for_each_array_elem()
666 val = this_cpu_ptr(array->pptrs[i]); in bpf_for_each_array_elem()
668 val = array->value + array->elem_size * i; in bpf_for_each_array_elem()
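
bpf_for_each_array_elem() backs the bpf_for_each_map_elem() helper for arrays, calling a callback with each element's value pointer (per-CPU or flat). A BPF-side sketch, assuming kernel 5.13+ (section, map, and function names illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 16);
        __type(key, __u32);
        __type(value, __u64);
    } arr SEC(".maps");

    /* return 0 to continue iterating, 1 to stop early */
    static long sum_cb(void *map, __u32 *key, __u64 *val, void *ctx)
    {
        *(__u64 *)ctx += *val;
        return 0;
    }

    SEC("tp/syscalls/sys_enter_getpid")
    int sum_all(void *c)
    {
        __u64 sum = 0;

        bpf_for_each_map_elem(&arr, sum_cb, &sum, 0);
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
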
744 struct bpf_array *array = container_of(map, struct bpf_array, map); in fd_array_map_free() local
748 for (i = 0; i < array->map.max_entries; i++) in fd_array_map_free()
749 BUG_ON(array->ptrs[i] != NULL); in fd_array_map_free()
751 bpf_map_area_free(array); in fd_array_map_free()
783 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_fd_array_map_update_elem() local
790 if (index >= array->map.max_entries) in bpf_fd_array_map_update_elem()
799 mutex_lock(&array->aux->poke_mutex); in bpf_fd_array_map_update_elem()
800 old_ptr = xchg(array->ptrs + index, new_ptr); in bpf_fd_array_map_update_elem()
802 mutex_unlock(&array->aux->poke_mutex); in bpf_fd_array_map_update_elem()
804 old_ptr = xchg(array->ptrs + index, new_ptr); in bpf_fd_array_map_update_elem()
814 struct bpf_array *array = container_of(map, struct bpf_array, map); in fd_array_map_delete_elem() local
818 if (index >= array->map.max_entries) in fd_array_map_delete_elem()
822 mutex_lock(&array->aux->poke_mutex); in fd_array_map_delete_elem()
823 old_ptr = xchg(array->ptrs + index, NULL); in fd_array_map_delete_elem()
825 mutex_unlock(&array->aux->poke_mutex); in fd_array_map_delete_elem()
827 old_ptr = xchg(array->ptrs + index, NULL); in fd_array_map_delete_elem()
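
The fd-array update and delete paths xchg() the slot pointer and then drop the reference on whatever was installed before; when a prog array is wired up for tail calls, aux->poke_mutex serializes the swap with JIT text poking. From user space, populating a tail-call slot is an ordinary update whose value is a program fd:

    #include <bpf/bpf.h>

    /* make tail calls through 'slot' land in 'prog_fd' */
    int install_tail_call(int prog_array_fd, __u32 slot, int prog_fd)
    {
        return bpf_map_update_elem(prog_array_fd, &slot, &prog_fd, BPF_ANY);
    }
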
841 struct bpf_array *array = container_of(map, struct bpf_array, map); in prog_fd_array_get_ptr() local
847 if (!bpf_prog_array_compatible(array, prog)) { in prog_fd_array_get_ptr()
868 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_fd_array_map_clear() local
871 for (i = 0; i < array->map.max_entries; i++) in bpf_fd_array_map_clear()
1191 struct bpf_array *array = container_of(map, struct bpf_array, map); in perf_event_fd_array_release() local
1199 for (i = 0; i < array->map.max_entries; i++) { in perf_event_fd_array_release()
1200 ee = READ_ONCE(array->ptrs[i]); in perf_event_fd_array_release()
1310 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_of_map_gen_lookup() local
1321 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); in array_of_map_gen_lookup()
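
array_of_map_gen_lookup() inlines the lookup for BPF_MAP_TYPE_ARRAY_OF_MAPS the same way, index_mask AND included, but yields the inner map pointer. Creating such a map requires an inner map fd that serves purely as a type template; a sketch assuming libbpf:

    #include <bpf/bpf.h>

    int create_array_of_maps(void)
    {
        int inner = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner",
                                   sizeof(__u32), sizeof(__u64), 4, NULL);
        LIBBPF_OPTS(bpf_map_create_opts, opts, .inner_map_fd = inner);

        if (inner < 0)
            return inner;
        /* outer values are map fds, hence the 4-byte value size */
        return bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, "outer",
                              sizeof(__u32), sizeof(__u32), 8, &opts);
    }
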