Lines matching refs:map (references to the identifier map in the BPF array map implementation, kernel/bpf/arraymap.c)
31 for (i = 0; i < array->map.max_entries; i++) { in bpf_array_free_percpu()
42 for (i = 0; i < array->map.max_entries; i++) { in bpf_array_alloc_percpu()
136 array->map.unpriv_array = unpriv; in array_map_alloc()
139 bpf_map_init_from_attr(&array->map, attr); in array_map_alloc()
140 array->map.pages = cost; in array_map_alloc()
148 return &array->map; in array_map_alloc()
152 static void *array_map_lookup_elem(struct bpf_map *map, void *key) in array_map_lookup_elem() argument
154 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_lookup_elem()
157 if (unlikely(index >= array->map.max_entries)) in array_map_lookup_elem()
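
Every reference above first recovers the enclosing struct bpf_array from the generic struct bpf_map handle with container_of(), then bounds the key against map.max_entries. A minimal userspace sketch of that pattern, assuming simplified struct layouts and a plain value_size stride (the kernel additionally rounds the element size up to 8 bytes, as line 168 below shows):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct bpf_map_model {                  /* stand-in for struct bpf_map */
        uint32_t max_entries;
        uint32_t value_size;
};

struct bpf_array_model {                /* stand-in for struct bpf_array */
        struct bpf_map_model map;       /* embedded, so container_of() works */
        char value[];                   /* element storage follows the header */
};

/* Mirrors array_map_lookup_elem(): NULL once index >= max_entries. */
void *model_lookup(struct bpf_map_model *map, const void *key)
{
        struct bpf_array_model *array =
                container_of(map, struct bpf_array_model, map);
        uint32_t index = *(const uint32_t *)key;

        if (index >= array->map.max_entries)
                return NULL;
        return array->value + (size_t)index * array->map.value_size;
}

int main(void)
{
        uint32_t max = 4, vsize = 8, key = 2;
        struct bpf_array_model *array =
                calloc(1, sizeof(*array) + (size_t)max * vsize);

        array->map.max_entries = max;
        array->map.value_size = vsize;
        printf("element %u lives at %p\n", key,
               model_lookup(&array->map, &key));
        free(array);
        return 0;
}
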
164 static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) in array_map_gen_lookup() argument
166 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_gen_lookup()
168 u32 elem_size = round_up(map->value_size, 8); in array_map_gen_lookup()
175 if (map->unpriv_array) { in array_map_gen_lookup()
176 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4); in array_map_gen_lookup()
179 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3); in array_map_gen_lookup()
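
array_map_gen_lookup() exists so the verifier can inline the lookup as BPF instructions instead of a helper call: the BPF_JGE against max_entries is the bounds check, and the 4-versus-3 jump offsets at lines 176 and 179 account for one extra instruction emitted on the unpriv_array path (an index-masking hardening step). A hedged C model of what the emitted sequence computes; index_mask and the masking line are assumptions standing in for that extra instruction:

#include <stddef.h>
#include <stdint.h>

static inline uint32_t round_up_8(uint32_t x)
{
        return (x + 7u) & ~7u;          /* round_up(map->value_size, 8), line 168 */
}

/* Computes the element address the inlined instructions produce. */
void *model_gen_lookup(char *values, uint32_t max_entries, uint32_t index_mask,
                       uint32_t value_size, uint32_t index, int unpriv)
{
        if (index >= max_entries)
                return NULL;            /* the BPF_JMP_IMM(BPF_JGE, ...) above */
        if (unpriv)
                index &= index_mask;    /* assumed masking step on unpriv_array */
        return values + (size_t)index * round_up_8(value_size);
}
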
194 static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key) in percpu_array_map_lookup_elem() argument
196 struct bpf_array *array = container_of(map, struct bpf_array, map); in percpu_array_map_lookup_elem()
199 if (unlikely(index >= array->map.max_entries)) in percpu_array_map_lookup_elem()
205 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value) in bpf_percpu_array_copy() argument
207 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_percpu_array_copy()
213 if (unlikely(index >= array->map.max_entries)) in bpf_percpu_array_copy()
220 size = round_up(map->value_size, 8); in bpf_percpu_array_copy()
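
bpf_percpu_array_copy() flattens one element's per-CPU values into the caller's buffer at a stride of round_up(value_size, 8) (line 220). A rough userspace model; per_cpu_slots[] is an assumed stand-in for the kernel's per-CPU allocation, and -1 stands in for -ENOENT:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

int model_percpu_copy(void *const *per_cpu_slots, unsigned int nr_cpus,
                      uint32_t value_size, uint32_t max_entries,
                      uint32_t index, void *out)
{
        uint32_t stride = (value_size + 7u) & ~7u;  /* round_up(value_size, 8) */
        unsigned int cpu;

        if (index >= max_entries)
                return -1;
        for (cpu = 0; cpu < nr_cpus; cpu++)         /* one slot per possible CPU */
                memcpy((char *)out + (size_t)cpu * stride,
                       per_cpu_slots[cpu], value_size);
        return 0;
}
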
232 static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key) in array_map_get_next_key() argument
234 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_get_next_key()
238 if (index >= array->map.max_entries) { in array_map_get_next_key()
243 if (index == array->map.max_entries - 1) in array_map_get_next_key()
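
The two max_entries tests at lines 238 and 243 define array iteration: an out-of-range (or missing) key restarts iteration at index 0, while the last valid index reports that there is no next key. Sketched in plain C, with -1 standing in for the kernel's -ENOENT:

#include <stdint.h>

int model_get_next_key(uint32_t max_entries, const uint32_t *key,
                       uint32_t *next_key)
{
        uint32_t index = key ? *key : UINT32_MAX;

        if (index >= max_entries) {
                *next_key = 0;          /* restart iteration from the start */
                return 0;
        }
        if (index == max_entries - 1)
                return -1;              /* no next key */
        *next_key = index + 1;
        return 0;
}
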
251 static int array_map_update_elem(struct bpf_map *map, void *key, void *value, in array_map_update_elem() argument
254 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_update_elem()
261 if (unlikely(index >= array->map.max_entries)) in array_map_update_elem()
269 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) in array_map_update_elem()
271 value, map->value_size); in array_map_update_elem()
275 value, map->value_size); in array_map_update_elem()
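
array_map_update_elem() copies map->value_size bytes either into the current CPU's slot (BPF_MAP_TYPE_PERCPU_ARRAY, line 269) or into the element's inline storage; because the array is fixed-size, an out-of-range index is rejected rather than inserted. A simplified model, where this_cpu_slot and elem_addr are assumed stand-ins for the kernel's per-CPU and inline value pointers and -1 stands in for -E2BIG:

#include <stdint.h>
#include <string.h>

int model_update_elem(int is_percpu_map, void *this_cpu_slot, void *elem_addr,
                      const void *value, uint32_t value_size,
                      uint32_t index, uint32_t max_entries)
{
        if (index >= max_entries)
                return -1;              /* all elements are pre-allocated */
        if (is_percpu_map)
                memcpy(this_cpu_slot, value, value_size);
        else
                memcpy(elem_addr, value, value_size);
        return 0;
}
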
279 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, in bpf_percpu_array_update() argument
282 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_percpu_array_update()
292 if (unlikely(index >= array->map.max_entries)) in bpf_percpu_array_update()
306 size = round_up(map->value_size, 8); in bpf_percpu_array_update()
318 static int array_map_delete_elem(struct bpf_map *map, void *key) in array_map_delete_elem() argument
324 static void array_map_free(struct bpf_map *map) in array_map_free() argument
326 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_free()
335 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) in array_map_free()
341 static void array_map_seq_show_elem(struct bpf_map *map, void *key, in array_map_seq_show_elem() argument
348 value = array_map_lookup_elem(map, key); in array_map_seq_show_elem()
355 btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); in array_map_seq_show_elem()
361 static int array_map_check_btf(const struct bpf_map *map, in array_map_check_btf() argument
412 static void fd_array_map_free(struct bpf_map *map) in fd_array_map_free() argument
414 struct bpf_array *array = container_of(map, struct bpf_array, map); in fd_array_map_free()
420 for (i = 0; i < array->map.max_entries; i++) in fd_array_map_free()
426 static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key) in fd_array_map_lookup_elem() argument
432 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value) in bpf_fd_array_map_lookup_elem() argument
437 if (!map->ops->map_fd_sys_lookup_elem) in bpf_fd_array_map_lookup_elem()
441 elem = array_map_lookup_elem(map, key); in bpf_fd_array_map_lookup_elem()
443 *value = map->ops->map_fd_sys_lookup_elem(ptr); in bpf_fd_array_map_lookup_elem()
452 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, in bpf_fd_array_map_update_elem() argument
455 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_fd_array_map_update_elem()
462 if (index >= array->map.max_entries) in bpf_fd_array_map_update_elem()
466 new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd); in bpf_fd_array_map_update_elem()
472 map->ops->map_fd_put_ptr(old_ptr); in bpf_fd_array_map_update_elem()
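
bpf_fd_array_map_update_elem() is the shared update path for the fd-backed arrays below (prog, perf_event, cgroup, map-in-map): the user-supplied fd is translated into an object pointer by the map-type-specific map_fd_get_ptr hook, atomically swapped into the slot, and the displaced pointer is released via map_fd_put_ptr (lines 466-472). A hedged sketch of that flow; the ops structure and hook signatures are simplified stand-ins for struct bpf_map_ops, and -1 stands in for the kernel's error codes:

#include <stdatomic.h>
#include <stdint.h>

struct fd_array_ops_model {
        void *(*fd_get_ptr)(uint32_t ufd);  /* resolve fd to prog/event/map/... */
        void  (*fd_put_ptr)(void *ptr);     /* drop the reference it held */
};

int model_fd_array_update(_Atomic(void *) *slots, uint32_t max_entries,
                          const struct fd_array_ops_model *ops,
                          uint32_t index, uint32_t ufd)
{
        void *new_ptr, *old_ptr;

        if (index >= max_entries)
                return -1;
        new_ptr = ops->fd_get_ptr(ufd);
        if (!new_ptr)
                return -1;                          /* kernel propagates PTR_ERR() */
        old_ptr = atomic_exchange(&slots[index], new_ptr);  /* the xchg() */
        if (old_ptr)
                ops->fd_put_ptr(old_ptr);           /* release the old element */
        return 0;
}
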
477 static int fd_array_map_delete_elem(struct bpf_map *map, void *key) in fd_array_map_delete_elem() argument
479 struct bpf_array *array = container_of(map, struct bpf_array, map); in fd_array_map_delete_elem()
483 if (index >= array->map.max_entries) in fd_array_map_delete_elem()
488 map->ops->map_fd_put_ptr(old_ptr); in fd_array_map_delete_elem()
495 static void *prog_fd_array_get_ptr(struct bpf_map *map, in prog_fd_array_get_ptr() argument
498 struct bpf_array *array = container_of(map, struct bpf_array, map); in prog_fd_array_get_ptr()
523 static void bpf_fd_array_map_clear(struct bpf_map *map) in bpf_fd_array_map_clear() argument
525 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_fd_array_map_clear()
528 for (i = 0; i < array->map.max_entries; i++) in bpf_fd_array_map_clear()
529 fd_array_map_delete_elem(map, &i); in bpf_fd_array_map_clear()
575 static void *perf_event_fd_array_get_ptr(struct bpf_map *map, in perf_event_fd_array_get_ptr() argument
606 static void perf_event_fd_array_release(struct bpf_map *map, in perf_event_fd_array_release() argument
609 struct bpf_array *array = container_of(map, struct bpf_array, map); in perf_event_fd_array_release()
614 for (i = 0; i < array->map.max_entries; i++) { in perf_event_fd_array_release()
617 fd_array_map_delete_elem(map, &i); in perf_event_fd_array_release()
636 static void *cgroup_fd_array_get_ptr(struct bpf_map *map, in cgroup_fd_array_get_ptr() argument
649 static void cgroup_fd_array_free(struct bpf_map *map) in cgroup_fd_array_free() argument
651 bpf_fd_array_map_clear(map); in cgroup_fd_array_free()
652 fd_array_map_free(map); in cgroup_fd_array_free()
670 struct bpf_map *map, *inner_map_meta; in array_of_map_alloc() local
676 map = array_map_alloc(attr); in array_of_map_alloc()
677 if (IS_ERR(map)) { in array_of_map_alloc()
679 return map; in array_of_map_alloc()
682 map->inner_map_meta = inner_map_meta; in array_of_map_alloc()
684 return map; in array_of_map_alloc()
687 static void array_of_map_free(struct bpf_map *map) in array_of_map_free() argument
692 bpf_map_meta_free(map->inner_map_meta); in array_of_map_free()
693 bpf_fd_array_map_clear(map); in array_of_map_free()
694 fd_array_map_free(map); in array_of_map_free()
697 static void *array_of_map_lookup_elem(struct bpf_map *map, void *key) in array_of_map_lookup_elem() argument
699 struct bpf_map **inner_map = array_map_lookup_elem(map, key); in array_of_map_lookup_elem()
707 static u32 array_of_map_gen_lookup(struct bpf_map *map, in array_of_map_gen_lookup() argument
710 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_of_map_gen_lookup()
711 u32 elem_size = round_up(map->value_size, 8); in array_of_map_gen_lookup()
719 if (map->unpriv_array) { in array_of_map_gen_lookup()
720 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6); in array_of_map_gen_lookup()
723 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5); in array_of_map_gen_lookup()
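
array_of_map_gen_lookup() needs two more inlined instructions than the plain array version (jump offsets 6/5 at lines 720 and 723 versus 4/3 at lines 176 and 179) because each element stores a pointer to an inner bpf_map that must itself be loaded and NULL-checked before use. A minimal model of the lookup side; struct inner_map_model is an assumed stand-in for the inner map:

#include <stddef.h>
#include <stdint.h>

struct inner_map_model {
        uint32_t max_entries;           /* whatever the inner map carries */
};

struct inner_map_model *
model_array_of_map_lookup(struct inner_map_model *const *slots,
                          uint32_t max_entries, uint32_t index)
{
        if (index >= max_entries)
                return NULL;            /* same bounds check as the plain array */
        return slots[index];            /* NULL if the slot was never populated */
}
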