Lines Matching refs:htab

131 static inline bool htab_is_prealloc(const struct bpf_htab *htab)  in htab_is_prealloc()  argument
133 return !(htab->map.map_flags & BPF_F_NO_PREALLOC); in htab_is_prealloc()
136 static void htab_init_buckets(struct bpf_htab *htab) in htab_init_buckets() argument
140 for (i = 0; i < htab->n_buckets; i++) { in htab_init_buckets()
141 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); in htab_init_buckets()
142 raw_spin_lock_init(&htab->buckets[i].raw_lock); in htab_init_buckets()
143 lockdep_set_class(&htab->buckets[i].raw_lock, in htab_init_buckets()
144 &htab->lockdep_key); in htab_init_buckets()
149 static inline int htab_lock_bucket(const struct bpf_htab *htab, in htab_lock_bucket() argument
155 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); in htab_lock_bucket()
158 if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) { in htab_lock_bucket()
159 __this_cpu_dec(*(htab->map_locked[hash])); in htab_lock_bucket()
170 static inline void htab_unlock_bucket(const struct bpf_htab *htab, in htab_unlock_bucket() argument
174 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); in htab_unlock_bucket()
176 __this_cpu_dec(*(htab->map_locked[hash])); in htab_unlock_bucket()
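
Note on the bucket locking above: htab_lock_bucket() masks the hash into a small per-CPU "map_locked" counter slot and increments it before taking the bucket's raw spinlock; if the counter was already non-zero, the same CPU is re-entering the hash table (for example from a tracing program that fired inside it), so the attempt is refused with -EBUSY instead of deadlocking. htab_unlock_bucket() drops the counter again. A minimal single-threaded userspace sketch of that guard, with toy_* names invented here and an assumed 8-slot table:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAP_LOCK_COUNT 8                        /* stand-in for HASHTAB_MAP_LOCK_COUNT */
    #define MAP_LOCK_MASK  (MAP_LOCK_COUNT - 1)

    static int lock_depth[MAP_LOCK_COUNT];          /* models the per-CPU map_locked counters */

    static int toy_lock_bucket(uint32_t hash, uint32_t n_buckets)
    {
        uint32_t slot = hash & (MAP_LOCK_MASK < n_buckets - 1 ?
                                (uint32_t)MAP_LOCK_MASK : n_buckets - 1);

        if (++lock_depth[slot] != 1) {
            --lock_depth[slot];
            return -EBUSY;                          /* nested attempt on the same slot */
        }
        /* the real code now takes buckets[hash & (n_buckets - 1)].raw_lock */
        return 0;
    }

    static void toy_unlock_bucket(uint32_t hash, uint32_t n_buckets)
    {
        uint32_t slot = hash & (MAP_LOCK_MASK < n_buckets - 1 ?
                                (uint32_t)MAP_LOCK_MASK : n_buckets - 1);

        --lock_depth[slot];
    }

    int main(void)
    {
        printf("first lock:  %d\n", toy_lock_bucket(5, 16));   /* 0 */
        printf("nested lock: %d\n", toy_lock_bucket(5, 16));   /* -EBUSY */
        toy_unlock_bucket(5, 16);
        return 0;
    }
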
182 static bool htab_is_lru(const struct bpf_htab *htab) in htab_is_lru() argument
184 return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH || in htab_is_lru()
185 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_lru()
188 static bool htab_is_percpu(const struct bpf_htab *htab) in htab_is_percpu() argument
190 return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_is_percpu()
191 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_percpu()
210 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i) in get_htab_elem() argument
212 return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size); in get_htab_elem()
215 static bool htab_has_extra_elems(struct bpf_htab *htab) in htab_has_extra_elems() argument
217 return !htab_is_percpu(htab) && !htab_is_lru(htab); in htab_has_extra_elems()
220 static void htab_free_prealloced_timers(struct bpf_htab *htab) in htab_free_prealloced_timers() argument
222 u32 num_entries = htab->map.max_entries; in htab_free_prealloced_timers()
225 if (!btf_record_has_field(htab->map.record, BPF_TIMER)) in htab_free_prealloced_timers()
227 if (htab_has_extra_elems(htab)) in htab_free_prealloced_timers()
233 elem = get_htab_elem(htab, i); in htab_free_prealloced_timers()
234 bpf_obj_free_timer(htab->map.record, elem->key + round_up(htab->map.key_size, 8)); in htab_free_prealloced_timers()
239 static void htab_free_prealloced_fields(struct bpf_htab *htab) in htab_free_prealloced_fields() argument
241 u32 num_entries = htab->map.max_entries; in htab_free_prealloced_fields()
244 if (IS_ERR_OR_NULL(htab->map.record)) in htab_free_prealloced_fields()
246 if (htab_has_extra_elems(htab)) in htab_free_prealloced_fields()
251 elem = get_htab_elem(htab, i); in htab_free_prealloced_fields()
252 if (htab_is_percpu(htab)) { in htab_free_prealloced_fields()
253 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size); in htab_free_prealloced_fields()
257 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu)); in htab_free_prealloced_fields()
261 bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8)); in htab_free_prealloced_fields()
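
Note on the element layout used by get_htab_elem() and the prealloc free helpers: preallocated elements live in one flat array, element i at htab->elems + i * elem_size; inside an element the key starts at elem->key and the map value (or the per-CPU value pointer) follows at key + round_up(key_size, 8). A minimal userspace sketch of that addressing, with invented toy_* names and illustrative sizes:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define ROUND_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    /* Shrunk stand-in for struct htab_elem: a small header, then key, then value. */
    struct toy_elem {
        uint32_t hash;
        char key[];                 /* value sits at key + ROUND_UP(key_size, 8) */
    };

    int main(void)
    {
        uint32_t key_size = 6, value_size = 16, max_entries = 4;
        size_t elem_size = sizeof(struct toy_elem) +
                           ROUND_UP(key_size, 8) + ROUND_UP(value_size, 8);
        char *elems = calloc(max_entries, elem_size);

        if (!elems)
            return 1;
        for (uint32_t i = 0; i < max_entries; i++) {
            struct toy_elem *e = (struct toy_elem *)(elems + (uint64_t)i * elem_size);
            void *value = e->key + ROUND_UP(key_size, 8);

            /* htab_free_prealloced_fields() would call bpf_obj_free_fields() on value */
            memset(value, 0, value_size);
        }
        printf("elem_size = %zu\n", elem_size);
        free(elems);
        return 0;
    }
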
268 static void htab_free_elems(struct bpf_htab *htab) in htab_free_elems() argument
272 if (!htab_is_percpu(htab)) in htab_free_elems()
275 for (i = 0; i < htab->map.max_entries; i++) { in htab_free_elems()
278 pptr = htab_elem_get_ptr(get_htab_elem(htab, i), in htab_free_elems()
279 htab->map.key_size); in htab_free_elems()
284 bpf_map_area_free(htab->elems); in htab_free_elems()
298 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key, in prealloc_lru_pop() argument
301 struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash); in prealloc_lru_pop()
305 bpf_map_inc_elem_count(&htab->map); in prealloc_lru_pop()
307 memcpy(l->key, key, htab->map.key_size); in prealloc_lru_pop()
314 static int prealloc_init(struct bpf_htab *htab) in prealloc_init() argument
316 u32 num_entries = htab->map.max_entries; in prealloc_init()
319 if (htab_has_extra_elems(htab)) in prealloc_init()
322 htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries, in prealloc_init()
323 htab->map.numa_node); in prealloc_init()
324 if (!htab->elems) in prealloc_init()
327 if (!htab_is_percpu(htab)) in prealloc_init()
331 u32 size = round_up(htab->map.value_size, 8); in prealloc_init()
334 pptr = bpf_map_alloc_percpu(&htab->map, size, 8, in prealloc_init()
338 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, in prealloc_init()
344 if (htab_is_lru(htab)) in prealloc_init()
345 err = bpf_lru_init(&htab->lru, in prealloc_init()
346 htab->map.map_flags & BPF_F_NO_COMMON_LRU, in prealloc_init()
350 htab); in prealloc_init()
352 err = pcpu_freelist_init(&htab->freelist); in prealloc_init()
357 if (htab_is_lru(htab)) in prealloc_init()
358 bpf_lru_populate(&htab->lru, htab->elems, in prealloc_init()
360 htab->elem_size, num_entries); in prealloc_init()
362 pcpu_freelist_populate(&htab->freelist, in prealloc_init()
363 htab->elems + offsetof(struct htab_elem, fnode), in prealloc_init()
364 htab->elem_size, num_entries); in prealloc_init()
369 htab_free_elems(htab); in prealloc_init()
373 static void prealloc_destroy(struct bpf_htab *htab) in prealloc_destroy() argument
375 htab_free_elems(htab); in prealloc_destroy()
377 if (htab_is_lru(htab)) in prealloc_destroy()
378 bpf_lru_destroy(&htab->lru); in prealloc_destroy()
380 pcpu_freelist_destroy(&htab->freelist); in prealloc_destroy()
383 static int alloc_extra_elems(struct bpf_htab *htab) in alloc_extra_elems() argument
389 pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8, in alloc_extra_elems()
395 l = pcpu_freelist_pop(&htab->freelist); in alloc_extra_elems()
402 htab->extra_elems = pptr; in alloc_extra_elems()
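
Note on preallocation: prealloc_init() allocates one flat elem_size * num_entries area (num_entries starts at max_entries and is increased when htab_has_extra_elems() is true; the exact increment is not shown above), wires up per-CPU value pointers for per-CPU maps, and then threads every element onto either the LRU free list or a per-CPU freelist via its embedded fnode. alloc_extra_elems() then appears to pop one free element per CPU and park the pointers in htab->extra_elems, so a later in-place update can swap elements without going back to the freelist under the bucket lock. A simplified, single-threaded sketch of populating and popping such a freelist, with invented toy_* types:

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_node { struct toy_node *next; };     /* stands in for the embedded fnode */

    struct toy_freelist { struct toy_node *head; };

    static void toy_freelist_populate(struct toy_freelist *fl, char *elems,
                                      size_t elem_size, unsigned int nr)
    {
        for (unsigned int i = 0; i < nr; i++) {
            struct toy_node *node = (struct toy_node *)(elems + i * elem_size);

            node->next = fl->head;
            fl->head = node;
        }
    }

    static struct toy_node *toy_freelist_pop(struct toy_freelist *fl)
    {
        struct toy_node *node = fl->head;

        if (node)
            fl->head = node->next;
        return node;
    }

    int main(void)
    {
        size_t elem_size = 64;                      /* illustrative htab->elem_size */
        unsigned int nr = 8;                        /* max_entries plus spare elements */
        char *elems = calloc(nr, elem_size);
        struct toy_freelist fl = { NULL };

        if (!elems)
            return 1;
        toy_freelist_populate(&fl, elems, elem_size, nr);
        printf("popped spare element: %p\n", (void *)toy_freelist_pop(&fl));
        free(elems);
        return 0;
    }
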
475 struct bpf_htab *htab; in htab_map_alloc() local
478 htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE); in htab_map_alloc()
479 if (!htab) in htab_map_alloc()
482 lockdep_register_key(&htab->lockdep_key); in htab_map_alloc()
484 bpf_map_init_from_attr(&htab->map, attr); in htab_map_alloc()
491 htab->map.max_entries = roundup(attr->max_entries, in htab_map_alloc()
493 if (htab->map.max_entries < attr->max_entries) in htab_map_alloc()
494 htab->map.max_entries = rounddown(attr->max_entries, in htab_map_alloc()
499 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); in htab_map_alloc()
501 htab->elem_size = sizeof(struct htab_elem) + in htab_map_alloc()
502 round_up(htab->map.key_size, 8); in htab_map_alloc()
504 htab->elem_size += sizeof(void *); in htab_map_alloc()
506 htab->elem_size += round_up(htab->map.value_size, 8); in htab_map_alloc()
510 if (htab->n_buckets == 0 || in htab_map_alloc()
511 htab->n_buckets > U32_MAX / sizeof(struct bucket)) in htab_map_alloc()
514 err = bpf_map_init_elem_count(&htab->map); in htab_map_alloc()
519 htab->buckets = bpf_map_area_alloc(htab->n_buckets * in htab_map_alloc()
521 htab->map.numa_node); in htab_map_alloc()
522 if (!htab->buckets) in htab_map_alloc()
526 htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map, in htab_map_alloc()
530 if (!htab->map_locked[i]) in htab_map_alloc()
534 if (htab->map.map_flags & BPF_F_ZERO_SEED) in htab_map_alloc()
535 htab->hashrnd = 0; in htab_map_alloc()
537 htab->hashrnd = get_random_u32(); in htab_map_alloc()
539 htab_init_buckets(htab); in htab_map_alloc()
556 htab->use_percpu_counter = true; in htab_map_alloc()
558 if (htab->use_percpu_counter) { in htab_map_alloc()
559 err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL); in htab_map_alloc()
565 err = prealloc_init(htab); in htab_map_alloc()
573 err = alloc_extra_elems(htab); in htab_map_alloc()
578 err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false); in htab_map_alloc()
582 err = bpf_mem_alloc_init(&htab->pcpu_ma, in htab_map_alloc()
583 round_up(htab->map.value_size, 8), true); in htab_map_alloc()
589 return &htab->map; in htab_map_alloc()
592 prealloc_destroy(htab); in htab_map_alloc()
594 if (htab->use_percpu_counter) in htab_map_alloc()
595 percpu_counter_destroy(&htab->pcount); in htab_map_alloc()
597 free_percpu(htab->map_locked[i]); in htab_map_alloc()
598 bpf_map_area_free(htab->buckets); in htab_map_alloc()
599 bpf_mem_alloc_destroy(&htab->pcpu_ma); in htab_map_alloc()
600 bpf_mem_alloc_destroy(&htab->ma); in htab_map_alloc()
602 bpf_map_free_elem_count(&htab->map); in htab_map_alloc()
604 lockdep_unregister_key(&htab->lockdep_key); in htab_map_alloc()
605 bpf_map_area_free(htab); in htab_map_alloc()
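
Note on the sizing done in htab_map_alloc(): max_entries may first be rounded up (or, if that overflows, down) to a multiple whose value is not shown above; n_buckets is max_entries rounded up to a power of two, so a bucket can later be picked with hash & (n_buckets - 1); and elem_size is the struct htab_elem header plus the key rounded to 8 bytes, plus either a per-CPU value pointer or the inline value rounded to 8 bytes. A hedged userspace restatement of that arithmetic with illustrative numbers:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ROUND_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    /* Round up to the next power of two (max_entries is non-zero here). */
    static uint32_t toy_roundup_pow_of_two(uint32_t n)
    {
        uint32_t p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        uint32_t key_size = 12, value_size = 40, max_entries = 1000;
        bool percpu = false;

        uint32_t n_buckets = toy_roundup_pow_of_two(max_entries);
        size_t elem_size = 48                       /* stands in for sizeof(struct htab_elem) */
                         + ROUND_UP(key_size, 8)
                         + (percpu ? sizeof(void *) : ROUND_UP(value_size, 8));

        /* power-of-two bucket count => selection is a mask, as in __select_bucket() */
        printf("n_buckets=%u elem_size=%zu\n", n_buckets, elem_size);
        return 0;
    }
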
616 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) in __select_bucket() argument
618 return &htab->buckets[hash & (htab->n_buckets - 1)]; in __select_bucket()
621 static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash) in select_bucket() argument
623 return &__select_bucket(htab, hash)->head; in select_bucket()
669 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_elem() local
679 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_map_lookup_elem()
681 head = select_bucket(htab, hash); in __htab_map_lookup_elem()
683 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in __htab_map_lookup_elem()
773 static void check_and_free_fields(struct bpf_htab *htab, in check_and_free_fields() argument
776 if (htab_is_percpu(htab)) { in check_and_free_fields()
777 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size); in check_and_free_fields()
781 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu)); in check_and_free_fields()
783 void *map_value = elem->key + round_up(htab->map.key_size, 8); in check_and_free_fields()
785 bpf_obj_free_fields(htab->map.record, map_value); in check_and_free_fields()
794 struct bpf_htab *htab = arg; in htab_lru_map_delete_node() local
803 b = __select_bucket(htab, tgt_l->hash); in htab_lru_map_delete_node()
806 ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags); in htab_lru_map_delete_node()
813 check_and_free_fields(htab, l); in htab_lru_map_delete_node()
814 bpf_map_dec_elem_count(&htab->map); in htab_lru_map_delete_node()
818 htab_unlock_bucket(htab, b, tgt_l->hash, flags); in htab_lru_map_delete_node()
826 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_get_next_key() local
839 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_get_next_key()
841 head = select_bucket(htab, hash); in htab_map_get_next_key()
844 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in htab_map_get_next_key()
860 i = hash & (htab->n_buckets - 1); in htab_map_get_next_key()
865 for (; i < htab->n_buckets; i++) { in htab_map_get_next_key()
866 head = select_bucket(htab, i); in htab_map_get_next_key()
882 static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l) in htab_elem_free() argument
884 check_and_free_fields(htab, l); in htab_elem_free()
885 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) in htab_elem_free()
886 bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr); in htab_elem_free()
887 bpf_mem_cache_free(&htab->ma, l); in htab_elem_free()
890 static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) in htab_put_fd_value() argument
892 struct bpf_map *map = &htab->map; in htab_put_fd_value()
901 static bool is_map_full(struct bpf_htab *htab) in is_map_full() argument
903 if (htab->use_percpu_counter) in is_map_full()
904 return __percpu_counter_compare(&htab->pcount, htab->map.max_entries, in is_map_full()
906 return atomic_read(&htab->count) >= htab->map.max_entries; in is_map_full()
909 static void inc_elem_count(struct bpf_htab *htab) in inc_elem_count() argument
911 bpf_map_inc_elem_count(&htab->map); in inc_elem_count()
913 if (htab->use_percpu_counter) in inc_elem_count()
914 percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH); in inc_elem_count()
916 atomic_inc(&htab->count); in inc_elem_count()
919 static void dec_elem_count(struct bpf_htab *htab) in dec_elem_count() argument
921 bpf_map_dec_elem_count(&htab->map); in dec_elem_count()
923 if (htab->use_percpu_counter) in dec_elem_count()
924 percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH); in dec_elem_count()
926 atomic_dec(&htab->count); in dec_elem_count()
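
Note on element accounting: is_map_full(), inc_elem_count() and dec_elem_count() show two schemes side by side. When htab->use_percpu_counter is set, occupancy is tracked in htab->pcount with percpu_counter_add_batch(..., PERCPU_COUNTER_BATCH) and checked against max_entries with __percpu_counter_compare(); otherwise an exact atomic htab->count is used. The sketch below models only the batching idea (deltas accumulate locally and are flushed to a shared total in chunks), not the kernel API:

    #include <stdio.h>

    #define BATCH 32                        /* stands in for PERCPU_COUNTER_BATCH */

    struct toy_pcount {
        long global;                        /* shared total, touched rarely */
        long local;                         /* per-CPU delta in the real code */
    };

    static void toy_count_add(struct toy_pcount *c, long delta)
    {
        c->local += delta;
        if (c->local >= BATCH || c->local <= -BATCH) {
            c->global += c->local;          /* flush the batch */
            c->local = 0;
        }
    }

    static int toy_is_full(const struct toy_pcount *c, long max_entries)
    {
        /* cheap check; the kernel falls back to a precise sum when the counts are close */
        return c->global + c->local >= max_entries;
    }

    int main(void)
    {
        struct toy_pcount c = { 0, 0 };

        for (int i = 0; i < 100; i++)
            toy_count_add(&c, 1);
        printf("global=%ld local=%ld full(128)=%d\n",
               c.global, c.local, toy_is_full(&c, 128));
        return 0;
    }
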
930 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) in free_htab_elem() argument
932 htab_put_fd_value(htab, l); in free_htab_elem()
934 if (htab_is_prealloc(htab)) { in free_htab_elem()
935 bpf_map_dec_elem_count(&htab->map); in free_htab_elem()
936 check_and_free_fields(htab, l); in free_htab_elem()
937 __pcpu_freelist_push(&htab->freelist, &l->fnode); in free_htab_elem()
939 dec_elem_count(htab); in free_htab_elem()
940 htab_elem_free(htab, l); in free_htab_elem()
944 static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr, in pcpu_copy_value() argument
949 copy_map_value(&htab->map, this_cpu_ptr(pptr), value); in pcpu_copy_value()
951 u32 size = round_up(htab->map.value_size, 8); in pcpu_copy_value()
955 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off); in pcpu_copy_value()
961 static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr, in pcpu_init_value() argument
975 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value); in pcpu_init_value()
977 zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu)); in pcpu_init_value()
980 pcpu_copy_value(htab, pptr, value, onallcpus); in pcpu_init_value()
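
Note on per-CPU value updates: per the listed lines, pcpu_copy_value() writes the value either only into the current CPU's slot or, when onallcpus is set, into every CPU's slot, stepping through the user buffer in round_up(value_size, 8) strides; pcpu_init_value() either copies to the current CPU and zeroes the remaining CPUs' slots, or falls back to pcpu_copy_value(). A simplified userspace sketch of the copy, with invented toy_* names and a fixed CPU count:

    #include <stdio.h>
    #include <string.h>

    #define NR_CPUS 4
    #define ROUND_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    /* slots: one value per CPU, laid out back to back in ROUND_UP(value_size, 8) strides */
    static void toy_pcpu_copy_value(char *slots, size_t value_size,
                                    const char *value, int onallcpus, int this_cpu)
    {
        size_t stride = ROUND_UP(value_size, 8);

        if (!onallcpus) {
            memcpy(slots + this_cpu * stride, value, value_size);
            return;
        }
        for (int cpu = 0; cpu < NR_CPUS; cpu++)     /* user buffer is per-CPU laid out too */
            memcpy(slots + cpu * stride, value + cpu * stride, value_size);
    }

    int main(void)
    {
        char slots[NR_CPUS * 8] = { 0 };
        char value[NR_CPUS * 8] = "abcdefg";

        toy_pcpu_copy_value(slots, 7, value, 0, 2); /* program context: current CPU only */
        toy_pcpu_copy_value(slots, 7, value, 1, 2); /* syscall context: all CPUs */
        printf("cpu0 slot: %.7s\n", slots);
        return 0;
    }
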
984 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab) in fd_htab_map_needs_adjust() argument
986 return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS && in fd_htab_map_needs_adjust()
990 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, in alloc_htab_elem() argument
995 u32 size = htab->map.value_size; in alloc_htab_elem()
996 bool prealloc = htab_is_prealloc(htab); in alloc_htab_elem()
1005 pl_new = this_cpu_ptr(htab->extra_elems); in alloc_htab_elem()
1007 htab_put_fd_value(htab, old_elem); in alloc_htab_elem()
1012 l = __pcpu_freelist_pop(&htab->freelist); in alloc_htab_elem()
1016 bpf_map_inc_elem_count(&htab->map); in alloc_htab_elem()
1019 if (is_map_full(htab)) in alloc_htab_elem()
1027 inc_elem_count(htab); in alloc_htab_elem()
1028 l_new = bpf_mem_cache_alloc(&htab->ma); in alloc_htab_elem()
1041 pptr = bpf_mem_cache_alloc(&htab->pcpu_ma); in alloc_htab_elem()
1043 bpf_mem_cache_free(&htab->ma, l_new); in alloc_htab_elem()
1051 pcpu_init_value(htab, pptr, value, onallcpus); in alloc_htab_elem()
1055 } else if (fd_htab_map_needs_adjust(htab)) { in alloc_htab_elem()
1059 copy_map_value(&htab->map, in alloc_htab_elem()
1067 dec_elem_count(htab); in alloc_htab_elem()
1071 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old, in check_flags() argument
1089 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_update_elem() local
1106 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_update_elem()
1108 b = __select_bucket(htab, hash); in htab_map_update_elem()
1116 htab->n_buckets); in htab_map_update_elem()
1117 ret = check_flags(htab, l_old, map_flags); in htab_map_update_elem()
1133 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_map_update_elem()
1139 ret = check_flags(htab, l_old, map_flags); in htab_map_update_elem()
1157 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false, in htab_map_update_elem()
1171 if (!htab_is_prealloc(htab)) in htab_map_update_elem()
1172 free_htab_elem(htab, l_old); in htab_map_update_elem()
1174 check_and_free_fields(htab, l_old); in htab_map_update_elem()
1178 htab_unlock_bucket(htab, b, hash, flags); in htab_map_update_elem()
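
Note on the update path: htab_map_update_elem() hashes the key, selects the bucket, takes the bucket lock, looks up any existing element and calls check_flags() before allocating the replacement element, linking it into the bucket list and disposing of the old one. Only check_flags()'s call sites appear above, so the sketch below is a hedged reconstruction of the usual BPF_NOEXIST / BPF_EXIST rule, with invented toy_* names:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum { TOY_ANY = 0, TOY_NOEXIST = 1, TOY_EXIST = 2 };  /* stand-ins for BPF update flags */

    /* Reject updates whose flags contradict whether the key already exists. */
    static int toy_check_flags(bool old_elem_exists, unsigned int map_flags)
    {
        if (old_elem_exists && map_flags == TOY_NOEXIST)
            return -EEXIST;                 /* caller demanded "insert only" */
        if (!old_elem_exists && map_flags == TOY_EXIST)
            return -ENOENT;                 /* caller demanded "update only" */
        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               toy_check_flags(true,  TOY_NOEXIST),   /* -EEXIST */
               toy_check_flags(false, TOY_EXIST),     /* -ENOENT */
               toy_check_flags(false, TOY_ANY));      /* 0 */
        return 0;
    }

The htab_map_update_elem() lines above also show the two disposal paths for the replaced element: a non-preallocated map frees it with free_htab_elem(), while a preallocated map only runs check_and_free_fields() on it and recycles the element itself.
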
1182 static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem) in htab_lru_push_free() argument
1184 check_and_free_fields(htab, elem); in htab_lru_push_free()
1185 bpf_map_dec_elem_count(&htab->map); in htab_lru_push_free()
1186 bpf_lru_push_free(&htab->lru, &elem->lru_node); in htab_lru_push_free()
1192 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_update_elem() local
1209 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_update_elem()
1211 b = __select_bucket(htab, hash); in htab_lru_map_update_elem()
1219 l_new = prealloc_lru_pop(htab, key, hash); in htab_lru_map_update_elem()
1222 copy_map_value(&htab->map, in htab_lru_map_update_elem()
1225 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_lru_map_update_elem()
1231 ret = check_flags(htab, l_old, map_flags); in htab_lru_map_update_elem()
1246 htab_unlock_bucket(htab, b, hash, flags); in htab_lru_map_update_elem()
1250 htab_lru_push_free(htab, l_new); in htab_lru_map_update_elem()
1252 htab_lru_push_free(htab, l_old); in htab_lru_map_update_elem()
1261 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_percpu_map_update_elem() local
1278 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_percpu_map_update_elem()
1280 b = __select_bucket(htab, hash); in __htab_percpu_map_update_elem()
1283 ret = htab_lock_bucket(htab, b, hash, &flags); in __htab_percpu_map_update_elem()
1289 ret = check_flags(htab, l_old, map_flags); in __htab_percpu_map_update_elem()
1295 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), in __htab_percpu_map_update_elem()
1298 l_new = alloc_htab_elem(htab, key, value, key_size, in __htab_percpu_map_update_elem()
1308 htab_unlock_bucket(htab, b, hash, flags); in __htab_percpu_map_update_elem()
1316 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_lru_percpu_map_update_elem() local
1333 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_lru_percpu_map_update_elem()
1335 b = __select_bucket(htab, hash); in __htab_lru_percpu_map_update_elem()
1344 l_new = prealloc_lru_pop(htab, key, hash); in __htab_lru_percpu_map_update_elem()
1349 ret = htab_lock_bucket(htab, b, hash, &flags); in __htab_lru_percpu_map_update_elem()
1355 ret = check_flags(htab, l_old, map_flags); in __htab_lru_percpu_map_update_elem()
1363 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), in __htab_lru_percpu_map_update_elem()
1366 pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size), in __htab_lru_percpu_map_update_elem()
1373 htab_unlock_bucket(htab, b, hash, flags); in __htab_lru_percpu_map_update_elem()
1376 bpf_map_dec_elem_count(&htab->map); in __htab_lru_percpu_map_update_elem()
1377 bpf_lru_push_free(&htab->lru, &l_new->lru_node); in __htab_lru_percpu_map_update_elem()
1398 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_delete_elem() local
1411 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_delete_elem()
1412 b = __select_bucket(htab, hash); in htab_map_delete_elem()
1415 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_map_delete_elem()
1423 free_htab_elem(htab, l); in htab_map_delete_elem()
1428 htab_unlock_bucket(htab, b, hash, flags); in htab_map_delete_elem()
1434 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_delete_elem() local
1447 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_delete_elem()
1448 b = __select_bucket(htab, hash); in htab_lru_map_delete_elem()
1451 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_lru_map_delete_elem()
1462 htab_unlock_bucket(htab, b, hash, flags); in htab_lru_map_delete_elem()
1464 htab_lru_push_free(htab, l); in htab_lru_map_delete_elem()
1468 static void delete_all_elements(struct bpf_htab *htab) in delete_all_elements() argument
1476 for (i = 0; i < htab->n_buckets; i++) { in delete_all_elements()
1477 struct hlist_nulls_head *head = select_bucket(htab, i); in delete_all_elements()
1483 htab_elem_free(htab, l); in delete_all_elements()
1489 static void htab_free_malloced_timers(struct bpf_htab *htab) in htab_free_malloced_timers() argument
1494 for (i = 0; i < htab->n_buckets; i++) { in htab_free_malloced_timers()
1495 struct hlist_nulls_head *head = select_bucket(htab, i); in htab_free_malloced_timers()
1501 bpf_obj_free_timer(htab->map.record, l->key + round_up(htab->map.key_size, 8)); in htab_free_malloced_timers()
1510 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_free_timers() local
1513 if (!btf_record_has_field(htab->map.record, BPF_TIMER)) in htab_map_free_timers()
1515 if (!htab_is_prealloc(htab)) in htab_map_free_timers()
1516 htab_free_malloced_timers(htab); in htab_map_free_timers()
1518 htab_free_prealloced_timers(htab); in htab_map_free_timers()
1524 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_free() local
1536 if (!htab_is_prealloc(htab)) { in htab_map_free()
1537 delete_all_elements(htab); in htab_map_free()
1539 htab_free_prealloced_fields(htab); in htab_map_free()
1540 prealloc_destroy(htab); in htab_map_free()
1544 free_percpu(htab->extra_elems); in htab_map_free()
1545 bpf_map_area_free(htab->buckets); in htab_map_free()
1546 bpf_mem_alloc_destroy(&htab->pcpu_ma); in htab_map_free()
1547 bpf_mem_alloc_destroy(&htab->ma); in htab_map_free()
1548 if (htab->use_percpu_counter) in htab_map_free()
1549 percpu_counter_destroy(&htab->pcount); in htab_map_free()
1551 free_percpu(htab->map_locked[i]); in htab_map_free()
1552 lockdep_unregister_key(&htab->lockdep_key); in htab_map_free()
1553 bpf_map_area_free(htab); in htab_map_free()
1581 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_and_delete_elem() local
1591 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_map_lookup_and_delete_elem()
1592 b = __select_bucket(htab, hash); in __htab_map_lookup_and_delete_elem()
1595 ret = htab_lock_bucket(htab, b, hash, &bflags); in __htab_map_lookup_and_delete_elem()
1610 copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu)); in __htab_map_lookup_and_delete_elem()
1611 check_and_init_map_value(&htab->map, value + off); in __htab_map_lookup_and_delete_elem()
1630 free_htab_elem(htab, l); in __htab_map_lookup_and_delete_elem()
1633 htab_unlock_bucket(htab, b, hash, bflags); in __htab_map_lookup_and_delete_elem()
1636 htab_lru_push_free(htab, l); in __htab_map_lookup_and_delete_elem()
1678 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_and_delete_batch() local
1715 if (batch >= htab->n_buckets) in __htab_map_lookup_and_delete_batch()
1718 key_size = htab->map.key_size; in __htab_map_lookup_and_delete_batch()
1719 roundup_key_size = round_up(htab->map.key_size, 8); in __htab_map_lookup_and_delete_batch()
1720 value_size = htab->map.value_size; in __htab_map_lookup_and_delete_batch()
1747 b = &htab->buckets[batch]; in __htab_map_lookup_and_delete_batch()
1751 ret = htab_lock_bucket(htab, b, batch, &flags); in __htab_map_lookup_and_delete_batch()
1774 htab_unlock_bucket(htab, b, batch, flags); in __htab_map_lookup_and_delete_batch()
1785 htab_unlock_bucket(htab, b, batch, flags); in __htab_map_lookup_and_delete_batch()
1806 copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu)); in __htab_map_lookup_and_delete_batch()
1807 check_and_init_map_value(&htab->map, dst_val + off); in __htab_map_lookup_and_delete_batch()
1840 free_htab_elem(htab, l); in __htab_map_lookup_and_delete_batch()
1847 htab_unlock_bucket(htab, b, batch, flags); in __htab_map_lookup_and_delete_batch()
1853 htab_lru_push_free(htab, l); in __htab_map_lookup_and_delete_batch()
1860 if (!bucket_cnt && (batch + 1 < htab->n_buckets)) { in __htab_map_lookup_and_delete_batch()
1877 if (batch >= htab->n_buckets) { in __htab_map_lookup_and_delete_batch()
1970 struct bpf_htab *htab; member
1980 const struct bpf_htab *htab = info->htab; in bpf_hash_map_seq_find_next() local
1989 if (bucket_id >= htab->n_buckets) in bpf_hash_map_seq_find_next()
2003 b = &htab->buckets[bucket_id++]; in bpf_hash_map_seq_find_next()
2008 for (i = bucket_id; i < htab->n_buckets; i++) { in bpf_hash_map_seq_find_next()
2009 b = &htab->buckets[i]; in bpf_hash_map_seq_find_next()
2127 seq_info->htab = container_of(map, struct bpf_htab, map); in bpf_iter_init_hash_map()
2156 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in bpf_for_each_hash_elem() local
2171 is_percpu = htab_is_percpu(htab); in bpf_for_each_hash_elem()
2179 for (i = 0; i < htab->n_buckets; i++) { in bpf_for_each_hash_elem()
2180 b = &htab->buckets[i]; in bpf_for_each_hash_elem()
2211 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_mem_usage() local
2212 u32 value_size = round_up(htab->map.value_size, 8); in htab_map_mem_usage()
2213 bool prealloc = htab_is_prealloc(htab); in htab_map_mem_usage()
2214 bool percpu = htab_is_percpu(htab); in htab_map_mem_usage()
2215 bool lru = htab_is_lru(htab); in htab_map_mem_usage()
2219 usage += sizeof(struct bucket) * htab->n_buckets; in htab_map_mem_usage()
2223 if (htab_has_extra_elems(htab)) in htab_map_mem_usage()
2226 usage += htab->elem_size * num_entries; in htab_map_mem_usage()
2235 num_entries = htab->use_percpu_counter ? in htab_map_mem_usage()
2236 percpu_counter_sum(&htab->pcount) : in htab_map_mem_usage()
2237 atomic_read(&htab->count); in htab_map_mem_usage()
2238 usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries; in htab_map_mem_usage()
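
Note on htab_map_mem_usage(): the estimate visible above charges sizeof(struct bucket) * n_buckets for the bucket array; preallocated tables then charge elem_size for each of max_entries (plus spare entries when htab_has_extra_elems() is true; the increment itself is not shown), while non-preallocated tables charge elem_size + LLIST_NODE_SZ only for the elements currently counted, taken from the percpu counter or the atomic count. A rough userspace restatement with purely illustrative constants:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        /* Illustrative numbers only; the real sizes come from the struct layouts. */
        unsigned long sizeof_bucket = 16, elem_size = 64, llist_node_sz = 8;
        unsigned long n_buckets = 1024, max_entries = 1000, nr_cpus = 8;
        unsigned long cur_count = 345;              /* pcount / atomic count */
        bool prealloc = true, has_extra_elems = true;

        unsigned long usage = sizeof_bucket * n_buckets;

        if (prealloc) {
            unsigned long num_entries = max_entries;

            if (has_extra_elems)
                num_entries += nr_cpus;             /* assumed: one spare per CPU */
            usage += elem_size * num_entries;
        } else {
            usage += (elem_size + llist_node_sz) * cur_count;
        }
        printf("approx usage: %lu bytes\n", usage);
        return 0;
    }
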
2264 BATCH_OPS(htab),
2379 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in bpf_percpu_hash_update() local
2383 if (htab_is_lru(htab)) in bpf_percpu_hash_update()
2472 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in fd_htab_map_free() local
2478 for (i = 0; i < htab->n_buckets; i++) { in fd_htab_map_free()
2479 head = select_bucket(htab, i); in fd_htab_map_free()
2596 BATCH_OPS(htab),