Lines matching refs:htab (references to the identifier htab in the kernel's BPF hash table implementation, kernel/bpf/hashtab.c). Each entry gives the file line number, the matching source line, and the enclosing context with the role of the match: argument, local, or member. Only lines that mention htab are listed, so multi-line statements appear truncated.

111 struct bpf_htab *htab; in struct htab_elem member
125 static inline bool htab_is_prealloc(const struct bpf_htab *htab) in htab_is_prealloc() argument
127 return !(htab->map.map_flags & BPF_F_NO_PREALLOC); in htab_is_prealloc()
130 static inline bool htab_use_raw_lock(const struct bpf_htab *htab) in htab_use_raw_lock() argument
132 return (!IS_ENABLED(CONFIG_PREEMPT_RT) || htab_is_prealloc(htab)); in htab_use_raw_lock()
135 static void htab_init_buckets(struct bpf_htab *htab) in htab_init_buckets() argument
139 for (i = 0; i < htab->n_buckets; i++) { in htab_init_buckets()
140 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); in htab_init_buckets()
141 if (htab_use_raw_lock(htab)) in htab_init_buckets()
142 raw_spin_lock_init(&htab->buckets[i].raw_lock); in htab_init_buckets()
144 spin_lock_init(&htab->buckets[i].lock); in htab_init_buckets()
148 static inline unsigned long htab_lock_bucket(const struct bpf_htab *htab, in htab_lock_bucket() argument
153 if (htab_use_raw_lock(htab)) in htab_lock_bucket()
160 static inline void htab_unlock_bucket(const struct bpf_htab *htab, in htab_unlock_bucket() argument
164 if (htab_use_raw_lock(htab)) in htab_unlock_bucket()
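
htab_use_raw_lock() encodes the PREEMPT_RT rule: raw spinlocks keep spinning even on RT kernels, so they are only safe when the bucket's critical section cannot sleep, which preallocated maps guarantee (no allocation happens under the lock). A minimal userspace sketch of just that decision, with a stand-in struct and a hardcoded config value (both assumptions, not kernel definitions):

    #include <stdbool.h>
    #include <stdint.h>

    #define BPF_F_NO_PREALLOC (1U << 0)   /* matches the uapi flag value */
    #define IS_PREEMPT_RT 0               /* assume a non-RT build for this sketch */

    struct fake_htab { uint32_t map_flags; };

    /* preallocated maps never allocate while holding a bucket lock */
    static bool htab_is_prealloc(const struct fake_htab *htab)
    {
        return !(htab->map_flags & BPF_F_NO_PREALLOC);
    }

    /* on RT, regular spinlocks can sleep; only take the raw (spinning)
     * variant when the critical section is guaranteed not to sleep */
    static bool htab_use_raw_lock(const struct fake_htab *htab)
    {
        return !IS_PREEMPT_RT || htab_is_prealloc(htab);
    }
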
172 static bool htab_is_lru(const struct bpf_htab *htab) in htab_is_lru() argument
174 return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH || in htab_is_lru()
175 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_lru()
178 static bool htab_is_percpu(const struct bpf_htab *htab) in htab_is_percpu() argument
180 return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_is_percpu()
181 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_percpu()
200 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i) in get_htab_elem() argument
202 return (struct htab_elem *) (htab->elems + i * htab->elem_size); in get_htab_elem()
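
get_htab_elem() treats htab->elems as one flat allocation of max_entries fixed-size slots. The same pointer arithmetic in isolation (names here are illustrative):

    #include <stdint.h>

    /* i-th slot of a flat array of elem_size-byte slots */
    static void *slot(char *elems, uint32_t elem_size, uint32_t i)
    {
        return elems + (uint64_t)i * elem_size;
    }
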
205 static void htab_free_elems(struct bpf_htab *htab) in htab_free_elems() argument
209 if (!htab_is_percpu(htab)) in htab_free_elems()
212 for (i = 0; i < htab->map.max_entries; i++) { in htab_free_elems()
215 pptr = htab_elem_get_ptr(get_htab_elem(htab, i), in htab_free_elems()
216 htab->map.key_size); in htab_free_elems()
221 bpf_map_area_free(htab->elems); in htab_free_elems()
235 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key, in prealloc_lru_pop() argument
238 struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash); in prealloc_lru_pop()
243 memcpy(l->key, key, htab->map.key_size); in prealloc_lru_pop()
250 static int prealloc_init(struct bpf_htab *htab) in prealloc_init() argument
252 u32 num_entries = htab->map.max_entries; in prealloc_init()
255 if (!htab_is_percpu(htab) && !htab_is_lru(htab)) in prealloc_init()
258 htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries, in prealloc_init()
259 htab->map.numa_node); in prealloc_init()
260 if (!htab->elems) in prealloc_init()
263 if (!htab_is_percpu(htab)) in prealloc_init()
267 u32 size = round_up(htab->map.value_size, 8); in prealloc_init()
273 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, in prealloc_init()
279 if (htab_is_lru(htab)) in prealloc_init()
280 err = bpf_lru_init(&htab->lru, in prealloc_init()
281 htab->map.map_flags & BPF_F_NO_COMMON_LRU, in prealloc_init()
285 htab); in prealloc_init()
287 err = pcpu_freelist_init(&htab->freelist); in prealloc_init()
292 if (htab_is_lru(htab)) in prealloc_init()
293 bpf_lru_populate(&htab->lru, htab->elems, in prealloc_init()
295 htab->elem_size, num_entries); in prealloc_init()
297 pcpu_freelist_populate(&htab->freelist, in prealloc_init()
298 htab->elems + offsetof(struct htab_elem, fnode), in prealloc_init()
299 htab->elem_size, num_entries); in prealloc_init()
304 htab_free_elems(htab); in prealloc_init()
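
prealloc_init() carves the flat elems array into max_entries slots and threads them onto either the LRU lists or a per-CPU freelist; the offsetof() at line 298 points the freelist at each element's embedded fnode. A single-list sketch of the populate step (the kernel's pcpu_freelist_populate() instead spreads slots across per-CPU lists for contention-free pops):

    #include <stddef.h>

    struct fnode { struct fnode *next; };

    /* push every fixed-size slot onto one free list */
    static struct fnode *populate_freelist(char *elems, size_t elem_size,
                                           unsigned int n, size_t fnode_off)
    {
        struct fnode *head = NULL;

        for (unsigned int i = 0; i < n; i++) {
            struct fnode *f = (struct fnode *)(elems + i * elem_size + fnode_off);
            f->next = head;
            head = f;
        }
        return head;
    }
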
308 static void prealloc_destroy(struct bpf_htab *htab) in prealloc_destroy() argument
310 htab_free_elems(htab); in prealloc_destroy()
312 if (htab_is_lru(htab)) in prealloc_destroy()
313 bpf_lru_destroy(&htab->lru); in prealloc_destroy()
315 pcpu_freelist_destroy(&htab->freelist); in prealloc_destroy()
318 static int alloc_extra_elems(struct bpf_htab *htab) in alloc_extra_elems() argument
330 l = pcpu_freelist_pop(&htab->freelist); in alloc_extra_elems()
337 htab->extra_elems = pptr; in alloc_extra_elems()
358 BUILD_BUG_ON(offsetof(struct htab_elem, htab) != in htab_map_alloc_check()
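
The BUILD_BUG_ON at line 358 pins a layout assumption: inside struct htab_elem's union, the htab back-pointer must overlay hash_node.pprev, so that a just-freed element (see line 801, l->htab = htab) can stash the back-pointer without clobbering the next pointer that concurrent RCU lookups may still follow. C11 can express the same compile-time check; the struct below is a made-up miniature, not the kernel layout:

    #include <stddef.h>

    struct mini_elem {
        union {
            struct { void *next; void *htab;  };  /* freed: back-pointer */
            struct { void *nxt;  void *pprev; };  /* live: hlist linkage */
        };
    };

    _Static_assert(offsetof(struct mini_elem, htab) ==
                   offsetof(struct mini_elem, pprev),
                   "back-pointer must overlay pprev, never next");
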
424 struct bpf_htab *htab; in htab_map_alloc() local
428 htab = kzalloc(sizeof(*htab), GFP_USER); in htab_map_alloc()
429 if (!htab) in htab_map_alloc()
432 bpf_map_init_from_attr(&htab->map, attr); in htab_map_alloc()
439 htab->map.max_entries = roundup(attr->max_entries, in htab_map_alloc()
441 if (htab->map.max_entries < attr->max_entries) in htab_map_alloc()
442 htab->map.max_entries = rounddown(attr->max_entries, in htab_map_alloc()
447 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); in htab_map_alloc()
449 htab->elem_size = sizeof(struct htab_elem) + in htab_map_alloc()
450 round_up(htab->map.key_size, 8); in htab_map_alloc()
452 htab->elem_size += sizeof(void *); in htab_map_alloc()
454 htab->elem_size += round_up(htab->map.value_size, 8); in htab_map_alloc()
458 if (htab->n_buckets == 0 || in htab_map_alloc()
459 htab->n_buckets > U32_MAX / sizeof(struct bucket)) in htab_map_alloc()
462 cost = (u64) htab->n_buckets * sizeof(struct bucket) + in htab_map_alloc()
463 (u64) htab->elem_size * htab->map.max_entries; in htab_map_alloc()
466 cost += (u64) round_up(htab->map.value_size, 8) * in htab_map_alloc()
467 num_possible_cpus() * htab->map.max_entries; in htab_map_alloc()
469 cost += (u64) htab->elem_size * num_possible_cpus(); in htab_map_alloc()
472 err = bpf_map_charge_init(&htab->map.memory, cost); in htab_map_alloc()
477 htab->buckets = bpf_map_area_alloc(htab->n_buckets * in htab_map_alloc()
479 htab->map.numa_node); in htab_map_alloc()
480 if (!htab->buckets) in htab_map_alloc()
483 if (htab->map.map_flags & BPF_F_ZERO_SEED) in htab_map_alloc()
484 htab->hashrnd = 0; in htab_map_alloc()
486 htab->hashrnd = get_random_int(); in htab_map_alloc()
488 htab_init_buckets(htab); in htab_map_alloc()
491 err = prealloc_init(htab); in htab_map_alloc()
499 err = alloc_extra_elems(htab); in htab_map_alloc()
505 return &htab->map; in htab_map_alloc()
508 prealloc_destroy(htab); in htab_map_alloc()
510 bpf_map_area_free(htab->buckets); in htab_map_alloc()
512 bpf_map_charge_finish(&htab->map.memory); in htab_map_alloc()
514 kfree(htab); in htab_map_alloc()
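
htab_map_alloc() sizes everything up front: n_buckets is max_entries rounded up to a power of two (enabling mask-based indexing), elem_size is the element header plus the 8-byte-aligned key and value, and cost is accumulated in u64 so the sanity check at line 458 stays meaningful. The same arithmetic as a standalone sketch; the 48-byte header and 64-byte bucket are assumptions for illustration, not the kernel's real struct sizes:

    #include <stdint.h>
    #include <stdio.h>

    /* roundup_pow_of_two() analogue for 32-bit values */
    static uint32_t pow2_roundup(uint32_t v)
    {
        v--;
        v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;
        return v + 1;
    }

    int main(void)
    {
        uint32_t max_entries = 1000, key_size = 8, value_size = 16;
        uint32_t n_buckets = pow2_roundup(max_entries);   /* 1024 */
        uint64_t elem_size = 48 + ((key_size + 7) & ~7u)
                                + ((value_size + 7) & ~7u);
        uint64_t cost = (uint64_t)n_buckets * 64 + elem_size * max_entries;

        printf("n_buckets=%u elem_size=%llu cost=%llu\n", n_buckets,
               (unsigned long long)elem_size, (unsigned long long)cost);
        return 0;
    }
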
523 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) in __select_bucket() argument
525 return &htab->buckets[hash & (htab->n_buckets - 1)]; in __select_bucket()
528 static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash) in select_bucket() argument
530 return &__select_bucket(htab, hash)->head; in select_bucket()
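
__select_bucket() is why n_buckets must be a power of two: the modulo reduces to a single AND. In isolation:

    #include <stdint.h>

    /* hash % n_buckets, valid only when n_buckets is a power of two */
    static inline uint32_t bucket_index(uint32_t hash, uint32_t n_buckets)
    {
        return hash & (n_buckets - 1);
    }
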
576 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_elem() local
585 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_map_lookup_elem()
587 head = select_bucket(htab, hash); in __htab_map_lookup_elem()
589 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in __htab_map_lookup_elem()
684 struct bpf_htab *htab = (struct bpf_htab *)arg; in htab_lru_map_delete_node() local
692 b = __select_bucket(htab, tgt_l->hash); in htab_lru_map_delete_node()
695 flags = htab_lock_bucket(htab, b); in htab_lru_map_delete_node()
703 htab_unlock_bucket(htab, b, flags); in htab_lru_map_delete_node()
711 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_get_next_key() local
724 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_get_next_key()
726 head = select_bucket(htab, hash); in htab_map_get_next_key()
729 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in htab_map_get_next_key()
745 i = hash & (htab->n_buckets - 1); in htab_map_get_next_key()
750 for (; i < htab->n_buckets; i++) { in htab_map_get_next_key()
751 head = select_bucket(htab, i); in htab_map_get_next_key()
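
htab_map_get_next_key() continues within the current key's chain when possible, and otherwise falls through to the first element of any later bucket (lines 745-751). The same order in miniature, over hypothetical node/table types:

    #include <stddef.h>
    #include <stdint.h>

    struct node { struct node *next; };
    struct table { struct node **buckets; uint32_t n_buckets; };

    /* element after cur in its own chain, else head of the next
     * non-empty bucket, else NULL (-ENOENT: iteration complete) */
    static struct node *next_elem(const struct table *t,
                                  struct node *cur, uint32_t bucket)
    {
        if (cur && cur->next)
            return cur->next;
        for (uint32_t i = bucket + 1; i < t->n_buckets; i++)
            if (t->buckets[i])
                return t->buckets[i];
        return NULL;
    }
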
767 static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l) in htab_elem_free() argument
769 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) in htab_elem_free()
770 free_percpu(htab_elem_get_ptr(l, htab->map.key_size)); in htab_elem_free()
777 struct bpf_htab *htab = l->htab; in htab_elem_free_rcu() local
779 htab_elem_free(htab, l); in htab_elem_free_rcu()
782 static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) in htab_put_fd_value() argument
784 struct bpf_map *map = &htab->map; in htab_put_fd_value()
793 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) in free_htab_elem() argument
795 htab_put_fd_value(htab, l); in free_htab_elem()
797 if (htab_is_prealloc(htab)) { in free_htab_elem()
798 __pcpu_freelist_push(&htab->freelist, &l->fnode); in free_htab_elem()
800 atomic_dec(&htab->count); in free_htab_elem()
801 l->htab = htab; in free_htab_elem()
806 static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr, in pcpu_copy_value() argument
811 memcpy(this_cpu_ptr(pptr), value, htab->map.value_size); in pcpu_copy_value()
813 u32 size = round_up(htab->map.value_size, 8); in pcpu_copy_value()
824 static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr, in pcpu_init_value() argument
833 if (htab_is_prealloc(htab) && !onallcpus) { in pcpu_init_value()
834 u32 size = round_up(htab->map.value_size, 8); in pcpu_init_value()
846 pcpu_copy_value(htab, pptr, value, onallcpus); in pcpu_init_value()
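
pcpu_copy_value() pads each CPU's value slot to 8 bytes; when onallcpus is set (syscall-side updates supplying a full per-CPU image) it copies one stride per CPU, otherwise only the current CPU's slot is written. pcpu_init_value() additionally zero-fills the other CPUs' slots for preallocated maps, since recycled elements hold stale data. A sketch with a flat array standing in for real per-CPU storage:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* slots holds ncpus consecutive regions of round_up(value_size, 8) bytes */
    static void copy_pcpu_value(char *slots, int this_cpu, int ncpus,
                                const char *value, uint32_t value_size,
                                bool onallcpus)
    {
        uint32_t stride = (value_size + 7) & ~7u;

        if (!onallcpus) {
            memcpy(slots + (size_t)this_cpu * stride, value, value_size);
            return;
        }
        /* value is a per-CPU image laid out with the same stride */
        for (int cpu = 0; cpu < ncpus; cpu++)
            memcpy(slots + (size_t)cpu * stride,
                   value + (size_t)cpu * stride, value_size);
    }
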
850 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab) in fd_htab_map_needs_adjust() argument
852 return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS && in fd_htab_map_needs_adjust()
856 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, in alloc_htab_elem() argument
861 u32 size = htab->map.value_size; in alloc_htab_elem()
862 bool prealloc = htab_is_prealloc(htab); in alloc_htab_elem()
871 pl_new = this_cpu_ptr(htab->extra_elems); in alloc_htab_elem()
873 htab_put_fd_value(htab, old_elem); in alloc_htab_elem()
878 l = __pcpu_freelist_pop(&htab->freelist); in alloc_htab_elem()
884 if (atomic_inc_return(&htab->count) > htab->map.max_entries) in alloc_htab_elem()
894 l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN, in alloc_htab_elem()
895 htab->map.numa_node); in alloc_htab_elem()
900 check_and_init_map_lock(&htab->map, in alloc_htab_elem()
920 pcpu_init_value(htab, pptr, value, onallcpus); in alloc_htab_elem()
924 } else if (fd_htab_map_needs_adjust(htab)) { in alloc_htab_elem()
928 copy_map_value(&htab->map, in alloc_htab_elem()
936 atomic_dec(&htab->count); in alloc_htab_elem()
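
For non-preallocated maps, alloc_htab_elem() enforces max_entries with an optimistic atomic increment (line 884) that is rolled back on failure (line 936); preallocated maps never need the counter because the freelist itself bounds the population. The same pattern in C11 atomics:

    #include <stdatomic.h>
    #include <stdlib.h>

    /* optimistic bump, rolled back if it would exceed the cap */
    static void *alloc_counted(atomic_int *count, int max_entries, size_t size)
    {
        void *p;

        if (atomic_fetch_add(count, 1) + 1 > max_entries) {
            atomic_fetch_sub(count, 1);
            return NULL;              /* the kernel returns ERR_PTR(-E2BIG) */
        }
        p = malloc(size);             /* GFP_ATOMIC kmalloc in the kernel */
        if (!p)
            atomic_fetch_sub(count, 1);  /* roll back on alloc failure too */
        return p;
    }
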
940 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old, in check_flags() argument
958 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_update_elem() local
974 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_update_elem()
976 b = __select_bucket(htab, hash); in htab_map_update_elem()
984 htab->n_buckets); in htab_map_update_elem()
985 ret = check_flags(htab, l_old, map_flags); in htab_map_update_elem()
1001 flags = htab_lock_bucket(htab, b); in htab_map_update_elem()
1005 ret = check_flags(htab, l_old, map_flags); in htab_map_update_elem()
1023 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false, in htab_map_update_elem()
1037 if (!htab_is_prealloc(htab)) in htab_map_update_elem()
1038 free_htab_elem(htab, l_old); in htab_map_update_elem()
1042 htab_unlock_bucket(htab, b, flags); in htab_map_update_elem()
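
check_flags() (lines 940, 985, 1005) implements the userspace-visible update semantics: BPF_NOEXIST fails with -EEXIST if the key is already present, BPF_EXIST fails with -ENOENT if it is absent, BPF_ANY always succeeds. Exercised from userspace via libbpf, assuming map_fd refers to a BPF_MAP_TYPE_HASH with u32 keys and values:

    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    void demo_update_flags(int map_fd)
    {
        __u32 key = 1, val = 42;

        bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST); /* create only */
        bpf_map_update_elem(map_fd, &key, &val, BPF_EXIST);   /* overwrite only */
        bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);     /* upsert */
    }
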
1049 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_update_elem() local
1065 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_update_elem()
1067 b = __select_bucket(htab, hash); in htab_lru_map_update_elem()
1075 l_new = prealloc_lru_pop(htab, key, hash); in htab_lru_map_update_elem()
1080 flags = htab_lock_bucket(htab, b); in htab_lru_map_update_elem()
1084 ret = check_flags(htab, l_old, map_flags); in htab_lru_map_update_elem()
1099 htab_unlock_bucket(htab, b, flags); in htab_lru_map_update_elem()
1102 bpf_lru_push_free(&htab->lru, &l_new->lru_node); in htab_lru_map_update_elem()
1104 bpf_lru_push_free(&htab->lru, &l_old->lru_node); in htab_lru_map_update_elem()
1113 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_percpu_map_update_elem() local
1129 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_percpu_map_update_elem()
1131 b = __select_bucket(htab, hash); in __htab_percpu_map_update_elem()
1134 flags = htab_lock_bucket(htab, b); in __htab_percpu_map_update_elem()
1138 ret = check_flags(htab, l_old, map_flags); in __htab_percpu_map_update_elem()
1144 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), in __htab_percpu_map_update_elem()
1147 l_new = alloc_htab_elem(htab, key, value, key_size, in __htab_percpu_map_update_elem()
1157 htab_unlock_bucket(htab, b, flags); in __htab_percpu_map_update_elem()
1165 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_lru_percpu_map_update_elem() local
1181 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_lru_percpu_map_update_elem()
1183 b = __select_bucket(htab, hash); in __htab_lru_percpu_map_update_elem()
1192 l_new = prealloc_lru_pop(htab, key, hash); in __htab_lru_percpu_map_update_elem()
1197 flags = htab_lock_bucket(htab, b); in __htab_lru_percpu_map_update_elem()
1201 ret = check_flags(htab, l_old, map_flags); in __htab_lru_percpu_map_update_elem()
1209 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), in __htab_lru_percpu_map_update_elem()
1212 pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size), in __htab_lru_percpu_map_update_elem()
1219 htab_unlock_bucket(htab, b, flags); in __htab_lru_percpu_map_update_elem()
1221 bpf_lru_push_free(&htab->lru, &l_new->lru_node); in __htab_lru_percpu_map_update_elem()
1241 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_delete_elem() local
1253 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_delete_elem()
1254 b = __select_bucket(htab, hash); in htab_map_delete_elem()
1257 flags = htab_lock_bucket(htab, b); in htab_map_delete_elem()
1263 free_htab_elem(htab, l); in htab_map_delete_elem()
1267 htab_unlock_bucket(htab, b, flags); in htab_map_delete_elem()
1273 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_delete_elem() local
1285 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_delete_elem()
1286 b = __select_bucket(htab, hash); in htab_lru_map_delete_elem()
1289 flags = htab_lock_bucket(htab, b); in htab_lru_map_delete_elem()
1298 htab_unlock_bucket(htab, b, flags); in htab_lru_map_delete_elem()
1300 bpf_lru_push_free(&htab->lru, &l->lru_node); in htab_lru_map_delete_elem()
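
On the LRU variants, deletion only unlinks the element under the bucket lock; the node is handed back to the LRU free lists afterwards via bpf_lru_push_free(), outside the lock. From userspace the LRU behavior is a creation-time choice; a sketch using modern libbpf (BPF_F_NO_COMMON_LRU selects the per-CPU LRU lists configured by bpf_lru_init() at line 280):

    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    int create_lru_map(void)
    {
        LIBBPF_OPTS(bpf_map_create_opts, opts,
                    .map_flags = BPF_F_NO_COMMON_LRU);

        /* evicts least-recently-used entries instead of failing when full */
        return bpf_map_create(BPF_MAP_TYPE_LRU_HASH, "lru_demo",
                              sizeof(__u32), sizeof(__u64), 4096, &opts);
    }
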
1304 static void delete_all_elements(struct bpf_htab *htab) in delete_all_elements() argument
1308 for (i = 0; i < htab->n_buckets; i++) { in delete_all_elements()
1309 struct hlist_nulls_head *head = select_bucket(htab, i); in delete_all_elements()
1315 htab_elem_free(htab, l); in delete_all_elements()
1323 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_free() local
1334 if (!htab_is_prealloc(htab)) in htab_map_free()
1335 delete_all_elements(htab); in htab_map_free()
1337 prealloc_destroy(htab); in htab_map_free()
1339 free_percpu(htab->extra_elems); in htab_map_free()
1340 bpf_map_area_free(htab->buckets); in htab_map_free()
1341 kfree(htab); in htab_map_free()
1372 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_and_delete_batch() local
1409 if (batch >= htab->n_buckets) in __htab_map_lookup_and_delete_batch()
1412 key_size = htab->map.key_size; in __htab_map_lookup_and_delete_batch()
1413 roundup_key_size = round_up(htab->map.key_size, 8); in __htab_map_lookup_and_delete_batch()
1414 value_size = htab->map.value_size; in __htab_map_lookup_and_delete_batch()
1441 b = &htab->buckets[batch]; in __htab_map_lookup_and_delete_batch()
1445 flags = htab_lock_bucket(htab, b); in __htab_map_lookup_and_delete_batch()
1462 htab_unlock_bucket(htab, b, flags); in __htab_map_lookup_and_delete_batch()
1473 htab_unlock_bucket(htab, b, flags); in __htab_map_lookup_and_delete_batch()
1519 free_htab_elem(htab, l); in __htab_map_lookup_and_delete_batch()
1526 htab_unlock_bucket(htab, b, flags); in __htab_map_lookup_and_delete_batch()
1532 bpf_lru_push_free(&htab->lru, &l->lru_node); in __htab_map_lookup_and_delete_batch()
1539 if (!bucket_cnt && (batch + 1 < htab->n_buckets)) { in __htab_map_lookup_and_delete_batch()
1556 if (batch >= htab->n_buckets) { in __htab_map_lookup_and_delete_batch()
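
__htab_map_lookup_and_delete_batch() drains one bucket at a time under its lock, copying out up to the caller's count of elements per call; the out_batch cookie is the bucket index to resume from, and -ENOENT signals that the last bucket has been processed. A userspace drain loop via libbpf, assuming u32 keys and u64 values:

    #include <bpf/bpf.h>
    #include <errno.h>

    void drain_map(int map_fd)
    {
        __u32 in_batch, out_batch, count;
        __u32 keys[64];
        __u64 vals[64];
        LIBBPF_OPTS(bpf_map_batch_opts, opts);
        void *in = NULL;   /* NULL on the first call: start at bucket 0 */
        int err;

        do {
            count = 64;
            err = bpf_map_lookup_and_delete_batch(map_fd, in, &out_batch,
                                                  keys, vals, &count, &opts);
            /* consume 'count' key/value pairs here */
            in_batch = out_batch;
            in = &in_batch;
        } while (!err);    /* -ENOENT marks the end of the table */
    }
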
1649 struct bpf_htab *htab; in struct bpf_iter_seq_hash_map_info member
1659 const struct bpf_htab *htab = info->htab; in bpf_hash_map_seq_find_next() local
1668 if (bucket_id >= htab->n_buckets) in bpf_hash_map_seq_find_next()
1682 b = &htab->buckets[bucket_id++]; in bpf_hash_map_seq_find_next()
1687 for (i = bucket_id; i < htab->n_buckets; i++) { in bpf_hash_map_seq_find_next()
1688 b = &htab->buckets[i]; in bpf_hash_map_seq_find_next()
1805 seq_info->htab = container_of(map, struct bpf_htab, map); in bpf_iter_init_hash_map()
1842 BATCH_OPS(htab),
1925 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in bpf_percpu_hash_update() local
1929 if (htab_is_lru(htab)) in bpf_percpu_hash_update()
2012 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in fd_htab_map_free() local
2018 for (i = 0; i < htab->n_buckets; i++) { in fd_htab_map_free()
2019 head = select_bucket(htab, i); in fd_htab_map_free()