Lines Matching +full:cpu +full:- +full:map

1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
46 * from sys_bpf(). BPF recursion is prevented by incrementing the per CPU
50 * by pinning the task to the current CPU and incrementing the recursion
51 * protection across the map operation.
72 * it is only safe to use raw spinlock for preallocated hash map on an RT kernel,
74 * after hash map was fully converted to use bpf_mem_alloc, there will be
75 * non-synchronous memory allocation for non-preallocated hash map, so it is
84 #define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
87 struct bpf_map map; member
97 /* number of elements in non-preallocated hashtable is kept
123 /* pointer to per-cpu pointer */
133 return !(htab->map.map_flags & BPF_F_NO_PREALLOC); in htab_is_prealloc()
140 for (i = 0; i < htab->n_buckets; i++) { in htab_init_buckets()
141 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); in htab_init_buckets()
142 raw_spin_lock_init(&htab->buckets[i].raw_lock); in htab_init_buckets()
143 lockdep_set_class(&htab->buckets[i].raw_lock, in htab_init_buckets()
144 &htab->lockdep_key); in htab_init_buckets()
158 if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) { in htab_lock_bucket()
159 __this_cpu_dec(*(htab->map_locked[hash])); in htab_lock_bucket()
161 return -EBUSY; in htab_lock_bucket()
164 raw_spin_lock_irqsave(&b->raw_lock, flags); in htab_lock_bucket()
175 raw_spin_unlock_irqrestore(&b->raw_lock, flags); in htab_unlock_bucket()
176 __this_cpu_dec(*(htab->map_locked[hash])); in htab_unlock_bucket()
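The lock/unlock fragments above implement the recursion protection described in the header comment: each bucket-lock class has a per-CPU counter in map_locked[], and a nested map operation on the same CPU bails out instead of deadlocking on the raw spinlock. Below is a paraphrased, self-contained sketch of that pattern; it follows the field names shown but is not a verbatim copy of the kernel function, and the choice of preempt_disable()/preempt_enable() for pinning the task to the CPU is an assumption that varies across kernel versions.

static inline int htab_lock_bucket(struct bpf_htab *htab, struct bucket *b,
				   u32 hash, unsigned long *pflags)
{
	unsigned long flags;

	hash = hash & HASHTAB_MAP_LOCK_MASK;

	preempt_disable();
	/* A return value other than 1 means a program on this CPU is already
	 * inside a map operation for the same lock index (e.g. an NMI or
	 * tracing program re-entered the map), so back off with -EBUSY
	 * instead of spinning on the raw lock we may already hold.
	 */
	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
		__this_cpu_dec(*(htab->map_locked[hash]));
		preempt_enable();
		return -EBUSY;
	}

	raw_spin_lock_irqsave(&b->raw_lock, flags);
	*pflags = flags;
	return 0;
}

static inline void htab_unlock_bucket(struct bpf_htab *htab, struct bucket *b,
				      u32 hash, unsigned long flags)
{
	hash = hash & HASHTAB_MAP_LOCK_MASK;
	raw_spin_unlock_irqrestore(&b->raw_lock, flags);
	__this_cpu_dec(*(htab->map_locked[hash]));
	preempt_enable();
}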
184 return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH || in htab_is_lru()
185 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_lru()
190 return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_is_percpu()
191 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_percpu()
197 *(void __percpu **)(l->key + key_size) = pptr; in htab_elem_set_ptr()
202 return *(void __percpu **)(l->key + key_size); in htab_elem_get_ptr()
205 static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l) in fd_htab_map_get_ptr() argument
207 return *(void **)(l->key + roundup(map->key_size, 8)); in fd_htab_map_get_ptr()
212 return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size); in get_htab_elem()
222 u32 num_entries = htab->map.max_entries; in htab_free_prealloced_timers()
225 if (!map_value_has_timer(&htab->map)) in htab_free_prealloced_timers()
234 bpf_timer_cancel_and_free(elem->key + in htab_free_prealloced_timers()
235 round_up(htab->map.key_size, 8) + in htab_free_prealloced_timers()
236 htab->map.timer_off); in htab_free_prealloced_timers()
243 u32 num_entries = htab->map.max_entries; in htab_free_prealloced_kptrs()
246 if (!map_value_has_kptrs(&htab->map)) in htab_free_prealloced_kptrs()
255 bpf_map_free_kptrs(&htab->map, elem->key + round_up(htab->map.key_size, 8)); in htab_free_prealloced_kptrs()
267 for (i = 0; i < htab->map.max_entries; i++) { in htab_free_elems()
271 htab->map.key_size); in htab_free_elems()
276 bpf_map_area_free(htab->elems); in htab_free_elems()
281 * order is always lru_lock -> bucket_lock and this only happens in
293 struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash); in prealloc_lru_pop()
298 memcpy(l->key, key, htab->map.key_size); in prealloc_lru_pop()
307 u32 num_entries = htab->map.max_entries; in prealloc_init()
308 int err = -ENOMEM, i; in prealloc_init()
313 htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries, in prealloc_init()
314 htab->map.numa_node); in prealloc_init()
315 if (!htab->elems) in prealloc_init()
316 return -ENOMEM; in prealloc_init()
322 u32 size = round_up(htab->map.value_size, 8); in prealloc_init()
325 pptr = bpf_map_alloc_percpu(&htab->map, size, 8, in prealloc_init()
329 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, in prealloc_init()
336 err = bpf_lru_init(&htab->lru, in prealloc_init()
337 htab->map.map_flags & BPF_F_NO_COMMON_LRU, in prealloc_init()
338 offsetof(struct htab_elem, hash) - in prealloc_init()
343 err = pcpu_freelist_init(&htab->freelist); in prealloc_init()
349 bpf_lru_populate(&htab->lru, htab->elems, in prealloc_init()
351 htab->elem_size, num_entries); in prealloc_init()
353 pcpu_freelist_populate(&htab->freelist, in prealloc_init()
354 htab->elems + offsetof(struct htab_elem, fnode), in prealloc_init()
355 htab->elem_size, num_entries); in prealloc_init()
369 bpf_lru_destroy(&htab->lru); in prealloc_destroy()
371 pcpu_freelist_destroy(&htab->freelist); in prealloc_destroy()
378 int cpu; in alloc_extra_elems() local
380 pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8, in alloc_extra_elems()
383 return -ENOMEM; in alloc_extra_elems()
385 for_each_possible_cpu(cpu) { in alloc_extra_elems()
386 l = pcpu_freelist_pop(&htab->freelist); in alloc_extra_elems()
391 *per_cpu_ptr(pptr, cpu) = l_new; in alloc_extra_elems()
393 htab->extra_elems = pptr; in alloc_extra_elems()
400 bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_map_alloc_check()
401 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); in htab_map_alloc_check()
402 bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || in htab_map_alloc_check()
403 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); in htab_map_alloc_check()
404 /* percpu_lru means each cpu has its own LRU list. in htab_map_alloc_check()
406 * the map's value itself is percpu. percpu_lru has in htab_map_alloc_check()
407 * nothing to do with the map's value. in htab_map_alloc_check()
409 bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU); in htab_map_alloc_check()
410 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); in htab_map_alloc_check()
411 bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED); in htab_map_alloc_check()
421 return -EPERM; in htab_map_alloc_check()
425 return -EPERM; in htab_map_alloc_check()
427 if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK || in htab_map_alloc_check()
428 !bpf_map_flags_access_ok(attr->map_flags)) in htab_map_alloc_check()
429 return -EINVAL; in htab_map_alloc_check()
432 return -EINVAL; in htab_map_alloc_check()
435 return -ENOTSUPP; in htab_map_alloc_check()
438 return -EINVAL; in htab_map_alloc_check()
441 * value_size == 0 may be allowed in the future to use map as a set in htab_map_alloc_check()
443 if (attr->max_entries == 0 || attr->key_size == 0 || in htab_map_alloc_check()
444 attr->value_size == 0) in htab_map_alloc_check()
445 return -EINVAL; in htab_map_alloc_check()
447 if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE - in htab_map_alloc_check()
452 * kmalloc-able later in htab_map_update_elem() in htab_map_alloc_check()
454 return -E2BIG; in htab_map_alloc_check()
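The flags parsed in htab_map_alloc_check() (BPF_F_NO_COMMON_LRU for a per-CPU LRU list, BPF_F_NO_PREALLOC, BPF_F_ZERO_SEED) arrive in attr->map_flags from the map-creation syscall. A minimal user-space sketch using libbpf's bpf_map_create(); the function name, key/value sizes and max_entries are illustrative only.

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* BPF_F_NO_COMMON_LRU gives each CPU its own LRU list for an LRU hash;
 * as the comment above notes, it does not make the value per-CPU.
 */
int create_percpu_lru_htab(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		    .map_flags = BPF_F_NO_COMMON_LRU);

	return bpf_map_create(BPF_MAP_TYPE_LRU_HASH, "lru_htab",
			      sizeof(__u32),	/* key_size    */
			      sizeof(__u64),	/* value_size  */
			      10000,		/* max_entries */
			      &opts);
}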
461 bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_map_alloc()
462 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); in htab_map_alloc()
463 bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || in htab_map_alloc()
464 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); in htab_map_alloc()
465 /* percpu_lru means each cpu has its own LRU list. in htab_map_alloc()
467 * the map's value itself is percpu. percpu_lru has in htab_map_alloc()
468 * nothing to do with the map's value. in htab_map_alloc()
470 bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU); in htab_map_alloc()
471 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); in htab_map_alloc()
477 return ERR_PTR(-ENOMEM); in htab_map_alloc()
479 lockdep_register_key(&htab->lockdep_key); in htab_map_alloc()
481 bpf_map_init_from_attr(&htab->map, attr); in htab_map_alloc()
484 /* ensure each CPU's lru list has >= 1 element. in htab_map_alloc()
488 htab->map.max_entries = roundup(attr->max_entries, in htab_map_alloc()
490 if (htab->map.max_entries < attr->max_entries) in htab_map_alloc()
491 htab->map.max_entries = rounddown(attr->max_entries, in htab_map_alloc()
496 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); in htab_map_alloc()
498 htab->elem_size = sizeof(struct htab_elem) + in htab_map_alloc()
499 round_up(htab->map.key_size, 8); in htab_map_alloc()
501 htab->elem_size += sizeof(void *); in htab_map_alloc()
503 htab->elem_size += round_up(htab->map.value_size, 8); in htab_map_alloc()
505 err = -E2BIG; in htab_map_alloc()
507 if (htab->n_buckets == 0 || in htab_map_alloc()
508 htab->n_buckets > U32_MAX / sizeof(struct bucket)) in htab_map_alloc()
511 err = -ENOMEM; in htab_map_alloc()
512 htab->buckets = bpf_map_area_alloc(htab->n_buckets * in htab_map_alloc()
514 htab->map.numa_node); in htab_map_alloc()
515 if (!htab->buckets) in htab_map_alloc()
519 htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map, in htab_map_alloc()
523 if (!htab->map_locked[i]) in htab_map_alloc()
527 if (htab->map.map_flags & BPF_F_ZERO_SEED) in htab_map_alloc()
528 htab->hashrnd = 0; in htab_map_alloc()
530 htab->hashrnd = get_random_u32(); in htab_map_alloc()
536 * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus() in htab_map_alloc()
538 * hash map size is 10k, which means that a system with 64 cpus will fill in htab_map_alloc()
540 * define our own batch count as 32, then a 10k hash map can be filled up to 80%: in htab_map_alloc()
541 * 10k - 8k > 32 _batch_ * 64 _cpus_ in htab_map_alloc()
542 * and __percpu_counter_compare() will still be fast. At that point hash map in htab_map_alloc()
543 * collisions will dominate its performance anyway. Assume that hash map filled in htab_map_alloc()
548 if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH) in htab_map_alloc()
549 htab->use_percpu_counter = true; in htab_map_alloc()
551 if (htab->use_percpu_counter) { in htab_map_alloc()
552 err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL); in htab_map_alloc()
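A quick worked instance of the threshold a few lines above, with illustrative numbers and the default PERCPU_COUNTER_BATCH of 32 that the comment refers to:

/* max_entries = 10000 on a machine with 64 online CPUs:
 *
 *	10000 / 2 = 5000  >  64 * 32 = 2048
 *
 * so use_percpu_counter is set and element counting goes through
 * percpu_counter_add_batch()/__percpu_counter_compare() rather than a
 * single shared atomic_t.
 */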
571 err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false); in htab_map_alloc()
575 err = bpf_mem_alloc_init(&htab->pcpu_ma, in htab_map_alloc()
576 round_up(htab->map.value_size, 8), true); in htab_map_alloc()
582 return &htab->map; in htab_map_alloc()
587 if (htab->use_percpu_counter) in htab_map_alloc()
588 percpu_counter_destroy(&htab->pcount); in htab_map_alloc()
590 free_percpu(htab->map_locked[i]); in htab_map_alloc()
591 bpf_map_area_free(htab->buckets); in htab_map_alloc()
592 bpf_mem_alloc_destroy(&htab->pcpu_ma); in htab_map_alloc()
593 bpf_mem_alloc_destroy(&htab->ma); in htab_map_alloc()
595 lockdep_unregister_key(&htab->lockdep_key); in htab_map_alloc()
607 return &htab->buckets[hash & (htab->n_buckets - 1)]; in __select_bucket()
612 return &__select_bucket(htab, hash)->head; in select_bucket()
623 if (l->hash == hash && !memcmp(&l->key, key, key_size)) in lookup_elem_raw()
642 if (l->hash == hash && !memcmp(&l->key, key, key_size)) in lookup_nulls_elem_raw()
645 if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1)))) in lookup_nulls_elem_raw()
656 static void *__htab_map_lookup_elem(struct bpf_map *map, void *key) in __htab_map_lookup_elem() argument
658 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_elem()
666 key_size = map->key_size; in __htab_map_lookup_elem()
668 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_map_lookup_elem()
672 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in __htab_map_lookup_elem()
677 static void *htab_map_lookup_elem(struct bpf_map *map, void *key) in htab_map_lookup_elem() argument
679 struct htab_elem *l = __htab_map_lookup_elem(map, key); in htab_map_lookup_elem()
682 return l->key + round_up(map->key_size, 8); in htab_map_lookup_elem()
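Both lookup paths return the address of the value stored inside the element. A sketch of the layout implied by the elem_size computation and the lookups above; the helper name htab_elem_value is hypothetical, not from the source.

/*	struct htab_elem                 <- hash, hash_node, ...
 *	key   [round_up(key_size, 8)]    <- l->key
 *	value [round_up(value_size, 8)]  <- l->key + round_up(key_size, 8)
 *	                                    (a per-CPU pointer sits here
 *	                                     instead for PERCPU_HASH maps,
 *	                                     see htab_elem_set_ptr/get_ptr)
 */
static void *htab_elem_value(struct htab_elem *l, u32 key_size)
{
	return l->key + round_up(key_size, 8);	/* what htab_map_lookup_elem() returns */
}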
691 * map->ops->map_lookup_elem
698 static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) in htab_map_gen_lookup() argument
704 (void *(*)(struct bpf_map *map, void *key))NULL)); in htab_map_gen_lookup()
709 round_up(map->key_size, 8)); in htab_map_gen_lookup()
710 return insn - insn_buf; in htab_map_gen_lookup()
713 static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map, in __htab_lru_map_lookup_elem() argument
716 struct htab_elem *l = __htab_map_lookup_elem(map, key); in __htab_lru_map_lookup_elem()
720 bpf_lru_node_set_ref(&l->lru_node); in __htab_lru_map_lookup_elem()
721 return l->key + round_up(map->key_size, 8); in __htab_lru_map_lookup_elem()
727 static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key) in htab_lru_map_lookup_elem() argument
729 return __htab_lru_map_lookup_elem(map, key, true); in htab_lru_map_lookup_elem()
732 static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key) in htab_lru_map_lookup_elem_sys() argument
734 return __htab_lru_map_lookup_elem(map, key, false); in htab_lru_map_lookup_elem_sys()
737 static int htab_lru_map_gen_lookup(struct bpf_map *map, in htab_lru_map_gen_lookup() argument
745 (void *(*)(struct bpf_map *map, void *key))NULL)); in htab_lru_map_gen_lookup()
758 round_up(map->key_size, 8)); in htab_lru_map_gen_lookup()
759 return insn - insn_buf; in htab_lru_map_gen_lookup()
765 void *map_value = elem->key + round_up(htab->map.key_size, 8); in check_and_free_fields()
767 if (map_value_has_timer(&htab->map)) in check_and_free_fields()
768 bpf_timer_cancel_and_free(map_value + htab->map.timer_off); in check_and_free_fields()
769 if (map_value_has_kptrs(&htab->map)) in check_and_free_fields()
770 bpf_map_free_kptrs(&htab->map, map_value); in check_and_free_fields()
787 b = __select_bucket(htab, tgt_l->hash); in htab_lru_map_delete_node()
788 head = &b->head; in htab_lru_map_delete_node()
790 ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags); in htab_lru_map_delete_node()
796 hlist_nulls_del_rcu(&l->hash_node); in htab_lru_map_delete_node()
801 htab_unlock_bucket(htab, b, tgt_l->hash, flags); in htab_lru_map_delete_node()
807 static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) in htab_map_get_next_key() argument
809 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_get_next_key()
817 key_size = map->key_size; in htab_map_get_next_key()
822 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_get_next_key()
827 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in htab_map_get_next_key()
833 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)), in htab_map_get_next_key()
837 /* if next elem in this hash list is non-zero, just return it */ in htab_map_get_next_key()
838 memcpy(next_key, next_l->key, key_size); in htab_map_get_next_key()
843 i = hash & (htab->n_buckets - 1); in htab_map_get_next_key()
848 for (; i < htab->n_buckets; i++) { in htab_map_get_next_key()
856 memcpy(next_key, next_l->key, key_size); in htab_map_get_next_key()
862 return -ENOENT; in htab_map_get_next_key()
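The get_next_key path above is what drives user-space key iteration. A minimal sketch of the corresponding libbpf walk; it assumes 4-byte keys for illustration.

#include <bpf/bpf.h>

/* Passing NULL as the current key returns the first key, and
 * bpf_map_get_next_key() fails with -ENOENT once the table is exhausted.
 */
static void walk_keys(int map_fd)
{
	__u32 cur, next;
	void *prev = NULL;	/* NULL: ask for the very first key */

	while (bpf_map_get_next_key(map_fd, prev, &next) == 0) {
		/* ... look up / process "next" here ... */
		cur = next;
		prev = &cur;
	}
}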
867 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) in htab_elem_free()
868 bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr); in htab_elem_free()
870 bpf_mem_cache_free(&htab->ma, l); in htab_elem_free()
875 struct bpf_map *map = &htab->map; in htab_put_fd_value() local
878 if (map->ops->map_fd_put_ptr) { in htab_put_fd_value()
879 ptr = fd_htab_map_get_ptr(map, l); in htab_put_fd_value()
880 map->ops->map_fd_put_ptr(ptr); in htab_put_fd_value()
886 if (htab->use_percpu_counter) in is_map_full()
887 return __percpu_counter_compare(&htab->pcount, htab->map.max_entries, in is_map_full()
889 return atomic_read(&htab->count) >= htab->map.max_entries; in is_map_full()
894 if (htab->use_percpu_counter) in inc_elem_count()
895 percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH); in inc_elem_count()
897 atomic_inc(&htab->count); in inc_elem_count()
902 if (htab->use_percpu_counter) in dec_elem_count()
903 percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH); in dec_elem_count()
905 atomic_dec(&htab->count); in dec_elem_count()
915 __pcpu_freelist_push(&htab->freelist, &l->fnode); in free_htab_elem()
927 memcpy(this_cpu_ptr(pptr), value, htab->map.value_size); in pcpu_copy_value()
929 u32 size = round_up(htab->map.value_size, 8); in pcpu_copy_value()
930 int off = 0, cpu; in pcpu_copy_value() local
932 for_each_possible_cpu(cpu) { in pcpu_copy_value()
933 bpf_long_memcpy(per_cpu_ptr(pptr, cpu), in pcpu_copy_value()
943 /* When not setting the initial value on all cpus, zero-fill element in pcpu_init_value()
949 u32 size = round_up(htab->map.value_size, 8); in pcpu_init_value()
951 int cpu; in pcpu_init_value() local
953 for_each_possible_cpu(cpu) { in pcpu_init_value()
954 if (cpu == current_cpu) in pcpu_init_value()
955 bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value, in pcpu_init_value()
958 memset(per_cpu_ptr(pptr, cpu), 0, size); in pcpu_init_value()
967 return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS && in fd_htab_map_needs_adjust()
976 u32 size = htab->map.value_size; in alloc_htab_elem()
984 * use per-cpu extra elems to avoid freelist_pop/push in alloc_htab_elem()
986 pl_new = this_cpu_ptr(htab->extra_elems); in alloc_htab_elem()
993 l = __pcpu_freelist_pop(&htab->freelist); in alloc_htab_elem()
995 return ERR_PTR(-E2BIG); in alloc_htab_elem()
1001 /* when map is full and update() is replacing in alloc_htab_elem()
1006 return ERR_PTR(-E2BIG); in alloc_htab_elem()
1008 l_new = bpf_mem_cache_alloc(&htab->ma); in alloc_htab_elem()
1010 l_new = ERR_PTR(-ENOMEM); in alloc_htab_elem()
1013 check_and_init_map_value(&htab->map, in alloc_htab_elem()
1014 l_new->key + round_up(key_size, 8)); in alloc_htab_elem()
1017 memcpy(l_new->key, key, key_size); in alloc_htab_elem()
1022 /* alloc_percpu zero-fills */ in alloc_htab_elem()
1023 pptr = bpf_mem_cache_alloc(&htab->pcpu_ma); in alloc_htab_elem()
1025 bpf_mem_cache_free(&htab->ma, l_new); in alloc_htab_elem()
1026 l_new = ERR_PTR(-ENOMEM); in alloc_htab_elem()
1029 l_new->ptr_to_pptr = pptr; in alloc_htab_elem()
1039 memcpy(l_new->key + round_up(key_size, 8), value, size); in alloc_htab_elem()
1041 copy_map_value(&htab->map, in alloc_htab_elem()
1042 l_new->key + round_up(key_size, 8), in alloc_htab_elem()
1046 l_new->hash = hash; in alloc_htab_elem()
1058 return -EEXIST; in check_flags()
1062 return -ENOENT; in check_flags()
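check_flags() is where the BPF_NOEXIST/BPF_EXIST update semantics come from. A small user-space sketch of the same semantics through libbpf; the function name and types are illustrative.

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* BPF_NOEXIST fails with -EEXIST when the key is already present,
 * BPF_EXIST fails with -ENOENT when it is not, and BPF_ANY accepts both
 * cases as long as the map is not full.
 */
static int demo_update(int map_fd, __u32 key, __u64 val)
{
	int err;

	err = bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST); /* create only */
	if (err)
		return err;					     /* -EEXIST if present */

	return bpf_map_update_elem(map_fd, &key, &val, BPF_EXIST);  /* update only */
}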
1068 static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, in htab_map_update_elem() argument
1071 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_update_elem()
1081 return -EINVAL; in htab_map_update_elem()
1086 key_size = map->key_size; in htab_map_update_elem()
1088 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_update_elem()
1091 head = &b->head; in htab_map_update_elem()
1094 if (unlikely(!map_value_has_spin_lock(map))) in htab_map_update_elem()
1095 return -EINVAL; in htab_map_update_elem()
1098 htab->n_buckets); in htab_map_update_elem()
1104 copy_map_value_locked(map, in htab_map_update_elem()
1105 l_old->key + round_up(key_size, 8), in htab_map_update_elem()
1132 copy_map_value_locked(map, in htab_map_update_elem()
1133 l_old->key + round_up(key_size, 8), in htab_map_update_elem()
1142 /* all pre-allocated elements are in use or memory exhausted */ in htab_map_update_elem()
1150 hlist_nulls_add_head_rcu(&l_new->hash_node, head); in htab_map_update_elem()
1152 hlist_nulls_del_rcu(&l_old->hash_node); in htab_map_update_elem()
1167 bpf_lru_push_free(&htab->lru, &elem->lru_node); in htab_lru_push_free()
1170 static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value, in htab_lru_map_update_elem() argument
1173 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_update_elem()
1183 return -EINVAL; in htab_lru_map_update_elem()
1188 key_size = map->key_size; in htab_lru_map_update_elem()
1190 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_update_elem()
1193 head = &b->head; in htab_lru_map_update_elem()
1202 return -ENOMEM; in htab_lru_map_update_elem()
1203 copy_map_value(&htab->map, in htab_lru_map_update_elem()
1204 l_new->key + round_up(map->key_size, 8), value); in htab_lru_map_update_elem()
1219 hlist_nulls_add_head_rcu(&l_new->hash_node, head); in htab_lru_map_update_elem()
1221 bpf_lru_node_set_ref(&l_new->lru_node); in htab_lru_map_update_elem()
1222 hlist_nulls_del_rcu(&l_old->hash_node); in htab_lru_map_update_elem()
1237 static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key, in __htab_percpu_map_update_elem() argument
1241 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_percpu_map_update_elem()
1251 return -EINVAL; in __htab_percpu_map_update_elem()
1256 key_size = map->key_size; in __htab_percpu_map_update_elem()
1258 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_percpu_map_update_elem()
1261 head = &b->head; in __htab_percpu_map_update_elem()
1274 /* per-cpu hash map can update value in-place */ in __htab_percpu_map_update_elem()
1284 hlist_nulls_add_head_rcu(&l_new->hash_node, head); in __htab_percpu_map_update_elem()
1292 static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, in __htab_lru_percpu_map_update_elem() argument
1296 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_lru_percpu_map_update_elem()
1306 return -EINVAL; in __htab_lru_percpu_map_update_elem()
1311 key_size = map->key_size; in __htab_lru_percpu_map_update_elem()
1313 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_lru_percpu_map_update_elem()
1316 head = &b->head; in __htab_lru_percpu_map_update_elem()
1326 return -ENOMEM; in __htab_lru_percpu_map_update_elem()
1340 bpf_lru_node_set_ref(&l_old->lru_node); in __htab_lru_percpu_map_update_elem()
1342 /* per-cpu hash map can update value in-place */ in __htab_lru_percpu_map_update_elem()
1348 hlist_nulls_add_head_rcu(&l_new->hash_node, head); in __htab_lru_percpu_map_update_elem()
1355 bpf_lru_push_free(&htab->lru, &l_new->lru_node); in __htab_lru_percpu_map_update_elem()
1359 static int htab_percpu_map_update_elem(struct bpf_map *map, void *key, in htab_percpu_map_update_elem() argument
1362 return __htab_percpu_map_update_elem(map, key, value, map_flags, false); in htab_percpu_map_update_elem()
1365 static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, in htab_lru_percpu_map_update_elem() argument
1368 return __htab_lru_percpu_map_update_elem(map, key, value, map_flags, in htab_lru_percpu_map_update_elem()
1373 static int htab_map_delete_elem(struct bpf_map *map, void *key) in htab_map_delete_elem() argument
1375 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_delete_elem()
1386 key_size = map->key_size; in htab_map_delete_elem()
1388 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_delete_elem()
1390 head = &b->head; in htab_map_delete_elem()
1399 hlist_nulls_del_rcu(&l->hash_node); in htab_map_delete_elem()
1402 ret = -ENOENT; in htab_map_delete_elem()
1409 static int htab_lru_map_delete_elem(struct bpf_map *map, void *key) in htab_lru_map_delete_elem() argument
1411 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_delete_elem()
1422 key_size = map->key_size; in htab_lru_map_delete_elem()
1424 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_delete_elem()
1426 head = &b->head; in htab_lru_map_delete_elem()
1435 hlist_nulls_del_rcu(&l->hash_node); in htab_lru_map_delete_elem()
1437 ret = -ENOENT; in htab_lru_map_delete_elem()
1453 for (i = 0; i < htab->n_buckets; i++) { in delete_all_elements()
1459 hlist_nulls_del_rcu(&l->hash_node); in delete_all_elements()
1471 for (i = 0; i < htab->n_buckets; i++) { in htab_free_malloced_timers()
1480 bpf_timer_cancel_and_free(l->key + in htab_free_malloced_timers()
1481 round_up(htab->map.key_size, 8) + in htab_free_malloced_timers()
1482 htab->map.timer_off); in htab_free_malloced_timers()
1489 static void htab_map_free_timers(struct bpf_map *map) in htab_map_free_timers() argument
1491 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_free_timers()
1494 if (!map_value_has_timer(&htab->map)) in htab_map_free_timers()
1502 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
1503 static void htab_map_free(struct bpf_map *map) in htab_map_free() argument
1505 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_free()
1510 * There is no need to synchronize_rcu() here to protect map elements. in htab_map_free()
1524 bpf_map_free_kptr_off_tab(map); in htab_map_free()
1525 free_percpu(htab->extra_elems); in htab_map_free()
1526 bpf_map_area_free(htab->buckets); in htab_map_free()
1527 bpf_mem_alloc_destroy(&htab->pcpu_ma); in htab_map_free()
1528 bpf_mem_alloc_destroy(&htab->ma); in htab_map_free()
1529 if (htab->use_percpu_counter) in htab_map_free()
1530 percpu_counter_destroy(&htab->pcount); in htab_map_free()
1532 free_percpu(htab->map_locked[i]); in htab_map_free()
1533 lockdep_unregister_key(&htab->lockdep_key); in htab_map_free()
1537 static void htab_map_seq_show_elem(struct bpf_map *map, void *key, in htab_map_seq_show_elem() argument
1544 value = htab_map_lookup_elem(map, key); in htab_map_seq_show_elem()
1550 btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); in htab_map_seq_show_elem()
1552 btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); in htab_map_seq_show_elem()
1558 static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key, in __htab_map_lookup_and_delete_elem() argument
1562 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_and_delete_elem()
1570 key_size = map->key_size; in __htab_map_lookup_and_delete_elem()
1572 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_map_lookup_and_delete_elem()
1574 head = &b->head; in __htab_map_lookup_and_delete_elem()
1582 ret = -ENOENT; in __htab_map_lookup_and_delete_elem()
1585 u32 roundup_value_size = round_up(map->value_size, 8); in __htab_map_lookup_and_delete_elem()
1587 int off = 0, cpu; in __htab_map_lookup_and_delete_elem() local
1590 for_each_possible_cpu(cpu) { in __htab_map_lookup_and_delete_elem()
1592 per_cpu_ptr(pptr, cpu), in __htab_map_lookup_and_delete_elem()
1597 u32 roundup_key_size = round_up(map->key_size, 8); in __htab_map_lookup_and_delete_elem()
1600 copy_map_value_locked(map, value, l->key + in __htab_map_lookup_and_delete_elem()
1604 copy_map_value(map, value, l->key + in __htab_map_lookup_and_delete_elem()
1606 check_and_init_map_value(map, value); in __htab_map_lookup_and_delete_elem()
1609 hlist_nulls_del_rcu(&l->hash_node); in __htab_map_lookup_and_delete_elem()
1622 static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key, in htab_map_lookup_and_delete_elem() argument
1625 return __htab_map_lookup_and_delete_elem(map, key, value, false, false, in htab_map_lookup_and_delete_elem()
1629 static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map, in htab_percpu_map_lookup_and_delete_elem() argument
1633 return __htab_map_lookup_and_delete_elem(map, key, value, false, true, in htab_percpu_map_lookup_and_delete_elem()
1637 static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key, in htab_lru_map_lookup_and_delete_elem() argument
1640 return __htab_map_lookup_and_delete_elem(map, key, value, true, false, in htab_lru_map_lookup_and_delete_elem()
1644 static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map, in htab_lru_percpu_map_lookup_and_delete_elem() argument
1648 return __htab_map_lookup_and_delete_elem(map, key, value, true, true, in htab_lru_percpu_map_lookup_and_delete_elem()
1653 __htab_map_lookup_and_delete_batch(struct bpf_map *map, in __htab_map_lookup_and_delete_batch() argument
1659 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_and_delete_batch()
1662 void __user *uvalues = u64_to_user_ptr(attr->batch.values); in __htab_map_lookup_and_delete_batch()
1663 void __user *ukeys = u64_to_user_ptr(attr->batch.keys); in __htab_map_lookup_and_delete_batch()
1664 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); in __htab_map_lookup_and_delete_batch()
1676 elem_map_flags = attr->batch.elem_flags; in __htab_map_lookup_and_delete_batch()
1678 ((elem_map_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map))) in __htab_map_lookup_and_delete_batch()
1679 return -EINVAL; in __htab_map_lookup_and_delete_batch()
1681 map_flags = attr->batch.flags; in __htab_map_lookup_and_delete_batch()
1683 return -EINVAL; in __htab_map_lookup_and_delete_batch()
1685 max_count = attr->batch.count; in __htab_map_lookup_and_delete_batch()
1689 if (put_user(0, &uattr->batch.count)) in __htab_map_lookup_and_delete_batch()
1690 return -EFAULT; in __htab_map_lookup_and_delete_batch()
1694 return -EFAULT; in __htab_map_lookup_and_delete_batch()
1696 if (batch >= htab->n_buckets) in __htab_map_lookup_and_delete_batch()
1697 return -ENOENT; in __htab_map_lookup_and_delete_batch()
1699 key_size = htab->map.key_size; in __htab_map_lookup_and_delete_batch()
1700 roundup_key_size = round_up(htab->map.key_size, 8); in __htab_map_lookup_and_delete_batch()
1701 value_size = htab->map.value_size; in __htab_map_lookup_and_delete_batch()
1718 ret = -ENOMEM; in __htab_map_lookup_and_delete_batch()
1728 b = &htab->buckets[batch]; in __htab_map_lookup_and_delete_batch()
1729 head = &b->head; in __htab_map_lookup_and_delete_batch()
1749 if (bucket_cnt > (max_count - total)) { in __htab_map_lookup_and_delete_batch()
1751 ret = -ENOSPC; in __htab_map_lookup_and_delete_batch()
1779 memcpy(dst_key, l->key, key_size); in __htab_map_lookup_and_delete_batch()
1782 int off = 0, cpu; in __htab_map_lookup_and_delete_batch() local
1785 pptr = htab_elem_get_ptr(l, map->key_size); in __htab_map_lookup_and_delete_batch()
1786 for_each_possible_cpu(cpu) { in __htab_map_lookup_and_delete_batch()
1788 per_cpu_ptr(pptr, cpu), size); in __htab_map_lookup_and_delete_batch()
1792 value = l->key + roundup_key_size; in __htab_map_lookup_and_delete_batch()
1793 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { in __htab_map_lookup_and_delete_batch()
1796 /* Actual value is the id of the inner map */ in __htab_map_lookup_and_delete_batch()
1797 map_id = map->ops->map_fd_sys_lookup_elem(*inner_map); in __htab_map_lookup_and_delete_batch()
1802 copy_map_value_locked(map, dst_val, value, in __htab_map_lookup_and_delete_batch()
1805 copy_map_value(map, dst_val, value); in __htab_map_lookup_and_delete_batch()
1806 check_and_init_map_value(map, dst_val); in __htab_map_lookup_and_delete_batch()
1809 hlist_nulls_del_rcu(&l->hash_node); in __htab_map_lookup_and_delete_batch()
1817 l->batch_flink = node_to_free; in __htab_map_lookup_and_delete_batch()
1832 node_to_free = node_to_free->batch_flink; in __htab_map_lookup_and_delete_batch()
1840 if (!bucket_cnt && (batch + 1 < htab->n_buckets)) { in __htab_map_lookup_and_delete_batch()
1851 ret = -EFAULT; in __htab_map_lookup_and_delete_batch()
1857 if (batch >= htab->n_buckets) { in __htab_map_lookup_and_delete_batch()
1858 ret = -ENOENT; in __htab_map_lookup_and_delete_batch()
1864 if (ret == -EFAULT) in __htab_map_lookup_and_delete_batch()
1868 ubatch = u64_to_user_ptr(attr->batch.out_batch); in __htab_map_lookup_and_delete_batch()
1870 put_user(total, &uattr->batch.count)) in __htab_map_lookup_and_delete_batch()
1871 ret = -EFAULT; in __htab_map_lookup_and_delete_batch()
1880 htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, in htab_percpu_map_lookup_batch() argument
1883 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, in htab_percpu_map_lookup_batch()
1888 htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map, in htab_percpu_map_lookup_and_delete_batch() argument
1892 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, in htab_percpu_map_lookup_and_delete_batch()
1897 htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, in htab_map_lookup_batch() argument
1900 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, in htab_map_lookup_batch()
1905 htab_map_lookup_and_delete_batch(struct bpf_map *map, in htab_map_lookup_and_delete_batch() argument
1909 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, in htab_map_lookup_and_delete_batch()
1914 htab_lru_percpu_map_lookup_batch(struct bpf_map *map, in htab_lru_percpu_map_lookup_batch() argument
1918 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, in htab_lru_percpu_map_lookup_batch()
1923 htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map, in htab_lru_percpu_map_lookup_and_delete_batch() argument
1927 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, in htab_lru_percpu_map_lookup_and_delete_batch()
1932 htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, in htab_lru_map_lookup_batch() argument
1935 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, in htab_lru_map_lookup_batch()
1940 htab_lru_map_lookup_and_delete_batch(struct bpf_map *map, in htab_lru_map_lookup_and_delete_batch() argument
1944 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, in htab_lru_map_lookup_and_delete_batch()
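The batch helpers above back the BPF_MAP_LOOKUP_BATCH / BPF_MAP_LOOKUP_AND_DELETE_BATCH syscall commands. A user-space sketch driving them through libbpf; key/value types, the batch size of 64 and the trimmed error handling are illustrative.

#include <bpf/bpf.h>

/* bpf_map_lookup_and_delete_batch() copies out up to *count elements,
 * returns the resume cookie in out_batch, and reports -ENOENT once the
 * whole table has been traversed.
 */
static void drain_map(int map_fd)
{
	__u32 keys[64];
	__u64 vals[64];
	__u32 count, out_batch;
	void *in = NULL;	/* NULL: start from the first bucket */
	int err;

	do {
		count = 64;
		err = bpf_map_lookup_and_delete_batch(map_fd, in, &out_batch,
						      keys, vals, &count, NULL);
		/* ... consume "count" key/value pairs, even on the last call ... */
		in = &out_batch;
	} while (!err);
}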
1949 struct bpf_map *map; member
1951 void *percpu_value_buf; // non-zero means percpu hash
1960 const struct bpf_htab *htab = info->htab; in bpf_hash_map_seq_find_next()
1961 u32 skip_elems = info->skip_elems; in bpf_hash_map_seq_find_next()
1962 u32 bucket_id = info->bucket_id; in bpf_hash_map_seq_find_next()
1969 if (bucket_id >= htab->n_buckets) in bpf_hash_map_seq_find_next()
1977 n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node)); in bpf_hash_map_seq_find_next()
1983 b = &htab->buckets[bucket_id++]; in bpf_hash_map_seq_find_next()
1988 for (i = bucket_id; i < htab->n_buckets; i++) { in bpf_hash_map_seq_find_next()
1989 b = &htab->buckets[i]; in bpf_hash_map_seq_find_next()
1993 head = &b->head; in bpf_hash_map_seq_find_next()
1996 info->bucket_id = i; in bpf_hash_map_seq_find_next()
1997 info->skip_elems = count; in bpf_hash_map_seq_find_next()
2007 info->bucket_id = i; in bpf_hash_map_seq_find_next()
2008 info->skip_elems = 0; in bpf_hash_map_seq_find_next()
2014 struct bpf_iter_seq_hash_map_info *info = seq->private; in bpf_hash_map_seq_start()
2028 struct bpf_iter_seq_hash_map_info *info = seq->private; in bpf_hash_map_seq_next()
2031 ++info->skip_elems; in bpf_hash_map_seq_next()
2037 struct bpf_iter_seq_hash_map_info *info = seq->private; in __bpf_hash_map_seq_show()
2040 struct bpf_map *map = info->map; in __bpf_hash_map_seq_show() local
2042 int ret = 0, off = 0, cpu; in __bpf_hash_map_seq_show() local
2050 ctx.map = info->map; in __bpf_hash_map_seq_show()
2052 roundup_key_size = round_up(map->key_size, 8); in __bpf_hash_map_seq_show()
2053 ctx.key = elem->key; in __bpf_hash_map_seq_show()
2054 if (!info->percpu_value_buf) { in __bpf_hash_map_seq_show()
2055 ctx.value = elem->key + roundup_key_size; in __bpf_hash_map_seq_show()
2057 roundup_value_size = round_up(map->value_size, 8); in __bpf_hash_map_seq_show()
2058 pptr = htab_elem_get_ptr(elem, map->key_size); in __bpf_hash_map_seq_show()
2059 for_each_possible_cpu(cpu) { in __bpf_hash_map_seq_show()
2060 bpf_long_memcpy(info->percpu_value_buf + off, in __bpf_hash_map_seq_show()
2061 per_cpu_ptr(pptr, cpu), in __bpf_hash_map_seq_show()
2065 ctx.value = info->percpu_value_buf; in __bpf_hash_map_seq_show()
2091 struct bpf_map *map = aux->map; in bpf_iter_init_hash_map() local
2095 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_iter_init_hash_map()
2096 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in bpf_iter_init_hash_map()
2097 buf_size = round_up(map->value_size, 8) * num_possible_cpus(); in bpf_iter_init_hash_map()
2100 return -ENOMEM; in bpf_iter_init_hash_map()
2102 seq_info->percpu_value_buf = value_buf; in bpf_iter_init_hash_map()
2105 bpf_map_inc_with_uref(map); in bpf_iter_init_hash_map()
2106 seq_info->map = map; in bpf_iter_init_hash_map()
2107 seq_info->htab = container_of(map, struct bpf_htab, map); in bpf_iter_init_hash_map()
2115 bpf_map_put_with_uref(seq_info->map); in bpf_iter_fini_hash_map()
2116 kfree(seq_info->percpu_value_buf); in bpf_iter_fini_hash_map()
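The seq_ops and init/fini callbacks above serve BPF map-element iterators. A minimal BPF-side iterator program in the style of the kernel selftests; it assumes vmlinux.h provides struct bpf_iter__bpf_map_elem and that the map holds __u32 keys and __u64 values.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("iter/bpf_map_elem")
int dump_htab(struct bpf_iter__bpf_map_elem *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	__u32 *key = ctx->key;
	__u64 *val = ctx->value;

	/* key/value are NULL on the final call that only signals the end */
	if (!key || !val)
		return 0;

	BPF_SEQ_PRINTF(seq, "%u: %llu\n", *key, *val);
	return 0;
}

char _license[] SEC("license") = "GPL";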
2133 static int bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn, in bpf_for_each_hash_elem() argument
2136 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in bpf_for_each_hash_elem()
2149 return -EINVAL; in bpf_for_each_hash_elem()
2153 roundup_key_size = round_up(map->key_size, 8); in bpf_for_each_hash_elem()
2159 for (i = 0; i < htab->n_buckets; i++) { in bpf_for_each_hash_elem()
2160 b = &htab->buckets[i]; in bpf_for_each_hash_elem()
2162 head = &b->head; in bpf_for_each_hash_elem()
2164 key = elem->key; in bpf_for_each_hash_elem()
2166 /* current cpu value for percpu map */ in bpf_for_each_hash_elem()
2167 pptr = htab_elem_get_ptr(elem, map->key_size); in bpf_for_each_hash_elem()
2170 val = elem->key + roundup_key_size; in bpf_for_each_hash_elem()
2173 ret = callback_fn((u64)(long)map, (u64)(long)key, in bpf_for_each_hash_elem()
2175 /* return value: 0 - continue, 1 - stop and return */ in bpf_for_each_hash_elem()
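bpf_for_each_hash_elem() is the kernel side of the bpf_for_each_map_elem() helper. A BPF-program sketch of the callback contract described by the comment above (return 0 to continue, 1 to stop); the map definition, attach point and types are illustrative.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 128);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

struct cb_ctx {
	__u64 sum;
};

static __u64 sum_values(struct bpf_map *map, __u32 *key, __u64 *val,
			struct cb_ctx *ctx)
{
	ctx->sum += *val;
	return 0;		/* 0 - continue, 1 - stop and return */
}

SEC("tracepoint/syscalls/sys_enter_getpid")
int sum_counters(void *ctx)
{
	struct cb_ctx cb = { .sum = 0 };

	bpf_for_each_map_elem(&counters, sum_values, &cb, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";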
2232 static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key) in htab_percpu_map_lookup_elem() argument
2234 struct htab_elem *l = __htab_map_lookup_elem(map, key); in htab_percpu_map_lookup_elem()
2237 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size)); in htab_percpu_map_lookup_elem()
2242 static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu) in htab_percpu_map_lookup_percpu_elem() argument
2246 if (cpu >= nr_cpu_ids) in htab_percpu_map_lookup_percpu_elem()
2249 l = __htab_map_lookup_elem(map, key); in htab_percpu_map_lookup_percpu_elem()
2251 return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu); in htab_percpu_map_lookup_percpu_elem()
2256 static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key) in htab_lru_percpu_map_lookup_elem() argument
2258 struct htab_elem *l = __htab_map_lookup_elem(map, key); in htab_lru_percpu_map_lookup_elem()
2261 bpf_lru_node_set_ref(&l->lru_node); in htab_lru_percpu_map_lookup_elem()
2262 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size)); in htab_lru_percpu_map_lookup_elem()
2268 static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu) in htab_lru_percpu_map_lookup_percpu_elem() argument
2272 if (cpu >= nr_cpu_ids) in htab_lru_percpu_map_lookup_percpu_elem()
2275 l = __htab_map_lookup_elem(map, key); in htab_lru_percpu_map_lookup_percpu_elem()
2277 bpf_lru_node_set_ref(&l->lru_node); in htab_lru_percpu_map_lookup_percpu_elem()
2278 return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu); in htab_lru_percpu_map_lookup_percpu_elem()
2284 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value) in bpf_percpu_hash_copy() argument
2288 int ret = -ENOENT; in bpf_percpu_hash_copy()
2289 int cpu, off = 0; in bpf_percpu_hash_copy() local
2292 /* per_cpu areas are zero-filled and bpf programs can only in bpf_percpu_hash_copy()
2296 size = round_up(map->value_size, 8); in bpf_percpu_hash_copy()
2298 l = __htab_map_lookup_elem(map, key); in bpf_percpu_hash_copy()
2301 /* We do not mark LRU map element here in order to not mess up in bpf_percpu_hash_copy()
2302 * eviction heuristics when user space does a map walk. in bpf_percpu_hash_copy()
2304 pptr = htab_elem_get_ptr(l, map->key_size); in bpf_percpu_hash_copy()
2305 for_each_possible_cpu(cpu) { in bpf_percpu_hash_copy()
2307 per_cpu_ptr(pptr, cpu), size); in bpf_percpu_hash_copy()
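bpf_percpu_hash_copy() flattens one round_up(value_size, 8) slot per possible CPU into the user buffer, in CPU order. A user-space sketch of reading such a value back; it assumes 8-byte values, and the function name is illustrative.

#include <bpf/bpf.h>
#include <bpf/libbpf.h>		/* libbpf_num_possible_cpus() */
#include <stdlib.h>

static long long sum_percpu_value(int map_fd, __u32 key)
{
	int ncpus = libbpf_num_possible_cpus();
	__u64 *vals = calloc(ncpus, sizeof(__u64));	/* value_size rounds up to 8 */
	long long sum = 0;
	int cpu;

	if (!vals || bpf_map_lookup_elem(map_fd, &key, vals) < 0)
		goto out;
	for (cpu = 0; cpu < ncpus; cpu++)
		sum += vals[cpu];
out:
	free(vals);
	return sum;
}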
2316 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, in bpf_percpu_hash_update() argument
2319 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in bpf_percpu_hash_update()
2324 ret = __htab_lru_percpu_map_update_elem(map, key, value, in bpf_percpu_hash_update()
2327 ret = __htab_percpu_map_update_elem(map, key, value, map_flags, in bpf_percpu_hash_update()
2334 static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key, in htab_percpu_map_seq_show_elem() argument
2339 int cpu; in htab_percpu_map_seq_show_elem() local
2343 l = __htab_map_lookup_elem(map, key); in htab_percpu_map_seq_show_elem()
2349 btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); in htab_percpu_map_seq_show_elem()
2351 pptr = htab_elem_get_ptr(l, map->key_size); in htab_percpu_map_seq_show_elem()
2352 for_each_possible_cpu(cpu) { in htab_percpu_map_seq_show_elem()
2353 seq_printf(m, "\tcpu%d: ", cpu); in htab_percpu_map_seq_show_elem()
2354 btf_type_seq_show(map->btf, map->btf_value_type_id, in htab_percpu_map_seq_show_elem()
2355 per_cpu_ptr(pptr, cpu), m); in htab_percpu_map_seq_show_elem()
2403 if (attr->value_size != sizeof(u32)) in fd_htab_map_alloc_check()
2404 return -EINVAL; in fd_htab_map_alloc_check()
2408 static void fd_htab_map_free(struct bpf_map *map) in fd_htab_map_free() argument
2410 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in fd_htab_map_free()
2416 for (i = 0; i < htab->n_buckets; i++) { in fd_htab_map_free()
2420 void *ptr = fd_htab_map_get_ptr(map, l); in fd_htab_map_free()
2422 map->ops->map_fd_put_ptr(ptr); in fd_htab_map_free()
2426 htab_map_free(map); in fd_htab_map_free()
2430 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value) in bpf_fd_htab_map_lookup_elem() argument
2435 if (!map->ops->map_fd_sys_lookup_elem) in bpf_fd_htab_map_lookup_elem()
2436 return -ENOTSUPP; in bpf_fd_htab_map_lookup_elem()
2439 ptr = htab_map_lookup_elem(map, key); in bpf_fd_htab_map_lookup_elem()
2441 *value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr)); in bpf_fd_htab_map_lookup_elem()
2443 ret = -ENOENT; in bpf_fd_htab_map_lookup_elem()
2450 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, in bpf_fd_htab_map_update_elem() argument
2457 ptr = map->ops->map_fd_get_ptr(map, map_file, ufd); in bpf_fd_htab_map_update_elem()
2461 ret = htab_map_update_elem(map, key, &ptr, map_flags); in bpf_fd_htab_map_update_elem()
2463 map->ops->map_fd_put_ptr(ptr); in bpf_fd_htab_map_update_elem()
2470 struct bpf_map *map, *inner_map_meta; in htab_of_map_alloc() local
2472 inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd); in htab_of_map_alloc()
2476 map = htab_map_alloc(attr); in htab_of_map_alloc()
2477 if (IS_ERR(map)) { in htab_of_map_alloc()
2479 return map; in htab_of_map_alloc()
2482 map->inner_map_meta = inner_map_meta; in htab_of_map_alloc()
2484 return map; in htab_of_map_alloc()
2487 static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key) in htab_of_map_lookup_elem() argument
2489 struct bpf_map **inner_map = htab_map_lookup_elem(map, key); in htab_of_map_lookup_elem()
2497 static int htab_of_map_gen_lookup(struct bpf_map *map, in htab_of_map_gen_lookup() argument
2504 (void *(*)(struct bpf_map *map, void *key))NULL)); in htab_of_map_gen_lookup()
2509 round_up(map->key_size, 8)); in htab_of_map_gen_lookup()
2512 return insn - insn_buf; in htab_of_map_gen_lookup()
2515 static void htab_of_map_free(struct bpf_map *map) in htab_of_map_free() argument
2517 bpf_map_meta_free(map->inner_map_meta); in htab_of_map_free()
2518 fd_htab_map_free(map); in htab_of_map_free()
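htab_of_map_alloc()/htab_of_map_lookup_elem() implement the hash-of-maps type, where the value of the outer map is a pointer to an inner map. A minimal libbpf-style declaration and lookup modeled on the kernel selftests; map names, sizes and the attach point are illustrative.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct inner_map {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 16);
	__type(key, __u32);
	__type(value, __u32);
} inner_map0 SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
	__uint(max_entries, 8);
	__type(key, __u32);
	__array(values, struct inner_map);
} outer_map SEC(".maps") = {
	.values = { [0] = &inner_map0 },
};

SEC("tracepoint/syscalls/sys_enter_getpid")
int use_inner(void *ctx)
{
	__u32 outer_key = 0, inner_key = 0;
	void *inner;
	__u32 *val;

	/* The outer lookup returns the inner map, not a plain value */
	inner = bpf_map_lookup_elem(&outer_map, &outer_key);
	if (!inner)
		return 0;

	val = bpf_map_lookup_elem(inner, &inner_key);
	if (val)
		__sync_fetch_and_add(val, 1);
	return 0;
}

char _license[] SEC("license") = "GPL";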