Searched refs:buckets (Results 1 – 25 of 84) sorted by relevance

/Linux-v5.15/drivers/s390/scsi/
zfcp_reqlist.h
24 struct list_head buckets[ZFCP_REQ_LIST_BUCKETS]; member
50 INIT_LIST_HEAD(&rl->buckets[i]); in zfcp_reqlist_alloc()
66 if (!list_empty(&rl->buckets[i])) in zfcp_reqlist_isempty()
90 list_for_each_entry(req, &rl->buckets[i], list) in _zfcp_reqlist_find()
163 list_add_tail(&req->list, &rl->buckets[i]); in zfcp_reqlist_add()
180 list_splice_init(&rl->buckets[i], list); in zfcp_reqlist_move()
207 list_for_each_entry(req, &rl->buckets[i], list) in zfcp_reqlist_apply_for_all()
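The zfcp hits above show the recurring kernel pattern of a fixed-size array of list heads used as hash buckets for outstanding requests: one INIT_LIST_HEAD per bucket, list_add_tail to file a request, list_for_each_entry to find it again. A minimal userspace sketch of the same shape, assuming a simple modulo hash and a singly linked chain (the names req_table and REQ_BUCKETS are illustrative, not taken from the driver):

    #include <stdio.h>
    #include <stdlib.h>

    #define REQ_BUCKETS 8   /* illustrative; zfcp uses ZFCP_REQ_LIST_BUCKETS */

    struct req {
        unsigned long id;
        struct req *next;   /* chain within one bucket */
    };

    static struct req *req_table[REQ_BUCKETS];   /* one list head per bucket */

    static unsigned int bucket_of(unsigned long id)
    {
        return id % REQ_BUCKETS;   /* assumed hash; the driver's real hash may differ */
    }

    static void req_add(struct req *r)
    {
        unsigned int b = bucket_of(r->id);

        r->next = req_table[b];
        req_table[b] = r;
    }

    static struct req *req_find(unsigned long id)
    {
        struct req *r;

        for (r = req_table[bucket_of(id)]; r; r = r->next)
            if (r->id == id)
                return r;
        return NULL;
    }

    int main(void)
    {
        struct req a = { .id = 42 };

        req_add(&a);
        printf("found: %d\n", req_find(42) != NULL);
        return 0;
    }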
/Linux-v5.15/tools/lib/bpf/
hashmap.h
57 struct hashmap_entry **buckets; member
67 .buckets = NULL, \
157 for (cur = map->buckets[bkt]; cur; cur = cur->next)
169 for (cur = map->buckets[bkt]; \
180 for (cur = map->buckets \
181 ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
188 for (cur = map->buckets \
189 ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
hashmap.c
45 map->buckets = NULL; in hashmap__init()
71 free(map->buckets); in hashmap__clear()
72 map->buckets = NULL; in hashmap__clear()
124 free(map->buckets); in hashmap_grow()
125 map->buckets = new_buckets; in hashmap_grow()
137 if (!map->buckets) in hashmap_find_entry()
140 for (prev_ptr = &map->buckets[hash], cur = *prev_ptr; in hashmap_find_entry()
200 hashmap_add_entry(&map->buckets[h], entry); in hashmap__insert()
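The libbpf hashmap keeps a dynamically sized buckets array of entry pointers; hashmap_grow above frees the old array and installs new_buckets, which implies rehashing every chained entry into the larger table. A compact userspace sketch of that grow-and-rehash step, assuming integer keys and a power-of-two capacity (the helper names are invented for illustration and are not the libbpf API):

    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
        unsigned long key;
        struct entry *next;
    };

    struct map {
        struct entry **buckets;   /* array of chain heads, cap entries */
        size_t cap;               /* power of two */
        size_t count;
    };

    static size_t slot(unsigned long key, size_t cap)
    {
        return key & (cap - 1);   /* mask works because cap is a power of two */
    }

    /* illustrative analogue of hashmap_grow(): allocate a bigger array, re-chain */
    static int map_grow(struct map *m)
    {
        size_t new_cap = m->cap ? m->cap * 2 : 4;
        struct entry **nb = calloc(new_cap, sizeof(*nb));
        size_t i;

        if (!nb)
            return -1;
        for (i = 0; i < m->cap; i++) {
            struct entry *e = m->buckets[i];

            while (e) {
                struct entry *next = e->next;
                size_t s = slot(e->key, new_cap);

                e->next = nb[s];
                nb[s] = e;
                e = next;
            }
        }
        free(m->buckets);     /* like free(map->buckets) in hashmap_grow() */
        m->buckets = nb;      /* like map->buckets = new_buckets            */
        m->cap = new_cap;
        return 0;
    }

    static int map_insert(struct map *m, struct entry *e)
    {
        size_t s;

        if (m->count >= m->cap && map_grow(m))
            return -1;
        s = slot(e->key, m->cap);
        e->next = m->buckets[s];
        m->buckets[s] = e;
        m->count++;
        return 0;
    }

    int main(void)
    {
        struct map m = { 0 };
        struct entry a = { .key = 7 }, b = { .key = 11 };

        map_insert(&m, &a);
        map_insert(&m, &b);
        printf("cap=%zu count=%zu\n", m.cap, m.count);
        free(m.buckets);
        return 0;
    }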
/Linux-v5.15/tools/perf/util/
hashmap.h
57 struct hashmap_entry **buckets; member
67 .buckets = NULL, \
157 for (cur = map->buckets[bkt]; cur; cur = cur->next)
169 for (cur = map->buckets[bkt]; \
180 for (cur = map->buckets \
181 ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
188 for (cur = map->buckets \
189 ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
hashmap.c
45 map->buckets = NULL; in hashmap__init()
71 free(map->buckets); in hashmap__clear()
72 map->buckets = NULL; in hashmap__clear()
124 free(map->buckets); in hashmap_grow()
125 map->buckets = new_buckets; in hashmap_grow()
137 if (!map->buckets) in hashmap_find_entry()
140 for (prev_ptr = &map->buckets[hash], cur = *prev_ptr; in hashmap_find_entry()
200 hashmap_add_entry(&map->buckets[h], entry); in hashmap__insert()
/Linux-v5.15/Documentation/networking/
nexthop-group-resilient.rst
54 continuous. With a hash table, mapping between the hash table buckets and
56 the buckets that held it are simply reassigned to other next hops::
70 choose a subset of buckets that are currently not used for forwarding
72 keeping the "busy" buckets intact. This way, established flows are ideally
80 certain number of buckets, according to its weight and the number of
81 buckets in the hash table. In accordance with the source code, we will call
86 Next hops that have fewer buckets than their wants count, are called
98 buckets:
105 underweight next hops. If, after considering all buckets in this manner,
109 There may not be enough "idle" buckets to satisfy the updated wants counts
[all …]
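The documentation excerpt describes each next hop "wanting" a number of buckets proportional to its weight and to the size of the hash table. A worked sketch of that proportional split, assuming simple rounding (the kernel's allocation additionally migrates only idle buckets and handles remainders, which this ignores):

    #include <stdio.h>

    /* Illustrative: split n_buckets among next hops in proportion to weight. */
    int main(void)
    {
        unsigned int weights[] = { 1, 3 };   /* e.g. a group with weights 1 and 3 */
        unsigned int n_buckets = 8;
        unsigned int total = 0, i;

        for (i = 0; i < 2; i++)
            total += weights[i];

        for (i = 0; i < 2; i++) {
            /* "wants" count: this next hop's weight share of the bucket table */
            unsigned int wants = (weights[i] * n_buckets + total / 2) / total;

            printf("nexthop %u wants %u of %u buckets\n", i, wants, n_buckets);
        }
        return 0;
    }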
/Linux-v5.15/block/
blk-stat.c
86 for (bucket = 0; bucket < cb->buckets; bucket++) in blk_stat_timer_fn()
93 for (bucket = 0; bucket < cb->buckets; bucket++) { in blk_stat_timer_fn()
105 unsigned int buckets, void *data) in blk_stat_alloc_callback() argument
113 cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat), in blk_stat_alloc_callback()
119 cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat), in blk_stat_alloc_callback()
130 cb->buckets = buckets; in blk_stat_alloc_callback()
147 for (bucket = 0; bucket < cb->buckets; bucket++) in blk_stat_add_callback()
blk-stat.h
45 unsigned int buckets; member
87 unsigned int buckets, void *data);
kyber-iosched.c
136 atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS]; member
216 unsigned int *buckets = kqd->latency_buckets[sched_domain][type]; in flush_latency_buckets() local
217 atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type]; in flush_latency_buckets()
221 buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0); in flush_latency_buckets()
232 unsigned int *buckets = kqd->latency_buckets[sched_domain][type]; in calculate_percentile() local
236 samples += buckets[bucket]; in calculate_percentile()
255 if (buckets[bucket] >= percentile_samples) in calculate_percentile()
257 percentile_samples -= buckets[bucket]; in calculate_percentile()
259 memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type])); in calculate_percentile()
634 atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]); in add_latency_sample()
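The kyber hits flush per-CPU latency counters into a shared buckets array (atomic_xchg in flush_latency_buckets) and then walk it in calculate_percentile, stopping at the bucket where the running sample count crosses the percentile target. A standalone sketch of that histogram walk, with made-up bucket counts:

    #include <stdio.h>

    #define NBUCKETS 8

    /* Return the index of the bucket containing the p-th percentile sample. */
    static int percentile_bucket(const unsigned int *buckets, unsigned int p)
    {
        unsigned int samples = 0, target, i;

        for (i = 0; i < NBUCKETS; i++)
            samples += buckets[i];
        if (!samples)
            return -1;

        target = (samples * p + 99) / 100;   /* round up, as a share of samples */
        for (i = 0; i < NBUCKETS; i++) {
            if (buckets[i] >= target)
                return i;        /* running total crossed the target here */
            target -= buckets[i];
        }
        return NBUCKETS - 1;
    }

    int main(void)
    {
        /* e.g. counts of requests whose latency fell into each bucket */
        unsigned int buckets[NBUCKETS] = { 5, 20, 40, 20, 10, 3, 1, 1 };

        printf("p90 falls in bucket %d\n", percentile_bucket(buckets, 90));
        return 0;
    }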
/Linux-v5.15/tools/testing/selftests/drivers/net/netdevsim/
nexthop.sh
213 $IP nexthop add id 10 group 1/2 type resilient buckets 4
229 $IP nexthop add id 10 group 1,3/2,2 type resilient buckets 5
259 $IP nexthop add id 10 group 1/2 type resilient buckets 4 &> /dev/null
325 $IP nexthop add id 10 group 1/2 type resilient buckets 6
353 $IP nexthop add id 10 group 1/2 type resilient buckets 6
408 $IP nexthop add id 10 group 1/2 type resilient buckets 8 idle_timer 4
434 type resilient buckets 8 idle_timer 6
469 $IP nexthop add id 10 group 1/2 type resilient buckets 8 $timer 4
504 $IP nexthop add id 10 group 1/2 type resilient buckets 8 $timer 8
535 type resilient buckets 8 $timer 4
[all …]
/Linux-v5.15/net/ceph/crush/
crush.c
111 if (map->buckets) { in crush_destroy()
114 if (map->buckets[b] == NULL) in crush_destroy()
116 crush_destroy_bucket(map->buckets[b]); in crush_destroy()
118 kfree(map->buckets); in crush_destroy()
mapper.c
527 itemtype = map->buckets[-1-item]->type; in crush_choose_firstn()
540 in = map->buckets[-1-item]; in crush_choose_firstn()
564 map->buckets[-1-item], in crush_choose_firstn()
741 itemtype = map->buckets[-1-item]->type; in crush_choose_indep()
758 in = map->buckets[-1-item]; in crush_choose_indep()
778 map->buckets[-1-item], in crush_choose_indep()
865 if (!map->buckets[b]) in crush_init_workspace()
869 switch (map->buckets[b]->alg) { in crush_init_workspace()
877 v += map->buckets[b]->size * sizeof(__u32); in crush_init_workspace()
949 map->buckets[-1-curstep->arg1])) { in crush_do_rule()
[all …]
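In the CRUSH hits, internal buckets are addressed through negative item ids: an id of -1-b names map->buckets[b], so the code recovers the array index with -1-item. A tiny sketch of that encoding, with invented helper names:

    #include <stdio.h>

    /* CRUSH-style id encoding: devices get ids >= 0, buckets get ids < 0. */
    static int bucket_index_from_item(int item)  { return -1 - item; }
    static int item_from_bucket_index(int index) { return -1 - index; }

    int main(void)
    {
        int b = 3;
        int item = item_from_bucket_index(b);   /* -4 */

        printf("bucket %d <-> item id %d, back to index %d\n",
               b, item, bucket_index_from_item(item));
        return 0;
    }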
/Linux-v5.15/net/netfilter/ipvs/
ip_vs_sh.c
70 struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE]; member
108 struct ip_vs_dest *dest = rcu_dereference(s->buckets[hash].dest); in ip_vs_sh_get()
130 dest = rcu_dereference(s->buckets[ihash].dest); in ip_vs_sh_get_fallback()
145 dest = rcu_dereference(s->buckets[hash].dest); in ip_vs_sh_get_fallback()
172 b = &s->buckets[0]; in ip_vs_sh_reassign()
216 b = &s->buckets[0]; in ip_vs_sh_flush()
ip_vs_dh.c
64 struct ip_vs_dh_bucket buckets[IP_VS_DH_TAB_SIZE]; member
90 return rcu_dereference(s->buckets[ip_vs_dh_hashkey(af, addr)].dest); in ip_vs_dh_get()
106 b = &s->buckets[0]; in ip_vs_dh_reassign()
140 b = &s->buckets[0]; in ip_vs_dh_flush()
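The IPVS source-hash and destination-hash schedulers keep a fixed table of buckets, each caching a destination pointer, and ip_vs_sh_get_fallback probes further buckets when the hashed one is not usable. A simplified userspace sketch of hash-then-probe selection over such a table; the table size, hash, and probe policy here are assumptions rather than the scheduler's exact behaviour:

    #include <stdio.h>
    #include <stdint.h>

    #define TAB_SIZE 16   /* illustrative; IPVS uses IP_VS_SH_TAB_SIZE */

    struct dest {
        const char *name;
        int usable;
    };

    static struct dest *buckets[TAB_SIZE];   /* one cached destination per bucket */

    static unsigned int hash_addr(uint32_t addr)
    {
        return (addr * 2654435761u) >> 28;   /* assumed hash, 0..15 */
    }

    static struct dest *pick_dest(uint32_t saddr)
    {
        unsigned int h = hash_addr(saddr), i;

        for (i = 0; i < TAB_SIZE; i++) {
            struct dest *d = buckets[(h + i) % TAB_SIZE];

            if (d && d->usable)
                return d;   /* first usable bucket, starting at the hash */
        }
        return NULL;
    }

    int main(void)
    {
        struct dest a = { "rs1", 0 }, b = { "rs2", 1 };

        buckets[hash_addr(0x0a000001)] = &a;                   /* hashed bucket is down */
        buckets[(hash_addr(0x0a000001) + 1) % TAB_SIZE] = &b;  /* fallback bucket */
        printf("chose %s\n", pick_dest(0x0a000001)->name);
        return 0;
    }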
/Linux-v5.15/fs/nfs/
pnfs_nfs.c
102 p = kmalloc(struct_size(p, buckets, n), gfp_flags); in pnfs_alloc_commit_array()
109 for (b = &p->buckets[0]; n != 0; b++, n--) { in pnfs_alloc_commit_array()
259 struct pnfs_commit_bucket *buckets, in pnfs_bucket_scan_array() argument
267 cnt = pnfs_bucket_scan_ds_commit_list(&buckets[i], cinfo, max); in pnfs_bucket_scan_array()
288 cnt = pnfs_bucket_scan_array(cinfo, array->buckets, in pnfs_generic_scan_commit_lists()
304 struct pnfs_commit_bucket *buckets, in pnfs_bucket_recover_commit_reqs() argument
314 for (i = 0, b = buckets; i < nbuckets; i++, b++) { in pnfs_bucket_recover_commit_reqs()
343 array->buckets, in pnfs_generic_recover_commit_reqs()
355 pnfs_bucket_search_commit_reqs(struct pnfs_commit_bucket *buckets, in pnfs_bucket_search_commit_reqs() argument
364 for (i = 0, b = buckets; i < nbuckets; i++, b++) { in pnfs_bucket_search_commit_reqs()
[all …]
nfs42xattr.c
70 struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE]; member
111 INIT_HLIST_HEAD(&cache->buckets[i].hlist); in nfs4_xattr_hash_init()
112 spin_lock_init(&cache->buckets[i].lock); in nfs4_xattr_hash_init()
113 cache->buckets[i].cache = cache; in nfs4_xattr_hash_init()
114 cache->buckets[i].draining = false; in nfs4_xattr_hash_init()
276 if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist))) in nfs4_xattr_free_cache_cb()
278 cache->buckets[i].draining = false; in nfs4_xattr_free_cache_cb()
395 bucket = &cache->buckets[i]; in nfs4_xattr_discard_cache()
507 return &cache->buckets[jhash(name, strlen(name), 0) & in nfs4_xattr_hash_bucket()
508 (ARRAY_SIZE(cache->buckets) - 1)]; in nfs4_xattr_hash_bucket()
[all …]
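The nfs42xattr hits select a bucket by hashing the attribute name with jhash() and masking with ARRAY_SIZE(cache->buckets) - 1, and each bucket carries its own spinlock so different buckets can be touched concurrently. A userspace sketch of that shape, substituting a trivial string hash for jhash and a pthread mutex for the spinlock:

    #include <stdio.h>
    #include <pthread.h>

    #define HASH_SIZE 16   /* must stay a power of two for the mask below */

    struct bucket {
        pthread_mutex_t lock;   /* per-bucket lock, like the per-bucket spinlock */
        unsigned int entries;   /* stand-in for the per-bucket hlist */
    };

    static struct bucket buckets[HASH_SIZE];

    static unsigned int name_hash(const char *name)
    {
        unsigned int h = 5381;   /* djb2 here; the kernel code uses jhash() */

        while (*name)
            h = h * 33 + (unsigned char)*name++;
        return h;
    }

    static struct bucket *bucket_for(const char *name)
    {
        return &buckets[name_hash(name) & (HASH_SIZE - 1)];
    }

    int main(void)
    {
        struct bucket *b;
        int i;

        for (i = 0; i < HASH_SIZE; i++)   /* mirrors spin_lock_init per bucket */
            pthread_mutex_init(&buckets[i].lock, NULL);

        b = bucket_for("user.comment");
        pthread_mutex_lock(&b->lock);     /* only this bucket is serialized */
        b->entries++;
        pthread_mutex_unlock(&b->lock);

        printf("bucket %ld has %u entries\n", (long)(b - buckets), b->entries);
        return 0;
    }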
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/ipoib/
ipoib_vlan.c
45 struct hlist_head buckets[1 << MLX5I_MAX_LOG_PKEY_SUP]; member
71 static struct qpn_to_netdev *mlx5i_find_qpn_to_netdev_node(struct hlist_head *buckets, in mlx5i_find_qpn_to_netdev_node() argument
74 struct hlist_head *h = &buckets[hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP)]; in mlx5i_find_qpn_to_netdev_node()
99 hlist_add_head(&new_node->hlist, &ht->buckets[key]); in mlx5i_pkey_add_qpn()
112 node = mlx5i_find_qpn_to_netdev_node(ht->buckets, qpn); in mlx5i_pkey_del_qpn()
131 node = mlx5i_find_qpn_to_netdev_node(ipriv->qpn_htbl->buckets, qpn); in mlx5i_pkey_get_netdev()
/Linux-v5.15/kernel/bpf/
bpf_local_storage.c
21 return &smap->buckets[hash_ptr(selem, smap->bucket_log)]; in select_bucket()
500 b = &smap->buckets[i]; in bpf_local_storage_map_free()
535 kvfree(smap->buckets); in bpf_local_storage_map_free()
574 smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets, in bpf_local_storage_map_alloc()
576 if (!smap->buckets) { in bpf_local_storage_map_alloc()
582 INIT_HLIST_HEAD(&smap->buckets[i].list); in bpf_local_storage_map_alloc()
583 raw_spin_lock_init(&smap->buckets[i].lock); in bpf_local_storage_map_alloc()
stackmap.c
31 struct stack_map_bucket *buckets[]; member
287 bucket = READ_ONCE(smap->buckets[id]); in __bpf_get_stackid()
331 old_bucket = xchg(&smap->buckets[id], new_bucket); in __bpf_get_stackid()
630 bucket = xchg(&smap->buckets[id], NULL); in bpf_stackmap_copy()
638 old_bucket = xchg(&smap->buckets[id], bucket); in bpf_stackmap_copy()
657 if (id >= smap->n_buckets || !smap->buckets[id]) in stack_map_get_next_key()
663 while (id < smap->n_buckets && !smap->buckets[id]) in stack_map_get_next_key()
689 old_bucket = xchg(&smap->buckets[id], NULL); in stack_map_delete_elem()
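The stackmap hits replace a bucket by atomically exchanging the pointer stored in smap->buckets[id] (xchg), so a concurrent reader observes either the old or the new bucket and the displaced one can be reclaimed afterwards. A userspace sketch of the same swap using C11 atomics; the surrounding structures are invented for illustration:

    #include <stdio.h>
    #include <stdatomic.h>

    struct stack_bucket {
        unsigned int nr_frames;
    };

    #define N_BUCKETS 4

    /* one atomic pointer slot per bucket id */
    static _Atomic(struct stack_bucket *) buckets[N_BUCKETS];

    /* Install a new bucket and return whatever was there before (may be NULL). */
    static struct stack_bucket *install_bucket(unsigned int id,
                                               struct stack_bucket *newb)
    {
        return atomic_exchange(&buckets[id], newb);
    }

    int main(void)
    {
        struct stack_bucket a = { .nr_frames = 3 };
        struct stack_bucket b = { .nr_frames = 5 };
        struct stack_bucket *old;

        install_bucket(1, &a);
        old = install_bucket(1, &b);   /* old now points at a; free/reuse it */
        printf("old bucket had %u frames, new has %u\n",
               old->nr_frames, atomic_load(&buckets[1])->nr_frames);
        return 0;
    }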
/Linux-v5.15/drivers/md/
dm-region-hash.c
70 struct list_head *buckets; member
206 rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets))); in dm_region_hash_create()
207 if (!rh->buckets) { in dm_region_hash_create()
214 INIT_LIST_HEAD(rh->buckets + i); in dm_region_hash_create()
228 vfree(rh->buckets); in dm_region_hash_create()
244 list_for_each_entry_safe(reg, nreg, rh->buckets + h, in dm_region_hash_destroy()
255 vfree(rh->buckets); in dm_region_hash_destroy()
274 struct list_head *bucket = rh->buckets + rh_hash(rh, region); in __rh_lookup()
285 list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key)); in __rh_insert()
/Linux-v5.15/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
pno.c
298 struct brcmf_gscan_bucket_config **buckets, in brcmf_pno_prep_fwconfig() argument
323 *buckets = NULL; in brcmf_pno_prep_fwconfig()
355 *buckets = fw_buckets; in brcmf_pno_prep_fwconfig()
396 struct brcmf_gscan_bucket_config *buckets; in brcmf_pno_config_sched_scans() local
403 n_buckets = brcmf_pno_prep_fwconfig(pi, &pno_cfg, &buckets, in brcmf_pno_config_sched_scans()
408 gsz = sizeof(*gscan_cfg) + (n_buckets - 1) * sizeof(*buckets); in brcmf_pno_config_sched_scans()
437 memcpy(&gscan_cfg->bucket[0], buckets, in brcmf_pno_config_sched_scans()
438 n_buckets * sizeof(*buckets)); in brcmf_pno_config_sched_scans()
463 kfree(buckets); in brcmf_pno_config_sched_scans()
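The brcmfmac code sizes its firmware configuration as sizeof(*gscan_cfg) + (n_buckets - 1) * sizeof(*buckets) and memcpy()s the bucket array into the trailing member, and the pnfs_nfs.c hit earlier does the same job with struct_size(). A userspace sketch of allocating a header plus a variable number of trailing bucket entries; the struct names are invented:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct bucket_config {
        unsigned int period_ms;
    };

    struct scan_config {
        unsigned int version;
        unsigned int n_buckets;
        struct bucket_config bucket[];   /* flexible array member */
    };

    static struct scan_config *scan_config_alloc(const struct bucket_config *src,
                                                 unsigned int n)
    {
        /* userspace equivalent of struct_size(cfg, bucket, n) */
        struct scan_config *cfg =
            malloc(sizeof(*cfg) + n * sizeof(cfg->bucket[0]));

        if (!cfg)
            return NULL;
        cfg->version = 1;
        cfg->n_buckets = n;
        memcpy(cfg->bucket, src, n * sizeof(cfg->bucket[0]));
        return cfg;
    }

    int main(void)
    {
        struct bucket_config b[2] = { { 30000 }, { 60000 } };
        struct scan_config *cfg = scan_config_alloc(b, 2);

        if (cfg)
            printf("%u buckets, first period %u ms\n",
                   cfg->n_buckets, cfg->bucket[0].period_ms);
        free(cfg);
        return 0;
    }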
/Linux-v5.15/net/sched/
sch_hhf.c
128 struct wdrr_bucket buckets[WDRR_BUCKET_CNT]; member
355 bucket = &q->buckets[WDRR_BUCKET_FOR_HH]; in hhf_drop()
357 bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH]; in hhf_drop()
368 return bucket - q->buckets; in hhf_drop()
381 bucket = &q->buckets[idx]; in hhf_enqueue()
435 int weight = (bucket - q->buckets == WDRR_BUCKET_FOR_HH) ? in hhf_dequeue()
645 struct wdrr_bucket *bucket = q->buckets + i; in hhf_init()
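sch_hhf never stores a bucket index: hhf_drop and hhf_dequeue recover it as bucket - q->buckets, plain pointer subtraction against the start of the array, and compare the result with WDRR_BUCKET_FOR_HH to pick the dequeue weight. A minimal sketch of that idiom (the weights are illustrative):

    #include <stdio.h>

    #define BUCKET_FOR_HH     0   /* heavy-hitter bucket, as in WDRR_BUCKET_FOR_HH */
    #define BUCKET_FOR_NON_HH 1

    struct wdrr_bucket {
        int deficit;
    };

    int main(void)
    {
        struct wdrr_bucket buckets[2] = { { 0 }, { 0 } };
        struct wdrr_bucket *bucket = &buckets[BUCKET_FOR_NON_HH];

        /* index recovered by pointer subtraction, no separate field needed */
        long idx = bucket - buckets;
        int weight = (idx == BUCKET_FOR_HH) ? 1 : 2;

        printf("bucket index %ld gets weight %d\n", idx, weight);
        return 0;
    }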
/Linux-v5.15/drivers/md/persistent-data/
dm-transaction-manager.c
96 struct hlist_head buckets[DM_HASH_SIZE]; member
110 hlist_for_each_entry(si, tm->buckets + bucket, hlist) in is_shadow()
134 hlist_add_head(&si->hlist, tm->buckets + bucket); in insert_shadow()
148 bucket = tm->buckets + i; in wipe_shadow_table()
177 INIT_HLIST_HEAD(tm->buckets + i); in dm_tm_create()
/Linux-v5.15/net/netfilter/
nft_set_hash.c
441 u32 buckets; member
460 hash = reciprocal_scale(hash, priv->buckets); in nft_hash_lookup()
480 hash = reciprocal_scale(hash, priv->buckets); in nft_hash_get()
501 hash = reciprocal_scale(hash, priv->buckets); in nft_hash_lookup_fast()
525 hash = reciprocal_scale(hash, priv->buckets); in nft_jhash()
607 for (i = 0; i < priv->buckets; i++) { in nft_hash_walk()
638 priv->buckets = nft_hash_buckets(desc->size); in nft_hash_init()
651 for (i = 0; i < priv->buckets; i++) { in nft_hash_destroy()
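nft_set_hash maps a 32-bit hash onto its bucket count with reciprocal_scale() instead of a modulo. My understanding is that reciprocal_scale(val, n) computes ((u64)val * n) >> 32, which lands uniformly in [0, n) with one multiply and no division; a small sketch under that assumption:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed behaviour of the kernel's reciprocal_scale(): scale a full-range
     * 32-bit value into [0, ep_ro) with one multiply and a shift. */
    static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
    {
        return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
    }

    int main(void)
    {
        uint32_t buckets = 12;
        uint32_t hashes[] = { 0x00000000u, 0x40000000u, 0xc0000000u, 0xffffffffu };
        unsigned int i;

        for (i = 0; i < 4; i++)
            printf("hash %#010x -> bucket %u of %u\n",
                   hashes[i], reciprocal_scale(hashes[i], buckets), buckets);
        return 0;
    }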
/Linux-v5.15/drivers/md/bcache/
alloc.c
146 trace_bcache_invalidate(ca, b - ca->buckets); in __bch_invalidate_one_bucket()
157 fifo_push(&ca->free_inc, b - ca->buckets); in bch_invalidate_one_bucket()
226 b = ca->buckets + ca->fifo_last_bucket++; in invalidate_buckets_fifo()
252 b = ca->buckets + n; in invalidate_buckets_random()
445 b = ca->buckets + r; in bch_bucket_alloc()
507 k->ptr[0] = MAKE_PTR(ca->buckets[b].gen, in __bch_bucket_alloc_set()
