Searched refs:buckets (Results 1 – 25 of 70) sorted by relevance

/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_icm_pool.c
43 struct mlx5dr_icm_bucket *buckets; member
338 hot_size += pool->buckets[chunk_order].hot_list_count * in dr_icm_hot_mem_size()
379 bool buckets[DR_CHUNK_SIZE_MAX]) in dr_icm_chill_buckets_start()
385 bucket = &pool->buckets[i]; in dr_icm_chill_buckets_start()
396 buckets[i] = true; in dr_icm_chill_buckets_start()
403 bool buckets[DR_CHUNK_SIZE_MAX]) in dr_icm_chill_buckets_end()
409 bucket = &pool->buckets[i]; in dr_icm_chill_buckets_end()
415 if (!buckets[i]) in dr_icm_chill_buckets_end()
425 bool buckets[DR_CHUNK_SIZE_MAX]) in dr_icm_chill_buckets_abort()
431 bucket = &pool->buckets[i]; in dr_icm_chill_buckets_abort()
[all …]
/Linux-v5.4/drivers/s390/scsi/
zfcp_reqlist.h
24 struct list_head buckets[ZFCP_REQ_LIST_BUCKETS]; member
50 INIT_LIST_HEAD(&rl->buckets[i]); in zfcp_reqlist_alloc()
66 if (!list_empty(&rl->buckets[i])) in zfcp_reqlist_isempty()
90 list_for_each_entry(req, &rl->buckets[i], list) in _zfcp_reqlist_find()
163 list_add_tail(&req->list, &rl->buckets[i]); in zfcp_reqlist_add()
180 list_splice_init(&rl->buckets[i], list); in zfcp_reqlist_move()
207 list_for_each_entry(req, &rl->buckets[i], list) in zfcp_reqlist_apply_for_all()
/Linux-v5.4/tools/lib/bpf/
hashmap.h
40 struct hashmap_entry **buckets; member
50 .buckets = NULL, \
140 for (cur = map->buckets[bkt]; cur; cur = cur->next)
152 for (cur = map->buckets[bkt]; \
165 map->buckets ? map->buckets[bkt] : NULL; }); \
173 cur = map->buckets ? map->buckets[bkt] : NULL; }); \
hashmap.c
39 map->buckets = NULL; in hashmap__init()
59 free(map->buckets); in hashmap__clear()
112 free(map->buckets); in hashmap_grow()
113 map->buckets = new_buckets; in hashmap_grow()
125 if (!map->buckets) in hashmap_find_entry()
128 for (prev_ptr = &map->buckets[hash], cur = *prev_ptr; in hashmap_find_entry()
188 hashmap_add_entry(&map->buckets[h], entry); in hashmap__insert()
/Linux-v5.4/block/
blk-stat.c
85 for (bucket = 0; bucket < cb->buckets; bucket++) in blk_stat_timer_fn()
92 for (bucket = 0; bucket < cb->buckets; bucket++) { in blk_stat_timer_fn()
104 unsigned int buckets, void *data) in blk_stat_alloc_callback() argument
112 cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat), in blk_stat_alloc_callback()
118 cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat), in blk_stat_alloc_callback()
129 cb->buckets = buckets; in blk_stat_alloc_callback()
145 for (bucket = 0; bucket < cb->buckets; bucket++) in blk_stat_add_callback()
blk-stat.h
45 unsigned int buckets; member
87 unsigned int buckets, void *data);
kyber-iosched.c
134 atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS]; member
213 unsigned int *buckets = kqd->latency_buckets[sched_domain][type]; in flush_latency_buckets() local
214 atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type]; in flush_latency_buckets()
218 buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0); in flush_latency_buckets()
229 unsigned int *buckets = kqd->latency_buckets[sched_domain][type]; in calculate_percentile() local
233 samples += buckets[bucket]; in calculate_percentile()
252 if (buckets[bucket] >= percentile_samples) in calculate_percentile()
254 percentile_samples -= buckets[bucket]; in calculate_percentile()
256 memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type])); in calculate_percentile()
632 atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]); in add_latency_sample()
/Linux-v5.4/fs/nfs/filelayout/
filelayout.c
756 kfree(flo->commit_info.buckets); in filelayout_free_lseg()
757 flo->commit_info.buckets = NULL; in filelayout_free_lseg()
768 struct pnfs_commit_bucket *buckets; in filelayout_alloc_commit_info() local
787 buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket), in filelayout_alloc_commit_info()
789 if (!buckets) in filelayout_alloc_commit_info()
792 INIT_LIST_HEAD(&buckets[i].written); in filelayout_alloc_commit_info()
793 INIT_LIST_HEAD(&buckets[i].committing); in filelayout_alloc_commit_info()
795 buckets[i].direct_verf.committed = NFS_INVALID_STABLE_HOW; in filelayout_alloc_commit_info()
802 list_splice(&cinfo->ds->buckets[i].written, in filelayout_alloc_commit_info()
803 &buckets[i].written); in filelayout_alloc_commit_info()
[all …]
/Linux-v5.4/net/ceph/crush/
crush.c
112 if (map->buckets) { in crush_destroy()
115 if (map->buckets[b] == NULL) in crush_destroy()
117 crush_destroy_bucket(map->buckets[b]); in crush_destroy()
119 kfree(map->buckets); in crush_destroy()
mapper.c
527 itemtype = map->buckets[-1-item]->type; in crush_choose_firstn()
540 in = map->buckets[-1-item]; in crush_choose_firstn()
564 map->buckets[-1-item], in crush_choose_firstn()
741 itemtype = map->buckets[-1-item]->type; in crush_choose_indep()
758 in = map->buckets[-1-item]; in crush_choose_indep()
778 map->buckets[-1-item], in crush_choose_indep()
865 if (!map->buckets[b]) in crush_init_workspace()
869 switch (map->buckets[b]->alg) { in crush_init_workspace()
877 v += map->buckets[b]->size * sizeof(__u32); in crush_init_workspace()
949 map->buckets[-1-curstep->arg1])) { in crush_do_rule()
[all …]
/Linux-v5.4/net/netfilter/ipvs/
ip_vs_sh.c
70 struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE]; member
108 struct ip_vs_dest *dest = rcu_dereference(s->buckets[hash].dest); in ip_vs_sh_get()
130 dest = rcu_dereference(s->buckets[ihash].dest); in ip_vs_sh_get_fallback()
145 dest = rcu_dereference(s->buckets[hash].dest); in ip_vs_sh_get_fallback()
172 b = &s->buckets[0]; in ip_vs_sh_reassign()
216 b = &s->buckets[0]; in ip_vs_sh_flush()
ip_vs_dh.c
64 struct ip_vs_dh_bucket buckets[IP_VS_DH_TAB_SIZE]; member
90 return rcu_dereference(s->buckets[ip_vs_dh_hashkey(af, addr)].dest); in ip_vs_dh_get()
106 b = &s->buckets[0]; in ip_vs_dh_reassign()
140 b = &s->buckets[0]; in ip_vs_dh_flush()
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/ipoib/
ipoib_vlan.c
45 struct hlist_head buckets[1 << MLX5I_MAX_LOG_PKEY_SUP]; member
71 static struct qpn_to_netdev *mlx5i_find_qpn_to_netdev_node(struct hlist_head *buckets, in mlx5i_find_qpn_to_netdev_node() argument
74 struct hlist_head *h = &buckets[hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP)]; in mlx5i_find_qpn_to_netdev_node()
99 hlist_add_head(&new_node->hlist, &ht->buckets[key]); in mlx5i_pkey_add_qpn()
112 node = mlx5i_find_qpn_to_netdev_node(ht->buckets, qpn); in mlx5i_pkey_del_qpn()
131 node = mlx5i_find_qpn_to_netdev_node(ipriv->qpn_htbl->buckets, qpn); in mlx5i_pkey_get_netdev()
/Linux-v5.4/drivers/md/
dm-region-hash.c
70 struct list_head *buckets; member
206 rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets))); in dm_region_hash_create()
207 if (!rh->buckets) { in dm_region_hash_create()
214 INIT_LIST_HEAD(rh->buckets + i); in dm_region_hash_create()
228 vfree(rh->buckets); in dm_region_hash_create()
244 list_for_each_entry_safe(reg, nreg, rh->buckets + h, in dm_region_hash_destroy()
255 vfree(rh->buckets); in dm_region_hash_destroy()
274 struct list_head *bucket = rh->buckets + rh_hash(rh, region); in __rh_lookup()
285 list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key)); in __rh_insert()
/Linux-v5.4/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
pno.c
294 struct brcmf_gscan_bucket_config **buckets, in brcmf_pno_prep_fwconfig() argument
319 *buckets = NULL; in brcmf_pno_prep_fwconfig()
351 *buckets = fw_buckets; in brcmf_pno_prep_fwconfig()
392 struct brcmf_gscan_bucket_config *buckets; in brcmf_pno_config_sched_scans() local
399 n_buckets = brcmf_pno_prep_fwconfig(pi, &pno_cfg, &buckets, in brcmf_pno_config_sched_scans()
404 gsz = sizeof(*gscan_cfg) + (n_buckets - 1) * sizeof(*buckets); in brcmf_pno_config_sched_scans()
433 memcpy(&gscan_cfg->bucket[0], buckets, in brcmf_pno_config_sched_scans()
434 n_buckets * sizeof(*buckets)); in brcmf_pno_config_sched_scans()
459 kfree(buckets); in brcmf_pno_config_sched_scans()
/Linux-v5.4/net/core/
bpf_sk_storage.c
49 struct bucket *buckets; member
96 return &smap->buckets[hash_ptr(selem, smap->bucket_log)]; in select_bucket()
578 b = &smap->buckets[i]; in bpf_sk_storage_map_free()
605 kvfree(smap->buckets); in bpf_sk_storage_map_free()
649 cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap); in bpf_sk_storage_map_alloc()
657 smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets, in bpf_sk_storage_map_alloc()
659 if (!smap->buckets) { in bpf_sk_storage_map_alloc()
666 INIT_HLIST_HEAD(&smap->buckets[i].list); in bpf_sk_storage_map_alloc()
667 raw_spin_lock_init(&smap->buckets[i].lock); in bpf_sk_storage_map_alloc()
/Linux-v5.4/fs/nfs/
pnfs_nfs.c
125 cnt = pnfs_generic_scan_ds_commit_list(&cinfo->ds->buckets[i], in pnfs_generic_scan_commit_lists()
145 for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) { in pnfs_generic_recover_commit_reqs()
171 bucket = &fl_cinfo->buckets[i]; in pnfs_generic_retry_commit()
198 bucket = fl_cinfo->buckets; in pnfs_generic_alloc_ds_commits()
223 bucket = &cinfo->ds->buckets[data->ds_commit_index]; in pnfs_fetch_commit_bucket_list()
934 struct pnfs_commit_bucket *buckets; in pnfs_layout_mark_request_commit() local
937 buckets = cinfo->ds->buckets; in pnfs_layout_mark_request_commit()
938 list = &buckets[ds_commit_idx].written; in pnfs_layout_mark_request_commit()
951 WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL); in pnfs_layout_mark_request_commit()
952 buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg); in pnfs_layout_mark_request_commit()
/Linux-v5.4/kernel/bpf/
stackmap.c
30 struct stack_map_bucket *buckets[]; member
386 bucket = READ_ONCE(smap->buckets[id]); in BPF_CALL_3()
430 old_bucket = xchg(&smap->buckets[id], new_bucket); in BPF_CALL_3()
528 bucket = xchg(&smap->buckets[id], NULL); in bpf_stackmap_copy()
536 old_bucket = xchg(&smap->buckets[id], bucket); in bpf_stackmap_copy()
555 if (id >= smap->n_buckets || !smap->buckets[id]) in stack_map_get_next_key()
561 while (id < smap->n_buckets && !smap->buckets[id]) in stack_map_get_next_key()
587 old_bucket = xchg(&smap->buckets[id], NULL); in stack_map_delete_elem()
/Linux-v5.4/drivers/md/persistent-data/
dm-transaction-manager.c
96 struct hlist_head buckets[DM_HASH_SIZE]; member
110 hlist_for_each_entry(si, tm->buckets + bucket, hlist) in is_shadow()
134 hlist_add_head(&si->hlist, tm->buckets + bucket); in insert_shadow()
148 bucket = tm->buckets + i; in wipe_shadow_table()
177 INIT_HLIST_HEAD(tm->buckets + i); in dm_tm_create()
/Linux-v5.4/net/openvswitch/
flow_table.c
137 kvfree(ti->buckets); in __table_instance_destroy()
149 ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head), in table_instance_alloc()
151 if (!ti->buckets) { in table_instance_alloc()
157 INIT_HLIST_HEAD(&ti->buckets[i]); in table_instance_alloc()
215 struct hlist_head *head = &ti->buckets[i]; in table_instance_destroy()
260 head = &ti->buckets[*bucket]; in ovs_flow_tbl_dump_next()
279 return &ti->buckets[hash & (ti->n_buckets - 1)]; in find_bucket()
312 struct hlist_head *head = &old->buckets[i]; in flow_table_copy_flows()
/Linux-v5.4/net/sched/
sch_hhf.c
128 struct wdrr_bucket buckets[WDRR_BUCKET_CNT]; member
355 bucket = &q->buckets[WDRR_BUCKET_FOR_HH]; in hhf_drop()
357 bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH]; in hhf_drop()
368 return bucket - q->buckets; in hhf_drop()
381 bucket = &q->buckets[idx]; in hhf_enqueue()
435 int weight = (bucket - q->buckets == WDRR_BUCKET_FOR_HH) ? in hhf_dequeue()
645 struct wdrr_bucket *bucket = q->buckets + i; in hhf_init()
/Linux-v5.4/lib/
rhashtable.c
59 return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]); in lockdep_rht_bucket_is_held()
92 ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); in nested_bucket_table_free()
149 size = sizeof(*tbl) + sizeof(tbl->buckets[0]); in nested_bucket_table_alloc()
155 if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets, in nested_bucket_table_alloc()
175 tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp); in bucket_table_alloc()
197 INIT_RHT_NULLS_HEAD(tbl->buckets[i]); in bucket_table_alloc()
247 rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING); in rhashtable_rehash_one()
249 head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash); in rhashtable_rehash_one()
253 rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry); in rhashtable_rehash_one()
1176 ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); in __rht_bucket_nested()
[all …]
/Linux-v5.4/net/netfilter/
nft_set_hash.c
413 u32 buckets; member
431 hash = reciprocal_scale(hash, priv->buckets); in nft_hash_lookup()
451 hash = reciprocal_scale(hash, priv->buckets); in nft_hash_get()
471 hash = reciprocal_scale(hash, priv->buckets); in nft_hash_lookup_fast()
495 hash = reciprocal_scale(hash, priv->buckets); in nft_jhash()
577 for (i = 0; i < priv->buckets; i++) { in nft_hash_walk()
608 priv->buckets = nft_hash_buckets(desc->size); in nft_hash_init()
621 for (i = 0; i < priv->buckets; i++) { in nft_hash_destroy()
/Linux-v5.4/drivers/md/bcache/
alloc.c
147 trace_bcache_invalidate(ca, b - ca->buckets); in __bch_invalidate_one_bucket()
158 fifo_push(&ca->free_inc, b - ca->buckets); in bch_invalidate_one_bucket()
227 b = ca->buckets + ca->fifo_last_bucket++; in invalidate_buckets_fifo()
253 b = ca->buckets + n; in invalidate_buckets_random()
443 b = ca->buckets + r; in bch_bucket_alloc()
510 k->ptr[i] = MAKE_PTR(ca->buckets[b].gen, in __bch_bucket_alloc_set()
/Linux-v5.4/Documentation/media/uapi/v4l/
pixfmt-meta-vsp1-hgt.rst
30 The histogram is a matrix of 6 Hue and 32 Saturation buckets, 192 in
31 total. Each HSV value is added to one or more buckets with a weight
33 corresponding buckets is done by inspecting the H and S value independently.
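Note on that last hit: the VSP1 HGT histogram it describes is simply a 6 (hue) by 32 (saturation) matrix of counters, 6 * 32 = 192 buckets in total, with the H and S components of each sample inspected independently. The following is a minimal illustrative sketch, not the driver's actual algorithm: it assumes 8-bit H and S values and a plain nearest-bucket rule, whereas the hardware documentation describes samples being weighted across one or more buckets.

/*
 * Illustrative sketch only: map an HSV sample to one of the
 * 6 (hue) x 32 (saturation) = 192 histogram buckets, assuming
 * 8-bit H and S components and a simple nearest-bucket rule.
 */
#include <stdint.h>

#define HGT_HUE_BUCKETS  6
#define HGT_SAT_BUCKETS  32
#define HGT_NUM_BUCKETS  (HGT_HUE_BUCKETS * HGT_SAT_BUCKETS)  /* 192 */

static unsigned int hgt_bucket_index(uint8_t h, uint8_t s)
{
	/* H and S are mapped to their buckets independently. */
	unsigned int hue_bucket = (h * HGT_HUE_BUCKETS) / 256;   /* 0..5  */
	unsigned int sat_bucket = (s * HGT_SAT_BUCKETS) / 256;   /* 0..31 */

	return hue_bucket * HGT_SAT_BUCKETS + sat_bucket;        /* 0..191 */
}

With this flat indexing, incrementing hist[hgt_bucket_index(h, s)] for every pixel accumulates the 192 counters that the documentation snippet refers to.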
