/Linux-v4.19/lib/ |
D | rhashtable.c |
    41 static u32 head_hashfn(struct rhashtable *ht, in head_hashfn() argument
    45 return rht_head_hashfn(ht, tbl, he, ht->p); in head_hashfn()
    51 int lockdep_rht_mutex_is_held(struct rhashtable *ht) in lockdep_rht_mutex_is_held() argument
    53 return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1; in lockdep_rht_mutex_is_held()
    116 static union nested_table *nested_table_alloc(struct rhashtable *ht, in nested_table_alloc() argument
    139 static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht, in nested_bucket_table_alloc() argument
    156 if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets, in nested_bucket_table_alloc()
    167 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, in bucket_table_alloc() argument
    181 tbl = nested_bucket_table_alloc(ht, nbuckets, gfp); in bucket_table_alloc()
    195 ht->p.locks_mul, gfp) < 0) { in bucket_table_alloc()
    [all …]
|
D | test_rhashtable.c |
    118 static int insert_retry(struct rhashtable *ht, struct test_obj *obj, in insert_retry() argument
    126 err = rhashtable_insert_fast(ht, &obj->node, params); in insert_retry()
    140 static int __init test_rht_lookup(struct rhashtable *ht, struct test_obj *array, in test_rht_lookup() argument
    155 obj = rhashtable_lookup_fast(ht, &key, test_rht_params); in test_rht_lookup()
    178 static void test_bucket_stats(struct rhashtable *ht, unsigned int entries) in test_bucket_stats() argument
    184 err = rhashtable_walk_init(ht, &hti, GFP_KERNEL); in test_bucket_stats()
    210 total, atomic_read(&ht->nelems), entries, chain_len); in test_bucket_stats()
    212 if (total != atomic_read(&ht->nelems) || total != entries) in test_bucket_stats()
    216 static s64 __init test_rhashtable(struct rhashtable *ht, struct test_obj *array, in test_rhashtable() argument
    234 err = insert_retry(ht, obj, test_rht_params); in test_rhashtable()
    [all …]
|
/Linux-v4.19/include/linux/ |
D | rhashtable.h |
    86 static inline void *rht_obj(const struct rhashtable *ht, in rht_obj() argument
    89 return (char *)he - ht->p.head_offset; in rht_obj()
    98 static inline unsigned int rht_key_get_hash(struct rhashtable *ht, in rht_key_get_hash() argument
    106 hash = ht->p.hashfn(key, ht->key_len, hash_rnd); in rht_key_get_hash()
    117 unsigned int key_len = ht->p.key_len; in rht_key_get_hash()
    129 struct rhashtable *ht, const struct bucket_table *tbl, in rht_key_hashfn() argument
    132 unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd); in rht_key_hashfn()
    138 struct rhashtable *ht, const struct bucket_table *tbl, in rht_head_hashfn() argument
    141 const char *ptr = rht_obj(ht, he); in rht_head_hashfn()
    145 ht->p.key_len, in rht_head_hashfn()
    [all …]
|
D | rhashtable-types.h |
    34 struct rhashtable *ht; member
    100 struct rhashtable ht; member
    123 struct rhashtable *ht; member
    132 int rhashtable_init(struct rhashtable *ht,
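Taken together, the rhashtable hits above cover the core of the resizable-hashtable API: a table is set up with rhashtable_init() and a const struct rhashtable_params, and each object embeds a struct rhash_head that the insert/lookup/remove helpers operate on. A minimal usage sketch under those assumptions (the object type and all names are hypothetical, not taken from the files above):

        #include <linux/rhashtable.h>

        struct demo_obj {
                u32 key;
                struct rhash_head node;         /* linkage used by the rhashtable core */
        };

        static const struct rhashtable_params demo_params = {
                .key_len = sizeof(u32),
                .key_offset = offsetof(struct demo_obj, key),
                .head_offset = offsetof(struct demo_obj, node),
                .automatic_shrinking = true,
        };

        static struct rhashtable demo_ht;

        static int demo_setup(void)
        {
                return rhashtable_init(&demo_ht, &demo_params);
        }

        static int demo_add(struct demo_obj *obj)
        {
                return rhashtable_insert_fast(&demo_ht, &obj->node, demo_params);
        }

        static struct demo_obj *demo_find(u32 key)
        {
                /* callers serialize against removal via RCU */
                return rhashtable_lookup_fast(&demo_ht, &key, demo_params);
        }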
|
D | hashtable.h |
    34 static inline void __hash_init(struct hlist_head *ht, unsigned int sz) in __hash_init() argument
    39 INIT_HLIST_HEAD(&ht[i]); in __hash_init()
    81 static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) in __hash_empty() argument
    86 if (!hlist_empty(&ht[i])) in __hash_empty()
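__hash_init() and __hash_empty() are the helpers behind the hash_init()/hash_empty() macros for fixed-size hlist bucket arrays. A short sketch of the usual pattern built on this header, with hypothetical names:

        #include <linux/hashtable.h>

        struct demo_entry {
                int key;
                struct hlist_node node;
        };

        static DEFINE_HASHTABLE(demo_table, 4);         /* 2^4 = 16 buckets */

        static void demo_add(struct demo_entry *e)
        {
                hash_add(demo_table, &e->node, e->key);
        }

        static struct demo_entry *demo_find(int key)
        {
                struct demo_entry *e;

                hash_for_each_possible(demo_table, e, node, key)
                        if (e->key == key)
                                return e;
                return NULL;
        }

        static void demo_del(struct demo_entry *e)
        {
                hash_del(&e->node);
        }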
|
/Linux-v4.19/drivers/gpu/drm/ |
D | drm_hashtab.c |
    41 int drm_ht_create(struct drm_open_hash *ht, unsigned int order) in drm_ht_create() argument
    45 ht->order = order; in drm_ht_create()
    46 ht->table = NULL; in drm_ht_create()
    47 if (size <= PAGE_SIZE / sizeof(*ht->table)) in drm_ht_create()
    48 ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL); in drm_ht_create()
    50 ht->table = vzalloc(array_size(size, sizeof(*ht->table))); in drm_ht_create()
    51 if (!ht->table) { in drm_ht_create()
    59 void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key) in drm_ht_verbose_list() argument
    66 hashed_key = hash_long(key, ht->order); in drm_ht_verbose_list()
    68 h_list = &ht->table[hashed_key]; in drm_ht_verbose_list()
    [all …]
|
/Linux-v4.19/net/sched/ |
D | cls_u32.c |
    92 struct tc_u_knode __rcu *ht[1]; member
    121 struct tc_u_hnode *ht = rcu_dereference_bh(tp->root); in u32_classify() local
    133 n = rcu_dereference_bh(ht->ht[sel]); in u32_classify()
    178 ht = rcu_dereference_bh(n->ht_down); in u32_classify()
    179 if (!ht) { in u32_classify()
    212 ht = rcu_dereference_bh(n->ht_down); in u32_classify()
    214 if (ht->divisor) { in u32_classify()
    221 sel = ht->divisor & u32_hash_fold(*data, &n->sel, in u32_classify()
    254 ht = rcu_dereference_bh(n->ht_up); in u32_classify()
    268 struct tc_u_hnode *ht; in u32_lookup_ht() local
    [all …]
|
D | cls_rsvp.h |
    73 struct rsvp_session __rcu *ht[256]; member
    84 struct rsvp_filter __rcu *ht[16 + 1]; member
    174 for (s = rcu_dereference_bh(head->ht[h1]); s; in rsvp_classify()
    187 for (f = rcu_dereference_bh(s->ht[h2]); f; in rsvp_classify()
    212 for (f = rcu_dereference_bh(s->ht[16]); f; in rsvp_classify()
    233 for (s = rtnl_dereference(head->ht[h1]); s; in rsvp_replace()
    235 for (ins = &s->ht[h2], pins = rtnl_dereference(*ins); ; in rsvp_replace()
    262 for (s = rtnl_dereference(head->ht[h1]); s; in rsvp_get()
    264 for (f = rtnl_dereference(s->ht[h2]); f; in rsvp_get()
    326 while ((s = rtnl_dereference(data->ht[h1])) != NULL) { in rsvp_destroy()
    [all …]
|
D | cls_fw.c |
    37 struct fw_filter __rcu *ht[HTSIZE]; member
    71 for (f = rcu_dereference_bh(head->ht[fw_hash(id)]); f; in fw_classify()
    109 f = rtnl_dereference(head->ht[fw_hash(handle)]); in fw_get()
    152 while ((f = rtnl_dereference(head->ht[h])) != NULL) { in fw_destroy()
    153 RCU_INIT_POINTER(head->ht[h], in fw_destroy()
    178 fp = &head->ht[fw_hash(f->id)]; in fw_delete()
    194 if (rcu_access_pointer(head->ht[h])) { in fw_delete()
    299 fp = &head->ht[fw_hash(fnew->id)]; in fw_change()
    345 RCU_INIT_POINTER(f->next, head->ht[fw_hash(handle)]); in fw_change()
    346 rcu_assign_pointer(head->ht[fw_hash(handle)], f); in fw_change()
    [all …]
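cls_fw keeps a fixed-size array of RCU-protected, singly linked filter chains: the classify path walks a chain with rcu_dereference_bh() under the RCU BH read side, while configuration (under RTNL) publishes new filters with RCU_INIT_POINTER()/rcu_assign_pointer(). A minimal sketch of that pattern with hypothetical names (the real fw_hash() folds the 32-bit id rather than masking it):

        #include <linux/rcupdate.h>
        #include <linux/rtnetlink.h>
        #include <linux/types.h>

        #define DEMO_HTSIZE 256

        struct demo_filter {
                struct demo_filter __rcu *next;
                u32 id;
        };

        struct demo_head {
                struct demo_filter __rcu *ht[DEMO_HTSIZE];
        };

        /* reader side, e.g. in a classify path under rcu_read_lock_bh() */
        static struct demo_filter *demo_lookup(struct demo_head *head, u32 id)
        {
                struct demo_filter *f;

                for (f = rcu_dereference_bh(head->ht[id & (DEMO_HTSIZE - 1)]); f;
                     f = rcu_dereference_bh(f->next))
                        if (f->id == id)
                                return f;
                return NULL;
        }

        /* writer side, serialized by RTNL */
        static void demo_insert(struct demo_head *head, struct demo_filter *f)
        {
                u32 h = f->id & (DEMO_HTSIZE - 1);

                RCU_INIT_POINTER(f->next, rtnl_dereference(head->ht[h]));
                rcu_assign_pointer(head->ht[h], f);
        }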
|
D | cls_route.c |
    46 struct route4_filter __rcu *ht[16 + 16 + 1]; member
    168 for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]); in route4_classify()
    174 for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]); in route4_classify()
    180 for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]); in route4_classify()
    236 for (f = rtnl_dereference(b->ht[h2]); in route4_get()
    295 while ((f = rtnl_dereference(b->ht[h2])) != NULL) { in route4_destroy()
    299 RCU_INIT_POINTER(b->ht[h2], next); in route4_destroy()
    331 fp = &b->ht[from_hash(h >> 16)]; in route4_delete()
    353 rt = rtnl_dereference(b->ht[i]); in route4_delete()
    441 for (fp = rtnl_dereference(b->ht[h2]); in route4_set_parms()
    [all …]
|
D | sch_sfq.c |
    130 sfq_index *ht; /* Hash table ('divisor' slots) */ member
    325 q->ht[slot->hash] = SFQ_EMPTY_SLOT; in sfq_drop()
    369 x = q->ht[hash]; in sfq_enqueue()
    375 q->ht[hash] = x; in sfq_enqueue()
    511 q->ht[slot->hash] = SFQ_EMPTY_SLOT; in sfq_dequeue()
    562 q->ht[slot->hash] = SFQ_EMPTY_SLOT; in sfq_rehash()
    568 sfq_index x = q->ht[hash]; in sfq_rehash()
    581 q->ht[hash] = x; in sfq_rehash()
    719 sfq_free(q->ht); in sfq_destroy()
    760 q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor); in sfq_init()
    [all …]
|
/Linux-v4.19/kernel/ |
D | smpboot.c |
    87 struct smp_hotplug_thread *ht; member
    109 struct smp_hotplug_thread *ht = td->ht; in smpboot_thread_fn() local
    118 if (ht->cleanup && td->status != HP_THREAD_NONE) in smpboot_thread_fn()
    119 ht->cleanup(td->cpu, cpu_online(td->cpu)); in smpboot_thread_fn()
    127 if (ht->park && td->status == HP_THREAD_ACTIVE) { in smpboot_thread_fn()
    129 ht->park(td->cpu); in smpboot_thread_fn()
    144 if (ht->setup) in smpboot_thread_fn()
    145 ht->setup(td->cpu); in smpboot_thread_fn()
    152 if (ht->unpark) in smpboot_thread_fn()
    153 ht->unpark(td->cpu); in smpboot_thread_fn()
    [all …]
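smpboot_thread_fn() is the generic loop that drives per-CPU kthreads described by struct smp_hotplug_thread; setup/park/unpark/cleanup, seen above, are optional hooks in that descriptor. A hedged registration sketch (thread name and callback bodies are made up for illustration):

        #include <linux/smpboot.h>
        #include <linux/percpu.h>
        #include <linux/sched.h>

        static DEFINE_PER_CPU(struct task_struct *, demo_task);

        static int demo_should_run(unsigned int cpu)
        {
                return 0;       /* e.g. test a per-CPU "work pending" flag here */
        }

        static void demo_thread_fn(unsigned int cpu)
        {
                /* invoked whenever demo_should_run() returned true for this CPU */
        }

        static struct smp_hotplug_thread demo_threads = {
                .store = &demo_task,
                .thread_should_run = demo_should_run,
                .thread_fn = demo_thread_fn,
                .thread_comm = "demo/%u",
        };

        /* registered once, e.g. from an __init function:
         *      smpboot_register_percpu_thread(&demo_threads);
         */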
|
/Linux-v4.19/include/drm/ |
D | drm_hashtab.h |
    52 int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
    53 int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
    54 int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
    57 int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
    59 void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
    60 int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
    61 int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
    62 void drm_ht_remove(struct drm_open_hash *ht);
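These declarations are the whole drm_open_hash interface: a power-of-two bucket array indexed by the unsigned long key stored in each drm_hash_item. A minimal usage sketch with a made-up embedding structure (error handling trimmed to the essentials):

        #include <drm/drm_hashtab.h>
        #include <linux/errno.h>
        #include <linux/kernel.h>
        #include <linux/slab.h>

        struct demo_node {
                struct drm_hash_item hash;      /* .key holds the lookup key */
                void *payload;
        };

        static int demo_use(struct drm_open_hash *ht)
        {
                struct drm_hash_item *item;
                struct demo_node *node;
                int ret;

                ret = drm_ht_create(ht, 8);                     /* 2^8 buckets */
                if (ret)
                        return ret;

                node = kzalloc(sizeof(*node), GFP_KERNEL);
                if (!node) {
                        ret = -ENOMEM;
                        goto out;
                }

                node->hash.key = 42;
                ret = drm_ht_insert_item(ht, &node->hash);      /* fails if the key exists */
                if (ret)
                        goto out_free;

                if (drm_ht_find_item(ht, 42, &item) == 0)
                        node = container_of(item, struct demo_node, hash);

                drm_ht_remove_item(ht, &node->hash);
        out_free:
                kfree(node);
        out:
                drm_ht_remove(ht);
                return ret;
        }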
|
/Linux-v4.19/net/hsr/ |
D | hsr_main.h |
    74 static inline u16 get_hsr_tag_path(struct hsr_tag *ht) in get_hsr_tag_path() argument
    76 return ntohs(ht->path_and_LSDU_size) >> 12; in get_hsr_tag_path()
    79 static inline u16 get_hsr_tag_LSDU_size(struct hsr_tag *ht) in get_hsr_tag_LSDU_size() argument
    81 return ntohs(ht->path_and_LSDU_size) & 0x0FFF; in get_hsr_tag_LSDU_size()
    84 static inline void set_hsr_tag_path(struct hsr_tag *ht, u16 path) in set_hsr_tag_path() argument
    86 ht->path_and_LSDU_size = htons( in set_hsr_tag_path()
    87 (ntohs(ht->path_and_LSDU_size) & 0x0FFF) | (path << 12)); in set_hsr_tag_path()
    90 static inline void set_hsr_tag_LSDU_size(struct hsr_tag *ht, u16 LSDU_size) in set_hsr_tag_LSDU_size() argument
    92 ht->path_and_LSDU_size = htons( in set_hsr_tag_LSDU_size()
    93 (ntohs(ht->path_and_LSDU_size) & 0xF000) | in set_hsr_tag_LSDU_size()
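These accessors pack a 4-bit path identifier into the top nibble and a 12-bit LSDU size into the low bits of the single big-endian path_and_LSDU_size field. A small illustrative round trip using them (the local tag variable and demo function are hypothetical):

        #include "hsr_main.h"   /* struct hsr_tag plus the accessors listed above */

        static void demo_hsr_tag(void)
        {
                struct hsr_tag tag = { .path_and_LSDU_size = 0 };

                set_hsr_tag_path(&tag, 0xA);            /* upper 4 bits  */
                set_hsr_tag_LSDU_size(&tag, 1500);      /* lower 12 bits */

                /* get_hsr_tag_path(&tag) now yields 0xA and
                 * get_hsr_tag_LSDU_size(&tag) yields 1500; on the wire the
                 * field reads as htons(0xA000 | 1500).
                 */
        }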
|
/Linux-v4.19/net/netfilter/ |
D | xt_hashlimit.c |
    177 hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst) in hash_dst() argument
    181 ht->rnd); in hash_dst()
    188 return reciprocal_scale(hash, ht->cfg.size); in hash_dst()
    192 dsthash_find(const struct xt_hashlimit_htable *ht, in dsthash_find() argument
    196 u_int32_t hash = hash_dst(ht, dst); in dsthash_find()
    198 if (!hlist_empty(&ht->hash[hash])) { in dsthash_find()
    199 hlist_for_each_entry_rcu(ent, &ht->hash[hash], node) in dsthash_find()
    210 dsthash_alloc_init(struct xt_hashlimit_htable *ht, in dsthash_alloc_init() argument
    215 spin_lock(&ht->lock); in dsthash_alloc_init()
    220 ent = dsthash_find(ht, dst); in dsthash_alloc_init()
    [all …]
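hash_dst() hashes the destination tuple with a per-table random seed and then maps the 32-bit result onto cfg.size buckets with reciprocal_scale() instead of a modulo. A minimal sketch of that bucket-selection idea (function and parameter names are hypothetical):

        #include <linux/jhash.h>
        #include <linux/kernel.h>       /* reciprocal_scale() */

        /*
         * Hash a tuple of 32-bit words with a per-table random seed, then
         * scale the result down to nr_buckets without a division.
         */
        static u32 demo_bucket(const u32 *tuple, unsigned int words,
                               u32 rnd, u32 nr_buckets)
        {
                u32 hash = jhash2(tuple, words, rnd);

                return reciprocal_scale(hash, nr_buckets);
        }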
|
D | nft_set_hash.c |
    28 struct rhashtable ht; member
    91 he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params); in nft_rhash_lookup()
    109 he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params); in nft_rhash_get()
    132 he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params); in nft_rhash_update()
    140 prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node, in nft_rhash_update()
    174 prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node, in nft_rhash_insert()
    220 he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params); in nft_rhash_deactivate()
    237 rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params); in nft_rhash_remove()
    249 err = rhashtable_walk_init(&priv->ht, &hti, GFP_ATOMIC); in nft_rhash_walk()
    301 err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL); in nft_rhash_gc()
    [all …]
|
/Linux-v4.19/drivers/net/wireless/mediatek/mt76/ |
D | mt76x2_phy_common.c |
    225 mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2])); in mt76x2_phy_set_txpower()
    227 mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10])); in mt76x2_phy_set_txpower()
    229 mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2])); in mt76x2_phy_set_txpower()
    231 mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0)); in mt76x2_phy_set_txpower()
    233 mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8])); in mt76x2_phy_set_txpower()
    235 mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0)); in mt76x2_phy_set_txpower()
    237 mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0)); in mt76x2_phy_set_txpower()
|
D | mt76x2_eeprom.c |
    458 t->ht[0] = t->ht[1] = mt76x2_rate_power_val(val); in mt76x2_get_rate_power()
    459 t->ht[2] = t->ht[3] = mt76x2_rate_power_val(val >> 8); in mt76x2_get_rate_power()
    462 t->ht[4] = t->ht[5] = mt76x2_rate_power_val(val); in mt76x2_get_rate_power()
    463 t->ht[6] = t->ht[7] = mt76x2_rate_power_val(val >> 8); in mt76x2_get_rate_power()
    466 t->ht[8] = t->ht[9] = mt76x2_rate_power_val(val); in mt76x2_get_rate_power()
    467 t->ht[10] = t->ht[11] = mt76x2_rate_power_val(val >> 8); in mt76x2_get_rate_power()
    470 t->ht[12] = t->ht[13] = mt76x2_rate_power_val(val); in mt76x2_get_rate_power()
    471 t->ht[14] = t->ht[15] = mt76x2_rate_power_val(val >> 8); in mt76x2_get_rate_power()
|
/Linux-v4.19/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ |
D | ipoib_vlan.c |
    88 struct mlx5i_pkey_qpn_ht *ht = ipriv->qpn_htbl; in mlx5i_pkey_add_qpn() local
    98 spin_lock_bh(&ht->ht_lock); in mlx5i_pkey_add_qpn()
    99 hlist_add_head(&new_node->hlist, &ht->buckets[key]); in mlx5i_pkey_add_qpn()
    100 spin_unlock_bh(&ht->ht_lock); in mlx5i_pkey_add_qpn()
    109 struct mlx5i_pkey_qpn_ht *ht = ipriv->qpn_htbl; in mlx5i_pkey_del_qpn() local
    112 node = mlx5i_find_qpn_to_netdev_node(ht->buckets, qpn); in mlx5i_pkey_del_qpn()
    118 spin_lock_bh(&ht->ht_lock); in mlx5i_pkey_del_qpn()
    120 spin_unlock_bh(&ht->ht_lock); in mlx5i_pkey_del_qpn()
|
/Linux-v4.19/drivers/gpu/drm/ttm/ |
D | ttm_object.c |
    235 struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; in ttm_base_object_lookup() local
    239 ret = drm_ht_find_item_rcu(ht, key, &hash); in ttm_base_object_lookup()
    257 struct drm_open_hash *ht = &tdev->object_hash; in ttm_base_object_lookup_for_ref() local
    261 ret = drm_ht_find_item_rcu(ht, key, &hash); in ttm_base_object_lookup_for_ref()
    287 struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; in ttm_ref_object_exists() local
    292 if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0)) in ttm_ref_object_exists()
    325 struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; in ttm_ref_object_add() local
    343 ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash); in ttm_ref_object_add()
    374 ret = drm_ht_insert_item_rcu(ht, &ref->hash); in ttm_ref_object_add()
    402 struct drm_open_hash *ht; in ttm_ref_object_release() local
    [all …]
|
/Linux-v4.19/tools/include/linux/ |
D | hashtable.h |
    31 static inline void __hash_init(struct hlist_head *ht, unsigned int sz) in __hash_init() argument
    36 INIT_HLIST_HEAD(&ht[i]); in __hash_init()
    69 static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) in __hash_empty() argument
    74 if (!hlist_empty(&ht[i])) in __hash_empty()
|
/Linux-v4.19/drivers/md/ |
D | dm-cache-policy-smq.c |
    583 static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned nr_entries) in h_init() argument
    587 ht->es = es; in h_init()
    589 ht->hash_bits = __ffs(nr_buckets); in h_init()
    591 ht->buckets = vmalloc(array_size(nr_buckets, sizeof(*ht->buckets))); in h_init()
    592 if (!ht->buckets) in h_init()
    596 ht->buckets[i] = INDEXER_NULL; in h_init()
    601 static void h_exit(struct smq_hash_table *ht) in h_exit() argument
    603 vfree(ht->buckets); in h_exit()
    606 static struct entry *h_head(struct smq_hash_table *ht, unsigned bucket) in h_head() argument
    608 return to_entry(ht->es, ht->buckets[bucket]); in h_head()
    [all …]
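h_init() sizes its bucket array to a power of two, derives hash_bits from it with __ffs() (which is log2 for a power of two, as on line 589 above), and fills the vmalloc'd index array with a sentinel. A sketch of that sizing idea with hypothetical names, bucket count, and sentinel value:

        #include <linux/kernel.h>
        #include <linux/log2.h>         /* roundup_pow_of_two() */
        #include <linux/overflow.h>     /* array_size() */
        #include <linux/vmalloc.h>

        struct demo_table {
                unsigned int hash_bits;
                u16 *buckets;
        };

        static int demo_table_init(struct demo_table *t, unsigned int nr_entries)
        {
                unsigned int i;
                unsigned int nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));

                t->hash_bits = __ffs(nr_buckets);       /* log2 of a power of two */
                t->buckets = vmalloc(array_size(nr_buckets, sizeof(*t->buckets)));
                if (!t->buckets)
                        return -ENOMEM;

                for (i = 0; i < nr_buckets; i++)
                        t->buckets[i] = 0xffff;         /* hypothetical "empty" sentinel */

                return 0;
        }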
|
/Linux-v4.19/arch/powerpc/kernel/ |
D | kgdb.c |
    95 struct hard_trap_info *ht; in computeSignal() local
    97 for (ht = hard_trap_info; ht->tt && ht->signo; ht++) in computeSignal()
    98 if (ht->tt == tt) in computeSignal()
    99 return ht->signo; in computeSignal()
|
/Linux-v4.19/arch/mips/kernel/ |
D | kgdb.c |
    231 struct hard_trap_info *ht; in compute_signal() local
    233 for (ht = hard_trap_info; ht->tt && ht->signo; ht++) in compute_signal()
    234 if (ht->tt == tt) in compute_signal()
    235 return ht->signo; in compute_signal()
|
/Linux-v4.19/drivers/net/wireless/intel/iwlwifi/dvm/ |
D | rxon.c |
    186 if (ctx->ht.enabled) in iwlagn_update_qos()
    623 if (!ctx->ht.enabled) { in _iwl_set_rxon_ht()
    634 rxon->flags |= cpu_to_le32(ctx->ht.protection << in _iwl_set_rxon_ht()
    644 if (ctx->ht.protection == in _iwl_set_rxon_ht()
    651 switch (ctx->ht.extension_chan_offset) { in _iwl_set_rxon_ht()
    666 switch (ctx->ht.extension_chan_offset) { in _iwl_set_rxon_ht()
    695 le32_to_cpu(rxon->flags), ctx->ht.protection, in _iwl_set_rxon_ht()
    696 ctx->ht.extension_chan_offset); in _iwl_set_rxon_ht()
    1158 ctx->ht.extension_chan_offset = in iwlagn_config_ht40()
    1160 ctx->ht.is_40mhz = true; in iwlagn_config_ht40()
    [all …]
|