Lines matching refs: tbl (references to the identifier 'tbl' in include/linux/rhashtable.h)
119 static inline unsigned int rht_bucket_index(const struct bucket_table *tbl, in rht_bucket_index() argument
122 return hash & (tbl->size - 1); in rht_bucket_index()
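rht_bucket_index() folds a full hash value into a bucket slot with a single mask, which is only valid because bucket_table sizes are powers of two. A minimal userspace sketch of that arithmetic; the size and hash values are purely illustrative:

        /* Sketch of the masking in rht_bucket_index(): with a power-of-two
         * table size, "hash & (size - 1)" equals "hash % size" but needs no
         * division.  Values below are arbitrary examples. */
        #include <stdio.h>

        int main(void)
        {
                unsigned int size = 1024;        /* must be a power of two */
                unsigned int hash = 0xdeadbeef;  /* arbitrary example hash */

                printf("bucket  = %u\n", hash & (size - 1));
                printf("modulo  = %u\n", hash % size);   /* same result */
                return 0;
        }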
156 struct rhashtable *ht, const struct bucket_table *tbl, in rht_key_hashfn() argument
159 unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd); in rht_key_hashfn()
161 return rht_bucket_index(tbl, hash); in rht_key_hashfn()
165 struct rhashtable *ht, const struct bucket_table *tbl, in rht_head_hashfn() argument
171 rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?: in rht_head_hashfn()
173 tbl->hash_rnd)) : in rht_head_hashfn()
174 rht_key_hashfn(ht, tbl, ptr + params.key_offset, params); in rht_head_hashfn()
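rht_key_hashfn() hashes the raw key bytes found at params.key_offset, while rht_head_hashfn() prefers params.obj_hashfn when one is supplied and otherwise falls back to the key path. A hedged, kernel-context sketch of how a caller typically describes the key so these helpers can find it; struct demo_obj and demo_params are hypothetical names used only for illustration:

        /* Hypothetical object keyed by a u32 id.  key_offset and head_offset
         * tell rht_key_hashfn()/rht_head_hashfn() where the key and the
         * rhash_head live inside the object; obj_hashfn is left NULL, so the
         * default key-hashing path (rht_key_hashfn) is taken. */
        struct demo_obj {
                u32                     id;     /* hash key */
                struct rhash_head       node;   /* hash table linkage */
        };

        static const struct rhashtable_params demo_params = {
                .key_len        = sizeof(u32),
                .key_offset     = offsetof(struct demo_obj, id),
                .head_offset    = offsetof(struct demo_obj, node),
                /* .obj_hashfn left NULL: hash the raw key bytes instead */
        };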
183 const struct bucket_table *tbl) in rht_grow_above_75() argument
186 return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) && in rht_grow_above_75()
187 (!ht->p.max_size || tbl->size < ht->p.max_size); in rht_grow_above_75()
196 const struct bucket_table *tbl) in rht_shrink_below_30() argument
199 return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) && in rht_shrink_below_30()
200 tbl->size > ht->p.min_size; in rht_shrink_below_30()
209 const struct bucket_table *tbl) in rht_grow_above_100() argument
211 return atomic_read(&ht->nelems) > tbl->size && in rht_grow_above_100()
212 (!ht->p.max_size || tbl->size < ht->p.max_size); in rht_grow_above_100()
221 const struct bucket_table *tbl) in rht_grow_above_max() argument
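Together these predicates implement the resize policy: grow once occupancy passes 75% (or force a rehash above 100%), shrink below 30% when automatic_shrinking is set, and never grow past max_size/max_elems. A small userspace sketch of the same integer arithmetic for an illustrative 1024-bucket table:

        /* Load-factor thresholds mirroring rht_grow_above_75(),
         * rht_shrink_below_30() and rht_grow_above_100() for size = 1024:
         * grow when more than 768 elements, shrink when fewer than 307. */
        #include <stdio.h>

        int main(void)
        {
                unsigned int size = 1024;

                printf("grow above 75%%:   nelems > %u\n", size / 4 * 3);
                printf("shrink below 30%%: nelems < %u\n", size * 3 / 10);
                printf("grow above 100%%:  nelems > %u\n", size);
                return 0;
        }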
228 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
235 static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, in lockdep_rht_bucket_is_held() argument
265 const struct bucket_table *tbl, unsigned int hash);
267 const struct bucket_table *tbl, unsigned int hash);
269 struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash);
277 #define rht_dereference_bucket(p, tbl, hash) \ argument
278 rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
280 #define rht_dereference_bucket_rcu(p, tbl, hash) \ argument
281 rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
287 const struct bucket_table *tbl, unsigned int hash) in rht_bucket() argument
289 return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : in rht_bucket()
290 &tbl->buckets[hash]; in rht_bucket()
294 struct bucket_table *tbl, unsigned int hash) in rht_bucket_var() argument
296 return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) : in rht_bucket_var()
297 &tbl->buckets[hash]; in rht_bucket_var()
301 struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) in rht_bucket_insert() argument
303 return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) : in rht_bucket_insert()
304 &tbl->buckets[hash]; in rht_bucket_insert()
326 static inline unsigned long rht_lock(struct bucket_table *tbl, in rht_lock() argument
333 lock_map_acquire(&tbl->dep_map); in rht_lock()
337 static inline unsigned long rht_lock_nested(struct bucket_table *tbl, in rht_lock_nested() argument
345 lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_); in rht_lock_nested()
349 static inline void rht_unlock(struct bucket_table *tbl, in rht_unlock() argument
353 lock_map_release(&tbl->dep_map); in rht_unlock()
381 struct bucket_table *tbl, in rht_ptr() argument
384 return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt); in rht_ptr()
401 static inline void rht_assign_unlock(struct bucket_table *tbl, in rht_assign_unlock() argument
408 lock_map_release(&tbl->dep_map); in rht_assign_unlock()
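rht_lock() takes a per-bucket lock by bit-spinning on bit 0 of the bucket head pointer itself, rht_ptr() masks that bit off before walking the chain, and rht_assign_unlock() publishes a new head and drops the lock in one store. The sketch below shows only that pointer-bit idea, using C11 atomics in userspace; the real helpers additionally disable interrupts, carry lockdep annotations, and use the kernel's RCU accessors and memory ordering:

        /* Illustration only, not the kernel code: reuse bit 0 of an aligned
         * bucket-head pointer as a tiny spinlock, so each bucket is locked
         * without extra storage. */
        #include <stdatomic.h>
        #include <stdint.h>
        #include <stdio.h>

        static _Atomic uintptr_t bucket;        /* head pointer, bit 0 = lock */

        static void bucket_lock(void)
        {
                /* spin until bit 0 was previously clear */
                while (atomic_fetch_or(&bucket, 1) & 1)
                        ;
        }

        static void *bucket_head(void)
        {
                /* mask the lock bit off before use, like rht_ptr() */
                return (void *)(atomic_load(&bucket) & ~(uintptr_t)1);
        }

        static void bucket_assign_unlock(void *new_head)
        {
                /* publish a new head and clear the lock bit in one store,
                 * like rht_assign_unlock() */
                atomic_store(&bucket, (uintptr_t)new_head);
        }

        int main(void)
        {
                int obj;        /* stand-in object; aligned, so bit 0 is free */

                bucket_lock();
                bucket_assign_unlock(&obj);
                printf("head = %p\n", bucket_head());
                return 0;
        }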
422 #define rht_for_each_from(pos, head, tbl, hash) \ argument
425 pos = rht_dereference_bucket((pos)->next, tbl, hash))
433 #define rht_for_each(pos, tbl, hash) \ argument
434 rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
435 tbl, hash)
446 #define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member) \ argument
449 pos = rht_dereference_bucket((pos)->next, tbl, hash))
459 #define rht_for_each_entry(tpos, pos, tbl, hash, member) \ argument
461 rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
462 tbl, hash, member)
476 #define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ argument
477 for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
479 rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
483 rht_dereference_bucket(pos->next, tbl, hash) : NULL)
496 #define rht_for_each_rcu_from(pos, head, tbl, hash) \ argument
512 #define rht_for_each_rcu(pos, tbl, hash) \ argument
514 pos = rht_ptr_rcu(rht_bucket(tbl, hash)); \
531 #define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \ argument
535 pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
549 #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ argument
551 rht_ptr_rcu(rht_bucket(tbl, hash)), \
552 tbl, hash, member)
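These iterator macros walk a single bucket chain, dereferencing each ->next either under the bucket lock or under RCU. A hedged, kernel-context sketch (not standalone) of scanning one bucket with rht_for_each_entry_rcu(), roughly what __rhashtable_lookup() does internally; it reuses the hypothetical demo_obj/demo_params from the earlier sketch and must run inside an RCU read-side critical section:

        /* Hedged sketch: scan the bucket for "key" in the current table only
         * (the real lookup also chases tbl->future_tbl during a resize).
         * Caller must hold rcu_read_lock(). */
        static struct demo_obj *demo_scan_bucket(struct rhashtable *ht, u32 key)
        {
                struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
                unsigned int hash = rht_key_hashfn(ht, tbl, &key, demo_params);
                struct rhash_head *pos;
                struct demo_obj *obj;

                rht_for_each_entry_rcu(obj, pos, tbl, hash, node) {
                        if (obj->id == key)
                                return obj;
                }
                return NULL;
        }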
598 struct bucket_table *tbl; in __rhashtable_lookup() local
602 tbl = rht_dereference_rcu(ht->tbl, ht); in __rhashtable_lookup()
604 hash = rht_key_hashfn(ht, tbl, key, params); in __rhashtable_lookup()
605 bkt = rht_bucket(tbl, hash); in __rhashtable_lookup()
607 rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) { in __rhashtable_lookup()
622 tbl = rht_dereference_rcu(tbl->future_tbl, ht); in __rhashtable_lookup()
623 if (unlikely(tbl)) in __rhashtable_lookup()
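__rhashtable_lookup() hashes the key, scans the matching bucket, and if nothing is found retries against tbl->future_tbl, which is non-NULL while a resize is in flight. Callers normally go through rhashtable_lookup_fast(), which wraps this in rcu_read_lock()/rcu_read_unlock(), so the resize handling stays invisible. A hedged sketch with the hypothetical demo types:

        /* Hedged caller-side sketch: presence check for a key. */
        static bool demo_contains(struct rhashtable *ht, u32 key)
        {
                struct demo_obj *obj;

                obj = rhashtable_lookup_fast(ht, &key, demo_params);
                return obj != NULL;
        }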
715 struct bucket_table *tbl; in __rhashtable_insert_fast() local
724 tbl = rht_dereference_rcu(ht->tbl, ht); in __rhashtable_insert_fast()
725 hash = rht_head_hashfn(ht, tbl, obj, params); in __rhashtable_insert_fast()
727 bkt = rht_bucket_insert(ht, tbl, hash); in __rhashtable_insert_fast()
732 flags = rht_lock(tbl, bkt); in __rhashtable_insert_fast()
734 if (unlikely(rcu_access_pointer(tbl->future_tbl))) { in __rhashtable_insert_fast()
736 rht_unlock(tbl, bkt, flags); in __rhashtable_insert_fast()
741 rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) { in __rhashtable_insert_fast()
764 head = rht_dereference_bucket(head->next, tbl, hash); in __rhashtable_insert_fast()
768 rht_unlock(tbl, bkt, flags); in __rhashtable_insert_fast()
770 rht_assign_unlock(tbl, bkt, obj, flags); in __rhashtable_insert_fast()
779 if (unlikely(rht_grow_above_max(ht, tbl))) in __rhashtable_insert_fast()
782 if (unlikely(rht_grow_above_100(ht, tbl))) in __rhashtable_insert_fast()
786 head = rht_ptr(bkt, tbl, hash); in __rhashtable_insert_fast()
797 rht_assign_unlock(tbl, bkt, obj, flags); in __rhashtable_insert_fast()
799 if (rht_grow_above_75(ht, tbl)) in __rhashtable_insert_fast()
809 rht_unlock(tbl, bkt, flags); in __rhashtable_insert_fast()
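__rhashtable_insert_fast() locks the target bucket, defers to the slow path when a resize is pending or the table is over 100% load, rejects duplicate keys, links the new entry at the head of the chain, and finally schedules a grow if the 75% threshold was crossed. A hedged caller-side sketch with the hypothetical demo types:

        /* Hedged sketch: 0 on success, -EEXIST if the key is already present,
         * another negative errno if the table cannot accept the entry. */
        static int demo_insert(struct rhashtable *ht, struct demo_obj *obj)
        {
                return rhashtable_insert_fast(ht, &obj->node, demo_params);
        }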
996 struct rhashtable *ht, struct bucket_table *tbl, in __rhashtable_remove_fast_one() argument
1007 hash = rht_head_hashfn(ht, tbl, obj, params); in __rhashtable_remove_fast_one()
1008 bkt = rht_bucket_var(tbl, hash); in __rhashtable_remove_fast_one()
1012 flags = rht_lock(tbl, bkt); in __rhashtable_remove_fast_one()
1014 rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) { in __rhashtable_remove_fast_one()
1030 tbl, hash); in __rhashtable_remove_fast_one()
1036 list = rht_dereference_bucket(list->next, tbl, hash); in __rhashtable_remove_fast_one()
1042 obj = rht_dereference_bucket(obj->next, tbl, hash); in __rhashtable_remove_fast_one()
1046 list = rht_dereference_bucket(list->next, tbl, hash); in __rhashtable_remove_fast_one()
1056 rht_unlock(tbl, bkt, flags); in __rhashtable_remove_fast_one()
1058 rht_assign_unlock(tbl, bkt, obj, flags); in __rhashtable_remove_fast_one()
1063 rht_unlock(tbl, bkt, flags); in __rhashtable_remove_fast_one()
1068 rht_shrink_below_30(ht, tbl))) in __rhashtable_remove_fast_one()
1081 struct bucket_table *tbl; in __rhashtable_remove_fast() local
1086 tbl = rht_dereference_rcu(ht->tbl, ht); in __rhashtable_remove_fast()
1093 while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params, in __rhashtable_remove_fast()
1095 (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) in __rhashtable_remove_fast()
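__rhashtable_remove_fast_one() unlinks the object from its bucket under the bucket lock, and __rhashtable_remove_fast() retries the removal in tbl->future_tbl so an entry cannot be missed mid-resize; a shrink is scheduled afterwards if occupancy drops below 30% and automatic_shrinking is set. A hedged caller-side sketch; it assumes demo_obj also carries a struct rcu_head named rcu, since concurrent RCU readers may still be traversing the removed object:

        /* Hedged sketch: removal only unlinks the object, so freeing must be
         * deferred past an RCU grace period, e.g. with kfree_rcu(). */
        static int demo_remove(struct rhashtable *ht, struct demo_obj *obj)
        {
                int err = rhashtable_remove_fast(ht, &obj->node, demo_params);

                if (!err)
                        kfree_rcu(obj, rcu);
                return err;
        }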
1149 struct rhashtable *ht, struct bucket_table *tbl, in __rhashtable_replace_fast() argument
1163 hash = rht_head_hashfn(ht, tbl, obj_old, params); in __rhashtable_replace_fast()
1164 if (hash != rht_head_hashfn(ht, tbl, obj_new, params)) in __rhashtable_replace_fast()
1167 bkt = rht_bucket_var(tbl, hash); in __rhashtable_replace_fast()
1172 flags = rht_lock(tbl, bkt); in __rhashtable_replace_fast()
1174 rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) { in __rhashtable_replace_fast()
1183 rht_unlock(tbl, bkt, flags); in __rhashtable_replace_fast()
1185 rht_assign_unlock(tbl, bkt, obj_new, flags); in __rhashtable_replace_fast()
1191 rht_unlock(tbl, bkt, flags); in __rhashtable_replace_fast()
1216 struct bucket_table *tbl; in rhashtable_replace_fast() local
1221 tbl = rht_dereference_rcu(ht->tbl, ht); in rhashtable_replace_fast()
1228 while ((err = __rhashtable_replace_fast(ht, tbl, obj_old, in rhashtable_replace_fast()
1230 (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) in rhashtable_replace_fast()
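__rhashtable_replace_fast() only swaps obj_new in place of obj_old when both hash to the same bucket (returning -EINVAL otherwise) and returns -ENOENT if obj_old is not present; rhashtable_replace_fast() retries across future_tbl during a resize. A hedged caller-side sketch with the hypothetical demo types; RCU-deferred freeing of the replaced object remains the caller's job:

        /* Hedged sketch: the replacement must carry the same key as the
         * object it replaces, so both hash to the same bucket. */
        static int demo_replace(struct rhashtable *ht, struct demo_obj *old_obj,
                                struct demo_obj *new_obj)
        {
                new_obj->id = old_obj->id;      /* same key => same bucket */
                return rhashtable_replace_fast(ht, &old_obj->node,
                                               &new_obj->node, demo_params);
        }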