/Linux-v4.19/net/ceph/crush/
D | mapper.c |
     74  static int bucket_perm_choose(const struct crush_bucket *bucket,  in bucket_perm_choose() argument
     78  unsigned int pr = r % bucket->size;  in bucket_perm_choose()
     83  dprintk("bucket %d new x=%d\n", bucket->id, x);  in bucket_perm_choose()
     88  s = crush_hash32_3(bucket->hash, x, bucket->id, 0) %  in bucket_perm_choose()
     89  bucket->size;  in bucket_perm_choose()
     95  for (i = 0; i < bucket->size; i++)  in bucket_perm_choose()
    100  for (i = 1; i < bucket->size; i++)  in bucket_perm_choose()
    112  if (p < bucket->size - 1) {  in bucket_perm_choose()
    113  i = crush_hash32_3(bucket->hash, x, bucket->id, p) %  in bucket_perm_choose()
    114  (bucket->size - p);  in bucket_perm_choose()
    [all …]
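The mapper.c hits show CRUSH choosing an item from a bucket by hashing the input value together with the bucket id and reducing the result modulo bucket->size. A minimal userspace sketch of that idea, assuming a toy mix32() hash in place of crush_hash32_3() and ignoring the permutation cache that bucket_perm_choose() actually maintains:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy 3-input integer hash; a stand-in for crush_hash32_3(). */
    static uint32_t mix32(uint32_t a, uint32_t b, uint32_t c)
    {
        uint32_t h = a * 0x9e3779b1u;

        h ^= b + 0x85ebca6bu + (h << 6) + (h >> 2);
        h ^= c + 0xc2b2ae35u + (h << 6) + (h >> 2);
        return h;
    }

    /* Pick the r-th replica for input x from a bucket with `size` items. */
    static int bucket_choose(uint32_t x, int bucket_id, int r, int size)
    {
        return mix32(x, (uint32_t)bucket_id, (uint32_t)r) % (uint32_t)size;
    }

    int main(void)
    {
        /* The same (x, r) always maps to the same item, so placement is stable. */
        for (int r = 0; r < 3; r++)
            printf("x=42 r=%d -> item %d\n", r, bucket_choose(42, -1, r, 8));
        return 0;
    }

Because the choice is a pure function of (x, bucket id, r), every client that evaluates the same inputs computes the same placement without consulting a central table.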
/Linux-v4.19/block/
D | blk-stat.c |
     55  int bucket;  in blk_stat_add() local
     67  bucket = cb->bucket_fn(rq);  in blk_stat_add()
     68  if (bucket < 0)  in blk_stat_add()
     71  stat = &get_cpu_ptr(cb->cpu_stat)[bucket];  in blk_stat_add()
     81  unsigned int bucket;  in blk_stat_timer_fn() local
     84  for (bucket = 0; bucket < cb->buckets; bucket++)  in blk_stat_timer_fn()
     85  blk_rq_stat_init(&cb->stat[bucket]);  in blk_stat_timer_fn()
     91  for (bucket = 0; bucket < cb->buckets; bucket++) {  in blk_stat_timer_fn()
     92  blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);  in blk_stat_timer_fn()
     93  blk_rq_stat_init(&cpu_stat[bucket]);  in blk_stat_timer_fn()
    [all …]
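blk-stat.c accumulates request statistics into a per-CPU copy of each bucket on the hot path and folds the copies into one array from a timer callback. A simplified, single-threaded sketch of that split; NR_CPUS, NR_BUCKETS and the plain counters are stand-ins for cb->cpu_stat and struct blk_rq_stat:

    #include <stdio.h>

    #define NR_CPUS    4
    #define NR_BUCKETS 8

    /* Per-CPU counters: each CPU bumps only its own row, so the fast path
     * touches no shared state (a simplification of cb->cpu_stat). */
    static unsigned long cpu_stat[NR_CPUS][NR_BUCKETS];
    static unsigned long stat[NR_BUCKETS];

    static void stat_add(int cpu, int bucket)
    {
        if (bucket < 0 || bucket >= NR_BUCKETS)
            return;             /* the bucket function may reject a sample */
        cpu_stat[cpu][bucket]++;
    }

    /* Periodic aggregation, like blk_stat_timer_fn(): fold every CPU's
     * counters into the global array and reset the per-CPU copies. */
    static void stat_fold(void)
    {
        for (int b = 0; b < NR_BUCKETS; b++)
            stat[b] = 0;
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            for (int b = 0; b < NR_BUCKETS; b++) {
                stat[b] += cpu_stat[cpu][b];
                cpu_stat[cpu][b] = 0;
            }
    }

    int main(void)
    {
        stat_add(0, 2);
        stat_add(1, 2);
        stat_add(3, 5);
        stat_fold();
        printf("bucket 2 = %lu, bucket 5 = %lu\n", stat[2], stat[5]);
        return 0;
    }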
/Linux-v4.19/net/sched/
D | sch_hhf.c |
    328  static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)  in dequeue_head() argument
    330  struct sk_buff *skb = bucket->head;  in dequeue_head()
    332  bucket->head = skb->next;  in dequeue_head()
    338  static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb)  in bucket_add() argument
    340  if (bucket->head == NULL)  in bucket_add()
    341  bucket->head = skb;  in bucket_add()
    343  bucket->tail->next = skb;  in bucket_add()
    344  bucket->tail = skb;  in bucket_add()
    351  struct wdrr_bucket *bucket;  in hhf_drop() local
    354  bucket = &q->buckets[WDRR_BUCKET_FOR_HH];  in hhf_drop()
    [all …]
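sch_hhf.c keeps each WDRR bucket as a singly linked FIFO with head and tail pointers, appending at the tail and dequeuing from the head. A self-contained sketch of the same two helpers, with struct pkt standing in for sk_buff:

    #include <stdio.h>

    struct pkt {
        struct pkt *next;
        int id;
    };

    /* Head/tail FIFO, the shape of struct wdrr_bucket's queue. */
    struct bucket {
        struct pkt *head;
        struct pkt *tail;
    };

    /* Append at the tail, like bucket_add(). */
    static void bucket_add(struct bucket *b, struct pkt *p)
    {
        p->next = NULL;
        if (b->head == NULL)
            b->head = p;
        else
            b->tail->next = p;
        b->tail = p;
    }

    /* Pop from the head, like dequeue_head(). */
    static struct pkt *dequeue_head(struct bucket *b)
    {
        struct pkt *p = b->head;

        if (p) {
            b->head = p->next;
            p->next = NULL;
        }
        return p;
    }

    int main(void)
    {
        struct bucket b = { NULL, NULL };
        struct pkt a = { .id = 1 }, c = { .id = 2 };

        bucket_add(&b, &a);
        bucket_add(&b, &c);
        struct pkt *first = dequeue_head(&b);
        struct pkt *second = dequeue_head(&b);
        printf("%d %d\n", first->id, second->id); /* prints: 1 2 */
        return 0;
    }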
/Linux-v4.19/drivers/infiniband/sw/rdmavt/
D | trace_qp.h |
     60  TP_PROTO(struct rvt_qp *qp, u32 bucket),
     61  TP_ARGS(qp, bucket),
     65  __field(u32, bucket)
     70  __entry->bucket = bucket;
     76  __entry->bucket
     81  TP_PROTO(struct rvt_qp *qp, u32 bucket),
     82  TP_ARGS(qp, bucket));
     85  TP_PROTO(struct rvt_qp *qp, u32 bucket),
     86  TP_ARGS(qp, bucket));
/Linux-v4.19/net/9p/
D | error.c |
    196  int bucket;  in p9_error_init() local
    199  for (bucket = 0; bucket < ERRHASHSZ; bucket++)  in p9_error_init()
    200  INIT_HLIST_HEAD(&hash_errmap[bucket]);  in p9_error_init()
    205  bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ;  in p9_error_init()
    207  hlist_add_head(&c->list, &hash_errmap[bucket]);  in p9_error_init()
    225  int bucket;  in p9_errstr2errno() local
    229  bucket = jhash(errstr, len, 0) % ERRHASHSZ;  in p9_errstr2errno()
    230  hlist_for_each_entry(c, &hash_errmap[bucket], list) {  in p9_errstr2errno()
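The 9p error map hashes an error string into one of ERRHASHSZ buckets and chains entries per bucket; lookup rehashes the string and walks only that chain. A userspace sketch of the pattern, using an FNV-1a hash and a plain next pointer instead of jhash() and hlist:

    #include <stdio.h>
    #include <string.h>

    #define ERRHASHSZ 32

    struct errmap {
        const char *name;
        int errnum;
        struct errmap *next;          /* chain within one bucket */
    };

    static struct errmap *hash_errmap[ERRHASHSZ];

    /* FNV-1a string hash; a stand-in for the kernel's jhash(). */
    static unsigned int strhash(const char *s, size_t len)
    {
        unsigned int h = 2166136261u;

        while (len--)
            h = (h ^ (unsigned char)*s++) * 16777619u;
        return h;
    }

    static void errmap_add(struct errmap *e)
    {
        unsigned int bucket = strhash(e->name, strlen(e->name)) % ERRHASHSZ;

        e->next = hash_errmap[bucket];      /* push onto the bucket's chain */
        hash_errmap[bucket] = e;
    }

    static int errstr2errno(const char *s)
    {
        unsigned int bucket = strhash(s, strlen(s)) % ERRHASHSZ;

        for (struct errmap *c = hash_errmap[bucket]; c; c = c->next)
            if (strcmp(c->name, s) == 0)
                return c->errnum;
        return 0;                           /* not found */
    }

    int main(void)
    {
        static struct errmap enoent = { "No such file or directory", 2, NULL };

        errmap_add(&enoent);
        printf("%d\n", errstr2errno("No such file or directory")); /* 2 */
        return 0;
    }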
/Linux-v4.19/fs/dlm/
D | debug_fs.c |
    370  unsigned bucket;  member
    429  unsigned bucket, entry;  in table_seq_start() local
    432  bucket = n >> 32;  in table_seq_start()
    435  if (bucket >= ls->ls_rsbtbl_size)  in table_seq_start()
    452  tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;  in table_seq_start()
    454  spin_lock(&ls->ls_rsbtbl[bucket].lock);  in table_seq_start()
    461  ri->bucket = bucket;  in table_seq_start()
    462  spin_unlock(&ls->ls_rsbtbl[bucket].lock);  in table_seq_start()
    467  spin_unlock(&ls->ls_rsbtbl[bucket].lock);  in table_seq_start()
    477  bucket++;  in table_seq_start()
    [all …]

D | dir.c |
    203  uint32_t hash, bucket;  in find_rsb_root() local
    207  bucket = hash & (ls->ls_rsbtbl_size - 1);  in find_rsb_root()
    209  spin_lock(&ls->ls_rsbtbl[bucket].lock);  in find_rsb_root()
    210  rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, &r);  in find_rsb_root()
    212  rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss,  in find_rsb_root()
    214  spin_unlock(&ls->ls_rsbtbl[bucket].lock);  in find_rsb_root()
/Linux-v4.19/net/vmw_vsock/
D | diag.c |
     60  unsigned int bucket;  in vsock_diag_dump() local
     71  bucket = cb->args[1];  in vsock_diag_dump()
     80  while (bucket < ARRAY_SIZE(vsock_bind_table)) {  in vsock_diag_dump()
     81  struct list_head *head = &vsock_bind_table[bucket];  in vsock_diag_dump()
    102  bucket++;  in vsock_diag_dump()
    106  bucket = 0;  in vsock_diag_dump()
    110  while (bucket < ARRAY_SIZE(vsock_connected_table)) {  in vsock_diag_dump()
    111  struct list_head *head = &vsock_connected_table[bucket];  in vsock_diag_dump()
    136  bucket++;  in vsock_diag_dump()
    143  cb->args[1] = bucket;  in vsock_diag_dump()
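vsock_diag_dump() walks the bind and connected tables bucket by bucket and records the current bucket (and how many entries were already sent) in cb->args so a later call can resume where the previous one stopped. A toy version of that resumable walk; the fixed table and the dump_state struct are illustrative stand-ins for the real hash tables and the netlink callback state:

    #include <stdio.h>

    #define NR_BUCKETS 4

    /* Toy bucket table: each bucket holds a few ids, 0 marks an empty slot. */
    static int table[NR_BUCKETS][3] = {
        { 11, 12, 0 },
        {  0,  0, 0 },
        { 21,  0, 0 },
        { 31, 32, 33 },
    };

    /* State carried across calls, like cb->args[] in vsock_diag_dump(). */
    struct dump_state {
        unsigned int bucket;   /* bucket we stopped in */
        unsigned int skipped;  /* entries of that bucket already emitted */
    };

    /* Emit at most `budget` entries, then return so the caller can resume. */
    static void dump_some(struct dump_state *st, int budget)
    {
        while (st->bucket < NR_BUCKETS) {
            unsigned int i = 0;

            for (int j = 0; j < 3 && table[st->bucket][j]; j++) {
                if (i++ < st->skipped)
                    continue;        /* already sent in a previous call */
                if (budget-- == 0)
                    return;          /* out of room; resume here later */
                printf("entry %d (bucket %u)\n", table[st->bucket][j], st->bucket);
                st->skipped++;
            }
            st->bucket++;
            st->skipped = 0;
        }
    }

    int main(void)
    {
        struct dump_state st = { 0, 0 };

        dump_some(&st, 2);    /* first batch */
        puts("-- resume --");
        dump_some(&st, 100);  /* the rest */
        return 0;
    }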
/Linux-v4.19/drivers/cpuidle/governors/
D | menu.c |
    128  unsigned int bucket;  member
    145  int bucket = 0;  in which_bucket() local
    154  bucket = BUCKETS/2;  in which_bucket()
    157  return bucket;  in which_bucket()
    159  return bucket + 1;  in which_bucket()
    161  return bucket + 2;  in which_bucket()
    163  return bucket + 3;  in which_bucket()
    165  return bucket + 4;  in which_bucket()
    166  return bucket + 5;  in which_bucket()
    311  data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);  in menu_select()
    [all …]
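menu.c classifies the expected sleep length into one of a handful of correction buckets, using the upper half of the table when tasks are waiting on I/O. A sketch of which_bucket() under that reading; only the +1…+5 offsets and the BUCKETS/2 shift are visible in the hits above, so the decade cutoffs below are illustrative assumptions:

    #include <stdio.h>

    #define BUCKETS 12

    /* Map an expected sleep length (microseconds) to a bucket index.
     * Cutoffs are illustrative assumptions; I/O waiters get the upper half. */
    static int which_bucket(unsigned int duration_us, unsigned long nr_iowaiters)
    {
        int bucket = 0;

        if (nr_iowaiters)
            bucket = BUCKETS / 2;

        if (duration_us < 10)
            return bucket;
        if (duration_us < 100)
            return bucket + 1;
        if (duration_us < 1000)
            return bucket + 2;
        if (duration_us < 10000)
            return bucket + 3;
        if (duration_us < 100000)
            return bucket + 4;
        return bucket + 5;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               which_bucket(50, 0),       /* 1 */
               which_bucket(50, 3),       /* 7 */
               which_bucket(500000, 0));  /* 5 */
        return 0;
    }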
/Linux-v4.19/Documentation/media/uapi/v4l/
D | pixfmt-meta-vsp1-hgt.rst |
     28  The Saturation position **n** (0 - 31) of the bucket in the matrix is
     33  The Hue position **m** (0 - 5) of the bucket in the matrix depends on
     92  - :cspan:`4` Histogram bucket (m=0, n=0) [31:0]
     94  - :cspan:`4` Histogram bucket (m=0, n=1) [31:0]
     98  - :cspan:`4` Histogram bucket (m=0, n=31) [31:0]
    100  - :cspan:`4` Histogram bucket (m=1, n=0) [31:0]
    104  - :cspan:`4` Histogram bucket (m=2, n=0) [31:0]
    108  - :cspan:`4` Histogram bucket (m=3, n=0) [31:0]
    112  - :cspan:`4` Histogram bucket (m=4, n=0) [31:0]
    116  - :cspan:`4` Histogram bucket (m=5, n=0) [31:0]
    [all …]
/Linux-v4.19/fs/nfs/
D | pnfs_nfs.c |
     76  struct pnfs_commit_bucket *bucket;  in pnfs_generic_clear_request_commit() local
     78  bucket = list_first_entry(&req->wb_list,  in pnfs_generic_clear_request_commit()
     81  freeme = bucket->wlseg;  in pnfs_generic_clear_request_commit()
     82  bucket->wlseg = NULL;  in pnfs_generic_clear_request_commit()
     91  pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,  in pnfs_generic_scan_ds_commit_list() argument
     95  struct list_head *src = &bucket->written;  in pnfs_generic_scan_ds_commit_list()
     96  struct list_head *dst = &bucket->committing;  in pnfs_generic_scan_ds_commit_list()
    104  if (bucket->clseg == NULL)  in pnfs_generic_scan_ds_commit_list()
    105  bucket->clseg = pnfs_get_lseg(bucket->wlseg);  in pnfs_generic_scan_ds_commit_list()
    107  pnfs_put_lseg(bucket->wlseg);  in pnfs_generic_scan_ds_commit_list()
    [all …]
/Linux-v4.19/kernel/dma/
D | debug.c |
    279  static void put_hash_bucket(struct hash_bucket *bucket,  in put_hash_bucket() argument
    281  __releases(&bucket->lock)  in put_hash_bucket()
    285  spin_unlock_irqrestore(&bucket->lock, __flags);  in put_hash_bucket()
    310  static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,  in __hash_bucket_find() argument
    317  list_for_each_entry(entry, &bucket->list, list) {  in __hash_bucket_find()
    360  static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,  in bucket_find_exact() argument
    363  return __hash_bucket_find(bucket, ref, exact_match);  in bucket_find_exact()
    366  static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,  in bucket_find_contain() argument
    376  entry = __hash_bucket_find(*bucket, ref, containing_match);  in bucket_find_contain()
    384  put_hash_bucket(*bucket, flags);  in bucket_find_contain()
    [all …]
/Linux-v4.19/fs/ocfs2/
D | xattr.c |
    135  struct ocfs2_xattr_bucket *bucket;  member
    289  struct ocfs2_xattr_bucket *bucket,
    311  struct ocfs2_xattr_bucket *bucket,
    332  struct ocfs2_xattr_bucket *bucket;  in ocfs2_xattr_bucket_new() local
    337  bucket = kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS);  in ocfs2_xattr_bucket_new()
    338  if (bucket) {  in ocfs2_xattr_bucket_new()
    339  bucket->bu_inode = inode;  in ocfs2_xattr_bucket_new()
    340  bucket->bu_blocks = blks;  in ocfs2_xattr_bucket_new()
    343  return bucket;  in ocfs2_xattr_bucket_new()
    346  static void ocfs2_xattr_bucket_relse(struct ocfs2_xattr_bucket *bucket)  in ocfs2_xattr_bucket_relse() argument
    [all …]
/Linux-v4.19/arch/mips/netlogic/xlr/
D | fmn.c |
     72  int bucket, rv;  in fmn_message_handler() local
     86  for (bucket = 0; bucket < 8; bucket++) {  in fmn_message_handler()
     88  if (bkt_status & (1 << bucket))  in fmn_message_handler()
     90  rv = nlm_fmn_receive(bucket, &size, &code, &src_stnid,  in fmn_message_handler()
    101  hndlr->action(bucket, src_stnid, size, code,  in fmn_message_handler()
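fmn.c reads a status word with one bit per hardware bucket and services only the buckets whose bit is set. A small software model of that loop, with an in-memory pending[] array standing in for nlm_fmn_receive() and the real status register:

    #include <stdio.h>

    /* Pretend message counts per hardware bucket; nonzero means "has messages". */
    static int pending[8] = { 0, 3, 0, 0, 1, 0, 0, 0 };

    /* Build a status word with one bit per non-empty bucket, then walk it,
     * the way fmn_message_handler() walks bkt_status. */
    static void drain_buckets(void)
    {
        unsigned int bkt_status = 0;

        for (int bucket = 0; bucket < 8; bucket++)
            if (pending[bucket])
                bkt_status |= 1u << bucket;

        for (int bucket = 0; bucket < 8; bucket++) {
            if (!(bkt_status & (1u << bucket)))
                continue;                 /* nothing queued in this bucket */
            while (pending[bucket]) {
                printf("received message from bucket %d\n", bucket);
                pending[bucket]--;        /* stand-in for nlm_fmn_receive() */
            }
        }
    }

    int main(void)
    {
        drain_buckets();
        return 0;
    }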
/Linux-v4.19/lib/
D | stackdepot.c |
    182  static inline struct stack_record *find_stack(struct stack_record *bucket,  in find_stack() argument
    188  for (found = bucket; found; found = found->next) {  in find_stack()
    222  struct stack_record *found = NULL, **bucket;  in depot_save_stack() local
    231  bucket = &stack_table[hash & STACK_HASH_MASK];  in depot_save_stack()
    238  found = find_stack(smp_load_acquire(bucket), trace->entries,  in depot_save_stack()
    267  found = find_stack(*bucket, trace->entries, trace->nr_entries, hash);  in depot_save_stack()
    273  new->next = *bucket;  in depot_save_stack()
    278  smp_store_release(bucket, new);  in depot_save_stack()
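stackdepot maps each hash to a bucket head pointer, searches the chain locklessly via smp_load_acquire(), and publishes a newly linked record with smp_store_release() so readers always see a fully initialised node. A userspace analogue using C11 atomics; the spinlock the kernel takes around the slow path is deliberately left out of this sketch:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* One hash bucket holding a chain of records.  Readers traverse the chain
     * without a lock; writers link the new record and publish the new head
     * with a release store, mirroring depot_save_stack(). */
    struct record {
        struct record *next;
        unsigned int hash;
        int value;
    };

    static _Atomic(struct record *) bucket_head;

    static struct record *find_record(struct record *head, unsigned int hash)
    {
        for (struct record *r = head; r; r = r->next)
            if (r->hash == hash)
                return r;
        return NULL;
    }

    static struct record *lookup_or_insert(unsigned int hash, int value)
    {
        /* Lock-free fast path: acquire pairs with the release store below. */
        struct record *r = find_record(
            atomic_load_explicit(&bucket_head, memory_order_acquire), hash);
        if (r)
            return r;

        /* Slow path: the kernel re-checks under a spinlock; the lock itself
         * is omitted here, only the re-check is shown. */
        r = find_record(atomic_load_explicit(&bucket_head,
                                             memory_order_relaxed), hash);
        if (r)
            return r;

        struct record *new = malloc(sizeof(*new));
        if (!new)
            return NULL;
        new->hash = hash;
        new->value = value;
        new->next = atomic_load_explicit(&bucket_head, memory_order_relaxed);
        atomic_store_explicit(&bucket_head, new, memory_order_release);
        return new;
    }

    int main(void)
    {
        lookup_or_insert(0xabcd, 1);
        printf("value=%d\n", lookup_or_insert(0xabcd, 2)->value); /* 1: deduplicated */
        return 0;
    }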
/Linux-v4.19/drivers/md/bcache/
D | alloc.c |
     76  uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)  in bch_inc_gen()
     89  struct bucket *b;  in bch_rescale_priorities()
    126  static inline bool can_inc_bucket_gen(struct bucket *b)  in can_inc_bucket_gen()
    131  bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)  in bch_can_invalidate_bucket()
    141  void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)  in __bch_invalidate_one_bucket()
    154  static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)  in bch_invalidate_one_bucket()
    182  struct bucket *b;  in invalidate_buckets_lru()
    219  struct bucket *b;  in invalidate_buckets_fifo()
    242  struct bucket *b;  in invalidate_buckets_random()
    303  static int bch_allocator_push(struct cache *ca, long bucket)  in bch_allocator_push() argument
    [all …]

D | bcache.h |
    197  struct bucket {  struct
    210  BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
    216  BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
    217  BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
    442  struct bucket *buckets;
    444  DECLARE_HEAP(struct bucket *, heap);
    777  static inline struct bucket *PTR_BUCKET(struct cache_set *c,  in PTR_BUCKET()
    876  static inline uint8_t bucket_gc_gen(struct bucket *b)  in bucket_gc_gen()
    942  uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
    945  bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
    [all …]
/Linux-v4.19/include/trace/events/
D | bcache.h |
     68  __field(size_t, bucket )
     72  __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
     75  TP_printk("bucket %zu", __entry->bucket)
    246  __field(size_t, bucket )
    252  __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
    257  TP_printk("bucket %zu", __entry->bucket)
    348  __field(size_t, bucket )
    353  __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
    357  TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
    407  TP_PROTO(struct cache *ca, size_t bucket),
    [all …]
/Linux-v4.19/kernel/bpf/
D | stackmap.c |
    343  struct stack_map_bucket *bucket, *new_bucket, *old_bucket;  in BPF_CALL_3() local
    379  bucket = READ_ONCE(smap->buckets[id]);  in BPF_CALL_3()
    381  hash_matches = bucket && bucket->hash == hash;  in BPF_CALL_3()
    397  if (hash_matches && bucket->nr == trace_nr &&  in BPF_CALL_3()
    398  memcmp(bucket->data, new_bucket->data, trace_len) == 0) {  in BPF_CALL_3()
    402  if (bucket && !(flags & BPF_F_REUSE_STACKID)) {  in BPF_CALL_3()
    407  if (hash_matches && bucket->nr == trace_nr &&  in BPF_CALL_3()
    408  memcmp(bucket->data, ips, trace_len) == 0)  in BPF_CALL_3()
    410  if (bucket && !(flags & BPF_F_REUSE_STACKID))  in BPF_CALL_3()
    515  struct stack_map_bucket *bucket, *old_bucket;  in bpf_stackmap_copy() local
    [all …]
/Linux-v4.19/drivers/md/persistent-data/
D | dm-transaction-manager.c |
    106  unsigned bucket = dm_hash_block(b, DM_HASH_MASK);  in is_shadow() local
    110  hlist_for_each_entry(si, tm->buckets + bucket, hlist)  in is_shadow()
    126  unsigned bucket;  in insert_shadow() local
    132  bucket = dm_hash_block(b, DM_HASH_MASK);  in insert_shadow()
    134  hlist_add_head(&si->hlist, tm->buckets + bucket);  in insert_shadow()
    143  struct hlist_head *bucket;  in wipe_shadow_table() local
    148  bucket = tm->buckets + i;  in wipe_shadow_table()
    149  hlist_for_each_entry_safe(si, tmp, bucket, hlist)  in wipe_shadow_table()
    152  INIT_HLIST_HEAD(bucket);  in wipe_shadow_table()
/Linux-v4.19/arch/sparc/kernel/
D | irq_64.c |
    206  struct ino_bucket bucket;  member
    257  struct ino_bucket *bucket;  in cookie_exists() local
    268  bucket = (struct ino_bucket *) __va(cookie);  in cookie_exists()
    269  irq = bucket->__irq;  in cookie_exists()
    278  struct ino_bucket *bucket;  in sysino_exists() local
    281  bucket = &ivector_table[sysino];  in sysino_exists()
    282  irq = bucket_get_irq(__pa(bucket));  in sysino_exists()
    615  struct ino_bucket *bucket;  in build_irq() local
    622  bucket = &ivector_table[ino];  in build_irq()
    623  irq = bucket_get_irq(__pa(bucket));  in build_irq()
    [all …]
/Linux-v4.19/net/atm/
D | proc.c |
     69  int bucket;  member
     78  static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l)  in __vcc_walk() argument
     83  for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) {  in __vcc_walk()
     84  struct hlist_head *head = &vcc_hash[*bucket];  in __vcc_walk()
     98  if (!sk && ++*bucket < VCC_HTABLE_SIZE) {  in __vcc_walk()
     99  sk = sk_head(&vcc_hash[*bucket]);  in __vcc_walk()
    113  return __vcc_walk(&state->sk, family, &state->bucket, l) ?  in vcc_walk()
/Linux-v4.19/include/net/
D | transp_v6.h |
     48  __u16 srcp, __u16 destp, int rqueue, int bucket);
     51  __u16 destp, int bucket)  in ip6_dgram_sock_seq_show() argument
     54  bucket);  in ip6_dgram_sock_seq_show()
/Linux-v4.19/fs/xfs/libxfs/
D | xfs_ag.c |
    220  int bucket;  in xfs_agflblock_init() local
    229  for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)  in xfs_agflblock_init()
    230  agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);  in xfs_agflblock_init()
    240  int bucket;  in xfs_agiblock_init() local
    258  for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)  in xfs_agiblock_init()
    259  agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);  in xfs_agiblock_init()
/Linux-v4.19/drivers/net/ethernet/freescale/fman/
D | fman_dtsec.c |
    534  static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,  in set_bucket() argument
    537  int reg_idx = (bucket >> 5) & 0xf;  in set_bucket()
    538  int bit_idx = bucket & 0x1f;  in set_bucket()
   1061  s32 bucket;  in dtsec_add_hash_mac_address() local
   1092  bucket = (s32)((crc >> 23) & 0x1ff);  in dtsec_add_hash_mac_address()
   1094  bucket = (s32)((crc >> 24) & 0xff);  in dtsec_add_hash_mac_address()
   1099  bucket += 0x100;  in dtsec_add_hash_mac_address()
   1102  set_bucket(dtsec->regs, bucket, true);  in dtsec_add_hash_mac_address()
   1114  &dtsec->multicast_addr_hash->lsts[bucket]);  in dtsec_add_hash_mac_address()
   1117  &dtsec->unicast_addr_hash->lsts[bucket]);  in dtsec_add_hash_mac_address()
    [all …]
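dtsec_add_hash_mac_address() reduces a CRC of the MAC address to a bucket number, and set_bucket() splits that number into a register index (bucket >> 5) and a bit index (bucket & 0x1f). A software model of that addressing, with an in-memory array standing in for the hash-table registers:

    #include <stdint.h>
    #include <stdio.h>

    /* 16 words of 32 bits give 512 buckets, addressed exactly as in
     * set_bucket(): word = bucket >> 5, bit = bucket & 0x1f. */
    static uint32_t hash_regs[16];

    static void set_bucket(int bucket, int enable)
    {
        int reg_idx = (bucket >> 5) & 0xf;   /* which 32-bit register */
        int bit_idx = bucket & 0x1f;         /* which bit within it   */

        if (enable)
            hash_regs[reg_idx] |= 1u << bit_idx;
        else
            hash_regs[reg_idx] &= ~(1u << bit_idx);
    }

    int main(void)
    {
        /* A CRC over the MAC address would normally pick the bucket;
         * 0x123 is just a demo value. */
        set_bucket(0x123, 1);
        printf("reg %d = 0x%08x\n", 0x123 >> 5,
               (unsigned int)hash_regs[0x123 >> 5]);
        return 0;
    }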