/Linux-v6.6/tools/tracing/rtla/src/

  osnoise_hist.c
      44  int bucket_size;  member
      62  int bucket_size;  member
      91  *osnoise_alloc_histogram(int nr_cpus, int entries, int bucket_size)  in osnoise_alloc_histogram()  argument
     101  data->bucket_size = bucket_size;  in osnoise_alloc_histogram()
     138  if (data->bucket_size)  in osnoise_hist_update_multiple()
     139  bucket = duration / data->bucket_size;  in osnoise_hist_update_multiple()
     173  int bucket_size;  in osnoise_init_trace_hist()  local
     180  bucket_size = params->output_divisor * params->bucket_size;  in osnoise_init_trace_hist()
     181  snprintf(buff, sizeof(buff), "duration.buckets=%d", bucket_size);  in osnoise_init_trace_hist()
     387  bucket * data->bucket_size);  in osnoise_print_stats()
     [all …]

  timerlat_hist.c
      52  int bucket_size;  member
      81  int bucket_size;  member
     117  *timerlat_alloc_histogram(int nr_cpus, int entries, int bucket_size)  in timerlat_alloc_histogram()  argument
     127  data->bucket_size = bucket_size;  in timerlat_alloc_histogram()
     181  if (data->bucket_size)  in timerlat_hist_update()
     182  bucket = latency / data->bucket_size;  in timerlat_hist_update()
     422  bucket * data->bucket_size);  in timerlat_print_stats()
     573  params->bucket_size = 1;  in timerlat_hist_parse_args()
     652  params->bucket_size = get_llong_from_str(optarg);  in timerlat_hist_parse_args()
     653  if ((params->bucket_size == 0) || (params->bucket_size >= 1000000))  in timerlat_hist_parse_args()
     [all …]

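Both rtla histogram tools share the linear bucketing step visible above: the measured
duration or latency is divided by bucket_size, and samples past the last entry land in a
final overflow bucket. Below is a minimal, self-contained userspace sketch of that idea;
the struct and function names are illustrative, not the actual rtla code.

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical, simplified histogram: the real rtla tools keep one
     * histogram per CPU plus count/sum/min/max; only the bucket index
     * computation is modelled here. */
    struct hist {
            int bucket_size;                /* width of one bucket (e.g. microseconds) */
            int entries;                    /* number of regular buckets */
            unsigned long long *samples;    /* entries + 1 slots; the last one is overflow */
    };

    static struct hist *hist_alloc(int entries, int bucket_size)
    {
            struct hist *h = calloc(1, sizeof(*h));

            if (!h)
                    return NULL;
            h->bucket_size = bucket_size;
            h->entries = entries;
            h->samples = calloc(entries + 1, sizeof(*h->samples));
            if (!h->samples) {
                    free(h);
                    return NULL;
            }
            return h;
    }

    /* Mirrors the "bucket = latency / data->bucket_size" step seen in
     * timerlat_hist_update() / osnoise_hist_update_multiple(). */
    static void hist_update(struct hist *h, unsigned long long latency)
    {
            unsigned long long bucket = latency / h->bucket_size;

            if (bucket >= (unsigned long long)h->entries)
                    bucket = h->entries;    /* overflow bucket */
            h->samples[bucket]++;
    }

    int main(void)
    {
            struct hist *h = hist_alloc(10, 5);     /* ten 5-unit-wide buckets */
            unsigned long long v[] = { 1, 4, 5, 12, 49, 200 };

            if (!h)
                    return 1;
            for (size_t i = 0; i < sizeof(v) / sizeof(v[0]); i++)
                    hist_update(h, v[i]);
            for (int i = 0; i <= h->entries; i++)
                    printf("bucket %2d: %llu\n", i, h->samples[i]);
            free(h->samples);
            free(h);
            return 0;
    }

The timerlat option parser (source lines 652-653 above) also rejects bucket sizes of 0 or
of 1000000 and larger, which keeps the division meaningful and the bucket count bounded.
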
/Linux-v6.6/tools/testing/selftests/kvm/

  kvm_binary_stats_test.c
     130  TEST_ASSERT(pdesc->bucket_size,  in stats_test()
     135  TEST_ASSERT(!pdesc->bucket_size,  in stats_test()

/Linux-v6.6/drivers/md/bcache/

  alloc.c
      90  unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;  in bch_rescale_priorities()
     339  ca->sb.bucket_size, GFP_KERNEL);  in bch_allocator_thread()
     449  SET_GC_SECTORS_USED(b, ca->sb.bucket_size);  in bch_bucket_alloc()
     585  ret->sectors_free = c->cache->sb.bucket_size;  in pick_data_bucket()

  super.c
      66  unsigned int bucket_size = le16_to_cpu(s->bucket_size);  in get_bucket_size()  local
      73  order = le16_to_cpu(s->bucket_size);  in get_bucket_size()
      81  bucket_size = 1 << order;  in get_bucket_size()
      83  bucket_size +=  in get_bucket_size()
      88  return bucket_size;  in get_bucket_size()
      99  sb->bucket_size = get_bucket_size(sb, s);  in read_super_common()
     125  if (!is_power_of_2(sb->bucket_size))  in read_super_common()
     129  if (sb->bucket_size < PAGE_SECTORS)  in read_super_common()
     134  sb->bucket_size * sb->nbuckets)  in read_super_common()
     157  if (sb->first_bucket * sb->bucket_size < 16)  in read_super_common()
     [all …]

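read_super_common() validates the bucket size it decodes from the superblock before
trusting it. A hedged, userspace-style sketch of the kind of checks the matches above
point at; the helper name, the PAGE_SECTORS value and the device-size argument are
assumptions for the example, not bcache's exact code.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SECTORS 8  /* assumption: 4 KiB pages, 512-byte sectors */

    /* The bucket size must be a power of two, at least one page worth of
     * sectors, and small enough that all nbuckets buckets fit the device. */
    static bool bucket_size_valid(unsigned int bucket_size, uint64_t nbuckets,
                                  uint64_t device_sectors)
    {
            if (bucket_size == 0 || (bucket_size & (bucket_size - 1)))
                    return false;           /* not a power of two */
            if (bucket_size < PAGE_SECTORS)
                    return false;           /* smaller than a page */
            if ((uint64_t)bucket_size * nbuckets > device_sectors)
                    return false;           /* buckets would overrun the device */
            return true;
    }

    int main(void)
    {
            /* 1024-sector (512 KiB) buckets, 2 Mi buckets, 1 TiB worth of sectors */
            printf("%d\n", bucket_size_valid(1024, 2097152, 2147483648ULL));
            printf("%d\n", bucket_size_valid(1000, 2097152, 2147483648ULL)); /* not pow2 */
            return 0;
    }
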
  movinggc.c
     209  reserve_sectors = ca->sb.bucket_size *  in bch_moving_gc()
     217  GC_SECTORS_USED(b) == ca->sb.bucket_size ||  in bch_moving_gc()

  bcache_ondisk.h
     192  __le16 bucket_size;  /* sectors */  member
     254  __u32 bucket_size;  /* sectors */  member

  bcache.h
     764  #define bucket_bytes(ca) ((ca)->sb.bucket_size << 9)
     775  n = sb->bucket_size / PAGE_SECTORS;  in meta_bucket_pages()
     806  return s & (c->cache->sb.bucket_size - 1);  in bucket_remainder()

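bcache keeps bucket_size in 512-byte sectors, so bucket_bytes() is just a shift left by
9, meta_bucket_pages() divides by the sectors per page, and bucket_remainder() can use a
mask because the size is a power of two. A small illustrative translation of those
helpers; the names mirror the matches above, but PAGE_SECTORS and the signatures are
assumptions, not the kernel definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT    9       /* 512-byte sectors */
    #define PAGE_SECTORS    8       /* assumption: 4 KiB pages */

    static inline uint64_t bucket_bytes(unsigned int bucket_size_sectors)
    {
            return (uint64_t)bucket_size_sectors << SECTOR_SHIFT;
    }

    static inline unsigned int meta_bucket_pages(unsigned int bucket_size_sectors)
    {
            return bucket_size_sectors / PAGE_SECTORS;
    }

    /* Offset of a sector within its bucket; valid only for power-of-two sizes. */
    static inline uint64_t bucket_remainder(uint64_t sector,
                                            unsigned int bucket_size_sectors)
    {
            return sector & (bucket_size_sectors - 1);
    }

    int main(void)
    {
            unsigned int bs = 1024;         /* 1024 sectors = 512 KiB buckets */

            printf("%llu bytes, %u pages, remainder of sector 3000: %llu\n",
                   (unsigned long long)bucket_bytes(bs), meta_bucket_pages(bs),
                   (unsigned long long)bucket_remainder(3000, bs));
            return 0;
    }
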
  sysfs.c
      66  read_attribute(bucket_size);
     737  sysfs_hprint(bucket_size, bucket_bytes(c->cache));  in SHOW()
    1041  sysfs_hprint(bucket_size, bucket_bytes(ca));  in SHOW()
    1126  n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));  in SHOW()

  extents.c
      57  if (KEY_SIZE(k) + r > c->cache->sb.bucket_size ||  in __ptr_invalid()
      78  if (KEY_SIZE(k) + r > c->cache->sb.bucket_size)  in bch_ptr_status()

  journal.c
      52  while (offset < ca->sb.bucket_size) {  in journal_read_bucket()
      53  reread: left = ca->sb.bucket_size - offset;  in journal_read_bucket()
     687  c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;  in journal_reclaim()

  btree.h
     197  atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16);  in set_gc_sectors()

  writeback.c
      38  uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size -  in __calc_target_rate()
     108  div_s64((dirty_buckets * c->cache->sb.bucket_size), dirty);  in __update_writeback_rate()

  btree.c
     160  iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;  in bch_btree_node_read_done()

/Linux-v6.6/drivers/net/ethernet/marvell/

  mv643xx_eth.c
    1137  int bucket_size;  in tx_set_rate()  local
    1147  bucket_size = (burst + 255) >> 8;  in tx_set_rate()
    1148  if (bucket_size > 65535)  in tx_set_rate()
    1149  bucket_size = 65535;  in tx_set_rate()
    1155  wrlp(mp, TX_BW_BURST, bucket_size);  in tx_set_rate()
    1160  wrlp(mp, TX_BW_BURST_MOVED, bucket_size);  in tx_set_rate()
    1169  int bucket_size;  in txq_set_rate()  local
    1175  bucket_size = (burst + 255) >> 8;  in txq_set_rate()
    1176  if (bucket_size > 65535)  in txq_set_rate()
    1177  bucket_size = 65535;  in txq_set_rate()
    [all …]

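Here bucket_size is the hardware token-bucket depth: the requested burst is rounded up
with (burst + 255) >> 8, i.e. to a 256-unit granularity, and clamped so it fits the
16-bit TX_BW_BURST register. A standalone sketch of just that arithmetic; the helper
name is illustrative.

    #include <stdio.h>

    static unsigned int burst_to_bucket_size(unsigned int burst)
    {
            unsigned int bucket_size = (burst + 255) >> 8;  /* ceil(burst / 256) */

            if (bucket_size > 65535)
                    bucket_size = 65535;    /* register is 16 bits wide */
            return bucket_size;
    }

    int main(void)
    {
            printf("%u\n", burst_to_bucket_size(1000));     /* -> 4 */
            printf("%u\n", burst_to_bucket_size(20000000)); /* clamps to 65535 */
            return 0;
    }
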
/Linux-v6.6/kernel/bpf/

  hashtab.c
    1684  u32 batch, max_count, size, bucket_size, map_id;  in __htab_map_lookup_and_delete_batch()  local
    1728  bucket_size = 5;  in __htab_map_lookup_and_delete_batch()
    1734  keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);  in __htab_map_lookup_and_delete_batch()
    1735  values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);  in __htab_map_lookup_and_delete_batch()
    1780  if (bucket_cnt > bucket_size) {  in __htab_map_lookup_and_delete_batch()
    1781  bucket_size = bucket_cnt;  in __htab_map_lookup_and_delete_batch()

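The batch lookup-and-delete path guesses that a bucket holds at most five elements,
allocates key/value buffers for that many, and when it meets a larger bucket it frees
the buffers, raises bucket_size to the observed count and retries. A hedged userspace
sketch of that grow-and-retry pattern; struct and function names are illustrative, not
the kernel's.

    #include <stdlib.h>

    struct batch_bufs {
            void *keys;
            void *values;
            unsigned int bucket_size;       /* elements the buffers can hold */
    };

    static int batch_bufs_alloc(struct batch_bufs *b, size_t key_size,
                                size_t value_size, unsigned int bucket_size)
    {
            b->keys = calloc(bucket_size, key_size);
            b->values = calloc(bucket_size, value_size);
            if (!b->keys || !b->values) {
                    free(b->keys);
                    free(b->values);
                    return -1;
            }
            b->bucket_size = bucket_size;
            return 0;
    }

    /* Called when a bucket turns out to hold bucket_cnt > bucket_size elements. */
    static int batch_bufs_grow(struct batch_bufs *b, size_t key_size,
                               size_t value_size, unsigned int bucket_cnt)
    {
            free(b->keys);
            free(b->values);
            return batch_bufs_alloc(b, key_size, value_size, bucket_cnt);
    }

    int main(void)
    {
            struct batch_bufs b;

            if (batch_bufs_alloc(&b, 4, 8, 5))      /* initial guess: 5 elements */
                    return 1;
            if (batch_bufs_grow(&b, 4, 8, 12))      /* bucket with 12 elements found */
                    return 1;
            free(b.keys);
            free(b.values);
            return 0;
    }
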
/Linux-v6.6/Documentation/ABI/testing/

  sysfs-block-bcache
     116  What: /sys/block/<disk>/bcache/bucket_size

/Linux-v6.6/include/linux/

  kvm_host.h
    1789  .bucket_size = bsz
    1915  u64 value, size_t bucket_size)  in kvm_stats_linear_hist_update()  argument
    1917  size_t index = div64_u64(value, bucket_size);  in kvm_stats_linear_hist_update()

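kvm_stats_linear_hist_update() derives the bucket index as value / bucket_size
(div64_u64 in the kernel), with out-of-range samples accumulated in the final,
open-ended bucket. A userspace rendering of that update; treat the exact code below as
an illustrative sketch rather than the kernel helper itself.

    #include <stdint.h>
    #include <stdio.h>

    static void linear_hist_update(uint64_t *data, size_t size,
                                   uint64_t value, size_t bucket_size)
    {
            uint64_t index = value / bucket_size;

            if (index > size - 1)
                    index = size - 1;       /* accumulate overflows in the last bucket */
            data[index]++;
    }

    int main(void)
    {
            uint64_t hist[4] = { 0 };

            linear_hist_update(hist, 4, 7, 10);     /* bucket 0 */
            linear_hist_update(hist, 4, 25, 10);    /* bucket 2 */
            linear_hist_update(hist, 4, 999, 10);   /* clamped into bucket 3 */
            for (size_t i = 0; i < 4; i++)
                    printf("%zu: %llu\n", i, (unsigned long long)hist[i]);
            return 0;
    }
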
/Linux-v6.6/Documentation/admin-guide/

  bcache.rst
     226  bucket_size: 1024
     528  bucket_size
     615  bucket_size

/Linux-v6.6/tools/include/uapi/linux/

  kvm.h
    2206  __u32 bucket_size;  member

/Linux-v6.6/include/uapi/linux/

  kvm.h
    2213  __u32 bucket_size;  member

/Linux-v6.6/Documentation/virt/kvm/

  api.rst
    5823  __u32 bucket_size;
    5905  The ``bucket_size`` field is used as a parameter for histogram statistics data.

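For a linear histogram, bucket i then covers [i * bucket_size, (i + 1) * bucket_size)
and the last bucket is open ended. A hedged sketch of how a userspace stats reader
might label those ranges; the struct below is a simplified view for the example, not
the uapi struct kvm_stats_desc.

    #include <stdint.h>
    #include <stdio.h>

    struct stats_desc_view {
            uint32_t size;          /* number of buckets (data values) */
            uint32_t bucket_size;   /* width of each bucket */
    };

    static void print_linear_buckets(const struct stats_desc_view *d)
    {
            for (uint32_t i = 0; i < d->size; i++) {
                    uint64_t lo = (uint64_t)i * d->bucket_size;

                    if (i + 1 < d->size)
                            printf("bucket %u: [%llu, %llu)\n", i,
                                   (unsigned long long)lo,
                                   (unsigned long long)(lo + d->bucket_size));
                    else
                            printf("bucket %u: [%llu, +inf)\n", i,
                                   (unsigned long long)lo);
            }
    }

    int main(void)
    {
            struct stats_desc_view d = { .size = 4, .bucket_size = 10 };

            print_linear_buckets(&d);
            return 0;
    }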