/Linux-v5.4/drivers/lightnvm/

  pblk-rb.c
      48  static unsigned int pblk_rb_calculate_size(unsigned int nr_entries,   [in pblk_rb_calculate_size(), argument]
      52  unsigned int max_sz = max(thr_sz, nr_entries);   [in pblk_rb_calculate_size()]
      81  unsigned int nr_entries;   [in pblk_rb_init(), local]
      83  nr_entries = pblk_rb_calculate_size(size, threshold);   [in pblk_rb_init()]
      84  entries = vzalloc(array_size(nr_entries, sizeof(struct pblk_rb_entry)));   [in pblk_rb_init()]
      88  power_size = get_count_order(nr_entries);   [in pblk_rb_init()]
      94  rb->nr_entries = (1 << power_size);   [in pblk_rb_init()]
     165  pblk_rl_init(&pblk->rl, rb->nr_entries, threshold);   [in pblk_rb_init()]
     197  return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries);   [in pblk_rb_space()]
     201  unsigned int nr_entries)   [in pblk_rb_ptr_wrap(), argument]
      …   (further matches not shown)
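
Taken together, the pblk_rb_init() matches suggest the write ring buffer is sized to a power of two at least as large as both the requested entry count and the flush threshold (max(), get_count_order(), 1 << power_size). The stand-alone sketch below models only that rounding step; roundup_pow_of_two_u32() and calculate_rb_size() are illustrative names, not pblk functions.

    #include <stdio.h>

    /* Round up to the next power of two, mimicking what the snippets above
     * appear to do with get_count_order() / 1 << power_size.  This is a
     * stand-alone sketch, not the pblk code itself. */
    static unsigned int roundup_pow_of_two_u32(unsigned int n)
    {
        unsigned int order = 0;

        while ((1u << order) < n)
            order++;
        return 1u << order;
    }

    /* Assumed behaviour of pblk_rb_calculate_size(): the ring must hold at
     * least nr_entries, and at least the flush threshold. */
    static unsigned int calculate_rb_size(unsigned int nr_entries,
                                          unsigned int threshold)
    {
        unsigned int wanted = nr_entries > threshold ? nr_entries : threshold;

        return roundup_pow_of_two_u32(wanted);
    }

    int main(void)
    {
        printf("%u\n", calculate_rb_size(1000, 64));   /* prints 1024 */
        return 0;
    }
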

  pblk-cache.c
      29  int nr_entries = pblk_get_secs(bio);   [in pblk_write_to_cache(), local]
      40  ret = pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos);   [in pblk_write_to_cache()]
      61  for (i = 0; i < nr_entries; i++) {   [in pblk_write_to_cache()]
      72  atomic64_add(nr_entries, &pblk->user_wa);   [in pblk_write_to_cache()]
      75  atomic_long_add(nr_entries, &pblk->inflight_writes);   [in pblk_write_to_cache()]
      76  atomic_long_add(nr_entries, &pblk->req_writes);   [in pblk_write_to_cache()]
      79  pblk_rl_inserted(&pblk->rl, nr_entries);   [in pblk_write_to_cache()]

  pblk-rl.c
      36  int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries)   [in pblk_rl_user_may_insert(), argument]
      41  if (unlikely(rb_space >= 0) && (rb_space - nr_entries < 0))   [in pblk_rl_user_may_insert()]
      50  void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries)   [in pblk_rl_inserted(), argument]
      55  atomic_sub(nr_entries, &rl->rb_space);   [in pblk_rl_inserted()]
      58  int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries)   [in pblk_rl_gc_may_insert(), argument]
      68  void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries)   [in pblk_rl_user_in(), argument]
      70  atomic_add(nr_entries, &rl->rb_user_cnt);   [in pblk_rl_user_in()]
      87  void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries)   [in pblk_rl_gc_in(), argument]
      89  atomic_add(nr_entries, &rl->rb_gc_cnt);   [in pblk_rl_gc_in()]
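
The matches above show an atomic credit scheme: pblk_rl_user_may_insert() compares the remaining space against the incoming nr_entries, and pblk_rl_inserted() subtracts what was admitted. The C11 sketch below models just that check-then-consume pattern; struct rl and its field are illustrative, and the real code also treats a negative rb_space as "rate limiting disabled", which is not modelled here.

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Simplified credit counter in the spirit of pblk_rl_user_may_insert()
     * and pblk_rl_inserted(); field names are illustrative, not pblk's. */
    struct rl {
        atomic_int rb_space;    /* remaining entries the buffer will accept */
    };

    static bool rl_user_may_insert(struct rl *rl, int nr_entries)
    {
        /* Admit the write only if enough space credits remain. */
        return atomic_load(&rl->rb_space) - nr_entries >= 0;
    }

    static void rl_inserted(struct rl *rl, int nr_entries)
    {
        /* Consume the credits the admitted write will occupy. */
        atomic_fetch_sub(&rl->rb_space, nr_entries);
    }
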

/Linux-v5.4/fs/xfs/libxfs/

  xfs_iext_tree.c
     454  int *nr_entries)   [in xfs_iext_split_node(), argument]
     466  *nr_entries = 0;   [in xfs_iext_split_node()]
     482  *nr_entries = nr_move;   [in xfs_iext_split_node()]
     484  *nr_entries = nr_keep;   [in xfs_iext_split_node()]
     500  int i, pos, nr_entries;   [in xfs_iext_insert_node(), local]
     509  nr_entries = xfs_iext_node_nr_entries(node, pos);   [in xfs_iext_insert_node()]
     511  ASSERT(pos >= nr_entries || xfs_iext_key_cmp(node, pos, offset) != 0);   [in xfs_iext_insert_node()]
     512  ASSERT(nr_entries <= KEYS_PER_NODE);   [in xfs_iext_insert_node()]
     514  if (nr_entries == KEYS_PER_NODE)   [in xfs_iext_insert_node()]
     515  new = xfs_iext_split_node(&node, &pos, &nr_entries);   [in xfs_iext_insert_node()]
      …   (further matches not shown)

/Linux-v5.4/drivers/dma/dw-edma/

  dw-edma-v0-debugfs.c
      98  int nr_entries, struct dentry *dir)   [in dw_edma_debugfs_create_x32(), argument]
     102  for (i = 0; i < nr_entries; i++) {   [in dw_edma_debugfs_create_x32()]
     112  int nr_entries;   [in dw_edma_debugfs_regs_ch(), local]
     125  nr_entries = ARRAY_SIZE(debugfs_regs);   [in dw_edma_debugfs_regs_ch()]
     126  dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, dir);   [in dw_edma_debugfs_regs_ch()]
     167  int nr_entries, i;   [in dw_edma_debugfs_regs_wr(), local]
     174  nr_entries = ARRAY_SIZE(debugfs_regs);   [in dw_edma_debugfs_regs_wr()]
     175  dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);   [in dw_edma_debugfs_regs_wr()]
     178  nr_entries = ARRAY_SIZE(debugfs_unroll_regs);   [in dw_edma_debugfs_regs_wr()]
     179  dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,   [in dw_edma_debugfs_regs_wr()]
      …   (further matches not shown)
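
dw_edma_debugfs_create_x32() appears to walk a register table whose length is taken with ARRAY_SIZE() and create one x32 debugfs file per entry. Below is a reduced, hedged sketch of that loop, assuming a simplified struct dbg_reg entry type; the real driver's entry layout is not visible in the matches above.

    #include <linux/debugfs.h>
    #include <linux/types.h>

    /* Illustrative only: a reduced version of the debugfs_create_x32 loop
     * shown above, with a made-up entry type. */
    struct dbg_reg {
        const char *name;
        u32 *value;             /* register shadow to expose read-only */
    };

    static void create_x32_files(const struct dbg_reg *regs, int nr_entries,
                                 struct dentry *dir)
    {
        int i;

        for (i = 0; i < nr_entries; i++)
            debugfs_create_x32(regs[i].name, 0444, dir, regs[i].value);
    }
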

/Linux-v5.4/drivers/md/persistent-data/

  dm-btree-remove.c
      58  uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);   [in node_shift(), local]
      63  BUG_ON(shift > nr_entries);   [in node_shift()]
      67  (nr_entries - shift) * sizeof(__le64));   [in node_shift()]
      70  (nr_entries - shift) * value_size);   [in node_shift()]
      72  BUG_ON(nr_entries + shift > le32_to_cpu(n->header.max_entries));   [in node_shift()]
      75  nr_entries * sizeof(__le64));   [in node_shift()]
      78  nr_entries * value_size);   [in node_shift()]
      84  uint32_t nr_left = le32_to_cpu(left->header.nr_entries);   [in node_copy()]
     113  unsigned nr_entries = le32_to_cpu(n->header.nr_entries);   [in delete_at(), local]
     114  unsigned nr_to_copy = nr_entries - (index + 1);   [in delete_at()]
      …   (further matches not shown)

  dm-btree.c
      43  int lo = -1, hi = le32_to_cpu(n->header.nr_entries);   [in bsearch()]
      75  uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);   [in inc_children(), local]
      78  for (i = 0; i < nr_entries; i++)   [in inc_children()]
      81  for (i = 0; i < nr_entries; i++)   [in inc_children()]
      89  uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);   [in insert_at(), local]
      92  if (index > nr_entries ||   [in insert_at()]
     101  array_insert(node->keys, sizeof(*node->keys), nr_entries, index, &key_le);   [in insert_at()]
     102  array_insert(value_base(node), value_size, nr_entries, index, value);   [in insert_at()]
     103  node->header.nr_entries = cpu_to_le32(nr_entries + 1);   [in insert_at()]
     144  n->header.nr_entries = cpu_to_le32(0);   [in dm_btree_empty()]
      …   (further matches not shown)
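
insert_at() opens a gap at index in arrays that currently hold nr_entries elements (array_insert() on both the keys and the values), then stores the incremented count back as little-endian. Below is a generic, self-contained version of that shift-and-store step; it ignores dm-btree's on-disk node layout and is not the kernel's array_insert() itself.

    #include <stdint.h>
    #include <string.h>

    /* Generic shift-right-by-one insert: move elements [index, nr_entries)
     * up by one slot and write the new element at index.  The caller must
     * ensure the array has room for nr_entries + 1 elements. */
    static void array_insert(void *base, size_t elt_size, unsigned nr_entries,
                             unsigned index, const void *elt)
    {
        uint8_t *b = base;

        if (index < nr_entries)
            memmove(b + (index + 1) * elt_size,
                    b + index * elt_size,
                    (nr_entries - index) * elt_size);
        memcpy(b + index * elt_size, elt, elt_size);
    }
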

  dm-array.c
      26  __le32 nr_entries;   [member]
     113  unsigned i, nr_entries = le32_to_cpu(ab->nr_entries);   [in on_entries(), local]
     115  for (i = 0; i < nr_entries; i++)   [in on_entries()]
     164  (*ab)->nr_entries = cpu_to_le32(0);   [in alloc_ablock()]
     179  uint32_t nr_entries;   [in fill_ablock(), local]
     183  BUG_ON(new_nr < le32_to_cpu(ab->nr_entries));   [in fill_ablock()]
     185  nr_entries = le32_to_cpu(ab->nr_entries);   [in fill_ablock()]
     186  for (i = nr_entries; i < new_nr; i++) {   [in fill_ablock()]
     191  ab->nr_entries = cpu_to_le32(new_nr);   [in fill_ablock()]
     203  uint32_t nr_entries;   [in trim_ablock(), local]
      …   (further matches not shown)

/Linux-v5.4/arch/x86/kernel/

  e820.c
      82  for (i = 0; i < table->nr_entries; i++) {   [in _e820__mapped_any()]
     117  for (i = 0; i < e820_table->nr_entries; i++) {   [in __e820__mapped_all()]
     168  int x = table->nr_entries;   [in __e820__range_add()]
     179  table->nr_entries++;   [in __e820__range_add()]
     206  for (i = 0; i < e820_table->nr_entries; i++) {   [in e820__print_table()]
     317  if (table->nr_entries < 2)   [in e820__update_table()]
     320  BUG_ON(table->nr_entries > max_nr_entries);   [in e820__update_table()]
     323  for (i = 0; i < table->nr_entries; i++) {   [in e820__update_table()]
     329  for (i = 0; i < 2 * table->nr_entries; i++)   [in e820__update_table()]
     337  for (i = 0; i < table->nr_entries; i++) {   [in e820__update_table()]
      …   (further matches not shown)
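
__e820__range_add() fills slot table->nr_entries and then increments the count, so the table behaves as an append-only array with a fixed capacity. Here is a compact stand-alone model of that append; the field names, struct range, and MAX_ENTRIES are simplified stand-ins, not the real e820 types.

    #include <stdint.h>

    #define MAX_ENTRIES 128         /* stand-in for the e820 table capacity */

    struct range {
        uint64_t addr;
        uint64_t size;
        int type;
    };

    struct range_table {
        int nr_entries;
        struct range entries[MAX_ENTRIES];
    };

    /* Mirrors the shape of __e820__range_add(): fill the next free slot,
     * then publish it by bumping nr_entries. */
    static int range_add(struct range_table *t, uint64_t addr, uint64_t size,
                         int type)
    {
        int x = t->nr_entries;

        if (x >= MAX_ENTRIES)
            return -1;      /* table full; the real code warns and drops it */

        t->entries[x].addr = addr;
        t->entries[x].size = size;
        t->entries[x].type = type;
        t->nr_entries++;
        return 0;
    }
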

/Linux-v5.4/tools/perf/util/

  syscalltbl.c
      61  int nr_entries = 0, i, j;   [in syscalltbl__init_native(), local]
      66  ++nr_entries;   [in syscalltbl__init_native()]
      68  entries = tbl->syscalls.entries = malloc(sizeof(struct syscall) * nr_entries);   [in syscalltbl__init_native()]
      80  qsort(tbl->syscalls.entries, nr_entries, sizeof(struct syscall), syscallcmp);   [in syscalltbl__init_native()]
      81  tbl->syscalls.nr_entries = nr_entries;   [in syscalltbl__init_native()]
     112  tbl->syscalls.nr_entries, sizeof(*sc),   [in syscalltbl__id()]
     123  for (i = *idx + 1; i < tbl->syscalls.nr_entries; ++i) {   [in syscalltbl__strglobmatch_next()]
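
syscalltbl__init_native() counts the entries, allocates the array, sorts it with qsort(), and syscalltbl__id() later looks names up with bsearch() over syscalls.nr_entries elements. A self-contained model of that sort-once, binary-search-later pattern follows; struct entry and the sample data are illustrative, not perf's actual table.

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    struct entry {
        const char *name;
        int id;
    };

    static int entrycmp(const void *a, const void *b)
    {
        const struct entry *ea = a, *eb = b;

        return strcmp(ea->name, eb->name);
    }

    int main(void)
    {
        struct entry table[] = {
            { "write", 1 }, { "read", 0 }, { "openat", 257 },
        };
        int nr_entries = sizeof(table) / sizeof(table[0]);
        struct entry key = { .name = "read" }, *found;

        /* Sort once, then do O(log n) lookups, in the spirit of
         * syscalltbl__init_native() and syscalltbl__id(). */
        qsort(table, nr_entries, sizeof(table[0]), entrycmp);
        found = bsearch(&key, table, nr_entries, sizeof(table[0]), entrycmp);
        printf("%s -> %d\n", key.name, found ? found->id : -1);
        return 0;
    }
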

/Linux-v5.4/kernel/

  stacktrace.c
      23  void stack_trace_print(const unsigned long *entries, unsigned int nr_entries,   [in stack_trace_print(), argument]
      31  for (i = 0; i < nr_entries; i++)   [in stack_trace_print()]
      47  unsigned int nr_entries, int spaces)   [in stack_trace_snprint(), argument]
      54  for (i = 0; i < nr_entries && size; i++) {   [in stack_trace_snprint()]
     282  return trace.nr_entries;   [in stack_trace_save()]
     307  return trace.nr_entries;   [in stack_trace_save_tsk()]
     329  return trace.nr_entries;   [in stack_trace_save_regs()]
     354  return ret ? ret : trace.nr_entries;   [in stack_trace_save_tsk_reliable()]
     374  return trace.nr_entries;   [in stack_trace_save_user()]

  backtracetest.c
      48  unsigned int nr_entries;   [in backtrace_test_saved(), local]
      53  nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);   [in backtrace_test_saved()]
      54  stack_trace_print(entries, nr_entries, 0);   [in backtrace_test_saved()]

/Linux-v5.4/mm/

  page_owner.c
      59  unsigned int nr_entries;   [in create_dummy_stack(), local]
      61  nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);   [in create_dummy_stack()]
      62  return stack_depot_save(entries, nr_entries, GFP_KERNEL);   [in create_dummy_stack()]
     104  unsigned int nr_entries,   [in check_recursive_alloc(), argument]
     109  for (i = 0; i < nr_entries; i++) {   [in check_recursive_alloc()]
     120  unsigned int nr_entries;   [in save_stack(), local]
     122  nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);   [in save_stack()]
     132  if (check_recursive_alloc(entries, nr_entries, _RET_IP_))   [in save_stack()]
     135  handle = stack_depot_save(entries, nr_entries, flags);   [in save_stack()]
     344  unsigned int nr_entries;   [in print_page_owner(), local]
      …   (further matches not shown)
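
save_stack() captures the current call chain with stack_trace_save() and then hands the entries/nr_entries pair to stack_depot_save(), keeping only the returned handle. A minimal kernel-style sketch of that pattern follows, assuming the v5.4 signatures of those two helpers; DEPTH and capture_stack() are illustrative names, not page_owner's.

    #include <linux/kernel.h>
    #include <linux/stacktrace.h>
    #include <linux/stackdepot.h>
    #include <linux/gfp.h>

    #define DEPTH 16        /* illustrative; page_owner uses its own constant */

    /* Capture the current stack, then deduplicate it in the stack depot and
     * keep only the compact handle. */
    static depot_stack_handle_t capture_stack(gfp_t flags)
    {
        unsigned long entries[DEPTH];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
        return stack_depot_save(entries, nr_entries, flags);
    }
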

/Linux-v5.4/tools/perf/ui/

  browser.c
     108  if (browser->nr_entries == 0)   [in ui_browser__list_head_seek()]
     316  if (browser->nr_entries > 1) {   [in ui_browser__scrollbar_set()]
     318  (browser->nr_entries - 1));   [in ui_browser__scrollbar_set()]
     348  if (browser->nr_entries == 0 && browser->no_samples_msg)   [in __ui_browser__refresh()]
     368  void ui_browser__update_nr_entries(struct ui_browser *browser, u32 nr_entries)   [in ui_browser__update_nr_entries(), argument]
     370  off_t offset = nr_entries - browser->nr_entries;   [in ui_browser__update_nr_entries()]
     372  browser->nr_entries = nr_entries;   [in ui_browser__update_nr_entries()]
     424  if (browser->index == browser->nr_entries - 1)   [in ui_browser__run()]
     455  if (browser->top_idx + browser->rows > browser->nr_entries - 1)   [in ui_browser__run()]
     459  if (browser->index + offset > browser->nr_entries - 1)   [in ui_browser__run()]
      …   (further matches not shown)

/Linux-v5.4/arch/mips/kernel/

  stacktrace.c
      30  trace->entries[trace->nr_entries++] = addr;   [in save_raw_context_stack()]
      31  if (trace->nr_entries >= trace->max_entries)   [in save_raw_context_stack()]
      58  trace->entries[trace->nr_entries++] = pc;   [in save_context_stack()]
      59  if (trace->nr_entries >= trace->max_entries)   [in save_context_stack()]
      83  WARN_ON(trace->nr_entries || !trace->max_entries);   [in save_stack_trace_tsk()]
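
This file and the openrisc, sh, sparc, ia64 and parisc walkers listed further below all fill a caller-provided struct stack_trace, guarding every store with nr_entries < max_entries. A stand-alone model of that contract: the struct layout follows include/linux/stacktrace.h in v5.4 when CONFIG_ARCH_STACKWALK is not selected, and save_stack_address() here is a simplified stand-in, not any one architecture's version.

    #include <stdbool.h>

    /* Shape of the legacy struct stack_trace consumed by these arch walkers. */
    struct stack_trace {
        unsigned int nr_entries, max_entries;
        unsigned long *entries;
        int skip;
    };

    /* The guard repeated in every walker above: record an address only while
     * room remains, and tell the caller when the buffer is full. */
    static bool save_stack_address(struct stack_trace *trace, unsigned long addr)
    {
        if (trace->nr_entries < trace->max_entries)
            trace->entries[trace->nr_entries++] = addr;
        return trace->nr_entries >= trace->max_entries;   /* true: stop walking */
    }
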

/Linux-v5.4/tools/perf/trace/beauty/

  ioctl.c
      41  if (nr < strarray__ioctl_tty_cmd.nr_entries && strarray__ioctl_tty_cmd.entries[nr] != NULL)   [in ioctl__scnprintf_tty_cmd()]
      52  if (nr < strarray__drm_ioctl_cmds.nr_entries && strarray__drm_ioctl_cmds.entries[nr] != NULL)   [in ioctl__scnprintf_drm_cmd()]
      63  …if (nr < strarray__sndrv_pcm_ioctl_cmds.nr_entries && strarray__sndrv_pcm_ioctl_cmds.entries[nr] !…   [in ioctl__scnprintf_sndrv_pcm_cmd()]
      74  …if (nr < strarray__sndrv_ctl_ioctl_cmds.nr_entries && strarray__sndrv_ctl_ioctl_cmds.entries[nr] !…   [in ioctl__scnprintf_sndrv_ctl_cmd()]
      85  if (nr < strarray__kvm_ioctl_cmds.nr_entries && strarray__kvm_ioctl_cmds.entries[nr] != NULL)   [in ioctl__scnprintf_kvm_cmd()]
      98  if (nr < s->nr_entries && s->entries[nr] != NULL)   [in ioctl__scnprintf_vhost_virtio_cmd()]
     109  if (nr < strarray__perf_ioctl_cmds.nr_entries && strarray__perf_ioctl_cmds.entries[nr] != NULL)   [in ioctl__scnprintf_perf_cmd()]
     120  …if (nr < strarray__usbdevfs_ioctl_cmds.nr_entries && strarray__usbdevfs_ioctl_cmds.entries[nr] != …   [in ioctl__scnprintf_usbdevfs_cmd()]
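
Every helper here applies the same guard: use the table entry only when nr is within nr_entries and the slot is non-NULL, otherwise fall back to printing the raw command number. A generic stand-alone version of that lookup follows; struct strarray below is a lookalike for illustration, not perf's actual definition.

    #include <stdio.h>
    #include <stddef.h>

    /* Minimal lookalike of perf's strarray: a value-indexed table of names. */
    struct strarray {
        size_t nr_entries;
        const char **entries;
    };

    /* Same guard as the helpers above: use the name only if the index is in
     * range and the slot is populated, otherwise print the raw value. */
    static int scnprintf_cmd(char *bf, size_t size, const struct strarray *sa,
                             unsigned int nr)
    {
        if (nr < sa->nr_entries && sa->entries[nr] != NULL)
            return snprintf(bf, size, "%s", sa->entries[nr]);
        return snprintf(bf, size, "(%#x)", nr);
    }
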

/Linux-v5.4/drivers/md/

  dm-cache-policy-internal.h
     115  static inline size_t bitset_size_in_bytes(unsigned nr_entries)   [in bitset_size_in_bytes(), argument]
     117  return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);   [in bitset_size_in_bytes()]
     120  static inline unsigned long *alloc_bitset(unsigned nr_entries)   [in alloc_bitset(), argument]
     122  size_t s = bitset_size_in_bytes(nr_entries);   [in alloc_bitset()]
     126  static inline void clear_bitset(void *bitset, unsigned nr_entries)   [in clear_bitset(), argument]
     128  size_t s = bitset_size_in_bytes(nr_entries);   [in clear_bitset()]
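
bitset_size_in_bytes() sizes a bitset as one bit per entry, rounded up to whole unsigned longs, and the other two helpers allocate and clear a buffer of that size. Here is a userspace equivalent of the same arithmetic; the kernel header uses its own allocator for alloc_bitset(), which is not shown in the matches, so calloc()/memset() below are stand-ins.

    #include <stdlib.h>
    #include <string.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* Same arithmetic as bitset_size_in_bytes(): one bit per entry, rounded
     * up to a whole number of longs. */
    static size_t bitset_size_in_bytes(unsigned nr_entries)
    {
        return sizeof(unsigned long) *
               ((nr_entries + BITS_PER_LONG - 1) / BITS_PER_LONG);
    }

    /* Userspace stand-ins for alloc_bitset()/clear_bitset(). */
    static unsigned long *alloc_bitset(unsigned nr_entries)
    {
        return calloc(1, bitset_size_in_bytes(nr_entries));
    }

    static void clear_bitset(void *bitset, unsigned nr_entries)
    {
        memset(bitset, 0, bitset_size_in_bytes(nr_entries));
    }
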

/Linux-v5.4/arch/openrisc/kernel/

  stacktrace.c
      37  if (trace->nr_entries < trace->max_entries)   [in save_stack_address()]
      38  trace->entries[trace->nr_entries++] = addr;   [in save_stack_address()]
      63  if (trace->nr_entries < trace->max_entries)   [in save_stack_address_nosched()]
      64  trace->entries[trace->nr_entries++] = addr;   [in save_stack_address_nosched()]

/Linux-v5.4/arch/sh/kernel/

  stacktrace.c
      38  if (trace->nr_entries < trace->max_entries)   [in save_stack_address()]
      39  trace->entries[trace->nr_entries++] = addr;   [in save_stack_address()]
      71  if (trace->nr_entries < trace->max_entries)   [in save_stack_address_nosched()]
      72  trace->entries[trace->nr_entries++] = addr;   [in save_stack_address_nosched()]

/Linux-v5.4/drivers/net/ethernet/mellanox/mlxsw/

  spectrum1_kvdl.c
     122  unsigned int entry_index, nr_entries;   [in mlxsw_sp1_kvdl_part_alloc(), local]
     124  nr_entries = (info->end_index - info->start_index + 1) /   [in mlxsw_sp1_kvdl_part_alloc()]
     126  entry_index = find_first_zero_bit(part->usage, nr_entries);   [in mlxsw_sp1_kvdl_part_alloc()]
     127  if (entry_index == nr_entries)   [in mlxsw_sp1_kvdl_part_alloc()]
     215  unsigned int nr_entries;   [in mlxsw_sp1_kvdl_part_init(), local]
     227  nr_entries = div_u64(resource_size, info->alloc_size);   [in mlxsw_sp1_kvdl_part_init()]
     228  usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);   [in mlxsw_sp1_kvdl_part_init()]
     281  unsigned int nr_entries;   [in mlxsw_sp1_kvdl_part_occ(), local]
     285  nr_entries = (info->end_index -   [in mlxsw_sp1_kvdl_part_occ()]
     288  while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))   [in mlxsw_sp1_kvdl_part_occ()]
      …   (further matches not shown)
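
mlxsw_sp1_kvdl_part_alloc() treats part->usage as a bitmap with nr_entries valid bits: find_first_zero_bit() returning nr_entries means the part is full. Below is a self-contained model of that convention, with a simple linear scan standing in for the kernel bitmap helper.

    #include <limits.h>
    #include <stdio.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* Linear stand-in for find_first_zero_bit(): returns nr_entries when no
     * free slot exists, exactly the "full" convention used above. */
    static unsigned int first_zero_bit(const unsigned long *usage,
                                       unsigned int nr_entries)
    {
        unsigned int i;

        for (i = 0; i < nr_entries; i++)
            if (!(usage[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
                return i;
        return nr_entries;
    }

    int main(void)
    {
        unsigned long usage[1] = { 0x7 };       /* bits 0..2 already taken */
        unsigned int nr_entries = 16;
        unsigned int entry = first_zero_bit(usage, nr_entries);

        if (entry == nr_entries)
            puts("no free entry");
        else
            printf("allocated entry %u\n", entry);      /* prints 3 */
        return 0;
    }
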

/Linux-v5.4/tools/include/linux/

  stacktrace.h
       8  unsigned int nr_entries, max_entries;   [member]
      15  backtrace_symbols_fd((void **)trace->entries, trace->nr_entries, 1);   [in print_stack_trace()]
      19  ((trace)->nr_entries = \
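
This tools/ header maps the kernel-style struct stack_trace onto glibc: nr_entries becomes whatever backtrace() returns, and print_stack_trace() forwards it to backtrace_symbols_fd(). A runnable userspace example of the same two calls; MAX_DEPTH and the function names are illustrative.

    #include <execinfo.h>
    #include <unistd.h>

    #define MAX_DEPTH 32

    /* glibc backtrace() fills the buffer and returns nr_entries, which is
     * then passed straight to backtrace_symbols_fd(). */
    static void print_current_stack(void)
    {
        void *entries[MAX_DEPTH];
        int nr_entries = backtrace(entries, MAX_DEPTH);

        backtrace_symbols_fd(entries, nr_entries, STDERR_FILENO);
    }

    int main(void)
    {
        print_current_stack();
        return 0;
    }
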

/Linux-v5.4/arch/sparc/kernel/

  stacktrace.c
      58  trace->entries[trace->nr_entries++] = pc;   [in __save_stack_trace()]
      66  if (trace->nr_entries <   [in __save_stack_trace()]
      68  trace->entries[trace->nr_entries++] = pc;   [in __save_stack_trace()]
      74  } while (trace->nr_entries < trace->max_entries);   [in __save_stack_trace()]

/Linux-v5.4/arch/ia64/kernel/

  stacktrace.c
      19  trace->nr_entries = 0;   [in ia64_do_save_stack()]
      25  trace->entries[trace->nr_entries++] = ip;   [in ia64_do_save_stack()]
      26  if (trace->nr_entries == trace->max_entries)   [in ia64_do_save_stack()]

/Linux-v5.4/arch/parisc/kernel/

  stacktrace.c
      23  trace->nr_entries = 0;   [in dump_trace()]
      24  while (trace->nr_entries < trace->max_entries) {   [in dump_trace()]
      29  trace->entries[trace->nr_entries++] = info.ip;   [in dump_trace()]

/Linux-v5.4/lib/

  stackdepot.c
     220  unsigned int nr_entries,   [in stack_depot_save(), argument]
     230  if (unlikely(nr_entries == 0))   [in stack_depot_save()]
     233  hash = hash_stack(entries, nr_entries);   [in stack_depot_save()]
     242  nr_entries, hash);   [in stack_depot_save()]
     270  found = find_stack(*bucket, entries, nr_entries, hash);   [in stack_depot_save()]
     273  depot_alloc_stack(entries, nr_entries,   [in stack_depot_save()]