/Linux-v4.19/drivers/lightnvm/ |
D | pblk-rb.c |
  55   rb->nr_entries = (1 << power_size);  in pblk_rb_init()
  122  pblk_rl_init(&pblk->rl, rb->nr_entries);  in pblk_rb_init()
  130  unsigned int pblk_rb_calculate_size(unsigned int nr_entries)  in pblk_rb_calculate_size() argument
  133  return (1 << max(get_count_order(nr_entries), 7));  in pblk_rb_calculate_size()
  168  return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries);  in pblk_rb_space()
  180  return pblk_rb_ring_count(mem, subm, rb->nr_entries);  in pblk_rb_read_count()
  188  return pblk_rb_ring_count(mem, sync, rb->nr_entries);  in pblk_rb_sync_count()
  191  unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)  in pblk_rb_read_commit() argument
  198  (subm + nr_entries) & (rb->nr_entries - 1));  in pblk_rb_read_commit()
  231  rb->l2p_update = (rb->l2p_update + 1) & (rb->nr_entries - 1);  in __pblk_rb_update_l2p()
  [all …]
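
The pblk-rb.c hits show the write buffer being sized to a power of two (at least 128 entries) so positions wrap with a mask, (pos + n) & (nr_entries - 1), instead of a modulo. Below is a minimal user-space sketch of that sizing and masking idiom; the helper names are illustrative, not the pblk API.

    #include <assert.h>
    #include <stdio.h>

    /* Round up to the next power of two with a floor of 128 (2^7),
     * mirroring "1 << max(get_count_order(n), 7)". */
    unsigned int rb_calculate_size(unsigned int nr_entries)
    {
        unsigned int size = 128;

        while (size < nr_entries)
            size <<= 1;
        return size;
    }

    /* With a power-of-two size, advancing a position is a mask, not a modulo. */
    unsigned int rb_advance(unsigned int pos, unsigned int nr, unsigned int size)
    {
        assert((size & (size - 1)) == 0);   /* must be a power of two */
        return (pos + nr) & (size - 1);
    }

    int main(void)
    {
        unsigned int size = rb_calculate_size(1000);          /* -> 1024 */

        printf("size=%u next=%u\n", size, rb_advance(1020, 10, size)); /* next=6 */
        return 0;
    }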
|
D | pblk-cache.c |
  27  int nr_entries = pblk_get_secs(bio);  in pblk_write_to_cache() local
  38  ret = pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos);  in pblk_write_to_cache()
  58  for (i = 0; i < nr_entries; i++) {  in pblk_write_to_cache()
  69  atomic64_add(nr_entries, &pblk->user_wa);  in pblk_write_to_cache()
  72  atomic_long_add(nr_entries, &pblk->inflight_writes);  in pblk_write_to_cache()
  73  atomic_long_add(nr_entries, &pblk->req_writes);  in pblk_write_to_cache()
  76  pblk_rl_inserted(&pblk->rl, nr_entries);  in pblk_write_to_cache()
|
D | pblk-rl.c |
  35  int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries)  in pblk_rl_user_may_insert() argument
  40  if (unlikely(rb_space >= 0) && (rb_space - nr_entries < 0))  in pblk_rl_user_may_insert()
  49  void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries)  in pblk_rl_inserted() argument
  54  atomic_sub(nr_entries, &rl->rb_space);  in pblk_rl_inserted()
  57  int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries)  in pblk_rl_gc_may_insert() argument
  67  void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries)  in pblk_rl_user_in() argument
  69  atomic_add(nr_entries, &rl->rb_user_cnt);  in pblk_rl_user_in()
  86  void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries)  in pblk_rl_gc_in() argument
  88  atomic_add(nr_entries, &rl->rb_gc_cnt);  in pblk_rl_gc_in()
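
pblk-rl.c gates admission into the write buffer with atomic counters: an insert is allowed only while the remaining budget covers nr_entries, and every accepted insert debits it. A rough C11-atomics sketch of that admission check, assuming a single rb_space counter and none of pblk's user/GC split:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct rl {
        atomic_int rb_space;    /* entries still admissible; < 0 means "no limit" */
    };

    bool rl_user_may_insert(struct rl *rl, int nr_entries)
    {
        int space = atomic_load(&rl->rb_space);

        /* Refuse only when a budget is in force and it would go negative. */
        return !(space >= 0 && space - nr_entries < 0);
    }

    void rl_inserted(struct rl *rl, int nr_entries)
    {
        if (atomic_load(&rl->rb_space) >= 0)
            atomic_fetch_sub(&rl->rb_space, nr_entries);
    }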
|
/Linux-v4.19/fs/xfs/libxfs/ |
D | xfs_iext_tree.c |
  458  int *nr_entries)  in xfs_iext_split_node() argument
  470  *nr_entries = 0;  in xfs_iext_split_node()
  486  *nr_entries = nr_move;  in xfs_iext_split_node()
  488  *nr_entries = nr_keep;  in xfs_iext_split_node()
  504  int i, pos, nr_entries;  in xfs_iext_insert_node() local
  513  nr_entries = xfs_iext_node_nr_entries(node, pos);  in xfs_iext_insert_node()
  515  ASSERT(pos >= nr_entries || xfs_iext_key_cmp(node, pos, offset) != 0);  in xfs_iext_insert_node()
  516  ASSERT(nr_entries <= KEYS_PER_NODE);  in xfs_iext_insert_node()
  518  if (nr_entries == KEYS_PER_NODE)  in xfs_iext_insert_node()
  519  new = xfs_iext_split_node(&node, &pos, &nr_entries);  in xfs_iext_insert_node()
  [all …]
|
/Linux-v4.19/arch/parisc/kernel/ |
D | stacktrace.c |
  22  trace->nr_entries = 0;  in dump_trace()
  23  while (trace->nr_entries < trace->max_entries) {  in dump_trace()
  28  trace->entries[trace->nr_entries++] = info.ip;  in dump_trace()
  39  if (trace->nr_entries < trace->max_entries)  in save_stack_trace()
  40  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace()
  47  if (trace->nr_entries < trace->max_entries)  in save_stack_trace_tsk()
  48  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace_tsk()
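
The parisc hits, like the sh, x86, s390, arm, mips, openrisc, arm64, um and sparc stacktrace.c hits further down, all repeat one bounded-append pattern: store an address only while nr_entries < max_entries, and terminate the trace with ULONG_MAX if room remains. A standalone sketch of that pattern, reusing the struct stack_trace field names:

    #include <limits.h>

    struct stack_trace {
        unsigned int nr_entries, max_entries;
        unsigned long *entries;
    };

    /* Append one return address, dropping it if the buffer is already full. */
    void save_stack_address(struct stack_trace *trace, unsigned long addr)
    {
        if (trace->nr_entries < trace->max_entries)
            trace->entries[trace->nr_entries++] = addr;
    }

    /* Mark the end of the trace, as the v4.19 savers do, if space remains. */
    void terminate_trace(struct stack_trace *trace)
    {
        if (trace->nr_entries < trace->max_entries)
            trace->entries[trace->nr_entries++] = ULONG_MAX;
    }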
|
/Linux-v4.19/arch/sh/kernel/ |
D | stacktrace.c |
  41  if (trace->nr_entries < trace->max_entries)  in save_stack_address()
  42  trace->entries[trace->nr_entries++] = addr;  in save_stack_address()
  55  if (trace->nr_entries < trace->max_entries)  in save_stack_trace()
  56  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace()
  76  if (trace->nr_entries < trace->max_entries)  in save_stack_address_nosched()
  77  trace->entries[trace->nr_entries++] = addr;  in save_stack_address_nosched()
  90  if (trace->nr_entries < trace->max_entries)  in save_stack_trace_tsk()
  91  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace_tsk()
|
/Linux-v4.19/arch/x86/kernel/ |
D | stacktrace.c |
  26   if (trace->nr_entries >= trace->max_entries)  in save_stack_address()
  29   trace->entries[trace->nr_entries++] = addr;  in save_stack_address()
  50   if (trace->nr_entries < trace->max_entries)  in __save_stack_trace()
  51   trace->entries[trace->nr_entries++] = ULONG_MAX;  in __save_stack_trace()
  136  if (trace->nr_entries < trace->max_entries)  in __save_stack_trace_reliable()
  137  trace->entries[trace->nr_entries++] = ULONG_MAX;  in __save_stack_trace_reliable()
  197  if (trace->nr_entries < trace->max_entries)  in __save_stack_trace_user()
  198  trace->entries[trace->nr_entries++] = regs->ip;  in __save_stack_trace_user()
  200  while (trace->nr_entries < trace->max_entries) {  in __save_stack_trace_user()
  210  trace->entries[trace->nr_entries++] =  in __save_stack_trace_user()
  [all …]
|
D | e820.c |
  80   for (i = 0; i < e820_table->nr_entries; i++) {  in e820__mapped_any()
  104  for (i = 0; i < e820_table->nr_entries; i++) {  in __e820__mapped_all()
  155  int x = table->nr_entries;  in __e820__range_add()
  166  table->nr_entries++;  in __e820__range_add()
  193  for (i = 0; i < e820_table->nr_entries; i++) {  in e820__print_table()
  304  if (table->nr_entries < 2)  in e820__update_table()
  307  BUG_ON(table->nr_entries > max_nr_entries);  in e820__update_table()
  310  for (i = 0; i < table->nr_entries; i++) {  in e820__update_table()
  316  for (i = 0; i < 2 * table->nr_entries; i++)  in e820__update_table()
  324  for (i = 0; i < table->nr_entries; i++) {  in e820__update_table()
  [all …]
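
__e820__range_add() appends a region at index nr_entries of a fixed-size table and bumps the count; e820__update_table() later sorts and merges the accumulated entries. The sketch below covers only the append step, with an invented struct layout and table size:

    #include <stdio.h>

    #define MAX_ENTRIES 128

    struct range { unsigned long long start, size; int type; };

    struct range_table {
        unsigned int nr_entries;
        struct range entries[MAX_ENTRIES];
    };

    /* Append one range; refuse, like the kernel's "too many entries" warning,
     * when the fixed table is full. */
    int range_add(struct range_table *t, unsigned long long start,
                  unsigned long long size, int type)
    {
        unsigned int x = t->nr_entries;

        if (x >= MAX_ENTRIES) {
            fprintf(stderr, "range table full, ignoring [%llx, %llx)\n",
                    start, start + size);
            return -1;
        }
        t->entries[x].start = start;
        t->entries[x].size  = size;
        t->entries[x].type  = type;
        t->nr_entries++;
        return 0;
    }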
|
/Linux-v4.19/arch/s390/kernel/ |
D | stacktrace.c |
  25  if (trace->nr_entries < trace->max_entries) {  in __save_address()
  26  trace->entries[trace->nr_entries++] = address;  in __save_address()
  48  if (trace->nr_entries < trace->max_entries)  in save_stack_trace()
  49  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace()
  61  if (trace->nr_entries < trace->max_entries)  in save_stack_trace_tsk()
  62  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace_tsk()
  72  if (trace->nr_entries < trace->max_entries)  in save_stack_trace_regs()
  73  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace_regs()
|
/Linux-v4.19/arch/arm/kernel/ |
D | stacktrace.c |
  85   trace->entries[trace->nr_entries++] = addr;  in save_trace()
  87   if (trace->nr_entries >= trace->max_entries)  in save_trace()
  95   trace->entries[trace->nr_entries++] = regs->ARM_pc;  in save_trace()
  97   return trace->nr_entries >= trace->max_entries;  in save_trace()
  118  if (trace->nr_entries < trace->max_entries)  in __save_stack_trace()
  119  trace->entries[trace->nr_entries++] = ULONG_MAX;  in __save_stack_trace()
  137  if (trace->nr_entries < trace->max_entries)  in __save_stack_trace()
  138  trace->entries[trace->nr_entries++] = ULONG_MAX;  in __save_stack_trace()
  156  if (trace->nr_entries < trace->max_entries)  in save_stack_trace_regs()
  157  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace_regs()
|
/Linux-v4.19/drivers/md/persistent-data/ |
D | dm-btree-remove.c |
  58   uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);  in node_shift() local
  63   BUG_ON(shift > nr_entries);  in node_shift()
  67   (nr_entries - shift) * sizeof(__le64));  in node_shift()
  70   (nr_entries - shift) * value_size);  in node_shift()
  72   BUG_ON(nr_entries + shift > le32_to_cpu(n->header.max_entries));  in node_shift()
  75   nr_entries * sizeof(__le64));  in node_shift()
  78   nr_entries * value_size);  in node_shift()
  84   uint32_t nr_left = le32_to_cpu(left->header.nr_entries);  in node_copy()
  113  unsigned nr_entries = le32_to_cpu(n->header.nr_entries);  in delete_at() local
  114  unsigned nr_to_copy = nr_entries - (index + 1);  in delete_at()
  [all …]
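
dm-btree-remove.c keeps node arrays packed by memmove()ing nr_entries-relative slices of the key and value arrays. A small sketch of the delete_at() idea, closing the gap above index and decrementing the count, using flat in-memory arrays rather than the on-disk __le64 layout:

    #include <stdint.h>
    #include <string.h>

    struct node {
        uint32_t nr_entries;
        uint64_t keys[64];
        uint64_t values[64];
    };

    /* Remove entry "index" by copying the (nr_entries - index - 1) entries
     * above it down one slot, then shrink the count. */
    void delete_at(struct node *n, unsigned index)
    {
        unsigned nr_entries = n->nr_entries;
        unsigned nr_to_copy = nr_entries - (index + 1);

        if (nr_to_copy) {
            memmove(&n->keys[index], &n->keys[index + 1],
                    nr_to_copy * sizeof(n->keys[0]));
            memmove(&n->values[index], &n->values[index + 1],
                    nr_to_copy * sizeof(n->values[0]));
        }
        n->nr_entries = nr_entries - 1;
    }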
|
D | dm-btree.c |
  43   int lo = -1, hi = le32_to_cpu(n->header.nr_entries);  in bsearch()
  75   uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);  in inc_children() local
  78   for (i = 0; i < nr_entries; i++)  in inc_children()
  81   for (i = 0; i < nr_entries; i++)  in inc_children()
  89   uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);  in insert_at() local
  92   if (index > nr_entries ||  in insert_at()
  101  array_insert(node->keys, sizeof(*node->keys), nr_entries, index, &key_le);  in insert_at()
  102  array_insert(value_base(node), value_size, nr_entries, index, value);  in insert_at()
  103  node->header.nr_entries = cpu_to_le32(nr_entries + 1);  in insert_at()
  144  n->header.nr_entries = cpu_to_le32(0);  in dm_btree_empty()
  [all …]
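
The bsearch() hit uses sentinels lo = -1 and hi = nr_entries so the loop converges on the largest key not greater than the target. A standalone lower-bound search over a plain uint64_t key array that mirrors that shape (no little-endian conversion):

    #include <stdint.h>

    /* Returns the largest index i with keys[i] <= key, or -1 if key is
     * smaller than every entry; mirrors dm-btree's lo/hi sentinel search. */
    int lower_bound(const uint64_t *keys, uint32_t nr_entries, uint64_t key)
    {
        int lo = -1, hi = nr_entries;

        while (hi - lo > 1) {
            int mid = lo + (hi - lo) / 2;

            if (keys[mid] <= key)
                lo = mid;
            else
                hi = mid;
        }
        return lo;
    }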
|
D | dm-array.c |
  26   __le32 nr_entries;  member
  113  unsigned i, nr_entries = le32_to_cpu(ab->nr_entries);  in on_entries() local
  115  for (i = 0; i < nr_entries; i++)  in on_entries()
  164  (*ab)->nr_entries = cpu_to_le32(0);  in alloc_ablock()
  179  uint32_t nr_entries;  in fill_ablock() local
  183  BUG_ON(new_nr < le32_to_cpu(ab->nr_entries));  in fill_ablock()
  185  nr_entries = le32_to_cpu(ab->nr_entries);  in fill_ablock()
  186  for (i = nr_entries; i < new_nr; i++) {  in fill_ablock()
  191  ab->nr_entries = cpu_to_le32(new_nr);  in fill_ablock()
  203  uint32_t nr_entries;  in trim_ablock() local
  [all …]
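
fill_ablock() grows an array block in place: existing entries below nr_entries are untouched, slots from nr_entries up to new_nr get a default value, and only then is the count raised. A plain-C sketch of that grow-with-default step, with an invented fixed-capacity block:

    #include <assert.h>
    #include <stdint.h>

    struct ablock {
        uint32_t nr_entries;
        uint32_t max_entries;
        uint64_t values[256];
    };

    /* Extend ab to new_nr entries, initialising the new tail with default_value. */
    void fill_ablock(struct ablock *ab, uint64_t default_value, uint32_t new_nr)
    {
        uint32_t i;

        assert(new_nr <= ab->max_entries);
        assert(new_nr >= ab->nr_entries);

        for (i = ab->nr_entries; i < new_nr; i++)
            ab->values[i] = default_value;
        ab->nr_entries = new_nr;
    }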
|
/Linux-v4.19/tools/perf/util/ |
D | syscalltbl.c |
  69   int nr_entries = 0, i, j;  in syscalltbl__init_native() local
  74   ++nr_entries;  in syscalltbl__init_native()
  76   entries = tbl->syscalls.entries = malloc(sizeof(struct syscall) * nr_entries);  in syscalltbl__init_native()
  88   qsort(tbl->syscalls.entries, nr_entries, sizeof(struct syscall), syscallcmp);  in syscalltbl__init_native()
  89   tbl->syscalls.nr_entries = nr_entries;  in syscalltbl__init_native()
  119  tbl->syscalls.nr_entries, sizeof(*sc),  in syscalltbl__id()
  130  for (i = *idx + 1; i < tbl->syscalls.nr_entries; ++i) {  in syscalltbl__strglobmatch_next()
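
syscalltbl__init_native() counts the entries, qsort()s them by name once, and later resolves names via bsearch() over nr_entries elements. A self-contained sketch of the same build-then-lookup pattern over a made-up three-entry table:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct syscall { int id; const char *name; };

    static int syscallcmp(const void *va, const void *vb)
    {
        const struct syscall *a = va, *b = vb;

        return strcmp(a->name, b->name);
    }

    int main(void)
    {
        struct syscall tbl[] = {
            { 1, "write" }, { 0, "read" }, { 57, "fork" },
        };
        size_t nr_entries = sizeof(tbl) / sizeof(tbl[0]);
        struct syscall key = { .name = "read" }, *sc;

        qsort(tbl, nr_entries, sizeof(tbl[0]), syscallcmp);      /* sort by name once */
        sc = bsearch(&key, tbl, nr_entries, sizeof(tbl[0]), syscallcmp);
        printf("%s -> %d\n", key.name, sc ? sc->id : -1);
        return 0;
    }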
|
/Linux-v4.19/kernel/trace/ |
D | trace_stack.c |
  52   stack_trace_max.nr_entries);  in stack_trace_print()
  54   for (i = 0; i < stack_trace_max.nr_entries; i++) {  in stack_trace_print()
  57   if (i+1 == stack_trace_max.nr_entries ||  in stack_trace_print()
  113  stack_trace_max.nr_entries = 0;  in check_stack()
  119  for (i = 0; i < stack_trace_max.nr_entries; i++) {  in check_stack()
  128  if (i == stack_trace_max.nr_entries)  in check_stack()
  146  while (i < stack_trace_max.nr_entries) {  in check_stack()
  152  for (; p < top && i < stack_trace_max.nr_entries; p++) {  in check_stack()
  185  stack_trace_max.nr_entries = x;  in check_stack()
  289  if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)  in __next()
  [all …]
|
/Linux-v4.19/arch/mips/kernel/ |
D | stacktrace.c |
  29  trace->entries[trace->nr_entries++] = addr;  in save_raw_context_stack()
  30  if (trace->nr_entries >= trace->max_entries)  in save_raw_context_stack()
  57  trace->entries[trace->nr_entries++] = pc;  in save_context_stack()
  58  if (trace->nr_entries >= trace->max_entries)  in save_context_stack()
  82  WARN_ON(trace->nr_entries || !trace->max_entries);  in save_stack_trace_tsk()
|
/Linux-v4.19/arch/openrisc/kernel/ |
D | stacktrace.c |
  37  if (trace->nr_entries < trace->max_entries)  in save_stack_address()
  38  trace->entries[trace->nr_entries++] = addr;  in save_stack_address()
  63  if (trace->nr_entries < trace->max_entries)  in save_stack_address_nosched()
  64  trace->entries[trace->nr_entries++] = addr;  in save_stack_address_nosched()
|
/Linux-v4.19/drivers/md/ |
D | dm-cache-policy-internal.h |
  115  static inline size_t bitset_size_in_bytes(unsigned nr_entries)  in bitset_size_in_bytes() argument
  117  return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);  in bitset_size_in_bytes()
  120  static inline unsigned long *alloc_bitset(unsigned nr_entries)  in alloc_bitset() argument
  122  size_t s = bitset_size_in_bytes(nr_entries);  in alloc_bitset()
  126  static inline void clear_bitset(void *bitset, unsigned nr_entries)  in clear_bitset() argument
  128  size_t s = bitset_size_in_bytes(nr_entries);  in clear_bitset()
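
bitset_size_in_bytes() rounds one bit per entry up to whole unsigned longs, and alloc_bitset()/clear_bitset() size the allocation and the memset from the same helper. A user-space sketch of those helpers, with calloc() standing in for the kernel allocation:

    #include <limits.h>
    #include <stdlib.h>
    #include <string.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    size_t bitset_size_in_bytes(unsigned nr_entries)
    {
        /* One bit per entry, rounded up to a whole number of longs. */
        return sizeof(unsigned long) *
               ((nr_entries + BITS_PER_LONG - 1) / BITS_PER_LONG);
    }

    unsigned long *alloc_bitset(unsigned nr_entries)
    {
        return calloc(1, bitset_size_in_bytes(nr_entries)); /* zeroed, like vzalloc */
    }

    void clear_bitset(void *bitset, unsigned nr_entries)
    {
        memset(bitset, 0, bitset_size_in_bytes(nr_entries));
    }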
|
/Linux-v4.19/arch/arm64/kernel/ |
D | stacktrace.c |
  123  trace->entries[trace->nr_entries++] = addr;  in save_trace()
  125  return trace->nr_entries >= trace->max_entries;  in save_trace()
  144  if (trace->nr_entries < trace->max_entries)  in save_stack_trace_regs()
  145  trace->entries[trace->nr_entries++] = ULONG_MAX;  in save_stack_trace_regs()
  175  if (trace->nr_entries < trace->max_entries)  in __save_stack_trace()
  176  trace->entries[trace->nr_entries++] = ULONG_MAX;  in __save_stack_trace()
|
/Linux-v4.19/tools/perf/trace/beauty/ |
D | ioctl.c |
  41   if (nr < strarray__ioctl_tty_cmd.nr_entries && strarray__ioctl_tty_cmd.entries[nr] != NULL)  in ioctl__scnprintf_tty_cmd()
  52   if (nr < strarray__drm_ioctl_cmds.nr_entries && strarray__drm_ioctl_cmds.entries[nr] != NULL)  in ioctl__scnprintf_drm_cmd()
  63   …if (nr < strarray__sndrv_pcm_ioctl_cmds.nr_entries && strarray__sndrv_pcm_ioctl_cmds.entries[nr] !…  in ioctl__scnprintf_sndrv_pcm_cmd()
  74   …if (nr < strarray__sndrv_ctl_ioctl_cmds.nr_entries && strarray__sndrv_ctl_ioctl_cmds.entries[nr] !…  in ioctl__scnprintf_sndrv_ctl_cmd()
  85   if (nr < strarray__kvm_ioctl_cmds.nr_entries && strarray__kvm_ioctl_cmds.entries[nr] != NULL)  in ioctl__scnprintf_kvm_cmd()
  98   if (nr < s->nr_entries && s->entries[nr] != NULL)  in ioctl__scnprintf_vhost_virtio_cmd()
  109  if (nr < strarray__perf_ioctl_cmds.nr_entries && strarray__perf_ioctl_cmds.entries[nr] != NULL)  in ioctl__scnprintf_perf_cmd()
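
Every ioctl.c helper above guards its table lookup the same way: the command number must be below nr_entries and the slot non-NULL before the symbolic name is printed; otherwise the raw number is used. A compact sketch of that lookup, with an assumed strarray layout, an invented helper name, and snprintf standing in for perf's scnprintf:

    #include <stdio.h>

    struct strarray {
        unsigned int nr_entries;
        const char **entries;
    };

    /* Print the symbolic name when the table has one, else the raw number. */
    int scnprintf_strarray(char *bf, size_t size,
                           const struct strarray *sa, unsigned int nr)
    {
        if (nr < sa->nr_entries && sa->entries[nr] != NULL)
            return snprintf(bf, size, "%s", sa->entries[nr]);

        return snprintf(bf, size, "%u", nr);
    }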
|
/Linux-v4.19/arch/um/kernel/ |
D | stacktrace.c |
  53  if (trace->nr_entries >= trace->max_entries)  in save_addr()
  56  trace->entries[trace->nr_entries++] = address;  in save_addr()
  66  if (trace->nr_entries < trace->max_entries)  in __save_stack_trace()
  67  trace->entries[trace->nr_entries++] = ULONG_MAX;  in __save_stack_trace()
|
/Linux-v4.19/drivers/net/ethernet/mellanox/mlxsw/ |
D | spectrum1_kvdl.c |
  122  unsigned int entry_index, nr_entries;  in mlxsw_sp1_kvdl_part_alloc() local
  124  nr_entries = (info->end_index - info->start_index + 1) /  in mlxsw_sp1_kvdl_part_alloc()
  126  entry_index = find_first_zero_bit(part->usage, nr_entries);  in mlxsw_sp1_kvdl_part_alloc()
  127  if (entry_index == nr_entries)  in mlxsw_sp1_kvdl_part_alloc()
  215  unsigned int nr_entries;  in mlxsw_sp1_kvdl_part_init() local
  227  nr_entries = div_u64(resource_size, info->alloc_size);  in mlxsw_sp1_kvdl_part_init()
  228  usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);  in mlxsw_sp1_kvdl_part_init()
  281  unsigned int nr_entries;  in mlxsw_sp1_kvdl_part_occ() local
  285  nr_entries = (info->end_index -  in mlxsw_sp1_kvdl_part_occ()
  288  while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))  in mlxsw_sp1_kvdl_part_occ()
  [all …]
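
The KVDL part allocator derives nr_entries from the index range, looks for a free slot with find_first_zero_bit(), and treats a result equal to nr_entries as "no space". A user-space stand-in for that scan, using a naive bit loop instead of the kernel bitmap helpers:

    #include <limits.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* Naive find_first_zero_bit(): returns nr_entries when every bit is set. */
    unsigned int find_first_zero(const unsigned long *usage, unsigned int nr_entries)
    {
        unsigned int i;

        for (i = 0; i < nr_entries; i++)
            if (!(usage[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
                return i;
        return nr_entries;
    }

    int part_alloc(unsigned long *usage, unsigned int nr_entries)
    {
        unsigned int entry_index = find_first_zero(usage, nr_entries);

        if (entry_index == nr_entries)
            return -1;  /* no free entry; the driver returns -ENOBUFS here */
        usage[entry_index / BITS_PER_LONG] |= 1UL << (entry_index % BITS_PER_LONG);
        return (int)entry_index;
    }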
|
/Linux-v4.19/tools/perf/ui/ |
D | browser.c |
  109  if (browser->nr_entries == 0)  in ui_browser__list_head_seek()
  317  if (browser->nr_entries > 1) {  in ui_browser__scrollbar_set()
  319  (browser->nr_entries - 1));  in ui_browser__scrollbar_set()
  367  void ui_browser__update_nr_entries(struct ui_browser *browser, u32 nr_entries)  in ui_browser__update_nr_entries() argument
  369  off_t offset = nr_entries - browser->nr_entries;  in ui_browser__update_nr_entries()
  371  browser->nr_entries = nr_entries;  in ui_browser__update_nr_entries()
  423  if (browser->index == browser->nr_entries - 1)  in ui_browser__run()
  454  if (browser->top_idx + browser->rows > browser->nr_entries - 1)  in ui_browser__run()
  458  if (browser->index + offset > browser->nr_entries - 1)  in ui_browser__run()
  459  offset = browser->nr_entries - 1 - browser->index;  in ui_browser__run()
  [all …]
|
/Linux-v4.19/tools/include/linux/ |
D | stacktrace.h |
  8   unsigned int nr_entries, max_entries;  member
  15  backtrace_symbols_fd((void **)trace->entries, trace->nr_entries, 1);  in print_stack_trace()
  19  ((trace)->nr_entries = \
|
/Linux-v4.19/arch/sparc/kernel/ |
D | stacktrace.c |
  57  trace->entries[trace->nr_entries++] = pc;  in __save_stack_trace()
  63  if (trace->nr_entries <  in __save_stack_trace()
  65  trace->entries[trace->nr_entries++] = pc;  in __save_stack_trace()
  71  } while (trace->nr_entries < trace->max_entries);  in __save_stack_trace()
|