| /Linux-v6.1/fs/xfs/ |
| D | xfs_stats.h |
    165  per_cpu_ptr(xfsstats.xs_stats, current_cpu())->s.v++; \
    166  per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->s.v++; \
    171  per_cpu_ptr(xfsstats.xs_stats, current_cpu())->s.v--; \
    172  per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->s.v--; \
    177  per_cpu_ptr(xfsstats.xs_stats, current_cpu())->s.v += (inc); \
    178  per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->s.v += (inc); \
    183  per_cpu_ptr(xfsstats.xs_stats, current_cpu())->a[off]++; \
    184  per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->a[off]++; \
    189  per_cpu_ptr(xfsstats.xs_stats, current_cpu())->a[off]; \
    190  per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->a[off]; \
    [all …]
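
These hits are the bodies of XFS's per-CPU statistics macros: each event bumps both a global copy (`xfsstats`) and a per-mount copy (`mp->m_stats`) of the counters, each allocated per CPU so the hot path never takes a lock. Below is a minimal sketch of the same increment pattern, assuming a dynamically allocated per-CPU stats struct; `my_stats`, `my_stats_pcpu`, and `nr_foo` are hypothetical names, not XFS's:

```c
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/types.h>

/* Hypothetical stand-in for the XFS stats struct. */
struct my_stats {
	__u32 nr_foo;
};

static struct my_stats __percpu *my_stats_pcpu;

static int __init my_stats_init(void)
{
	/* One struct my_stats instance per possible CPU. */
	my_stats_pcpu = alloc_percpu(struct my_stats);
	return my_stats_pcpu ? 0 : -ENOMEM;
}

static inline void my_stats_inc_foo(void)
{
	int cpu = get_cpu();	/* disable preemption; stay on one CPU's copy */

	per_cpu_ptr(my_stats_pcpu, cpu)->nr_foo++;
	put_cpu();
}
```

Because each CPU only ever increments its own copy, no atomic operations are needed on the update path; the cost moves to the (rare) read side, shown next.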
|
| D | xfs_stats.c |
     15  val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));  in counter_val()
     71  xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes;  in xfs_stats_format()
     72  xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;  in xfs_stats_format()
     73  xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;  in xfs_stats_format()
     74  defer_relog += per_cpu_ptr(stats, i)->s.defer_relog;  in xfs_stats_format()
    100  vn_active = per_cpu_ptr(stats, c)->s.vn_active;  in xfs_stats_clearall()
    101  memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));  in xfs_stats_clearall()
    102  per_cpu_ptr(stats, c)->s.vn_active = vn_active;  in xfs_stats_clearall()
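
The .c side is the read path: a per-CPU counter is only meaningful once summed over every CPU, which is what `xfs_stats_format()` does above. A hedged sketch of that aggregation loop, reusing the hypothetical `my_stats` from the previous example (`for_each_possible_cpu` comes from `<linux/cpumask.h>`):

```c
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>

/* Sum one hypothetical counter across all CPUs; mirrors the pattern in
 * xfs_stats_format() above, not XFS's actual code. */
static u64 my_stats_sum_foo(struct my_stats __percpu *stats)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(stats, cpu)->nr_foo;

	return sum;
}
```

Note the trick in `xfs_stats_clearall()` above: `vn_active` counts currently live objects rather than events, so it is saved across the `memset()` and written back instead of being zeroed.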
|
| /Linux-v6.1/arch/x86/events/amd/ |
| D | uncore.c |
     75  return *per_cpu_ptr(amd_uncore_nb, event->cpu);  in event_to_amd_uncore()
     77  return *per_cpu_ptr(amd_uncore_llc, event->cpu);  in event_to_amd_uncore()
    446  *per_cpu_ptr(amd_uncore_nb, cpu) = NULL;  in amd_uncore_cpu_up_prepare()
    460  *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;  in amd_uncore_cpu_up_prepare()
    464  *per_cpu_ptr(amd_uncore_llc, cpu) = NULL;  in amd_uncore_cpu_up_prepare()
    478  *per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;  in amd_uncore_cpu_up_prepare()
    505  that = *per_cpu_ptr(uncores, cpu);  in amd_uncore_find_online_sibling()
    530  uncore = *per_cpu_ptr(amd_uncore_nb, cpu);  in amd_uncore_cpu_starting()
    535  *per_cpu_ptr(amd_uncore_nb, cpu) = uncore;  in amd_uncore_cpu_starting()
    539  uncore = *per_cpu_ptr(amd_uncore_llc, cpu);  in amd_uncore_cpu_starting()
    [all …]
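
Here the per-CPU object is itself a pointer: `amd_uncore_nb` is a per-CPU pointer slot, so `*per_cpu_ptr(...)` names the slot for a given CPU, and assigning to it installs or clears that CPU's object (the double dereference is why the `*` appears in every hit). A sketch of the same slot pattern with hypothetical names (`my_obj`, `my_obj_slots`):

```c
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/topology.h>

struct my_obj {
	int id;
};

/* A per-CPU slot holding a pointer, as with amd_uncore_nb above;
 * set up at init with: my_obj_slots = alloc_percpu(struct my_obj *); */
static struct my_obj * __percpu *my_obj_slots;

static int my_obj_cpu_up_prepare(unsigned int cpu)
{
	struct my_obj *obj;

	/* Allocate on the memory node local to the target CPU. */
	obj = kzalloc_node(sizeof(*obj), GFP_KERNEL, cpu_to_node(cpu));
	if (!obj)
		return -ENOMEM;

	*per_cpu_ptr(my_obj_slots, cpu) = obj;	/* install for this CPU */
	return 0;
}
```

The indirection lets several CPUs share one object: as in `amd_uncore_find_online_sibling()` above, multiple slots can be pointed at the same allocation.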
|
| /Linux-v6.1/include/linux/ |
| D | context_tracking_state.h |
     64  struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);  in ct_dynticks_cpu()
     71  struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);  in ct_dynticks_cpu_acquire()
     83  struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);  in ct_dynticks_nesting_cpu()
     95  struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);  in ct_dynticks_nmi_nesting_cpu()
|
| D | part_stat.h |
     29  (per_cpu_ptr((part)->bd_stats, (cpu))->field)
     39  res += per_cpu_ptr((part)->bd_stats, _cpu)->field;  \
     48  memset(per_cpu_ptr(part->bd_stats, i), value,  in part_stat_set_all()
|
| /Linux-v6.1/drivers/infiniband/ulp/rtrs/ |
| D | rtrs-clt-stats.c |
     27  s = per_cpu_ptr(stats->pcpu_stats, con->cpu);  in rtrs_clt_update_wc_stats()
     47  s = per_cpu_ptr(stats->pcpu_stats, cpu);  in rtrs_clt_stats_migration_from_cnt_to_str()
     66  s = per_cpu_ptr(stats->pcpu_stats, cpu);  in rtrs_clt_stats_migration_to_cnt_to_str()
     90  r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma;  in rtrs_clt_stats_rdma_to_str()
    119  s = per_cpu_ptr(stats->pcpu_stats, cpu);  in rtrs_clt_reset_rdma_stats()
    135  s = per_cpu_ptr(stats->pcpu_stats, cpu);  in rtrs_clt_reset_cpu_migr_stats()
|
| D | rtrs-srv-stats.c |
     21  r = per_cpu_ptr(stats->rdma_stats, cpu);  in rtrs_srv_reset_rdma_stats()
     40  r = per_cpu_ptr(stats->rdma_stats, cpu);  in rtrs_srv_stats_rdma_to_str()
|
| /Linux-v6.1/kernel/sched/ |
| D | cpuacct.c |
     97  u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);  in cpuacct_cpuusage_read()
     98  u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;  in cpuacct_cpuusage_read()
    137  u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);  in cpuacct_cpuusage_write()
    138  u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;  in cpuacct_cpuusage_write()
    270  u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;  in cpuacct_stats_show()
    278  cputime.sum_exec_runtime += *per_cpu_ptr(ca->cpuusage, cpu);  in cpuacct_stats_show()
    342  *per_cpu_ptr(ca->cpuusage, cpu) += cputime;  in cpuacct_charge()
|
| D | topology.c |
    908  sibling = *per_cpu_ptr(sdd->sd, i);  in build_balance_mask()
    969  sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);  in init_overlap_sched_group()
   1029  sibling = *per_cpu_ptr(sdd->sd, i);  in build_overlap_sched_groups()
   1179  struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);  in get_group()
   1187  sg = *per_cpu_ptr(sdd->sg, cpu);  in get_group()
   1188  sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);  in get_group()
   1493  WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);  in claim_allocations()
   1494  *per_cpu_ptr(sdd->sd, cpu) = NULL;  in claim_allocations()
   1496  if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))  in claim_allocations()
   1497  *per_cpu_ptr(sdd->sds, cpu) = NULL;  in claim_allocations()
    [all …]
|
| /Linux-v6.1/kernel/bpf/ |
| D | percpu_freelist.c |
     15  struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);  in pcpu_freelist_init()
     66  head = per_cpu_ptr(s->freelist, cpu);  in ___pcpu_freelist_push_nmi()
    110  head = per_cpu_ptr(s->freelist, cpu);  in pcpu_freelist_populate()
    128  head = per_cpu_ptr(s->freelist, cpu);  in ___pcpu_freelist_pop()
    160  head = per_cpu_ptr(s->freelist, cpu);  in ___pcpu_freelist_pop_nmi()
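
`pcpu_freelist_init()` walks every possible CPU once to set up that CPU's list head; the push/pop paths then select a head by CPU. A hedged sketch of such an init-time walk, with a hypothetical lock-protected head type standing in for `struct pcpu_freelist_head` (the field layout here is illustrative, not BPF's):

```c
#include <linux/errno.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for a per-CPU freelist head. */
struct my_list_head {
	raw_spinlock_t lock;
	void *first;
};

static int my_freelist_init(struct my_list_head __percpu **listp)
{
	struct my_list_head __percpu *list;
	int cpu;

	list = alloc_percpu(struct my_list_head);
	if (!list)
		return -ENOMEM;

	/* Initialize every CPU's head once, before any push/pop runs. */
	for_each_possible_cpu(cpu) {
		struct my_list_head *head = per_cpu_ptr(list, cpu);

		raw_spin_lock_init(&head->lock);
		head->first = NULL;
	}

	*listp = list;
	return 0;
}
```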
|
| D | bpf_lru_list.c |
    408  l = per_cpu_ptr(lru->percpu_lru, cpu);  in bpf_percpu_lru_pop_free()
    441  loc_l = per_cpu_ptr(clru->local_list, cpu);  in bpf_common_lru_pop_free()
    470  steal_loc_l = per_cpu_ptr(clru->local_list, steal);  in bpf_common_lru_pop_free()
    515  loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);  in bpf_common_lru_push_free()
    542  l = per_cpu_ptr(lru->percpu_lru, node->cpu);  in bpf_percpu_lru_push_free()
    592  l = per_cpu_ptr(lru->percpu_lru, cpu);  in bpf_percpu_lru_populate()
    659  l = per_cpu_ptr(lru->percpu_lru, cpu);  in bpf_lru_init()
    673  loc_l = per_cpu_ptr(clru->local_list, cpu);  in bpf_lru_init()
|
| /Linux-v6.1/kernel/irq/ |
| D | matrix.c |
    138  cm = per_cpu_ptr(m->maps, cpu);  in matrix_find_best_cpu()
    159  cm = per_cpu_ptr(m->maps, cpu);  in matrix_find_best_cpu_managed()
    215  struct cpumap *cm = per_cpu_ptr(m->maps, cpu);  in irq_matrix_reserve_managed()
    256  struct cpumap *cm = per_cpu_ptr(m->maps, cpu);  in irq_matrix_remove_managed()
    299  cm = per_cpu_ptr(m->maps, cpu);  in irq_matrix_alloc_managed()
    394  cm = per_cpu_ptr(m->maps, cpu);  in irq_matrix_alloc()
    421  struct cpumap *cm = per_cpu_ptr(m->maps, cpu);  in irq_matrix_free()
    504  struct cpumap *cm = per_cpu_ptr(m->maps, cpu);  in irq_matrix_debug_show()
|
| /Linux-v6.1/kernel/ |
| D | smpboot.c |
    172  struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);  in __smpboot_create_thread()
    197  *per_cpu_ptr(ht->store, cpu) = tsk;  in __smpboot_create_thread()
    230  struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);  in smpboot_unpark_thread()
    249  struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);  in smpboot_park_thread()
    272  struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);  in smpboot_destroy_threads()
    277  *per_cpu_ptr(ht->store, cpu) = NULL;  in smpboot_destroy_threads()
|
| D | relay.c |
    204  *per_cpu_ptr(chan->buf, buf->cpu) = NULL;  in relay_destroy_buf()
    327  if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {  in relay_reset()
    334  if ((buf = *per_cpu_ptr(chan->buf, i)))  in relay_reset()
    382  return *per_cpu_ptr(chan->buf, 0);  in relay_open_buf()
    406  *per_cpu_ptr(chan->buf, 0) = buf;  in relay_open_buf()
    440  if (*per_cpu_ptr(chan->buf, cpu))  in relay_prepare_cpu()
    448  *per_cpu_ptr(chan->buf, cpu) = buf;  in relay_prepare_cpu()
    520  *per_cpu_ptr(chan->buf, i) = buf;  in relay_open()
    529  if ((buf = *per_cpu_ptr(chan->buf, i)))  in relay_open()
    594  buf = *per_cpu_ptr(chan->buf, 0);  in relay_late_setup_files()
    [all …]
|
| D | cpu.c |
    171  struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  in cpuhp_invoke_callback()
    563  struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  in bringup_wait_for_ap()
    811  struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  in cpuhp_invoke_ap_callback()
    860  struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  in cpuhp_kick_ap_work()
    891  st = per_cpu_ptr(&cpuhp_state, cpu);  in cpuhp_init_state()
   1034  struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  in takedown_cpu()
   1129  struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  in _cpu_down()
   1297  struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  in notify_cpu_starting()
   1337  struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  in _cpu_up()
   1959  struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  in cpuhp_rollback_install()
    [all …]
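
Unlike the dynamically allocated cases above, `cpuhp_state` is a static per-CPU variable, which is why the call sites pass `&cpuhp_state`: the address of a `DEFINE_PER_CPU()` variable is a per-CPU pointer that `per_cpu_ptr()` resolves for any given CPU. A minimal sketch of that pattern, with a hypothetical `my_hp_state` standing in for the real hotplug state:

```c
#include <linux/percpu.h>

/* Hypothetical per-CPU state, statically defined like cpuhp_state. */
struct my_hp_state {
	int stage;
};

static DEFINE_PER_CPU(struct my_hp_state, my_hp_state_var);

/* Read another CPU's copy, as the cpu.c call sites above do. */
static int my_hp_stage_of(unsigned int cpu)
{
	struct my_hp_state *st = per_cpu_ptr(&my_hp_state_var, cpu);

	return st->stage;
}
```

This cross-CPU access is the point of `per_cpu_ptr()`: hotplug control code running on one CPU inspects and drives the state of the CPU being brought up or torn down.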
|
| /Linux-v6.1/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/ |
| D | percpu.h |
     28  #define per_cpu_ptr(ptr, cpu) \  macro
     69  THIS_CPU_ADD_HELPER(per_cpu_ptr(&(pcp), thread_cpu_id), \
     76  THIS_CPU_ADD_HELPER(per_cpu_ptr(&(pcp), this_cpu_add_impl_cpu), \
|
| /Linux-v6.1/fs/squashfs/ |
| D | decompressor_multi_percpu.c |
     40  stream = per_cpu_ptr(percpu, cpu);  in squashfs_decompressor_create()
     54  stream = per_cpu_ptr(percpu, cpu);  in squashfs_decompressor_create()
     71  stream = per_cpu_ptr(percpu, cpu);  in squashfs_decompressor_destroy()
|
| /Linux-v6.1/drivers/powercap/ |
| D | idle_inject.c |
     92  iit = per_cpu_ptr(&idle_inject_thread, cpu);  in idle_inject_wakeup()
    137  iit = per_cpu_ptr(&idle_inject_thread, cpu);  in idle_inject_fn()
    254  iit = per_cpu_ptr(&idle_inject_thread, cpu);  in idle_inject_stop()
    284  per_cpu_ptr(&idle_inject_thread, cpu);  in idle_inject_should_run()
|
| /Linux-v6.1/kernel/trace/ |
| D | trace_functions_graph.c |
    173  data = per_cpu_ptr(tr->array_buffer.data, cpu);  in trace_graph_entry()
    253  data = per_cpu_ptr(tr->array_buffer.data, cpu);  in trace_graph_return()
    391  last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);  in verif_pid()
    644  cpu_data = per_cpu_ptr(data->cpu_data, cpu);  in print_graph_entry_leaf()
    688  cpu_data = per_cpu_ptr(data->cpu_data, cpu);  in print_graph_entry_nested()
    788  depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);  in check_irq_entry()
    834  depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);  in check_irq_return()
    918  cpu_data = per_cpu_ptr(data->cpu_data, cpu);  in print_graph_return()
    980  depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;  in print_graph_comment()
   1046  if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {  in print_graph_function_flags()
    [all …]
|
| /Linux-v6.1/drivers/clocksource/ |
| D | timer-mp-csky.c |
     78  struct timer_of *to = per_cpu_ptr(&csky_to, cpu);  in csky_mptimer_starting_cpu()
    145  to = per_cpu_ptr(&csky_to, cpu);  in csky_mptimer_init()
    168  to = per_cpu_ptr(&csky_to, cpu_rollback);  in csky_mptimer_init()
|
| /Linux-v6.1/drivers/hv/ |
| D | hv.c |
    129  hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);  in hv_synic_alloc()
    141  hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);  in hv_synic_alloc()
    189  = per_cpu_ptr(hv_context.cpu_context, cpu);  in hv_synic_free()
    209  = per_cpu_ptr(hv_context.cpu_context, cpu);  in hv_synic_enable_regs()
    294  = per_cpu_ptr(hv_context.cpu_context, cpu);  in hv_synic_disable_regs()
|
| /Linux-v6.1/tools/testing/radix-tree/linux/ |
| D | percpu.h |
     10  #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })  macro
     11  #define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))
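
The radix-tree test harness runs kernel code in userspace on a single simulated CPU, so its `per_cpu_ptr()` stub only needs to evaluate (and discard) the `cpu` argument and hand back the pointer unchanged; `per_cpu()` is then layered on top exactly as in the kernel. Compiled against that stub, a hypothetical caller like the one below always gets the same object regardless of `cpu`:

```c
/* Hypothetical caller, built against the test stub above. */
#include "linux/percpu.h"

static int counter;	/* stands in for a per-CPU counter */

int *counter_for_cpu(int cpu)
{
	/* Expands to ({ (void)(cpu); (&counter); }):
	 * the same object for every cpu value. */
	return per_cpu_ptr(&counter, cpu);
}
```

The `(void)(cpu)` cast matters: it keeps the argument evaluated (and silences unused-value warnings) so code written for the real macro compiles cleanly under the stub.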
|
| /Linux-v6.1/arch/x86/kernel/ |
| D | irq_64.c |
     37  char *stack = (char *)per_cpu_ptr(&irq_stack_backing_store, cpu);  in map_irq_stack()
     63  void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);  in map_irq_stack()
|
| D | kgdb.c |
    208  bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);  in kgdb_correct_hw_break()
    237  pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);  in hw_break_reserve_slot()
    249  pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);  in hw_break_reserve_slot()
    264  pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);  in hw_break_release_slot()
    304  bp = *per_cpu_ptr(breakinfo[i].pev, cpu);  in kgdb_remove_all_hw_break()
    397  bp = *per_cpu_ptr(breakinfo[i].pev, cpu);  in kgdb_disable_hw_debug()
    666  pevent = per_cpu_ptr(breakinfo[i].pev, cpu);  in kgdb_arch_late()
|
| /Linux-v6.1/net/netfilter/ |
| D | nf_flow_table_procfs.c |
     18  return per_cpu_ptr(net->ft.stat, cpu);  in nf_flow_table_cpu_seq_start()
     33  return per_cpu_ptr(net->ft.stat, cpu);  in nf_flow_table_cpu_seq_next()
|