/Linux-v4.19/fs/xfs/

D | xfs_stats.h |
    186   per_cpu_ptr(xfsstats.xs_stats, current_cpu())->s.v++; \
    187   per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->s.v++; \
    192   per_cpu_ptr(xfsstats.xs_stats, current_cpu())->s.v--; \
    193   per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->s.v--; \
    198   per_cpu_ptr(xfsstats.xs_stats, current_cpu())->s.v += (inc); \
    199   per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->s.v += (inc); \
    204   per_cpu_ptr(xfsstats.xs_stats, current_cpu())->a[off]++; \
    205   per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->a[off]++; \
    210   per_cpu_ptr(xfsstats.xs_stats, current_cpu())->a[off]; \
    211   per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->a[off]; \
    [all …]

D | xfs_stats.c |
    16    val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));  in counter_val()
    71    xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes;  in xfs_stats_format()
    72    xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;  in xfs_stats_format()
    73    xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;  in xfs_stats_format()
    97    vn_active = per_cpu_ptr(stats, c)->s.vn_active;  in xfs_stats_clearall()
    98    memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));  in xfs_stats_clearall()
    99    per_cpu_ptr(stats, c)->s.vn_active = vn_active;  in xfs_stats_clearall()
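
The xfs_stats.h macros above bump a field in the calling CPU's private copy of the statistics structure, and xfs_stats_format()/xfs_stats_clearall() walk every CPU's copy when the numbers are read or reset. A minimal sketch of that pattern follows (kernel context assumed; struct my_stats and the helper names are invented for illustration, not XFS symbols):

/* Illustrative per-CPU counter pattern, modelled on the XFS stats code above. */
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/types.h>

struct my_stats {
	u64	nr_reads;
	u64	nr_writes;
};

static struct my_stats __percpu *my_stats;	/* from alloc_percpu(struct my_stats) */

/* Fast path: only the running CPU's copy is touched, so no shared cache line
 * bounces and no lock.  Like the XFS macros, this tolerates the rare migration
 * between reading the CPU id and the increment: statistics may be slightly off. */
static inline void my_stats_inc_reads(void)
{
	per_cpu_ptr(my_stats, raw_smp_processor_id())->nr_reads++;
}

/* Slow path: a reader folds all per-CPU copies into one total. */
static u64 my_stats_total_reads(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(my_stats, cpu)->nr_reads;
	return sum;
}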
/Linux-v4.19/arch/x86/events/amd/

D | uncore.c |
    78    return *per_cpu_ptr(amd_uncore_nb, event->cpu);  in event_to_amd_uncore()
    80    return *per_cpu_ptr(amd_uncore_llc, event->cpu);  in event_to_amd_uncore()
    343   *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;  in amd_uncore_cpu_up_prepare()
    357   *per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;  in amd_uncore_cpu_up_prepare()
    364   *per_cpu_ptr(amd_uncore_nb, cpu) = NULL;  in amd_uncore_cpu_up_prepare()
    377   that = *per_cpu_ptr(uncores, cpu);  in amd_uncore_find_online_sibling()
    402   uncore = *per_cpu_ptr(amd_uncore_nb, cpu);  in amd_uncore_cpu_starting()
    407   *per_cpu_ptr(amd_uncore_nb, cpu) = uncore;  in amd_uncore_cpu_starting()
    411   uncore = *per_cpu_ptr(amd_uncore_llc, cpu);  in amd_uncore_cpu_starting()
    415   *per_cpu_ptr(amd_uncore_llc, cpu) = uncore;  in amd_uncore_cpu_starting()
    [all …]
/Linux-v4.19/kernel/sched/

D | topology.c |
    622   sibling = *per_cpu_ptr(sdd->sd, i);  in build_balance_mask()
    681   sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);  in init_overlap_sched_group()
    715   sibling = *per_cpu_ptr(sdd->sd, i);  in build_overlap_sched_groups()
    830   struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);  in get_group()
    837   sg = *per_cpu_ptr(sdd->sg, cpu);  in get_group()
    838   sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);  in get_group()
    1029  WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);  in claim_allocations()
    1030  *per_cpu_ptr(sdd->sd, cpu) = NULL;  in claim_allocations()
    1032  if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))  in claim_allocations()
    1033  *per_cpu_ptr(sdd->sds, cpu) = NULL;  in claim_allocations()
    [all …]
/Linux-v4.19/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/

D | percpu.h |
    28    #define per_cpu_ptr(ptr, cpu) \  macro
    69    THIS_CPU_ADD_HELPER(per_cpu_ptr(&(pcp), thread_cpu_id), \
    76    THIS_CPU_ADD_HELPER(per_cpu_ptr(&(pcp), this_cpu_add_impl_cpu), \
/Linux-v4.19/fs/squashfs/

D | decompressor_multi_percpu.c |
    40    stream = per_cpu_ptr(percpu, cpu);  in squashfs_decompressor_create()
    53    stream = per_cpu_ptr(percpu, cpu);  in squashfs_decompressor_create()
    70    stream = per_cpu_ptr(percpu, cpu);  in squashfs_decompressor_destroy()
/Linux-v4.19/kernel/

D | smpboot.c |
    172   struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);  in __smpboot_create_thread()
    196   *per_cpu_ptr(ht->store, cpu) = tsk;  in __smpboot_create_thread()
    229   struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);  in smpboot_unpark_thread()
    248   struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);  in smpboot_park_thread()
    271   struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);  in smpboot_destroy_threads()
    276   *per_cpu_ptr(ht->store, cpu) = NULL;  in smpboot_destroy_threads()

D | relay.c |
    218   *per_cpu_ptr(chan->buf, buf->cpu) = NULL;  in relay_destroy_buf()
    395   if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {  in relay_reset()
    402   if ((buf = *per_cpu_ptr(chan->buf, i)))  in relay_reset()
    448   return *per_cpu_ptr(chan->buf, 0);  in relay_open_buf()
    472   *per_cpu_ptr(chan->buf, 0) = buf;  in relay_open_buf()
    527   if ((buf = *per_cpu_ptr(chan->buf, cpu)))  in relay_prepare_cpu()
    535   *per_cpu_ptr(chan->buf, cpu) = buf;  in relay_prepare_cpu()
    600   *per_cpu_ptr(chan->buf, i) = buf;  in relay_open()
    609   if ((buf = *per_cpu_ptr(chan->buf, i)))  in relay_open()
    674   buf = *per_cpu_ptr(chan->buf, 0);  in relay_late_setup_files()
    [all …]

D | cpu.c |
    146   struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  in cpuhp_invoke_callback()
    497   struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  in bringup_wait_for_ap()
    577   struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  in cpuhp_create()
    674   struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  in cpuhp_invoke_ap_callback()
    723   struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  in cpuhp_kick_ap_work()
    834   struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  in takedown_cpu()
    838   kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);  in takedown_cpu()
    854   kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);  in takedown_cpu()
    931   struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  in _cpu_down()
    1024  struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  in notify_cpu_starting()
    [all …]
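
smpboot.c and relay.c show a second shape of the same API: the per-CPU object is itself a pointer (ht->store, chan->buf), so every access dereferences per_cpu_ptr() once more with a leading '*' to read or assign the slot. A hedged sketch of that "per-CPU pointer slot" idiom, with invented names:

/* Sketch only: one pointer-sized slot per CPU, filled in as objects are created. */
#include <linux/errno.h>
#include <linux/percpu.h>

struct worker;					/* opaque payload type (illustrative) */

static struct worker * __percpu *worker_slot;	/* from alloc_percpu(struct worker *) */

static int worker_slot_install(unsigned int cpu, struct worker *w)
{
	/* *per_cpu_ptr(...) names the slot itself; assigning it publishes the pointer. */
	if (*per_cpu_ptr(worker_slot, cpu))
		return -EBUSY;
	*per_cpu_ptr(worker_slot, cpu) = w;
	return 0;
}

static struct worker *worker_slot_take(unsigned int cpu)
{
	struct worker *w = *per_cpu_ptr(worker_slot, cpu);

	*per_cpu_ptr(worker_slot, cpu) = NULL;
	return w;
}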
/Linux-v4.19/kernel/irq/

D | matrix.c |
    172   struct cpumap *cm = per_cpu_ptr(m->maps, cpu);  in irq_matrix_reserve_managed()
    213   struct cpumap *cm = per_cpu_ptr(m->maps, cpu);  in irq_matrix_remove_managed()
    244   struct cpumap *cm = per_cpu_ptr(m->maps, cpu);  in irq_matrix_alloc_managed()
    331   cm = per_cpu_ptr(m->maps, cpu);  in irq_matrix_alloc()
    341   cm = per_cpu_ptr(m->maps, best_cpu);  in irq_matrix_alloc()
    369   struct cpumap *cm = per_cpu_ptr(m->maps, cpu);  in irq_matrix_free()
    448   struct cpumap *cm = per_cpu_ptr(m->maps, cpu);  in irq_matrix_debug_show()
/Linux-v4.19/drivers/block/zram/

D | zcomp.c |
    166   if (WARN_ON(*per_cpu_ptr(comp->stream, cpu)))  in zcomp_cpu_up_prepare()
    174   *per_cpu_ptr(comp->stream, cpu) = zstrm;  in zcomp_cpu_up_prepare()
    183   zstrm = *per_cpu_ptr(comp->stream, cpu);  in zcomp_cpu_dead()
    186   *per_cpu_ptr(comp->stream, cpu) = NULL;  in zcomp_cpu_dead()
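
zcomp.c fills and clears those per-CPU stream slots from CPU-hotplug callbacks, so a compression stream exists exactly for the CPUs that can run. Below is a rough sketch of wiring prepare/dead callbacks with cpuhp_setup_state(); the dynamic state used here, the zstream type, and its constructor/destructor are assumptions for illustration, not the zram code:

/* Hotplug-managed per-CPU objects (sketch; zstream_* helpers are hypothetical). */
#include <linux/cpuhotplug.h>
#include <linux/errno.h>
#include <linux/percpu.h>

struct zstream;
struct zstream *zstream_create(void);		/* hypothetical allocator */
void zstream_destroy(struct zstream *s);	/* hypothetical destructor */

static struct zstream * __percpu *streams;

static int my_cpu_prepare(unsigned int cpu)
{
	struct zstream *s = zstream_create();

	if (!s)
		return -ENOMEM;
	*per_cpu_ptr(streams, cpu) = s;
	return 0;
}

static int my_cpu_dead(unsigned int cpu)
{
	zstream_destroy(*per_cpu_ptr(streams, cpu));
	*per_cpu_ptr(streams, cpu) = NULL;
	return 0;
}

static int my_streams_init(void)
{
	streams = alloc_percpu(struct zstream *);
	if (!streams)
		return -ENOMEM;
	/* Prepare/dead callbacks run on the control CPU around each hotplug
	 * event; cpuhp_setup_state() also invokes the prepare callback for
	 * CPUs that are already online at registration time. */
	return cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "myzcomp:prepare",
				 my_cpu_prepare, my_cpu_dead);
}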
/Linux-v4.19/kernel/bpf/

D | percpu_freelist.c |
    18    struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);  in pcpu_freelist_init()
    65    head = per_cpu_ptr(s->freelist, cpu);  in pcpu_freelist_populate()
    87    head = per_cpu_ptr(s->freelist, cpu);  in pcpu_freelist_pop()

D | bpf_lru_list.c |
    411   l = per_cpu_ptr(lru->percpu_lru, cpu);  in bpf_percpu_lru_pop_free()
    444   loc_l = per_cpu_ptr(clru->local_list, cpu);  in bpf_common_lru_pop_free()
    473   steal_loc_l = per_cpu_ptr(clru->local_list, steal);  in bpf_common_lru_pop_free()
    517   loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);  in bpf_common_lru_push_free()
    544   l = per_cpu_ptr(lru->percpu_lru, node->cpu);  in bpf_percpu_lru_push_free()
    594   l = per_cpu_ptr(lru->percpu_lru, cpu);  in bpf_percpu_lru_populate()
    661   l = per_cpu_ptr(lru->percpu_lru, cpu);  in bpf_lru_init()
    675   loc_l = per_cpu_ptr(clru->local_list, cpu);  in bpf_lru_init()
/Linux-v4.19/drivers/powercap/

D | idle_inject.c |
    89    iit = per_cpu_ptr(&idle_inject_thread, cpu);  in idle_inject_wakeup()
    134   iit = per_cpu_ptr(&idle_inject_thread, cpu);  in idle_inject_fn()
    239   iit = per_cpu_ptr(&idle_inject_thread, cpu);  in idle_inject_stop()
    271   per_cpu_ptr(&idle_inject_thread, cpu);  in idle_inject_should_run()
/Linux-v4.19/net/xfrm/

D | xfrm_ipcomp.c |
    49    u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);  in ipcomp_decompress()
    50    struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);  in ipcomp_decompress()
    216   vfree(*per_cpu_ptr(scratches, i));  in ipcomp_free_scratches()
    241   *per_cpu_ptr(scratches, i) = scratch;  in ipcomp_alloc_scratches()
    269   struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);  in ipcomp_free_tfms()
    311   *per_cpu_ptr(tfms, cpu) = tfm;  in ipcomp_alloc_tfms()
/Linux-v4.19/tools/testing/radix-tree/linux/

D | percpu.h |
    10    #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })  macro
    11    #define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))
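
The radix-tree test harness collapses the whole API to a single CPU: its per_cpu_ptr() simply evaluates and discards the cpu argument and returns the pointer unchanged. The small userspace program below (my own illustration, not part of the tree) shows why kernel-style callers still compile and behave sensibly against that stub:

/* Userspace illustration of the single-CPU stub above; builds with GCC/Clang,
 * since the stub relies on the statement-expression extension. */
#include <stdio.h>

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); (ptr); })
#define per_cpu(var, cpu)	(*per_cpu_ptr(&(var), cpu))

static unsigned long counter;	/* stands in for a per-CPU variable */

int main(void)
{
	int cpu;

	/* Every "cpu" aliases the same storage, so four increments land in one place. */
	for (cpu = 0; cpu < 4; cpu++)
		per_cpu(counter, cpu) += 1;

	printf("total = %lu\n", per_cpu(counter, 0));	/* prints: total = 4 */
	return 0;
}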
/Linux-v4.19/kernel/trace/

D | trace_functions_graph.c |
    413   data = per_cpu_ptr(tr->trace_buffer.data, cpu);  in trace_graph_entry()
    487   data = per_cpu_ptr(tr->trace_buffer.data, cpu);  in trace_graph_return()
    606   last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);  in verif_pid()
    844   cpu_data = per_cpu_ptr(data->cpu_data, cpu);  in print_graph_entry_leaf()
    896   cpu_data = per_cpu_ptr(data->cpu_data, cpu);  in print_graph_entry_nested()
    992   depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);  in check_irq_entry()
    1038  depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);  in check_irq_return()
    1122  cpu_data = per_cpu_ptr(data->cpu_data, cpu);  in print_graph_return()
    1184  depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;  in print_graph_comment()
    1250  if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {  in print_graph_function_flags()
    [all …]
/Linux-v4.19/drivers/clocksource/

D | riscv_timer.c |
    61    struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);  in riscv_timer_starting_cpu()
    93    cs = per_cpu_ptr(&riscv_clocksource, cpu_id);  in riscv_timer_init_dt()
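
riscv_timer.c (like cpuhp_state in kernel/cpu.c and idle_inject_thread above) applies per_cpu_ptr() to a statically declared DEFINE_PER_CPU variable rather than to memory returned by alloc_percpu(); the only visible difference at the call site is the explicit '&'. A small sketch of that form, using an invented type:

/* Statically allocated per-CPU state, one instance per possible CPU (sketch). */
#include <linux/percpu.h>
#include <linux/types.h>

struct my_timer_state {			/* illustrative type */
	u64	last_event;
	bool	armed;
};

static DEFINE_PER_CPU(struct my_timer_state, my_timer_state);

/* Initialise one CPU's instance, e.g. from a CPU-hotplug "starting" callback
 * in the style of riscv_timer_starting_cpu() above. */
static int my_timer_starting_cpu(unsigned int cpu)
{
	struct my_timer_state *ts = per_cpu_ptr(&my_timer_state, cpu);

	ts->armed = true;
	ts->last_event = 0;
	return 0;
}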
/Linux-v4.19/crypto/

D | scompress.c |
    76    vfree(*per_cpu_ptr(scratches, i));  in crypto_scomp_free_scratches()
    96    *per_cpu_ptr(scratches, i) = scratch;  in crypto_scomp_alloc_scratches()
    150   u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);  in scomp_acomp_comp_decomp()
    151   u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);  in scomp_acomp_comp_decomp()
/Linux-v4.19/drivers/hv/

D | hv.c |
    202   = per_cpu_ptr(hv_context.cpu_context, cpu);  in hv_synic_alloc()
    259   = per_cpu_ptr(hv_context.cpu_context, cpu);  in hv_synic_free()
    280   = per_cpu_ptr(hv_context.cpu_context, cpu);  in hv_synic_init()
    348   = per_cpu_ptr(hv_context.cpu_context, cpu);  in hv_synic_clockevents_cleanup()
/Linux-v4.19/arch/x86/kernel/

D | kgdb.c |
    225   bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);  in kgdb_correct_hw_break()
    254   pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);  in hw_break_reserve_slot()
    266   pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);  in hw_break_reserve_slot()
    281   pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);  in hw_break_release_slot()
    321   bp = *per_cpu_ptr(breakinfo[i].pev, cpu);  in kgdb_remove_all_hw_break()
    414   bp = *per_cpu_ptr(breakinfo[i].pev, cpu);  in kgdb_disable_hw_debug()
    686   pevent = per_cpu_ptr(breakinfo[i].pev, cpu);  in kgdb_arch_late()
/Linux-v4.19/kernel/rcu/

D | tree_exp.h |
    289   struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());  in exp_funnel_lock()
    371   rdp = per_cpu_ptr(rsp->rda, cpu);  in sync_sched_exp_online_cleanup()
    403   struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);  in sync_rcu_exp_select_node_cpus()
    404   struct rcu_dynticks *rdtp = per_cpu_ptr(&rcu_dynticks, cpu);  in sync_rcu_exp_select_node_cpus()
    432   struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);  in sync_rcu_exp_select_node_cpus()
    551   rdp = per_cpu_ptr(rsp->rda, cpu);  in synchronize_sched_expedited_wait()
    684   rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());  in _synchronize_rcu_expedited()

D | srcutree.c |
    142   sdp = per_cpu_ptr(sp->sda, cpu);  in init_srcu_struct_nodes()
    261   struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);  in srcu_readers_lock_idx()
    278   struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);  in srcu_readers_unlock_idx()
    347   struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);  in srcu_readers_active()
    388   if (WARN_ON(delayed_work_pending(&per_cpu_ptr(sp->sda, cpu)->work)))  in _cleanup_srcu_struct()
    391   flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);  in _cleanup_srcu_struct()
    519   srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);  in srcu_schedule_cbs_snp()
    583   sdp = per_cpu_ptr(sp->sda, cpu);  in srcu_gp_end()
    1062  sdp = per_cpu_ptr(sp->sda, cpu);  in srcu_barrier()
    1279  sdp = per_cpu_ptr(sp->sda, cpu);  in srcu_torture_stats_print()
/Linux-v4.19/lib/

D | percpu_counter.c |
    67    s32 *pcount = per_cpu_ptr(fbc->counters, cpu);  in percpu_counter_set()
    114   s32 *pcount = per_cpu_ptr(fbc->counters, cpu);  in __percpu_counter_sum()
    188   pcount = per_cpu_ptr(fbc->counters, cpu);  in percpu_counter_cpu_dead()
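
lib/percpu_counter.c is the generic library form of this idiom: writers fold deltas into the calling CPU's s32 via percpu_counter_add(), and readers either accept the approximate value or pay for __percpu_counter_sum()'s walk over all CPUs (line 114 above). A short usage sketch of the consumer-facing API, assuming a context where GFP_KERNEL allocation is allowed and with an invented counter name:

/* Using the percpu_counter API rather than open-coding per_cpu_ptr() loops. */
#include <linux/gfp.h>
#include <linux/percpu_counter.h>

static struct percpu_counter nr_items;		/* illustrative counter */

static int items_init(void)
{
	return percpu_counter_init(&nr_items, 0, GFP_KERNEL);
}

static void item_added(void)
{
	percpu_counter_add(&nr_items, 1);	/* cheap: batched in this CPU's s32 */
}

static s64 items_exact(void)
{
	return percpu_counter_sum(&nr_items);	/* exact: sums every CPU's delta */
}

static void items_exit(void)
{
	percpu_counter_destroy(&nr_items);
}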
/Linux-v4.19/net/core/

D | gro_cells.c |
    64    struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);  in gro_cells_init()
    85    struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);  in gro_cells_destroy()