Lines matching refs: cpuctx
159 static void perf_ctx_lock(struct perf_cpu_context *cpuctx, in perf_ctx_lock() argument
162 raw_spin_lock(&cpuctx->ctx.lock); in perf_ctx_lock()
167 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx, in perf_ctx_unlock() argument
172 raw_spin_unlock(&cpuctx->ctx.lock); in perf_ctx_unlock()
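Taken together, the two fragments above encode perf's lock-nesting rule: the CPU context lock is always taken before, and released after, the (optional) task context lock. A sketch of the full helpers, with the ctx branches filled in from the kernel source of this vintage:

    static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
                              struct perf_event_context *ctx)
    {
            /* CPU context lock first ... */
            raw_spin_lock(&cpuctx->ctx.lock);
            if (ctx)
                    raw_spin_lock(&ctx->lock);
    }

    static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
                                struct perf_event_context *ctx)
    {
            /* ... and released last, in reverse order */
            if (ctx)
                    raw_spin_unlock(&ctx->lock);
            raw_spin_unlock(&cpuctx->ctx.lock);
    }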
215 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in event_function() local
216 struct perf_event_context *task_ctx = cpuctx->task_ctx; in event_function()
221 perf_ctx_lock(cpuctx, task_ctx); in event_function()
246 WARN_ON_ONCE(&cpuctx->ctx != ctx); in event_function()
249 efs->func(event, cpuctx, ctx, efs->data); in event_function()
251 perf_ctx_unlock(cpuctx, task_ctx); in event_function()
312 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in event_function_local() local
325 perf_ctx_lock(cpuctx, task_ctx); in event_function_local()
341 if (WARN_ON_ONCE(cpuctx->task_ctx != ctx)) in event_function_local()
345 WARN_ON_ONCE(&cpuctx->ctx != ctx); in event_function_local()
348 func(event, cpuctx, ctx, data); in event_function_local()
350 perf_ctx_unlock(cpuctx, task_ctx); in event_function_local()
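Both event_function() and event_function_local() follow the same pattern: look up the CPU context behind the event's context, take both locks, verify that ctx is either cpuctx's own CPU context or the currently installed task context, and only then invoke the callback. A condensed sketch of the core of event_function(), with the intermediate checks filled in from the surrounding source (approximate; details vary between versions):

    perf_ctx_lock(cpuctx, task_ctx);

    if (ctx->task) {
            /* task context: only valid while its task runs on this CPU */
            if (ctx->task != current)
                    goto unlock;    /* raced with a context switch */
            WARN_ON_ONCE(task_ctx != ctx);
    } else {
            /* CPU context: must be this CPU's own context */
            WARN_ON_ONCE(&cpuctx->ctx != ctx);
    }

    efs->func(event, cpuctx, ctx, efs->data);
    unlock:
    perf_ctx_unlock(cpuctx, task_ctx);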
443 static bool perf_rotate_context(struct perf_cpu_context *cpuctx);
566 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
569 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
681 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in perf_cgroup_match() local
688 if (!cpuctx->cgrp) in perf_cgroup_match()
697 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup, in perf_cgroup_match()
733 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) in update_cgrp_time_from_cpuctx() argument
735 struct perf_cgroup *cgrp = cpuctx->cgrp; in update_cgrp_time_from_cpuctx()
803 struct perf_cpu_context *cpuctx; in perf_cgroup_switch() local
814 list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) { in perf_cgroup_switch()
815 WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0); in perf_cgroup_switch()
817 perf_ctx_lock(cpuctx, cpuctx->task_ctx); in perf_cgroup_switch()
818 perf_pmu_disable(cpuctx->ctx.pmu); in perf_cgroup_switch()
821 cpu_ctx_sched_out(cpuctx, EVENT_ALL); in perf_cgroup_switch()
826 cpuctx->cgrp = NULL; in perf_cgroup_switch()
830 WARN_ON_ONCE(cpuctx->cgrp); in perf_cgroup_switch()
838 cpuctx->cgrp = perf_cgroup_from_task(task, in perf_cgroup_switch()
839 &cpuctx->ctx); in perf_cgroup_switch()
840 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); in perf_cgroup_switch()
842 perf_pmu_enable(cpuctx->ctx.pmu); in perf_cgroup_switch()
843 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); in perf_cgroup_switch()
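The perf_cgroup_switch() fragments combine into a classic reschedule bracket, applied to every CPU context on this CPU that has active cgroup events: lock, disable the PMU, schedule everything out, repoint cpuctx->cgrp, schedule everything back in, re-enable, unlock. A condensed sketch; the PERF_CGROUP_SWOUT/SWIN mode flags are as in kernels of this vintage and were dropped in later rewrites:

    list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) {
            WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);

            perf_ctx_lock(cpuctx, cpuctx->task_ctx);
            perf_pmu_disable(cpuctx->ctx.pmu);

            if (mode & PERF_CGROUP_SWOUT) {
                    cpu_ctx_sched_out(cpuctx, EVENT_ALL);
                    /* must not be cleared before the ctx is scheduled out */
                    cpuctx->cgrp = NULL;
            }

            if (mode & PERF_CGROUP_SWIN) {
                    WARN_ON_ONCE(cpuctx->cgrp);
                    /* set cgrp before sched-in so it is visible during sched-in */
                    cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
                    cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
            }

            perf_pmu_enable(cpuctx->ctx.pmu);
            perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
    }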
904 struct perf_cpu_context *cpuctx; in perf_cgroup_ensure_storage() local
916 cpuctx = per_cpu_ptr(event->pmu->pmu_cpu_context, cpu); in perf_cgroup_ensure_storage()
917 if (heap_size <= cpuctx->heap_size) in perf_cgroup_ensure_storage()
927 raw_spin_lock_irq(&cpuctx->ctx.lock); in perf_cgroup_ensure_storage()
928 if (cpuctx->heap_size < heap_size) { in perf_cgroup_ensure_storage()
929 swap(cpuctx->heap, storage); in perf_cgroup_ensure_storage()
930 if (storage == cpuctx->heap_default) in perf_cgroup_ensure_storage()
932 cpuctx->heap_size = heap_size; in perf_cgroup_ensure_storage()
934 raw_spin_unlock_irq(&cpuctx->ctx.lock); in perf_cgroup_ensure_storage()
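perf_cgroup_ensure_storage() grows the per-CPU heap that visit_groups_merge() uses for cgroup hierarchy iteration. The idiom visible in the fragments: allocate outside the lock, swap() under ctx.lock only if the heap is still too small, and free whichever buffer lost the race, taking care never to kfree() the embedded heap_default array. Condensed sketch (error handling abbreviated):

    for_each_possible_cpu(cpu) {
            cpuctx = per_cpu_ptr(event->pmu->pmu_cpu_context, cpu);
            if (heap_size <= cpuctx->heap_size)
                    continue;

            storage = kmalloc_node(heap_size * sizeof(struct perf_event *),
                                   GFP_KERNEL, cpu_to_node(cpu));
            if (!storage) {
                    ret = -ENOMEM;
                    break;
            }

            raw_spin_lock_irq(&cpuctx->ctx.lock);
            if (cpuctx->heap_size < heap_size) {
                    swap(cpuctx->heap, storage);
                    if (storage == cpuctx->heap_default)
                            storage = NULL;   /* never kfree() the default heap */
                    cpuctx->heap_size = heap_size;
            }
            raw_spin_unlock_irq(&cpuctx->ctx.lock);

            kfree(storage);
    }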
993 struct perf_cpu_context *cpuctx; in perf_cgroup_event_enable() local
1002 cpuctx = container_of(ctx, struct perf_cpu_context, ctx); in perf_cgroup_event_enable()
1010 if (ctx->is_active && !cpuctx->cgrp) { in perf_cgroup_event_enable()
1014 cpuctx->cgrp = cgrp; in perf_cgroup_event_enable()
1020 list_add(&cpuctx->cgrp_cpuctx_entry, in perf_cgroup_event_enable()
1027 struct perf_cpu_context *cpuctx; in perf_cgroup_event_disable() local
1036 cpuctx = container_of(ctx, struct perf_cpu_context, ctx); in perf_cgroup_event_disable()
1041 if (ctx->is_active && cpuctx->cgrp) in perf_cgroup_event_disable()
1042 cpuctx->cgrp = NULL; in perf_cgroup_event_disable()
1044 list_del(&cpuctx->cgrp_cpuctx_entry); in perf_cgroup_event_disable()
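perf_cgroup_event_enable()/_disable() keep two pieces of state consistent: cpuctx->cgrp (the cgroup currently being accounted on this CPU) and the per-CPU cgrp_cpuctx list that perf_cgroup_switch() walks. The first cgroup event added hooks the CPU context onto that list; the last one removed unhooks it. A sketch of the enable side; the nr_cgroups refcounting lines are reconstructed, as they do not appear in the listing above:

    cpuctx = container_of(ctx, struct perf_cpu_context, ctx);

    /*
     * Setting cpuctx->cgrp is conditional on the current task's
     * cgroup actually matching the event's cgroup.
     */
    if (ctx->is_active && !cpuctx->cgrp) {
            struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);

            if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
                    cpuctx->cgrp = cgrp;
    }

    if (ctx->nr_cgroups++)          /* already on the per-CPU list */
            return;

    list_add(&cpuctx->cgrp_cpuctx_entry,
             per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));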
1067 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) in update_cgrp_time_from_cpuctx() argument
1130 struct perf_cpu_context *cpuctx; in perf_mux_hrtimer_handler() local
1135 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer); in perf_mux_hrtimer_handler()
1136 rotations = perf_rotate_context(cpuctx); in perf_mux_hrtimer_handler()
1138 raw_spin_lock(&cpuctx->hrtimer_lock); in perf_mux_hrtimer_handler()
1140 hrtimer_forward_now(hr, cpuctx->hrtimer_interval); in perf_mux_hrtimer_handler()
1142 cpuctx->hrtimer_active = 0; in perf_mux_hrtimer_handler()
1143 raw_spin_unlock(&cpuctx->hrtimer_lock); in perf_mux_hrtimer_handler()
1148 static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) in __perf_mux_hrtimer_init() argument
1150 struct hrtimer *timer = &cpuctx->hrtimer; in __perf_mux_hrtimer_init()
1151 struct pmu *pmu = cpuctx->ctx.pmu; in __perf_mux_hrtimer_init()
1166 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval); in __perf_mux_hrtimer_init()
1168 raw_spin_lock_init(&cpuctx->hrtimer_lock); in __perf_mux_hrtimer_init()
1173 static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx) in perf_mux_hrtimer_restart() argument
1175 struct hrtimer *timer = &cpuctx->hrtimer; in perf_mux_hrtimer_restart()
1176 struct pmu *pmu = cpuctx->ctx.pmu; in perf_mux_hrtimer_restart()
1183 raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags); in perf_mux_hrtimer_restart()
1184 if (!cpuctx->hrtimer_active) { in perf_mux_hrtimer_restart()
1185 cpuctx->hrtimer_active = 1; in perf_mux_hrtimer_restart()
1186 hrtimer_forward_now(timer, cpuctx->hrtimer_interval); in perf_mux_hrtimer_restart()
1189 raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags); in perf_mux_hrtimer_restart()
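The hrtimer fragments implement the multiplexing tick. cpuctx->hrtimer_active, protected by cpuctx->hrtimer_lock, is the handshake between the handler and perf_mux_hrtimer_restart(): the handler re-arms itself only while perf_rotate_context() reports more rotation work, and clears hrtimer_active otherwise so the next restart call can start the timer again. Sketch of the restart side, with the start call filled in from the source of this era:

    static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
    {
            struct hrtimer *timer = &cpuctx->hrtimer;
            struct pmu *pmu = cpuctx->ctx.pmu;
            unsigned long flags;

            /* software PMUs are never multiplexed */
            if (pmu->task_ctx_nr == perf_sw_context)
                    return 0;

            raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
            if (!cpuctx->hrtimer_active) {
                    cpuctx->hrtimer_active = 1;
                    hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
                    hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
            }
            raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);

            return 0;
    }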
2046 struct perf_cpu_context *cpuctx,
2052 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in perf_put_aux_event() local
2081 event_sched_out(iter, cpuctx, ctx); in perf_put_aux_event()
2145 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in perf_remove_sibling_event() local
2147 event_sched_out(event, cpuctx, ctx); in perf_remove_sibling_event()
2252 struct perf_cpu_context *cpuctx, in event_sched_out() argument
2283 cpuctx->active_oncpu--; in event_sched_out()
2288 if (event->attr.exclusive || !cpuctx->active_oncpu) in event_sched_out()
2289 cpuctx->exclusive = 0; in event_sched_out()
2296 struct perf_cpu_context *cpuctx, in group_sched_out() argument
2306 event_sched_out(group_event, cpuctx, ctx); in group_sched_out()
2312 event_sched_out(event, cpuctx, ctx); in group_sched_out()
2327 struct perf_cpu_context *cpuctx, in __perf_remove_from_context() argument
2335 update_cgrp_time_from_cpuctx(cpuctx); in __perf_remove_from_context()
2338 event_sched_out(event, cpuctx, ctx); in __perf_remove_from_context()
2347 WARN_ON_ONCE(cpuctx->task_ctx != ctx); in __perf_remove_from_context()
2348 cpuctx->task_ctx = NULL; in __perf_remove_from_context()
2394 struct perf_cpu_context *cpuctx, in __perf_event_disable() argument
2407 group_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2409 event_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2510 struct perf_cpu_context *cpuctx, in event_sched_in() argument
2555 cpuctx->active_oncpu++; in event_sched_in()
2562 cpuctx->exclusive = 1; in event_sched_in()
2572 struct perf_cpu_context *cpuctx, in group_sched_in() argument
2583 if (event_sched_in(group_event, cpuctx, ctx)) in group_sched_in()
2590 if (event_sched_in(event, cpuctx, ctx)) { in group_sched_in()
2609 event_sched_out(event, cpuctx, ctx); in group_sched_in()
2611 event_sched_out(group_event, cpuctx, ctx); in group_sched_in()
2622 struct perf_cpu_context *cpuctx, in group_can_go_on() argument
2634 if (cpuctx->exclusive) in group_can_go_on()
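The active_oncpu/exclusive counters maintained in event_sched_out()/event_sched_in() above feed the admission test in group_can_go_on(): software groups always fit, an already-active exclusive group blocks further hardware groups, and an exclusive group cannot join other active events. A sketch; the exact form of the third test varies between kernel versions:

    static int group_can_go_on(struct perf_event *event,
                               struct perf_cpu_context *cpuctx,
                               int can_add_hw)
    {
            /* groups of purely software events can always go on */
            if (event->group_caps & PERF_EV_CAP_SOFTWARE)
                    return can_add_hw;

            /* an active exclusive group keeps all other hw groups off */
            if (cpuctx->exclusive)
                    return 0;

            /* an exclusive group cannot join already-active events */
            if (event->attr.exclusive && cpuctx->active_oncpu)
                    return 0;

            return can_add_hw;
    }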
2657 struct perf_cpu_context *cpuctx,
2661 struct perf_cpu_context *cpuctx,
2665 static void task_ctx_sched_out(struct perf_cpu_context *cpuctx, in task_ctx_sched_out() argument
2669 if (!cpuctx->task_ctx) in task_ctx_sched_out()
2672 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) in task_ctx_sched_out()
2675 ctx_sched_out(ctx, cpuctx, event_type); in task_ctx_sched_out()
2678 static void perf_event_sched_in(struct perf_cpu_context *cpuctx, in perf_event_sched_in() argument
2682 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task); in perf_event_sched_in()
2684 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); in perf_event_sched_in()
2685 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); in perf_event_sched_in()
2687 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); in perf_event_sched_in()
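The fragments at 2682–2687 show the fixed scheduling order: pinned groups across both contexts first, flexible groups second, and within each class CPU events before task events, so pinned events can never lose their counters to flexible ones. In kernels of this era the whole helper is just those four calls:

    static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
                                    struct perf_event_context *ctx,
                                    struct task_struct *task)
    {
            cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
            if (ctx)
                    ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
            cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
            if (ctx)
                    ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
    }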
2705 static void ctx_resched(struct perf_cpu_context *cpuctx, in ctx_resched() argument
2721 perf_pmu_disable(cpuctx->ctx.pmu); in ctx_resched()
2723 task_ctx_sched_out(cpuctx, task_ctx, event_type); in ctx_resched()
2733 cpu_ctx_sched_out(cpuctx, ctx_event_type); in ctx_resched()
2735 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); in ctx_resched()
2737 perf_event_sched_in(cpuctx, task_ctx, current); in ctx_resched()
2738 perf_pmu_enable(cpuctx->ctx.pmu); in ctx_resched()
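ctx_resched() is the canonical reschedule bracket seen throughout this list: disable the PMU, schedule out only what the event type requires, then rebuild everything via perf_event_sched_in(). Condensed sketch with the event-type plumbing filled in from the source:

    static void ctx_resched(struct perf_cpu_context *cpuctx,
                            struct perf_event_context *task_ctx,
                            enum event_type_t event_type)
    {
            bool cpu_event = !!(event_type & EVENT_CPU);
            enum event_type_t ctx_event_type;

            /* touching pinned groups forces flexible groups out too */
            if (event_type & EVENT_PINNED)
                    event_type |= EVENT_FLEXIBLE;

            ctx_event_type = event_type & EVENT_ALL;

            perf_pmu_disable(cpuctx->ctx.pmu);
            if (task_ctx)
                    task_ctx_sched_out(cpuctx, task_ctx, event_type);

            /*
             * CPU events force out the corresponding CPU groups; pinned
             * task events only force out the CPU flexible groups.
             */
            if (cpu_event)
                    cpu_ctx_sched_out(cpuctx, ctx_event_type);
            else if (ctx_event_type & EVENT_PINNED)
                    cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

            perf_event_sched_in(cpuctx, task_ctx, current);
            perf_pmu_enable(cpuctx->ctx.pmu);
    }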
2743 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_pmu_resched() local
2744 struct perf_event_context *task_ctx = cpuctx->task_ctx; in perf_pmu_resched()
2746 perf_ctx_lock(cpuctx, task_ctx); in perf_pmu_resched()
2747 ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU); in perf_pmu_resched()
2748 perf_ctx_unlock(cpuctx, task_ctx); in perf_pmu_resched()
2761 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_install_in_context() local
2762 struct perf_event_context *task_ctx = cpuctx->task_ctx; in __perf_install_in_context()
2766 raw_spin_lock(&cpuctx->ctx.lock); in __perf_install_in_context()
2785 WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx); in __perf_install_in_context()
2803 ctx_sched_out(ctx, cpuctx, EVENT_TIME); in __perf_install_in_context()
2805 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_install_in_context()
2811 perf_ctx_unlock(cpuctx, task_ctx); in __perf_install_in_context()
2936 struct perf_cpu_context *cpuctx, in __perf_event_enable() argument
2948 ctx_sched_out(ctx, cpuctx, EVENT_TIME); in __perf_event_enable()
2957 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); in __perf_event_enable()
2966 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); in __perf_event_enable()
2970 task_ctx = cpuctx->task_ctx; in __perf_event_enable()
2974 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_event_enable()
3200 struct perf_cpu_context *cpuctx, in ctx_sched_out() argument
3214 WARN_ON_ONCE(cpuctx->task_ctx); in ctx_sched_out()
3223 WARN_ON_ONCE(cpuctx->task_ctx != ctx); in ctx_sched_out()
3225 cpuctx->task_ctx = NULL; in ctx_sched_out()
3241 update_cgrp_time_from_cpuctx(cpuctx); in ctx_sched_out()
3252 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
3257 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
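The ctx_sched_out() fragments show its is_active bookkeeping: clear the requested event-type bits, detach cpuctx->task_ctx once a task context goes fully inactive, update (cgroup) time while it is still being counted, then walk the pinned and flexible active lists. A condensed skeleton of the body, declarations omitted:

    ctx->is_active &= ~event_type;
    if (!(ctx->is_active & EVENT_ALL))
            ctx->is_active = 0;

    if (ctx->task) {
            WARN_ON_ONCE(cpuctx->task_ctx != ctx);
            if (!ctx->is_active)
                    cpuctx->task_ctx = NULL;
    }

    if (is_active & EVENT_TIME) {
            update_context_time(ctx);
            update_cgrp_time_from_cpuctx(cpuctx);
    }

    is_active ^= ctx->is_active;    /* changed bits */

    if (is_active & EVENT_PINNED)
            list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
                    group_sched_out(event, cpuctx, ctx);

    if (is_active & EVENT_FLEXIBLE)
            list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list)
                    group_sched_out(event, cpuctx, ctx);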
3377 struct perf_cpu_context *cpuctx; in perf_event_context_sched_out() local
3385 cpuctx = __get_cpu_context(ctx); in perf_event_context_sched_out()
3386 if (!cpuctx->task_ctx) in perf_event_context_sched_out()
3420 if (cpuctx->sched_cb_usage && pmu->sched_task) in perf_event_context_sched_out()
3460 if (cpuctx->sched_cb_usage && pmu->sched_task) in perf_event_context_sched_out()
3462 task_ctx_sched_out(cpuctx, ctx, EVENT_ALL); in perf_event_context_sched_out()
3471 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_sched_cb_dec() local
3473 --cpuctx->sched_cb_usage; in perf_sched_cb_dec()
3479 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_sched_cb_inc() local
3481 cpuctx->sched_cb_usage++; in perf_sched_cb_inc()
3492 static void __perf_pmu_sched_task(struct perf_cpu_context *cpuctx, bool sched_in) in __perf_pmu_sched_task() argument
3496 pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */ in __perf_pmu_sched_task()
3501 perf_ctx_lock(cpuctx, cpuctx->task_ctx); in __perf_pmu_sched_task()
3504 pmu->sched_task(cpuctx->task_ctx, sched_in); in __perf_pmu_sched_task()
3507 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); in __perf_pmu_sched_task()
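__perf_pmu_sched_task() wraps the driver's sched_task() callback in the same lock/PMU-disable bracket; the cpuctx->sched_cb_usage counter (incremented and decremented at 3473/3481 above) gates whether it gets called at all. A reconstruction of the helper:

    static void __perf_pmu_sched_task(struct perf_cpu_context *cpuctx, bool sched_in)
    {
            struct pmu *pmu;

            pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */

            if (WARN_ON_ONCE(!pmu->sched_task))
                    return;

            perf_ctx_lock(cpuctx, cpuctx->task_ctx);
            perf_pmu_disable(pmu);

            pmu->sched_task(cpuctx->task_ctx, sched_in);

            perf_pmu_enable(pmu);
            perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
    }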
3550 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, in cpu_ctx_sched_out() argument
3553 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); in cpu_ctx_sched_out()
3587 static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx, in visit_groups_merge() argument
3601 if (cpuctx) { in visit_groups_merge()
3603 .data = cpuctx->heap, in visit_groups_merge()
3605 .size = cpuctx->heap_size, in visit_groups_merge()
3608 lockdep_assert_held(&cpuctx->ctx.lock); in visit_groups_merge()
3611 if (cpuctx->cgrp) in visit_groups_merge()
3612 css = &cpuctx->cgrp->css; in visit_groups_merge()
3652 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in merge_sched_in() local
3661 if (group_can_go_on(event, cpuctx, *can_add_hw)) { in merge_sched_in()
3662 if (!group_sched_in(event, cpuctx, ctx)) in merge_sched_in()
3674 perf_mux_hrtimer_restart(cpuctx); in merge_sched_in()
3682 struct perf_cpu_context *cpuctx) in ctx_pinned_sched_in() argument
3686 if (ctx != &cpuctx->ctx) in ctx_pinned_sched_in()
3687 cpuctx = NULL; in ctx_pinned_sched_in()
3689 visit_groups_merge(cpuctx, &ctx->pinned_groups, in ctx_pinned_sched_in()
3696 struct perf_cpu_context *cpuctx) in ctx_flexible_sched_in() argument
3700 if (ctx != &cpuctx->ctx) in ctx_flexible_sched_in()
3701 cpuctx = NULL; in ctx_flexible_sched_in()
3703 visit_groups_merge(cpuctx, &ctx->flexible_groups, in ctx_flexible_sched_in()
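The `cpuctx = NULL` lines at 3686–3687 and 3700–3701 are deliberate: visit_groups_merge() only needs the cpuctx (for its cgroup heap and css) when scheduling the CPU context itself, so both callers drop it for task contexts. One of the two callers, as in the source of this era:

    static void ctx_pinned_sched_in(struct perf_event_context *ctx,
                                    struct perf_cpu_context *cpuctx)
    {
            int can_add_hw = 1;

            /* the cgroup heap only applies to the CPU context */
            if (ctx != &cpuctx->ctx)
                    cpuctx = NULL;

            visit_groups_merge(cpuctx, &ctx->pinned_groups,
                               smp_processor_id(),
                               merge_sched_in, &can_add_hw);
    }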
3710 struct perf_cpu_context *cpuctx, in ctx_sched_in() argument
3725 cpuctx->task_ctx = ctx; in ctx_sched_in()
3727 WARN_ON_ONCE(cpuctx->task_ctx != ctx); in ctx_sched_in()
3744 ctx_pinned_sched_in(ctx, cpuctx); in ctx_sched_in()
3748 ctx_flexible_sched_in(ctx, cpuctx); in ctx_sched_in()
3751 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, in cpu_ctx_sched_in() argument
3755 struct perf_event_context *ctx = &cpuctx->ctx; in cpu_ctx_sched_in()
3757 ctx_sched_in(ctx, cpuctx, event_type, task); in cpu_ctx_sched_in()
3763 struct perf_cpu_context *cpuctx; in perf_event_context_sched_in() local
3766 cpuctx = __get_cpu_context(ctx); in perf_event_context_sched_in()
3767 if (cpuctx->task_ctx == ctx) { in perf_event_context_sched_in()
3768 if (cpuctx->sched_cb_usage) in perf_event_context_sched_in()
3769 __perf_pmu_sched_task(cpuctx, true); in perf_event_context_sched_in()
3773 perf_ctx_lock(cpuctx, ctx); in perf_event_context_sched_in()
3791 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); in perf_event_context_sched_in()
3792 perf_event_sched_in(cpuctx, ctx, task); in perf_event_context_sched_in()
3794 if (cpuctx->sched_cb_usage && pmu->sched_task) in perf_event_context_sched_in()
3795 pmu->sched_task(cpuctx->task_ctx, true); in perf_event_context_sched_in()
3800 perf_ctx_unlock(cpuctx, ctx); in perf_event_context_sched_in()
4060 static bool perf_rotate_context(struct perf_cpu_context *cpuctx) in perf_rotate_context() argument
4071 cpu_rotate = cpuctx->ctx.rotate_necessary; in perf_rotate_context()
4072 task_ctx = cpuctx->task_ctx; in perf_rotate_context()
4078 perf_ctx_lock(cpuctx, cpuctx->task_ctx); in perf_rotate_context()
4079 perf_pmu_disable(cpuctx->ctx.pmu); in perf_rotate_context()
4084 cpu_event = ctx_event_to_rotate(&cpuctx->ctx); in perf_rotate_context()
4091 ctx_sched_out(task_ctx, cpuctx, EVENT_FLEXIBLE); in perf_rotate_context()
4093 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); in perf_rotate_context()
4098 rotate_ctx(&cpuctx->ctx, cpu_event); in perf_rotate_context()
4100 perf_event_sched_in(cpuctx, task_ctx, current); in perf_rotate_context()
4102 perf_pmu_enable(cpuctx->ctx.pmu); in perf_rotate_context()
4103 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); in perf_rotate_context()
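perf_rotate_context() applies the same bracket to multiplexing: pick an event to rotate in each context that needs it, schedule out only the flexible groups, rotate the lists, and reschedule. The task flexible groups come out before the CPU ones, mirroring the sched-in order in perf_event_sched_in(). Condensed sketch of the body:

    perf_ctx_lock(cpuctx, cpuctx->task_ctx);
    perf_pmu_disable(cpuctx->ctx.pmu);

    if (task_rotate)
            task_event = ctx_event_to_rotate(task_ctx);
    if (cpu_rotate)
            cpu_event = ctx_event_to_rotate(&cpuctx->ctx);

    /* sched out task flexible groups first, then CPU flexible */
    if (task_event || (task_ctx && cpu_event))
            ctx_sched_out(task_ctx, cpuctx, EVENT_FLEXIBLE);
    if (cpu_event)
            cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

    if (task_event)
            rotate_ctx(task_ctx, task_event);
    if (cpu_event)
            rotate_ctx(&cpuctx->ctx, cpu_event);

    perf_event_sched_in(cpuctx, task_ctx, current);

    perf_pmu_enable(cpuctx->ctx.pmu);
    perf_ctx_unlock(cpuctx, cpuctx->task_ctx);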
4147 struct perf_cpu_context *cpuctx; in perf_event_enable_on_exec() local
4157 cpuctx = __get_cpu_context(ctx); in perf_event_enable_on_exec()
4158 perf_ctx_lock(cpuctx, ctx); in perf_event_enable_on_exec()
4159 ctx_sched_out(ctx, cpuctx, EVENT_TIME); in perf_event_enable_on_exec()
4170 ctx_resched(cpuctx, ctx, event_type); in perf_event_enable_on_exec()
4172 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); in perf_event_enable_on_exec()
4174 perf_ctx_unlock(cpuctx, ctx); in perf_event_enable_on_exec()
4214 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_read() local
4224 if (ctx->task && cpuctx->task_ctx != ctx) in __perf_event_read()
4483 struct perf_cpu_context *cpuctx; in find_get_context() local
4495 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in find_get_context()
4496 ctx = &cpuctx->ctx; in find_get_context()
5356 struct perf_cpu_context *cpuctx, in __perf_event_period() argument
7441 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in __perf_pmu_output_stop() local
7447 perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false); in __perf_pmu_output_stop()
7448 if (cpuctx->task_ctx) in __perf_pmu_output_stop()
7449 perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop, in __perf_pmu_output_stop()
10646 struct perf_cpu_context *cpuctx; in perf_event_mux_interval_ms_store() local
10647 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_event_mux_interval_ms_store()
10648 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); in perf_event_mux_interval_ms_store()
10651 (remote_function_f)perf_mux_hrtimer_restart, cpuctx); in perf_event_mux_interval_ms_store()
10787 struct perf_cpu_context *cpuctx; in perf_pmu_register() local
10789 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_pmu_register()
10790 __perf_event_init_context(&cpuctx->ctx); in perf_pmu_register()
10791 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); in perf_pmu_register()
10792 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); in perf_pmu_register()
10793 cpuctx->ctx.pmu = pmu; in perf_pmu_register()
10794 cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask); in perf_pmu_register()
10796 __perf_mux_hrtimer_init(cpuctx, cpu); in perf_pmu_register()
10798 cpuctx->heap_size = ARRAY_SIZE(cpuctx->heap_default); in perf_pmu_register()
10799 cpuctx->heap = cpuctx->heap_default; in perf_pmu_register()
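Stitching the 10787–10799 fragments together gives the per-CPU context initialization that perf_pmu_register() runs for every possible CPU:

    for_each_possible_cpu(cpu) {
            struct perf_cpu_context *cpuctx;

            cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
            __perf_event_init_context(&cpuctx->ctx);
            lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
            lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
            cpuctx->ctx.pmu = pmu;
            cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);

            __perf_mux_hrtimer_init(cpuctx, cpu);

            /* start with the embedded default heap for cgroup iteration */
            cpuctx->heap_size = ARRAY_SIZE(cpuctx->heap_default);
            cpuctx->heap = cpuctx->heap_default;
    }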
11926 struct perf_cpu_context *cpuctx = in SYSCALL_DEFINE5() local
11929 if (!cpuctx->online) { in SYSCALL_DEFINE5()
12124 struct perf_cpu_context *cpuctx = in perf_event_create_kernel_counter() local
12126 if (!cpuctx->online) { in perf_event_create_kernel_counter()
12873 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_exit_context() local
12877 ctx_sched_out(ctx, cpuctx, EVENT_TIME); in __perf_event_exit_context()
12879 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); in __perf_event_exit_context()
12885 struct perf_cpu_context *cpuctx; in perf_event_exit_cpu_context() local
12891 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_event_exit_cpu_context()
12892 ctx = &cpuctx->ctx; in perf_event_exit_cpu_context()
12896 cpuctx->online = 0; in perf_event_exit_cpu_context()
12910 struct perf_cpu_context *cpuctx; in perf_event_init_cpu() local
12919 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_event_init_cpu()
12920 ctx = &cpuctx->ctx; in perf_event_init_cpu()
12923 cpuctx->online = 1; in perf_event_init_cpu()
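The hotplug paths mirror each other: perf_event_exit_cpu_context() IPIs the dying CPU to tear each context down and clears cpuctx->online under ctx.mutex, while perf_event_init_cpu() sets it again; the checks at 11929 and 12126 above reject new events targeting offline CPUs. Condensed sketch of the exit side, with the list walk and locking filled in from the source:

    static void perf_event_exit_cpu_context(int cpu)
    {
            struct perf_cpu_context *cpuctx;
            struct perf_event_context *ctx;
            struct pmu *pmu;

            mutex_lock(&pmus_lock);
            list_for_each_entry(pmu, &pmus, entry) {
                    cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
                    ctx = &cpuctx->ctx;

                    mutex_lock(&ctx->mutex);
                    smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
                    cpuctx->online = 0;
                    mutex_unlock(&ctx->mutex);
            }
            cpumask_clear_cpu(cpu, perf_online_mask);
            mutex_unlock(&pmus_lock);
    }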