Lines matching refs: cpuctx

163 static void perf_ctx_lock(struct perf_cpu_context *cpuctx,  in perf_ctx_lock()  argument
166 raw_spin_lock(&cpuctx->ctx.lock); in perf_ctx_lock()
171 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx, in perf_ctx_unlock() argument
176 raw_spin_unlock(&cpuctx->ctx.lock); in perf_ctx_unlock()
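
The two helpers above fix the lock order for the rest of the file: the per-CPU context lock is always taken before the (optional) task context lock and released after it. A minimal userspace sketch of that nesting, with pthread mutexes and invented types standing in for the raw spinlocks and kernel structs:

#include <pthread.h>

struct ctx     { pthread_mutex_t lock; };
struct cpu_ctx { struct ctx ctx; };   /* per-CPU context embeds a ctx */

static void ctx_pair_lock(struct cpu_ctx *cpuctx, struct ctx *task_ctx)
{
        pthread_mutex_lock(&cpuctx->ctx.lock);       /* CPU context first */
        if (task_ctx)
                pthread_mutex_lock(&task_ctx->lock); /* task context second */
}

static void ctx_pair_unlock(struct cpu_ctx *cpuctx, struct ctx *task_ctx)
{
        if (task_ctx)
                pthread_mutex_unlock(&task_ctx->lock); /* reverse order */
        pthread_mutex_unlock(&cpuctx->ctx.lock);
}

int main(void)
{
        struct cpu_ctx cpu = { .ctx = { .lock = PTHREAD_MUTEX_INITIALIZER } };
        struct ctx task = { .lock = PTHREAD_MUTEX_INITIALIZER };

        ctx_pair_lock(&cpu, &task);   /* also correct with task_ctx == NULL */
        ctx_pair_unlock(&cpu, &task);
        return 0;
}
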
219 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in event_function() local
220 struct perf_event_context *task_ctx = cpuctx->task_ctx; in event_function()
225 perf_ctx_lock(cpuctx, task_ctx); in event_function()
250 WARN_ON_ONCE(&cpuctx->ctx != ctx); in event_function()
253 efs->func(event, cpuctx, ctx, efs->data); in event_function()
255 perf_ctx_unlock(cpuctx, task_ctx); in event_function()
316 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in event_function_local() local
329 perf_ctx_lock(cpuctx, task_ctx); in event_function_local()
345 if (WARN_ON_ONCE(cpuctx->task_ctx != ctx)) in event_function_local()
349 WARN_ON_ONCE(&cpuctx->ctx != ctx); in event_function_local()
352 func(event, cpuctx, ctx, data); in event_function_local()
354 perf_ctx_unlock(cpuctx, task_ctx); in event_function_local()
450 static bool perf_rotate_context(struct perf_cpu_context *cpuctx);
573 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
576 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
683 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in perf_cgroup_match() local
690 if (!cpuctx->cgrp) in perf_cgroup_match()
699 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup, in perf_cgroup_match()
735 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) in update_cgrp_time_from_cpuctx() argument
737 struct perf_cgroup *cgrp = cpuctx->cgrp; in update_cgrp_time_from_cpuctx()
805 struct perf_cpu_context *cpuctx; in perf_cgroup_switch() local
816 list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) { in perf_cgroup_switch()
817 WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0); in perf_cgroup_switch()
819 perf_ctx_lock(cpuctx, cpuctx->task_ctx); in perf_cgroup_switch()
820 perf_pmu_disable(cpuctx->ctx.pmu); in perf_cgroup_switch()
823 cpu_ctx_sched_out(cpuctx, EVENT_ALL); in perf_cgroup_switch()
828 cpuctx->cgrp = NULL; in perf_cgroup_switch()
832 WARN_ON_ONCE(cpuctx->cgrp); in perf_cgroup_switch()
840 cpuctx->cgrp = perf_cgroup_from_task(task, in perf_cgroup_switch()
841 &cpuctx->ctx); in perf_cgroup_switch()
842 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); in perf_cgroup_switch()
844 perf_pmu_enable(cpuctx->ctx.pmu); in perf_cgroup_switch()
845 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); in perf_cgroup_switch()
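
The matches inside perf_cgroup_switch() outline a fixed sequence: lock both contexts, disable the PMU, schedule all events out, repoint cpuctx->cgrp, schedule back in, re-enable, unlock. A toy sketch of that ordering with stub functions that merely print; none of these names are kernel APIs:

#include <stdio.h>

struct cgroup  { const char *name; };
struct cpu_ctx { struct cgroup *cgrp; };

static void pmu_disable(void)                { puts("pmu: disable"); }
static void pmu_enable(void)                 { puts("pmu: enable"); }
static void sched_out_all(struct cpu_ctx *c) { printf("out: %s\n", c->cgrp->name); }
static void sched_in_all(struct cpu_ctx *c)  { printf("in:  %s\n", c->cgrp->name); }

static void cgroup_switch(struct cpu_ctx *c, struct cgroup *next)
{
        /* perf_ctx_lock(cpuctx, cpuctx->task_ctx) would bracket this */
        pmu_disable();
        sched_out_all(c);   /* outgoing cgroup's events leave the PMU */
        c->cgrp = next;     /* repoint while nothing is scheduled */
        sched_in_all(c);    /* incoming cgroup's events take over */
        pmu_enable();
        /* perf_ctx_unlock(cpuctx, cpuctx->task_ctx) */
}

int main(void)
{
        struct cgroup a = { "cgroup-A" }, b = { "cgroup-B" };
        struct cpu_ctx c = { &a };

        cgroup_switch(&c, &b);
        return 0;
}
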
906 struct perf_cpu_context *cpuctx; in perf_cgroup_ensure_storage() local
918 cpuctx = per_cpu_ptr(event->pmu->pmu_cpu_context, cpu); in perf_cgroup_ensure_storage()
919 if (heap_size <= cpuctx->heap_size) in perf_cgroup_ensure_storage()
929 raw_spin_lock_irq(&cpuctx->ctx.lock); in perf_cgroup_ensure_storage()
930 if (cpuctx->heap_size < heap_size) { in perf_cgroup_ensure_storage()
931 swap(cpuctx->heap, storage); in perf_cgroup_ensure_storage()
932 if (storage == cpuctx->heap_default) in perf_cgroup_ensure_storage()
934 cpuctx->heap_size = heap_size; in perf_cgroup_ensure_storage()
936 raw_spin_unlock_irq(&cpuctx->ctx.lock); in perf_cgroup_ensure_storage()
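
perf_cgroup_ensure_storage() (lines 918-936) grows the per-CPU heap with a classic pattern: allocate the candidate buffer with no lock held, swap it in under ctx.lock only if the recheck still says the heap is too small, and free whichever buffer lost after unlocking, taking care never to free the embedded default storage. A hedged standalone sketch of the same pattern, all names illustrative:

#include <pthread.h>
#include <stdlib.h>

struct heap_owner {
        pthread_mutex_t lock;
        int *heap;
        size_t heap_size;
        int heap_default[2];   /* embedded storage, must never be freed */
};

#define SWAP(a, b) do { int *t = (a); (a) = (b); (b) = t; } while (0)

static int ensure_storage(struct heap_owner *h, size_t need)
{
        int *storage = calloc(need, sizeof(*storage)); /* no lock held */
        if (!storage)
                return -1;

        pthread_mutex_lock(&h->lock);
        if (h->heap_size < need) {      /* recheck under the lock */
                SWAP(h->heap, storage); /* install the bigger buffer */
                if (storage == h->heap_default)
                        storage = NULL; /* embedded buffer lost: free nothing */
                h->heap_size = need;
        }
        pthread_mutex_unlock(&h->lock);

        free(storage);                  /* loser freed outside the lock */
        return 0;
}

int main(void)
{
        struct heap_owner h = { .lock = PTHREAD_MUTEX_INITIALIZER };

        h.heap = h.heap_default;
        h.heap_size = 2;
        return ensure_storage(&h, 8);
}
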
995 struct perf_cpu_context *cpuctx; in perf_cgroup_event_enable() local
1004 cpuctx = container_of(ctx, struct perf_cpu_context, ctx); in perf_cgroup_event_enable()
1012 if (ctx->is_active && !cpuctx->cgrp) { in perf_cgroup_event_enable()
1016 cpuctx->cgrp = cgrp; in perf_cgroup_event_enable()
1022 list_add(&cpuctx->cgrp_cpuctx_entry, in perf_cgroup_event_enable()
1029 struct perf_cpu_context *cpuctx; in perf_cgroup_event_disable() local
1038 cpuctx = container_of(ctx, struct perf_cpu_context, ctx); in perf_cgroup_event_disable()
1043 if (ctx->is_active && cpuctx->cgrp) in perf_cgroup_event_disable()
1044 cpuctx->cgrp = NULL; in perf_cgroup_event_disable()
1046 list_del(&cpuctx->cgrp_cpuctx_entry); in perf_cgroup_event_disable()
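
Lines 1004 and 1038 use container_of() to recover the enclosing perf_cpu_context from a pointer to its embedded ctx member. A freestanding demo of that macro with stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct event_ctx { int is_active; };
struct cpu_ctx   { int cpu; struct event_ctx ctx; };

int main(void)
{
        struct cpu_ctx c = { .cpu = 3 };
        struct event_ctx *ctx = &c.ctx;  /* only the inner pointer is passed around */
        struct cpu_ctx *back = container_of(ctx, struct cpu_ctx, ctx);

        printf("recovered cpu = %d\n", back->cpu); /* prints 3 */
        return 0;
}
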
1069 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) in update_cgrp_time_from_cpuctx() argument
1132 struct perf_cpu_context *cpuctx; in perf_mux_hrtimer_handler() local
1137 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer); in perf_mux_hrtimer_handler()
1138 rotations = perf_rotate_context(cpuctx); in perf_mux_hrtimer_handler()
1140 raw_spin_lock(&cpuctx->hrtimer_lock); in perf_mux_hrtimer_handler()
1142 hrtimer_forward_now(hr, cpuctx->hrtimer_interval); in perf_mux_hrtimer_handler()
1144 cpuctx->hrtimer_active = 0; in perf_mux_hrtimer_handler()
1145 raw_spin_unlock(&cpuctx->hrtimer_lock); in perf_mux_hrtimer_handler()
1150 static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) in __perf_mux_hrtimer_init() argument
1152 struct hrtimer *timer = &cpuctx->hrtimer; in __perf_mux_hrtimer_init()
1153 struct pmu *pmu = cpuctx->ctx.pmu; in __perf_mux_hrtimer_init()
1168 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval); in __perf_mux_hrtimer_init()
1170 raw_spin_lock_init(&cpuctx->hrtimer_lock); in __perf_mux_hrtimer_init()
1175 static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx) in perf_mux_hrtimer_restart() argument
1177 struct hrtimer *timer = &cpuctx->hrtimer; in perf_mux_hrtimer_restart()
1178 struct pmu *pmu = cpuctx->ctx.pmu; in perf_mux_hrtimer_restart()
1185 raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags); in perf_mux_hrtimer_restart()
1186 if (!cpuctx->hrtimer_active) { in perf_mux_hrtimer_restart()
1187 cpuctx->hrtimer_active = 1; in perf_mux_hrtimer_restart()
1188 hrtimer_forward_now(timer, cpuctx->hrtimer_interval); in perf_mux_hrtimer_restart()
1191 raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags); in perf_mux_hrtimer_restart()
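
perf_mux_hrtimer_restart() (lines 1185-1191) arms the multiplexing timer idempotently: a flag checked under hrtimer_lock guarantees that concurrent callers start the timer exactly once, and the handler (line 1144) clears the flag when rotation stops. A sketch of that start-once guard, with a pthread mutex and a stub in place of the raw spinlock and hrtimer:

#include <pthread.h>
#include <stdio.h>

struct mux {
        pthread_mutex_t lock;
        int active;
};

static void timer_start(void) { puts("timer armed"); }

static void mux_restart(struct mux *m)
{
        pthread_mutex_lock(&m->lock);
        if (!m->active) {       /* only the first caller arms it */
                m->active = 1;  /* the handler clears this when it stops */
                timer_start();
        }
        pthread_mutex_unlock(&m->lock);
}

int main(void)
{
        struct mux m = { PTHREAD_MUTEX_INITIALIZER, 0 };

        mux_restart(&m);
        mux_restart(&m);        /* second call is a no-op */
        return 0;
}
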
2039 struct perf_cpu_context *cpuctx,
2045 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in perf_put_aux_event() local
2074 event_sched_out(iter, cpuctx, ctx); in perf_put_aux_event()
2138 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in perf_remove_sibling_event() local
2140 event_sched_out(event, cpuctx, ctx); in perf_remove_sibling_event()
2265 struct perf_cpu_context *cpuctx, in event_sched_out() argument
2296 cpuctx->active_oncpu--; in event_sched_out()
2301 if (event->attr.exclusive || !cpuctx->active_oncpu) in event_sched_out()
2302 cpuctx->exclusive = 0; in event_sched_out()
2309 struct perf_cpu_context *cpuctx, in group_sched_out() argument
2319 event_sched_out(group_event, cpuctx, ctx); in group_sched_out()
2325 event_sched_out(event, cpuctx, ctx); in group_sched_out()
2341 struct perf_cpu_context *cpuctx, in __perf_remove_from_context() argument
2349 update_cgrp_time_from_cpuctx(cpuctx); in __perf_remove_from_context()
2352 event_sched_out(event, cpuctx, ctx); in __perf_remove_from_context()
2363 WARN_ON_ONCE(cpuctx->task_ctx != ctx); in __perf_remove_from_context()
2364 cpuctx->task_ctx = NULL; in __perf_remove_from_context()
2406 struct perf_cpu_context *cpuctx, in __perf_event_disable() argument
2419 group_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2421 event_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2522 struct perf_cpu_context *cpuctx, in event_sched_in() argument
2567 cpuctx->active_oncpu++; in event_sched_in()
2574 cpuctx->exclusive = 1; in event_sched_in()
2584 struct perf_cpu_context *cpuctx, in group_sched_in() argument
2595 if (event_sched_in(group_event, cpuctx, ctx)) in group_sched_in()
2602 if (event_sched_in(event, cpuctx, ctx)) { in group_sched_in()
2621 event_sched_out(event, cpuctx, ctx); in group_sched_in()
2623 event_sched_out(group_event, cpuctx, ctx); in group_sched_in()
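
group_sched_in() (lines 2595-2623) is all-or-nothing: if any sibling fails to get a counter, every member already scheduled is backed out in reverse so a partial group never counts. A toy illustration with a pretend two-counter PMU; all names are invented:

#include <stdio.h>

static int counters = 2;               /* pretend-PMU capacity */

static int ev_sched_in(int id)
{
        if (counters == 0)
                return -1;
        counters--;
        printf("in  %d\n", id);
        return 0;
}

static void ev_sched_out(int id)
{
        counters++;
        printf("out %d\n", id);
}

static int group_sched_in(const int *ids, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (ev_sched_in(ids[i]) < 0)
                        goto unwind;   /* partial groups never count */
        }
        return 0;
unwind:
        while (i--)
                ev_sched_out(ids[i]); /* back out in reverse order */
        return -1;
}

int main(void)
{
        int grp[] = { 10, 11, 12 };   /* a leader and two siblings */

        printf("group_sched_in = %d\n", group_sched_in(grp, 3));
        return 0;
}
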
2634 struct perf_cpu_context *cpuctx, in group_can_go_on() argument
2646 if (cpuctx->exclusive) in group_can_go_on()
2669 struct perf_cpu_context *cpuctx,
2673 struct perf_cpu_context *cpuctx,
2677 static void task_ctx_sched_out(struct perf_cpu_context *cpuctx, in task_ctx_sched_out() argument
2681 if (!cpuctx->task_ctx) in task_ctx_sched_out()
2684 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) in task_ctx_sched_out()
2687 ctx_sched_out(ctx, cpuctx, event_type); in task_ctx_sched_out()
2690 static void perf_event_sched_in(struct perf_cpu_context *cpuctx, in perf_event_sched_in() argument
2694 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task); in perf_event_sched_in()
2696 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); in perf_event_sched_in()
2697 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); in perf_event_sched_in()
2699 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); in perf_event_sched_in()
2717 static void ctx_resched(struct perf_cpu_context *cpuctx, in ctx_resched() argument
2733 perf_pmu_disable(cpuctx->ctx.pmu); in ctx_resched()
2735 task_ctx_sched_out(cpuctx, task_ctx, event_type); in ctx_resched()
2745 cpu_ctx_sched_out(cpuctx, ctx_event_type); in ctx_resched()
2747 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); in ctx_resched()
2749 perf_event_sched_in(cpuctx, task_ctx, current); in ctx_resched()
2750 perf_pmu_enable(cpuctx->ctx.pmu); in ctx_resched()
2755 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_pmu_resched() local
2756 struct perf_event_context *task_ctx = cpuctx->task_ctx; in perf_pmu_resched()
2758 perf_ctx_lock(cpuctx, task_ctx); in perf_pmu_resched()
2759 ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU); in perf_pmu_resched()
2760 perf_ctx_unlock(cpuctx, task_ctx); in perf_pmu_resched()
2773 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_install_in_context() local
2774 struct perf_event_context *task_ctx = cpuctx->task_ctx; in __perf_install_in_context()
2778 raw_spin_lock(&cpuctx->ctx.lock); in __perf_install_in_context()
2797 WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx); in __perf_install_in_context()
2815 ctx_sched_out(ctx, cpuctx, EVENT_TIME); in __perf_install_in_context()
2817 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_install_in_context()
2823 perf_ctx_unlock(cpuctx, task_ctx); in __perf_install_in_context()
2948 struct perf_cpu_context *cpuctx, in __perf_event_enable() argument
2960 ctx_sched_out(ctx, cpuctx, EVENT_TIME); in __perf_event_enable()
2969 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); in __perf_event_enable()
2978 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); in __perf_event_enable()
2982 task_ctx = cpuctx->task_ctx; in __perf_event_enable()
2986 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_event_enable()
3232 struct perf_cpu_context *cpuctx, in ctx_sched_out() argument
3246 WARN_ON_ONCE(cpuctx->task_ctx); in ctx_sched_out()
3255 WARN_ON_ONCE(cpuctx->task_ctx != ctx); in ctx_sched_out()
3257 cpuctx->task_ctx = NULL; in ctx_sched_out()
3273 update_cgrp_time_from_cpuctx(cpuctx); in ctx_sched_out()
3284 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
3289 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
3409 struct perf_cpu_context *cpuctx; in perf_event_context_sched_out() local
3417 cpuctx = __get_cpu_context(ctx); in perf_event_context_sched_out()
3418 if (!cpuctx->task_ctx) in perf_event_context_sched_out()
3452 if (cpuctx->sched_cb_usage && pmu->sched_task) in perf_event_context_sched_out()
3492 if (cpuctx->sched_cb_usage && pmu->sched_task) in perf_event_context_sched_out()
3494 task_ctx_sched_out(cpuctx, ctx, EVENT_ALL); in perf_event_context_sched_out()
3505 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_sched_cb_dec() local
3509 if (!--cpuctx->sched_cb_usage) in perf_sched_cb_dec()
3510 list_del(&cpuctx->sched_cb_entry); in perf_sched_cb_dec()
3516 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_sched_cb_inc() local
3518 if (!cpuctx->sched_cb_usage++) in perf_sched_cb_inc()
3519 list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list)); in perf_sched_cb_inc()
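
perf_sched_cb_inc()/perf_sched_cb_dec() (lines 3509-3519) keep a cpuctx on the per-CPU callback list only while its usage count is non-zero: the 0->1 transition links it, the 1->0 transition unlinks it. A sketch of that refcount-gated membership, using a plain singly linked list where the kernel uses list_head:

#include <stdio.h>

struct cb {
        const char *name;
        int usage;
        struct cb *next;          /* on the active list iff usage > 0 */
};

static struct cb *active;

static void cb_inc(struct cb *c)
{
        if (!c->usage++) {        /* 0 -> 1: join the list */
                c->next = active;
                active = c;
        }
}

static void cb_dec(struct cb *c)
{
        if (!--c->usage) {        /* 1 -> 0: leave the list */
                struct cb **p = &active;

                while (*p != c)
                        p = &(*p)->next;
                *p = c->next;
        }
}

int main(void)
{
        struct cb a = { "pmu-a", 0, 0 };

        cb_inc(&a); cb_inc(&a);   /* two users, listed once */
        cb_dec(&a); cb_dec(&a);   /* last user removes it */
        printf("active = %s\n", active ? active->name : "(none)");
        return 0;
}
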
3532 static void __perf_pmu_sched_task(struct perf_cpu_context *cpuctx, bool sched_in) in __perf_pmu_sched_task() argument
3536 pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */ in __perf_pmu_sched_task()
3541 perf_ctx_lock(cpuctx, cpuctx->task_ctx); in __perf_pmu_sched_task()
3544 pmu->sched_task(cpuctx->task_ctx, sched_in); in __perf_pmu_sched_task()
3547 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); in __perf_pmu_sched_task()
3554 struct perf_cpu_context *cpuctx; in perf_pmu_sched_task() local
3559 list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) { in perf_pmu_sched_task()
3561 if (cpuctx->task_ctx) in perf_pmu_sched_task()
3564 __perf_pmu_sched_task(cpuctx, sched_in); in perf_pmu_sched_task()
3611 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, in cpu_ctx_sched_out() argument
3614 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); in cpu_ctx_sched_out()
3648 static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx, in visit_groups_merge() argument
3662 if (cpuctx) { in visit_groups_merge()
3664 .data = cpuctx->heap, in visit_groups_merge()
3666 .size = cpuctx->heap_size, in visit_groups_merge()
3669 lockdep_assert_held(&cpuctx->ctx.lock); in visit_groups_merge()
3672 if (cpuctx->cgrp) in visit_groups_merge()
3673 css = &cpuctx->cgrp->css; in visit_groups_merge()
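
visit_groups_merge() (lines 3648-3673) walks several event trees at once by keeping one cursor per tree in the min-heap stashed in cpuctx->heap and always expanding the smallest entry, loosely analogous to the kernel's ordering on group_index. A self-contained k-way merge over sorted arrays showing the same idea; the heap routines here are a from-scratch sketch, not the kernel's min_heap API:

#include <stdio.h>

struct cursor { const int *p; int n; };          /* one cursor per "tree" */

static int less(struct cursor *a, struct cursor *b) { return a->p[0] < b->p[0]; }

static void sift_down(struct cursor *h, int n, int i)
{
        for (;;) {
                int l = 2*i + 1, r = l + 1, m = i;

                if (l < n && less(&h[l], &h[m])) m = l;
                if (r < n && less(&h[r], &h[m])) m = r;
                if (m == i)
                        break;
                struct cursor t = h[i]; h[i] = h[m]; h[m] = t;
                i = m;
        }
}

static void visit_merge(struct cursor *h, int n)
{
        for (int i = n/2 - 1; i >= 0; i--)       /* heapify the cursors */
                sift_down(h, n, i);
        while (n) {
                printf("visit %d\n", h[0].p[0]); /* smallest group first */
                h[0].p++;                        /* advance that cursor */
                if (--h[0].n == 0)
                        h[0] = h[--n];           /* cursor exhausted */
                sift_down(h, n, 0);
        }
}

int main(void)
{
        static const int a[] = {1, 4, 9}, b[] = {2, 3, 8}, c[] = {5, 6, 7};
        struct cursor h[] = { {a, 3}, {b, 3}, {c, 3} };

        visit_merge(h, 3);                       /* prints 1..9 in order */
        return 0;
}
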
3736 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in merge_sched_in() local
3745 if (group_can_go_on(event, cpuctx, *can_add_hw)) { in merge_sched_in()
3746 if (!group_sched_in(event, cpuctx, ctx)) in merge_sched_in()
3757 perf_mux_hrtimer_restart(cpuctx); in merge_sched_in()
3767 struct perf_cpu_context *cpuctx) in ctx_pinned_sched_in() argument
3771 if (ctx != &cpuctx->ctx) in ctx_pinned_sched_in()
3772 cpuctx = NULL; in ctx_pinned_sched_in()
3774 visit_groups_merge(cpuctx, &ctx->pinned_groups, in ctx_pinned_sched_in()
3781 struct perf_cpu_context *cpuctx) in ctx_flexible_sched_in() argument
3785 if (ctx != &cpuctx->ctx) in ctx_flexible_sched_in()
3786 cpuctx = NULL; in ctx_flexible_sched_in()
3788 visit_groups_merge(cpuctx, &ctx->flexible_groups, in ctx_flexible_sched_in()
3795 struct perf_cpu_context *cpuctx, in ctx_sched_in() argument
3810 cpuctx->task_ctx = ctx; in ctx_sched_in()
3812 WARN_ON_ONCE(cpuctx->task_ctx != ctx); in ctx_sched_in()
3829 ctx_pinned_sched_in(ctx, cpuctx); in ctx_sched_in()
3833 ctx_flexible_sched_in(ctx, cpuctx); in ctx_sched_in()
3836 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, in cpu_ctx_sched_in() argument
3840 struct perf_event_context *ctx = &cpuctx->ctx; in cpu_ctx_sched_in()
3842 ctx_sched_in(ctx, cpuctx, event_type, task); in cpu_ctx_sched_in()
3848 struct perf_cpu_context *cpuctx; in perf_event_context_sched_in() local
3851 cpuctx = __get_cpu_context(ctx); in perf_event_context_sched_in()
3857 pmu = ctx->pmu = cpuctx->ctx.pmu; in perf_event_context_sched_in()
3859 if (cpuctx->task_ctx == ctx) { in perf_event_context_sched_in()
3860 if (cpuctx->sched_cb_usage) in perf_event_context_sched_in()
3861 __perf_pmu_sched_task(cpuctx, true); in perf_event_context_sched_in()
3865 perf_ctx_lock(cpuctx, ctx); in perf_event_context_sched_in()
3883 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); in perf_event_context_sched_in()
3884 perf_event_sched_in(cpuctx, ctx, task); in perf_event_context_sched_in()
3886 if (cpuctx->sched_cb_usage && pmu->sched_task) in perf_event_context_sched_in()
3887 pmu->sched_task(cpuctx->task_ctx, true); in perf_event_context_sched_in()
3892 perf_ctx_unlock(cpuctx, ctx); in perf_event_context_sched_in()
4155 static bool perf_rotate_context(struct perf_cpu_context *cpuctx) in perf_rotate_context() argument
4166 cpu_rotate = cpuctx->ctx.rotate_necessary; in perf_rotate_context()
4167 task_ctx = cpuctx->task_ctx; in perf_rotate_context()
4173 perf_ctx_lock(cpuctx, cpuctx->task_ctx); in perf_rotate_context()
4174 perf_pmu_disable(cpuctx->ctx.pmu); in perf_rotate_context()
4179 cpu_event = ctx_event_to_rotate(&cpuctx->ctx); in perf_rotate_context()
4186 ctx_sched_out(task_ctx, cpuctx, EVENT_FLEXIBLE); in perf_rotate_context()
4188 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); in perf_rotate_context()
4193 rotate_ctx(&cpuctx->ctx, cpu_event); in perf_rotate_context()
4195 perf_event_sched_in(cpuctx, task_ctx, current); in perf_rotate_context()
4197 perf_pmu_enable(cpuctx->ctx.pmu); in perf_rotate_context()
4198 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); in perf_rotate_context()
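
perf_rotate_context() (lines 4166-4198) implements counter multiplexing: with the PMU disabled, flexible events are scheduled out, the group order is rotated so a different event gets first claim on the hardware, and everything is scheduled back in. A toy rotation over an array makes the round-robin visible:

#include <stdio.h>

static void rotate(int *ev, int n)
{
        int first = ev[0];

        for (int i = 1; i < n; i++)
                ev[i - 1] = ev[i];  /* everyone moves up one slot */
        ev[n - 1] = first;          /* old head waits at the back */
}

int main(void)
{
        int flexible[] = { 100, 101, 102 };

        /* with a single hardware counter, each tick runs flexible[0] */
        for (int tick = 0; tick < 3; tick++) {
                printf("tick %d runs event %d\n", tick, flexible[0]);
                rotate(flexible, 3);
        }
        return 0;
}
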
4242 struct perf_cpu_context *cpuctx; in perf_event_enable_on_exec() local
4252 cpuctx = __get_cpu_context(ctx); in perf_event_enable_on_exec()
4253 perf_ctx_lock(cpuctx, ctx); in perf_event_enable_on_exec()
4254 ctx_sched_out(ctx, cpuctx, EVENT_TIME); in perf_event_enable_on_exec()
4265 ctx_resched(cpuctx, ctx, event_type); in perf_event_enable_on_exec()
4267 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); in perf_event_enable_on_exec()
4269 perf_ctx_unlock(cpuctx, ctx); in perf_event_enable_on_exec()
4360 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_read() local
4370 if (ctx->task && cpuctx->task_ctx != ctx) in __perf_event_read()
4629 struct perf_cpu_context *cpuctx; in find_get_context() local
4641 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in find_get_context()
4642 ctx = &cpuctx->ctx; in find_get_context()
5505 struct perf_cpu_context *cpuctx, in __perf_event_period() argument
7737 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in __perf_pmu_output_stop() local
7743 perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false); in __perf_pmu_output_stop()
7744 if (cpuctx->task_ctx) in __perf_pmu_output_stop()
7745 perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop, in __perf_pmu_output_stop()
10946 struct perf_cpu_context *cpuctx; in perf_event_mux_interval_ms_store() local
10947 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_event_mux_interval_ms_store()
10948 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); in perf_event_mux_interval_ms_store()
10951 (remote_function_f)perf_mux_hrtimer_restart, cpuctx); in perf_event_mux_interval_ms_store()
11087 struct perf_cpu_context *cpuctx; in perf_pmu_register() local
11089 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_pmu_register()
11090 __perf_event_init_context(&cpuctx->ctx); in perf_pmu_register()
11091 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); in perf_pmu_register()
11092 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); in perf_pmu_register()
11093 cpuctx->ctx.pmu = pmu; in perf_pmu_register()
11094 cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask); in perf_pmu_register()
11096 __perf_mux_hrtimer_init(cpuctx, cpu); in perf_pmu_register()
11098 cpuctx->heap_size = ARRAY_SIZE(cpuctx->heap_default); in perf_pmu_register()
11099 cpuctx->heap = cpuctx->heap_default; in perf_pmu_register()
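
The loop in perf_pmu_register() (lines 11089-11099) initializes one perf_cpu_context per CPU: context setup, lockdep classes, online state, the mux timer, and the default heap storage. A userspace stand-in for that per-CPU initialization, with a plain array in place of the kernel's per-CPU allocator and all names invented:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct cpu_ctx {
        int cpu;
        bool online;
        int heap_default[2];
        int *heap;
        int heap_size;
};

static struct cpu_ctx contexts[NR_CPUS];

static void pmu_register(void)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                struct cpu_ctx *c = &contexts[cpu];

                c->cpu = cpu;
                c->online = true;          /* perf_online_mask stand-in */
                c->heap = c->heap_default; /* start on embedded storage */
                c->heap_size = sizeof(c->heap_default) / sizeof(c->heap_default[0]);
        }
}

int main(void)
{
        pmu_register();
        printf("cpu2 heap_size = %d\n", contexts[2].heap_size);
        return 0;
}
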
12293 struct perf_cpu_context *cpuctx = in SYSCALL_DEFINE5() local
12296 if (!cpuctx->online) { in SYSCALL_DEFINE5()
12493 struct perf_cpu_context *cpuctx = in perf_event_create_kernel_counter() local
12495 if (!cpuctx->online) { in perf_event_create_kernel_counter()
13246 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_exit_context() local
13250 ctx_sched_out(ctx, cpuctx, EVENT_TIME); in __perf_event_exit_context()
13252 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); in __perf_event_exit_context()
13258 struct perf_cpu_context *cpuctx; in perf_event_exit_cpu_context() local
13264 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_event_exit_cpu_context()
13265 ctx = &cpuctx->ctx; in perf_event_exit_cpu_context()
13269 cpuctx->online = 0; in perf_event_exit_cpu_context()
13283 struct perf_cpu_context *cpuctx; in perf_event_init_cpu() local
13292 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_event_init_cpu()
13293 ctx = &cpuctx->ctx; in perf_event_init_cpu()
13296 cpuctx->online = 1; in perf_event_init_cpu()