Lines matching "event" (full-word match) in kernel/events/core.c
182 static bool is_kernel_event(struct perf_event *event) in is_kernel_event() argument
184 return READ_ONCE(event->owner) == TASK_TOMBSTONE; in is_kernel_event()
196 * - removing the last event from a task ctx; this is relatively straight
199 * - adding the first event to a task ctx; this is tricky because we cannot
210 struct perf_event *event; member
218 struct perf_event *event = efs->event; in event_function() local
219 struct perf_event_context *ctx = event->ctx; in event_function()
254 efs->func(event, cpuctx, ctx, efs->data); in event_function()
261 static void event_function_call(struct perf_event *event, event_f func, void *data) in event_function_call() argument
263 struct perf_event_context *ctx = event->ctx; in event_function_call()
266 .event = event, in event_function_call()
271 if (!event->parent) { in event_function_call()
273 * If this is a !child event, we must hold ctx::mutex to in event_function_call()
274 * stabilize the event->ctx relation. See in event_function_call()
281 cpu_function_call(event->cpu, event_function, &efs); in event_function_call()
306 func(event, NULL, ctx, data); in event_function_call()
314 static void event_function_local(struct perf_event *event, event_f func, void *data) in event_function_local() argument
316 struct perf_event_context *ctx = event->ctx; in event_function_local()
353 func(event, cpuctx, ctx, data); in event_function_local()
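The event_function_call()/event_function_local() lines above share one shape: run a callback on the CPU that currently owns the context, or call it directly when no IPI is needed. A minimal, hypothetical sketch of that shape (it ignores the retry-under-ctx::mutex loop the real code needs when the owning task migrates):

/*
 * Hypothetical helper, not kernel API: run fn(data) on 'cpu', directly when
 * we are already there, otherwise via a synchronous IPI.
 * Needs <linux/smp.h>. The real event_function_call() additionally re-checks
 * and retries under ctx->mutex/ctx->lock if the target task moves.
 */
static void run_on_cpu(int cpu, void (*fn)(void *), void *data)
{
	int this_cpu = get_cpu();	/* disable preemption while we check */

	if (cpu == this_cpu)
		fn(data);
	else
		smp_call_function_single(cpu, fn, data, 1); /* wait = 1 */

	put_cpu();
}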
413 * perf event paranoia level:
425 * max perf event sample rate
581 static u64 perf_event_time(struct perf_event *event);
590 static inline u64 perf_event_clock(struct perf_event *event) in perf_event_clock() argument
592 return event->clock(); in perf_event_clock()
596 * State based event timekeeping...
598 * The basic idea is to use event->state to determine which (if any) time
603 * Event groups make things a little more complicated, but not terribly so. The
618 __perf_effective_state(struct perf_event *event) in __perf_effective_state() argument
620 struct perf_event *leader = event->group_leader; in __perf_effective_state()
625 return event->state; in __perf_effective_state()
629 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running) in __perf_update_times() argument
631 enum perf_event_state state = __perf_effective_state(event); in __perf_update_times()
632 u64 delta = now - event->tstamp; in __perf_update_times()
634 *enabled = event->total_time_enabled; in __perf_update_times()
638 *running = event->total_time_running; in __perf_update_times()
643 static void perf_event_update_time(struct perf_event *event) in perf_event_update_time() argument
645 u64 now = perf_event_time(event); in perf_event_update_time()
647 __perf_update_times(event, now, &event->total_time_enabled, in perf_event_update_time()
648 &event->total_time_running); in perf_event_update_time()
649 event->tstamp = now; in perf_event_update_time()
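A compact way to read the __perf_update_times()/perf_event_update_time() logic above: the delta since event->tstamp is credited to total_time_enabled whenever the effective state is INACTIVE or better, and to total_time_running only while ACTIVE. A self-contained sketch with simplified types (field and enum names mirror the kernel ones, but this is illustrative, not kernel code):

/* Simplified model of the state-based time accounting; standalone sketch. */
enum state { STATE_OFF = -1, STATE_INACTIVE = 0, STATE_ACTIVE = 1 };

struct evt {
	enum state state;
	unsigned long long tstamp;
	unsigned long long total_time_enabled;
	unsigned long long total_time_running;
};

static void update_times(struct evt *e, unsigned long long now)
{
	unsigned long long delta = now - e->tstamp;

	if (e->state >= STATE_INACTIVE)		/* enabled: INACTIVE or ACTIVE */
		e->total_time_enabled += delta;
	if (e->state == STATE_ACTIVE)		/* running: ACTIVE only */
		e->total_time_running += delta;

	e->tstamp = now;
}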
661 perf_event_set_state(struct perf_event *event, enum perf_event_state state) in perf_event_set_state() argument
663 if (event->state == state) in perf_event_set_state()
666 perf_event_update_time(event); in perf_event_set_state()
671 if ((event->state < 0) ^ (state < 0)) in perf_event_set_state()
672 perf_event_update_sibling_time(event); in perf_event_set_state()
674 WRITE_ONCE(event->state, state); in perf_event_set_state()
697 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
699 struct perf_event_context *ctx = event->ctx; in perf_cgroup_match()
702 /* @event doesn't care about cgroup */ in perf_cgroup_match()
703 if (!event->cgrp) in perf_cgroup_match()
711 * Cgroup scoping is recursive. An event enabled for a cgroup is in perf_cgroup_match()
713 * cgroup is a descendant of @event's (the test covers identity in perf_cgroup_match()
717 event->cgrp->css.cgroup); in perf_cgroup_match()
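perf_cgroup_match() above implements recursive cgroup scoping: an event attached to cgroup A also counts while the CPU runs a task in any descendant of A (identity included). A one-line sketch of that test, assuming cgroup_is_descendant() semantics; the helper name is illustrative:

/* Illustrative only (needs <linux/cgroup.h>): 'task_cg' is the cgroup the CPU
 * is currently running, 'event_cg' is the cgroup the event was attached to. */
static bool cgroup_scope_matches(struct cgroup *task_cg, struct cgroup *event_cg)
{
	/* identity is covered: a cgroup is a descendant of itself */
	return cgroup_is_descendant(task_cg, event_cg);
}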
720 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
722 css_put(&event->cgrp->css); in perf_detach_cgroup()
723 event->cgrp = NULL; in perf_detach_cgroup()
726 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
728 return event->cgrp != NULL; in is_cgroup_event()
731 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
735 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time()
739 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) in perf_cgroup_event_time_now() argument
743 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time_now()
781 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
789 if (!is_cgroup_event(event)) in update_cgrp_time_from_event()
792 info = this_cpu_ptr(event->cgrp->info); in update_cgrp_time_from_event()
876 static int perf_cgroup_ensure_storage(struct perf_event *event, in perf_cgroup_ensure_storage() argument
891 cpuctx = per_cpu_ptr(event->pmu->pmu_cpu_context, cpu); in perf_cgroup_ensure_storage()
917 static inline int perf_cgroup_connect(int fd, struct perf_event *event, in perf_cgroup_connect() argument
936 ret = perf_cgroup_ensure_storage(event, css); in perf_cgroup_connect()
941 event->cgrp = cgrp; in perf_cgroup_connect()
949 perf_detach_cgroup(event); in perf_cgroup_connect()
958 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
962 if (!is_cgroup_event(event)) in perf_cgroup_event_enable()
976 per_cpu_ptr(&cgrp_cpuctx_list, event->cpu)); in perf_cgroup_event_enable()
980 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
984 if (!is_cgroup_event(event)) in perf_cgroup_event_disable()
1003 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
1008 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
1011 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
1016 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
1025 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, in perf_cgroup_connect() argument
1037 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
1042 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) in perf_cgroup_event_time_now() argument
1048 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
1053 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1249 * because the sys_perf_event_open() case will install a new event and break
1260 * quiesce the event, after which we can install it in the new location. This
1261 * means that only external vectors (perf_fops, prctl) can perturb the event
1265 * However; because event->ctx can change while we're waiting to acquire
1284 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) in perf_event_ctx_lock_nested() argument
1290 ctx = READ_ONCE(event->ctx); in perf_event_ctx_lock_nested()
1298 if (event->ctx != ctx) { in perf_event_ctx_lock_nested()
1308 perf_event_ctx_lock(struct perf_event *event) in perf_event_ctx_lock() argument
1310 return perf_event_ctx_lock_nested(event, 0); in perf_event_ctx_lock()
1313 static void perf_event_ctx_unlock(struct perf_event *event, in perf_event_ctx_unlock() argument
1339 static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p, in perf_event_pid_type() argument
1346 if (event->parent) in perf_event_pid_type()
1347 event = event->parent; in perf_event_pid_type()
1349 nr = __task_pid_nr_ns(p, type, event->ns); in perf_event_pid_type()
1356 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) in perf_event_pid() argument
1358 return perf_event_pid_type(event, p, PIDTYPE_TGID); in perf_event_pid()
1361 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) in perf_event_tid() argument
1363 return perf_event_pid_type(event, p, PIDTYPE_PID); in perf_event_tid()
1367 * If we inherit events we want to return the parent event id
1370 static u64 primary_event_id(struct perf_event *event) in primary_event_id() argument
1372 u64 id = event->id; in primary_event_id()
1374 if (event->parent) in primary_event_id()
1375 id = event->parent->id; in primary_event_id()
1495 static u64 perf_event_time(struct perf_event *event) in perf_event_time() argument
1497 struct perf_event_context *ctx = event->ctx; in perf_event_time()
1502 if (is_cgroup_event(event)) in perf_event_time()
1503 return perf_cgroup_event_time(event); in perf_event_time()
1508 static u64 perf_event_time_now(struct perf_event *event, u64 now) in perf_event_time_now() argument
1510 struct perf_event_context *ctx = event->ctx; in perf_event_time_now()
1515 if (is_cgroup_event(event)) in perf_event_time_now()
1516 return perf_cgroup_event_time_now(event, now); in perf_event_time_now()
1525 static enum event_type_t get_event_type(struct perf_event *event) in get_event_type() argument
1527 struct perf_event_context *ctx = event->ctx; in get_event_type()
1536 if (event->group_leader != event) in get_event_type()
1537 event = event->group_leader; in get_event_type()
1539 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE; in get_event_type()
1547 * Helper function to initialize event group nodes.
1549 static void init_event_group(struct perf_event *event) in init_event_group() argument
1551 RB_CLEAR_NODE(&event->group_node); in init_event_group()
1552 event->group_index = 0; in init_event_group()
1557 * based on event attrs bits.
1560 get_event_groups(struct perf_event *event, struct perf_event_context *ctx) in get_event_groups() argument
1562 if (event->attr.pinned) in get_event_groups()
1577 static inline struct cgroup *event_cgroup(const struct perf_event *event) in event_cgroup() argument
1582 if (event->cgrp) in event_cgroup()
1583 cgroup = event->cgrp->css.cgroup; in event_cgroup()
1590 * Compare function for event groups;
1665 * Insert @event into @groups' tree; using {@event->cpu, ++@groups->index} for
1671 struct perf_event *event) in perf_event_groups_insert() argument
1673 event->group_index = ++groups->index; in perf_event_groups_insert()
1675 rb_add(&event->group_node, &groups->tree, __group_less); in perf_event_groups_insert()
1679 * Helper function to insert event into the pinned or flexible groups.
1682 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx) in add_event_to_groups() argument
1686 groups = get_event_groups(event, ctx); in add_event_to_groups()
1687 perf_event_groups_insert(groups, event); in add_event_to_groups()
1695 struct perf_event *event) in perf_event_groups_delete() argument
1697 WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) || in perf_event_groups_delete()
1700 rb_erase(&event->group_node, &groups->tree); in perf_event_groups_delete()
1701 init_event_group(event); in perf_event_groups_delete()
1705 * Helper function to delete event from its groups.
1708 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) in del_event_from_groups() argument
1712 groups = get_event_groups(event, ctx); in del_event_from_groups()
1713 perf_event_groups_delete(groups, event); in del_event_from_groups()
1717 * Get the leftmost event in the cpu/cgroup subtree.
1740 perf_event_groups_next(struct perf_event *event) in perf_event_groups_next() argument
1743 .cpu = event->cpu, in perf_event_groups_next()
1744 .cgroup = event_cgroup(event), in perf_event_groups_next()
1748 next = rb_next_match(&key, &event->group_node, __group_cmp); in perf_event_groups_next()
1758 #define perf_event_groups_for_each(event, groups) \ argument
1759 for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
1760 typeof(*event), group_node); event; \
1761 event = rb_entry_safe(rb_next(&event->group_node), \
1762 typeof(*event), group_node))
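The pinned/flexible group trees above are keyed by {cpu, cgroup, group_index}; the monotonically increasing group_index acts as the tie-breaker, so iteration yields same-CPU (and same-cgroup) groups in insertion order. A hedged sketch of that comparison order, with illustrative field names rather than the exact kernel comparator:

/* Conceptual three-level key comparison behind the groups rbtree. */
struct group_key {
	int cpu;
	unsigned long cgroup_id;	/* stand-in for the cgroup comparison */
	unsigned long long index;	/* ++groups->index at insertion time */
};

static int group_key_cmp(const struct group_key *l, const struct group_key *r)
{
	if (l->cpu != r->cpu)
		return l->cpu < r->cpu ? -1 : 1;
	if (l->cgroup_id != r->cgroup_id)
		return l->cgroup_id < r->cgroup_id ? -1 : 1;
	if (l->index != r->index)
		return l->index < r->index ? -1 : 1;
	return 0;
}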
1765 * Add an event from the lists for its context.
1769 list_add_event(struct perf_event *event, struct perf_event_context *ctx) in list_add_event() argument
1773 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); in list_add_event()
1774 event->attach_state |= PERF_ATTACH_CONTEXT; in list_add_event()
1776 event->tstamp = perf_event_time(event); in list_add_event()
1779 * If we're a stand alone event or group leader, we go to the context in list_add_event()
1783 if (event->group_leader == event) { in list_add_event()
1784 event->group_caps = event->event_caps; in list_add_event()
1785 add_event_to_groups(event, ctx); in list_add_event()
1788 list_add_rcu(&event->event_entry, &ctx->event_list); in list_add_event()
1790 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) in list_add_event()
1792 if (event->attr.inherit_stat) in list_add_event()
1795 if (event->state > PERF_EVENT_STATE_OFF) in list_add_event()
1796 perf_cgroup_event_enable(event, ctx); in list_add_event()
1802 * Initialize event state based on the perf_event_attr::disabled.
1804 static inline void perf_event__state_init(struct perf_event *event) in perf_event__state_init() argument
1806 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : in perf_event__state_init()
1810 static void __perf_event_read_size(struct perf_event *event, int nr_siblings) in __perf_event_read_size() argument
1816 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) in __perf_event_read_size()
1819 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) in __perf_event_read_size()
1822 if (event->attr.read_format & PERF_FORMAT_ID) in __perf_event_read_size()
1825 if (event->attr.read_format & PERF_FORMAT_LOST) in __perf_event_read_size()
1828 if (event->attr.read_format & PERF_FORMAT_GROUP) { in __perf_event_read_size()
1834 event->read_size = size; in __perf_event_read_size()
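__perf_event_read_size() above sizes the buffer a read() will fill according to attr.read_format. For reference, a sketch of the userspace-visible layout when PERF_FORMAT_GROUP is combined with the time and ID bits; this mirrors the perf_event_open(2) ABI, not a kernel structure:

/* What a group read() returns with PERF_FORMAT_GROUP |
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING |
 * PERF_FORMAT_ID: one header, then one {value, id} pair per group member. */
struct group_read_format {
	unsigned long long nr;			/* number of events in the group */
	unsigned long long time_enabled;
	unsigned long long time_running;
	struct {
		unsigned long long value;
		unsigned long long id;
	} values[];				/* nr entries */
};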
1837 static void __perf_event_header_size(struct perf_event *event, u64 sample_type) in __perf_event_header_size() argument
1855 size += event->read_size; in __perf_event_header_size()
1875 event->header_size = size; in __perf_event_header_size()
1882 static void perf_event__header_size(struct perf_event *event) in perf_event__header_size() argument
1884 __perf_event_read_size(event, in perf_event__header_size()
1885 event->group_leader->nr_siblings); in perf_event__header_size()
1886 __perf_event_header_size(event, event->attr.sample_type); in perf_event__header_size()
1889 static void perf_event__id_header_size(struct perf_event *event) in perf_event__id_header_size() argument
1892 u64 sample_type = event->attr.sample_type; in perf_event__id_header_size()
1913 event->id_header_size = size; in perf_event__id_header_size()
1916 static bool perf_event_validate_size(struct perf_event *event) in perf_event_validate_size() argument
1920 * attach the event. in perf_event_validate_size()
1922 __perf_event_read_size(event, event->group_leader->nr_siblings + 1); in perf_event_validate_size()
1923 __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); in perf_event_validate_size()
1924 perf_event__id_header_size(event); in perf_event_validate_size()
1930 if (event->read_size + event->header_size + in perf_event_validate_size()
1931 event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) in perf_event_validate_size()
1937 static void perf_group_attach(struct perf_event *event) in perf_group_attach() argument
1939 struct perf_event *group_leader = event->group_leader, *pos; in perf_group_attach()
1941 lockdep_assert_held(&event->ctx->lock); in perf_group_attach()
1946 if (event->attach_state & PERF_ATTACH_GROUP) in perf_group_attach()
1949 event->attach_state |= PERF_ATTACH_GROUP; in perf_group_attach()
1951 if (group_leader == event) in perf_group_attach()
1954 WARN_ON_ONCE(group_leader->ctx != event->ctx); in perf_group_attach()
1956 group_leader->group_caps &= event->event_caps; in perf_group_attach()
1958 list_add_tail(&event->sibling_list, &group_leader->sibling_list); in perf_group_attach()
1968 * Remove an event from the lists for its context.
1972 list_del_event(struct perf_event *event, struct perf_event_context *ctx) in list_del_event() argument
1974 WARN_ON_ONCE(event->ctx != ctx); in list_del_event()
1980 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) in list_del_event()
1983 event->attach_state &= ~PERF_ATTACH_CONTEXT; in list_del_event()
1986 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) in list_del_event()
1988 if (event->attr.inherit_stat) in list_del_event()
1991 list_del_rcu(&event->event_entry); in list_del_event()
1993 if (event->group_leader == event) in list_del_event()
1994 del_event_from_groups(event, ctx); in list_del_event()
1997 * If event was in error state, then keep it in list_del_event()
2001 * of the event in list_del_event()
2003 if (event->state > PERF_EVENT_STATE_OFF) { in list_del_event()
2004 perf_cgroup_event_disable(event, ctx); in list_del_event()
2005 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in list_del_event()
2012 perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event) in perf_aux_output_match() argument
2017 if (!event->pmu->aux_output_match) in perf_aux_output_match()
2020 return event->pmu->aux_output_match(aux_event); in perf_aux_output_match()
2023 static void put_event(struct perf_event *event);
2024 static void event_sched_out(struct perf_event *event,
2028 static void perf_put_aux_event(struct perf_event *event) in perf_put_aux_event() argument
2030 struct perf_event_context *ctx = event->ctx; in perf_put_aux_event()
2035 * If event uses aux_event tear down the link in perf_put_aux_event()
2037 if (event->aux_event) { in perf_put_aux_event()
2038 iter = event->aux_event; in perf_put_aux_event()
2039 event->aux_event = NULL; in perf_put_aux_event()
2045 * If the event is an aux_event, tear down all links to in perf_put_aux_event()
2048 for_each_sibling_event(iter, event->group_leader) { in perf_put_aux_event()
2049 if (iter->aux_event != event) in perf_put_aux_event()
2053 put_event(event); in perf_put_aux_event()
2061 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_put_aux_event()
2065 static bool perf_need_aux_event(struct perf_event *event) in perf_need_aux_event() argument
2067 return !!event->attr.aux_output || !!event->attr.aux_sample_size; in perf_need_aux_event()
2070 static int perf_get_aux_event(struct perf_event *event, in perf_get_aux_event() argument
2074 * Our group leader must be an aux event if we want to be in perf_get_aux_event()
2075 * an aux_output. This way, the aux event will precede its in perf_get_aux_event()
2085 if (event->attr.aux_output && event->attr.aux_sample_size) in perf_get_aux_event()
2088 if (event->attr.aux_output && in perf_get_aux_event()
2089 !perf_aux_output_match(event, group_leader)) in perf_get_aux_event()
2092 if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux) in perf_get_aux_event()
2099 * Link aux_outputs to their aux event; this is undone in in perf_get_aux_event()
2104 event->aux_event = group_leader; in perf_get_aux_event()
2109 static inline struct list_head *get_event_list(struct perf_event *event) in get_event_list() argument
2111 struct perf_event_context *ctx = event->ctx; in get_event_list()
2112 return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active; in get_event_list()
2121 static inline void perf_remove_sibling_event(struct perf_event *event) in perf_remove_sibling_event() argument
2123 struct perf_event_context *ctx = event->ctx; in perf_remove_sibling_event()
2126 event_sched_out(event, cpuctx, ctx); in perf_remove_sibling_event()
2127 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_remove_sibling_event()
2130 static void perf_group_detach(struct perf_event *event) in perf_group_detach() argument
2132 struct perf_event *leader = event->group_leader; in perf_group_detach()
2134 struct perf_event_context *ctx = event->ctx; in perf_group_detach()
2141 if (!(event->attach_state & PERF_ATTACH_GROUP)) in perf_group_detach()
2144 event->attach_state &= ~PERF_ATTACH_GROUP; in perf_group_detach()
2146 perf_put_aux_event(event); in perf_group_detach()
2151 if (leader != event) { in perf_group_detach()
2152 list_del_init(&event->sibling_list); in perf_group_detach()
2153 event->group_leader->nr_siblings--; in perf_group_detach()
2158 * If this was a group event with sibling events then in perf_group_detach()
2162 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { in perf_group_detach()
2171 sibling->group_caps = event->group_caps; in perf_group_detach()
2173 if (!RB_EMPTY_NODE(&event->group_node)) { in perf_group_detach()
2174 add_event_to_groups(sibling, event->ctx); in perf_group_detach()
2180 WARN_ON_ONCE(sibling->ctx != event->ctx); in perf_group_detach()
2192 static void perf_child_detach(struct perf_event *event) in perf_child_detach() argument
2194 struct perf_event *parent_event = event->parent; in perf_child_detach()
2196 if (!(event->attach_state & PERF_ATTACH_CHILD)) in perf_child_detach()
2199 event->attach_state &= ~PERF_ATTACH_CHILD; in perf_child_detach()
2206 sync_child_event(event); in perf_child_detach()
2207 list_del_init(&event->child_list); in perf_child_detach()
2210 static bool is_orphaned_event(struct perf_event *event) in is_orphaned_event() argument
2212 return event->state == PERF_EVENT_STATE_DEAD; in is_orphaned_event()
2215 static inline int __pmu_filter_match(struct perf_event *event) in __pmu_filter_match() argument
2217 struct pmu *pmu = event->pmu; in __pmu_filter_match()
2218 return pmu->filter_match ? pmu->filter_match(event) : 1; in __pmu_filter_match()
2222 * Check whether we should attempt to schedule an event group based on
2223 * PMU-specific filtering. An event group can consist of HW and SW events,
2227 static inline int pmu_filter_match(struct perf_event *event) in pmu_filter_match() argument
2233 if (!__pmu_filter_match(event)) in pmu_filter_match()
2237 for_each_sibling_event(sibling, event) { in pmu_filter_match()
2249 event_filter_match(struct perf_event *event) in event_filter_match() argument
2251 return (event->cpu == -1 || event->cpu == smp_processor_id()) && in event_filter_match()
2252 perf_cgroup_match(event) && pmu_filter_match(event); in event_filter_match()
2256 event_sched_out(struct perf_event *event, in event_sched_out() argument
2262 WARN_ON_ONCE(event->ctx != ctx); in event_sched_out()
2265 if (event->state != PERF_EVENT_STATE_ACTIVE) in event_sched_out()
2273 list_del_init(&event->active_list); in event_sched_out()
2275 perf_pmu_disable(event->pmu); in event_sched_out()
2277 event->pmu->del(event, 0); in event_sched_out()
2278 event->oncpu = -1; in event_sched_out()
2280 if (event->pending_disable) { in event_sched_out()
2281 event->pending_disable = 0; in event_sched_out()
2282 perf_cgroup_event_disable(event, ctx); in event_sched_out()
2286 if (event->pending_sigtrap) { in event_sched_out()
2289 event->pending_sigtrap = 0; in event_sched_out()
2291 !event->pending_work) { in event_sched_out()
2292 event->pending_work = 1; in event_sched_out()
2294 WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount)); in event_sched_out()
2295 task_work_add(current, &event->pending_task, TWA_RESUME); in event_sched_out()
2298 local_dec(&event->ctx->nr_pending); in event_sched_out()
2301 perf_event_set_state(event, state); in event_sched_out()
2303 if (!is_software_event(event)) in event_sched_out()
2307 if (event->attr.freq && event->attr.sample_freq) in event_sched_out()
2309 if (event->attr.exclusive || !cpuctx->active_oncpu) in event_sched_out()
2312 perf_pmu_enable(event->pmu); in event_sched_out()
2320 struct perf_event *event; in group_sched_out() local
2332 for_each_sibling_event(event, group_event) in group_sched_out()
2333 event_sched_out(event, cpuctx, ctx); in group_sched_out()
2343 * Cross CPU call to remove a performance event
2345 * We disable the event on the hardware level first. After that we
2349 __perf_remove_from_context(struct perf_event *event, in __perf_remove_from_context() argument
2366 event->pending_disable = 1; in __perf_remove_from_context()
2367 event_sched_out(event, cpuctx, ctx); in __perf_remove_from_context()
2369 perf_group_detach(event); in __perf_remove_from_context()
2371 perf_child_detach(event); in __perf_remove_from_context()
2372 list_del_event(event, ctx); in __perf_remove_from_context()
2374 event->state = PERF_EVENT_STATE_DEAD; in __perf_remove_from_context()
2390 * Remove the event from a task's (or a CPU's) list of events.
2392 * If event->ctx is a cloned context, callers must make sure that
2393 * every task struct that event->ctx->task could possibly point to
2399 static void perf_remove_from_context(struct perf_event *event, unsigned long flags) in perf_remove_from_context() argument
2401 struct perf_event_context *ctx = event->ctx; in perf_remove_from_context()
2415 if (!ctx->is_active && !is_cgroup_event(event)) { in perf_remove_from_context()
2416 __perf_remove_from_context(event, __get_cpu_context(ctx), in perf_remove_from_context()
2423 event_function_call(event, __perf_remove_from_context, (void *)flags); in perf_remove_from_context()
2427 * Cross CPU call to disable a performance event
2429 static void __perf_event_disable(struct perf_event *event, in __perf_event_disable() argument
2434 if (event->state < PERF_EVENT_STATE_INACTIVE) in __perf_event_disable()
2439 update_cgrp_time_from_event(event); in __perf_event_disable()
2442 if (event == event->group_leader) in __perf_event_disable()
2443 group_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2445 event_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2447 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in __perf_event_disable()
2448 perf_cgroup_event_disable(event, ctx); in __perf_event_disable()
2452 * Disable an event.
2454 * If event->ctx is a cloned context, callers must make sure that
2455 * every task struct that event->ctx->task could possibly point to
2458 * hold the top-level event's child_mutex, so any descendant that
2461 * When called from perf_pending_irq it's OK because event->ctx
2465 static void _perf_event_disable(struct perf_event *event) in _perf_event_disable() argument
2467 struct perf_event_context *ctx = event->ctx; in _perf_event_disable()
2470 if (event->state <= PERF_EVENT_STATE_OFF) { in _perf_event_disable()
2476 event_function_call(event, __perf_event_disable, NULL); in _perf_event_disable()
2479 void perf_event_disable_local(struct perf_event *event) in perf_event_disable_local() argument
2481 event_function_local(event, __perf_event_disable, NULL); in perf_event_disable_local()
2488 void perf_event_disable(struct perf_event *event) in perf_event_disable() argument
2492 ctx = perf_event_ctx_lock(event); in perf_event_disable()
2493 _perf_event_disable(event); in perf_event_disable()
2494 perf_event_ctx_unlock(event, ctx); in perf_event_disable()
2498 void perf_event_disable_inatomic(struct perf_event *event) in perf_event_disable_inatomic() argument
2500 event->pending_disable = 1; in perf_event_disable_inatomic()
2501 irq_work_queue(&event->pending_irq); in perf_event_disable_inatomic()
2506 static void perf_log_throttle(struct perf_event *event, int enable);
2507 static void perf_log_itrace_start(struct perf_event *event);
2510 event_sched_in(struct perf_event *event, in event_sched_in() argument
2516 WARN_ON_ONCE(event->ctx != ctx); in event_sched_in()
2520 if (event->state <= PERF_EVENT_STATE_OFF) in event_sched_in()
2523 WRITE_ONCE(event->oncpu, smp_processor_id()); in event_sched_in()
2525 * Order event::oncpu write to happen before the ACTIVE state is in event_sched_in()
2530 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE); in event_sched_in()
2537 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { in event_sched_in()
2538 perf_log_throttle(event, 1); in event_sched_in()
2539 event->hw.interrupts = 0; in event_sched_in()
2542 perf_pmu_disable(event->pmu); in event_sched_in()
2544 perf_log_itrace_start(event); in event_sched_in()
2546 if (event->pmu->add(event, PERF_EF_START)) { in event_sched_in()
2547 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_sched_in()
2548 event->oncpu = -1; in event_sched_in()
2553 if (!is_software_event(event)) in event_sched_in()
2557 if (event->attr.freq && event->attr.sample_freq) in event_sched_in()
2560 if (event->attr.exclusive) in event_sched_in()
2564 perf_pmu_enable(event->pmu); in event_sched_in()
2574 struct perf_event *event, *partial_group = NULL; in group_sched_in() local
2588 for_each_sibling_event(event, group_event) { in group_sched_in()
2589 if (event_sched_in(event, cpuctx, ctx)) { in group_sched_in()
2590 partial_group = event; in group_sched_in()
2602 * The events up to the failed event are scheduled out normally. in group_sched_in()
2604 for_each_sibling_event(event, group_event) { in group_sched_in()
2605 if (event == partial_group) in group_sched_in()
2608 event_sched_out(event, cpuctx, ctx); in group_sched_in()
2618 * Work out whether we can put this event group on the CPU now.
2620 static int group_can_go_on(struct perf_event *event, in group_can_go_on() argument
2627 if (event->group_caps & PERF_EV_CAP_SOFTWARE) in group_can_go_on()
2639 if (event->attr.exclusive && !list_empty(get_event_list(event))) in group_can_go_on()
2648 static void add_event_to_ctx(struct perf_event *event, in add_event_to_ctx() argument
2651 list_add_event(event, ctx); in add_event_to_ctx()
2652 perf_group_attach(event); in add_event_to_ctx()
2695 * time an event is added, only do it for the groups of equal priority and
2749 * Cross CPU call to install and enable a performance event
2756 struct perf_event *event = info; in __perf_install_in_context() local
2757 struct perf_event_context *ctx = event->ctx; in __perf_install_in_context()
2788 if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) { in __perf_install_in_context()
2790 * If the current cgroup doesn't match the event's in __perf_install_in_context()
2795 event->cgrp->css.cgroup); in __perf_install_in_context()
2801 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2802 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_install_in_context()
2804 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2813 static bool exclusive_event_installable(struct perf_event *event,
2817 * Attach a performance event to a context.
2823 struct perf_event *event, in perf_install_in_context() argument
2830 WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); in perf_install_in_context()
2832 if (event->cpu != -1) in perf_install_in_context()
2833 event->cpu = cpu; in perf_install_in_context()
2836 * Ensures that if we can observe event->ctx, both the event and ctx in perf_install_in_context()
2839 smp_store_release(&event->ctx, ctx); in perf_install_in_context()
2843 * without IPI. Except when this is the first event for the context, in in perf_install_in_context()
2849 * event will issue the IPI and reprogram the hardware. in perf_install_in_context()
2851 if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && in perf_install_in_context()
2852 ctx->nr_events && !is_cgroup_event(event)) { in perf_install_in_context()
2858 add_event_to_ctx(event, ctx); in perf_install_in_context()
2864 cpu_function_call(cpu, __perf_install_in_context, event); in perf_install_in_context()
2906 if (!task_function_call(task, __perf_install_in_context, event)) in perf_install_in_context()
2922 * thus we can safely install the event. in perf_install_in_context()
2928 add_event_to_ctx(event, ctx); in perf_install_in_context()
2933 * Cross CPU call to enable a performance event
2935 static void __perf_event_enable(struct perf_event *event, in __perf_event_enable() argument
2940 struct perf_event *leader = event->group_leader; in __perf_event_enable()
2943 if (event->state >= PERF_EVENT_STATE_INACTIVE || in __perf_event_enable()
2944 event->state <= PERF_EVENT_STATE_ERROR) in __perf_event_enable()
2950 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in __perf_event_enable()
2951 perf_cgroup_event_enable(event, ctx); in __perf_event_enable()
2956 if (!event_filter_match(event)) { in __perf_event_enable()
2962 * If the event is in a group and isn't the group leader, in __perf_event_enable()
2965 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) { in __perf_event_enable()
2974 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_event_enable()
2978 * Enable an event.
2980 * If event->ctx is a cloned context, callers must make sure that
2981 * every task struct that event->ctx->task could possibly point to
2986 static void _perf_event_enable(struct perf_event *event) in _perf_event_enable() argument
2988 struct perf_event_context *ctx = event->ctx; in _perf_event_enable()
2991 if (event->state >= PERF_EVENT_STATE_INACTIVE || in _perf_event_enable()
2992 event->state < PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
2999 * If the event is in error state, clear that first. in _perf_event_enable()
3001 * That way, if we see the event in error state below, we know that it in _perf_event_enable()
3005 if (event->state == PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
3009 if (event->event_caps & PERF_EV_CAP_SIBLING && in _perf_event_enable()
3010 event->group_leader == event) in _perf_event_enable()
3013 event->state = PERF_EVENT_STATE_OFF; in _perf_event_enable()
3017 event_function_call(event, __perf_event_enable, NULL); in _perf_event_enable()
3023 void perf_event_enable(struct perf_event *event) in perf_event_enable() argument
3027 ctx = perf_event_ctx_lock(event); in perf_event_enable()
3028 _perf_event_enable(event); in perf_event_enable()
3029 perf_event_ctx_unlock(event, ctx); in perf_event_enable()
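perf_event_disable()/perf_event_enable() above are also the entry points for in-kernel users. A hedged sketch of how a hypothetical kernel caller might drive them around a counter created with perf_event_create_kernel_counter(); attr setup and error handling are reduced to the essentials:

/* Hypothetical in-kernel user; 'cycle_event' and both functions are
 * illustrative, not existing kernel code. Needs <linux/perf_event.h> and
 * <linux/err.h>. */
static struct perf_event *cycle_event;

static int start_cycle_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.disabled	= 1,
	};

	cycle_event = perf_event_create_kernel_counter(&attr, cpu, NULL,
						       NULL, NULL);
	if (IS_ERR(cycle_event))
		return PTR_ERR(cycle_event);

	perf_event_enable(cycle_event);		/* may IPI the owning CPU */
	return 0;
}

static void stop_cycle_counter(void)
{
	perf_event_disable(cycle_event);
	perf_event_release_kernel(cycle_event);
}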
3034 struct perf_event *event; member
3041 struct perf_event *event = sd->event; in __perf_event_stop() local
3044 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in __perf_event_stop()
3052 * so we need to check again lest we try to stop another CPU's event. in __perf_event_stop()
3054 if (READ_ONCE(event->oncpu) != smp_processor_id()) in __perf_event_stop()
3057 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_stop()
3065 * Since this is happening on an event-local CPU, no trace is lost in __perf_event_stop()
3069 event->pmu->start(event, 0); in __perf_event_stop()
3074 static int perf_event_stop(struct perf_event *event, int restart) in perf_event_stop() argument
3077 .event = event, in perf_event_stop()
3083 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in perf_event_stop()
3090 * We only want to restart ACTIVE events, so if the event goes in perf_event_stop()
3091 * inactive here (event->oncpu==-1), there's nothing more to do; in perf_event_stop()
3094 ret = cpu_function_call(READ_ONCE(event->oncpu), in perf_event_stop()
3107 * event::addr_filter_ranges array and bump the event::addr_filters_gen;
3108 * (p2) when an event is scheduled in (pmu::add), it calls
3112 * If (p1) happens while the event is active, we restart it to force (p2).
3123 void perf_event_addr_filters_sync(struct perf_event *event) in perf_event_addr_filters_sync() argument
3125 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_sync()
3127 if (!has_addr_filter(event)) in perf_event_addr_filters_sync()
3131 if (event->addr_filters_gen != event->hw.addr_filters_gen) { in perf_event_addr_filters_sync()
3132 event->pmu->addr_filters_sync(event); in perf_event_addr_filters_sync()
3133 event->hw.addr_filters_gen = event->addr_filters_gen; in perf_event_addr_filters_sync()
3139 static int _perf_event_refresh(struct perf_event *event, int refresh) in _perf_event_refresh() argument
3144 if (event->attr.inherit || !is_sampling_event(event)) in _perf_event_refresh()
3147 atomic_add(refresh, &event->event_limit); in _perf_event_refresh()
3148 _perf_event_enable(event); in _perf_event_refresh()
3156 int perf_event_refresh(struct perf_event *event, int refresh) in perf_event_refresh() argument
3161 ctx = perf_event_ctx_lock(event); in perf_event_refresh()
3162 ret = _perf_event_refresh(event, refresh); in perf_event_refresh()
3163 perf_event_ctx_unlock(event, ctx); in perf_event_refresh()
3185 * Copy event-type-independent attributes that may be modified.
3193 static int perf_event_modify_attr(struct perf_event *event, in perf_event_modify_attr() argument
3200 if (event->attr.type != attr->type) in perf_event_modify_attr()
3203 switch (event->attr.type) { in perf_event_modify_attr()
3212 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_modify_attr()
3214 mutex_lock(&event->child_mutex); in perf_event_modify_attr()
3216 * Event-type-independent attributes must be copied before event-type in perf_event_modify_attr()
3220 perf_event_modify_copy_attr(&event->attr, attr); in perf_event_modify_attr()
3221 err = func(event, attr); in perf_event_modify_attr()
3224 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_modify_attr()
3231 mutex_unlock(&event->child_mutex); in perf_event_modify_attr()
3239 struct perf_event *event, *tmp; in ctx_sched_out() local
3292 list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list) in ctx_sched_out()
3293 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
3297 list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list) in ctx_sched_out()
3298 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
3348 static void __perf_event_sync_stat(struct perf_event *event, in __perf_event_sync_stat() argument
3353 if (!event->attr.inherit_stat) in __perf_event_sync_stat()
3357 * Update the event value, we cannot use perf_event_read() in __perf_event_sync_stat()
3360 * we know the event must be on the current CPU, therefore we in __perf_event_sync_stat()
3363 if (event->state == PERF_EVENT_STATE_ACTIVE) in __perf_event_sync_stat()
3364 event->pmu->read(event); in __perf_event_sync_stat()
3366 perf_event_update_time(event); in __perf_event_sync_stat()
3369 * In order to keep per-task stats reliable we need to flip the event in __perf_event_sync_stat()
3373 value = local64_xchg(&event->count, value); in __perf_event_sync_stat()
3376 swap(event->total_time_enabled, next_event->total_time_enabled); in __perf_event_sync_stat()
3377 swap(event->total_time_running, next_event->total_time_running); in __perf_event_sync_stat()
3382 perf_event_update_userpage(event); in __perf_event_sync_stat()
3389 struct perf_event *event, *next_event; in perf_event_sync_stat() local
3396 event = list_first_entry(&ctx->event_list, in perf_event_sync_stat()
3402 while (&event->event_entry != &ctx->event_list && in perf_event_sync_stat()
3405 __perf_event_sync_stat(event, next_event); in perf_event_sync_stat()
3407 event = list_next_entry(event, event_entry); in perf_event_sync_stat()
3550 * This callback is relevant even to per-cpu events; for example multi event
3600 * We stop each event and update the event value in event->count.
3603 * sets the disabled bit in the control field of event _before_
3604 * accessing the event control register. If a NMI hits, then it will
3605 * not restart the event.
3624 * cgroup event are system-wide mode only in __perf_event_task_sched_out()
3660 static void __heap_add(struct min_heap *heap, struct perf_event *event) in __heap_add() argument
3664 if (event) { in __heap_add()
3665 itrs[heap->nr] = event; in __heap_add()
3678 /* Space for per CPU and/or any CPU event iterators. */ in visit_groups_merge()
3733 * Because the userpage is strictly per-event (there is no concept of context,
3739 static inline bool event_update_userpage(struct perf_event *event) in event_update_userpage() argument
3741 if (likely(!atomic_read(&event->mmap_count))) in event_update_userpage()
3744 perf_event_update_time(event); in event_update_userpage()
3745 perf_event_update_userpage(event); in event_update_userpage()
3752 struct perf_event *event; in group_update_userpage() local
3757 for_each_sibling_event(event, group_event) in group_update_userpage()
3758 event_update_userpage(event); in group_update_userpage()
3761 static int merge_sched_in(struct perf_event *event, void *data) in merge_sched_in() argument
3763 struct perf_event_context *ctx = event->ctx; in merge_sched_in()
3767 if (event->state <= PERF_EVENT_STATE_OFF) in merge_sched_in()
3770 if (!event_filter_match(event)) in merge_sched_in()
3773 if (group_can_go_on(event, cpuctx, *can_add_hw)) { in merge_sched_in()
3774 if (!group_sched_in(event, cpuctx, ctx)) in merge_sched_in()
3775 list_add_tail(&event->active_list, get_event_list(event)); in merge_sched_in()
3778 if (event->state == PERF_EVENT_STATE_INACTIVE) { in merge_sched_in()
3780 if (event->attr.pinned) { in merge_sched_in()
3781 perf_cgroup_event_disable(event, ctx); in merge_sched_in()
3782 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in merge_sched_in()
3786 group_update_userpage(event); in merge_sched_in()
3928 * We restore the event value and then enable it.
3931 * sets the enabled bit in the control field of event _before_
3932 * accessing the event control register. If a NMI hits, then it will
3933 * keep the event running.
3956 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) in perf_calculate_period() argument
3958 u64 frequency = event->attr.sample_freq; in perf_calculate_period()
4032 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) in perf_adjust_period() argument
4034 struct hw_perf_event *hwc = &event->hw; in perf_adjust_period()
4038 period = perf_calculate_period(event, nsec, count); in perf_adjust_period()
4052 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_period()
4057 event->pmu->start(event, PERF_EF_RELOAD); in perf_adjust_period()
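The recomputation driven by perf_adjust_period() above aims for attr.sample_freq samples per second: given 'count' events observed over 'nsec' nanoseconds, the new period is roughly count * NSEC_PER_SEC / (nsec * sample_freq). The real perf_calculate_period() reaches the same result through an overflow-safe fixed-point sequence; a naive, illustrative version:

/* Naive frequency -> period calculation; deliberately ignores the
 * overflow-avoidance done by the real perf_calculate_period(). */
static unsigned long long naive_period(unsigned long long sample_freq,
				       unsigned long long nsec,
				       unsigned long long count)
{
	if (!nsec || !sample_freq)
		return 0;

	/* events/sec = count * 1e9 / nsec; period = events/sec / sample_freq */
	return (count * 1000000000ULL) / (nsec * sample_freq);
}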
4069 struct perf_event *event; in perf_adjust_freq_unthr_context() local
4085 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_adjust_freq_unthr_context()
4086 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_adjust_freq_unthr_context()
4089 if (!event_filter_match(event)) in perf_adjust_freq_unthr_context()
4092 perf_pmu_disable(event->pmu); in perf_adjust_freq_unthr_context()
4094 hwc = &event->hw; in perf_adjust_freq_unthr_context()
4098 perf_log_throttle(event, 1); in perf_adjust_freq_unthr_context()
4099 event->pmu->start(event, 0); in perf_adjust_freq_unthr_context()
4102 if (!event->attr.freq || !event->attr.sample_freq) in perf_adjust_freq_unthr_context()
4106 * stop the event and update event->count in perf_adjust_freq_unthr_context()
4108 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_freq_unthr_context()
4110 now = local64_read(&event->count); in perf_adjust_freq_unthr_context()
4115 * restart the event in perf_adjust_freq_unthr_context()
4117 * we have stopped the event so tell that in perf_adjust_freq_unthr_context()
4122 perf_adjust_period(event, period, delta, false); in perf_adjust_freq_unthr_context()
4124 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); in perf_adjust_freq_unthr_context()
4126 perf_pmu_enable(event->pmu); in perf_adjust_freq_unthr_context()
 4134 * Move @event to the tail of the @ctx's eligible events.
4136 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) in rotate_ctx() argument
4145 perf_event_groups_delete(&ctx->flexible_groups, event); in rotate_ctx()
4146 perf_event_groups_insert(&ctx->flexible_groups, event); in rotate_ctx()
4149 /* pick an event from the flexible_groups to rotate */
4153 struct perf_event *event; in ctx_event_to_rotate() local
4155 /* pick the first active flexible event */ in ctx_event_to_rotate()
4156 event = list_first_entry_or_null(&ctx->flexible_active, in ctx_event_to_rotate()
4159 /* if no active flexible event, pick the first event */ in ctx_event_to_rotate()
4160 if (!event) { in ctx_event_to_rotate()
4161 event = rb_entry_safe(rb_first(&ctx->flexible_groups.tree), in ctx_event_to_rotate()
4162 typeof(*event), group_node); in ctx_event_to_rotate()
4171 return event; in ctx_event_to_rotate()
4182 * events, thus the event count values are stable. in perf_rotate_context()
4238 static int event_enable_on_exec(struct perf_event *event, in event_enable_on_exec() argument
4241 if (!event->attr.enable_on_exec) in event_enable_on_exec()
4244 event->attr.enable_on_exec = 0; in event_enable_on_exec()
4245 if (event->state >= PERF_EVENT_STATE_INACTIVE) in event_enable_on_exec()
4248 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_enable_on_exec()
4262 struct perf_event *event; in perf_event_enable_on_exec() local
4274 list_for_each_entry(event, &ctx->event_list, event_entry) { in perf_event_enable_on_exec()
4275 enabled |= event_enable_on_exec(event, ctx); in perf_event_enable_on_exec()
4276 event_type |= get_event_type(event); in perf_event_enable_on_exec()
4280 * Unclone and reschedule this context if we enabled any event. in perf_event_enable_on_exec()
4297 static void perf_remove_from_owner(struct perf_event *event);
4298 static void perf_event_exit_event(struct perf_event *event,
4308 struct perf_event *event, *next; in perf_event_remove_on_exec() local
4321 list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) { in perf_event_remove_on_exec()
4322 if (!event->attr.remove_on_exec) in perf_event_remove_on_exec()
4325 if (!is_kernel_event(event)) in perf_event_remove_on_exec()
4326 perf_remove_from_owner(event); in perf_event_remove_on_exec()
4330 perf_event_exit_event(event, ctx); in perf_event_remove_on_exec()
4348 struct perf_event *event; member
4353 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu) in __perf_event_read_cpu() argument
4357 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { in __perf_event_read_cpu()
4371 * Cross CPU call to read the hardware event
4376 struct perf_event *sub, *event = data->event; in __perf_event_read() local
4377 struct perf_event_context *ctx = event->ctx; in __perf_event_read()
4379 struct pmu *pmu = event->pmu; in __perf_event_read()
4385 * event->count would have been updated to a recent sample in __perf_event_read()
4386 * when the event was scheduled out. in __perf_event_read()
4394 update_cgrp_time_from_event(event); in __perf_event_read()
4397 perf_event_update_time(event); in __perf_event_read()
4399 perf_event_update_sibling_time(event); in __perf_event_read()
4401 if (event->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_read()
4405 pmu->read(event); in __perf_event_read()
4412 pmu->read(event); in __perf_event_read()
4414 for_each_sibling_event(sub, event) { in __perf_event_read()
4417 * Use sibling's PMU rather than @event's since in __perf_event_read()
4430 static inline u64 perf_event_count(struct perf_event *event) in perf_event_count() argument
4432 return local64_read(&event->count) + atomic64_read(&event->child_count); in perf_event_count()
4435 static void calc_timer_values(struct perf_event *event, in calc_timer_values() argument
4443 ctx_time = perf_event_time_now(event, *now); in calc_timer_values()
4444 __perf_update_times(event, ctx_time, enabled, running); in calc_timer_values()
4448 * NMI-safe method to read a local event, that is an event that
4455 int perf_event_read_local(struct perf_event *event, u64 *value, in perf_event_read_local() argument
4468 * It must not be an event with inherit set, we cannot read in perf_event_read_local()
4471 if (event->attr.inherit) { in perf_event_read_local()
4476 /* If this is a per-task event, it must be for current */ in perf_event_read_local()
4477 if ((event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4478 event->hw.target != current) { in perf_event_read_local()
4483 /* If this is a per-CPU event, it must be for this CPU */ in perf_event_read_local()
4484 if (!(event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4485 event->cpu != smp_processor_id()) { in perf_event_read_local()
4490 /* If this is a pinned event it must be running on this CPU */ in perf_event_read_local()
4491 if (event->attr.pinned && event->oncpu != smp_processor_id()) { in perf_event_read_local()
4497 * If the event is currently on this CPU, its either a per-task event, in perf_event_read_local()
4501 if (event->oncpu == smp_processor_id()) in perf_event_read_local()
4502 event->pmu->read(event); in perf_event_read_local()
4504 *value = local64_read(&event->count); in perf_event_read_local()
4508 calc_timer_values(event, &__now, &__enabled, &__running); in perf_event_read_local()
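perf_event_read_local() above is the NMI-safe read path (the kind of interface BPF-style in-kernel readers rely on): the event must not be inherited and must be local to the current task or CPU. A minimal hedged sketch of a caller, where 'ev' is assumed to already be a valid local event:

/* Illustrative caller; 'ev' is a hypothetical struct perf_event * pinned to
 * the current task or CPU. Needs <linux/perf_event.h> and <linux/printk.h>.
 * Returns 0 on success, negative error otherwise. */
static int sample_local_event(struct perf_event *ev)
{
	u64 value, enabled, running;
	int ret;

	ret = perf_event_read_local(ev, &value, &enabled, &running);
	if (!ret)
		pr_debug("count=%llu enabled=%llu running=%llu\n",
			 value, enabled, running);
	return ret;
}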
4520 static int perf_event_read(struct perf_event *event, bool group) in perf_event_read() argument
4522 enum perf_event_state state = READ_ONCE(event->state); in perf_event_read()
4526 * If event is enabled and currently active on a CPU, update the in perf_event_read()
4527 * value in the event structure: in perf_event_read()
4541 event_cpu = READ_ONCE(event->oncpu); in perf_event_read()
4546 .event = event, in perf_event_read()
4552 event_cpu = __perf_event_read_cpu(event, event_cpu); in perf_event_read()
4558 * If event_cpu isn't a valid CPU it means the event got in perf_event_read()
4559 * scheduled out and that will have updated the event count. in perf_event_read()
4561 * Therefore, either way, we'll have an up-to-date event count in perf_event_read()
4569 struct perf_event_context *ctx = event->ctx; in perf_event_read()
4573 state = event->state; in perf_event_read()
4585 update_cgrp_time_from_event(event); in perf_event_read()
4588 perf_event_update_time(event); in perf_event_read()
4590 perf_event_update_sibling_time(event); in perf_event_read()
4655 struct perf_event *event) in find_get_context() argument
4662 int cpu = event->cpu; in find_get_context()
4665 /* Must be root to operate on a CPU event: */ in find_get_context()
4666 err = perf_allow_cpu(&event->attr); in find_get_context()
4685 if (event->attach_state & PERF_ATTACH_TASK_DATA) { in find_get_context()
4752 static void perf_event_free_filter(struct perf_event *event);
4756 struct perf_event *event; in free_event_rcu() local
4758 event = container_of(head, struct perf_event, rcu_head); in free_event_rcu()
4759 if (event->ns) in free_event_rcu()
4760 put_pid_ns(event->ns); in free_event_rcu()
4761 perf_event_free_filter(event); in free_event_rcu()
4762 kmem_cache_free(perf_event_cache, event); in free_event_rcu()
4765 static void ring_buffer_attach(struct perf_event *event,
4768 static void detach_sb_event(struct perf_event *event) in detach_sb_event() argument
4770 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in detach_sb_event()
4773 list_del_rcu(&event->sb_list); in detach_sb_event()
4777 static bool is_sb_event(struct perf_event *event) in is_sb_event() argument
4779 struct perf_event_attr *attr = &event->attr; in is_sb_event()
4781 if (event->parent) in is_sb_event()
4784 if (event->attach_state & PERF_ATTACH_TASK) in is_sb_event()
4796 static void unaccount_pmu_sb_event(struct perf_event *event) in unaccount_pmu_sb_event() argument
4798 if (is_sb_event(event)) in unaccount_pmu_sb_event()
4799 detach_sb_event(event); in unaccount_pmu_sb_event()
4802 static void unaccount_event_cpu(struct perf_event *event, int cpu) in unaccount_event_cpu() argument
4804 if (event->parent) in unaccount_event_cpu()
4807 if (is_cgroup_event(event)) in unaccount_event_cpu()
4833 static void unaccount_event(struct perf_event *event) in unaccount_event() argument
4837 if (event->parent) in unaccount_event()
4840 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in unaccount_event()
4842 if (event->attr.mmap || event->attr.mmap_data) in unaccount_event()
4844 if (event->attr.build_id) in unaccount_event()
4846 if (event->attr.comm) in unaccount_event()
4848 if (event->attr.namespaces) in unaccount_event()
4850 if (event->attr.cgroup) in unaccount_event()
4852 if (event->attr.task) in unaccount_event()
4854 if (event->attr.freq) in unaccount_event()
4856 if (event->attr.context_switch) { in unaccount_event()
4860 if (is_cgroup_event(event)) in unaccount_event()
4862 if (has_branch_stack(event)) in unaccount_event()
4864 if (event->attr.ksymbol) in unaccount_event()
4866 if (event->attr.bpf_event) in unaccount_event()
4868 if (event->attr.text_poke) in unaccount_event()
4876 unaccount_event_cpu(event, event->cpu); in unaccount_event()
4878 unaccount_pmu_sb_event(event); in unaccount_event()
4891 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
4901 static int exclusive_event_init(struct perf_event *event) in exclusive_event_init() argument
4903 struct pmu *pmu = event->pmu; in exclusive_event_init()
4916 * Since this is called in perf_event_alloc() path, event::ctx in exclusive_event_init()
4918 * to mean "per-task event", because unlike other attach states it in exclusive_event_init()
4921 if (event->attach_state & PERF_ATTACH_TASK) { in exclusive_event_init()
4932 static void exclusive_event_destroy(struct perf_event *event) in exclusive_event_destroy() argument
4934 struct pmu *pmu = event->pmu; in exclusive_event_destroy()
4940 if (event->attach_state & PERF_ATTACH_TASK) in exclusive_event_destroy()
4956 static bool exclusive_event_installable(struct perf_event *event, in exclusive_event_installable() argument
4960 struct pmu *pmu = event->pmu; in exclusive_event_installable()
4968 if (exclusive_event_match(iter_event, event)) in exclusive_event_installable()
4975 static void perf_addr_filters_splice(struct perf_event *event,
4978 static void _free_event(struct perf_event *event) in _free_event() argument
4980 irq_work_sync(&event->pending_irq); in _free_event()
4982 unaccount_event(event); in _free_event()
4984 security_perf_event_free(event); in _free_event()
4986 if (event->rb) { in _free_event()
4988 * Can happen when we close an event with re-directed output. in _free_event()
4993 mutex_lock(&event->mmap_mutex); in _free_event()
4994 ring_buffer_attach(event, NULL); in _free_event()
4995 mutex_unlock(&event->mmap_mutex); in _free_event()
4998 if (is_cgroup_event(event)) in _free_event()
4999 perf_detach_cgroup(event); in _free_event()
5001 if (!event->parent) { in _free_event()
5002 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in _free_event()
5006 perf_event_free_bpf_prog(event); in _free_event()
5007 perf_addr_filters_splice(event, NULL); in _free_event()
5008 kfree(event->addr_filter_ranges); in _free_event()
5010 if (event->destroy) in _free_event()
5011 event->destroy(event); in _free_event()
5017 if (event->hw.target) in _free_event()
5018 put_task_struct(event->hw.target); in _free_event()
5024 if (event->ctx) in _free_event()
5025 put_ctx(event->ctx); in _free_event()
5027 exclusive_event_destroy(event); in _free_event()
5028 module_put(event->pmu->module); in _free_event()
5030 call_rcu(&event->rcu_head, free_event_rcu); in _free_event()
5035 * where the event isn't exposed yet and inherited events.
5037 static void free_event(struct perf_event *event) in free_event() argument
5039 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, in free_event()
5040 "unexpected event refcount: %ld; ptr=%p\n", in free_event()
5041 atomic_long_read(&event->refcount), event)) { in free_event()
5046 _free_event(event); in free_event()
5050 * Remove user event from the owner task.
5052 static void perf_remove_from_owner(struct perf_event *event) in perf_remove_from_owner() argument
5060 * indeed free this event, otherwise we need to serialize on in perf_remove_from_owner()
5063 owner = READ_ONCE(event->owner); in perf_remove_from_owner()
5086 * We have to re-check the event->owner field, if it is cleared in perf_remove_from_owner()
5089 * event. in perf_remove_from_owner()
5091 if (event->owner) { in perf_remove_from_owner()
5092 list_del_init(&event->owner_entry); in perf_remove_from_owner()
5093 smp_store_release(&event->owner, NULL); in perf_remove_from_owner()
5100 static void put_event(struct perf_event *event) in put_event() argument
5102 if (!atomic_long_dec_and_test(&event->refcount)) in put_event()
5105 _free_event(event); in put_event()
5109 * Kill an event dead; while event:refcount will preserve the event
5113 int perf_event_release_kernel(struct perf_event *event) in perf_event_release_kernel() argument
5115 struct perf_event_context *ctx = event->ctx; in perf_event_release_kernel()
5124 WARN_ON_ONCE(event->attach_state & in perf_event_release_kernel()
5129 if (!is_kernel_event(event)) in perf_event_release_kernel()
5130 perf_remove_from_owner(event); in perf_event_release_kernel()
5132 ctx = perf_event_ctx_lock(event); in perf_event_release_kernel()
5136 * Mark this event as STATE_DEAD, there is no external reference to it in perf_event_release_kernel()
5139 * Anybody acquiring event->child_mutex after the below loop _must_ in perf_event_release_kernel()
5146 perf_remove_from_context(event, DETACH_GROUP|DETACH_DEAD); in perf_event_release_kernel()
5148 perf_event_ctx_unlock(event, ctx); in perf_event_release_kernel()
5151 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5152 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_release_kernel()
5163 * Since the event cannot get freed while we hold the in perf_event_release_kernel()
5174 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5176 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5183 tmp = list_first_entry_or_null(&event->child_list, in perf_event_release_kernel()
5192 put_event(event); in perf_event_release_kernel()
5195 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5200 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5209 * Wake any perf_event_free_task() waiting for this event to be in perf_event_release_kernel()
5217 put_event(event); /* Must be the 'last' reference */ in perf_event_release_kernel()
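
A minimal sketch of how a kernel-owned event of this kind is typically created and later torn down through perf_event_release_kernel(); the helper names, the attr values and the per-CPU/no-handler choice are illustrative, not taken from this file.

#include <linux/err.h>
#include <linux/perf_event.h>

static struct perf_event *example_event;

static int example_create(int cpu)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.size	= sizeof(attr),
		.pinned	= 1,
	};

	/* No target task and no overflow handler: a plain per-CPU counter. */
	example_event = perf_event_create_kernel_counter(&attr, cpu, NULL,
							 NULL, NULL);
	return IS_ERR(example_event) ? PTR_ERR(example_event) : 0;
}

static void example_destroy(void)
{
	/* Kills the event; the final reference is dropped internally. */
	perf_event_release_kernel(example_event);
}
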
5231 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in __perf_event_read_value() argument
5239 mutex_lock(&event->child_mutex); in __perf_event_read_value()
5241 (void)perf_event_read(event, false); in __perf_event_read_value()
5242 total += perf_event_count(event); in __perf_event_read_value()
5244 *enabled += event->total_time_enabled + in __perf_event_read_value()
5245 atomic64_read(&event->child_total_time_enabled); in __perf_event_read_value()
5246 *running += event->total_time_running + in __perf_event_read_value()
5247 atomic64_read(&event->child_total_time_running); in __perf_event_read_value()
5249 list_for_each_entry(child, &event->child_list, child_list) { in __perf_event_read_value()
5255 mutex_unlock(&event->child_mutex); in __perf_event_read_value()
5260 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in perf_event_read_value() argument
5265 ctx = perf_event_ctx_lock(event); in perf_event_read_value()
5266 count = __perf_event_read_value(event, enabled, running); in perf_event_read_value()
5267 perf_event_ctx_unlock(event, ctx); in perf_event_read_value()
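
A short sketch, assuming a kernel-owned event like the one above, of reading its aggregate value with perf_event_read_value(); the scaling step for multiplexed counters is a common convention, not something this function does itself.

#include <linux/math64.h>
#include <linux/perf_event.h>

static u64 example_read_scaled(struct perf_event *event)
{
	u64 enabled, running, count;

	count = perf_event_read_value(event, &enabled, &running);

	/* Scale up an estimate if the event was multiplexed. */
	if (running && running < enabled)
		count = div64_u64(count * enabled, running);

	return count;
}
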
5324 static int perf_read_group(struct perf_event *event, in perf_read_group() argument
5327 struct perf_event *leader = event->group_leader, *child; in perf_read_group()
5334 values = kzalloc(event->read_size, GFP_KERNEL); in perf_read_group()
5358 ret = event->read_size; in perf_read_group()
5359 if (copy_to_user(buf, values, event->read_size)) in perf_read_group()
5370 static int perf_read_one(struct perf_event *event, in perf_read_one() argument
5377 values[n++] = __perf_event_read_value(event, &enabled, &running); in perf_read_one()
5383 values[n++] = primary_event_id(event); in perf_read_one()
5385 values[n++] = atomic64_read(&event->lost_samples); in perf_read_one()
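
The ordering filled into values[] above matches the documented layout a user-space read(2) sees for a single (non-group) event; a hedged sketch, assuming the event was opened with read_format = TOTAL_TIME_ENABLED | TOTAL_TIME_RUNNING | ID | LOST:

#include <stdint.h>
#include <unistd.h>

struct single_read {
	uint64_t value;		/* the counter                      */
	uint64_t time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED   */
	uint64_t time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING   */
	uint64_t id;		/* PERF_FORMAT_ID                   */
	uint64_t lost;		/* PERF_FORMAT_LOST                 */
};

static int read_counter(int fd, struct single_read *r)
{
	return read(fd, r, sizeof(*r)) == (ssize_t)sizeof(*r) ? 0 : -1;
}
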
5393 static bool is_event_hup(struct perf_event *event) in is_event_hup() argument
5397 if (event->state > PERF_EVENT_STATE_EXIT) in is_event_hup()
5400 mutex_lock(&event->child_mutex); in is_event_hup()
5401 no_children = list_empty(&event->child_list); in is_event_hup()
5402 mutex_unlock(&event->child_mutex); in is_event_hup()
5407 * Read the performance event - simple non-blocking version for now
5410 __perf_read(struct perf_event *event, char __user *buf, size_t count) in __perf_read() argument
5412 u64 read_format = event->attr.read_format; in __perf_read()
5416 * Return end-of-file for a read on an event that is in in __perf_read()
5420 if (event->state == PERF_EVENT_STATE_ERROR) in __perf_read()
5423 if (count < event->read_size) in __perf_read()
5426 WARN_ON_ONCE(event->ctx->parent_ctx); in __perf_read()
5428 ret = perf_read_group(event, read_format, buf); in __perf_read()
5430 ret = perf_read_one(event, read_format, buf); in __perf_read()
5438 struct perf_event *event = file->private_data; in perf_read() local
5442 ret = security_perf_event_read(event); in perf_read()
5446 ctx = perf_event_ctx_lock(event); in perf_read()
5447 ret = __perf_read(event, buf, count); in perf_read()
5448 perf_event_ctx_unlock(event, ctx); in perf_read()
5455 struct perf_event *event = file->private_data; in perf_poll() local
5459 poll_wait(file, &event->waitq, wait); in perf_poll()
5461 if (is_event_hup(event)) in perf_poll()
5465 * Pin the event->rb by taking event->mmap_mutex; otherwise in perf_poll()
5468 mutex_lock(&event->mmap_mutex); in perf_poll()
5469 rb = event->rb; in perf_poll()
5472 mutex_unlock(&event->mmap_mutex); in perf_poll()
5476 static void _perf_event_reset(struct perf_event *event) in _perf_event_reset() argument
5478 (void)perf_event_read(event, false); in _perf_event_reset()
5479 local64_set(&event->count, 0); in _perf_event_reset()
5480 perf_event_update_userpage(event); in _perf_event_reset()
5483 /* Assume it's not an event with inherit set. */
5484 u64 perf_event_pause(struct perf_event *event, bool reset) in perf_event_pause() argument
5489 ctx = perf_event_ctx_lock(event); in perf_event_pause()
5490 WARN_ON_ONCE(event->attr.inherit); in perf_event_pause()
5491 _perf_event_disable(event); in perf_event_pause()
5492 count = local64_read(&event->count); in perf_event_pause()
5494 local64_set(&event->count, 0); in perf_event_pause()
5495 perf_event_ctx_unlock(event, ctx); in perf_event_pause()
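
A tiny sketch of the in-kernel perf_event_pause() call shown above; the wrapper name is mine and the event is assumed to be kernel-owned and created without attr.inherit.

#include <linux/perf_event.h>

static u64 example_pause(struct perf_event *event)
{
	/* Stops the event and returns its count; 'true' also zeroes it. */
	return perf_event_pause(event, true);
}
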
5502 * Holding the top-level event's child_mutex means that any
5503 * descendant process that has inherited this event will block
5507 static void perf_event_for_each_child(struct perf_event *event, in perf_event_for_each_child() argument
5512 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_for_each_child()
5514 mutex_lock(&event->child_mutex); in perf_event_for_each_child()
5515 func(event); in perf_event_for_each_child()
5516 list_for_each_entry(child, &event->child_list, child_list) in perf_event_for_each_child()
5518 mutex_unlock(&event->child_mutex); in perf_event_for_each_child()
5521 static void perf_event_for_each(struct perf_event *event, in perf_event_for_each() argument
5524 struct perf_event_context *ctx = event->ctx; in perf_event_for_each()
5529 event = event->group_leader; in perf_event_for_each()
5531 perf_event_for_each_child(event, func); in perf_event_for_each()
5532 for_each_sibling_event(sibling, event) in perf_event_for_each()
5536 static void __perf_event_period(struct perf_event *event, in __perf_event_period() argument
5544 if (event->attr.freq) { in __perf_event_period()
5545 event->attr.sample_freq = value; in __perf_event_period()
5547 event->attr.sample_period = value; in __perf_event_period()
5548 event->hw.sample_period = value; in __perf_event_period()
5551 active = (event->state == PERF_EVENT_STATE_ACTIVE); in __perf_event_period()
5556 * trying to unthrottle while we already re-started the event. in __perf_event_period()
5558 if (event->hw.interrupts == MAX_INTERRUPTS) { in __perf_event_period()
5559 event->hw.interrupts = 0; in __perf_event_period()
5560 perf_log_throttle(event, 1); in __perf_event_period()
5562 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_period()
5565 local64_set(&event->hw.period_left, 0); in __perf_event_period()
5568 event->pmu->start(event, PERF_EF_RELOAD); in __perf_event_period()
5573 static int perf_event_check_period(struct perf_event *event, u64 value) in perf_event_check_period() argument
5575 return event->pmu->check_period(event, value); in perf_event_check_period()
5578 static int _perf_event_period(struct perf_event *event, u64 value) in _perf_event_period() argument
5580 if (!is_sampling_event(event)) in _perf_event_period()
5586 if (event->attr.freq && value > sysctl_perf_event_sample_rate) in _perf_event_period()
5589 if (perf_event_check_period(event, value)) in _perf_event_period()
5592 if (!event->attr.freq && (value & (1ULL << 63))) in _perf_event_period()
5595 event_function_call(event, __perf_event_period, &value); in _perf_event_period()
5600 int perf_event_period(struct perf_event *event, u64 value) in perf_event_period() argument
5605 ctx = perf_event_ctx_lock(event); in perf_event_period()
5606 ret = _perf_event_period(event, value); in perf_event_period()
5607 perf_event_ctx_unlock(event, ctx); in perf_event_period()
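
A hedged sketch of the in-kernel perf_event_period() entry point listed above; the helper name is illustrative and the event is assumed to be a sampling event.

#include <linux/perf_event.h>

static int example_set_period(struct perf_event *event, u64 value)
{
	/*
	 * For a fixed-period event 'value' is a sample period; if the event
	 * was created with attr.freq set it is a frequency in Hz instead,
	 * mirroring the two branches of __perf_event_period() above.
	 */
	return perf_event_period(event, value);
}
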
5629 static int perf_event_set_output(struct perf_event *event,
5631 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
5635 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) in _perf_ioctl() argument
5652 return _perf_event_refresh(event, arg); in _perf_ioctl()
5661 return _perf_event_period(event, value); in _perf_ioctl()
5665 u64 id = primary_event_id(event); in _perf_ioctl()
5682 ret = perf_event_set_output(event, output_event); in _perf_ioctl()
5685 ret = perf_event_set_output(event, NULL); in _perf_ioctl()
5691 return perf_event_set_filter(event, (void __user *)arg); in _perf_ioctl()
5702 err = perf_event_set_bpf_prog(event, prog, 0); in _perf_ioctl()
5715 rb = rcu_dereference(event->rb); in _perf_ioctl()
5726 return perf_event_query_prog_array(event, (void __user *)arg); in _perf_ioctl()
5736 return perf_event_modify_attr(event, &new_attr); in _perf_ioctl()
5743 perf_event_for_each(event, func); in _perf_ioctl()
5745 perf_event_for_each_child(event, func); in _perf_ioctl()
5752 struct perf_event *event = file->private_data; in perf_ioctl() local
5757 ret = security_perf_event_write(event); in perf_ioctl()
5761 ctx = perf_event_ctx_lock(event); in perf_ioctl()
5762 ret = _perf_ioctl(event, cmd, arg); in perf_ioctl()
5763 perf_event_ctx_unlock(event, ctx); in perf_ioctl()
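
A user-space sketch of the ioctl surface dispatched by _perf_ioctl() above; both file descriptors are assumed to come from perf_event_open(2), and the period value is a placeholder.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static void example_ioctls(int fd, int other_fd)
{
	uint64_t period = 100000;
	uint64_t id;

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);		/* zero the count       */
	ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);	/* new sample period    */
	ioctl(fd, PERF_EVENT_IOC_ID, &id);		/* fetch the event id   */
	ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, other_fd);	/* share a ring buffer  */
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
}
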
5793 struct perf_event *event; in perf_event_task_enable() local
5796 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_enable()
5797 ctx = perf_event_ctx_lock(event); in perf_event_task_enable()
5798 perf_event_for_each_child(event, _perf_event_enable); in perf_event_task_enable()
5799 perf_event_ctx_unlock(event, ctx); in perf_event_task_enable()
5809 struct perf_event *event; in perf_event_task_disable() local
5812 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_disable()
5813 ctx = perf_event_ctx_lock(event); in perf_event_task_disable()
5814 perf_event_for_each_child(event, _perf_event_disable); in perf_event_task_disable()
5815 perf_event_ctx_unlock(event, ctx); in perf_event_task_disable()
5822 static int perf_event_index(struct perf_event *event) in perf_event_index() argument
5824 if (event->hw.state & PERF_HES_STOPPED) in perf_event_index()
5827 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_event_index()
5830 return event->pmu->event_idx(event); in perf_event_index()
5833 static void perf_event_init_userpage(struct perf_event *event) in perf_event_init_userpage() argument
5839 rb = rcu_dereference(event->rb); in perf_event_init_userpage()
5856 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) in arch_perf_update_userpage() argument
5865 void perf_event_update_userpage(struct perf_event *event) in perf_event_update_userpage() argument
5872 rb = rcu_dereference(event->rb); in perf_event_update_userpage()
5878 * based on snapshot values taken when the event in perf_event_update_userpage()
5885 calc_timer_values(event, &now, &enabled, &running); in perf_event_update_userpage()
5895 userpg->index = perf_event_index(event); in perf_event_update_userpage()
5896 userpg->offset = perf_event_count(event); in perf_event_update_userpage()
5898 userpg->offset -= local64_read(&event->hw.prev_count); in perf_event_update_userpage()
5901 atomic64_read(&event->child_total_time_enabled); in perf_event_update_userpage()
5904 atomic64_read(&event->child_total_time_running); in perf_event_update_userpage()
5906 arch_perf_update_userpage(event, userpg, now); in perf_event_update_userpage()
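
A sketch of the user-space side of the seqlock that perf_event_update_userpage() writes above, assuming 'pg' points at the first mmap'ed page of the event; the function name and the compiler-barrier choice are mine.

#include <stdint.h>
#include <linux/perf_event.h>

static int64_t read_user_offset(volatile struct perf_event_mmap_page *pg)
{
	uint32_t seq;
	int64_t offset;

	do {
		seq = pg->lock;
		__sync_synchronize();	/* pairs with the kernel's barriers */
		offset = pg->offset;
		__sync_synchronize();
	} while (pg->lock != seq);

	return offset;
}
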
5918 struct perf_event *event = vmf->vma->vm_file->private_data; in perf_mmap_fault() local
5929 rb = rcu_dereference(event->rb); in perf_mmap_fault()
5951 static void ring_buffer_attach(struct perf_event *event, in ring_buffer_attach() argument
5957 WARN_ON_ONCE(event->parent); in ring_buffer_attach()
5959 if (event->rb) { in ring_buffer_attach()
5962 * event->rb_entry and wait/clear when adding event->rb_entry. in ring_buffer_attach()
5964 WARN_ON_ONCE(event->rcu_pending); in ring_buffer_attach()
5966 old_rb = event->rb; in ring_buffer_attach()
5968 list_del_rcu(&event->rb_entry); in ring_buffer_attach()
5971 event->rcu_batches = get_state_synchronize_rcu(); in ring_buffer_attach()
5972 event->rcu_pending = 1; in ring_buffer_attach()
5976 if (event->rcu_pending) { in ring_buffer_attach()
5977 cond_synchronize_rcu(event->rcu_batches); in ring_buffer_attach()
5978 event->rcu_pending = 0; in ring_buffer_attach()
5982 list_add_rcu(&event->rb_entry, &rb->event_list); in ring_buffer_attach()
5987 * Avoid racing with perf_mmap_close(AUX): stop the event in ring_buffer_attach()
5988 * before swizzling the event::rb pointer; if it's getting in ring_buffer_attach()
5996 if (has_aux(event)) in ring_buffer_attach()
5997 perf_event_stop(event, 0); in ring_buffer_attach()
5999 rcu_assign_pointer(event->rb, rb); in ring_buffer_attach()
6008 wake_up_all(&event->waitq); in ring_buffer_attach()
6012 static void ring_buffer_wakeup(struct perf_event *event) in ring_buffer_wakeup() argument
6016 if (event->parent) in ring_buffer_wakeup()
6017 event = event->parent; in ring_buffer_wakeup()
6020 rb = rcu_dereference(event->rb); in ring_buffer_wakeup()
6022 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) in ring_buffer_wakeup()
6023 wake_up_all(&event->waitq); in ring_buffer_wakeup()
6028 struct perf_buffer *ring_buffer_get(struct perf_event *event) in ring_buffer_get() argument
6032 if (event->parent) in ring_buffer_get()
6033 event = event->parent; in ring_buffer_get()
6036 rb = rcu_dereference(event->rb); in ring_buffer_get()
6058 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_open() local
6060 atomic_inc(&event->mmap_count); in perf_mmap_open()
6061 atomic_inc(&event->rb->mmap_count); in perf_mmap_open()
6064 atomic_inc(&event->rb->aux_mmap_count); in perf_mmap_open()
6066 if (event->pmu->event_mapped) in perf_mmap_open()
6067 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap_open()
6070 static void perf_pmu_output_stop(struct perf_event *event);
6074 * event, or through other events by use of perf_event_set_output().
6082 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_close() local
6083 struct perf_buffer *rb = ring_buffer_get(event); in perf_mmap_close()
6089 if (event->pmu->event_unmapped) in perf_mmap_close()
6090 event->pmu->event_unmapped(event, vma->vm_mm); in perf_mmap_close()
6094 * event->mmap_count, so it is ok to use event->mmap_mutex to in perf_mmap_close()
6098 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { in perf_mmap_close()
6105 perf_pmu_output_stop(event); in perf_mmap_close()
6115 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6121 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) in perf_mmap_close()
6124 ring_buffer_attach(event, NULL); in perf_mmap_close()
6125 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6138 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { in perf_mmap_close()
6139 if (!atomic_long_inc_not_zero(&event->refcount)) { in perf_mmap_close()
6141 * This event is en-route to free_event() which will in perf_mmap_close()
6148 mutex_lock(&event->mmap_mutex); in perf_mmap_close()
6154 * If we find a different rb, ignore this event; the next in perf_mmap_close()
6159 if (event->rb == rb) in perf_mmap_close()
6160 ring_buffer_attach(event, NULL); in perf_mmap_close()
6162 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6163 put_event(event); in perf_mmap_close()
6200 struct perf_event *event = file->private_data; in perf_mmap() local
6215 if (event->cpu == -1 && event->attr.inherit) in perf_mmap()
6221 ret = security_perf_event_read(event); in perf_mmap()
6237 if (!event->rb) in perf_mmap()
6242 mutex_lock(&event->mmap_mutex); in perf_mmap()
6245 rb = event->rb; in perf_mmap()
6297 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_mmap()
6299 mutex_lock(&event->mmap_mutex); in perf_mmap()
6300 if (event->rb) { in perf_mmap()
6301 if (data_page_nr(event->rb) != nr_pages) { in perf_mmap()
6306 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { in perf_mmap()
6309 * event and try again. in perf_mmap()
6311 ring_buffer_attach(event, NULL); in perf_mmap()
6312 mutex_unlock(&event->mmap_mutex); in perf_mmap()
6358 WARN_ON(!rb && event->rb); in perf_mmap()
6365 event->attr.watermark ? event->attr.wakeup_watermark : 0, in perf_mmap()
6366 event->cpu, flags); in perf_mmap()
6377 ring_buffer_attach(event, rb); in perf_mmap()
6379 perf_event_update_time(event); in perf_mmap()
6380 perf_event_init_userpage(event); in perf_mmap()
6381 perf_event_update_userpage(event); in perf_mmap()
6383 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, in perf_mmap()
6384 event->attr.aux_watermark, flags); in perf_mmap()
6394 atomic_inc(&event->mmap_count); in perf_mmap()
6399 mutex_unlock(&event->mmap_mutex); in perf_mmap()
6408 if (event->pmu->event_mapped) in perf_mmap()
6409 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap()
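
A user-space sketch of the mapping that perf_mmap() above services: one metadata page (struct perf_event_mmap_page) followed by a power-of-two number of data pages; the choice of 16 data pages here is arbitrary.

#include <sys/mman.h>
#include <unistd.h>

static void *map_ring_buffer(int fd)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = (1 + 16) * page;	/* 1 header page + 2^4 data pages */

	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	return base == MAP_FAILED ? NULL : base;
}
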
6417 struct perf_event *event = filp->private_data; in perf_fasync() local
6421 retval = fasync_helper(fd, filp, on, &event->fasync); in perf_fasync()
6442 * Perf event wakeup
6448 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) in perf_event_fasync() argument
6451 if (event->parent) in perf_event_fasync()
6452 event = event->parent; in perf_event_fasync()
6453 return &event->fasync; in perf_event_fasync()
6456 void perf_event_wakeup(struct perf_event *event) in perf_event_wakeup() argument
6458 ring_buffer_wakeup(event); in perf_event_wakeup()
6460 if (event->pending_kill) { in perf_event_wakeup()
6461 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); in perf_event_wakeup()
6462 event->pending_kill = 0; in perf_event_wakeup()
6466 static void perf_sigtrap(struct perf_event *event) in perf_sigtrap() argument
6473 if (WARN_ON_ONCE(event->ctx->task != current)) in perf_sigtrap()
6483 send_sig_perf((void __user *)event->pending_addr, in perf_sigtrap()
6484 event->attr.type, event->attr.sig_data); in perf_sigtrap()
6488 * Deliver the pending work in-event-context or follow the context.
6490 static void __perf_pending_irq(struct perf_event *event) in __perf_pending_irq() argument
6492 int cpu = READ_ONCE(event->oncpu); in __perf_pending_irq()
6495 * If the event isn't running, we're done. event_sched_out() will have in __perf_pending_irq()
6502 * Yay, we hit home and are in the context of the event. in __perf_pending_irq()
6505 if (event->pending_sigtrap) { in __perf_pending_irq()
6506 event->pending_sigtrap = 0; in __perf_pending_irq()
6507 perf_sigtrap(event); in __perf_pending_irq()
6508 local_dec(&event->ctx->nr_pending); in __perf_pending_irq()
6510 if (event->pending_disable) { in __perf_pending_irq()
6511 event->pending_disable = 0; in __perf_pending_irq()
6512 perf_event_disable_local(event); in __perf_pending_irq()
6535 * But the event runs on CPU-B and wants disabling there. in __perf_pending_irq()
6537 irq_work_queue_on(&event->pending_irq, cpu); in __perf_pending_irq()
6542 struct perf_event *event = container_of(entry, struct perf_event, pending_irq); in perf_pending_irq() local
6552 * The wakeup isn't bound to the context of the event -- it can happen in perf_pending_irq()
6553 * irrespective of where the event is. in perf_pending_irq()
6555 if (event->pending_wakeup) { in perf_pending_irq()
6556 event->pending_wakeup = 0; in perf_pending_irq()
6557 perf_event_wakeup(event); in perf_pending_irq()
6560 __perf_pending_irq(event); in perf_pending_irq()
6568 struct perf_event *event = container_of(head, struct perf_event, pending_task); in perf_pending_task() local
6578 if (event->pending_work) { in perf_pending_task()
6579 event->pending_work = 0; in perf_pending_task()
6580 perf_sigtrap(event); in perf_pending_task()
6581 local_dec(&event->ctx->nr_pending); in perf_pending_task()
6588 put_event(event); in perf_pending_task()
6762 static unsigned long perf_prepare_sample_aux(struct perf_event *event, in perf_prepare_sample_aux() argument
6766 struct perf_event *sampler = event->aux_event; in perf_prepare_sample_aux()
6801 struct perf_event *event, in perf_pmu_snapshot_aux() argument
6811 * the IRQ ones, that is, for example, re-starting an event that's just in perf_pmu_snapshot_aux()
6813 * doesn't change the event state. in perf_pmu_snapshot_aux()
6825 ret = event->pmu->snapshot_aux(event, handle, size); in perf_pmu_snapshot_aux()
6834 static void perf_aux_sample_output(struct perf_event *event, in perf_aux_sample_output() argument
6838 struct perf_event *sampler = event->aux_event; in perf_aux_sample_output()
6880 struct perf_event *event, in __perf_event_header__init_id() argument
6883 data->type = event->attr.sample_type; in __perf_event_header__init_id()
6884 header->size += event->id_header_size; in __perf_event_header__init_id()
6888 data->tid_entry.pid = perf_event_pid(event, current); in __perf_event_header__init_id()
6889 data->tid_entry.tid = perf_event_tid(event, current); in __perf_event_header__init_id()
6893 data->time = perf_event_clock(event); in __perf_event_header__init_id()
6896 data->id = primary_event_id(event); in __perf_event_header__init_id()
6899 data->stream_id = event->id; in __perf_event_header__init_id()
6909 struct perf_event *event) in perf_event_header__init_id() argument
6911 if (event->attr.sample_id_all) in perf_event_header__init_id()
6912 __perf_event_header__init_id(header, data, event, event->attr.sample_type); in perf_event_header__init_id()
6939 void perf_event__output_id_sample(struct perf_event *event, in perf_event__output_id_sample() argument
6943 if (event->attr.sample_id_all) in perf_event__output_id_sample()
6948 struct perf_event *event, in perf_output_read_one() argument
6951 u64 read_format = event->attr.read_format; in perf_output_read_one()
6955 values[n++] = perf_event_count(event); in perf_output_read_one()
6958 atomic64_read(&event->child_total_time_enabled); in perf_output_read_one()
6962 atomic64_read(&event->child_total_time_running); in perf_output_read_one()
6965 values[n++] = primary_event_id(event); in perf_output_read_one()
6967 values[n++] = atomic64_read(&event->lost_samples); in perf_output_read_one()
6973 struct perf_event *event, in perf_output_read_group() argument
6976 struct perf_event *leader = event->group_leader, *sub; in perf_output_read_group()
6977 u64 read_format = event->attr.read_format; in perf_output_read_group()
6996 if ((leader != event) && in perf_output_read_group()
7011 if ((sub != event) && in perf_output_read_group()
7038 struct perf_event *event) in perf_output_read() argument
7041 u64 read_format = event->attr.read_format; in perf_output_read()
7045 * based on snapshot values taken when the event in perf_output_read()
7053 calc_timer_values(event, &now, &enabled, &running); in perf_output_read()
7055 if (event->attr.read_format & PERF_FORMAT_GROUP) in perf_output_read()
7056 perf_output_read_group(handle, event, enabled, running); in perf_output_read()
7058 perf_output_read_one(handle, event, enabled, running); in perf_output_read()
7064 struct perf_event *event) in perf_output_sample() argument
7098 perf_output_read(handle, event); in perf_output_sample()
7149 if (branch_sample_hw_index(event)) in perf_output_sample()
7171 u64 mask = event->attr.sample_regs_user; in perf_output_sample()
7202 u64 mask = event->attr.sample_regs_intr; in perf_output_sample()
7226 perf_aux_sample_output(event, handle, data); in perf_output_sample()
7229 if (!event->attr.watermark) { in perf_output_sample()
7230 int wakeup_events = event->attr.wakeup_events; in perf_output_sample()
7369 perf_callchain(struct perf_event *event, struct pt_regs *regs) in perf_callchain() argument
7371 bool kernel = !event->attr.exclude_callchain_kernel; in perf_callchain()
7372 bool user = !event->attr.exclude_callchain_user; in perf_callchain()
7374 bool crosstask = event->ctx->task && event->ctx->task != current; in perf_callchain()
7375 const u32 max_stack = event->attr.sample_max_stack; in perf_callchain()
7388 struct perf_event *event, in perf_prepare_sample() argument
7391 u64 sample_type = event->attr.sample_type; in perf_prepare_sample()
7395 header->size = sizeof(*header) + event->header_size; in perf_prepare_sample()
7405 __perf_event_header__init_id(header, data, event, filtered_sample_type); in perf_prepare_sample()
7414 data->callchain = perf_callchain(event, regs); in perf_prepare_sample()
7450 if (branch_sample_hw_index(event)) in perf_prepare_sample()
7467 u64 mask = event->attr.sample_regs_user; in perf_prepare_sample()
7481 u16 stack_size = event->attr.sample_stack_user; in perf_prepare_sample()
7520 u64 mask = event->attr.sample_regs_intr; in perf_prepare_sample()
7565 event->attr.aux_sample_size); in perf_prepare_sample()
7567 size = perf_prepare_sample_aux(event, data, size); in perf_prepare_sample()
7584 __perf_event_output(struct perf_event *event, in __perf_event_output() argument
7599 perf_prepare_sample(&header, data, event, regs); in __perf_event_output()
7601 err = output_begin(&handle, data, event, header.size); in __perf_event_output()
7605 perf_output_sample(&handle, &header, data, event); in __perf_event_output()
7615 perf_event_output_forward(struct perf_event *event, in perf_event_output_forward() argument
7619 __perf_event_output(event, data, regs, perf_output_begin_forward); in perf_event_output_forward()
7623 perf_event_output_backward(struct perf_event *event, in perf_event_output_backward() argument
7627 __perf_event_output(event, data, regs, perf_output_begin_backward); in perf_event_output_backward()
7631 perf_event_output(struct perf_event *event, in perf_event_output() argument
7635 return __perf_event_output(event, data, regs, perf_output_begin); in perf_event_output()
7650 perf_event_read_event(struct perf_event *event, in perf_event_read_event() argument
7659 .size = sizeof(read_event) + event->read_size, in perf_event_read_event()
7661 .pid = perf_event_pid(event, task), in perf_event_read_event()
7662 .tid = perf_event_tid(event, task), in perf_event_read_event()
7666 perf_event_header__init_id(&read_event.header, &sample, event); in perf_event_read_event()
7667 ret = perf_output_begin(&handle, &sample, event, read_event.header.size); in perf_event_read_event()
7672 perf_output_read(&handle, event); in perf_event_read_event()
7673 perf_event__output_id_sample(event, &handle, &sample); in perf_event_read_event()
7678 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
7685 struct perf_event *event; in perf_iterate_ctx() local
7687 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_iterate_ctx()
7689 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_ctx()
7691 if (!event_filter_match(event)) in perf_iterate_ctx()
7695 output(event, data); in perf_iterate_ctx()
7702 struct perf_event *event; in perf_iterate_sb_cpu() local
7704 list_for_each_entry_rcu(event, &pel->list, sb_list) { in perf_iterate_sb_cpu()
7707 * if we observe event->ctx, both event and ctx will be in perf_iterate_sb_cpu()
7710 if (!smp_load_acquire(&event->ctx)) in perf_iterate_sb_cpu()
7713 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_sb_cpu()
7715 if (!event_filter_match(event)) in perf_iterate_sb_cpu()
7717 output(event, data); in perf_iterate_sb_cpu()
7725 * your event, otherwise it might not get delivered.
7763 static void perf_event_addr_filters_exec(struct perf_event *event, void *data) in perf_event_addr_filters_exec() argument
7765 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_exec()
7770 if (!has_addr_filter(event)) in perf_event_addr_filters_exec()
7776 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_exec()
7777 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_exec()
7785 event->addr_filters_gen++; in perf_event_addr_filters_exec()
7789 perf_event_stop(event, 1); in perf_event_addr_filters_exec()
7816 static void __perf_event_output_stop(struct perf_event *event, void *data) in __perf_event_output_stop() argument
7818 struct perf_event *parent = event->parent; in __perf_event_output_stop()
7822 .event = event, in __perf_event_output_stop()
7825 if (!has_aux(event)) in __perf_event_output_stop()
7829 parent = event; in __perf_event_output_stop()
7835 * We are using event::rb to determine if the event should be stopped, in __perf_event_output_stop()
7837 * which will make us skip the event that actually needs to be stopped. in __perf_event_output_stop()
7838 * So ring_buffer_attach() has to stop an aux event before re-assigning in __perf_event_output_stop()
7847 struct perf_event *event = info; in __perf_pmu_output_stop() local
7848 struct pmu *pmu = event->ctx->pmu; in __perf_pmu_output_stop()
7851 .rb = event->rb, in __perf_pmu_output_stop()
7864 static void perf_pmu_output_stop(struct perf_event *event) in perf_pmu_output_stop() argument
7871 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) { in perf_pmu_output_stop()
7875 * sufficient to stop the event itself if it's active, since in perf_pmu_output_stop()
7885 err = cpu_function_call(cpu, __perf_pmu_output_stop, event); in perf_pmu_output_stop()
7915 static int perf_event_task_match(struct perf_event *event) in perf_event_task_match() argument
7917 return event->attr.comm || event->attr.mmap || in perf_event_task_match()
7918 event->attr.mmap2 || event->attr.mmap_data || in perf_event_task_match()
7919 event->attr.task; in perf_event_task_match()
7922 static void perf_event_task_output(struct perf_event *event, in perf_event_task_output() argument
7931 if (!perf_event_task_match(event)) in perf_event_task_output()
7934 perf_event_header__init_id(&task_event->event_id.header, &sample, event); in perf_event_task_output()
7936 ret = perf_output_begin(&handle, &sample, event, in perf_event_task_output()
7941 task_event->event_id.pid = perf_event_pid(event, task); in perf_event_task_output()
7942 task_event->event_id.tid = perf_event_tid(event, task); in perf_event_task_output()
7945 task_event->event_id.ppid = perf_event_pid(event, in perf_event_task_output()
7947 task_event->event_id.ptid = perf_event_pid(event, in perf_event_task_output()
7950 task_event->event_id.ppid = perf_event_pid(event, current); in perf_event_task_output()
7951 task_event->event_id.ptid = perf_event_tid(event, current); in perf_event_task_output()
7954 task_event->event_id.time = perf_event_clock(event); in perf_event_task_output()
7958 perf_event__output_id_sample(event, &handle, &sample); in perf_event_task_output()
8021 static int perf_event_comm_match(struct perf_event *event) in perf_event_comm_match() argument
8023 return event->attr.comm; in perf_event_comm_match()
8026 static void perf_event_comm_output(struct perf_event *event, in perf_event_comm_output() argument
8035 if (!perf_event_comm_match(event)) in perf_event_comm_output()
8038 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); in perf_event_comm_output()
8039 ret = perf_output_begin(&handle, &sample, event, in perf_event_comm_output()
8045 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); in perf_event_comm_output()
8046 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); in perf_event_comm_output()
8052 perf_event__output_id_sample(event, &handle, &sample); in perf_event_comm_output()
8120 static int perf_event_namespaces_match(struct perf_event *event) in perf_event_namespaces_match() argument
8122 return event->attr.namespaces; in perf_event_namespaces_match()
8125 static void perf_event_namespaces_output(struct perf_event *event, in perf_event_namespaces_output() argument
8134 if (!perf_event_namespaces_match(event)) in perf_event_namespaces_output()
8138 &sample, event); in perf_event_namespaces_output()
8139 ret = perf_output_begin(&handle, &sample, event, in perf_event_namespaces_output()
8144 namespaces_event->event_id.pid = perf_event_pid(event, in perf_event_namespaces_output()
8146 namespaces_event->event_id.tid = perf_event_tid(event, in perf_event_namespaces_output()
8151 perf_event__output_id_sample(event, &handle, &sample); in perf_event_namespaces_output()
8248 static int perf_event_cgroup_match(struct perf_event *event) in perf_event_cgroup_match() argument
8250 return event->attr.cgroup; in perf_event_cgroup_match()
8253 static void perf_event_cgroup_output(struct perf_event *event, void *data) in perf_event_cgroup_output() argument
8261 if (!perf_event_cgroup_match(event)) in perf_event_cgroup_output()
8265 &sample, event); in perf_event_cgroup_output()
8266 ret = perf_output_begin(&handle, &sample, event, in perf_event_cgroup_output()
8274 perf_event__output_id_sample(event, &handle, &sample); in perf_event_cgroup_output()
8359 static int perf_event_mmap_match(struct perf_event *event, in perf_event_mmap_match() argument
8366 return (!executable && event->attr.mmap_data) || in perf_event_mmap_match()
8367 (executable && (event->attr.mmap || event->attr.mmap2)); in perf_event_mmap_match()
8370 static void perf_event_mmap_output(struct perf_event *event, in perf_event_mmap_output() argument
8381 if (!perf_event_mmap_match(event, data)) in perf_event_mmap_output()
8384 if (event->attr.mmap2) { in perf_event_mmap_output()
8394 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); in perf_event_mmap_output()
8395 ret = perf_output_begin(&handle, &sample, event, in perf_event_mmap_output()
8400 mmap_event->event_id.pid = perf_event_pid(event, current); in perf_event_mmap_output()
8401 mmap_event->event_id.tid = perf_event_tid(event, current); in perf_event_mmap_output()
8403 use_build_id = event->attr.build_id && mmap_event->build_id_size; in perf_event_mmap_output()
8405 if (event->attr.mmap2 && use_build_id) in perf_event_mmap_output()
8410 if (event->attr.mmap2) { in perf_event_mmap_output()
8429 perf_event__output_id_sample(event, &handle, &sample); in perf_event_mmap_output()
8601 static void __perf_addr_filters_adjust(struct perf_event *event, void *data) in __perf_addr_filters_adjust() argument
8603 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in __perf_addr_filters_adjust()
8609 if (!has_addr_filter(event)) in __perf_addr_filters_adjust()
8618 &event->addr_filter_ranges[count])) in __perf_addr_filters_adjust()
8625 event->addr_filters_gen++; in __perf_addr_filters_adjust()
8629 perf_event_stop(event, 1); in __perf_addr_filters_adjust()
8693 void perf_event_aux_event(struct perf_event *event, unsigned long head, in perf_event_aux_event() argument
8715 perf_event_header__init_id(&rec.header, &sample, event); in perf_event_aux_event()
8716 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_event_aux_event()
8722 perf_event__output_id_sample(event, &handle, &sample); in perf_event_aux_event()
8730 void perf_log_lost_samples(struct perf_event *event, u64 lost) in perf_log_lost_samples() argument
8748 perf_event_header__init_id(&lost_samples_event.header, &sample, event); in perf_log_lost_samples()
8750 ret = perf_output_begin(&handle, &sample, event, in perf_log_lost_samples()
8756 perf_event__output_id_sample(event, &handle, &sample); in perf_log_lost_samples()
8775 static int perf_event_switch_match(struct perf_event *event) in perf_event_switch_match() argument
8777 return event->attr.context_switch; in perf_event_switch_match()
8780 static void perf_event_switch_output(struct perf_event *event, void *data) in perf_event_switch_output() argument
8787 if (!perf_event_switch_match(event)) in perf_event_switch_output()
8791 if (event->ctx->task) { in perf_event_switch_output()
8798 perf_event_pid(event, se->next_prev); in perf_event_switch_output()
8800 perf_event_tid(event, se->next_prev); in perf_event_switch_output()
8803 perf_event_header__init_id(&se->event_id.header, &sample, event); in perf_event_switch_output()
8805 ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size); in perf_event_switch_output()
8809 if (event->ctx->task) in perf_event_switch_output()
8814 perf_event__output_id_sample(event, &handle, &sample); in perf_event_switch_output()
8852 static void perf_log_throttle(struct perf_event *event, int enable) in perf_log_throttle() argument
8869 .time = perf_event_clock(event), in perf_log_throttle()
8870 .id = primary_event_id(event), in perf_log_throttle()
8871 .stream_id = event->id, in perf_log_throttle()
8877 perf_event_header__init_id(&throttle_event.header, &sample, event); in perf_log_throttle()
8879 ret = perf_output_begin(&handle, &sample, event, in perf_log_throttle()
8885 perf_event__output_id_sample(event, &handle, &sample); in perf_log_throttle()
8905 static int perf_event_ksymbol_match(struct perf_event *event) in perf_event_ksymbol_match() argument
8907 return event->attr.ksymbol; in perf_event_ksymbol_match()
8910 static void perf_event_ksymbol_output(struct perf_event *event, void *data) in perf_event_ksymbol_output() argument
8917 if (!perf_event_ksymbol_match(event)) in perf_event_ksymbol_output()
8921 &sample, event); in perf_event_ksymbol_output()
8922 ret = perf_output_begin(&handle, &sample, event, in perf_event_ksymbol_output()
8929 perf_event__output_id_sample(event, &handle, &sample); in perf_event_ksymbol_output()
8995 static int perf_event_bpf_match(struct perf_event *event) in perf_event_bpf_match() argument
8997 return event->attr.bpf_event; in perf_event_bpf_match()
9000 static void perf_event_bpf_output(struct perf_event *event, void *data) in perf_event_bpf_output() argument
9007 if (!perf_event_bpf_match(event)) in perf_event_bpf_output()
9011 &sample, event); in perf_event_bpf_output()
9012 ret = perf_output_begin(&handle, data, event, in perf_event_bpf_output()
9018 perf_event__output_id_sample(event, &handle, &sample); in perf_event_bpf_output()
9103 static int perf_event_text_poke_match(struct perf_event *event) in perf_event_text_poke_match() argument
9105 return event->attr.text_poke; in perf_event_text_poke_match()
9108 static void perf_event_text_poke_output(struct perf_event *event, void *data) in perf_event_text_poke_output() argument
9116 if (!perf_event_text_poke_match(event)) in perf_event_text_poke_output()
9119 perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event); in perf_event_text_poke_output()
9121 ret = perf_output_begin(&handle, &sample, event, in perf_event_text_poke_output()
9136 perf_event__output_id_sample(event, &handle, &sample); in perf_event_text_poke_output()
9173 void perf_event_itrace_started(struct perf_event *event) in perf_event_itrace_started() argument
9175 event->attach_state |= PERF_ATTACH_ITRACE; in perf_event_itrace_started()
9178 static void perf_log_itrace_start(struct perf_event *event) in perf_log_itrace_start() argument
9189 if (event->parent) in perf_log_itrace_start()
9190 event = event->parent; in perf_log_itrace_start()
9192 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || in perf_log_itrace_start()
9193 event->attach_state & PERF_ATTACH_ITRACE) in perf_log_itrace_start()
9199 rec.pid = perf_event_pid(event, current); in perf_log_itrace_start()
9200 rec.tid = perf_event_tid(event, current); in perf_log_itrace_start()
9202 perf_event_header__init_id(&rec.header, &sample, event); in perf_log_itrace_start()
9203 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_log_itrace_start()
9209 perf_event__output_id_sample(event, &handle, &sample); in perf_log_itrace_start()
9214 void perf_report_aux_output_id(struct perf_event *event, u64 hw_id) in perf_report_aux_output_id() argument
9224 if (event->parent) in perf_report_aux_output_id()
9225 event = event->parent; in perf_report_aux_output_id()
9232 perf_event_header__init_id(&rec.header, &sample, event); in perf_report_aux_output_id()
9233 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_report_aux_output_id()
9239 perf_event__output_id_sample(event, &handle, &sample); in perf_report_aux_output_id()
9245 __perf_event_account_interrupt(struct perf_event *event, int throttle) in __perf_event_account_interrupt() argument
9247 struct hw_perf_event *hwc = &event->hw; in __perf_event_account_interrupt()
9262 perf_log_throttle(event, 0); in __perf_event_account_interrupt()
9267 if (event->attr.freq) { in __perf_event_account_interrupt()
9274 perf_adjust_period(event, delta, hwc->last_period, true); in __perf_event_account_interrupt()
9280 int perf_event_account_interrupt(struct perf_event *event) in perf_event_account_interrupt() argument
9282 return __perf_event_account_interrupt(event, 1); in perf_event_account_interrupt()
9285 static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs) in sample_is_allowed() argument
9292 if (event->attr.exclude_kernel && !user_mode(regs)) in sample_is_allowed()
9299 * Generic event overflow handling, sampling.
9302 static int __perf_event_overflow(struct perf_event *event, in __perf_event_overflow() argument
9306 int events = atomic_read(&event->event_limit); in __perf_event_overflow()
9313 if (unlikely(!is_sampling_event(event))) in __perf_event_overflow()
9316 ret = __perf_event_account_interrupt(event, throttle); in __perf_event_overflow()
9323 event->pending_kill = POLL_IN; in __perf_event_overflow()
9324 if (events && atomic_dec_and_test(&event->event_limit)) { in __perf_event_overflow()
9326 event->pending_kill = POLL_HUP; in __perf_event_overflow()
9327 perf_event_disable_inatomic(event); in __perf_event_overflow()
9330 if (event->attr.sigtrap) { in __perf_event_overflow()
9334 * it is the first event, on the other hand, we should also not in __perf_event_overflow()
9337 bool valid_sample = sample_is_allowed(event, regs); in __perf_event_overflow()
9342 if (!event->pending_sigtrap) { in __perf_event_overflow()
9343 event->pending_sigtrap = pending_id; in __perf_event_overflow()
9344 local_inc(&event->ctx->nr_pending); in __perf_event_overflow()
9345 } else if (event->attr.exclude_kernel && valid_sample) { in __perf_event_overflow()
9358 WARN_ON_ONCE(event->pending_sigtrap != pending_id); in __perf_event_overflow()
9361 event->pending_addr = 0; in __perf_event_overflow()
9363 event->pending_addr = data->addr; in __perf_event_overflow()
9364 irq_work_queue(&event->pending_irq); in __perf_event_overflow()
9367 READ_ONCE(event->overflow_handler)(event, data, regs); in __perf_event_overflow() local
9369 if (*perf_event_fasync(event) && event->pending_kill) { in __perf_event_overflow()
9370 event->pending_wakeup = 1; in __perf_event_overflow()
9371 irq_work_queue(&event->pending_irq); in __perf_event_overflow()
9377 int perf_event_overflow(struct perf_event *event, in perf_event_overflow() argument
9381 return __perf_event_overflow(event, 1, data, regs); in perf_event_overflow()
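
A sketch of an in-kernel overflow callback with the perf_overflow_handler_t signature that __perf_event_overflow() invokes through event->overflow_handler; it would be passed as the handler argument of perf_event_create_kernel_counter(). The body is illustrative only.

#include <linux/perf_event.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void example_overflow(struct perf_event *event,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	/* Often runs in NMI context: keep it short, lock-free, rate-limited. */
	pr_warn_ratelimited("perf overflow on CPU%d, period %llu\n",
			    smp_processor_id(),
			    (unsigned long long)data->period);
}
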
9385 * Generic software event infrastructure
9400 * We directly increment event->count and keep a second value in
9401 * event->hw.period_left to count intervals. This period event
9406 u64 perf_swevent_set_period(struct perf_event *event) in perf_swevent_set_period() argument
9408 struct hw_perf_event *hwc = &event->hw; in perf_swevent_set_period()
9429 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, in perf_swevent_overflow() argument
9433 struct hw_perf_event *hwc = &event->hw; in perf_swevent_overflow()
9437 overflow = perf_swevent_set_period(event); in perf_swevent_overflow()
9443 if (__perf_event_overflow(event, throttle, in perf_swevent_overflow()
9455 static void perf_swevent_event(struct perf_event *event, u64 nr, in perf_swevent_event() argument
9459 struct hw_perf_event *hwc = &event->hw; in perf_swevent_event()
9461 local64_add(nr, &event->count); in perf_swevent_event()
9466 if (!is_sampling_event(event)) in perf_swevent_event()
9469 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { in perf_swevent_event()
9471 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
9473 data->period = event->hw.last_period; in perf_swevent_event()
9475 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) in perf_swevent_event()
9476 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
9481 perf_swevent_overflow(event, 0, data, regs); in perf_swevent_event()
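
A sketch of how kernel code usually feeds this path: the perf_sw_event() helper ends up in perf_swevent_event() above. The wrapper is mine; the page-fault event id mirrors what the architecture fault handlers use.

#include <linux/perf_event.h>
#include <linux/ptrace.h>

static void example_emit_sw_event(struct pt_regs *regs, unsigned long address)
{
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
}
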
9484 static int perf_exclude_event(struct perf_event *event, in perf_exclude_event() argument
9487 if (event->hw.state & PERF_HES_STOPPED) in perf_exclude_event()
9491 if (event->attr.exclude_user && user_mode(regs)) in perf_exclude_event()
9494 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_exclude_event()
9501 static int perf_swevent_match(struct perf_event *event, in perf_swevent_match() argument
9507 if (event->attr.type != type) in perf_swevent_match()
9510 if (event->attr.config != event_id) in perf_swevent_match()
9513 if (perf_exclude_event(event, regs)) in perf_swevent_match()
9547 /* For the event head insertion and removal in the hlist */
9549 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) in find_swevent_head() argument
9552 u32 event_id = event->attr.config; in find_swevent_head()
9553 u64 type = event->attr.type; in find_swevent_head()
9556 * Event scheduling is always serialized against hlist allocation in find_swevent_head()
9561 lockdep_is_held(&event->ctx->lock)); in find_swevent_head()
9574 struct perf_event *event; in do_perf_sw_event() local
9582 hlist_for_each_entry_rcu(event, head, hlist_entry) { in do_perf_sw_event()
9583 if (perf_swevent_match(event, type, event_id, data, regs)) in do_perf_sw_event()
9584 perf_swevent_event(event, nr, data, regs); in do_perf_sw_event()
9634 static void perf_swevent_read(struct perf_event *event) in perf_swevent_read() argument
9638 static int perf_swevent_add(struct perf_event *event, int flags) in perf_swevent_add() argument
9641 struct hw_perf_event *hwc = &event->hw; in perf_swevent_add()
9644 if (is_sampling_event(event)) { in perf_swevent_add()
9646 perf_swevent_set_period(event); in perf_swevent_add()
9651 head = find_swevent_head(swhash, event); in perf_swevent_add()
9655 hlist_add_head_rcu(&event->hlist_entry, head); in perf_swevent_add()
9656 perf_event_update_userpage(event); in perf_swevent_add()
9661 static void perf_swevent_del(struct perf_event *event, int flags) in perf_swevent_del() argument
9663 hlist_del_rcu(&event->hlist_entry); in perf_swevent_del()
9666 static void perf_swevent_start(struct perf_event *event, int flags) in perf_swevent_start() argument
9668 event->hw.state = 0; in perf_swevent_start()
9671 static void perf_swevent_stop(struct perf_event *event, int flags) in perf_swevent_stop() argument
9673 event->hw.state = PERF_HES_STOPPED; in perf_swevent_stop()
9765 static void sw_perf_event_destroy(struct perf_event *event) in sw_perf_event_destroy() argument
9767 u64 event_id = event->attr.config; in sw_perf_event_destroy()
9769 WARN_ON(event->parent); in sw_perf_event_destroy()
9775 static int perf_swevent_init(struct perf_event *event) in perf_swevent_init() argument
9777 u64 event_id = event->attr.config; in perf_swevent_init()
9779 if (event->attr.type != PERF_TYPE_SOFTWARE) in perf_swevent_init()
9785 if (has_branch_stack(event)) in perf_swevent_init()
9800 if (!event->parent) { in perf_swevent_init()
9808 event->destroy = sw_perf_event_destroy; in perf_swevent_init()
9829 static int perf_tp_filter_match(struct perf_event *event, in perf_tp_filter_match() argument
9835 if (event->parent) in perf_tp_filter_match()
9836 event = event->parent; in perf_tp_filter_match()
9838 if (likely(!event->filter) || filter_match_preds(event->filter, record)) in perf_tp_filter_match()
9843 static int perf_tp_event_match(struct perf_event *event, in perf_tp_event_match() argument
9847 if (event->hw.state & PERF_HES_STOPPED) in perf_tp_event_match()
9852 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_tp_event_match()
9855 if (!perf_tp_filter_match(event, data)) in perf_tp_event_match()
9873 perf_tp_event(call->event.type, count, raw_data, size, regs, head, in perf_trace_run_bpf_submit()
9883 struct perf_event *event; in perf_tp_event() local
9898 hlist_for_each_entry_rcu(event, head, hlist_entry) { in perf_tp_event()
9899 if (perf_tp_event_match(event, &data, regs)) in perf_tp_event()
9900 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
9905 * deliver this event there too. in perf_tp_event()
9916 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_tp_event()
9917 if (event->cpu != smp_processor_id()) in perf_tp_event()
9919 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event()
9921 if (event->attr.config != entry->type) in perf_tp_event()
9924 if (event->attr.sigtrap) in perf_tp_event()
9926 if (perf_tp_event_match(event, &data, regs)) in perf_tp_event()
9927 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
9937 static void tp_perf_event_destroy(struct perf_event *event) in tp_perf_event_destroy() argument
9939 perf_trace_destroy(event); in tp_perf_event_destroy()
9942 static int perf_tp_event_init(struct perf_event *event) in perf_tp_event_init() argument
9946 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event_init()
9952 if (has_branch_stack(event)) in perf_tp_event_init()
9955 err = perf_trace_init(event); in perf_tp_event_init()
9959 event->destroy = tp_perf_event_destroy; in perf_tp_event_init()
10015 static int perf_kprobe_event_init(struct perf_event *event);
10027 static int perf_kprobe_event_init(struct perf_event *event) in perf_kprobe_event_init() argument
10032 if (event->attr.type != perf_kprobe.type) in perf_kprobe_event_init()
10041 if (has_branch_stack(event)) in perf_kprobe_event_init()
10044 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_kprobe_event_init()
10045 err = perf_kprobe_init(event, is_retprobe); in perf_kprobe_event_init()
10049 event->destroy = perf_kprobe_destroy; in perf_kprobe_event_init()
10074 static int perf_uprobe_event_init(struct perf_event *event);
10086 static int perf_uprobe_event_init(struct perf_event *event) in perf_uprobe_event_init() argument
10092 if (event->attr.type != perf_uprobe.type) in perf_uprobe_event_init()
10101 if (has_branch_stack(event)) in perf_uprobe_event_init()
10104 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_uprobe_event_init()
10105 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT; in perf_uprobe_event_init()
10106 err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe); in perf_uprobe_event_init()
10110 event->destroy = perf_uprobe_destroy; in perf_uprobe_event_init()
10127 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
10129 ftrace_profile_free_filter(event); in perf_event_free_filter()
10133 static void bpf_overflow_handler(struct perf_event *event, in bpf_overflow_handler() argument
10139 .event = event, in bpf_overflow_handler()
10148 prog = READ_ONCE(event->prog); in bpf_overflow_handler()
10151 (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) && in bpf_overflow_handler()
10153 data->callchain = perf_callchain(event, regs); in bpf_overflow_handler()
10165 event->orig_overflow_handler(event, data, regs); in bpf_overflow_handler()
10168 static int perf_event_set_bpf_handler(struct perf_event *event, in perf_event_set_bpf_handler() argument
10172 if (event->overflow_handler_context) in perf_event_set_bpf_handler()
10176 if (event->prog) in perf_event_set_bpf_handler()
10182 if (event->attr.precise_ip && in perf_event_set_bpf_handler()
10184 (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) || in perf_event_set_bpf_handler()
10185 event->attr.exclude_callchain_kernel || in perf_event_set_bpf_handler()
10186 event->attr.exclude_callchain_user)) { in perf_event_set_bpf_handler()
10199 event->prog = prog; in perf_event_set_bpf_handler()
10200 event->bpf_cookie = bpf_cookie; in perf_event_set_bpf_handler()
10201 event->orig_overflow_handler = READ_ONCE(event->overflow_handler); in perf_event_set_bpf_handler()
10202 WRITE_ONCE(event->overflow_handler, bpf_overflow_handler); in perf_event_set_bpf_handler()
10206 static void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
10208 struct bpf_prog *prog = event->prog; in perf_event_free_bpf_handler()
10213 WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler); in perf_event_free_bpf_handler()
10214 event->prog = NULL; in perf_event_free_bpf_handler()
10218 static int perf_event_set_bpf_handler(struct perf_event *event, in perf_event_set_bpf_handler() argument
10224 static void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
10230 * returns true if the event is a tracepoint, or a kprobe/uprobe created
10233 static inline bool perf_event_is_tracing(struct perf_event *event) in perf_event_is_tracing() argument
10235 if (event->pmu == &perf_tracepoint) in perf_event_is_tracing()
10238 if (event->pmu == &perf_kprobe) in perf_event_is_tracing()
10242 if (event->pmu == &perf_uprobe) in perf_event_is_tracing()
10248 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, in perf_event_set_bpf_prog() argument
10253 if (!perf_event_is_tracing(event)) in perf_event_set_bpf_prog()
10254 return perf_event_set_bpf_handler(event, prog, bpf_cookie); in perf_event_set_bpf_prog()
10256 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_KPROBE; in perf_event_set_bpf_prog()
10257 is_uprobe = event->tp_event->flags & TRACE_EVENT_FL_UPROBE; in perf_event_set_bpf_prog()
10258 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; in perf_event_set_bpf_prog()
10259 is_syscall_tp = is_syscall_trace_event(event->tp_event); in perf_event_set_bpf_prog()
10278 int off = trace_event_get_offsets(event->tp_event); in perf_event_set_bpf_prog()
10284 return perf_event_attach_bpf_prog(event, prog, bpf_cookie); in perf_event_set_bpf_prog()
10287 void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
10289 if (!perf_event_is_tracing(event)) { in perf_event_free_bpf_prog()
10290 perf_event_free_bpf_handler(event); in perf_event_free_bpf_prog()
10293 perf_event_detach_bpf_prog(event); in perf_event_free_bpf_prog()
10302 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
10306 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, in perf_event_set_bpf_prog() argument
10312 void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
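
A user-space sketch of attaching an already-loaded BPF program by fd, which lands in perf_event_set_bpf_prog() above; perf_fd is assumed to come from perf_event_open(2) on a tracepoint or k/uprobe event and prog_fd from bpf(BPF_PROG_LOAD, ...).

#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int attach_bpf(int perf_fd, int prog_fd)
{
	if (ioctl(perf_fd, PERF_EVENT_IOC_SET_BPF, prog_fd))
		return -1;
	return ioctl(perf_fd, PERF_EVENT_IOC_ENABLE, 0);
}
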
10334 perf_addr_filter_new(struct perf_event *event, struct list_head *filters) in perf_addr_filter_new() argument
10336 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu); in perf_addr_filter_new()
10363 static void perf_addr_filters_splice(struct perf_event *event, in perf_addr_filters_splice() argument
10369 if (!has_addr_filter(event)) in perf_addr_filters_splice()
10373 if (event->parent) in perf_addr_filters_splice()
10376 raw_spin_lock_irqsave(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
10378 list_splice_init(&event->addr_filters.list, &list); in perf_addr_filters_splice()
10380 list_splice(head, &event->addr_filters.list); in perf_addr_filters_splice()
10382 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
10409 * Update event's address range filters based on the
10412 static void perf_event_addr_filters_apply(struct perf_event *event) in perf_event_addr_filters_apply() argument
10414 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_apply()
10415 struct task_struct *task = READ_ONCE(event->ctx->task); in perf_event_addr_filters_apply()
10422 * We may observe TASK_TOMBSTONE, which means that the event tear-down in perf_event_addr_filters_apply()
10443 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_apply()
10444 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_apply()
10446 perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]); in perf_event_addr_filters_apply()
10448 event->addr_filter_ranges[count].start = filter->offset; in perf_event_addr_filters_apply()
10449 event->addr_filter_ranges[count].size = filter->size; in perf_event_addr_filters_apply()
10455 event->addr_filters_gen++; in perf_event_addr_filters_apply()
10465 perf_event_stop(event, 1); in perf_event_addr_filters_apply()
10519 perf_event_parse_addr_filter(struct perf_event *event, char *fstr, in perf_event_parse_addr_filter() argument
10546 filter = perf_addr_filter_new(event, filters); in perf_event_parse_addr_filter()
10605 * Make sure that it doesn't contradict itself or the event's in perf_event_parse_addr_filter()
10632 if (!event->ctx->task) in perf_event_parse_addr_filter()
10647 event->addr_filters.nr_file_filters++; in perf_event_parse_addr_filter()
10676 perf_event_set_addr_filter(struct perf_event *event, char *filter_str) in perf_event_set_addr_filter() argument
10685 lockdep_assert_held(&event->ctx->mutex); in perf_event_set_addr_filter()
10687 if (WARN_ON_ONCE(event->parent)) in perf_event_set_addr_filter()
10690 ret = perf_event_parse_addr_filter(event, filter_str, &filters); in perf_event_set_addr_filter()
10694 ret = event->pmu->addr_filters_validate(&filters); in perf_event_set_addr_filter()
10699 perf_addr_filters_splice(event, &filters); in perf_event_set_addr_filter()
10702 perf_event_for_each_child(event, perf_event_addr_filters_apply); in perf_event_set_addr_filter()
10710 event->addr_filters.nr_file_filters = 0; in perf_event_set_addr_filter()
10715 static int perf_event_set_filter(struct perf_event *event, void __user *arg) in perf_event_set_filter() argument
10725 if (perf_event_is_tracing(event)) { in perf_event_set_filter()
10726 struct perf_event_context *ctx = event->ctx; in perf_event_set_filter()
10736 * This can result in event getting moved to a different ctx, in perf_event_set_filter()
10740 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); in perf_event_set_filter()
10744 if (has_addr_filter(event)) in perf_event_set_filter()
10745 ret = perf_event_set_addr_filter(event, filter_str); in perf_event_set_filter()
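perf_event_set_filter() backs the PERF_EVENT_IOC_SET_FILTER ioctl: tracing events hand the string to ftrace, while events with address filters go through perf_event_parse_addr_filter() above. A hedged user-space sketch of programming an address range filter; the "filter <start>/<size>@<object>" syntax is the one parsed above, and the offsets and library path are purely illustrative.

/* Hedged sketch: set an address range filter on an event whose PMU
 * supports address filters.  perf_fd is assumed to come from
 * perf_event_open() on such a PMU.
 */
#include <sys/ioctl.h>
#include <linux/perf_event.h>
#include <stdio.h>

int set_addr_filter(int perf_fd)
{
	const char *filter = "filter 0x1000/0x200@/usr/lib64/libc.so.6";

	if (ioctl(perf_fd, PERF_EVENT_IOC_SET_FILTER, filter) < 0) {
		perror("PERF_EVENT_IOC_SET_FILTER");
		return -1;
	}
	return 0;
}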
10760 struct perf_event *event; in perf_swevent_hrtimer() local
10763 event = container_of(hrtimer, struct perf_event, hw.hrtimer); in perf_swevent_hrtimer()
10765 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_swevent_hrtimer()
10768 event->pmu->read(event); in perf_swevent_hrtimer()
10770 perf_sample_data_init(&data, 0, event->hw.last_period); in perf_swevent_hrtimer()
10773 if (regs && !perf_exclude_event(event, regs)) { in perf_swevent_hrtimer()
10774 if (!(event->attr.exclude_idle && is_idle_task(current))) in perf_swevent_hrtimer()
10775 if (__perf_event_overflow(event, 1, &data, regs)) in perf_swevent_hrtimer()
10779 period = max_t(u64, 10000, event->hw.sample_period); in perf_swevent_hrtimer()
10785 static void perf_swevent_start_hrtimer(struct perf_event *event) in perf_swevent_start_hrtimer() argument
10787 struct hw_perf_event *hwc = &event->hw; in perf_swevent_start_hrtimer()
10790 if (!is_sampling_event(event)) in perf_swevent_start_hrtimer()
10806 static void perf_swevent_cancel_hrtimer(struct perf_event *event) in perf_swevent_cancel_hrtimer() argument
10808 struct hw_perf_event *hwc = &event->hw; in perf_swevent_cancel_hrtimer()
10810 if (is_sampling_event(event)) { in perf_swevent_cancel_hrtimer()
10818 static void perf_swevent_init_hrtimer(struct perf_event *event) in perf_swevent_init_hrtimer() argument
10820 struct hw_perf_event *hwc = &event->hw; in perf_swevent_init_hrtimer()
10822 if (!is_sampling_event(event)) in perf_swevent_init_hrtimer()
10832 if (event->attr.freq) { in perf_swevent_init_hrtimer()
10833 long freq = event->attr.sample_freq; in perf_swevent_init_hrtimer()
10835 event->attr.sample_period = NSEC_PER_SEC / freq; in perf_swevent_init_hrtimer()
10836 hwc->sample_period = event->attr.sample_period; in perf_swevent_init_hrtimer()
10839 event->attr.freq = 0; in perf_swevent_init_hrtimer()
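perf_swevent_init_hrtimer() converts a frequency-based request into a fixed period once, up front, instead of using the usual adaptive period adjustment. A tiny standalone illustration of that static freq-to-period mapping; the 4000 Hz value is an arbitrary example.

/* Standalone illustration of the freq->period conversion done in
 * perf_swevent_init_hrtimer() above.
 */
#include <stdio.h>

int main(void)
{
	const long long NSEC_PER_SEC = 1000000000LL;
	long sample_freq = 4000;			/* attr.sample_freq (Hz) */
	long long sample_period = NSEC_PER_SEC / sample_freq;

	printf("%ld Hz -> hrtimer period of %lld ns\n",
	       sample_freq, sample_period);
	return 0;
}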
10844 * Software event: cpu wall time clock
10847 static void cpu_clock_event_update(struct perf_event *event) in cpu_clock_event_update() argument
10853 prev = local64_xchg(&event->hw.prev_count, now); in cpu_clock_event_update()
10854 local64_add(now - prev, &event->count); in cpu_clock_event_update()
10857 static void cpu_clock_event_start(struct perf_event *event, int flags) in cpu_clock_event_start() argument
10859 local64_set(&event->hw.prev_count, local_clock()); in cpu_clock_event_start()
10860 perf_swevent_start_hrtimer(event); in cpu_clock_event_start()
10863 static void cpu_clock_event_stop(struct perf_event *event, int flags) in cpu_clock_event_stop() argument
10865 perf_swevent_cancel_hrtimer(event); in cpu_clock_event_stop()
10866 cpu_clock_event_update(event); in cpu_clock_event_stop()
10869 static int cpu_clock_event_add(struct perf_event *event, int flags) in cpu_clock_event_add() argument
10872 cpu_clock_event_start(event, flags); in cpu_clock_event_add()
10873 perf_event_update_userpage(event); in cpu_clock_event_add()
10878 static void cpu_clock_event_del(struct perf_event *event, int flags) in cpu_clock_event_del() argument
10880 cpu_clock_event_stop(event, flags); in cpu_clock_event_del()
10883 static void cpu_clock_event_read(struct perf_event *event) in cpu_clock_event_read() argument
10885 cpu_clock_event_update(event); in cpu_clock_event_read()
10888 static int cpu_clock_event_init(struct perf_event *event) in cpu_clock_event_init() argument
10890 if (event->attr.type != PERF_TYPE_SOFTWARE) in cpu_clock_event_init()
10893 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) in cpu_clock_event_init()
10899 if (has_branch_stack(event)) in cpu_clock_event_init()
10902 perf_swevent_init_hrtimer(event); in cpu_clock_event_init()
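The cpu-clock event above follows a counting pattern used throughout this file: hw.prev_count holds the timestamp of the last update, and each update atomically swaps in the current time and adds the delta to event->count. Below is a rough user-space analogue of that pattern, with local64_t replaced by C11 atomics and local_clock() by CLOCK_MONOTONIC; the names are illustrative, not kernel API.

#include <stdatomic.h>
#include <stdint.h>
#include <time.h>
#include <stdio.h>

static atomic_uint_fast64_t prev_count;
static atomic_uint_fast64_t count;

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Analogue of cpu_clock_event_update(): swap in "now", add the delta. */
static void clock_event_update(void)
{
	uint64_t now = now_ns();
	uint64_t prev = atomic_exchange(&prev_count, now);

	atomic_fetch_add(&count, now - prev);
}

int main(void)
{
	atomic_store(&prev_count, now_ns());	/* like cpu_clock_event_start() */
	/* ... the measured interval ... */
	clock_event_update();			/* like read/stop */
	printf("counted %llu ns of wall clock\n",
	       (unsigned long long)atomic_load(&count));
	return 0;
}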
10921 * Software event: task time clock
10924 static void task_clock_event_update(struct perf_event *event, u64 now) in task_clock_event_update() argument
10929 prev = local64_xchg(&event->hw.prev_count, now); in task_clock_event_update()
10931 local64_add(delta, &event->count); in task_clock_event_update()
10934 static void task_clock_event_start(struct perf_event *event, int flags) in task_clock_event_start() argument
10936 local64_set(&event->hw.prev_count, event->ctx->time); in task_clock_event_start()
10937 perf_swevent_start_hrtimer(event); in task_clock_event_start()
10940 static void task_clock_event_stop(struct perf_event *event, int flags) in task_clock_event_stop() argument
10942 perf_swevent_cancel_hrtimer(event); in task_clock_event_stop()
10943 task_clock_event_update(event, event->ctx->time); in task_clock_event_stop()
10946 static int task_clock_event_add(struct perf_event *event, int flags) in task_clock_event_add() argument
10949 task_clock_event_start(event, flags); in task_clock_event_add()
10950 perf_event_update_userpage(event); in task_clock_event_add()
10955 static void task_clock_event_del(struct perf_event *event, int flags) in task_clock_event_del() argument
10957 task_clock_event_stop(event, PERF_EF_UPDATE); in task_clock_event_del()
10960 static void task_clock_event_read(struct perf_event *event) in task_clock_event_read() argument
10963 u64 delta = now - event->ctx->timestamp; in task_clock_event_read()
10964 u64 time = event->ctx->time + delta; in task_clock_event_read()
10966 task_clock_event_update(event, time); in task_clock_event_read()
10969 static int task_clock_event_init(struct perf_event *event) in task_clock_event_init() argument
10971 if (event->attr.type != PERF_TYPE_SOFTWARE) in task_clock_event_init()
10974 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) in task_clock_event_init()
10980 if (has_branch_stack(event)) in task_clock_event_init()
10983 perf_swevent_init_hrtimer(event); in task_clock_event_init()
11014 static int perf_event_nop_int(struct perf_event *event, u64 value) in perf_event_nop_int() argument
11056 static int perf_event_idx_default(struct perf_event *event) in perf_event_idx_default() argument
11342 * is fast, provided a valid software event is provided. in perf_pmu_register()
11396 static inline bool has_extended_regs(struct perf_event *event) in has_extended_regs() argument
11398 return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) || in has_extended_regs()
11399 (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK); in has_extended_regs()
11402 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) in perf_try_init_event() argument
11413 * if this is a sibling event, acquire the ctx->mutex to protect in perf_try_init_event()
11416 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) { in perf_try_init_event()
11421 ctx = perf_event_ctx_lock_nested(event->group_leader, in perf_try_init_event()
11426 event->pmu = pmu; in perf_try_init_event()
11427 ret = pmu->event_init(event); in perf_try_init_event()
11430 perf_event_ctx_unlock(event->group_leader, ctx); in perf_try_init_event()
11434 has_extended_regs(event)) in perf_try_init_event()
11438 event_has_any_exclude_flag(event)) in perf_try_init_event()
11441 if (ret && event->destroy) in perf_try_init_event()
11442 event->destroy(event); in perf_try_init_event()
11451 static struct pmu *perf_init_event(struct perf_event *event) in perf_init_event() argument
11460 if (event->parent && event->parent->pmu) { in perf_init_event()
11461 pmu = event->parent->pmu; in perf_init_event()
11462 ret = perf_try_init_event(pmu, event); in perf_init_event()
11471 type = event->attr.type; in perf_init_event()
11473 type = event->attr.config >> PERF_PMU_TYPE_SHIFT; in perf_init_event()
11478 event->attr.config &= PERF_HW_EVENT_MASK; in perf_init_event()
11487 if (event->attr.type != type && type != PERF_TYPE_RAW && in perf_init_event()
11491 ret = perf_try_init_event(pmu, event); in perf_init_event()
11492 if (ret == -ENOENT && event->attr.type != type && !extended_type) { in perf_init_event()
11493 type = event->attr.type; in perf_init_event()
11504 ret = perf_try_init_event(pmu, event); in perf_init_event()
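perf_init_event() resolves which PMU owns the event: it first retries the parent's PMU, then looks up attr.type, and for the generic hardware types it also accepts an extended encoding where the upper bits of attr.config name a specific PMU (attr.config >> PERF_PMU_TYPE_SHIFT) while the low bits keep the generic event id. A hedged user-space sketch of that extended encoding, assuming a 32-bit shift and a pmu_type value read from /sys/bus/event_source/devices/<pmu>/type.

/* Hedged sketch of the extended hardware-event encoding decoded by
 * perf_init_event() above.  pmu_type is an assumed value from sysfs.
 */
#include <linux/perf_event.h>
#include <string.h>

static void setup_extended_hw_event(struct perf_event_attr *attr,
				    unsigned long long pmu_type)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	/* high bits: which PMU; low bits: which generic event */
	attr->config = (pmu_type << 32) | PERF_COUNT_HW_CPU_CYCLES;
}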
11521 static void attach_sb_event(struct perf_event *event) in attach_sb_event() argument
11523 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in attach_sb_event()
11526 list_add_rcu(&event->sb_list, &pel->list); in attach_sb_event()
11537 static void account_pmu_sb_event(struct perf_event *event) in account_pmu_sb_event() argument
11539 if (is_sb_event(event)) in account_pmu_sb_event()
11540 attach_sb_event(event); in account_pmu_sb_event()
11543 static void account_event_cpu(struct perf_event *event, int cpu) in account_event_cpu() argument
11545 if (event->parent) in account_event_cpu()
11548 if (is_cgroup_event(event)) in account_event_cpu()
11573 static void account_event(struct perf_event *event) in account_event() argument
11577 if (event->parent) in account_event()
11580 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in account_event()
11582 if (event->attr.mmap || event->attr.mmap_data) in account_event()
11584 if (event->attr.build_id) in account_event()
11586 if (event->attr.comm) in account_event()
11588 if (event->attr.namespaces) in account_event()
11590 if (event->attr.cgroup) in account_event()
11592 if (event->attr.task) in account_event()
11594 if (event->attr.freq) in account_event()
11596 if (event->attr.context_switch) { in account_event()
11600 if (has_branch_stack(event)) in account_event()
11602 if (is_cgroup_event(event)) in account_event()
11604 if (event->attr.ksymbol) in account_event()
11606 if (event->attr.bpf_event) in account_event()
11608 if (event->attr.text_poke) in account_event()
11639 account_event_cpu(event, event->cpu); in account_event()
11641 account_pmu_sb_event(event); in account_event()
11645 * Allocate and initialize an event structure
11656 struct perf_event *event; in perf_event_alloc() local
11671 event = kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO, in perf_event_alloc()
11673 if (!event) in perf_event_alloc()
11681 group_leader = event; in perf_event_alloc()
11683 mutex_init(&event->child_mutex); in perf_event_alloc()
11684 INIT_LIST_HEAD(&event->child_list); in perf_event_alloc()
11686 INIT_LIST_HEAD(&event->event_entry); in perf_event_alloc()
11687 INIT_LIST_HEAD(&event->sibling_list); in perf_event_alloc()
11688 INIT_LIST_HEAD(&event->active_list); in perf_event_alloc()
11689 init_event_group(event); in perf_event_alloc()
11690 INIT_LIST_HEAD(&event->rb_entry); in perf_event_alloc()
11691 INIT_LIST_HEAD(&event->active_entry); in perf_event_alloc()
11692 INIT_LIST_HEAD(&event->addr_filters.list); in perf_event_alloc()
11693 INIT_HLIST_NODE(&event->hlist_entry); in perf_event_alloc()
11696 init_waitqueue_head(&event->waitq); in perf_event_alloc()
11697 init_irq_work(&event->pending_irq, perf_pending_irq); in perf_event_alloc()
11698 init_task_work(&event->pending_task, perf_pending_task); in perf_event_alloc()
11700 mutex_init(&event->mmap_mutex); in perf_event_alloc()
11701 raw_spin_lock_init(&event->addr_filters.lock); in perf_event_alloc()
11703 atomic_long_set(&event->refcount, 1); in perf_event_alloc()
11704 event->cpu = cpu; in perf_event_alloc()
11705 event->attr = *attr; in perf_event_alloc()
11706 event->group_leader = group_leader; in perf_event_alloc()
11707 event->pmu = NULL; in perf_event_alloc()
11708 event->oncpu = -1; in perf_event_alloc()
11710 event->parent = parent_event; in perf_event_alloc()
11712 event->ns = get_pid_ns(task_active_pid_ns(current)); in perf_event_alloc()
11713 event->id = atomic64_inc_return(&perf_event_id); in perf_event_alloc()
11715 event->state = PERF_EVENT_STATE_INACTIVE; in perf_event_alloc()
11718 event->event_caps = parent_event->event_caps; in perf_event_alloc()
11721 event->attach_state = PERF_ATTACH_TASK; in perf_event_alloc()
11727 event->hw.target = get_task_struct(task); in perf_event_alloc()
11730 event->clock = &local_clock; in perf_event_alloc()
11732 event->clock = parent_event->clock; in perf_event_alloc()
11742 event->prog = prog; in perf_event_alloc()
11743 event->orig_overflow_handler = in perf_event_alloc()
11750 event->overflow_handler = overflow_handler; in perf_event_alloc()
11751 event->overflow_handler_context = context; in perf_event_alloc()
11752 } else if (is_write_backward(event)) { in perf_event_alloc()
11753 event->overflow_handler = perf_event_output_backward; in perf_event_alloc()
11754 event->overflow_handler_context = NULL; in perf_event_alloc()
11756 event->overflow_handler = perf_event_output_forward; in perf_event_alloc()
11757 event->overflow_handler_context = NULL; in perf_event_alloc()
11760 perf_event__state_init(event); in perf_event_alloc()
11764 hwc = &event->hw; in perf_event_alloc()
11779 if (!has_branch_stack(event)) in perf_event_alloc()
11780 event->attr.branch_sample_type = 0; in perf_event_alloc()
11782 pmu = perf_init_event(event); in perf_event_alloc()
11797 if (event->attr.aux_output && in perf_event_alloc()
11804 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); in perf_event_alloc()
11809 err = exclusive_event_init(event); in perf_event_alloc()
11813 if (has_addr_filter(event)) { in perf_event_alloc()
11814 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters, in perf_event_alloc()
11817 if (!event->addr_filter_ranges) { in perf_event_alloc()
11826 if (event->parent) { in perf_event_alloc()
11827 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_alloc()
11830 memcpy(event->addr_filter_ranges, in perf_event_alloc()
11831 event->parent->addr_filter_ranges, in perf_event_alloc()
11837 event->addr_filters_gen = 1; in perf_event_alloc()
11840 if (!event->parent) { in perf_event_alloc()
11841 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { in perf_event_alloc()
11848 err = security_perf_event_alloc(event); in perf_event_alloc()
11853 account_event(event); in perf_event_alloc()
11855 return event; in perf_event_alloc()
11858 if (!event->parent) { in perf_event_alloc()
11859 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in perf_event_alloc()
11863 kfree(event->addr_filter_ranges); in perf_event_alloc()
11866 exclusive_event_destroy(event); in perf_event_alloc()
11869 if (is_cgroup_event(event)) in perf_event_alloc()
11870 perf_detach_cgroup(event); in perf_event_alloc()
11871 if (event->destroy) in perf_event_alloc()
11872 event->destroy(event); in perf_event_alloc()
11875 if (event->hw.target) in perf_event_alloc()
11876 put_task_struct(event->hw.target); in perf_event_alloc()
11877 call_rcu(&event->rcu_head, free_event_rcu); in perf_event_alloc()
12018 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) in perf_event_set_output() argument
12024 mutex_lock(&event->mmap_mutex); in perf_event_set_output()
12029 if (event == output_event) in perf_event_set_output()
12035 if (output_event->cpu != event->cpu) in perf_event_set_output()
12041 if (output_event->cpu == -1 && output_event->ctx != event->ctx) in perf_event_set_output()
12047 if (output_event->clock != event->clock) in perf_event_set_output()
12054 if (is_write_backward(output_event) != is_write_backward(event)) in perf_event_set_output()
12060 if (has_aux(event) && has_aux(output_event) && in perf_event_set_output()
12061 event->pmu != output_event->pmu) in perf_event_set_output()
12067 * restarts after every removal, it is guaranteed this new event is in perf_event_set_output()
12071 mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex); in perf_event_set_output()
12074 if (atomic_read(&event->mmap_count)) in perf_event_set_output()
12090 ring_buffer_attach(event, rb); in perf_event_set_output()
12094 mutex_unlock(&event->mmap_mutex); in perf_event_set_output()
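perf_event_set_output() backs the PERF_EVENT_IOC_SET_OUTPUT ioctl (and the PERF_FLAG_FD_OUTPUT open flag): it redirects one event's samples into another event's ring buffer, subject to the compatibility checks shown above (same CPU or context, same clock, same write direction). A hedged user-space sketch, assuming both fds come from perf_event_open() on the same CPU.

/* Hedged sketch: make fd2 write its samples into fd1's ring buffer,
 * so only fd1 needs to be mmap()ed.
 */
#include <sys/ioctl.h>
#include <linux/perf_event.h>
#include <stdio.h>

int redirect_output(int fd1, int fd2)
{
	if (ioctl(fd2, PERF_EVENT_IOC_SET_OUTPUT, fd1) < 0) {
		perror("PERF_EVENT_IOC_SET_OUTPUT");
		return -1;
	}
	return 0;
}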
12102 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) in perf_event_set_clock() argument
12108 event->clock = &ktime_get_mono_fast_ns; in perf_event_set_clock()
12113 event->clock = &ktime_get_raw_fast_ns; in perf_event_set_clock()
12118 event->clock = &ktime_get_real_ns; in perf_event_set_clock()
12122 event->clock = &ktime_get_boottime_ns; in perf_event_set_clock()
12126 event->clock = &ktime_get_clocktai_ns; in perf_event_set_clock()
12133 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) in perf_event_set_clock()
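perf_event_set_clock() is only reached when attr.use_clockid is set; it maps the requested clockid onto one of the kernel's clock accessors and rejects clocks that are not NMI-safe on PMUs that can fire in NMI context. A hedged user-space sketch of requesting CLOCK_MONOTONIC_RAW timestamps, which the switch above maps to ktime_get_raw_fast_ns().

/* Hedged sketch: attribute setup asking for raw-monotonic timestamps. */
#include <linux/perf_event.h>
#include <time.h>
#include <string.h>

static void request_raw_clock(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_SOFTWARE;
	attr->config = PERF_COUNT_SW_CPU_CLOCK;
	attr->use_clockid = 1;
	attr->clockid = CLOCK_MONOTONIC_RAW;
	attr->sample_type = PERF_SAMPLE_TIME;
}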
12202 * sys_perf_event_open - open a performance event, associate it to a task/cpu
12207 * @group_fd: group leader event fd
12208 * @flags: perf event open flags
12215 struct perf_event *event, *sibling; in SYSCALL_DEFINE5() local
12318 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, in SYSCALL_DEFINE5()
12320 if (IS_ERR(event)) { in SYSCALL_DEFINE5()
12321 err = PTR_ERR(event); in SYSCALL_DEFINE5()
12325 if (is_sampling_event(event)) { in SYSCALL_DEFINE5()
12326 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { in SYSCALL_DEFINE5()
12336 pmu = event->pmu; in SYSCALL_DEFINE5()
12339 err = perf_event_set_clock(event, attr.clockid); in SYSCALL_DEFINE5()
12345 event->event_caps |= PERF_EV_CAP_SOFTWARE; in SYSCALL_DEFINE5()
12348 if (is_software_event(event) && in SYSCALL_DEFINE5()
12351 * If the event is a sw event, but the group_leader in SYSCALL_DEFINE5()
12359 } else if (!is_software_event(event) && in SYSCALL_DEFINE5()
12364 * try to add a hardware event, move the whole group to in SYSCALL_DEFINE5()
12374 ctx = find_get_context(pmu, task, event); in SYSCALL_DEFINE5()
12381 * Look up the group leader (we will attach this event to it): in SYSCALL_DEFINE5()
12394 if (group_leader->clock != event->clock) in SYSCALL_DEFINE5()
12402 if (group_leader->cpu != event->cpu) in SYSCALL_DEFINE5()
12431 err = perf_event_set_output(event, output_event); in SYSCALL_DEFINE5()
12436 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, in SYSCALL_DEFINE5()
12451 * perf_install_in_context() call for this new event to in SYSCALL_DEFINE5()
12475 * if this new event wound up on the same ctx, if so in SYSCALL_DEFINE5()
12518 if (!perf_event_validate_size(event)) { in SYSCALL_DEFINE5()
12525 * Check if the @cpu we're creating an event for is online. in SYSCALL_DEFINE5()
12539 if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) { in SYSCALL_DEFINE5()
12546 * because we need to serialize with concurrent event creation. in SYSCALL_DEFINE5()
12548 if (!exclusive_event_installable(event, ctx)) { in SYSCALL_DEFINE5()
12597 * event. What we want here is event in the initial in SYSCALL_DEFINE5()
12608 * perf_install_in_context() which is the point the event is active and in SYSCALL_DEFINE5()
12611 perf_event__header_size(event); in SYSCALL_DEFINE5()
12612 perf_event__id_header_size(event); in SYSCALL_DEFINE5()
12614 event->owner = current; in SYSCALL_DEFINE5()
12616 perf_install_in_context(ctx, event, event->cpu); in SYSCALL_DEFINE5()
12629 list_add_tail(&event->owner_entry, &current->perf_event_list); in SYSCALL_DEFINE5()
12634 * new event on the sibling_list. This ensures destruction in SYSCALL_DEFINE5()
12657 * and that will take care of freeing the event. in SYSCALL_DEFINE5()
12660 free_event(event); in SYSCALL_DEFINE5()
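The syscall entry point above is easiest to see from user space. Below is a hedged, minimal example of the common counting (non-sampling) use: open a hardware instructions counter for the calling thread, run some work, and read the count. There is no glibc wrapper, so the raw syscall is used; the busy loop is just stand-in work.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0 /* this thread */, -1 /* any cpu */,
			     -1 /* no group */, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile int i = 0; i < 1000000; i++)
		;				/* the measured work */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}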
12677 * @overflow_handler: callback to trigger when we hit the event
12687 struct perf_event *event; in perf_event_create_kernel_counter() local
12697 event = perf_event_alloc(attr, cpu, task, NULL, NULL, in perf_event_create_kernel_counter()
12699 if (IS_ERR(event)) { in perf_event_create_kernel_counter()
12700 err = PTR_ERR(event); in perf_event_create_kernel_counter()
12705 event->owner = TASK_TOMBSTONE; in perf_event_create_kernel_counter()
12710 ctx = find_get_context(event->pmu, task, event); in perf_event_create_kernel_counter()
12725 * Check if the @cpu we're creating an event for is online. in perf_event_create_kernel_counter()
12738 if (!exclusive_event_installable(event, ctx)) { in perf_event_create_kernel_counter()
12743 perf_install_in_context(ctx, event, event->cpu); in perf_event_create_kernel_counter()
12747 return event; in perf_event_create_kernel_counter()
12754 free_event(event); in perf_event_create_kernel_counter()
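perf_event_create_kernel_counter() is the in-kernel counterpart of the syscall, used by code such as the hard-lockup watchdog to create events it owns itself (note event->owner = TASK_TOMBSTONE above). A hedged in-kernel sketch of that usage, not a complete driver; the overflow callback and sample period are illustrative.

/* Hedged sketch: create a kernel-owned, CPU-bound cycles counter.
 * Returns an ERR_PTR() on failure, as the error paths above show.
 */
#include <linux/perf_event.h>

static void my_overflow(struct perf_event *event,
			struct perf_sample_data *data, struct pt_regs *regs)
{
	/* runs from interrupt/NMI context when the period elapses */
}

static struct perf_event *create_cycles_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.sample_period	= 1000000,
		.pinned		= 1,
	};

	return perf_event_create_kernel_counter(&attr, cpu, NULL,
						my_overflow, NULL);
}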
12764 struct perf_event *event, *tmp; in perf_pmu_migrate_context() local
12775 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, in perf_pmu_migrate_context()
12777 perf_remove_from_context(event, 0); in perf_pmu_migrate_context()
12778 unaccount_event_cpu(event, src_cpu); in perf_pmu_migrate_context()
12780 list_add(&event->migrate_entry, &events); in perf_pmu_migrate_context()
12796 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { in perf_pmu_migrate_context()
12797 if (event->group_leader == event) in perf_pmu_migrate_context()
12800 list_del(&event->migrate_entry); in perf_pmu_migrate_context()
12801 if (event->state >= PERF_EVENT_STATE_OFF) in perf_pmu_migrate_context()
12802 event->state = PERF_EVENT_STATE_INACTIVE; in perf_pmu_migrate_context()
12803 account_event_cpu(event, dst_cpu); in perf_pmu_migrate_context()
12804 perf_install_in_context(dst_ctx, event, dst_cpu); in perf_pmu_migrate_context()
12812 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { in perf_pmu_migrate_context()
12813 list_del(&event->migrate_entry); in perf_pmu_migrate_context()
12814 if (event->state >= PERF_EVENT_STATE_OFF) in perf_pmu_migrate_context()
12815 event->state = PERF_EVENT_STATE_INACTIVE; in perf_pmu_migrate_context()
12816 account_event_cpu(event, dst_cpu); in perf_pmu_migrate_context()
12817 perf_install_in_context(dst_ctx, event, dst_cpu); in perf_pmu_migrate_context()
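perf_pmu_migrate_context() is exported for PMU drivers (typically uncore-style PMUs) that need to move their active events off a CPU that is going away. A hedged sketch of how a hotplug callback might use it; the pmu pointer and target-CPU policy are assumed to come from the driver.

/* Hedged sketch: migrate a driver PMU's events away from a dying CPU. */
#include <linux/perf_event.h>
#include <linux/cpumask.h>

static void my_pmu_cpu_offline(struct pmu *pmu, int dying_cpu)
{
	unsigned int target = cpumask_any_but(cpu_online_mask, dying_cpu);

	if (target < nr_cpu_ids)
		perf_pmu_migrate_context(pmu, dying_cpu, target);
}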
12850 perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx) in perf_event_exit_event() argument
12852 struct perf_event *parent_event = event->parent; in perf_event_exit_event()
12872 perf_remove_from_context(event, detach_flags); in perf_event_exit_event()
12875 if (event->state > PERF_EVENT_STATE_EXIT) in perf_event_exit_event()
12876 perf_event_set_state(event, PERF_EVENT_STATE_EXIT); in perf_event_exit_event()
12888 free_event(event); in perf_event_exit_event()
12896 perf_event_wakeup(event); in perf_event_exit_event()
12961 * When a child task exits, feed back event values to parent events.
12968 struct perf_event *event, *tmp; in perf_event_exit_task() local
12972 list_for_each_entry_safe(event, tmp, &child->perf_event_list, in perf_event_exit_task()
12974 list_del_init(&event->owner_entry); in perf_event_exit_task()
12981 smp_store_release(&event->owner, NULL); in perf_event_exit_task()
12997 static void perf_free_event(struct perf_event *event, in perf_free_event() argument
13000 struct perf_event *parent = event->parent; in perf_free_event()
13006 list_del_init(&event->child_list); in perf_free_event()
13012 perf_group_detach(event); in perf_free_event()
13013 list_del_event(event, ctx); in perf_free_event()
13015 free_event(event); in perf_free_event()
13028 struct perf_event *event, *tmp; in perf_event_free_task() local
13049 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) in perf_event_free_task()
13050 perf_free_event(event, ctx); in perf_event_free_task()
13063 * _free_event()'s put_task_struct(event->hw.target) will be a in perf_event_free_task()
13103 const struct perf_event_attr *perf_event_attrs(struct perf_event *event) in perf_event_attrs() argument
13105 if (!event) in perf_event_attrs()
13108 return &event->attr; in perf_event_attrs()
13112 * Inherit an event from parent task to child task.
13178 * Make the child state follow the state of the parent event, in inherit_event()
13217 * Link this into the parent event's child list in inherit_event()
13226 * Inherits an event group.
13268 * Creates the child task context and tries to inherit the event-group.
13271 * inherited_all set when we 'fail' to inherit an orphaned event; this is
13279 inherit_task_group(struct perf_event *event, struct task_struct *parent, in inherit_task_group() argument
13287 if (!event->attr.inherit || in inherit_task_group()
13288 (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) || in inherit_task_group()
13290 (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) { in inherit_task_group()
13310 ret = inherit_group(event, parent, parent_ctx, in inherit_task_group()
13327 struct perf_event *event; in perf_event_init_context() local
13361 perf_event_groups_for_each(event, &parent_ctx->pinned_groups) { in perf_event_init_context()
13362 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
13378 perf_event_groups_for_each(event, &parent_ctx->flexible_groups) { in perf_event_init_context()
13379 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
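inherit_task_group() decides at fork time whether a parent's event is cloned into the child: attr.inherit must be set, attr.inherit_thread restricts the cloning to CLONE_THREAD children, and sigtrap events are not inherited across CLONE_CLEAR_SIGHAND. A hedged user-space sketch of the attribute bits involved; the event choice is arbitrary.

/* Hedged sketch: an event that is inherited by new threads at fork. */
#include <linux/perf_event.h>
#include <string.h>

static void setup_inherited_event(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
	attr->inherit = 1;		/* clone into children at fork */
	attr->inherit_thread = 1;	/* ...but only CLONE_THREAD children */
}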
13484 struct perf_event *event; in __perf_event_exit_context() local
13488 list_for_each_entry(event, &ctx->event_list, event_entry) in __perf_event_exit_context()
13489 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); in __perf_event_exit_context()