Lines Matching full:event

177 static bool is_kernel_event(struct perf_event *event)  in is_kernel_event()  argument
179 return READ_ONCE(event->owner) == TASK_TOMBSTONE; in is_kernel_event()
191 * - removing the last event from a task ctx; this is relatively straight
194 * - adding the first event to a task ctx; this is tricky because we cannot
205 struct perf_event *event; member
213 struct perf_event *event = efs->event; in event_function() local
214 struct perf_event_context *ctx = event->ctx; in event_function()
249 efs->func(event, cpuctx, ctx, efs->data); in event_function()
256 static void event_function_call(struct perf_event *event, event_f func, void *data) in event_function_call() argument
258 struct perf_event_context *ctx = event->ctx; in event_function_call()
261 .event = event, in event_function_call()
266 if (!event->parent) { in event_function_call()
268 * If this is a !child event, we must hold ctx::mutex to in event_function_call()
269 * stabilize the event->ctx relation. See in event_function_call()
276 cpu_function_call(event->cpu, event_function, &efs); in event_function_call()
301 func(event, NULL, ctx, data); in event_function_call()
309 static void event_function_local(struct perf_event *event, event_f func, void *data) in event_function_local() argument
311 struct perf_event_context *ctx = event->ctx; in event_function_local()
348 func(event, cpuctx, ctx, data); in event_function_local()
405 * perf event paranoia level:
417 * max perf event sample rate
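The two fragments above refer to the knobs exposed as /proc/sys/kernel/perf_event_paranoid and /proc/sys/kernel/perf_event_max_sample_rate. As an illustration only (user-space code, not part of the listed file), a minimal sketch of checking the paranoid level before opening a counter could look like this:

/* Illustrative user-space sketch; not taken from kernel/events/core.c. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>

static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
				int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
	int paranoid = 2;
	long fd;

	if (f) {
		if (fscanf(f, "%d", &paranoid) != 1)
			paranoid = 2;	/* assume the common default */
		fclose(f);
	}

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* Count for the calling task on any CPU; EACCES/EPERM here usually
	 * means the paranoid setting forbids this class of event. */
	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	printf("opened cycles counter (perf_event_paranoid=%d)\n", paranoid);
	close(fd);
	return 0;
}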
574 static u64 perf_event_time(struct perf_event *event);
588 static inline u64 perf_event_clock(struct perf_event *event) in perf_event_clock() argument
590 return event->clock(); in perf_event_clock()
594 * State based event timekeeping...
596 * The basic idea is to use event->state to determine which (if any) time
601 * Event groups make things a little more complicated, but not terribly so. The
616 __perf_effective_state(struct perf_event *event) in __perf_effective_state() argument
618 struct perf_event *leader = event->group_leader; in __perf_effective_state()
623 return event->state; in __perf_effective_state()
627 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running) in __perf_update_times() argument
629 enum perf_event_state state = __perf_effective_state(event); in __perf_update_times()
630 u64 delta = now - event->tstamp; in __perf_update_times()
632 *enabled = event->total_time_enabled; in __perf_update_times()
636 *running = event->total_time_running; in __perf_update_times()
641 static void perf_event_update_time(struct perf_event *event) in perf_event_update_time() argument
643 u64 now = perf_event_time(event); in perf_event_update_time()
645 __perf_update_times(event, now, &event->total_time_enabled, in perf_event_update_time()
646 &event->total_time_running); in perf_event_update_time()
647 event->tstamp = now; in perf_event_update_time()
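As a worked illustration of the timekeeping rule described above (the names below are hypothetical and the snippet is a stand-alone mirror, not code from the listed file): enabled time accrues whenever the effective state is at least INACTIVE, while running time accrues only while the event is ACTIVE.

#include <stdint.h>

/* Stand-alone sketch of the bookkeeping done by __perf_update_times(). */
enum sketch_state { SKETCH_OFF = -1, SKETCH_INACTIVE = 0, SKETCH_ACTIVE = 1 };

static void update_times_sketch(uint64_t now, uint64_t tstamp,
				enum sketch_state state,
				uint64_t *enabled, uint64_t *running)
{
	uint64_t delta = now - tstamp;	/* time since the last state change */

	if (state >= SKETCH_INACTIVE)
		*enabled += delta;	/* event was at least enabled */
	if (state == SKETCH_ACTIVE)
		*running += delta;	/* event was actually on the PMU */
}

For example, an event that was enabled 10 ms ago but only scheduled onto the PMU for the last 4 ms ends up with total_time_enabled = 10 ms and total_time_running = 4 ms.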
659 perf_event_set_state(struct perf_event *event, enum perf_event_state state) in perf_event_set_state() argument
661 if (event->state == state) in perf_event_set_state()
664 perf_event_update_time(event); in perf_event_set_state()
669 if ((event->state < 0) ^ (state < 0)) in perf_event_set_state()
670 perf_event_update_sibling_time(event); in perf_event_set_state()
672 WRITE_ONCE(event->state, state); in perf_event_set_state()
678 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
680 struct perf_event_context *ctx = event->ctx; in perf_cgroup_match()
683 /* @event doesn't care about cgroup */ in perf_cgroup_match()
684 if (!event->cgrp) in perf_cgroup_match()
692 * Cgroup scoping is recursive. An event enabled for a cgroup is in perf_cgroup_match()
694 * cgroup is a descendant of @event's (the test covers identity in perf_cgroup_match()
698 event->cgrp->css.cgroup); in perf_cgroup_match()
701 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
703 css_put(&event->cgrp->css); in perf_detach_cgroup()
704 event->cgrp = NULL; in perf_detach_cgroup()
707 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
709 return event->cgrp != NULL; in is_cgroup_event()
712 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
716 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time()
746 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
754 if (!is_cgroup_event(event)) in update_cgrp_time_from_event()
757 cgrp = perf_cgroup_from_task(current, event->ctx); in update_cgrp_time_from_event()
761 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) in update_cgrp_time_from_event()
762 __update_cgrp_time(event->cgrp); in update_cgrp_time_from_event()
792 #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
901 static int perf_cgroup_ensure_storage(struct perf_event *event, in perf_cgroup_ensure_storage() argument
916 cpuctx = per_cpu_ptr(event->pmu->pmu_cpu_context, cpu); in perf_cgroup_ensure_storage()
942 static inline int perf_cgroup_connect(int fd, struct perf_event *event, in perf_cgroup_connect() argument
961 ret = perf_cgroup_ensure_storage(event, css); in perf_cgroup_connect()
966 event->cgrp = cgrp; in perf_cgroup_connect()
974 perf_detach_cgroup(event); in perf_cgroup_connect()
983 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) in perf_cgroup_set_shadow_time() argument
986 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_set_shadow_time()
987 event->shadow_ctx_time = now - t->timestamp; in perf_cgroup_set_shadow_time()
991 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
995 if (!is_cgroup_event(event)) in perf_cgroup_event_enable()
1006 * matching the event's cgroup, we must do this for every new event, in perf_cgroup_event_enable()
1013 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) in perf_cgroup_event_enable()
1021 per_cpu_ptr(&cgrp_cpuctx_list, event->cpu)); in perf_cgroup_event_enable()
1025 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1029 if (!is_cgroup_event(event)) in perf_cgroup_event_disable()
1050 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
1055 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
1058 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
1063 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
1081 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, in perf_cgroup_connect() argument
1100 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) in perf_cgroup_set_shadow_time() argument
1104 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
1110 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
1115 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1307 * because the sys_perf_event_open() case will install a new event and break
1318 * quiesce the event, after which we can install it in the new location. This
1319 * means that only external vectors (perf_fops, prctl) can perturb the event
1323 * However, because event->ctx can change while we're waiting to acquire
1342 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) in perf_event_ctx_lock_nested() argument
1348 ctx = READ_ONCE(event->ctx); in perf_event_ctx_lock_nested()
1356 if (event->ctx != ctx) { in perf_event_ctx_lock_nested()
1366 perf_event_ctx_lock(struct perf_event *event) in perf_event_ctx_lock() argument
1368 return perf_event_ctx_lock_nested(event, 0); in perf_event_ctx_lock()
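The fragments above come from the lock-and-revalidate pattern used to pin event->ctx: take a reference under RCU, acquire ctx::mutex, and retry if the event migrated to another context while we slept. A paraphrased sketch of that pattern (kernel-context pseudocode reconstructed from the fragments, not a verbatim quote):

again:
	rcu_read_lock();
	ctx = READ_ONCE(event->ctx);
	if (!refcount_inc_not_zero(&ctx->refcount)) {
		rcu_read_unlock();
		goto again;			/* ctx is on its way out, reload */
	}
	rcu_read_unlock();

	mutex_lock_nested(&ctx->mutex, nesting);
	if (event->ctx != ctx) {		/* the event moved while we waited */
		mutex_unlock(&ctx->mutex);
		put_ctx(ctx);
		goto again;
	}
	return ctx;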
1371 static void perf_event_ctx_unlock(struct perf_event *event, in perf_event_ctx_unlock() argument
1397 static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p, in perf_event_pid_type() argument
1404 if (event->parent) in perf_event_pid_type()
1405 event = event->parent; in perf_event_pid_type()
1407 nr = __task_pid_nr_ns(p, type, event->ns); in perf_event_pid_type()
1414 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) in perf_event_pid() argument
1416 return perf_event_pid_type(event, p, PIDTYPE_TGID); in perf_event_pid()
1419 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) in perf_event_tid() argument
1421 return perf_event_pid_type(event, p, PIDTYPE_PID); in perf_event_tid()
1425 * If we inherit events we want to return the parent event id
1428 static u64 primary_event_id(struct perf_event *event) in primary_event_id() argument
1430 u64 id = event->id; in primary_event_id()
1432 if (event->parent) in primary_event_id()
1433 id = event->parent->id; in primary_event_id()
1534 static u64 perf_event_time(struct perf_event *event) in perf_event_time() argument
1536 struct perf_event_context *ctx = event->ctx; in perf_event_time()
1538 if (is_cgroup_event(event)) in perf_event_time()
1539 return perf_cgroup_event_time(event); in perf_event_time()
1544 static enum event_type_t get_event_type(struct perf_event *event) in get_event_type() argument
1546 struct perf_event_context *ctx = event->ctx; in get_event_type()
1555 if (event->group_leader != event) in get_event_type()
1556 event = event->group_leader; in get_event_type()
1558 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE; in get_event_type()
1566 * Helper function to initialize event group nodes.
1568 static void init_event_group(struct perf_event *event) in init_event_group() argument
1570 RB_CLEAR_NODE(&event->group_node); in init_event_group()
1571 event->group_index = 0; in init_event_group()
1576 * based on event attrs bits.
1579 get_event_groups(struct perf_event *event, struct perf_event_context *ctx) in get_event_groups() argument
1581 if (event->attr.pinned) in get_event_groups()
1597 * Compare function for event groups;
1643 * Insert @event into @groups' tree; using {@event->cpu, ++@groups->index} for
1649 struct perf_event *event) in perf_event_groups_insert() argument
1655 event->group_index = ++groups->index; in perf_event_groups_insert()
1664 if (perf_event_groups_less(event, node_event)) in perf_event_groups_insert()
1670 rb_link_node(&event->group_node, parent, node); in perf_event_groups_insert()
1671 rb_insert_color(&event->group_node, &groups->tree); in perf_event_groups_insert()
1675 * Helper function to insert event into the pinned or flexible groups.
1678 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx) in add_event_to_groups() argument
1682 groups = get_event_groups(event, ctx); in add_event_to_groups()
1683 perf_event_groups_insert(groups, event); in add_event_to_groups()
1691 struct perf_event *event) in perf_event_groups_delete() argument
1693 WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) || in perf_event_groups_delete()
1696 rb_erase(&event->group_node, &groups->tree); in perf_event_groups_delete()
1697 init_event_group(event); in perf_event_groups_delete()
1701 * Helper function to delete event from its groups.
1704 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) in del_event_from_groups() argument
1708 groups = get_event_groups(event, ctx); in del_event_from_groups()
1709 perf_event_groups_delete(groups, event); in del_event_from_groups()
1713 * Get the leftmost event in the cpu/cgroup subtree.
1764 perf_event_groups_next(struct perf_event *event) in perf_event_groups_next() argument
1772 next = rb_entry_safe(rb_next(&event->group_node), typeof(*event), group_node); in perf_event_groups_next()
1773 if (next == NULL || next->cpu != event->cpu) in perf_event_groups_next()
1777 if (event->cgrp && event->cgrp->css.cgroup) in perf_event_groups_next()
1778 curr_cgrp_id = event->cgrp->css.cgroup->kn->id; in perf_event_groups_next()
1792 #define perf_event_groups_for_each(event, groups) \ argument
1793 for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
1794 typeof(*event), group_node); event; \
1795 event = rb_entry_safe(rb_next(&event->group_node), \
1796 typeof(*event), group_node))
1799 * Add an event to the lists for its context.
1803 list_add_event(struct perf_event *event, struct perf_event_context *ctx) in list_add_event() argument
1807 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); in list_add_event()
1808 event->attach_state |= PERF_ATTACH_CONTEXT; in list_add_event()
1810 event->tstamp = perf_event_time(event); in list_add_event()
1813 * If we're a stand alone event or group leader, we go to the context in list_add_event()
1817 if (event->group_leader == event) { in list_add_event()
1818 event->group_caps = event->event_caps; in list_add_event()
1819 add_event_to_groups(event, ctx); in list_add_event()
1822 list_add_rcu(&event->event_entry, &ctx->event_list); in list_add_event()
1824 if (event->attr.inherit_stat) in list_add_event()
1827 if (event->state > PERF_EVENT_STATE_OFF) in list_add_event()
1828 perf_cgroup_event_enable(event, ctx); in list_add_event()
1834 * Initialize event state based on the perf_event_attr::disabled.
1836 static inline void perf_event__state_init(struct perf_event *event) in perf_event__state_init() argument
1838 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : in perf_event__state_init()
1842 static void __perf_event_read_size(struct perf_event *event, int nr_siblings) in __perf_event_read_size() argument
1848 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) in __perf_event_read_size()
1851 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) in __perf_event_read_size()
1854 if (event->attr.read_format & PERF_FORMAT_ID) in __perf_event_read_size()
1857 if (event->attr.read_format & PERF_FORMAT_GROUP) { in __perf_event_read_size()
1863 event->read_size = size; in __perf_event_read_size()
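The sizing above mirrors what user space gets back from read(2) on the event fd. For an event without PERF_FORMAT_GROUP, the documented layout is, as an illustrative declaration (not from the listed file):

#include <stdint.h>

/* read(2) result for a perf fd without PERF_FORMAT_GROUP; each optional
 * field is present only if the matching bit is set in attr.read_format. */
struct read_format_single {
	uint64_t value;		/* the counter value */
	uint64_t time_enabled;	/* if PERF_FORMAT_TOTAL_TIME_ENABLED */
	uint64_t time_running;	/* if PERF_FORMAT_TOTAL_TIME_RUNNING */
	uint64_t id;		/* if PERF_FORMAT_ID */
};

With PERF_FORMAT_GROUP the reply instead carries one value (plus optional id) per group member, which is why the size computed here scales with the number of siblings.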
1866 static void __perf_event_header_size(struct perf_event *event, u64 sample_type) in __perf_event_header_size() argument
1884 size += event->read_size; in __perf_event_header_size()
1898 event->header_size = size; in __perf_event_header_size()
1905 static void perf_event__header_size(struct perf_event *event) in perf_event__header_size() argument
1907 __perf_event_read_size(event, in perf_event__header_size()
1908 event->group_leader->nr_siblings); in perf_event__header_size()
1909 __perf_event_header_size(event, event->attr.sample_type); in perf_event__header_size()
1912 static void perf_event__id_header_size(struct perf_event *event) in perf_event__id_header_size() argument
1915 u64 sample_type = event->attr.sample_type; in perf_event__id_header_size()
1936 event->id_header_size = size; in perf_event__id_header_size()
1939 static bool perf_event_validate_size(struct perf_event *event) in perf_event_validate_size() argument
1943 * attach the event. in perf_event_validate_size()
1945 __perf_event_read_size(event, event->group_leader->nr_siblings + 1); in perf_event_validate_size()
1946 __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); in perf_event_validate_size()
1947 perf_event__id_header_size(event); in perf_event_validate_size()
1953 if (event->read_size + event->header_size + in perf_event_validate_size()
1954 event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) in perf_event_validate_size()
1960 static void perf_group_attach(struct perf_event *event) in perf_group_attach() argument
1962 struct perf_event *group_leader = event->group_leader, *pos; in perf_group_attach()
1964 lockdep_assert_held(&event->ctx->lock); in perf_group_attach()
1969 if (event->attach_state & PERF_ATTACH_GROUP) in perf_group_attach()
1972 event->attach_state |= PERF_ATTACH_GROUP; in perf_group_attach()
1974 if (group_leader == event) in perf_group_attach()
1977 WARN_ON_ONCE(group_leader->ctx != event->ctx); in perf_group_attach()
1979 group_leader->group_caps &= event->event_caps; in perf_group_attach()
1981 list_add_tail(&event->sibling_list, &group_leader->sibling_list); in perf_group_attach()
1991 * Remove an event from the lists for its context.
1995 list_del_event(struct perf_event *event, struct perf_event_context *ctx) in list_del_event() argument
1997 WARN_ON_ONCE(event->ctx != ctx); in list_del_event()
2003 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) in list_del_event()
2006 event->attach_state &= ~PERF_ATTACH_CONTEXT; in list_del_event()
2009 if (event->attr.inherit_stat) in list_del_event()
2012 list_del_rcu(&event->event_entry); in list_del_event()
2014 if (event->group_leader == event) in list_del_event()
2015 del_event_from_groups(event, ctx); in list_del_event()
2018 * If event was in error state, then keep it in list_del_event()
2022 * of the event in list_del_event()
2024 if (event->state > PERF_EVENT_STATE_OFF) { in list_del_event()
2025 perf_cgroup_event_disable(event, ctx); in list_del_event()
2026 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in list_del_event()
2033 perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event) in perf_aux_output_match() argument
2038 if (!event->pmu->aux_output_match) in perf_aux_output_match()
2041 return event->pmu->aux_output_match(aux_event); in perf_aux_output_match()
2044 static void put_event(struct perf_event *event);
2045 static void event_sched_out(struct perf_event *event,
2049 static void perf_put_aux_event(struct perf_event *event) in perf_put_aux_event() argument
2051 struct perf_event_context *ctx = event->ctx; in perf_put_aux_event()
2056 * If event uses aux_event tear down the link in perf_put_aux_event()
2058 if (event->aux_event) { in perf_put_aux_event()
2059 iter = event->aux_event; in perf_put_aux_event()
2060 event->aux_event = NULL; in perf_put_aux_event()
2066 * If the event is an aux_event, tear down all links to in perf_put_aux_event()
2069 for_each_sibling_event(iter, event->group_leader) { in perf_put_aux_event()
2070 if (iter->aux_event != event) in perf_put_aux_event()
2074 put_event(event); in perf_put_aux_event()
2082 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_put_aux_event()
2086 static bool perf_need_aux_event(struct perf_event *event) in perf_need_aux_event() argument
2088 return !!event->attr.aux_output || !!event->attr.aux_sample_size; in perf_need_aux_event()
2091 static int perf_get_aux_event(struct perf_event *event, in perf_get_aux_event() argument
2095 * Our group leader must be an aux event if we want to be in perf_get_aux_event()
2096 * an aux_output. This way, the aux event will precede its in perf_get_aux_event()
2106 if (event->attr.aux_output && event->attr.aux_sample_size) in perf_get_aux_event()
2109 if (event->attr.aux_output && in perf_get_aux_event()
2110 !perf_aux_output_match(event, group_leader)) in perf_get_aux_event()
2113 if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux) in perf_get_aux_event()
2120 * Link aux_outputs to their aux event; this is undone in in perf_get_aux_event()
2125 event->aux_event = group_leader; in perf_get_aux_event()
2130 static inline struct list_head *get_event_list(struct perf_event *event) in get_event_list() argument
2132 struct perf_event_context *ctx = event->ctx; in get_event_list()
2133 return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active; in get_event_list()
2142 static inline void perf_remove_sibling_event(struct perf_event *event) in perf_remove_sibling_event() argument
2144 struct perf_event_context *ctx = event->ctx; in perf_remove_sibling_event()
2147 event_sched_out(event, cpuctx, ctx); in perf_remove_sibling_event()
2148 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_remove_sibling_event()
2151 static void perf_group_detach(struct perf_event *event) in perf_group_detach() argument
2153 struct perf_event *leader = event->group_leader; in perf_group_detach()
2155 struct perf_event_context *ctx = event->ctx; in perf_group_detach()
2162 if (!(event->attach_state & PERF_ATTACH_GROUP)) in perf_group_detach()
2165 event->attach_state &= ~PERF_ATTACH_GROUP; in perf_group_detach()
2167 perf_put_aux_event(event); in perf_group_detach()
2172 if (leader != event) { in perf_group_detach()
2173 list_del_init(&event->sibling_list); in perf_group_detach()
2174 event->group_leader->nr_siblings--; in perf_group_detach()
2179 * If this was a group event with sibling events then in perf_group_detach()
2183 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { in perf_group_detach()
2192 sibling->group_caps = event->group_caps; in perf_group_detach()
2194 if (!RB_EMPTY_NODE(&event->group_node)) { in perf_group_detach()
2195 add_event_to_groups(sibling, event->ctx); in perf_group_detach()
2201 WARN_ON_ONCE(sibling->ctx != event->ctx); in perf_group_detach()
2211 static bool is_orphaned_event(struct perf_event *event) in is_orphaned_event() argument
2213 return event->state == PERF_EVENT_STATE_DEAD; in is_orphaned_event()
2216 static inline int __pmu_filter_match(struct perf_event *event) in __pmu_filter_match() argument
2218 struct pmu *pmu = event->pmu; in __pmu_filter_match()
2219 return pmu->filter_match ? pmu->filter_match(event) : 1; in __pmu_filter_match()
2223 * Check whether we should attempt to schedule an event group based on
2224 * PMU-specific filtering. An event group can consist of HW and SW events,
2228 static inline int pmu_filter_match(struct perf_event *event) in pmu_filter_match() argument
2232 if (!__pmu_filter_match(event)) in pmu_filter_match()
2235 for_each_sibling_event(sibling, event) { in pmu_filter_match()
2244 event_filter_match(struct perf_event *event) in event_filter_match() argument
2246 return (event->cpu == -1 || event->cpu == smp_processor_id()) && in event_filter_match()
2247 perf_cgroup_match(event) && pmu_filter_match(event); in event_filter_match()
2251 event_sched_out(struct perf_event *event, in event_sched_out() argument
2257 WARN_ON_ONCE(event->ctx != ctx); in event_sched_out()
2260 if (event->state != PERF_EVENT_STATE_ACTIVE) in event_sched_out()
2268 list_del_init(&event->active_list); in event_sched_out()
2270 perf_pmu_disable(event->pmu); in event_sched_out()
2272 event->pmu->del(event, 0); in event_sched_out()
2273 event->oncpu = -1; in event_sched_out()
2275 if (READ_ONCE(event->pending_disable) >= 0) { in event_sched_out()
2276 WRITE_ONCE(event->pending_disable, -1); in event_sched_out()
2277 perf_cgroup_event_disable(event, ctx); in event_sched_out()
2280 perf_event_set_state(event, state); in event_sched_out()
2282 if (!is_software_event(event)) in event_sched_out()
2286 if (event->attr.freq && event->attr.sample_freq) in event_sched_out()
2288 if (event->attr.exclusive || !cpuctx->active_oncpu) in event_sched_out()
2291 perf_pmu_enable(event->pmu); in event_sched_out()
2299 struct perf_event *event; in group_sched_out() local
2311 for_each_sibling_event(event, group_event) in group_sched_out()
2312 event_sched_out(event, cpuctx, ctx); in group_sched_out()
2320 * Cross CPU call to remove a performance event
2322 * We disable the event on the hardware level first. After that we
2326 __perf_remove_from_context(struct perf_event *event, in __perf_remove_from_context() argument
2338 event_sched_out(event, cpuctx, ctx); in __perf_remove_from_context()
2340 perf_group_detach(event); in __perf_remove_from_context()
2341 list_del_event(event, ctx); in __perf_remove_from_context()
2354 * Remove the event from a task's (or a CPU's) list of events.
2356 * If event->ctx is a cloned context, callers must make sure that
2357 * every task struct that event->ctx->task could possibly point to
2363 static void perf_remove_from_context(struct perf_event *event, unsigned long flags) in perf_remove_from_context() argument
2365 struct perf_event_context *ctx = event->ctx; in perf_remove_from_context()
2369 event_function_call(event, __perf_remove_from_context, (void *)flags); in perf_remove_from_context()
2377 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); in perf_remove_from_context()
2379 (event->attach_state & PERF_ATTACH_GROUP)) { in perf_remove_from_context()
2385 perf_group_detach(event); in perf_remove_from_context()
2391 * Cross CPU call to disable a performance event
2393 static void __perf_event_disable(struct perf_event *event, in __perf_event_disable() argument
2398 if (event->state < PERF_EVENT_STATE_INACTIVE) in __perf_event_disable()
2403 update_cgrp_time_from_event(event); in __perf_event_disable()
2406 if (event == event->group_leader) in __perf_event_disable()
2407 group_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2409 event_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2411 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in __perf_event_disable()
2412 perf_cgroup_event_disable(event, ctx); in __perf_event_disable()
2416 * Disable an event.
2418 * If event->ctx is a cloned context, callers must make sure that
2419 * every task struct that event->ctx->task could possibly point to
2422 * hold the top-level event's child_mutex, so any descendant that
2425 * When called from perf_pending_event it's OK because event->ctx
2429 static void _perf_event_disable(struct perf_event *event) in _perf_event_disable() argument
2431 struct perf_event_context *ctx = event->ctx; in _perf_event_disable()
2434 if (event->state <= PERF_EVENT_STATE_OFF) { in _perf_event_disable()
2440 event_function_call(event, __perf_event_disable, NULL); in _perf_event_disable()
2443 void perf_event_disable_local(struct perf_event *event) in perf_event_disable_local() argument
2445 event_function_local(event, __perf_event_disable, NULL); in perf_event_disable_local()
2452 void perf_event_disable(struct perf_event *event) in perf_event_disable() argument
2456 ctx = perf_event_ctx_lock(event); in perf_event_disable()
2457 _perf_event_disable(event); in perf_event_disable()
2458 perf_event_ctx_unlock(event, ctx); in perf_event_disable()
2462 void perf_event_disable_inatomic(struct perf_event *event) in perf_event_disable_inatomic() argument
2464 WRITE_ONCE(event->pending_disable, smp_processor_id()); in perf_event_disable_inatomic()
2466 irq_work_queue(&event->pending); in perf_event_disable_inatomic()
2469 static void perf_set_shadow_time(struct perf_event *event, in perf_set_shadow_time() argument
2487 * - event is guaranteed scheduled in in perf_set_shadow_time()
2497 if (is_cgroup_event(event)) in perf_set_shadow_time()
2498 perf_cgroup_set_shadow_time(event, event->tstamp); in perf_set_shadow_time()
2500 event->shadow_ctx_time = event->tstamp - ctx->timestamp; in perf_set_shadow_time()
2505 static void perf_log_throttle(struct perf_event *event, int enable);
2506 static void perf_log_itrace_start(struct perf_event *event);
2509 event_sched_in(struct perf_event *event, in event_sched_in() argument
2515 WARN_ON_ONCE(event->ctx != ctx); in event_sched_in()
2519 if (event->state <= PERF_EVENT_STATE_OFF) in event_sched_in()
2522 WRITE_ONCE(event->oncpu, smp_processor_id()); in event_sched_in()
2524 * Order event::oncpu write to happen before the ACTIVE state is in event_sched_in()
2529 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE); in event_sched_in()
2536 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { in event_sched_in()
2537 perf_log_throttle(event, 1); in event_sched_in()
2538 event->hw.interrupts = 0; in event_sched_in()
2541 perf_pmu_disable(event->pmu); in event_sched_in()
2543 perf_set_shadow_time(event, ctx); in event_sched_in()
2545 perf_log_itrace_start(event); in event_sched_in()
2547 if (event->pmu->add(event, PERF_EF_START)) { in event_sched_in()
2548 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_sched_in()
2549 event->oncpu = -1; in event_sched_in()
2554 if (!is_software_event(event)) in event_sched_in()
2558 if (event->attr.freq && event->attr.sample_freq) in event_sched_in()
2561 if (event->attr.exclusive) in event_sched_in()
2565 perf_pmu_enable(event->pmu); in event_sched_in()
2575 struct perf_event *event, *partial_group = NULL; in group_sched_in() local
2589 for_each_sibling_event(event, group_event) { in group_sched_in()
2590 if (event_sched_in(event, cpuctx, ctx)) { in group_sched_in()
2591 partial_group = event; in group_sched_in()
2603 * The events up to the failed event are scheduled out normally. in group_sched_in()
2605 for_each_sibling_event(event, group_event) { in group_sched_in()
2606 if (event == partial_group) in group_sched_in()
2609 event_sched_out(event, cpuctx, ctx); in group_sched_in()
2619 * Work out whether we can put this event group on the CPU now.
2621 static int group_can_go_on(struct perf_event *event, in group_can_go_on() argument
2628 if (event->group_caps & PERF_EV_CAP_SOFTWARE) in group_can_go_on()
2640 if (event->attr.exclusive && !list_empty(get_event_list(event))) in group_can_go_on()
2649 static void add_event_to_ctx(struct perf_event *event, in add_event_to_ctx() argument
2652 list_add_event(event, ctx); in add_event_to_ctx()
2653 perf_group_attach(event); in add_event_to_ctx()
2698 * time an event is added, only do it for the groups of equal priority and
2752 * Cross CPU call to install and enable a performance event
2759 struct perf_event *event = info; in __perf_install_in_context() local
2760 struct perf_event_context *ctx = event->ctx; in __perf_install_in_context()
2791 if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) { in __perf_install_in_context()
2793 * If the current cgroup doesn't match the event's in __perf_install_in_context()
2798 event->cgrp->css.cgroup); in __perf_install_in_context()
2804 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2805 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_install_in_context()
2807 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2816 static bool exclusive_event_installable(struct perf_event *event,
2820 * Attach a performance event to a context.
2826 struct perf_event *event, in perf_install_in_context() argument
2833 WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); in perf_install_in_context()
2835 if (event->cpu != -1) in perf_install_in_context()
2836 event->cpu = cpu; in perf_install_in_context()
2839 * Ensures that if we can observe event->ctx, both the event and ctx in perf_install_in_context()
2842 smp_store_release(&event->ctx, ctx); in perf_install_in_context()
2846 * without IPI. Except when this is the first event for the context, in in perf_install_in_context()
2850 * event will issue the IPI and reprogram the hardware. in perf_install_in_context()
2852 if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && ctx->nr_events) { in perf_install_in_context()
2858 add_event_to_ctx(event, ctx); in perf_install_in_context()
2864 cpu_function_call(cpu, __perf_install_in_context, event); in perf_install_in_context()
2906 if (!task_function_call(task, __perf_install_in_context, event)) in perf_install_in_context()
2922 * thus we can safely install the event. in perf_install_in_context()
2928 add_event_to_ctx(event, ctx); in perf_install_in_context()
2933 * Cross CPU call to enable a performance event
2935 static void __perf_event_enable(struct perf_event *event, in __perf_event_enable() argument
2940 struct perf_event *leader = event->group_leader; in __perf_event_enable()
2943 if (event->state >= PERF_EVENT_STATE_INACTIVE || in __perf_event_enable()
2944 event->state <= PERF_EVENT_STATE_ERROR) in __perf_event_enable()
2950 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in __perf_event_enable()
2951 perf_cgroup_event_enable(event, ctx); in __perf_event_enable()
2956 if (!event_filter_match(event)) { in __perf_event_enable()
2962 * If the event is in a group and isn't the group leader, in __perf_event_enable()
2965 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) { in __perf_event_enable()
2974 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_event_enable()
2978 * Enable an event.
2980 * If event->ctx is a cloned context, callers must make sure that
2981 * every task struct that event->ctx->task could possibly point to
2986 static void _perf_event_enable(struct perf_event *event) in _perf_event_enable() argument
2988 struct perf_event_context *ctx = event->ctx; in _perf_event_enable()
2991 if (event->state >= PERF_EVENT_STATE_INACTIVE || in _perf_event_enable()
2992 event->state < PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
2999 * If the event is in error state, clear that first. in _perf_event_enable()
3001 * That way, if we see the event in error state below, we know that it in _perf_event_enable()
3005 if (event->state == PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
3009 if (event->event_caps & PERF_EV_CAP_SIBLING && in _perf_event_enable()
3010 event->group_leader == event) in _perf_event_enable()
3013 event->state = PERF_EVENT_STATE_OFF; in _perf_event_enable()
3017 event_function_call(event, __perf_event_enable, NULL); in _perf_event_enable()
3023 void perf_event_enable(struct perf_event *event) in perf_event_enable() argument
3027 ctx = perf_event_ctx_lock(event); in perf_event_enable()
3028 _perf_event_enable(event); in perf_event_enable()
3029 perf_event_ctx_unlock(event, ctx); in perf_event_enable()
3034 struct perf_event *event; member
3041 struct perf_event *event = sd->event; in __perf_event_stop() local
3044 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in __perf_event_stop()
3052 * so we need to check again lest we try to stop another CPU's event. in __perf_event_stop()
3054 if (READ_ONCE(event->oncpu) != smp_processor_id()) in __perf_event_stop()
3057 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_stop()
3065 * Since this is happening on an event-local CPU, no trace is lost in __perf_event_stop()
3069 event->pmu->start(event, 0); in __perf_event_stop()
3074 static int perf_event_stop(struct perf_event *event, int restart) in perf_event_stop() argument
3077 .event = event, in perf_event_stop()
3083 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in perf_event_stop()
3090 * We only want to restart ACTIVE events, so if the event goes in perf_event_stop()
3091 * inactive here (event->oncpu==-1), there's nothing more to do; in perf_event_stop()
3094 ret = cpu_function_call(READ_ONCE(event->oncpu), in perf_event_stop()
3107 * event::addr_filter_ranges array and bump the event::addr_filters_gen;
3108 * (p2) when an event is scheduled in (pmu::add), it calls
3112 * If (p1) happens while the event is active, we restart it to force (p2).
3123 void perf_event_addr_filters_sync(struct perf_event *event) in perf_event_addr_filters_sync() argument
3125 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_sync()
3127 if (!has_addr_filter(event)) in perf_event_addr_filters_sync()
3131 if (event->addr_filters_gen != event->hw.addr_filters_gen) { in perf_event_addr_filters_sync()
3132 event->pmu->addr_filters_sync(event); in perf_event_addr_filters_sync()
3133 event->hw.addr_filters_gen = event->addr_filters_gen; in perf_event_addr_filters_sync()
3139 static int _perf_event_refresh(struct perf_event *event, int refresh) in _perf_event_refresh() argument
3144 if (event->attr.inherit || !is_sampling_event(event)) in _perf_event_refresh()
3147 atomic_add(refresh, &event->event_limit); in _perf_event_refresh()
3148 _perf_event_enable(event); in _perf_event_refresh()
3156 int perf_event_refresh(struct perf_event *event, int refresh) in perf_event_refresh() argument
3161 ctx = perf_event_ctx_lock(event); in perf_event_refresh()
3162 ret = _perf_event_refresh(event, refresh); in perf_event_refresh()
3163 perf_event_ctx_unlock(event, ctx); in perf_event_refresh()
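perf_event_refresh() backs the PERF_EVENT_IOC_REFRESH ioctl: it adds to the event's overflow budget and re-enables it. A hedged user-space sketch of the corresponding call (the fd is assumed to be a sampling event opened elsewhere):

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <stdio.h>

/* Assumes 'fd' refers to a sampling perf event without attr.inherit. */
static int arm_for_one_overflow(int fd)
{
	/* Allow one more overflow; after it fires the kernel disables the
	 * event again and the fd becomes readable/poll-able. */
	if (ioctl(fd, PERF_EVENT_IOC_REFRESH, 1) < 0) {
		perror("PERF_EVENT_IOC_REFRESH");
		return -1;
	}
	return 0;
}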
3184 static int perf_event_modify_attr(struct perf_event *event, in perf_event_modify_attr() argument
3187 if (event->attr.type != attr->type) in perf_event_modify_attr()
3190 switch (event->attr.type) { in perf_event_modify_attr()
3192 return perf_event_modify_breakpoint(event, attr); in perf_event_modify_attr()
3203 struct perf_event *event, *tmp; in ctx_sched_out() local
3251 list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list) in ctx_sched_out()
3252 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
3256 list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list) in ctx_sched_out()
3257 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
3307 static void __perf_event_sync_stat(struct perf_event *event, in __perf_event_sync_stat() argument
3312 if (!event->attr.inherit_stat) in __perf_event_sync_stat()
3316 * Update the event value, we cannot use perf_event_read() in __perf_event_sync_stat()
3319 * we know the event must be on the current CPU, therefore we in __perf_event_sync_stat()
3322 if (event->state == PERF_EVENT_STATE_ACTIVE) in __perf_event_sync_stat()
3323 event->pmu->read(event); in __perf_event_sync_stat()
3325 perf_event_update_time(event); in __perf_event_sync_stat()
3328 * In order to keep per-task stats reliable we need to flip the event in __perf_event_sync_stat()
3332 value = local64_xchg(&event->count, value); in __perf_event_sync_stat()
3335 swap(event->total_time_enabled, next_event->total_time_enabled); in __perf_event_sync_stat()
3336 swap(event->total_time_running, next_event->total_time_running); in __perf_event_sync_stat()
3341 perf_event_update_userpage(event); in __perf_event_sync_stat()
3348 struct perf_event *event, *next_event; in perf_event_sync_stat() local
3355 event = list_first_entry(&ctx->event_list, in perf_event_sync_stat()
3361 while (&event->event_entry != &ctx->event_list && in perf_event_sync_stat()
3364 __perf_event_sync_stat(event, next_event); in perf_event_sync_stat()
3366 event = list_next_entry(event, event_entry); in perf_event_sync_stat()
3488 * This callback is relevant even to per-cpu events; for example multi event
3520 * We stop each event and update the event value in event->count.
3523 * sets the disabled bit in the control field of event _before_
3524 * accessing the event control register. If an NMI hits, then it will
3525 * not restart the event.
3541 * cgroup events are system-wide mode only in __perf_event_task_sched_out()
3577 static void __heap_add(struct min_heap *heap, struct perf_event *event) in __heap_add() argument
3581 if (event) { in __heap_add()
3582 itrs[heap->nr] = event; in __heap_add()
3595 /* Space for per CPU and/or any CPU event iterators. */ in visit_groups_merge()
3649 static int merge_sched_in(struct perf_event *event, void *data) in merge_sched_in() argument
3651 struct perf_event_context *ctx = event->ctx; in merge_sched_in()
3655 if (event->state <= PERF_EVENT_STATE_OFF) in merge_sched_in()
3658 if (!event_filter_match(event)) in merge_sched_in()
3661 if (group_can_go_on(event, cpuctx, *can_add_hw)) { in merge_sched_in()
3662 if (!group_sched_in(event, cpuctx, ctx)) in merge_sched_in()
3663 list_add_tail(&event->active_list, get_event_list(event)); in merge_sched_in()
3666 if (event->state == PERF_EVENT_STATE_INACTIVE) { in merge_sched_in()
3667 if (event->attr.pinned) { in merge_sched_in()
3668 perf_cgroup_event_disable(event, ctx); in merge_sched_in()
3669 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in merge_sched_in()
3807 * We restore the event value and then enable it.
3810 * sets the enabled bit in the control field of event _before_
3811 * accessing the event control register. If an NMI hits, then it will
3812 * keep the event running.
3822 * to switch in PMU state; cgroup events are system-wide mode only. in __perf_event_task_sched_in()
3842 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) in perf_calculate_period() argument
3844 u64 frequency = event->attr.sample_freq; in perf_calculate_period()
3918 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) in perf_adjust_period() argument
3920 struct hw_perf_event *hwc = &event->hw; in perf_adjust_period()
3924 period = perf_calculate_period(event, nsec, count); in perf_adjust_period()
3938 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_period()
3943 event->pmu->start(event, PERF_EF_RELOAD); in perf_adjust_period()
3955 struct perf_event *event; in perf_adjust_freq_unthr_context() local
3971 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_adjust_freq_unthr_context()
3972 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_adjust_freq_unthr_context()
3975 if (!event_filter_match(event)) in perf_adjust_freq_unthr_context()
3978 perf_pmu_disable(event->pmu); in perf_adjust_freq_unthr_context()
3980 hwc = &event->hw; in perf_adjust_freq_unthr_context()
3984 perf_log_throttle(event, 1); in perf_adjust_freq_unthr_context()
3985 event->pmu->start(event, 0); in perf_adjust_freq_unthr_context()
3988 if (!event->attr.freq || !event->attr.sample_freq) in perf_adjust_freq_unthr_context()
3992 * stop the event and update event->count in perf_adjust_freq_unthr_context()
3994 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_freq_unthr_context()
3996 now = local64_read(&event->count); in perf_adjust_freq_unthr_context()
4001 * restart the event in perf_adjust_freq_unthr_context()
4003 * we have stopped the event so tell that in perf_adjust_freq_unthr_context()
4008 perf_adjust_period(event, period, delta, false); in perf_adjust_freq_unthr_context()
4010 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); in perf_adjust_freq_unthr_context()
4012 perf_pmu_enable(event->pmu); in perf_adjust_freq_unthr_context()
4020 * Move @event to the tail of the @ctx's eligible events.
4022 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) in rotate_ctx() argument
4031 perf_event_groups_delete(&ctx->flexible_groups, event); in rotate_ctx()
4032 perf_event_groups_insert(&ctx->flexible_groups, event); in rotate_ctx()
4035 /* pick an event from the flexible_groups to rotate */
4039 struct perf_event *event; in ctx_event_to_rotate() local
4041 /* pick the first active flexible event */ in ctx_event_to_rotate()
4042 event = list_first_entry_or_null(&ctx->flexible_active, in ctx_event_to_rotate()
4045 /* if no active flexible event, pick the first event */ in ctx_event_to_rotate()
4046 if (!event) { in ctx_event_to_rotate()
4047 event = rb_entry_safe(rb_first(&ctx->flexible_groups.tree), in ctx_event_to_rotate()
4048 typeof(*event), group_node); in ctx_event_to_rotate()
4057 return event; in ctx_event_to_rotate()
4068 * events, thus the event count values are stable. in perf_rotate_context()
4124 static int event_enable_on_exec(struct perf_event *event, in event_enable_on_exec() argument
4127 if (!event->attr.enable_on_exec) in event_enable_on_exec()
4130 event->attr.enable_on_exec = 0; in event_enable_on_exec()
4131 if (event->state >= PERF_EVENT_STATE_INACTIVE) in event_enable_on_exec()
4134 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_enable_on_exec()
4148 struct perf_event *event; in perf_event_enable_on_exec() local
4160 list_for_each_entry(event, &ctx->event_list, event_entry) { in perf_event_enable_on_exec()
4161 enabled |= event_enable_on_exec(event, ctx); in perf_event_enable_on_exec()
4162 event_type |= get_event_type(event); in perf_event_enable_on_exec()
4166 * Unclone and reschedule this context if we enabled any event. in perf_event_enable_on_exec()
4184 struct perf_event *event; member
4189 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu) in __perf_event_read_cpu() argument
4193 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { in __perf_event_read_cpu()
4207 * Cross CPU call to read the hardware event
4212 struct perf_event *sub, *event = data->event; in __perf_event_read() local
4213 struct perf_event_context *ctx = event->ctx; in __perf_event_read()
4215 struct pmu *pmu = event->pmu; in __perf_event_read()
4221 * event->count would have been updated to a recent sample in __perf_event_read()
4222 * when the event was scheduled out. in __perf_event_read()
4230 update_cgrp_time_from_event(event); in __perf_event_read()
4233 perf_event_update_time(event); in __perf_event_read()
4235 perf_event_update_sibling_time(event); in __perf_event_read()
4237 if (event->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_read()
4241 pmu->read(event); in __perf_event_read()
4248 pmu->read(event); in __perf_event_read()
4250 for_each_sibling_event(sub, event) { in __perf_event_read()
4253 * Use sibling's PMU rather than @event's since in __perf_event_read()
4266 static inline u64 perf_event_count(struct perf_event *event) in perf_event_count() argument
4268 return local64_read(&event->count) + atomic64_read(&event->child_count); in perf_event_count()
4272 * NMI-safe method to read a local event, that is an event that
4279 int perf_event_read_local(struct perf_event *event, u64 *value, in perf_event_read_local() argument
4292 * It must not be an event with inherit set, we cannot read in perf_event_read_local()
4295 if (event->attr.inherit) { in perf_event_read_local()
4300 /* If this is a per-task event, it must be for current */ in perf_event_read_local()
4301 if ((event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4302 event->hw.target != current) { in perf_event_read_local()
4307 /* If this is a per-CPU event, it must be for this CPU */ in perf_event_read_local()
4308 if (!(event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4309 event->cpu != smp_processor_id()) { in perf_event_read_local()
4314 /* If this is a pinned event it must be running on this CPU */ in perf_event_read_local()
4315 if (event->attr.pinned && event->oncpu != smp_processor_id()) { in perf_event_read_local()
4321 * If the event is currently on this CPU, it's either a per-task event, in perf_event_read_local()
4325 if (event->oncpu == smp_processor_id()) in perf_event_read_local()
4326 event->pmu->read(event); in perf_event_read_local()
4328 *value = local64_read(&event->count); in perf_event_read_local()
4330 u64 now = event->shadow_ctx_time + perf_clock(); in perf_event_read_local()
4333 __perf_update_times(event, now, &__enabled, &__running); in perf_event_read_local()
4345 static int perf_event_read(struct perf_event *event, bool group) in perf_event_read() argument
4347 enum perf_event_state state = READ_ONCE(event->state); in perf_event_read()
4351 * If event is enabled and currently active on a CPU, update the in perf_event_read()
4352 * value in the event structure: in perf_event_read()
4366 event_cpu = READ_ONCE(event->oncpu); in perf_event_read()
4371 .event = event, in perf_event_read()
4377 event_cpu = __perf_event_read_cpu(event, event_cpu); in perf_event_read()
4383 * If event_cpu isn't a valid CPU it means the event got in perf_event_read()
4384 * scheduled out and that will have updated the event count. in perf_event_read()
4386 * Therefore, either way, we'll have an up-to-date event count in perf_event_read()
4394 struct perf_event_context *ctx = event->ctx; in perf_event_read()
4398 state = event->state; in perf_event_read()
4410 update_cgrp_time_from_event(event); in perf_event_read()
4413 perf_event_update_time(event); in perf_event_read()
4415 perf_event_update_sibling_time(event); in perf_event_read()
4480 struct perf_event *event) in find_get_context() argument
4487 int cpu = event->cpu; in find_get_context()
4490 /* Must be root to operate on a CPU event: */ in find_get_context()
4491 err = perf_allow_cpu(&event->attr); in find_get_context()
4508 if (event->attach_state & PERF_ATTACH_TASK_DATA) { in find_get_context()
4575 static void perf_event_free_filter(struct perf_event *event);
4576 static void perf_event_free_bpf_prog(struct perf_event *event);
4580 struct perf_event *event; in free_event_rcu() local
4582 event = container_of(head, struct perf_event, rcu_head); in free_event_rcu()
4583 if (event->ns) in free_event_rcu()
4584 put_pid_ns(event->ns); in free_event_rcu()
4585 perf_event_free_filter(event); in free_event_rcu()
4586 kfree(event); in free_event_rcu()
4589 static void ring_buffer_attach(struct perf_event *event,
4592 static void detach_sb_event(struct perf_event *event) in detach_sb_event() argument
4594 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in detach_sb_event()
4597 list_del_rcu(&event->sb_list); in detach_sb_event()
4601 static bool is_sb_event(struct perf_event *event) in is_sb_event() argument
4603 struct perf_event_attr *attr = &event->attr; in is_sb_event()
4605 if (event->parent) in is_sb_event()
4608 if (event->attach_state & PERF_ATTACH_TASK) in is_sb_event()
4620 static void unaccount_pmu_sb_event(struct perf_event *event) in unaccount_pmu_sb_event() argument
4622 if (is_sb_event(event)) in unaccount_pmu_sb_event()
4623 detach_sb_event(event); in unaccount_pmu_sb_event()
4626 static void unaccount_event_cpu(struct perf_event *event, int cpu) in unaccount_event_cpu() argument
4628 if (event->parent) in unaccount_event_cpu()
4631 if (is_cgroup_event(event)) in unaccount_event_cpu()
4657 static void unaccount_event(struct perf_event *event) in unaccount_event() argument
4661 if (event->parent) in unaccount_event()
4664 if (event->attach_state & PERF_ATTACH_TASK) in unaccount_event()
4666 if (event->attr.mmap || event->attr.mmap_data) in unaccount_event()
4668 if (event->attr.comm) in unaccount_event()
4670 if (event->attr.namespaces) in unaccount_event()
4672 if (event->attr.cgroup) in unaccount_event()
4674 if (event->attr.task) in unaccount_event()
4676 if (event->attr.freq) in unaccount_event()
4678 if (event->attr.context_switch) { in unaccount_event()
4682 if (is_cgroup_event(event)) in unaccount_event()
4684 if (has_branch_stack(event)) in unaccount_event()
4686 if (event->attr.ksymbol) in unaccount_event()
4688 if (event->attr.bpf_event) in unaccount_event()
4690 if (event->attr.text_poke) in unaccount_event()
4698 unaccount_event_cpu(event, event->cpu); in unaccount_event()
4700 unaccount_pmu_sb_event(event); in unaccount_event()
4713 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
4723 static int exclusive_event_init(struct perf_event *event) in exclusive_event_init() argument
4725 struct pmu *pmu = event->pmu; in exclusive_event_init()
4738 * Since this is called in perf_event_alloc() path, event::ctx in exclusive_event_init()
4740 * to mean "per-task event", because unlike other attach states it in exclusive_event_init()
4743 if (event->attach_state & PERF_ATTACH_TASK) { in exclusive_event_init()
4754 static void exclusive_event_destroy(struct perf_event *event) in exclusive_event_destroy() argument
4756 struct pmu *pmu = event->pmu; in exclusive_event_destroy()
4762 if (event->attach_state & PERF_ATTACH_TASK) in exclusive_event_destroy()
4778 static bool exclusive_event_installable(struct perf_event *event, in exclusive_event_installable() argument
4782 struct pmu *pmu = event->pmu; in exclusive_event_installable()
4790 if (exclusive_event_match(iter_event, event)) in exclusive_event_installable()
4797 static void perf_addr_filters_splice(struct perf_event *event,
4800 static void _free_event(struct perf_event *event) in _free_event() argument
4802 irq_work_sync(&event->pending); in _free_event()
4804 unaccount_event(event); in _free_event()
4806 security_perf_event_free(event); in _free_event()
4808 if (event->rb) { in _free_event()
4810 * Can happen when we close an event with re-directed output. in _free_event()
4815 mutex_lock(&event->mmap_mutex); in _free_event()
4816 ring_buffer_attach(event, NULL); in _free_event()
4817 mutex_unlock(&event->mmap_mutex); in _free_event()
4820 if (is_cgroup_event(event)) in _free_event()
4821 perf_detach_cgroup(event); in _free_event()
4823 if (!event->parent) { in _free_event()
4824 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in _free_event()
4828 perf_event_free_bpf_prog(event); in _free_event()
4829 perf_addr_filters_splice(event, NULL); in _free_event()
4830 kfree(event->addr_filter_ranges); in _free_event()
4832 if (event->destroy) in _free_event()
4833 event->destroy(event); in _free_event()
4839 if (event->hw.target) in _free_event()
4840 put_task_struct(event->hw.target); in _free_event()
4846 if (event->ctx) in _free_event()
4847 put_ctx(event->ctx); in _free_event()
4849 exclusive_event_destroy(event); in _free_event()
4850 module_put(event->pmu->module); in _free_event()
4852 call_rcu(&event->rcu_head, free_event_rcu); in _free_event()
4857 * where the event isn't exposed yet and inherited events.
4859 static void free_event(struct perf_event *event) in free_event() argument
4861 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, in free_event()
4862 "unexpected event refcount: %ld; ptr=%p\n", in free_event()
4863 atomic_long_read(&event->refcount), event)) { in free_event()
4868 _free_event(event); in free_event()
4872 * Remove user event from the owner task.
4874 static void perf_remove_from_owner(struct perf_event *event) in perf_remove_from_owner() argument
4882 * indeed free this event, otherwise we need to serialize on in perf_remove_from_owner()
4885 owner = READ_ONCE(event->owner); in perf_remove_from_owner()
4908 * We have to re-check the event->owner field, if it is cleared in perf_remove_from_owner()
4911 * event. in perf_remove_from_owner()
4913 if (event->owner) { in perf_remove_from_owner()
4914 list_del_init(&event->owner_entry); in perf_remove_from_owner()
4915 smp_store_release(&event->owner, NULL); in perf_remove_from_owner()
4922 static void put_event(struct perf_event *event) in put_event() argument
4924 if (!atomic_long_dec_and_test(&event->refcount)) in put_event()
4927 _free_event(event); in put_event()
4931 * Kill an event dead; while event::refcount will preserve the event
4935 int perf_event_release_kernel(struct perf_event *event) in perf_event_release_kernel() argument
4937 struct perf_event_context *ctx = event->ctx; in perf_event_release_kernel()
4946 WARN_ON_ONCE(event->attach_state & in perf_event_release_kernel()
4951 if (!is_kernel_event(event)) in perf_event_release_kernel()
4952 perf_remove_from_owner(event); in perf_event_release_kernel()
4954 ctx = perf_event_ctx_lock(event); in perf_event_release_kernel()
4956 perf_remove_from_context(event, DETACH_GROUP); in perf_event_release_kernel()
4960 * Mark this event as STATE_DEAD, there is no external reference to it in perf_event_release_kernel()
4963 * Anybody acquiring event->child_mutex after the below loop _must_ in perf_event_release_kernel()
4970 event->state = PERF_EVENT_STATE_DEAD; in perf_event_release_kernel()
4973 perf_event_ctx_unlock(event, ctx); in perf_event_release_kernel()
4976 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
4977 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_release_kernel()
4988 * Since the event cannot get freed while we hold the in perf_event_release_kernel()
4999 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5001 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5008 tmp = list_first_entry_or_null(&event->child_list, in perf_event_release_kernel()
5017 put_event(event); in perf_event_release_kernel()
5020 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5025 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5034 * Wake any perf_event_free_task() waiting for this event to be in perf_event_release_kernel()
5042 put_event(event); /* Must be the 'last' reference */ in perf_event_release_kernel()
5056 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in __perf_event_read_value() argument
5064 mutex_lock(&event->child_mutex); in __perf_event_read_value()
5066 (void)perf_event_read(event, false); in __perf_event_read_value()
5067 total += perf_event_count(event); in __perf_event_read_value()
5069 *enabled += event->total_time_enabled + in __perf_event_read_value()
5070 atomic64_read(&event->child_total_time_enabled); in __perf_event_read_value()
5071 *running += event->total_time_running + in __perf_event_read_value()
5072 atomic64_read(&event->child_total_time_running); in __perf_event_read_value()
5074 list_for_each_entry(child, &event->child_list, child_list) { in __perf_event_read_value()
5080 mutex_unlock(&event->child_mutex); in __perf_event_read_value()
5085 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in perf_event_read_value() argument
5090 ctx = perf_event_ctx_lock(event); in perf_event_read_value()
5091 count = __perf_event_read_value(event, enabled, running); in perf_event_read_value()
5092 perf_event_ctx_unlock(event, ctx); in perf_event_read_value()
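perf_event_read_value() is also exported for in-kernel users. A hedged sketch of the usual pairing with perf_event_create_kernel_counter(); the function and variable names here are illustrative, not from the listed file:

#include <linux/perf_event.h>
#include <linux/err.h>

static struct perf_event *cycles_event;

/* Create a CPU-bound hardware cycles counter on CPU 0, not tied to a task. */
static int cycles_counter_start(void)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.size	= sizeof(attr),
		.pinned	= 1,
	};

	cycles_event = perf_event_create_kernel_counter(&attr, 0, NULL,
							NULL, NULL);
	return PTR_ERR_OR_ZERO(cycles_event);
}

/* Sums the event and any child events, like __perf_event_read_value() above. */
static u64 cycles_counter_read(void)
{
	u64 enabled, running;

	return perf_event_read_value(cycles_event, &enabled, &running);
}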
5145 static int perf_read_group(struct perf_event *event, in perf_read_group() argument
5148 struct perf_event *leader = event->group_leader, *child; in perf_read_group()
5155 values = kzalloc(event->read_size, GFP_KERNEL); in perf_read_group()
5179 ret = event->read_size; in perf_read_group()
5180 if (copy_to_user(buf, values, event->read_size)) in perf_read_group()
5191 static int perf_read_one(struct perf_event *event, in perf_read_one() argument
5198 values[n++] = __perf_event_read_value(event, &enabled, &running); in perf_read_one()
5204 values[n++] = primary_event_id(event); in perf_read_one()
5212 static bool is_event_hup(struct perf_event *event) in is_event_hup() argument
5216 if (event->state > PERF_EVENT_STATE_EXIT) in is_event_hup()
5219 mutex_lock(&event->child_mutex); in is_event_hup()
5220 no_children = list_empty(&event->child_list); in is_event_hup()
5221 mutex_unlock(&event->child_mutex); in is_event_hup()
5226 * Read the performance event - simple non-blocking version for now in __perf_read()
5229 __perf_read(struct perf_event *event, char __user *buf, size_t count) in __perf_read() argument
5231 u64 read_format = event->attr.read_format; in __perf_read()
5235 * Return end-of-file for a read on an event that is in in __perf_read()
5239 if (event->state == PERF_EVENT_STATE_ERROR) in __perf_read()
5242 if (count < event->read_size) in __perf_read()
5245 WARN_ON_ONCE(event->ctx->parent_ctx); in __perf_read()
5247 ret = perf_read_group(event, read_format, buf); in __perf_read()
5249 ret = perf_read_one(event, read_format, buf); in __perf_read()
5257 struct perf_event *event = file->private_data; in perf_read() local
5261 ret = security_perf_event_read(event); in perf_read()
5265 ctx = perf_event_ctx_lock(event); in perf_read()
5266 ret = __perf_read(event, buf, count); in perf_read()
5267 perf_event_ctx_unlock(event, ctx); in perf_read()
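On the userspace side, perf_read_one() lays the buffer out according to attr.read_format. A hedged reader sketch, assuming the event was opened with read_format = TOTAL_TIME_ENABLED | TOTAL_TIME_RUNNING | ID; struct and function names are illustrative:

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Matches the u64 sequence written by perf_read_one() for this read_format. */
struct read_one {
        uint64_t value;
        uint64_t time_enabled;  /* PERF_FORMAT_TOTAL_TIME_ENABLED */
        uint64_t time_running;  /* PERF_FORMAT_TOTAL_TIME_RUNNING */
        uint64_t id;            /* PERF_FORMAT_ID */
};

static int print_count(int perf_fd)
{
        struct read_one rf;

        if (read(perf_fd, &rf, sizeof(rf)) != sizeof(rf))
                return -1;      /* event in error state, or buffer too small */

        /* scale for multiplexing, the same way the perf tool does */
        double scale = rf.time_running ?
                (double)rf.time_enabled / rf.time_running : 0.0;
        printf("id %llu: %llu (scaled %.0f)\n",
               (unsigned long long)rf.id,
               (unsigned long long)rf.value,
               (double)rf.value * scale);
        return 0;
}

Group reads (PERF_FORMAT_GROUP) use the longer layout produced by perf_read_group() instead.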
5274 struct perf_event *event = file->private_data; in perf_poll() local
5278 poll_wait(file, &event->waitq, wait); in perf_poll()
5280 if (is_event_hup(event)) in perf_poll()
5284 * Pin the event->rb by taking event->mmap_mutex; otherwise in perf_poll()
5287 mutex_lock(&event->mmap_mutex); in perf_poll()
5288 rb = event->rb; in perf_poll()
5291 mutex_unlock(&event->mmap_mutex); in perf_poll()
5295 static void _perf_event_reset(struct perf_event *event) in _perf_event_reset() argument
5297 (void)perf_event_read(event, false); in _perf_event_reset()
5298 local64_set(&event->count, 0); in _perf_event_reset()
5299 perf_event_update_userpage(event); in _perf_event_reset()
5302 /* Assume it's not an event with inherit set. */
5303 u64 perf_event_pause(struct perf_event *event, bool reset) in perf_event_pause() argument
5308 ctx = perf_event_ctx_lock(event); in perf_event_pause()
5309 WARN_ON_ONCE(event->attr.inherit); in perf_event_pause()
5310 _perf_event_disable(event); in perf_event_pause()
5311 count = local64_read(&event->count); in perf_event_pause()
5313 local64_set(&event->count, 0); in perf_event_pause()
5314 perf_event_ctx_unlock(event, ctx); in perf_event_pause()
5321 * Holding the top-level event's child_mutex means that any
5322 * descendant process that has inherited this event will block
5326 static void perf_event_for_each_child(struct perf_event *event, in perf_event_for_each_child() argument
5331 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_for_each_child()
5333 mutex_lock(&event->child_mutex); in perf_event_for_each_child()
5334 func(event); in perf_event_for_each_child()
5335 list_for_each_entry(child, &event->child_list, child_list) in perf_event_for_each_child()
5337 mutex_unlock(&event->child_mutex); in perf_event_for_each_child()
5340 static void perf_event_for_each(struct perf_event *event, in perf_event_for_each() argument
5343 struct perf_event_context *ctx = event->ctx; in perf_event_for_each()
5348 event = event->group_leader; in perf_event_for_each()
5350 perf_event_for_each_child(event, func); in perf_event_for_each()
5351 for_each_sibling_event(sibling, event) in perf_event_for_each()
5355 static void __perf_event_period(struct perf_event *event, in __perf_event_period() argument
5363 if (event->attr.freq) { in __perf_event_period()
5364 event->attr.sample_freq = value; in __perf_event_period()
5366 event->attr.sample_period = value; in __perf_event_period()
5367 event->hw.sample_period = value; in __perf_event_period()
5370 active = (event->state == PERF_EVENT_STATE_ACTIVE); in __perf_event_period()
5375 * trying to unthrottle while we already re-started the event. in __perf_event_period()
5377 if (event->hw.interrupts == MAX_INTERRUPTS) { in __perf_event_period()
5378 event->hw.interrupts = 0; in __perf_event_period()
5379 perf_log_throttle(event, 1); in __perf_event_period()
5381 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_period()
5384 local64_set(&event->hw.period_left, 0); in __perf_event_period()
5387 event->pmu->start(event, PERF_EF_RELOAD); in __perf_event_period()
5392 static int perf_event_check_period(struct perf_event *event, u64 value) in perf_event_check_period() argument
5394 return event->pmu->check_period(event, value); in perf_event_check_period()
5397 static int _perf_event_period(struct perf_event *event, u64 value) in _perf_event_period() argument
5399 if (!is_sampling_event(event)) in _perf_event_period()
5405 if (event->attr.freq && value > sysctl_perf_event_sample_rate) in _perf_event_period()
5408 if (perf_event_check_period(event, value)) in _perf_event_period()
5411 if (!event->attr.freq && (value & (1ULL << 63))) in _perf_event_period()
5414 event_function_call(event, __perf_event_period, &value); in _perf_event_period()
5419 int perf_event_period(struct perf_event *event, u64 value) in perf_event_period() argument
5424 ctx = perf_event_ctx_lock(event); in perf_event_period()
5425 ret = _perf_event_period(event, value); in perf_event_period()
5426 perf_event_ctx_unlock(event, ctx); in perf_event_period()
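From userspace, _perf_event_period() is reached via PERF_EVENT_IOC_PERIOD, which takes a pointer to the new u64 value. A minimal sketch; the function name is made up:

#include <linux/perf_event.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Update the sample period (or frequency, if attr.freq was set) of a live event. */
static int set_sample_period(int perf_fd, uint64_t period)
{
        /* rejected for non-sampling events or out-of-range values */
        return ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &period);
}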
5448 static int perf_event_set_output(struct perf_event *event,
5450 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
5451 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
5455 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) in _perf_ioctl() argument
5472 return _perf_event_refresh(event, arg); in _perf_ioctl()
5481 return _perf_event_period(event, value); in _perf_ioctl()
5485 u64 id = primary_event_id(event); in _perf_ioctl()
5502 ret = perf_event_set_output(event, output_event); in _perf_ioctl()
5505 ret = perf_event_set_output(event, NULL); in _perf_ioctl()
5511 return perf_event_set_filter(event, (void __user *)arg); in _perf_ioctl()
5514 return perf_event_set_bpf_prog(event, arg); in _perf_ioctl()
5520 rb = rcu_dereference(event->rb); in _perf_ioctl()
5531 return perf_event_query_prog_array(event, (void __user *)arg); in _perf_ioctl()
5541 return perf_event_modify_attr(event, &new_attr); in _perf_ioctl()
5548 perf_event_for_each(event, func); in _perf_ioctl()
5550 perf_event_for_each_child(event, func); in _perf_ioctl()
5557 struct perf_event *event = file->private_data; in perf_ioctl() local
5562 ret = security_perf_event_write(event); in perf_ioctl()
5566 ctx = perf_event_ctx_lock(event); in perf_ioctl()
5567 ret = _perf_ioctl(event, cmd, arg); in perf_ioctl()
5568 perf_event_ctx_unlock(event, ctx); in perf_ioctl()
5598 struct perf_event *event; in perf_event_task_enable() local
5601 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_enable()
5602 ctx = perf_event_ctx_lock(event); in perf_event_task_enable()
5603 perf_event_for_each_child(event, _perf_event_enable); in perf_event_task_enable()
5604 perf_event_ctx_unlock(event, ctx); in perf_event_task_enable()
5614 struct perf_event *event; in perf_event_task_disable() local
5617 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_disable()
5618 ctx = perf_event_ctx_lock(event); in perf_event_task_disable()
5619 perf_event_for_each_child(event, _perf_event_disable); in perf_event_task_disable()
5620 perf_event_ctx_unlock(event, ctx); in perf_event_task_disable()
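perf_event_task_enable()/perf_event_task_disable() walk current->perf_event_list and are exposed through prctl(). A sketch of toggling every event the calling task owns around a region of interest; the wrapper is illustrative:

#include <sys/prctl.h>

static void measure_region(void (*fn)(void))
{
        prctl(PR_TASK_PERF_EVENTS_ENABLE, 0, 0, 0, 0);
        fn();   /* region of interest */
        prctl(PR_TASK_PERF_EVENTS_DISABLE, 0, 0, 0, 0);
}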
5627 static int perf_event_index(struct perf_event *event) in perf_event_index() argument
5629 if (event->hw.state & PERF_HES_STOPPED) in perf_event_index()
5632 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_event_index()
5635 return event->pmu->event_idx(event); in perf_event_index()
5638 static void calc_timer_values(struct perf_event *event, in calc_timer_values() argument
5646 ctx_time = event->shadow_ctx_time + *now; in calc_timer_values()
5647 __perf_update_times(event, ctx_time, enabled, running); in calc_timer_values()
5650 static void perf_event_init_userpage(struct perf_event *event) in perf_event_init_userpage() argument
5656 rb = rcu_dereference(event->rb); in perf_event_init_userpage()
5673 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) in arch_perf_update_userpage() argument
5682 void perf_event_update_userpage(struct perf_event *event) in perf_event_update_userpage() argument
5689 rb = rcu_dereference(event->rb); in perf_event_update_userpage()
5695 * based on snapshot values taken when the event in perf_event_update_userpage()
5702 calc_timer_values(event, &now, &enabled, &running); in perf_event_update_userpage()
5712 userpg->index = perf_event_index(event); in perf_event_update_userpage()
5713 userpg->offset = perf_event_count(event); in perf_event_update_userpage()
5715 userpg->offset -= local64_read(&event->hw.prev_count); in perf_event_update_userpage()
5718 atomic64_read(&event->child_total_time_enabled); in perf_event_update_userpage()
5721 atomic64_read(&event->child_total_time_running); in perf_event_update_userpage()
5723 arch_perf_update_userpage(event, userpg, now); in perf_event_update_userpage()
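The page filled in by perf_event_update_userpage() is the mmap()ed struct perf_event_mmap_page; userspace is expected to retry on the lock seqcount, as documented in the uapi header. A minimal reader sketch that skips the rdpmc-based count reconstruction; the helper name is made up:

#include <linux/perf_event.h>
#include <stdint.h>

static void read_userpage(volatile struct perf_event_mmap_page *pc,
                          uint64_t *enabled, uint64_t *running)
{
        uint32_t seq;

        do {
                seq = pc->lock;
                __sync_synchronize();   /* pairs with the kernel's update barriers */

                *enabled = pc->time_enabled;
                *running = pc->time_running;

                __sync_synchronize();
        } while (pc->lock != seq);
}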
5735 struct perf_event *event = vmf->vma->vm_file->private_data; in perf_mmap_fault() local
5746 rb = rcu_dereference(event->rb); in perf_mmap_fault()
5768 static void ring_buffer_attach(struct perf_event *event, in ring_buffer_attach() argument
5774 if (event->rb) { in ring_buffer_attach()
5777 * event->rb_entry and wait/clear when adding event->rb_entry. in ring_buffer_attach()
5779 WARN_ON_ONCE(event->rcu_pending); in ring_buffer_attach()
5781 old_rb = event->rb; in ring_buffer_attach()
5783 list_del_rcu(&event->rb_entry); in ring_buffer_attach()
5786 event->rcu_batches = get_state_synchronize_rcu(); in ring_buffer_attach()
5787 event->rcu_pending = 1; in ring_buffer_attach()
5791 if (event->rcu_pending) { in ring_buffer_attach()
5792 cond_synchronize_rcu(event->rcu_batches); in ring_buffer_attach()
5793 event->rcu_pending = 0; in ring_buffer_attach()
5797 list_add_rcu(&event->rb_entry, &rb->event_list); in ring_buffer_attach()
5802 * Avoid racing with perf_mmap_close(AUX): stop the event in ring_buffer_attach()
5803 * before swizzling the event::rb pointer; if it's getting in ring_buffer_attach()
5811 if (has_aux(event)) in ring_buffer_attach()
5812 perf_event_stop(event, 0); in ring_buffer_attach()
5814 rcu_assign_pointer(event->rb, rb); in ring_buffer_attach()
5823 wake_up_all(&event->waitq); in ring_buffer_attach()
5827 static void ring_buffer_wakeup(struct perf_event *event) in ring_buffer_wakeup() argument
5832 rb = rcu_dereference(event->rb); in ring_buffer_wakeup()
5834 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) in ring_buffer_wakeup()
5835 wake_up_all(&event->waitq); in ring_buffer_wakeup()
5840 struct perf_buffer *ring_buffer_get(struct perf_event *event) in ring_buffer_get() argument
5845 rb = rcu_dereference(event->rb); in ring_buffer_get()
5867 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_open() local
5869 atomic_inc(&event->mmap_count); in perf_mmap_open()
5870 atomic_inc(&event->rb->mmap_count); in perf_mmap_open()
5873 atomic_inc(&event->rb->aux_mmap_count); in perf_mmap_open()
5875 if (event->pmu->event_mapped) in perf_mmap_open()
5876 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap_open()
5879 static void perf_pmu_output_stop(struct perf_event *event);
5883 * event, or through other events by use of perf_event_set_output().
5891 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_close() local
5892 struct perf_buffer *rb = ring_buffer_get(event); in perf_mmap_close()
5898 if (event->pmu->event_unmapped) in perf_mmap_close()
5899 event->pmu->event_unmapped(event, vma->vm_mm); in perf_mmap_close()
5903 * event->mmap_count, so it is ok to use event->mmap_mutex to in perf_mmap_close()
5907 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { in perf_mmap_close()
5914 perf_pmu_output_stop(event); in perf_mmap_close()
5924 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
5930 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) in perf_mmap_close()
5933 ring_buffer_attach(event, NULL); in perf_mmap_close()
5934 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
5947 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { in perf_mmap_close()
5948 if (!atomic_long_inc_not_zero(&event->refcount)) { in perf_mmap_close()
5950 * This event is en-route to free_event() which will in perf_mmap_close()
5957 mutex_lock(&event->mmap_mutex); in perf_mmap_close()
5963 * If we find a different rb, ignore this event; a next in perf_mmap_close()
5968 if (event->rb == rb) in perf_mmap_close()
5969 ring_buffer_attach(event, NULL); in perf_mmap_close()
5971 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
5972 put_event(event); in perf_mmap_close()
6009 struct perf_event *event = file->private_data; in perf_mmap() local
6024 if (event->cpu == -1 && event->attr.inherit) in perf_mmap()
6030 ret = security_perf_event_read(event); in perf_mmap()
6046 if (!event->rb) in perf_mmap()
6051 mutex_lock(&event->mmap_mutex); in perf_mmap()
6054 rb = event->rb; in perf_mmap()
6106 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_mmap()
6108 mutex_lock(&event->mmap_mutex); in perf_mmap()
6109 if (event->rb) { in perf_mmap()
6110 if (event->rb->nr_pages != nr_pages) { in perf_mmap()
6115 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { in perf_mmap()
6121 mutex_unlock(&event->mmap_mutex); in perf_mmap()
6167 WARN_ON(!rb && event->rb); in perf_mmap()
6174 event->attr.watermark ? event->attr.wakeup_watermark : 0, in perf_mmap()
6175 event->cpu, flags); in perf_mmap()
6186 ring_buffer_attach(event, rb); in perf_mmap()
6188 perf_event_init_userpage(event); in perf_mmap()
6189 perf_event_update_userpage(event); in perf_mmap()
6191 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, in perf_mmap()
6192 event->attr.aux_watermark, flags); in perf_mmap()
6202 atomic_inc(&event->mmap_count); in perf_mmap()
6207 mutex_unlock(&event->mmap_mutex); in perf_mmap()
6216 if (event->pmu->event_mapped) in perf_mmap()
6217 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap()
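perf_mmap() expects the first mapping to be one metadata page plus a power-of-two number of data pages; an AUX area, if wanted, is a second mmap() at the aux_offset advertised in the metadata page. A hedged userspace sketch of the data mapping; the function name is made up:

#include <linux/perf_event.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_ring_buffer(int perf_fd, int data_pages /* power of two */)
{
        size_t len = (size_t)(1 + data_pages) * sysconf(_SC_PAGESIZE);
        void *base;

        base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, perf_fd, 0);
        return base == MAP_FAILED ? NULL : base;
}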
6225 struct perf_event *event = filp->private_data; in perf_fasync() local
6229 retval = fasync_helper(fd, filp, on, &event->fasync); in perf_fasync()
6250 * Perf event wakeup
6256 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) in perf_event_fasync() argument
6259 if (event->parent) in perf_event_fasync()
6260 event = event->parent; in perf_event_fasync()
6261 return &event->fasync; in perf_event_fasync()
6264 void perf_event_wakeup(struct perf_event *event) in perf_event_wakeup() argument
6266 ring_buffer_wakeup(event); in perf_event_wakeup()
6268 if (event->pending_kill) { in perf_event_wakeup()
6269 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); in perf_event_wakeup()
6270 event->pending_kill = 0; in perf_event_wakeup()
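The kill_fasync() call in perf_event_wakeup() only fires if userspace armed the fd for SIGIO. A sketch of that arming, assuming a SA_SIGINFO handler that inspects si_code (which carries event->pending_kill, POLL_IN or POLL_HUP); the helper name is made up:

#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>

static int arm_sigio(int perf_fd, void (*handler)(int, siginfo_t *, void *))
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = handler;
        sa.sa_flags = SA_SIGINFO;
        if (sigaction(SIGIO, &sa, NULL) < 0)
                return -1;
        if (fcntl(perf_fd, F_SETOWN, getpid()) < 0)
                return -1;
        return fcntl(perf_fd, F_SETFL, fcntl(perf_fd, F_GETFL) | O_ASYNC);
}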
6274 static void perf_pending_event_disable(struct perf_event *event) in perf_pending_event_disable() argument
6276 int cpu = READ_ONCE(event->pending_disable); in perf_pending_event_disable()
6282 WRITE_ONCE(event->pending_disable, -1); in perf_pending_event_disable()
6283 perf_event_disable_local(event); in perf_pending_event_disable()
6305 * But the event runs on CPU-B and wants disabling there. in perf_pending_event_disable()
6307 irq_work_queue_on(&event->pending, cpu); in perf_pending_event_disable()
6312 struct perf_event *event = container_of(entry, struct perf_event, pending); in perf_pending_event() local
6321 perf_pending_event_disable(event); in perf_pending_event()
6323 if (event->pending_wakeup) { in perf_pending_event()
6324 event->pending_wakeup = 0; in perf_pending_event()
6325 perf_event_wakeup(event); in perf_pending_event()
6489 static unsigned long perf_prepare_sample_aux(struct perf_event *event, in perf_prepare_sample_aux() argument
6493 struct perf_event *sampler = event->aux_event; in perf_prepare_sample_aux()
6528 struct perf_event *event, in perf_pmu_snapshot_aux() argument
6538 * the IRQ ones, that is, for example, re-starting an event that's just in perf_pmu_snapshot_aux()
6540 * doesn't change the event state. in perf_pmu_snapshot_aux()
6552 ret = event->pmu->snapshot_aux(event, handle, size); in perf_pmu_snapshot_aux()
6561 static void perf_aux_sample_output(struct perf_event *event, in perf_aux_sample_output() argument
6565 struct perf_event *sampler = event->aux_event; in perf_aux_sample_output()
6607 struct perf_event *event) in __perf_event_header__init_id() argument
6609 u64 sample_type = event->attr.sample_type; in __perf_event_header__init_id()
6612 header->size += event->id_header_size; in __perf_event_header__init_id()
6616 data->tid_entry.pid = perf_event_pid(event, current); in __perf_event_header__init_id()
6617 data->tid_entry.tid = perf_event_tid(event, current); in __perf_event_header__init_id()
6621 data->time = perf_event_clock(event); in __perf_event_header__init_id()
6624 data->id = primary_event_id(event); in __perf_event_header__init_id()
6627 data->stream_id = event->id; in __perf_event_header__init_id()
6637 struct perf_event *event) in perf_event_header__init_id() argument
6639 if (event->attr.sample_id_all) in perf_event_header__init_id()
6640 __perf_event_header__init_id(header, data, event); in perf_event_header__init_id()
6667 void perf_event__output_id_sample(struct perf_event *event, in perf_event__output_id_sample() argument
6671 if (event->attr.sample_id_all) in perf_event__output_id_sample()
6676 struct perf_event *event, in perf_output_read_one() argument
6679 u64 read_format = event->attr.read_format; in perf_output_read_one()
6683 values[n++] = perf_event_count(event); in perf_output_read_one()
6686 atomic64_read(&event->child_total_time_enabled); in perf_output_read_one()
6690 atomic64_read(&event->child_total_time_running); in perf_output_read_one()
6693 values[n++] = primary_event_id(event); in perf_output_read_one()
6699 struct perf_event *event, in perf_output_read_group() argument
6702 struct perf_event *leader = event->group_leader, *sub; in perf_output_read_group()
6703 u64 read_format = event->attr.read_format; in perf_output_read_group()
6715 if ((leader != event) && in perf_output_read_group()
6728 if ((sub != event) && in perf_output_read_group()
6751 struct perf_event *event) in perf_output_read() argument
6754 u64 read_format = event->attr.read_format; in perf_output_read()
6758 * based on snapshot values taken when the event in perf_output_read()
6766 calc_timer_values(event, &now, &enabled, &running); in perf_output_read()
6768 if (event->attr.read_format & PERF_FORMAT_GROUP) in perf_output_read()
6769 perf_output_read_group(handle, event, enabled, running); in perf_output_read()
6771 perf_output_read_one(handle, event, enabled, running); in perf_output_read()
6774 static inline bool perf_sample_save_hw_index(struct perf_event *event) in perf_sample_save_hw_index() argument
6776 return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX; in perf_sample_save_hw_index()
6782 struct perf_event *event) in perf_output_sample() argument
6816 perf_output_read(handle, event); in perf_output_sample()
6867 if (perf_sample_save_hw_index(event)) in perf_output_sample()
6889 u64 mask = event->attr.sample_regs_user; in perf_output_sample()
6920 u64 mask = event->attr.sample_regs_intr; in perf_output_sample()
6938 perf_aux_sample_output(event, handle, data); in perf_output_sample()
6941 if (!event->attr.watermark) { in perf_output_sample()
6942 int wakeup_events = event->attr.wakeup_events; in perf_output_sample()
6994 perf_callchain(struct perf_event *event, struct pt_regs *regs) in perf_callchain() argument
6996 bool kernel = !event->attr.exclude_callchain_kernel; in perf_callchain()
6997 bool user = !event->attr.exclude_callchain_user; in perf_callchain()
6999 bool crosstask = event->ctx->task && event->ctx->task != current; in perf_callchain()
7000 const u32 max_stack = event->attr.sample_max_stack; in perf_callchain()
7013 struct perf_event *event, in perf_prepare_sample() argument
7016 u64 sample_type = event->attr.sample_type; in perf_prepare_sample()
7019 header->size = sizeof(*header) + event->header_size; in perf_prepare_sample()
7024 __perf_event_header__init_id(header, data, event); in perf_prepare_sample()
7033 data->callchain = perf_callchain(event, regs); in perf_prepare_sample()
7068 if (perf_sample_save_hw_index(event)) in perf_prepare_sample()
7085 u64 mask = event->attr.sample_regs_user; in perf_prepare_sample()
7099 u16 stack_size = event->attr.sample_stack_user; in perf_prepare_sample()
7124 u64 mask = event->attr.sample_regs_intr; in perf_prepare_sample()
7157 event->attr.aux_sample_size); in perf_prepare_sample()
7159 size = perf_prepare_sample_aux(event, data, size); in perf_prepare_sample()
7176 __perf_event_output(struct perf_event *event, in __perf_event_output() argument
7191 perf_prepare_sample(&header, data, event, regs); in __perf_event_output()
7193 err = output_begin(&handle, data, event, header.size); in __perf_event_output()
7197 perf_output_sample(&handle, &header, data, event); in __perf_event_output()
7207 perf_event_output_forward(struct perf_event *event, in perf_event_output_forward() argument
7211 __perf_event_output(event, data, regs, perf_output_begin_forward); in perf_event_output_forward()
7215 perf_event_output_backward(struct perf_event *event, in perf_event_output_backward() argument
7219 __perf_event_output(event, data, regs, perf_output_begin_backward); in perf_event_output_backward()
7223 perf_event_output(struct perf_event *event, in perf_event_output() argument
7227 return __perf_event_output(event, data, regs, perf_output_begin); in perf_event_output()
7242 perf_event_read_event(struct perf_event *event, in perf_event_read_event() argument
7251 .size = sizeof(read_event) + event->read_size, in perf_event_read_event()
7253 .pid = perf_event_pid(event, task), in perf_event_read_event()
7254 .tid = perf_event_tid(event, task), in perf_event_read_event()
7258 perf_event_header__init_id(&read_event.header, &sample, event); in perf_event_read_event()
7259 ret = perf_output_begin(&handle, &sample, event, read_event.header.size); in perf_event_read_event()
7264 perf_output_read(&handle, event); in perf_event_read_event()
7265 perf_event__output_id_sample(event, &handle, &sample); in perf_event_read_event()
7270 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
7277 struct perf_event *event; in perf_iterate_ctx() local
7279 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_iterate_ctx()
7281 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_ctx()
7283 if (!event_filter_match(event)) in perf_iterate_ctx()
7287 output(event, data); in perf_iterate_ctx()
7294 struct perf_event *event; in perf_iterate_sb_cpu() local
7296 list_for_each_entry_rcu(event, &pel->list, sb_list) { in perf_iterate_sb_cpu()
7299 * if we observe event->ctx, both event and ctx will be in perf_iterate_sb_cpu()
7302 if (!smp_load_acquire(&event->ctx)) in perf_iterate_sb_cpu()
7305 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_sb_cpu()
7307 if (!event_filter_match(event)) in perf_iterate_sb_cpu()
7309 output(event, data); in perf_iterate_sb_cpu()
7317 * your event; otherwise it might not get delivered.
7355 static void perf_event_addr_filters_exec(struct perf_event *event, void *data) in perf_event_addr_filters_exec() argument
7357 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_exec()
7362 if (!has_addr_filter(event)) in perf_event_addr_filters_exec()
7368 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_exec()
7369 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_exec()
7377 event->addr_filters_gen++; in perf_event_addr_filters_exec()
7381 perf_event_stop(event, 1); in perf_event_addr_filters_exec()
7408 static void __perf_event_output_stop(struct perf_event *event, void *data) in __perf_event_output_stop() argument
7410 struct perf_event *parent = event->parent; in __perf_event_output_stop()
7414 .event = event, in __perf_event_output_stop()
7417 if (!has_aux(event)) in __perf_event_output_stop()
7421 parent = event; in __perf_event_output_stop()
7427 * We are using event::rb to determine if the event should be stopped, in __perf_event_output_stop()
7429 * which will make us skip the event that actually needs to be stopped. in __perf_event_output_stop()
7430 * So ring_buffer_attach() has to stop an aux event before re-assigning in __perf_event_output_stop()
7439 struct perf_event *event = info; in __perf_pmu_output_stop() local
7440 struct pmu *pmu = event->ctx->pmu; in __perf_pmu_output_stop()
7443 .rb = event->rb, in __perf_pmu_output_stop()
7456 static void perf_pmu_output_stop(struct perf_event *event) in perf_pmu_output_stop() argument
7463 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) { in perf_pmu_output_stop()
7467 * sufficient to stop the event itself if it's active, since in perf_pmu_output_stop()
7477 err = cpu_function_call(cpu, __perf_pmu_output_stop, event); in perf_pmu_output_stop()
7507 static int perf_event_task_match(struct perf_event *event) in perf_event_task_match() argument
7509 return event->attr.comm || event->attr.mmap || in perf_event_task_match()
7510 event->attr.mmap2 || event->attr.mmap_data || in perf_event_task_match()
7511 event->attr.task; in perf_event_task_match()
7514 static void perf_event_task_output(struct perf_event *event, in perf_event_task_output() argument
7523 if (!perf_event_task_match(event)) in perf_event_task_output()
7526 perf_event_header__init_id(&task_event->event_id.header, &sample, event); in perf_event_task_output()
7528 ret = perf_output_begin(&handle, &sample, event, in perf_event_task_output()
7533 task_event->event_id.pid = perf_event_pid(event, task); in perf_event_task_output()
7534 task_event->event_id.tid = perf_event_tid(event, task); in perf_event_task_output()
7537 task_event->event_id.ppid = perf_event_pid(event, in perf_event_task_output()
7539 task_event->event_id.ptid = perf_event_pid(event, in perf_event_task_output()
7542 task_event->event_id.ppid = perf_event_pid(event, current); in perf_event_task_output()
7543 task_event->event_id.ptid = perf_event_tid(event, current); in perf_event_task_output()
7546 task_event->event_id.time = perf_event_clock(event); in perf_event_task_output()
7550 perf_event__output_id_sample(event, &handle, &sample); in perf_event_task_output()
7613 static int perf_event_comm_match(struct perf_event *event) in perf_event_comm_match() argument
7615 return event->attr.comm; in perf_event_comm_match()
7618 static void perf_event_comm_output(struct perf_event *event, in perf_event_comm_output() argument
7627 if (!perf_event_comm_match(event)) in perf_event_comm_output()
7630 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); in perf_event_comm_output()
7631 ret = perf_output_begin(&handle, &sample, event, in perf_event_comm_output()
7637 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); in perf_event_comm_output()
7638 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); in perf_event_comm_output()
7644 perf_event__output_id_sample(event, &handle, &sample); in perf_event_comm_output()
7712 static int perf_event_namespaces_match(struct perf_event *event) in perf_event_namespaces_match() argument
7714 return event->attr.namespaces; in perf_event_namespaces_match()
7717 static void perf_event_namespaces_output(struct perf_event *event, in perf_event_namespaces_output() argument
7726 if (!perf_event_namespaces_match(event)) in perf_event_namespaces_output()
7730 &sample, event); in perf_event_namespaces_output()
7731 ret = perf_output_begin(&handle, &sample, event, in perf_event_namespaces_output()
7736 namespaces_event->event_id.pid = perf_event_pid(event, in perf_event_namespaces_output()
7738 namespaces_event->event_id.tid = perf_event_tid(event, in perf_event_namespaces_output()
7743 perf_event__output_id_sample(event, &handle, &sample); in perf_event_namespaces_output()
7840 static int perf_event_cgroup_match(struct perf_event *event) in perf_event_cgroup_match() argument
7842 return event->attr.cgroup; in perf_event_cgroup_match()
7845 static void perf_event_cgroup_output(struct perf_event *event, void *data) in perf_event_cgroup_output() argument
7853 if (!perf_event_cgroup_match(event)) in perf_event_cgroup_output()
7857 &sample, event); in perf_event_cgroup_output()
7858 ret = perf_output_begin(&handle, &sample, event, in perf_event_cgroup_output()
7866 perf_event__output_id_sample(event, &handle, &sample); in perf_event_cgroup_output()
7949 static int perf_event_mmap_match(struct perf_event *event, in perf_event_mmap_match() argument
7956 return (!executable && event->attr.mmap_data) || in perf_event_mmap_match()
7957 (executable && (event->attr.mmap || event->attr.mmap2)); in perf_event_mmap_match()
7960 static void perf_event_mmap_output(struct perf_event *event, in perf_event_mmap_output() argument
7970 if (!perf_event_mmap_match(event, data)) in perf_event_mmap_output()
7973 if (event->attr.mmap2) { in perf_event_mmap_output()
7983 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); in perf_event_mmap_output()
7984 ret = perf_output_begin(&handle, &sample, event, in perf_event_mmap_output()
7989 mmap_event->event_id.pid = perf_event_pid(event, current); in perf_event_mmap_output()
7990 mmap_event->event_id.tid = perf_event_tid(event, current); in perf_event_mmap_output()
7994 if (event->attr.mmap2) { in perf_event_mmap_output()
8006 perf_event__output_id_sample(event, &handle, &sample); in perf_event_mmap_output()
8179 static void __perf_addr_filters_adjust(struct perf_event *event, void *data) in __perf_addr_filters_adjust() argument
8181 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in __perf_addr_filters_adjust()
8187 if (!has_addr_filter(event)) in __perf_addr_filters_adjust()
8196 &event->addr_filter_ranges[count])) in __perf_addr_filters_adjust()
8203 event->addr_filters_gen++; in __perf_addr_filters_adjust()
8207 perf_event_stop(event, 1); in __perf_addr_filters_adjust()
8271 void perf_event_aux_event(struct perf_event *event, unsigned long head, in perf_event_aux_event() argument
8293 perf_event_header__init_id(&rec.header, &sample, event); in perf_event_aux_event()
8294 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_event_aux_event()
8300 perf_event__output_id_sample(event, &handle, &sample); in perf_event_aux_event()
8308 void perf_log_lost_samples(struct perf_event *event, u64 lost) in perf_log_lost_samples() argument
8326 perf_event_header__init_id(&lost_samples_event.header, &sample, event); in perf_log_lost_samples()
8328 ret = perf_output_begin(&handle, &sample, event, in perf_log_lost_samples()
8334 perf_event__output_id_sample(event, &handle, &sample); in perf_log_lost_samples()
8353 static int perf_event_switch_match(struct perf_event *event) in perf_event_switch_match() argument
8355 return event->attr.context_switch; in perf_event_switch_match()
8358 static void perf_event_switch_output(struct perf_event *event, void *data) in perf_event_switch_output() argument
8365 if (!perf_event_switch_match(event)) in perf_event_switch_output()
8369 if (event->ctx->task) { in perf_event_switch_output()
8376 perf_event_pid(event, se->next_prev); in perf_event_switch_output()
8378 perf_event_tid(event, se->next_prev); in perf_event_switch_output()
8381 perf_event_header__init_id(&se->event_id.header, &sample, event); in perf_event_switch_output()
8383 ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size); in perf_event_switch_output()
8387 if (event->ctx->task) in perf_event_switch_output()
8392 perf_event__output_id_sample(event, &handle, &sample); in perf_event_switch_output()
8431 static void perf_log_throttle(struct perf_event *event, int enable) in perf_log_throttle() argument
8448 .time = perf_event_clock(event), in perf_log_throttle()
8449 .id = primary_event_id(event), in perf_log_throttle()
8450 .stream_id = event->id, in perf_log_throttle()
8456 perf_event_header__init_id(&throttle_event.header, &sample, event); in perf_log_throttle()
8458 ret = perf_output_begin(&handle, &sample, event, in perf_log_throttle()
8464 perf_event__output_id_sample(event, &handle, &sample); in perf_log_throttle()
8484 static int perf_event_ksymbol_match(struct perf_event *event) in perf_event_ksymbol_match() argument
8486 return event->attr.ksymbol; in perf_event_ksymbol_match()
8489 static void perf_event_ksymbol_output(struct perf_event *event, void *data) in perf_event_ksymbol_output() argument
8496 if (!perf_event_ksymbol_match(event)) in perf_event_ksymbol_output()
8500 &sample, event); in perf_event_ksymbol_output()
8501 ret = perf_output_begin(&handle, &sample, event, in perf_event_ksymbol_output()
8508 perf_event__output_id_sample(event, &handle, &sample); in perf_event_ksymbol_output()
8574 static int perf_event_bpf_match(struct perf_event *event) in perf_event_bpf_match() argument
8576 return event->attr.bpf_event; in perf_event_bpf_match()
8579 static void perf_event_bpf_output(struct perf_event *event, void *data) in perf_event_bpf_output() argument
8586 if (!perf_event_bpf_match(event)) in perf_event_bpf_output()
8590 &sample, event); in perf_event_bpf_output()
8591 ret = perf_output_begin(&handle, data, event, in perf_event_bpf_output()
8597 perf_event__output_id_sample(event, &handle, &sample); in perf_event_bpf_output()
8682 static int perf_event_text_poke_match(struct perf_event *event) in perf_event_text_poke_match() argument
8684 return event->attr.text_poke; in perf_event_text_poke_match()
8687 static void perf_event_text_poke_output(struct perf_event *event, void *data) in perf_event_text_poke_output() argument
8695 if (!perf_event_text_poke_match(event)) in perf_event_text_poke_output()
8698 perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event); in perf_event_text_poke_output()
8700 ret = perf_output_begin(&handle, &sample, event, in perf_event_text_poke_output()
8715 perf_event__output_id_sample(event, &handle, &sample); in perf_event_text_poke_output()
8752 void perf_event_itrace_started(struct perf_event *event) in perf_event_itrace_started() argument
8754 event->attach_state |= PERF_ATTACH_ITRACE; in perf_event_itrace_started()
8757 static void perf_log_itrace_start(struct perf_event *event) in perf_log_itrace_start() argument
8768 if (event->parent) in perf_log_itrace_start()
8769 event = event->parent; in perf_log_itrace_start()
8771 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || in perf_log_itrace_start()
8772 event->attach_state & PERF_ATTACH_ITRACE) in perf_log_itrace_start()
8778 rec.pid = perf_event_pid(event, current); in perf_log_itrace_start()
8779 rec.tid = perf_event_tid(event, current); in perf_log_itrace_start()
8781 perf_event_header__init_id(&rec.header, &sample, event); in perf_log_itrace_start()
8782 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_log_itrace_start()
8788 perf_event__output_id_sample(event, &handle, &sample); in perf_log_itrace_start()
8794 __perf_event_account_interrupt(struct perf_event *event, int throttle) in __perf_event_account_interrupt() argument
8796 struct hw_perf_event *hwc = &event->hw; in __perf_event_account_interrupt()
8811 perf_log_throttle(event, 0); in __perf_event_account_interrupt()
8816 if (event->attr.freq) { in __perf_event_account_interrupt()
8823 perf_adjust_period(event, delta, hwc->last_period, true); in __perf_event_account_interrupt()
8829 int perf_event_account_interrupt(struct perf_event *event) in perf_event_account_interrupt() argument
8831 return __perf_event_account_interrupt(event, 1); in perf_event_account_interrupt()
8835 * Generic event overflow handling, sampling.
8838 static int __perf_event_overflow(struct perf_event *event, in __perf_event_overflow() argument
8842 int events = atomic_read(&event->event_limit); in __perf_event_overflow()
8849 if (unlikely(!is_sampling_event(event))) in __perf_event_overflow()
8852 ret = __perf_event_account_interrupt(event, throttle); in __perf_event_overflow()
8859 event->pending_kill = POLL_IN; in __perf_event_overflow()
8860 if (events && atomic_dec_and_test(&event->event_limit)) { in __perf_event_overflow()
8862 event->pending_kill = POLL_HUP; in __perf_event_overflow()
8864 perf_event_disable_inatomic(event); in __perf_event_overflow()
8867 READ_ONCE(event->overflow_handler)(event, data, regs); in __perf_event_overflow() local
8869 if (*perf_event_fasync(event) && event->pending_kill) { in __perf_event_overflow()
8870 event->pending_wakeup = 1; in __perf_event_overflow()
8871 irq_work_queue(&event->pending); in __perf_event_overflow()
8877 int perf_event_overflow(struct perf_event *event, in perf_event_overflow() argument
8881 return __perf_event_overflow(event, 1, data, regs); in perf_event_overflow()
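The event_limit counter decremented in __perf_event_overflow() is what PERF_EVENT_IOC_REFRESH arms: after N further overflows the event is disabled and the signal's si_code becomes POLL_HUP instead of POLL_IN. A hedged handler sketch, assuming the fd was armed for SIGIO as in the earlier sketch; perf_fd is a made-up global:

#include <linux/perf_event.h>
#include <signal.h>
#include <sys/ioctl.h>

static int perf_fd;     /* opened and armed elsewhere */

static void overflow_handler(int sig, siginfo_t *si, void *uctx)
{
        (void)sig; (void)uctx;

        if (si->si_code == POLL_HUP)
                /* limit exhausted, event disabled: re-arm for one more overflow */
                ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, 1);
}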
8885 * Generic software event infrastructure
8900 * We directly increment event->count and keep a second value in
8901 * event->hw.period_left to count intervals. This period event
8906 u64 perf_swevent_set_period(struct perf_event *event) in perf_swevent_set_period() argument
8908 struct hw_perf_event *hwc = &event->hw; in perf_swevent_set_period()
8929 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, in perf_swevent_overflow() argument
8933 struct hw_perf_event *hwc = &event->hw; in perf_swevent_overflow()
8937 overflow = perf_swevent_set_period(event); in perf_swevent_overflow()
8943 if (__perf_event_overflow(event, throttle, in perf_swevent_overflow()
8955 static void perf_swevent_event(struct perf_event *event, u64 nr, in perf_swevent_event() argument
8959 struct hw_perf_event *hwc = &event->hw; in perf_swevent_event()
8961 local64_add(nr, &event->count); in perf_swevent_event()
8966 if (!is_sampling_event(event)) in perf_swevent_event()
8969 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { in perf_swevent_event()
8971 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
8973 data->period = event->hw.last_period; in perf_swevent_event()
8975 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) in perf_swevent_event()
8976 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
8981 perf_swevent_overflow(event, 0, data, regs); in perf_swevent_event()
8984 static int perf_exclude_event(struct perf_event *event, in perf_exclude_event() argument
8987 if (event->hw.state & PERF_HES_STOPPED) in perf_exclude_event()
8991 if (event->attr.exclude_user && user_mode(regs)) in perf_exclude_event()
8994 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_exclude_event()
9001 static int perf_swevent_match(struct perf_event *event, in perf_swevent_match() argument
9007 if (event->attr.type != type) in perf_swevent_match()
9010 if (event->attr.config != event_id) in perf_swevent_match()
9013 if (perf_exclude_event(event, regs)) in perf_swevent_match()
9047 /* For the event head insertion and removal in the hlist */
9049 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) in find_swevent_head() argument
9052 u32 event_id = event->attr.config; in find_swevent_head()
9053 u64 type = event->attr.type; in find_swevent_head()
9056 * Event scheduling is always serialized against hlist allocation in find_swevent_head()
9061 lockdep_is_held(&event->ctx->lock)); in find_swevent_head()
9074 struct perf_event *event; in do_perf_sw_event() local
9082 hlist_for_each_entry_rcu(event, head, hlist_entry) { in do_perf_sw_event()
9083 if (perf_swevent_match(event, type, event_id, data, regs)) in do_perf_sw_event()
9084 perf_swevent_event(event, nr, data, regs); in do_perf_sw_event()
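do_perf_sw_event() is reached through the perf_sw_event() helper; the kernel itself uses it for the PERF_COUNT_SW_* counters. The page-fault accounting in mm/memory.c looks essentially like this sketch:

#include <linux/perf_event.h>

static void account_fault(struct pt_regs *regs, unsigned long address)
{
        /* finds the matching swevent hlist and calls perf_swevent_event() */
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
}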
9134 static void perf_swevent_read(struct perf_event *event) in perf_swevent_read() argument
9138 static int perf_swevent_add(struct perf_event *event, int flags) in perf_swevent_add() argument
9141 struct hw_perf_event *hwc = &event->hw; in perf_swevent_add()
9144 if (is_sampling_event(event)) { in perf_swevent_add()
9146 perf_swevent_set_period(event); in perf_swevent_add()
9151 head = find_swevent_head(swhash, event); in perf_swevent_add()
9155 hlist_add_head_rcu(&event->hlist_entry, head); in perf_swevent_add()
9156 perf_event_update_userpage(event); in perf_swevent_add()
9161 static void perf_swevent_del(struct perf_event *event, int flags) in perf_swevent_del() argument
9163 hlist_del_rcu(&event->hlist_entry); in perf_swevent_del()
9166 static void perf_swevent_start(struct perf_event *event, int flags) in perf_swevent_start() argument
9168 event->hw.state = 0; in perf_swevent_start()
9171 static void perf_swevent_stop(struct perf_event *event, int flags) in perf_swevent_stop() argument
9173 event->hw.state = PERF_HES_STOPPED; in perf_swevent_stop()
9265 static void sw_perf_event_destroy(struct perf_event *event) in sw_perf_event_destroy() argument
9267 u64 event_id = event->attr.config; in sw_perf_event_destroy()
9269 WARN_ON(event->parent); in sw_perf_event_destroy()
9275 static int perf_swevent_init(struct perf_event *event) in perf_swevent_init() argument
9277 u64 event_id = event->attr.config; in perf_swevent_init()
9279 if (event->attr.type != PERF_TYPE_SOFTWARE) in perf_swevent_init()
9285 if (has_branch_stack(event)) in perf_swevent_init()
9300 if (!event->parent) { in perf_swevent_init()
9308 event->destroy = sw_perf_event_destroy; in perf_swevent_init()
9329 static int perf_tp_filter_match(struct perf_event *event, in perf_tp_filter_match() argument
9335 if (event->parent) in perf_tp_filter_match()
9336 event = event->parent; in perf_tp_filter_match()
9338 if (likely(!event->filter) || filter_match_preds(event->filter, record)) in perf_tp_filter_match()
9343 static int perf_tp_event_match(struct perf_event *event, in perf_tp_event_match() argument
9347 if (event->hw.state & PERF_HES_STOPPED) in perf_tp_event_match()
9352 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_tp_event_match()
9355 if (!perf_tp_filter_match(event, data)) in perf_tp_event_match()
9373 perf_tp_event(call->event.type, count, raw_data, size, regs, head, in perf_trace_run_bpf_submit()
9383 struct perf_event *event; in perf_tp_event() local
9397 hlist_for_each_entry_rcu(event, head, hlist_entry) { in perf_tp_event()
9398 if (perf_tp_event_match(event, &data, regs)) in perf_tp_event()
9399 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
9404 * deliver this event there too. in perf_tp_event()
9415 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_tp_event()
9416 if (event->cpu != smp_processor_id()) in perf_tp_event()
9418 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event()
9420 if (event->attr.config != entry->type) in perf_tp_event()
9422 if (perf_tp_event_match(event, &data, regs)) in perf_tp_event()
9423 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
9433 static void tp_perf_event_destroy(struct perf_event *event) in tp_perf_event_destroy() argument
9435 perf_trace_destroy(event); in tp_perf_event_destroy()
9438 static int perf_tp_event_init(struct perf_event *event) in perf_tp_event_init() argument
9442 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event_init()
9448 if (has_branch_stack(event)) in perf_tp_event_init()
9451 err = perf_trace_init(event); in perf_tp_event_init()
9455 event->destroy = tp_perf_event_destroy; in perf_tp_event_init()
9511 static int perf_kprobe_event_init(struct perf_event *event);
9523 static int perf_kprobe_event_init(struct perf_event *event) in perf_kprobe_event_init() argument
9528 if (event->attr.type != perf_kprobe.type) in perf_kprobe_event_init()
9537 if (has_branch_stack(event)) in perf_kprobe_event_init()
9540 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_kprobe_event_init()
9541 err = perf_kprobe_init(event, is_retprobe); in perf_kprobe_event_init()
9545 event->destroy = perf_kprobe_destroy; in perf_kprobe_event_init()
9570 static int perf_uprobe_event_init(struct perf_event *event);
9582 static int perf_uprobe_event_init(struct perf_event *event) in perf_uprobe_event_init() argument
9588 if (event->attr.type != perf_uprobe.type) in perf_uprobe_event_init()
9597 if (has_branch_stack(event)) in perf_uprobe_event_init()
9600 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_uprobe_event_init()
9601 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT; in perf_uprobe_event_init()
9602 err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe); in perf_uprobe_event_init()
9606 event->destroy = perf_uprobe_destroy; in perf_uprobe_event_init()
9623 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
9625 ftrace_profile_free_filter(event); in perf_event_free_filter()
9629 static void bpf_overflow_handler(struct perf_event *event, in bpf_overflow_handler() argument
9635 .event = event, in bpf_overflow_handler()
9643 ret = BPF_PROG_RUN(event->prog, &ctx); in bpf_overflow_handler()
9650 event->orig_overflow_handler(event, data, regs); in bpf_overflow_handler()
9653 static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_handler() argument
9657 if (event->overflow_handler_context) in perf_event_set_bpf_handler()
9661 if (event->prog) in perf_event_set_bpf_handler()
9668 if (event->attr.precise_ip && in perf_event_set_bpf_handler()
9670 (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY) || in perf_event_set_bpf_handler()
9671 event->attr.exclude_callchain_kernel || in perf_event_set_bpf_handler()
9672 event->attr.exclude_callchain_user)) { in perf_event_set_bpf_handler()
9686 event->prog = prog; in perf_event_set_bpf_handler()
9687 event->orig_overflow_handler = READ_ONCE(event->overflow_handler); in perf_event_set_bpf_handler()
9688 WRITE_ONCE(event->overflow_handler, bpf_overflow_handler); in perf_event_set_bpf_handler()
9692 static void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
9694 struct bpf_prog *prog = event->prog; in perf_event_free_bpf_handler()
9699 WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler); in perf_event_free_bpf_handler()
9700 event->prog = NULL; in perf_event_free_bpf_handler()
9704 static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_handler() argument
9708 static void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
9714 * returns true if the event is a tracepoint, or a kprobe/uprobe created
9717 static inline bool perf_event_is_tracing(struct perf_event *event) in perf_event_is_tracing() argument
9719 if (event->pmu == &perf_tracepoint) in perf_event_is_tracing()
9722 if (event->pmu == &perf_kprobe) in perf_event_is_tracing()
9726 if (event->pmu == &perf_uprobe) in perf_event_is_tracing()
9732 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_prog() argument
9738 if (!perf_event_is_tracing(event)) in perf_event_set_bpf_prog()
9739 return perf_event_set_bpf_handler(event, prog_fd); in perf_event_set_bpf_prog()
9741 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE; in perf_event_set_bpf_prog()
9742 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; in perf_event_set_bpf_prog()
9743 is_syscall_tp = is_syscall_trace_event(event->tp_event); in perf_event_set_bpf_prog()
9762 !(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) { in perf_event_set_bpf_prog()
9768 int off = trace_event_get_offsets(event->tp_event); in perf_event_set_bpf_prog()
9776 ret = perf_event_attach_bpf_prog(event, prog); in perf_event_set_bpf_prog()
9782 static void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
9784 if (!perf_event_is_tracing(event)) { in perf_event_free_bpf_prog()
9785 perf_event_free_bpf_handler(event); in perf_event_free_bpf_prog()
9788 perf_event_detach_bpf_prog(event); in perf_event_free_bpf_prog()
9797 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
9801 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) in perf_event_set_bpf_prog() argument
9806 static void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
9828 perf_addr_filter_new(struct perf_event *event, struct list_head *filters) in perf_addr_filter_new() argument
9830 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu); in perf_addr_filter_new()
9857 static void perf_addr_filters_splice(struct perf_event *event, in perf_addr_filters_splice() argument
9863 if (!has_addr_filter(event)) in perf_addr_filters_splice()
9867 if (event->parent) in perf_addr_filters_splice()
9870 raw_spin_lock_irqsave(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
9872 list_splice_init(&event->addr_filters.list, &list); in perf_addr_filters_splice()
9874 list_splice(head, &event->addr_filters.list); in perf_addr_filters_splice()
9876 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
9902 * Update event's address range filters based on the
9905 static void perf_event_addr_filters_apply(struct perf_event *event) in perf_event_addr_filters_apply() argument
9907 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_apply()
9908 struct task_struct *task = READ_ONCE(event->ctx->task); in perf_event_addr_filters_apply()
9915 * We may observe TASK_TOMBSTONE, which means that the event tear-down in perf_event_addr_filters_apply()
9922 mm = get_task_mm(event->ctx->task); in perf_event_addr_filters_apply()
9936 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_apply()
9937 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_apply()
9939 perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]); in perf_event_addr_filters_apply()
9941 event->addr_filter_ranges[count].start = filter->offset; in perf_event_addr_filters_apply()
9942 event->addr_filter_ranges[count].size = filter->size; in perf_event_addr_filters_apply()
9948 event->addr_filters_gen++; in perf_event_addr_filters_apply()
9958 perf_event_stop(event, 1); in perf_event_addr_filters_apply()
10012 perf_event_parse_addr_filter(struct perf_event *event, char *fstr, in perf_event_parse_addr_filter() argument
10039 filter = perf_addr_filter_new(event, filters); in perf_event_parse_addr_filter()
10098 * Make sure that it doesn't contradict itself or the event's in perf_event_parse_addr_filter()
10103 if (kernel && event->attr.exclude_kernel) in perf_event_parse_addr_filter()
10127 if (!event->ctx->task) in perf_event_parse_addr_filter()
10142 event->addr_filters.nr_file_filters++; in perf_event_parse_addr_filter()
10168 perf_event_set_addr_filter(struct perf_event *event, char *filter_str) in perf_event_set_addr_filter() argument
10177 lockdep_assert_held(&event->ctx->mutex); in perf_event_set_addr_filter()
10179 if (WARN_ON_ONCE(event->parent)) in perf_event_set_addr_filter()
10182 ret = perf_event_parse_addr_filter(event, filter_str, &filters); in perf_event_set_addr_filter()
10186 ret = event->pmu->addr_filters_validate(&filters); in perf_event_set_addr_filter()
10191 perf_addr_filters_splice(event, &filters); in perf_event_set_addr_filter()
10194 perf_event_for_each_child(event, perf_event_addr_filters_apply); in perf_event_set_addr_filter()
10202 event->addr_filters.nr_file_filters = 0; in perf_event_set_addr_filter()
10207 static int perf_event_set_filter(struct perf_event *event, void __user *arg) in perf_event_set_filter() argument
10217 if (perf_event_is_tracing(event)) { in perf_event_set_filter()
10218 struct perf_event_context *ctx = event->ctx; in perf_event_set_filter()
10228 * This can result in the event getting moved to a different ctx, in perf_event_set_filter()
10232 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); in perf_event_set_filter()
10236 if (has_addr_filter(event)) in perf_event_set_filter()
10237 ret = perf_event_set_addr_filter(event, filter_str); in perf_event_set_filter()
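For address-filter capable PMUs (Intel PT, CoreSight), perf_event_set_filter() is driven by PERF_EVENT_IOC_SET_FILTER with the textual syntax parsed by perf_event_parse_addr_filter(). A sketch with made-up addresses and object path:

#include <linux/perf_event.h>
#include <sys/ioctl.h>

static int limit_trace_range(int perf_fd)
{
        /* "<filter|start|stop> <start>/<size>@<object file>", comma-separated */
        return ioctl(perf_fd, PERF_EVENT_IOC_SET_FILTER,
                     "filter 0x1000/0x2000@/usr/bin/example");
}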
10252 struct perf_event *event; in perf_swevent_hrtimer() local
10255 event = container_of(hrtimer, struct perf_event, hw.hrtimer); in perf_swevent_hrtimer()
10257 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_swevent_hrtimer()
10260 event->pmu->read(event); in perf_swevent_hrtimer()
10262 perf_sample_data_init(&data, 0, event->hw.last_period); in perf_swevent_hrtimer()
10265 if (regs && !perf_exclude_event(event, regs)) { in perf_swevent_hrtimer()
10266 if (!(event->attr.exclude_idle && is_idle_task(current))) in perf_swevent_hrtimer()
10267 if (__perf_event_overflow(event, 1, &data, regs)) in perf_swevent_hrtimer()
10271 period = max_t(u64, 10000, event->hw.sample_period); in perf_swevent_hrtimer()
10277 static void perf_swevent_start_hrtimer(struct perf_event *event) in perf_swevent_start_hrtimer() argument
10279 struct hw_perf_event *hwc = &event->hw; in perf_swevent_start_hrtimer()
10282 if (!is_sampling_event(event)) in perf_swevent_start_hrtimer()
10298 static void perf_swevent_cancel_hrtimer(struct perf_event *event) in perf_swevent_cancel_hrtimer() argument
10300 struct hw_perf_event *hwc = &event->hw; in perf_swevent_cancel_hrtimer()
10302 if (is_sampling_event(event)) { in perf_swevent_cancel_hrtimer()
10310 static void perf_swevent_init_hrtimer(struct perf_event *event) in perf_swevent_init_hrtimer() argument
10312 struct hw_perf_event *hwc = &event->hw; in perf_swevent_init_hrtimer()
10314 if (!is_sampling_event(event)) in perf_swevent_init_hrtimer()
10324 if (event->attr.freq) { in perf_swevent_init_hrtimer()
10325 long freq = event->attr.sample_freq; in perf_swevent_init_hrtimer()
10327 event->attr.sample_period = NSEC_PER_SEC / freq; in perf_swevent_init_hrtimer()
10328 hwc->sample_period = event->attr.sample_period; in perf_swevent_init_hrtimer()
10331 event->attr.freq = 0; in perf_swevent_init_hrtimer()
10336 * Software event: cpu wall time clock
10339 static void cpu_clock_event_update(struct perf_event *event) in cpu_clock_event_update() argument
10345 prev = local64_xchg(&event->hw.prev_count, now); in cpu_clock_event_update()
10346 local64_add(now - prev, &event->count); in cpu_clock_event_update()
10349 static void cpu_clock_event_start(struct perf_event *event, int flags) in cpu_clock_event_start() argument
10351 local64_set(&event->hw.prev_count, local_clock()); in cpu_clock_event_start()
10352 perf_swevent_start_hrtimer(event); in cpu_clock_event_start()
10355 static void cpu_clock_event_stop(struct perf_event *event, int flags) in cpu_clock_event_stop() argument
10357 perf_swevent_cancel_hrtimer(event); in cpu_clock_event_stop()
10358 cpu_clock_event_update(event); in cpu_clock_event_stop()
10361 static int cpu_clock_event_add(struct perf_event *event, int flags) in cpu_clock_event_add() argument
10364 cpu_clock_event_start(event, flags); in cpu_clock_event_add()
10365 perf_event_update_userpage(event); in cpu_clock_event_add()
10370 static void cpu_clock_event_del(struct perf_event *event, int flags) in cpu_clock_event_del() argument
10372 cpu_clock_event_stop(event, flags); in cpu_clock_event_del()
10375 static void cpu_clock_event_read(struct perf_event *event) in cpu_clock_event_read() argument
10377 cpu_clock_event_update(event); in cpu_clock_event_read()
10380 static int cpu_clock_event_init(struct perf_event *event) in cpu_clock_event_init() argument
10382 if (event->attr.type != PERF_TYPE_SOFTWARE) in cpu_clock_event_init()
10385 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) in cpu_clock_event_init()
10391 if (has_branch_stack(event)) in cpu_clock_event_init()
10394 perf_swevent_init_hrtimer(event); in cpu_clock_event_init()
10413 * Software event: task time clock
10416 static void task_clock_event_update(struct perf_event *event, u64 now) in task_clock_event_update() argument
10421 prev = local64_xchg(&event->hw.prev_count, now); in task_clock_event_update()
10423 local64_add(delta, &event->count); in task_clock_event_update()
10426 static void task_clock_event_start(struct perf_event *event, int flags) in task_clock_event_start() argument
10428 local64_set(&event->hw.prev_count, event->ctx->time); in task_clock_event_start()
10429 perf_swevent_start_hrtimer(event); in task_clock_event_start()
10432 static void task_clock_event_stop(struct perf_event *event, int flags) in task_clock_event_stop() argument
10434 perf_swevent_cancel_hrtimer(event); in task_clock_event_stop()
10435 task_clock_event_update(event, event->ctx->time); in task_clock_event_stop()
10438 static int task_clock_event_add(struct perf_event *event, int flags) in task_clock_event_add() argument
10441 task_clock_event_start(event, flags); in task_clock_event_add()
10442 perf_event_update_userpage(event); in task_clock_event_add()
10447 static void task_clock_event_del(struct perf_event *event, int flags) in task_clock_event_del() argument
10449 task_clock_event_stop(event, PERF_EF_UPDATE); in task_clock_event_del()
10452 static void task_clock_event_read(struct perf_event *event) in task_clock_event_read() argument
10455 u64 delta = now - event->ctx->timestamp; in task_clock_event_read()
10456 u64 time = event->ctx->time + delta; in task_clock_event_read()
10458 task_clock_event_update(event, time); in task_clock_event_read()
10461 static int task_clock_event_init(struct perf_event *event) in task_clock_event_init() argument
10463 if (event->attr.type != PERF_TYPE_SOFTWARE) in task_clock_event_init()
10466 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) in task_clock_event_init()
10472 if (has_branch_stack(event)) in task_clock_event_init()
10475 perf_swevent_init_hrtimer(event); in task_clock_event_init()
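
Both clock events share the same accumulation pattern: atomically exchange hw.prev_count with the new timestamp and add the delta to event->count. A userspace-only sketch of that pattern with C11 atomics (the function and variable names are made up; this is not the kernel's local64 API):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static _Atomic uint64_t prev_count;
static _Atomic uint64_t count;

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void clock_event_update(void)
{
	uint64_t now = now_ns();
	uint64_t prev = atomic_exchange(&prev_count, now);  /* like local64_xchg() */

	atomic_fetch_add(&count, now - prev);                /* like local64_add() */
}

int main(void)
{
	atomic_store(&prev_count, now_ns());                 /* like the _start() path */
	for (int i = 0; i < 3; i++)
		clock_event_update();                        /* like _read()/_stop() */
	printf("accumulated: %llu ns\n", (unsigned long long)atomic_load(&count));
	return 0;
}
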
10506 static int perf_event_nop_int(struct perf_event *event, u64 value) in perf_event_nop_int() argument
10548 static int perf_event_idx_default(struct perf_event *event) in perf_event_idx_default() argument
10834 * is fast, provided a valid software event is supplied. in perf_pmu_register()
10888 static inline bool has_extended_regs(struct perf_event *event) in has_extended_regs() argument
10890 return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) || in has_extended_regs()
10891 (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK); in has_extended_regs()
10894 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) in perf_try_init_event() argument
10905 * if this is a sibling event, acquire the ctx->mutex to protect in perf_try_init_event()
10908 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) { in perf_try_init_event()
10913 ctx = perf_event_ctx_lock_nested(event->group_leader, in perf_try_init_event()
10918 event->pmu = pmu; in perf_try_init_event()
10919 ret = pmu->event_init(event); in perf_try_init_event()
10922 perf_event_ctx_unlock(event->group_leader, ctx); in perf_try_init_event()
10926 has_extended_regs(event)) in perf_try_init_event()
10930 event_has_any_exclude_flag(event)) in perf_try_init_event()
10933 if (ret && event->destroy) in perf_try_init_event()
10934 event->destroy(event); in perf_try_init_event()
10943 static struct pmu *perf_init_event(struct perf_event *event) in perf_init_event() argument
10951 if (event->parent && event->parent->pmu) { in perf_init_event()
10952 pmu = event->parent->pmu; in perf_init_event()
10953 ret = perf_try_init_event(pmu, event); in perf_init_event()
10962 type = event->attr.type; in perf_init_event()
10971 ret = perf_try_init_event(pmu, event); in perf_init_event()
10972 if (ret == -ENOENT && event->attr.type != type) { in perf_init_event()
10973 type = event->attr.type; in perf_init_event()
10984 ret = perf_try_init_event(pmu, event); in perf_init_event()
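
perf_init_event() above tries the parent's PMU first for inherited events, then looks the PMU up by attr.type, restarting the lookup whenever event_init() rewrites the type and returns -ENOENT, and finally falls back to walking the full PMU list. A toy userspace program (all types and PMUs here are made up) showing just that retry-on-redirect shape:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct toy_event { int type; };
struct toy_pmu {
	int type;
	int (*event_init)(struct toy_pmu *pmu, struct toy_event *ev);
};

static int ext_init(struct toy_pmu *pmu, struct toy_event *ev)
{
	(void)pmu;
	ev->type = 1;		/* rewrite the type to the "real" PMU */
	return -ENOENT;		/* and ask the caller to retry the lookup */
}

static int plain_init(struct toy_pmu *pmu, struct toy_event *ev)
{
	return ev->type == pmu->type ? 0 : -ENOENT;
}

static struct toy_pmu pmus[] = {
	{ .type = 100, .event_init = ext_init },	/* "extension" type */
	{ .type = 1,   .event_init = plain_init },
};

static struct toy_pmu *toy_init_event(struct toy_event *ev)
{
	int type = ev->type;
	size_t i;
again:
	for (i = 0; i < sizeof(pmus) / sizeof(pmus[0]); i++) {
		int ret;

		if (pmus[i].type != type)
			continue;
		ret = pmus[i].event_init(&pmus[i], ev);
		if (ret == -ENOENT && ev->type != type) {
			type = ev->type;	/* event_init() redirected us */
			goto again;
		}
		return ret ? NULL : &pmus[i];
	}
	return NULL;
}

int main(void)
{
	struct toy_event ev = { .type = 100 };
	struct toy_pmu *pmu = toy_init_event(&ev);

	printf("resolved to pmu type %d\n", pmu ? pmu->type : -1);
	return 0;
}
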
11000 static void attach_sb_event(struct perf_event *event) in attach_sb_event() argument
11002 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in attach_sb_event()
11005 list_add_rcu(&event->sb_list, &pel->list); in attach_sb_event()
11016 static void account_pmu_sb_event(struct perf_event *event) in account_pmu_sb_event() argument
11018 if (is_sb_event(event)) in account_pmu_sb_event()
11019 attach_sb_event(event); in account_pmu_sb_event()
11022 static void account_event_cpu(struct perf_event *event, int cpu) in account_event_cpu() argument
11024 if (event->parent) in account_event_cpu()
11027 if (is_cgroup_event(event)) in account_event_cpu()
11052 static void account_event(struct perf_event *event) in account_event() argument
11056 if (event->parent) in account_event()
11059 if (event->attach_state & PERF_ATTACH_TASK) in account_event()
11061 if (event->attr.mmap || event->attr.mmap_data) in account_event()
11063 if (event->attr.comm) in account_event()
11065 if (event->attr.namespaces) in account_event()
11067 if (event->attr.cgroup) in account_event()
11069 if (event->attr.task) in account_event()
11071 if (event->attr.freq) in account_event()
11073 if (event->attr.context_switch) { in account_event()
11077 if (has_branch_stack(event)) in account_event()
11079 if (is_cgroup_event(event)) in account_event()
11081 if (event->attr.ksymbol) in account_event()
11083 if (event->attr.bpf_event) in account_event()
11085 if (event->attr.text_poke) in account_event()
11116 account_event_cpu(event, event->cpu); in account_event()
11118 account_pmu_sb_event(event); in account_event()
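
account_event() above bumps a per-feature counter for every kind of sideband record the new event requests, so the emit paths stay cheap when nobody is listening. A minimal userspace sketch (error handling trimmed; assumes recent kernel headers for the newer bits) of the attr flags that feed that accounting:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;	/* sideband records only */
	attr.mmap = 1;				/* mmap records */
	attr.comm = 1;				/* comm change records */
	attr.task = 1;				/* fork/exit records */
	attr.context_switch = 1;		/* sched switch records */
	attr.ksymbol = 1;			/* kernel symbol records */

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	/* Records only flow once the fd's ring buffer is mmap()ed. */
	if (fd >= 0)
		close(fd);
	return 0;
}
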
11122 * Allocate and initialize an event structure
11133 struct perf_event *event; in perf_event_alloc() local
11142 event = kzalloc(sizeof(*event), GFP_KERNEL); in perf_event_alloc()
11143 if (!event) in perf_event_alloc()
11151 group_leader = event; in perf_event_alloc()
11153 mutex_init(&event->child_mutex); in perf_event_alloc()
11154 INIT_LIST_HEAD(&event->child_list); in perf_event_alloc()
11156 INIT_LIST_HEAD(&event->event_entry); in perf_event_alloc()
11157 INIT_LIST_HEAD(&event->sibling_list); in perf_event_alloc()
11158 INIT_LIST_HEAD(&event->active_list); in perf_event_alloc()
11159 init_event_group(event); in perf_event_alloc()
11160 INIT_LIST_HEAD(&event->rb_entry); in perf_event_alloc()
11161 INIT_LIST_HEAD(&event->active_entry); in perf_event_alloc()
11162 INIT_LIST_HEAD(&event->addr_filters.list); in perf_event_alloc()
11163 INIT_HLIST_NODE(&event->hlist_entry); in perf_event_alloc()
11166 init_waitqueue_head(&event->waitq); in perf_event_alloc()
11167 event->pending_disable = -1; in perf_event_alloc()
11168 init_irq_work(&event->pending, perf_pending_event); in perf_event_alloc()
11170 mutex_init(&event->mmap_mutex); in perf_event_alloc()
11171 raw_spin_lock_init(&event->addr_filters.lock); in perf_event_alloc()
11173 atomic_long_set(&event->refcount, 1); in perf_event_alloc()
11174 event->cpu = cpu; in perf_event_alloc()
11175 event->attr = *attr; in perf_event_alloc()
11176 event->group_leader = group_leader; in perf_event_alloc()
11177 event->pmu = NULL; in perf_event_alloc()
11178 event->oncpu = -1; in perf_event_alloc()
11180 event->parent = parent_event; in perf_event_alloc()
11182 event->ns = get_pid_ns(task_active_pid_ns(current)); in perf_event_alloc()
11183 event->id = atomic64_inc_return(&perf_event_id); in perf_event_alloc()
11185 event->state = PERF_EVENT_STATE_INACTIVE; in perf_event_alloc()
11188 event->attach_state = PERF_ATTACH_TASK; in perf_event_alloc()
11194 event->hw.target = get_task_struct(task); in perf_event_alloc()
11197 event->clock = &local_clock; in perf_event_alloc()
11199 event->clock = parent_event->clock; in perf_event_alloc()
11209 event->prog = prog; in perf_event_alloc()
11210 event->orig_overflow_handler = in perf_event_alloc()
11217 event->overflow_handler = overflow_handler; in perf_event_alloc()
11218 event->overflow_handler_context = context; in perf_event_alloc()
11219 } else if (is_write_backward(event)) { in perf_event_alloc()
11220 event->overflow_handler = perf_event_output_backward; in perf_event_alloc()
11221 event->overflow_handler_context = NULL; in perf_event_alloc()
11223 event->overflow_handler = perf_event_output_forward; in perf_event_alloc()
11224 event->overflow_handler_context = NULL; in perf_event_alloc()
11227 perf_event__state_init(event); in perf_event_alloc()
11231 hwc = &event->hw; in perf_event_alloc()
11246 if (!has_branch_stack(event)) in perf_event_alloc()
11247 event->attr.branch_sample_type = 0; in perf_event_alloc()
11249 pmu = perf_init_event(event); in perf_event_alloc()
11264 if (event->attr.aux_output && in perf_event_alloc()
11271 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); in perf_event_alloc()
11276 err = exclusive_event_init(event); in perf_event_alloc()
11280 if (has_addr_filter(event)) { in perf_event_alloc()
11281 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters, in perf_event_alloc()
11284 if (!event->addr_filter_ranges) { in perf_event_alloc()
11293 if (event->parent) { in perf_event_alloc()
11294 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_alloc()
11297 memcpy(event->addr_filter_ranges, in perf_event_alloc()
11298 event->parent->addr_filter_ranges, in perf_event_alloc()
11304 event->addr_filters_gen = 1; in perf_event_alloc()
11307 if (!event->parent) { in perf_event_alloc()
11308 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { in perf_event_alloc()
11315 err = security_perf_event_alloc(event); in perf_event_alloc()
11320 account_event(event); in perf_event_alloc()
11322 return event; in perf_event_alloc()
11325 if (!event->parent) { in perf_event_alloc()
11326 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in perf_event_alloc()
11330 kfree(event->addr_filter_ranges); in perf_event_alloc()
11333 exclusive_event_destroy(event); in perf_event_alloc()
11336 if (is_cgroup_event(event)) in perf_event_alloc()
11337 perf_detach_cgroup(event); in perf_event_alloc()
11338 if (event->destroy) in perf_event_alloc()
11339 event->destroy(event); in perf_event_alloc()
11342 if (event->ns) in perf_event_alloc()
11343 put_pid_ns(event->ns); in perf_event_alloc()
11344 if (event->hw.target) in perf_event_alloc()
11345 put_task_struct(event->hw.target); in perf_event_alloc()
11346 kfree(event); in perf_event_alloc()
11466 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) in perf_event_set_output() argument
11475 if (event == output_event) in perf_event_set_output()
11481 if (output_event->cpu != event->cpu) in perf_event_set_output()
11487 if (output_event->cpu == -1 && output_event->ctx != event->ctx) in perf_event_set_output()
11493 if (output_event->clock != event->clock) in perf_event_set_output()
11500 if (is_write_backward(output_event) != is_write_backward(event)) in perf_event_set_output()
11506 if (has_aux(event) && has_aux(output_event) && in perf_event_set_output()
11507 event->pmu != output_event->pmu) in perf_event_set_output()
11511 mutex_lock(&event->mmap_mutex); in perf_event_set_output()
11513 if (atomic_read(&event->mmap_count)) in perf_event_set_output()
11523 ring_buffer_attach(event, rb); in perf_event_set_output()
11527 mutex_unlock(&event->mmap_mutex); in perf_event_set_output()
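
perf_event_set_output() enforces its constraints (not the same event, same CPU or same context, same clock, same write direction, compatible AUX PMU) before letting one event redirect its records into another event's ring buffer; from userspace this path is reached through the PERF_EVENT_IOC_SET_OUTPUT ioctl. A minimal sketch (error handling trimmed) where both events are opened on the current task so they share a context, satisfying the checks above:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long page = sysconf(_SC_PAGESIZE);
	size_t len = (1 + 8) * (size_t)page;	/* metadata page + 8 data pages */
	int fd_a, fd_b;
	void *rb;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;
	attr.sample_period = 1;
	attr.sample_type = PERF_SAMPLE_TIME | PERF_SAMPLE_CPU;

	fd_a = perf_event_open(&attr, 0, -1, -1, 0);	/* current task */
	fd_b = perf_event_open(&attr, 0, -1, -1, 0);	/* same task, same context */

	/* Only fd_a gets a ring buffer. */
	rb = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd_a, 0);

	/* fd_b's samples are routed into fd_a's buffer. */
	ioctl(fd_b, PERF_EVENT_IOC_SET_OUTPUT, fd_a);

	munmap(rb, len);
	close(fd_b);
	close(fd_a);
	return 0;
}
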
11542 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) in perf_event_set_clock() argument
11548 event->clock = &ktime_get_mono_fast_ns; in perf_event_set_clock()
11553 event->clock = &ktime_get_raw_fast_ns; in perf_event_set_clock()
11558 event->clock = &ktime_get_real_ns; in perf_event_set_clock()
11562 event->clock = &ktime_get_boottime_ns; in perf_event_set_clock()
11566 event->clock = &ktime_get_clocktai_ns; in perf_event_set_clock()
11573 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) in perf_event_set_clock()
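
perf_event_set_clock() maps the user-supplied clockid onto one of the kernel clock accessors and rejects clocks that are not NMI-safe on PMUs that take NMIs. From userspace the clock is selected with attr.use_clockid and attr.clockid; a minimal sketch (error handling trimmed) requesting CLOCK_MONOTONIC_RAW, which the code above maps to ktime_get_raw_fast_ns():

#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.use_clockid = 1;
	attr.clockid = CLOCK_MONOTONIC_RAW;	/* timestamps in this clock domain */

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd >= 0)
		close(fd);
	return 0;
}
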
11611 * sys_perf_event_open - open a performance event, associate it to a task/cpu
11616 * @group_fd: group leader event fd
11623 struct perf_event *event, *sibling; in SYSCALL_DEFINE5() local
11732 * perf_install_in_context() call for this new event to in SYSCALL_DEFINE5()
11744 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, in SYSCALL_DEFINE5()
11746 if (IS_ERR(event)) { in SYSCALL_DEFINE5()
11747 err = PTR_ERR(event); in SYSCALL_DEFINE5()
11751 if (is_sampling_event(event)) { in SYSCALL_DEFINE5()
11752 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { in SYSCALL_DEFINE5()
11762 pmu = event->pmu; in SYSCALL_DEFINE5()
11765 err = perf_event_set_clock(event, attr.clockid); in SYSCALL_DEFINE5()
11771 event->event_caps |= PERF_EV_CAP_SOFTWARE; in SYSCALL_DEFINE5()
11774 if (is_software_event(event) && in SYSCALL_DEFINE5()
11777 * If the event is a sw event, but the group_leader in SYSCALL_DEFINE5()
11785 } else if (!is_software_event(event) && in SYSCALL_DEFINE5()
11790 * try to add a hardware event, move the whole group to in SYSCALL_DEFINE5()
11800 ctx = find_get_context(pmu, task, event); in SYSCALL_DEFINE5()
11807 * Look up the group leader (we will attach this event to it): in SYSCALL_DEFINE5()
11820 if (group_leader->clock != event->clock) in SYSCALL_DEFINE5()
11828 if (group_leader->cpu != event->cpu) in SYSCALL_DEFINE5()
11854 err = perf_event_set_output(event, output_event); in SYSCALL_DEFINE5()
11859 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, in SYSCALL_DEFINE5()
11882 * if this new event wound up on the same ctx, if so in SYSCALL_DEFINE5()
11914 if (!perf_event_validate_size(event)) { in SYSCALL_DEFINE5()
11921 * Check if the @cpu we're creating an event for is online. in SYSCALL_DEFINE5()
11935 if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) { in SYSCALL_DEFINE5()
11942 * because we need to serialize with concurrent event creation. in SYSCALL_DEFINE5()
11944 if (!exclusive_event_installable(event, ctx)) { in SYSCALL_DEFINE5()
11993 * event. What we want here is event in the initial in SYSCALL_DEFINE5()
12004 * perf_install_in_context() which is the point the event is active and in SYSCALL_DEFINE5()
12007 perf_event__header_size(event); in SYSCALL_DEFINE5()
12008 perf_event__id_header_size(event); in SYSCALL_DEFINE5()
12010 event->owner = current; in SYSCALL_DEFINE5()
12012 perf_install_in_context(ctx, event, event->cpu); in SYSCALL_DEFINE5()
12025 list_add_tail(&event->owner_entry, &current->perf_event_list); in SYSCALL_DEFINE5()
12030 * new event on the sibling_list. This ensures destruction in SYSCALL_DEFINE5()
12050 * and that will take care of freeing the event. in SYSCALL_DEFINE5()
12053 free_event(event); in SYSCALL_DEFINE5()
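
The syscall path above validates a new event against its group leader (same clock, same CPU, and the software-group promotion/move rules) before installing it into a context. A minimal userspace sketch (error handling trimmed) of the group_fd side of that: open a leader, attach a sibling, and control both as one group:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

static int open_sw(unsigned long long config, int group_fd)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = config;
	attr.disabled = (group_fd == -1);	/* only the leader starts disabled */
	return perf_event_open(&attr, 0, -1, group_fd, 0);
}

int main(void)
{
	uint64_t val;
	int leader = open_sw(PERF_COUNT_SW_TASK_CLOCK, -1);
	int sibling = open_sw(PERF_COUNT_SW_PAGE_FAULTS, leader);

	/* PERF_IOC_FLAG_GROUP applies the ioctl to the whole group. */
	ioctl(leader, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);

	for (volatile int i = 0; i < 1000000; i++)
		;

	ioctl(leader, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);

	if (read(leader, &val, sizeof(val)) == sizeof(val))
		printf("task-clock:  %llu ns\n", (unsigned long long)val);
	if (read(sibling, &val, sizeof(val)) == sizeof(val))
		printf("page-faults: %llu\n", (unsigned long long)val);

	close(sibling);
	close(leader);
	return 0;
}
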
12081 struct perf_event *event; in perf_event_create_kernel_counter() local
12091 event = perf_event_alloc(attr, cpu, task, NULL, NULL, in perf_event_create_kernel_counter()
12093 if (IS_ERR(event)) { in perf_event_create_kernel_counter()
12094 err = PTR_ERR(event); in perf_event_create_kernel_counter()
12099 event->owner = TASK_TOMBSTONE; in perf_event_create_kernel_counter()
12104 ctx = find_get_context(event->pmu, task, event); in perf_event_create_kernel_counter()
12119 * Check if the @cpu we're creating an event for is online. in perf_event_create_kernel_counter()
12132 if (!exclusive_event_installable(event, ctx)) { in perf_event_create_kernel_counter()
12137 perf_install_in_context(ctx, event, event->cpu); in perf_event_create_kernel_counter()
12141 return event; in perf_event_create_kernel_counter()
12148 free_event(event); in perf_event_create_kernel_counter()
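
perf_event_create_kernel_counter() is the in-kernel counterpart of the syscall; the owner is set to TASK_TOMBSTONE to mark the event as kernel-owned. A hedged kernel-side sketch of how a caller might use it (builds only against kernel headers; example_event, example_start and example_stop are made-up names, and error handling is trimmed):

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>

static struct perf_event *example_event;	/* illustrative name */

static int example_start(int cpu)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config	= PERF_COUNT_SW_CPU_CLOCK,
		.size	= sizeof(struct perf_event_attr),
		.pinned	= 1,
	};

	/* NULL overflow handler: counting only, no sampling callback. */
	example_event = perf_event_create_kernel_counter(&attr, cpu, NULL,
							 NULL, NULL);
	return IS_ERR(example_event) ? PTR_ERR(example_event) : 0;
}

static void example_stop(void)
{
	u64 enabled, running;
	u64 count = perf_event_read_value(example_event, &enabled, &running);

	pr_info("cpu-clock counted %llu ns\n", count);
	perf_event_release_kernel(example_event);
}
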
12158 struct perf_event *event, *tmp; in perf_pmu_migrate_context() local
12169 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, in perf_pmu_migrate_context()
12171 perf_remove_from_context(event, 0); in perf_pmu_migrate_context()
12172 unaccount_event_cpu(event, src_cpu); in perf_pmu_migrate_context()
12174 list_add(&event->migrate_entry, &events); in perf_pmu_migrate_context()
12190 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { in perf_pmu_migrate_context()
12191 if (event->group_leader == event) in perf_pmu_migrate_context()
12194 list_del(&event->migrate_entry); in perf_pmu_migrate_context()
12195 if (event->state >= PERF_EVENT_STATE_OFF) in perf_pmu_migrate_context()
12196 event->state = PERF_EVENT_STATE_INACTIVE; in perf_pmu_migrate_context()
12197 account_event_cpu(event, dst_cpu); in perf_pmu_migrate_context()
12198 perf_install_in_context(dst_ctx, event, dst_cpu); in perf_pmu_migrate_context()
12206 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { in perf_pmu_migrate_context()
12207 list_del(&event->migrate_entry); in perf_pmu_migrate_context()
12208 if (event->state >= PERF_EVENT_STATE_OFF) in perf_pmu_migrate_context()
12209 event->state = PERF_EVENT_STATE_INACTIVE; in perf_pmu_migrate_context()
12210 account_event_cpu(event, dst_cpu); in perf_pmu_migrate_context()
12211 perf_install_in_context(dst_ctx, event, dst_cpu); in perf_pmu_migrate_context()
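
perf_pmu_migrate_context() above detaches every event of a PMU from the source CPU's context and reinstalls it on the destination CPU, re-accounting it and handling group leaders before their siblings (the two loops). It is used by drivers whose PMU is not tied to a single CPU, typically from a CPU-hotplug callback. A hedged kernel-side sketch (example_pmu, example_pmu_cpu and the callback name are made up):

#include <linux/cpumask.h>
#include <linux/perf_event.h>

static struct pmu example_pmu;			/* illustrative driver PMU */
static unsigned int example_pmu_cpu = 0;	/* CPU currently hosting the events */

static int example_pmu_offline_cpu(unsigned int cpu)
{
	unsigned int target;

	if (cpu != example_pmu_cpu)
		return 0;

	/* Pick any other online CPU to take over this PMU's contexts. */
	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&example_pmu, cpu, target);
	example_pmu_cpu = target;
	return 0;
}
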
12282 * Remove this event from the parent's list in perf_event_exit_event()
12359 * When a child task exits, feed back event values to parent events.
12366 struct perf_event *event, *tmp; in perf_event_exit_task() local
12370 list_for_each_entry_safe(event, tmp, &child->perf_event_list, in perf_event_exit_task()
12372 list_del_init(&event->owner_entry); in perf_event_exit_task()
12379 smp_store_release(&event->owner, NULL); in perf_event_exit_task()
12395 static void perf_free_event(struct perf_event *event, in perf_free_event() argument
12398 struct perf_event *parent = event->parent; in perf_free_event()
12404 list_del_init(&event->child_list); in perf_free_event()
12410 perf_group_detach(event); in perf_free_event()
12411 list_del_event(event, ctx); in perf_free_event()
12413 free_event(event); in perf_free_event()
12426 struct perf_event *event, *tmp; in perf_event_free_task() local
12447 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) in perf_event_free_task()
12448 perf_free_event(event, ctx); in perf_event_free_task()
12461 * _free_event()'s put_task_struct(event->hw.target) will be a in perf_event_free_task()
12501 const struct perf_event_attr *perf_event_attrs(struct perf_event *event) in perf_event_attrs() argument
12503 if (!event) in perf_event_attrs()
12506 return &event->attr; in perf_event_attrs()
12510 * Inherit an event from parent task to child task.
12576 * Make the child state follow the state of the parent event, in inherit_event()
12614 * Link this into the parent event's child list in inherit_event()
12623 * Inherits an event group.
12665 * Creates the child task context and tries to inherit the event-group.
12668 * inherited_all set when we 'fail' to inherit an orphaned event; this is
12676 inherit_task_group(struct perf_event *event, struct task_struct *parent, in inherit_task_group() argument
12684 if (!event->attr.inherit) { in inherit_task_group()
12704 ret = inherit_group(event, parent, parent_ctx, in inherit_task_group()
12720 struct perf_event *event; in perf_event_init_context() local
12754 perf_event_groups_for_each(event, &parent_ctx->pinned_groups) { in perf_event_init_context()
12755 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
12770 perf_event_groups_for_each(event, &parent_ctx->flexible_groups) { in perf_event_init_context()
12771 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
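
The inheritance path above (inherit_task_group() over the pinned and flexible groups) is what makes attr.inherit work: counters on a task are cloned into its children at fork and their values are folded back into the parent event when a child exits. A minimal userspace sketch (error handling trimmed) of a counter that covers both parent and child:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_PAGE_FAULTS;
	attr.inherit = 1;		/* clone the counter into children at fork */

	fd = perf_event_open(&attr, 0, -1, -1, 0);

	if (fork() == 0) {		/* child runs with an inherited clone */
		for (volatile int i = 0; i < 100000; i++)
			;
		_exit(0);
	}
	wait(NULL);			/* child's count folds back on exit */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("page faults (parent + children): %llu\n",
		       (unsigned long long)count);
	close(fd);
	return 0;
}
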
12874 struct perf_event *event; in __perf_event_exit_context() local
12878 list_for_each_entry(event, &ctx->event_list, event_entry) in __perf_event_exit_context()
12879 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); in __perf_event_exit_context()