Lines matching full:event (kernel/events/core.c)
181 static bool is_kernel_event(struct perf_event *event) in is_kernel_event() argument
183 return READ_ONCE(event->owner) == TASK_TOMBSTONE; in is_kernel_event()
195 * - removing the last event from a task ctx; this is relatively straight
198 * - adding the first event to a task ctx; this is tricky because we cannot
209 struct perf_event *event; member
217 struct perf_event *event = efs->event; in event_function() local
218 struct perf_event_context *ctx = event->ctx; in event_function()
253 efs->func(event, cpuctx, ctx, efs->data); in event_function()
260 static void event_function_call(struct perf_event *event, event_f func, void *data) in event_function_call() argument
262 struct perf_event_context *ctx = event->ctx; in event_function_call()
265 .event = event, in event_function_call()
270 if (!event->parent) { in event_function_call()
272 * If this is a !child event, we must hold ctx::mutex to in event_function_call()
273 * stabilize the event->ctx relation. See in event_function_call()
280 cpu_function_call(event->cpu, event_function, &efs); in event_function_call()
305 func(event, NULL, ctx, data); in event_function_call()
313 static void event_function_local(struct perf_event *event, event_f func, void *data) in event_function_local() argument
315 struct perf_event_context *ctx = event->ctx; in event_function_local()
352 func(event, cpuctx, ctx, data); in event_function_local()
412 * perf event paranoia level:
424 * max perf event sample rate
581 static u64 perf_event_time(struct perf_event *event);
590 static inline u64 perf_event_clock(struct perf_event *event) in perf_event_clock() argument
592 return event->clock(); in perf_event_clock()
596 * State based event timekeeping...
598 * The basic idea is to use event->state to determine which (if any) time
603 * Event groups make things a little more complicated, but not terribly so. The
618 __perf_effective_state(struct perf_event *event) in __perf_effective_state() argument
620 struct perf_event *leader = event->group_leader; in __perf_effective_state()
625 return event->state; in __perf_effective_state()
629 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running) in __perf_update_times() argument
631 enum perf_event_state state = __perf_effective_state(event); in __perf_update_times()
632 u64 delta = now - event->tstamp; in __perf_update_times()
634 *enabled = event->total_time_enabled; in __perf_update_times()
638 *running = event->total_time_running; in __perf_update_times()
643 static void perf_event_update_time(struct perf_event *event) in perf_event_update_time() argument
645 u64 now = perf_event_time(event); in perf_event_update_time()
647 __perf_update_times(event, now, &event->total_time_enabled, in perf_event_update_time()
648 &event->total_time_running); in perf_event_update_time()
649 event->tstamp = now; in perf_event_update_time()
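The enabled/running totals maintained above are what readers use to scale a multiplexed counter. A minimal consumer-side sketch (not part of core.c, plain C for illustration; the helper name is made up), assuming the usual count * enabled / running scaling that perf tooling applies:

#include <stdint.h>

/* Scale a raw count by the fraction of time the event was actually
 * scheduled on the PMU (time_running vs. time_enabled). */
static uint64_t scaled_count(uint64_t count, uint64_t enabled, uint64_t running)
{
        if (!running)
                return 0;       /* event never ran; nothing meaningful to scale */
        return (uint64_t)((double)count * enabled / running);
}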
661 perf_event_set_state(struct perf_event *event, enum perf_event_state state) in perf_event_set_state() argument
663 if (event->state == state) in perf_event_set_state()
666 perf_event_update_time(event); in perf_event_set_state()
671 if ((event->state < 0) ^ (state < 0)) in perf_event_set_state()
672 perf_event_update_sibling_time(event); in perf_event_set_state()
674 WRITE_ONCE(event->state, state); in perf_event_set_state()
680 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
682 struct perf_event_context *ctx = event->ctx; in perf_cgroup_match()
685 /* @event doesn't care about cgroup */ in perf_cgroup_match()
686 if (!event->cgrp) in perf_cgroup_match()
694 * Cgroup scoping is recursive. An event enabled for a cgroup is in perf_cgroup_match()
696 * cgroup is a descendant of @event's (the test covers identity in perf_cgroup_match()
700 event->cgrp->css.cgroup); in perf_cgroup_match()
703 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
705 css_put(&event->cgrp->css); in perf_detach_cgroup()
706 event->cgrp = NULL; in perf_detach_cgroup()
709 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
711 return event->cgrp != NULL; in is_cgroup_event()
714 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
718 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time()
748 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
756 if (!is_cgroup_event(event)) in update_cgrp_time_from_event()
759 cgrp = perf_cgroup_from_task(current, event->ctx); in update_cgrp_time_from_event()
763 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) in update_cgrp_time_from_event()
764 __update_cgrp_time(event->cgrp); in update_cgrp_time_from_event()
794 #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
903 static int perf_cgroup_ensure_storage(struct perf_event *event, in perf_cgroup_ensure_storage() argument
918 cpuctx = per_cpu_ptr(event->pmu->pmu_cpu_context, cpu); in perf_cgroup_ensure_storage()
944 static inline int perf_cgroup_connect(int fd, struct perf_event *event, in perf_cgroup_connect() argument
963 ret = perf_cgroup_ensure_storage(event, css); in perf_cgroup_connect()
968 event->cgrp = cgrp; in perf_cgroup_connect()
976 perf_detach_cgroup(event); in perf_cgroup_connect()
985 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) in perf_cgroup_set_shadow_time() argument
988 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_set_shadow_time()
989 event->shadow_ctx_time = now - t->timestamp; in perf_cgroup_set_shadow_time()
993 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
997 if (!is_cgroup_event(event)) in perf_cgroup_event_enable()
1008 * matching the event's cgroup, we must do this for every new event, in perf_cgroup_event_enable()
1015 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) in perf_cgroup_event_enable()
1023 per_cpu_ptr(&cgrp_cpuctx_list, event->cpu)); in perf_cgroup_event_enable()
1027 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1031 if (!is_cgroup_event(event)) in perf_cgroup_event_disable()
1052 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
1057 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
1060 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
1065 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
1083 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, in perf_cgroup_connect() argument
1102 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) in perf_cgroup_set_shadow_time() argument
1106 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
1112 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
1117 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1309 * because the sys_perf_event_open() case will install a new event and break
1320 * quiesce the event, after which we can install it in the new location. This
1321 * means that only external vectors (perf_fops, prctl) can perturb the event
1325 * However, because event->ctx can change while we're waiting to acquire
1344 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) in perf_event_ctx_lock_nested() argument
1350 ctx = READ_ONCE(event->ctx); in perf_event_ctx_lock_nested()
1358 if (event->ctx != ctx) { in perf_event_ctx_lock_nested()
1368 perf_event_ctx_lock(struct perf_event *event) in perf_event_ctx_lock() argument
1370 return perf_event_ctx_lock_nested(event, 0); in perf_event_ctx_lock()
1373 static void perf_event_ctx_unlock(struct perf_event *event, in perf_event_ctx_unlock() argument
1399 static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p, in perf_event_pid_type() argument
1406 if (event->parent) in perf_event_pid_type()
1407 event = event->parent; in perf_event_pid_type()
1409 nr = __task_pid_nr_ns(p, type, event->ns); in perf_event_pid_type()
1416 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) in perf_event_pid() argument
1418 return perf_event_pid_type(event, p, PIDTYPE_TGID); in perf_event_pid()
1421 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) in perf_event_tid() argument
1423 return perf_event_pid_type(event, p, PIDTYPE_PID); in perf_event_tid()
1427 * If we inherit events we want to return the parent event id
1430 static u64 primary_event_id(struct perf_event *event) in primary_event_id() argument
1432 u64 id = event->id; in primary_event_id()
1434 if (event->parent) in primary_event_id()
1435 id = event->parent->id; in primary_event_id()
1536 static u64 perf_event_time(struct perf_event *event) in perf_event_time() argument
1538 struct perf_event_context *ctx = event->ctx; in perf_event_time()
1540 if (is_cgroup_event(event)) in perf_event_time()
1541 return perf_cgroup_event_time(event); in perf_event_time()
1546 static enum event_type_t get_event_type(struct perf_event *event) in get_event_type() argument
1548 struct perf_event_context *ctx = event->ctx; in get_event_type()
1557 if (event->group_leader != event) in get_event_type()
1558 event = event->group_leader; in get_event_type()
1560 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE; in get_event_type()
1568 * Helper function to initialize event group nodes.
1570 static void init_event_group(struct perf_event *event) in init_event_group() argument
1572 RB_CLEAR_NODE(&event->group_node); in init_event_group()
1573 event->group_index = 0; in init_event_group()
1578 * based on event attrs bits.
1581 get_event_groups(struct perf_event *event, struct perf_event_context *ctx) in get_event_groups() argument
1583 if (event->attr.pinned) in get_event_groups()
1598 static inline struct cgroup *event_cgroup(const struct perf_event *event) in event_cgroup() argument
1603 if (event->cgrp) in event_cgroup()
1604 cgroup = event->cgrp->css.cgroup; in event_cgroup()
1611 * Compare function for event groups;
1686 * Insert @event into @groups' tree; using {@event->cpu, ++@groups->index} for
1692 struct perf_event *event) in perf_event_groups_insert() argument
1694 event->group_index = ++groups->index; in perf_event_groups_insert()
1696 rb_add(&event->group_node, &groups->tree, __group_less); in perf_event_groups_insert()
1700 * Helper function to insert event into the pinned or flexible groups.
1703 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx) in add_event_to_groups() argument
1707 groups = get_event_groups(event, ctx); in add_event_to_groups()
1708 perf_event_groups_insert(groups, event); in add_event_to_groups()
1716 struct perf_event *event) in perf_event_groups_delete() argument
1718 WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) || in perf_event_groups_delete()
1721 rb_erase(&event->group_node, &groups->tree); in perf_event_groups_delete()
1722 init_event_group(event); in perf_event_groups_delete()
1726 * Helper function to delete event from its groups.
1729 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) in del_event_from_groups() argument
1733 groups = get_event_groups(event, ctx); in del_event_from_groups()
1734 perf_event_groups_delete(groups, event); in del_event_from_groups()
1738 * Get the leftmost event in the cpu/cgroup subtree.
1761 perf_event_groups_next(struct perf_event *event) in perf_event_groups_next() argument
1764 .cpu = event->cpu, in perf_event_groups_next()
1765 .cgroup = event_cgroup(event), in perf_event_groups_next()
1769 next = rb_next_match(&key, &event->group_node, __group_cmp); in perf_event_groups_next()
1779 #define perf_event_groups_for_each(event, groups) \ argument
1780 for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
1781 typeof(*event), group_node); event; \
1782 event = rb_entry_safe(rb_next(&event->group_node), \
1783 typeof(*event), group_node))
1786 * Add an event to the lists for its context.
1790 list_add_event(struct perf_event *event, struct perf_event_context *ctx) in list_add_event() argument
1794 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); in list_add_event()
1795 event->attach_state |= PERF_ATTACH_CONTEXT; in list_add_event()
1797 event->tstamp = perf_event_time(event); in list_add_event()
1800 * If we're a stand alone event or group leader, we go to the context in list_add_event()
1804 if (event->group_leader == event) { in list_add_event()
1805 event->group_caps = event->event_caps; in list_add_event()
1806 add_event_to_groups(event, ctx); in list_add_event()
1809 list_add_rcu(&event->event_entry, &ctx->event_list); in list_add_event()
1811 if (event->attr.inherit_stat) in list_add_event()
1814 if (event->state > PERF_EVENT_STATE_OFF) in list_add_event()
1815 perf_cgroup_event_enable(event, ctx); in list_add_event()
1821 * Initialize event state based on the perf_event_attr::disabled.
1823 static inline void perf_event__state_init(struct perf_event *event) in perf_event__state_init() argument
1825 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : in perf_event__state_init()
1829 static void __perf_event_read_size(struct perf_event *event, int nr_siblings) in __perf_event_read_size() argument
1835 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) in __perf_event_read_size()
1838 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) in __perf_event_read_size()
1841 if (event->attr.read_format & PERF_FORMAT_ID) in __perf_event_read_size()
1844 if (event->attr.read_format & PERF_FORMAT_GROUP) { in __perf_event_read_size()
1850 event->read_size = size; in __perf_event_read_size()
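The size computed here mirrors the read(2) layout documented for perf_event_open(2); a sketch of the group form for reference (the struct name is illustrative, and the newer PERF_FORMAT_LOST field is omitted):

#include <linux/types.h>

struct read_format_group {              /* with PERF_FORMAT_GROUP */
        __u64 nr;                       /* number of events in the group */
        __u64 time_enabled;             /* if PERF_FORMAT_TOTAL_TIME_ENABLED */
        __u64 time_running;             /* if PERF_FORMAT_TOTAL_TIME_RUNNING */
        struct {
                __u64 value;
                __u64 id;               /* if PERF_FORMAT_ID */
        } values[];                     /* one entry per event */
};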
1853 static void __perf_event_header_size(struct perf_event *event, u64 sample_type) in __perf_event_header_size() argument
1871 size += event->read_size; in __perf_event_header_size()
1891 event->header_size = size; in __perf_event_header_size()
1898 static void perf_event__header_size(struct perf_event *event) in perf_event__header_size() argument
1900 __perf_event_read_size(event, in perf_event__header_size()
1901 event->group_leader->nr_siblings); in perf_event__header_size()
1902 __perf_event_header_size(event, event->attr.sample_type); in perf_event__header_size()
1905 static void perf_event__id_header_size(struct perf_event *event) in perf_event__id_header_size() argument
1908 u64 sample_type = event->attr.sample_type; in perf_event__id_header_size()
1929 event->id_header_size = size; in perf_event__id_header_size()
1932 static bool perf_event_validate_size(struct perf_event *event) in perf_event_validate_size() argument
1936 * attach the event. in perf_event_validate_size()
1938 __perf_event_read_size(event, event->group_leader->nr_siblings + 1); in perf_event_validate_size()
1939 __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); in perf_event_validate_size()
1940 perf_event__id_header_size(event); in perf_event_validate_size()
1946 if (event->read_size + event->header_size + in perf_event_validate_size()
1947 event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) in perf_event_validate_size()
1953 static void perf_group_attach(struct perf_event *event) in perf_group_attach() argument
1955 struct perf_event *group_leader = event->group_leader, *pos; in perf_group_attach()
1957 lockdep_assert_held(&event->ctx->lock); in perf_group_attach()
1962 if (event->attach_state & PERF_ATTACH_GROUP) in perf_group_attach()
1965 event->attach_state |= PERF_ATTACH_GROUP; in perf_group_attach()
1967 if (group_leader == event) in perf_group_attach()
1970 WARN_ON_ONCE(group_leader->ctx != event->ctx); in perf_group_attach()
1972 group_leader->group_caps &= event->event_caps; in perf_group_attach()
1974 list_add_tail(&event->sibling_list, &group_leader->sibling_list); in perf_group_attach()
1984 * Remove an event from the lists for its context.
1988 list_del_event(struct perf_event *event, struct perf_event_context *ctx) in list_del_event() argument
1990 WARN_ON_ONCE(event->ctx != ctx); in list_del_event()
1996 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) in list_del_event()
1999 event->attach_state &= ~PERF_ATTACH_CONTEXT; in list_del_event()
2002 if (event->attr.inherit_stat) in list_del_event()
2005 list_del_rcu(&event->event_entry); in list_del_event()
2007 if (event->group_leader == event) in list_del_event()
2008 del_event_from_groups(event, ctx); in list_del_event()
2011 * If the event was in an error state, then keep it in list_del_event()
2015 * of the event in list_del_event()
2017 if (event->state > PERF_EVENT_STATE_OFF) { in list_del_event()
2018 perf_cgroup_event_disable(event, ctx); in list_del_event()
2019 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in list_del_event()
2026 perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event) in perf_aux_output_match() argument
2031 if (!event->pmu->aux_output_match) in perf_aux_output_match()
2034 return event->pmu->aux_output_match(aux_event); in perf_aux_output_match()
2037 static void put_event(struct perf_event *event);
2038 static void event_sched_out(struct perf_event *event,
2042 static void perf_put_aux_event(struct perf_event *event) in perf_put_aux_event() argument
2044 struct perf_event_context *ctx = event->ctx; in perf_put_aux_event()
2049 * If the event uses aux_event, tear down the link in perf_put_aux_event()
2051 if (event->aux_event) { in perf_put_aux_event()
2052 iter = event->aux_event; in perf_put_aux_event()
2053 event->aux_event = NULL; in perf_put_aux_event()
2059 * If the event is an aux_event, tear down all links to in perf_put_aux_event()
2062 for_each_sibling_event(iter, event->group_leader) { in perf_put_aux_event()
2063 if (iter->aux_event != event) in perf_put_aux_event()
2067 put_event(event); in perf_put_aux_event()
2075 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_put_aux_event()
2079 static bool perf_need_aux_event(struct perf_event *event) in perf_need_aux_event() argument
2081 return !!event->attr.aux_output || !!event->attr.aux_sample_size; in perf_need_aux_event()
2084 static int perf_get_aux_event(struct perf_event *event, in perf_get_aux_event() argument
2088 * Our group leader must be an aux event if we want to be in perf_get_aux_event()
2089 * an aux_output. This way, the aux event will precede its in perf_get_aux_event()
2099 if (event->attr.aux_output && event->attr.aux_sample_size) in perf_get_aux_event()
2102 if (event->attr.aux_output && in perf_get_aux_event()
2103 !perf_aux_output_match(event, group_leader)) in perf_get_aux_event()
2106 if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux) in perf_get_aux_event()
2113 * Link aux_outputs to their aux event; this is undone in in perf_get_aux_event()
2118 event->aux_event = group_leader; in perf_get_aux_event()
2123 static inline struct list_head *get_event_list(struct perf_event *event) in get_event_list() argument
2125 struct perf_event_context *ctx = event->ctx; in get_event_list()
2126 return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active; in get_event_list()
2135 static inline void perf_remove_sibling_event(struct perf_event *event) in perf_remove_sibling_event() argument
2137 struct perf_event_context *ctx = event->ctx; in perf_remove_sibling_event()
2140 event_sched_out(event, cpuctx, ctx); in perf_remove_sibling_event()
2141 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_remove_sibling_event()
2144 static void perf_group_detach(struct perf_event *event) in perf_group_detach() argument
2146 struct perf_event *leader = event->group_leader; in perf_group_detach()
2148 struct perf_event_context *ctx = event->ctx; in perf_group_detach()
2155 if (!(event->attach_state & PERF_ATTACH_GROUP)) in perf_group_detach()
2158 event->attach_state &= ~PERF_ATTACH_GROUP; in perf_group_detach()
2160 perf_put_aux_event(event); in perf_group_detach()
2165 if (leader != event) { in perf_group_detach()
2166 list_del_init(&event->sibling_list); in perf_group_detach()
2167 event->group_leader->nr_siblings--; in perf_group_detach()
2172 * If this was a group event with sibling events then in perf_group_detach()
2176 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { in perf_group_detach()
2185 sibling->group_caps = event->group_caps; in perf_group_detach()
2187 if (!RB_EMPTY_NODE(&event->group_node)) { in perf_group_detach()
2188 add_event_to_groups(sibling, event->ctx); in perf_group_detach()
2194 WARN_ON_ONCE(sibling->ctx != event->ctx); in perf_group_detach()
2206 static void perf_child_detach(struct perf_event *event) in perf_child_detach() argument
2208 struct perf_event *parent_event = event->parent; in perf_child_detach()
2210 if (!(event->attach_state & PERF_ATTACH_CHILD)) in perf_child_detach()
2213 event->attach_state &= ~PERF_ATTACH_CHILD; in perf_child_detach()
2220 sync_child_event(event); in perf_child_detach()
2221 list_del_init(&event->child_list); in perf_child_detach()
2224 static bool is_orphaned_event(struct perf_event *event) in is_orphaned_event() argument
2226 return event->state == PERF_EVENT_STATE_DEAD; in is_orphaned_event()
2229 static inline int __pmu_filter_match(struct perf_event *event) in __pmu_filter_match() argument
2231 struct pmu *pmu = event->pmu; in __pmu_filter_match()
2232 return pmu->filter_match ? pmu->filter_match(event) : 1; in __pmu_filter_match()
2236 * Check whether we should attempt to schedule an event group based on
2237 * PMU-specific filtering. An event group can consist of HW and SW events,
2241 static inline int pmu_filter_match(struct perf_event *event) in pmu_filter_match() argument
2245 if (!__pmu_filter_match(event)) in pmu_filter_match()
2248 for_each_sibling_event(sibling, event) { in pmu_filter_match()
2257 event_filter_match(struct perf_event *event) in event_filter_match() argument
2259 return (event->cpu == -1 || event->cpu == smp_processor_id()) && in event_filter_match()
2260 perf_cgroup_match(event) && pmu_filter_match(event); in event_filter_match()
2264 event_sched_out(struct perf_event *event, in event_sched_out() argument
2270 WARN_ON_ONCE(event->ctx != ctx); in event_sched_out()
2273 if (event->state != PERF_EVENT_STATE_ACTIVE) in event_sched_out()
2281 list_del_init(&event->active_list); in event_sched_out()
2283 perf_pmu_disable(event->pmu); in event_sched_out()
2285 event->pmu->del(event, 0); in event_sched_out()
2286 event->oncpu = -1; in event_sched_out()
2288 if (READ_ONCE(event->pending_disable) >= 0) { in event_sched_out()
2289 WRITE_ONCE(event->pending_disable, -1); in event_sched_out()
2290 perf_cgroup_event_disable(event, ctx); in event_sched_out()
2293 perf_event_set_state(event, state); in event_sched_out()
2295 if (!is_software_event(event)) in event_sched_out()
2299 if (event->attr.freq && event->attr.sample_freq) in event_sched_out()
2301 if (event->attr.exclusive || !cpuctx->active_oncpu) in event_sched_out()
2304 perf_pmu_enable(event->pmu); in event_sched_out()
2312 struct perf_event *event; in group_sched_out() local
2324 for_each_sibling_event(event, group_event) in group_sched_out()
2325 event_sched_out(event, cpuctx, ctx); in group_sched_out()
2334 * Cross CPU call to remove a performance event
2336 * We disable the event on the hardware level first. After that we
2340 __perf_remove_from_context(struct perf_event *event, in __perf_remove_from_context() argument
2352 event_sched_out(event, cpuctx, ctx); in __perf_remove_from_context()
2354 perf_group_detach(event); in __perf_remove_from_context()
2356 perf_child_detach(event); in __perf_remove_from_context()
2357 list_del_event(event, ctx); in __perf_remove_from_context()
2370 * Remove the event from a task's (or a CPU's) list of events.
2372 * If event->ctx is a cloned context, callers must make sure that
2373 * every task struct that event->ctx->task could possibly point to
2379 static void perf_remove_from_context(struct perf_event *event, unsigned long flags) in perf_remove_from_context() argument
2381 struct perf_event_context *ctx = event->ctx; in perf_remove_from_context()
2392 __perf_remove_from_context(event, __get_cpu_context(ctx), in perf_remove_from_context()
2399 event_function_call(event, __perf_remove_from_context, (void *)flags); in perf_remove_from_context()
2403 * Cross CPU call to disable a performance event
2405 static void __perf_event_disable(struct perf_event *event, in __perf_event_disable() argument
2410 if (event->state < PERF_EVENT_STATE_INACTIVE) in __perf_event_disable()
2415 update_cgrp_time_from_event(event); in __perf_event_disable()
2418 if (event == event->group_leader) in __perf_event_disable()
2419 group_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2421 event_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2423 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in __perf_event_disable()
2424 perf_cgroup_event_disable(event, ctx); in __perf_event_disable()
2428 * Disable an event.
2430 * If event->ctx is a cloned context, callers must make sure that
2431 * every task struct that event->ctx->task could possibly point to
2434 * hold the top-level event's child_mutex, so any descendant that
2437 * When called from perf_pending_event it's OK because event->ctx
2441 static void _perf_event_disable(struct perf_event *event) in _perf_event_disable() argument
2443 struct perf_event_context *ctx = event->ctx; in _perf_event_disable()
2446 if (event->state <= PERF_EVENT_STATE_OFF) { in _perf_event_disable()
2452 event_function_call(event, __perf_event_disable, NULL); in _perf_event_disable()
2455 void perf_event_disable_local(struct perf_event *event) in perf_event_disable_local() argument
2457 event_function_local(event, __perf_event_disable, NULL); in perf_event_disable_local()
2464 void perf_event_disable(struct perf_event *event) in perf_event_disable() argument
2468 ctx = perf_event_ctx_lock(event); in perf_event_disable()
2469 _perf_event_disable(event); in perf_event_disable()
2470 perf_event_ctx_unlock(event, ctx); in perf_event_disable()
2474 void perf_event_disable_inatomic(struct perf_event *event) in perf_event_disable_inatomic() argument
2476 WRITE_ONCE(event->pending_disable, smp_processor_id()); in perf_event_disable_inatomic()
2478 irq_work_queue(&event->pending); in perf_event_disable_inatomic()
2481 static void perf_set_shadow_time(struct perf_event *event, in perf_set_shadow_time() argument
2499 * - event is guaranteed scheduled in in perf_set_shadow_time()
2509 if (is_cgroup_event(event)) in perf_set_shadow_time()
2510 perf_cgroup_set_shadow_time(event, event->tstamp); in perf_set_shadow_time()
2512 event->shadow_ctx_time = event->tstamp - ctx->timestamp; in perf_set_shadow_time()
2517 static void perf_log_throttle(struct perf_event *event, int enable);
2518 static void perf_log_itrace_start(struct perf_event *event);
2521 event_sched_in(struct perf_event *event, in event_sched_in() argument
2527 WARN_ON_ONCE(event->ctx != ctx); in event_sched_in()
2531 if (event->state <= PERF_EVENT_STATE_OFF) in event_sched_in()
2534 WRITE_ONCE(event->oncpu, smp_processor_id()); in event_sched_in()
2536 * Order event::oncpu write to happen before the ACTIVE state is in event_sched_in()
2541 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE); in event_sched_in()
2548 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { in event_sched_in()
2549 perf_log_throttle(event, 1); in event_sched_in()
2550 event->hw.interrupts = 0; in event_sched_in()
2553 perf_pmu_disable(event->pmu); in event_sched_in()
2555 perf_set_shadow_time(event, ctx); in event_sched_in()
2557 perf_log_itrace_start(event); in event_sched_in()
2559 if (event->pmu->add(event, PERF_EF_START)) { in event_sched_in()
2560 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_sched_in()
2561 event->oncpu = -1; in event_sched_in()
2566 if (!is_software_event(event)) in event_sched_in()
2570 if (event->attr.freq && event->attr.sample_freq) in event_sched_in()
2573 if (event->attr.exclusive) in event_sched_in()
2577 perf_pmu_enable(event->pmu); in event_sched_in()
2587 struct perf_event *event, *partial_group = NULL; in group_sched_in() local
2601 for_each_sibling_event(event, group_event) { in group_sched_in()
2602 if (event_sched_in(event, cpuctx, ctx)) { in group_sched_in()
2603 partial_group = event; in group_sched_in()
2615 * The events up to the failed event are scheduled out normally. in group_sched_in()
2617 for_each_sibling_event(event, group_event) { in group_sched_in()
2618 if (event == partial_group) in group_sched_in()
2621 event_sched_out(event, cpuctx, ctx); in group_sched_in()
2631 * Work out whether we can put this event group on the CPU now.
2633 static int group_can_go_on(struct perf_event *event, in group_can_go_on() argument
2640 if (event->group_caps & PERF_EV_CAP_SOFTWARE) in group_can_go_on()
2652 if (event->attr.exclusive && !list_empty(get_event_list(event))) in group_can_go_on()
2661 static void add_event_to_ctx(struct perf_event *event, in add_event_to_ctx() argument
2664 list_add_event(event, ctx); in add_event_to_ctx()
2665 perf_group_attach(event); in add_event_to_ctx()
2710 * time an event is added, only do it for the groups of equal priority and
2764 * Cross CPU call to install and enable a performance event
2771 struct perf_event *event = info; in __perf_install_in_context() local
2772 struct perf_event_context *ctx = event->ctx; in __perf_install_in_context()
2803 if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) { in __perf_install_in_context()
2805 * If the current cgroup doesn't match the event's in __perf_install_in_context()
2810 event->cgrp->css.cgroup); in __perf_install_in_context()
2816 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2817 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_install_in_context()
2819 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2828 static bool exclusive_event_installable(struct perf_event *event,
2832 * Attach a performance event to a context.
2838 struct perf_event *event, in perf_install_in_context() argument
2845 WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); in perf_install_in_context()
2847 if (event->cpu != -1) in perf_install_in_context()
2848 event->cpu = cpu; in perf_install_in_context()
2851 * Ensures that if we can observe event->ctx, both the event and ctx in perf_install_in_context()
2854 smp_store_release(&event->ctx, ctx); in perf_install_in_context()
2858 * without IPI. Except when this is the first event for the context, in in perf_install_in_context()
2862 * event will issue the IPI and reprogram the hardware. in perf_install_in_context()
2864 if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && ctx->nr_events) { in perf_install_in_context()
2870 add_event_to_ctx(event, ctx); in perf_install_in_context()
2876 cpu_function_call(cpu, __perf_install_in_context, event); in perf_install_in_context()
2918 if (!task_function_call(task, __perf_install_in_context, event)) in perf_install_in_context()
2934 * thus we can safely install the event. in perf_install_in_context()
2940 add_event_to_ctx(event, ctx); in perf_install_in_context()
2945 * Cross CPU call to enable a performance event
2947 static void __perf_event_enable(struct perf_event *event, in __perf_event_enable() argument
2952 struct perf_event *leader = event->group_leader; in __perf_event_enable()
2955 if (event->state >= PERF_EVENT_STATE_INACTIVE || in __perf_event_enable()
2956 event->state <= PERF_EVENT_STATE_ERROR) in __perf_event_enable()
2962 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in __perf_event_enable()
2963 perf_cgroup_event_enable(event, ctx); in __perf_event_enable()
2968 if (!event_filter_match(event)) { in __perf_event_enable()
2974 * If the event is in a group and isn't the group leader, in __perf_event_enable()
2977 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) { in __perf_event_enable()
2986 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_event_enable()
2990 * Enable an event.
2992 * If event->ctx is a cloned context, callers must make sure that
2993 * every task struct that event->ctx->task could possibly point to
2998 static void _perf_event_enable(struct perf_event *event) in _perf_event_enable() argument
3000 struct perf_event_context *ctx = event->ctx; in _perf_event_enable()
3003 if (event->state >= PERF_EVENT_STATE_INACTIVE || in _perf_event_enable()
3004 event->state < PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
3011 * If the event is in error state, clear that first. in _perf_event_enable()
3013 * That way, if we see the event in error state below, we know that it in _perf_event_enable()
3017 if (event->state == PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
3021 if (event->event_caps & PERF_EV_CAP_SIBLING && in _perf_event_enable()
3022 event->group_leader == event) in _perf_event_enable()
3025 event->state = PERF_EVENT_STATE_OFF; in _perf_event_enable()
3029 event_function_call(event, __perf_event_enable, NULL); in _perf_event_enable()
3035 void perf_event_enable(struct perf_event *event) in perf_event_enable() argument
3039 ctx = perf_event_ctx_lock(event); in perf_event_enable()
3040 _perf_event_enable(event); in perf_event_enable()
3041 perf_event_ctx_unlock(event, ctx); in perf_event_enable()
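A minimal sketch (not from core.c) of how a kernel user of these exported helpers might pause counting around a region it does not want measured; the event is assumed to be a counter it owns, and the call is made from process context since both helpers take ctx::mutex:

#include <linux/perf_event.h>

static void do_unmeasured_work(struct perf_event *event)
{
        perf_event_disable(event);      /* cross-calls the owning CPU if needed */
        /* ... work that should not contribute to the count ... */
        perf_event_enable(event);
}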
3046 struct perf_event *event; member
3053 struct perf_event *event = sd->event; in __perf_event_stop() local
3056 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in __perf_event_stop()
3064 * so we need to check again lest we try to stop another CPU's event. in __perf_event_stop()
3066 if (READ_ONCE(event->oncpu) != smp_processor_id()) in __perf_event_stop()
3069 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_stop()
3077 * Since this is happening on an event-local CPU, no trace is lost in __perf_event_stop()
3081 event->pmu->start(event, 0); in __perf_event_stop()
3086 static int perf_event_stop(struct perf_event *event, int restart) in perf_event_stop() argument
3089 .event = event, in perf_event_stop()
3095 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in perf_event_stop()
3102 * We only want to restart ACTIVE events, so if the event goes in perf_event_stop()
3103 * inactive here (event->oncpu==-1), there's nothing more to do; in perf_event_stop()
3106 ret = cpu_function_call(READ_ONCE(event->oncpu), in perf_event_stop()
3119 * event::addr_filter_ranges array and bump the event::addr_filters_gen;
3120 * (p2) when an event is scheduled in (pmu::add), it calls
3124 * If (p1) happens while the event is active, we restart it to force (p2).
3135 void perf_event_addr_filters_sync(struct perf_event *event) in perf_event_addr_filters_sync() argument
3137 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_sync()
3139 if (!has_addr_filter(event)) in perf_event_addr_filters_sync()
3143 if (event->addr_filters_gen != event->hw.addr_filters_gen) { in perf_event_addr_filters_sync()
3144 event->pmu->addr_filters_sync(event); in perf_event_addr_filters_sync()
3145 event->hw.addr_filters_gen = event->addr_filters_gen; in perf_event_addr_filters_sync()
3151 static int _perf_event_refresh(struct perf_event *event, int refresh) in _perf_event_refresh() argument
3156 if (event->attr.inherit || !is_sampling_event(event)) in _perf_event_refresh()
3159 atomic_add(refresh, &event->event_limit); in _perf_event_refresh()
3160 _perf_event_enable(event); in _perf_event_refresh()
3168 int perf_event_refresh(struct perf_event *event, int refresh) in perf_event_refresh() argument
3173 ctx = perf_event_ctx_lock(event); in perf_event_refresh()
3174 ret = _perf_event_refresh(event, refresh); in perf_event_refresh()
3175 perf_event_ctx_unlock(event, ctx); in perf_event_refresh()
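The refresh count is normally driven from userspace; a hedged sketch of the ioctl that ends up in perf_event_refresh(), re-arming a sampling event for n more overflows before it auto-disables again (the wrapper name is illustrative):

#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int rearm_event(int perf_fd, int n)
{
        /* Adds n to the event's overflow budget and re-enables it;
         * fails for inherited or non-sampling events. */
        return ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, n);
}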
3196 static int perf_event_modify_attr(struct perf_event *event, in perf_event_modify_attr() argument
3203 if (event->attr.type != attr->type) in perf_event_modify_attr()
3206 switch (event->attr.type) { in perf_event_modify_attr()
3215 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_modify_attr()
3217 mutex_lock(&event->child_mutex); in perf_event_modify_attr()
3218 err = func(event, attr); in perf_event_modify_attr()
3221 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_modify_attr()
3227 mutex_unlock(&event->child_mutex); in perf_event_modify_attr()
3235 struct perf_event *event, *tmp; in ctx_sched_out() local
3283 list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list) in ctx_sched_out()
3284 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
3288 list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list) in ctx_sched_out()
3289 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
3339 static void __perf_event_sync_stat(struct perf_event *event, in __perf_event_sync_stat() argument
3344 if (!event->attr.inherit_stat) in __perf_event_sync_stat()
3348 * Update the event value, we cannot use perf_event_read() in __perf_event_sync_stat()
3351 * we know the event must be on the current CPU, therefore we in __perf_event_sync_stat()
3354 if (event->state == PERF_EVENT_STATE_ACTIVE) in __perf_event_sync_stat()
3355 event->pmu->read(event); in __perf_event_sync_stat()
3357 perf_event_update_time(event); in __perf_event_sync_stat()
3360 * In order to keep per-task stats reliable we need to flip the event in __perf_event_sync_stat()
3364 value = local64_xchg(&event->count, value); in __perf_event_sync_stat()
3367 swap(event->total_time_enabled, next_event->total_time_enabled); in __perf_event_sync_stat()
3368 swap(event->total_time_running, next_event->total_time_running); in __perf_event_sync_stat()
3373 perf_event_update_userpage(event); in __perf_event_sync_stat()
3380 struct perf_event *event, *next_event; in perf_event_sync_stat() local
3387 event = list_first_entry(&ctx->event_list, in perf_event_sync_stat()
3393 while (&event->event_entry != &ctx->event_list && in perf_event_sync_stat()
3396 __perf_event_sync_stat(event, next_event); in perf_event_sync_stat()
3398 event = list_next_entry(event, event_entry); in perf_event_sync_stat()
3528 * This callback is relevant even to per-cpu events; for example multi event
3578 * We stop each event and update the event value in event->count.
3581 * sets the disabled bit in the control field of event _before_
3582 * accessing the event control register. If an NMI hits, then it will
3583 * not restart the event.
3602 * cgroup events are system-wide mode only in __perf_event_task_sched_out()
3638 static void __heap_add(struct min_heap *heap, struct perf_event *event) in __heap_add() argument
3642 if (event) { in __heap_add()
3643 itrs[heap->nr] = event; in __heap_add()
3656 /* Space for per CPU and/or any CPU event iterators. */ in visit_groups_merge()
3710 static inline bool event_update_userpage(struct perf_event *event) in event_update_userpage() argument
3712 if (likely(!atomic_read(&event->mmap_count))) in event_update_userpage()
3715 perf_event_update_time(event); in event_update_userpage()
3716 perf_set_shadow_time(event, event->ctx); in event_update_userpage()
3717 perf_event_update_userpage(event); in event_update_userpage()
3724 struct perf_event *event; in group_update_userpage() local
3729 for_each_sibling_event(event, group_event) in group_update_userpage()
3730 event_update_userpage(event); in group_update_userpage()
3733 static int merge_sched_in(struct perf_event *event, void *data) in merge_sched_in() argument
3735 struct perf_event_context *ctx = event->ctx; in merge_sched_in()
3739 if (event->state <= PERF_EVENT_STATE_OFF) in merge_sched_in()
3742 if (!event_filter_match(event)) in merge_sched_in()
3745 if (group_can_go_on(event, cpuctx, *can_add_hw)) { in merge_sched_in()
3746 if (!group_sched_in(event, cpuctx, ctx)) in merge_sched_in()
3747 list_add_tail(&event->active_list, get_event_list(event)); in merge_sched_in()
3750 if (event->state == PERF_EVENT_STATE_INACTIVE) { in merge_sched_in()
3752 if (event->attr.pinned) { in merge_sched_in()
3753 perf_cgroup_event_disable(event, ctx); in merge_sched_in()
3754 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in merge_sched_in()
3758 group_update_userpage(event); in merge_sched_in()
3899 * We restore the event value and then enable it.
3902 * sets the enabled bit in the control field of event _before_
3903 * accessing the event control register. If an NMI hits, then it will
3904 * keep the event running.
3914 * to switch in PMU state; cgroup events are system-wide mode only. in __perf_event_task_sched_in()
3937 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) in perf_calculate_period() argument
3939 u64 frequency = event->attr.sample_freq; in perf_calculate_period()
4013 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) in perf_adjust_period() argument
4015 struct hw_perf_event *hwc = &event->hw; in perf_adjust_period()
4019 period = perf_calculate_period(event, nsec, count); in perf_adjust_period()
4033 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_period()
4038 event->pmu->start(event, PERF_EF_RELOAD); in perf_adjust_period()
4050 struct perf_event *event; in perf_adjust_freq_unthr_context() local
4066 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_adjust_freq_unthr_context()
4067 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_adjust_freq_unthr_context()
4070 if (!event_filter_match(event)) in perf_adjust_freq_unthr_context()
4073 perf_pmu_disable(event->pmu); in perf_adjust_freq_unthr_context()
4075 hwc = &event->hw; in perf_adjust_freq_unthr_context()
4079 perf_log_throttle(event, 1); in perf_adjust_freq_unthr_context()
4080 event->pmu->start(event, 0); in perf_adjust_freq_unthr_context()
4083 if (!event->attr.freq || !event->attr.sample_freq) in perf_adjust_freq_unthr_context()
4087 * stop the event and update event->count in perf_adjust_freq_unthr_context()
4089 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_freq_unthr_context()
4091 now = local64_read(&event->count); in perf_adjust_freq_unthr_context()
4096 * restart the event in perf_adjust_freq_unthr_context()
4098 * we have stopped the event so tell that in perf_adjust_freq_unthr_context()
4103 perf_adjust_period(event, period, delta, false); in perf_adjust_freq_unthr_context()
4105 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); in perf_adjust_freq_unthr_context()
4107 perf_pmu_enable(event->pmu); in perf_adjust_freq_unthr_context()
4115 * Move @event to the tail of @ctx's eligible events.
4117 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) in rotate_ctx() argument
4126 perf_event_groups_delete(&ctx->flexible_groups, event); in rotate_ctx()
4127 perf_event_groups_insert(&ctx->flexible_groups, event); in rotate_ctx()
4130 /* pick an event from the flexible_groups to rotate */
4134 struct perf_event *event; in ctx_event_to_rotate() local
4136 /* pick the first active flexible event */ in ctx_event_to_rotate()
4137 event = list_first_entry_or_null(&ctx->flexible_active, in ctx_event_to_rotate()
4140 /* if no active flexible event, pick the first event */ in ctx_event_to_rotate()
4141 if (!event) { in ctx_event_to_rotate()
4142 event = rb_entry_safe(rb_first(&ctx->flexible_groups.tree), in ctx_event_to_rotate()
4143 typeof(*event), group_node); in ctx_event_to_rotate()
4152 return event; in ctx_event_to_rotate()
4163 * events, thus the event count values are stable. in perf_rotate_context()
4219 static int event_enable_on_exec(struct perf_event *event, in event_enable_on_exec() argument
4222 if (!event->attr.enable_on_exec) in event_enable_on_exec()
4225 event->attr.enable_on_exec = 0; in event_enable_on_exec()
4226 if (event->state >= PERF_EVENT_STATE_INACTIVE) in event_enable_on_exec()
4229 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_enable_on_exec()
4243 struct perf_event *event; in perf_event_enable_on_exec() local
4255 list_for_each_entry(event, &ctx->event_list, event_entry) { in perf_event_enable_on_exec()
4256 enabled |= event_enable_on_exec(event, ctx); in perf_event_enable_on_exec()
4257 event_type |= get_event_type(event); in perf_event_enable_on_exec()
4261 * Unclone and reschedule this context if we enabled any event. in perf_event_enable_on_exec()
4278 static void perf_remove_from_owner(struct perf_event *event);
4279 static void perf_event_exit_event(struct perf_event *event,
4289 struct perf_event *event, *next; in perf_event_remove_on_exec() local
4303 list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) { in perf_event_remove_on_exec()
4304 if (!event->attr.remove_on_exec) in perf_event_remove_on_exec()
4307 if (!is_kernel_event(event)) in perf_event_remove_on_exec()
4308 perf_remove_from_owner(event); in perf_event_remove_on_exec()
4312 perf_event_exit_event(event, ctx); in perf_event_remove_on_exec()
4330 struct perf_event *event; member
4335 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu) in __perf_event_read_cpu() argument
4339 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { in __perf_event_read_cpu()
4353 * Cross CPU call to read the hardware event
4358 struct perf_event *sub, *event = data->event; in __perf_event_read() local
4359 struct perf_event_context *ctx = event->ctx; in __perf_event_read()
4361 struct pmu *pmu = event->pmu; in __perf_event_read()
4367 * event->count would have been updated to a recent sample in __perf_event_read()
4368 * when the event was scheduled out. in __perf_event_read()
4376 update_cgrp_time_from_event(event); in __perf_event_read()
4379 perf_event_update_time(event); in __perf_event_read()
4381 perf_event_update_sibling_time(event); in __perf_event_read()
4383 if (event->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_read()
4387 pmu->read(event); in __perf_event_read()
4394 pmu->read(event); in __perf_event_read()
4396 for_each_sibling_event(sub, event) { in __perf_event_read()
4399 * Use sibling's PMU rather than @event's since in __perf_event_read()
4412 static inline u64 perf_event_count(struct perf_event *event) in perf_event_count() argument
4414 return local64_read(&event->count) + atomic64_read(&event->child_count); in perf_event_count()
4418 * NMI-safe method to read a local event, that is, an event that
4425 int perf_event_read_local(struct perf_event *event, u64 *value, in perf_event_read_local() argument
4438 * It must not be an event with inherit set, we cannot read in perf_event_read_local()
4441 if (event->attr.inherit) { in perf_event_read_local()
4446 /* If this is a per-task event, it must be for current */ in perf_event_read_local()
4447 if ((event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4448 event->hw.target != current) { in perf_event_read_local()
4453 /* If this is a per-CPU event, it must be for this CPU */ in perf_event_read_local()
4454 if (!(event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4455 event->cpu != smp_processor_id()) { in perf_event_read_local()
4460 /* If this is a pinned event it must be running on this CPU */ in perf_event_read_local()
4461 if (event->attr.pinned && event->oncpu != smp_processor_id()) { in perf_event_read_local()
4467 * If the event is currently on this CPU, it's either a per-task event, in perf_event_read_local()
4471 if (event->oncpu == smp_processor_id()) in perf_event_read_local()
4472 event->pmu->read(event); in perf_event_read_local()
4474 *value = local64_read(&event->count); in perf_event_read_local()
4476 u64 now = event->shadow_ctx_time + perf_clock(); in perf_event_read_local()
4479 __perf_update_times(event, now, &__enabled, &__running); in perf_event_read_local()
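A minimal sketch (not from core.c) of calling the NMI-safe reader above from kernel code; the counter is assumed to be bound to the current task or to this CPU, e.g. one created elsewhere with perf_event_create_kernel_counter(), and the wrapper name is made up:

#include <linux/perf_event.h>

static u64 read_local_counter(struct perf_event *event)
{
        u64 value = 0;

        /* Fails for inherited events or events not local to the current
         * task/CPU; the enabled/running pointers may be NULL. */
        if (perf_event_read_local(event, &value, NULL, NULL))
                return 0;

        return value;
}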
4491 static int perf_event_read(struct perf_event *event, bool group) in perf_event_read() argument
4493 enum perf_event_state state = READ_ONCE(event->state); in perf_event_read()
4497 * If event is enabled and currently active on a CPU, update the in perf_event_read()
4498 * value in the event structure: in perf_event_read()
4512 event_cpu = READ_ONCE(event->oncpu); in perf_event_read()
4517 .event = event, in perf_event_read()
4523 event_cpu = __perf_event_read_cpu(event, event_cpu); in perf_event_read()
4529 * If event_cpu isn't a valid CPU it means the event got in perf_event_read()
4530 * scheduled out and that will have updated the event count. in perf_event_read()
4532 * Therefore, either way, we'll have an up-to-date event count in perf_event_read()
4540 struct perf_event_context *ctx = event->ctx; in perf_event_read()
4544 state = event->state; in perf_event_read()
4556 update_cgrp_time_from_event(event); in perf_event_read()
4559 perf_event_update_time(event); in perf_event_read()
4561 perf_event_update_sibling_time(event); in perf_event_read()
4626 struct perf_event *event) in find_get_context() argument
4633 int cpu = event->cpu; in find_get_context()
4636 /* Must be root to operate on a CPU event: */ in find_get_context()
4637 err = perf_allow_cpu(&event->attr); in find_get_context()
4656 if (event->attach_state & PERF_ATTACH_TASK_DATA) { in find_get_context()
4723 static void perf_event_free_filter(struct perf_event *event);
4727 struct perf_event *event; in free_event_rcu() local
4729 event = container_of(head, struct perf_event, rcu_head); in free_event_rcu()
4730 if (event->ns) in free_event_rcu()
4731 put_pid_ns(event->ns); in free_event_rcu()
4732 perf_event_free_filter(event); in free_event_rcu()
4733 kmem_cache_free(perf_event_cache, event); in free_event_rcu()
4736 static void ring_buffer_attach(struct perf_event *event,
4739 static void detach_sb_event(struct perf_event *event) in detach_sb_event() argument
4741 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in detach_sb_event()
4744 list_del_rcu(&event->sb_list); in detach_sb_event()
4748 static bool is_sb_event(struct perf_event *event) in is_sb_event() argument
4750 struct perf_event_attr *attr = &event->attr; in is_sb_event()
4752 if (event->parent) in is_sb_event()
4755 if (event->attach_state & PERF_ATTACH_TASK) in is_sb_event()
4767 static void unaccount_pmu_sb_event(struct perf_event *event) in unaccount_pmu_sb_event() argument
4769 if (is_sb_event(event)) in unaccount_pmu_sb_event()
4770 detach_sb_event(event); in unaccount_pmu_sb_event()
4773 static void unaccount_event_cpu(struct perf_event *event, int cpu) in unaccount_event_cpu() argument
4775 if (event->parent) in unaccount_event_cpu()
4778 if (is_cgroup_event(event)) in unaccount_event_cpu()
4804 static void unaccount_event(struct perf_event *event) in unaccount_event() argument
4808 if (event->parent) in unaccount_event()
4811 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in unaccount_event()
4813 if (event->attr.mmap || event->attr.mmap_data) in unaccount_event()
4815 if (event->attr.build_id) in unaccount_event()
4817 if (event->attr.comm) in unaccount_event()
4819 if (event->attr.namespaces) in unaccount_event()
4821 if (event->attr.cgroup) in unaccount_event()
4823 if (event->attr.task) in unaccount_event()
4825 if (event->attr.freq) in unaccount_event()
4827 if (event->attr.context_switch) { in unaccount_event()
4831 if (is_cgroup_event(event)) in unaccount_event()
4833 if (has_branch_stack(event)) in unaccount_event()
4835 if (event->attr.ksymbol) in unaccount_event()
4837 if (event->attr.bpf_event) in unaccount_event()
4839 if (event->attr.text_poke) in unaccount_event()
4847 unaccount_event_cpu(event, event->cpu); in unaccount_event()
4849 unaccount_pmu_sb_event(event); in unaccount_event()
4862 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
4872 static int exclusive_event_init(struct perf_event *event) in exclusive_event_init() argument
4874 struct pmu *pmu = event->pmu; in exclusive_event_init()
4887 * Since this is called in perf_event_alloc() path, event::ctx in exclusive_event_init()
4889 * to mean "per-task event", because unlike other attach states it in exclusive_event_init()
4892 if (event->attach_state & PERF_ATTACH_TASK) { in exclusive_event_init()
4903 static void exclusive_event_destroy(struct perf_event *event) in exclusive_event_destroy() argument
4905 struct pmu *pmu = event->pmu; in exclusive_event_destroy()
4911 if (event->attach_state & PERF_ATTACH_TASK) in exclusive_event_destroy()
4927 static bool exclusive_event_installable(struct perf_event *event, in exclusive_event_installable() argument
4931 struct pmu *pmu = event->pmu; in exclusive_event_installable()
4939 if (exclusive_event_match(iter_event, event)) in exclusive_event_installable()
4946 static void perf_addr_filters_splice(struct perf_event *event,
4949 static void _free_event(struct perf_event *event) in _free_event() argument
4951 irq_work_sync(&event->pending); in _free_event()
4953 unaccount_event(event); in _free_event()
4955 security_perf_event_free(event); in _free_event()
4957 if (event->rb) { in _free_event()
4959 * Can happen when we close an event with re-directed output. in _free_event()
4964 mutex_lock(&event->mmap_mutex); in _free_event()
4965 ring_buffer_attach(event, NULL); in _free_event()
4966 mutex_unlock(&event->mmap_mutex); in _free_event()
4969 if (is_cgroup_event(event)) in _free_event()
4970 perf_detach_cgroup(event); in _free_event()
4972 if (!event->parent) { in _free_event()
4973 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in _free_event()
4977 perf_event_free_bpf_prog(event); in _free_event()
4978 perf_addr_filters_splice(event, NULL); in _free_event()
4979 kfree(event->addr_filter_ranges); in _free_event()
4981 if (event->destroy) in _free_event()
4982 event->destroy(event); in _free_event()
4988 if (event->hw.target) in _free_event()
4989 put_task_struct(event->hw.target); in _free_event()
4995 if (event->ctx) in _free_event()
4996 put_ctx(event->ctx); in _free_event()
4998 exclusive_event_destroy(event); in _free_event()
4999 module_put(event->pmu->module); in _free_event()
5001 call_rcu(&event->rcu_head, free_event_rcu); in _free_event()
5006 * where the event isn't exposed yet and inherited events.
5008 static void free_event(struct perf_event *event) in free_event() argument
5010 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, in free_event()
5011 "unexpected event refcount: %ld; ptr=%p\n", in free_event()
5012 atomic_long_read(&event->refcount), event)) { in free_event()
5017 _free_event(event); in free_event()
5021 * Remove user event from the owner task.
5023 static void perf_remove_from_owner(struct perf_event *event) in perf_remove_from_owner() argument
5031 * indeed free this event, otherwise we need to serialize on in perf_remove_from_owner()
5034 owner = READ_ONCE(event->owner); in perf_remove_from_owner()
5057 * We have to re-check the event->owner field, if it is cleared in perf_remove_from_owner()
5060 * event. in perf_remove_from_owner()
5062 if (event->owner) { in perf_remove_from_owner()
5063 list_del_init(&event->owner_entry); in perf_remove_from_owner()
5064 smp_store_release(&event->owner, NULL); in perf_remove_from_owner()
5071 static void put_event(struct perf_event *event) in put_event() argument
5073 if (!atomic_long_dec_and_test(&event->refcount)) in put_event()
5076 _free_event(event); in put_event()
5080 * Kill an event dead; while event::refcount will preserve the event
5084 int perf_event_release_kernel(struct perf_event *event) in perf_event_release_kernel() argument
5086 struct perf_event_context *ctx = event->ctx; in perf_event_release_kernel()
5095 WARN_ON_ONCE(event->attach_state & in perf_event_release_kernel()
5100 if (!is_kernel_event(event)) in perf_event_release_kernel()
5101 perf_remove_from_owner(event); in perf_event_release_kernel()
5103 ctx = perf_event_ctx_lock(event); in perf_event_release_kernel()
5105 perf_remove_from_context(event, DETACH_GROUP); in perf_event_release_kernel()
5109 * Mark this event as STATE_DEAD, there is no external reference to it in perf_event_release_kernel()
5112 * Anybody acquiring event->child_mutex after the below loop _must_ in perf_event_release_kernel()
5119 event->state = PERF_EVENT_STATE_DEAD; in perf_event_release_kernel()
5122 perf_event_ctx_unlock(event, ctx); in perf_event_release_kernel()
5125 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5126 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_release_kernel()
5137 * Since the event cannot get freed while we hold the in perf_event_release_kernel()
5148 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5150 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5157 tmp = list_first_entry_or_null(&event->child_list, in perf_event_release_kernel()
5166 put_event(event); in perf_event_release_kernel()
5169 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5174 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5183 * Wake any perf_event_free_task() waiting for this event to be in perf_event_release_kernel()
5191 put_event(event); /* Must be the 'last' reference */ in perf_event_release_kernel()
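close() on the last file reference to an event fd is what funnels into perf_event_release_kernel() above. A minimal userspace sketch of that lifecycle, using the raw perf_event_open(2) syscall (it has no glibc wrapper); the hardware counter choice, error handling and the wrapper name are illustrative, not taken from this file:

```c
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

/* Thin wrapper; perf_event_open() has no libc stub. */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;		/* start stopped, enable explicitly */
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0 /* this task */, -1 /* any cpu */, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %llu\n", (unsigned long long)count);

	close(fd);	/* last reference dropped -> perf_event_release_kernel() */
	return 0;
}
```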
5205 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in __perf_event_read_value() argument
5213 mutex_lock(&event->child_mutex); in __perf_event_read_value()
5215 (void)perf_event_read(event, false); in __perf_event_read_value()
5216 total += perf_event_count(event); in __perf_event_read_value()
5218 *enabled += event->total_time_enabled + in __perf_event_read_value()
5219 atomic64_read(&event->child_total_time_enabled); in __perf_event_read_value()
5220 *running += event->total_time_running + in __perf_event_read_value()
5221 atomic64_read(&event->child_total_time_running); in __perf_event_read_value()
5223 list_for_each_entry(child, &event->child_list, child_list) { in __perf_event_read_value()
5229 mutex_unlock(&event->child_mutex); in __perf_event_read_value()
5234 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in perf_event_read_value() argument
5239 ctx = perf_event_ctx_lock(event); in perf_event_read_value()
5240 count = __perf_event_read_value(event, enabled, running); in perf_event_read_value()
5241 perf_event_ctx_unlock(event, ctx); in perf_event_read_value()
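__perf_event_read_value() above is what backs read(2) on an event fd; perf_read_one() just below lays the result out in PERF_FORMAT_* order. A hedged sketch of the matching userspace layout, assuming a non-group event opened with attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID; the struct and helper names are illustrative:

```c
#include <linux/perf_event.h>
#include <stdint.h>
#include <unistd.h>

/* Field order follows perf_read_one(): value, then the enabled/running
 * times (if requested), then the event id (if requested). */
struct read_value {
	uint64_t value;
	uint64_t time_enabled;
	uint64_t time_running;
	uint64_t id;
};

static int read_scaled(int fd, double *scaled)
{
	struct read_value rv;

	if (read(fd, &rv, sizeof(rv)) != sizeof(rv))
		return -1;

	/* Compensate for multiplexing: time_running <= time_enabled. */
	*scaled = rv.time_running ?
		  (double)rv.value * rv.time_enabled / rv.time_running : 0.0;
	return 0;
}
```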
5294 static int perf_read_group(struct perf_event *event, in perf_read_group() argument
5297 struct perf_event *leader = event->group_leader, *child; in perf_read_group()
5304 values = kzalloc(event->read_size, GFP_KERNEL); in perf_read_group()
5328 ret = event->read_size; in perf_read_group()
5329 if (copy_to_user(buf, values, event->read_size)) in perf_read_group()
5340 static int perf_read_one(struct perf_event *event, in perf_read_one() argument
5347 values[n++] = __perf_event_read_value(event, &enabled, &running); in perf_read_one()
5353 values[n++] = primary_event_id(event); in perf_read_one()
5361 static bool is_event_hup(struct perf_event *event) in is_event_hup() argument
5365 if (event->state > PERF_EVENT_STATE_EXIT) in is_event_hup()
5368 mutex_lock(&event->child_mutex); in is_event_hup()
5369 no_children = list_empty(&event->child_list); in is_event_hup()
5370 mutex_unlock(&event->child_mutex); in is_event_hup()
5375 * Read the performance event - simple non-blocking version for now
5378 __perf_read(struct perf_event *event, char __user *buf, size_t count) in __perf_read() argument
5380 u64 read_format = event->attr.read_format; in __perf_read()
5384 * Return end-of-file for a read on an event that is in in __perf_read()
5388 if (event->state == PERF_EVENT_STATE_ERROR) in __perf_read()
5391 if (count < event->read_size) in __perf_read()
5394 WARN_ON_ONCE(event->ctx->parent_ctx); in __perf_read()
5396 ret = perf_read_group(event, read_format, buf); in __perf_read()
5398 ret = perf_read_one(event, read_format, buf); in __perf_read()
5406 struct perf_event *event = file->private_data; in perf_read() local
5410 ret = security_perf_event_read(event); in perf_read()
5414 ctx = perf_event_ctx_lock(event); in perf_read()
5415 ret = __perf_read(event, buf, count); in perf_read()
5416 perf_event_ctx_unlock(event, ctx); in perf_read()
5423 struct perf_event *event = file->private_data; in perf_poll() local
5427 poll_wait(file, &event->waitq, wait); in perf_poll()
5429 if (is_event_hup(event)) in perf_poll()
5433 * Pin the event->rb by taking event->mmap_mutex; otherwise in perf_poll()
5436 mutex_lock(&event->mmap_mutex); in perf_poll()
5437 rb = event->rb; in perf_poll()
5440 mutex_unlock(&event->mmap_mutex); in perf_poll()
5444 static void _perf_event_reset(struct perf_event *event) in _perf_event_reset() argument
5446 (void)perf_event_read(event, false); in _perf_event_reset()
5447 local64_set(&event->count, 0); in _perf_event_reset()
5448 perf_event_update_userpage(event); in _perf_event_reset()
5451 /* Assume it's not an event with inherit set. */
5452 u64 perf_event_pause(struct perf_event *event, bool reset) in perf_event_pause() argument
5457 ctx = perf_event_ctx_lock(event); in perf_event_pause()
5458 WARN_ON_ONCE(event->attr.inherit); in perf_event_pause()
5459 _perf_event_disable(event); in perf_event_pause()
5460 count = local64_read(&event->count); in perf_event_pause()
5462 local64_set(&event->count, 0); in perf_event_pause()
5463 perf_event_ctx_unlock(event, ctx); in perf_event_pause()
5470 * Holding the top-level event's child_mutex means that any
5471 * descendant process that has inherited this event will block
5475 static void perf_event_for_each_child(struct perf_event *event, in perf_event_for_each_child() argument
5480 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_for_each_child()
5482 mutex_lock(&event->child_mutex); in perf_event_for_each_child()
5483 func(event); in perf_event_for_each_child()
5484 list_for_each_entry(child, &event->child_list, child_list) in perf_event_for_each_child()
5486 mutex_unlock(&event->child_mutex); in perf_event_for_each_child()
5489 static void perf_event_for_each(struct perf_event *event, in perf_event_for_each() argument
5492 struct perf_event_context *ctx = event->ctx; in perf_event_for_each()
5497 event = event->group_leader; in perf_event_for_each()
5499 perf_event_for_each_child(event, func); in perf_event_for_each()
5500 for_each_sibling_event(sibling, event) in perf_event_for_each()
5504 static void __perf_event_period(struct perf_event *event, in __perf_event_period() argument
5512 if (event->attr.freq) { in __perf_event_period()
5513 event->attr.sample_freq = value; in __perf_event_period()
5515 event->attr.sample_period = value; in __perf_event_period()
5516 event->hw.sample_period = value; in __perf_event_period()
5519 active = (event->state == PERF_EVENT_STATE_ACTIVE); in __perf_event_period()
5524 * trying to unthrottle while we already re-started the event. in __perf_event_period()
5526 if (event->hw.interrupts == MAX_INTERRUPTS) { in __perf_event_period()
5527 event->hw.interrupts = 0; in __perf_event_period()
5528 perf_log_throttle(event, 1); in __perf_event_period()
5530 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_period()
5533 local64_set(&event->hw.period_left, 0); in __perf_event_period()
5536 event->pmu->start(event, PERF_EF_RELOAD); in __perf_event_period()
5541 static int perf_event_check_period(struct perf_event *event, u64 value) in perf_event_check_period() argument
5543 return event->pmu->check_period(event, value); in perf_event_check_period()
5546 static int _perf_event_period(struct perf_event *event, u64 value) in _perf_event_period() argument
5548 if (!is_sampling_event(event)) in _perf_event_period()
5554 if (event->attr.freq && value > sysctl_perf_event_sample_rate) in _perf_event_period()
5557 if (perf_event_check_period(event, value)) in _perf_event_period()
5560 if (!event->attr.freq && (value & (1ULL << 63))) in _perf_event_period()
5563 event_function_call(event, __perf_event_period, &value); in _perf_event_period()
5568 int perf_event_period(struct perf_event *event, u64 value) in perf_event_period() argument
5573 ctx = perf_event_ctx_lock(event); in perf_event_period()
5574 ret = _perf_event_period(event, value); in perf_event_period()
5575 perf_event_ctx_unlock(event, ctx); in perf_event_period()
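_perf_event_period() above is reachable from userspace via PERF_EVENT_IOC_PERIOD in _perf_ioctl() below; the ioctl takes a pointer to the new period. A minimal sketch, with the helper name being illustrative:

```c
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <stdint.h>

/* Change the sampling period of an already-open sampling event.
 * Fails with -EINVAL for non-sampling events, mirroring the
 * is_sampling_event() check in _perf_event_period() above. */
static int set_sample_period(int fd, uint64_t period)
{
	return ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
}
```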
5597 static int perf_event_set_output(struct perf_event *event,
5599 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
5603 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) in _perf_ioctl() argument
5620 return _perf_event_refresh(event, arg); in _perf_ioctl()
5629 return _perf_event_period(event, value); in _perf_ioctl()
5633 u64 id = primary_event_id(event); in _perf_ioctl()
5650 ret = perf_event_set_output(event, output_event); in _perf_ioctl()
5653 ret = perf_event_set_output(event, NULL); in _perf_ioctl()
5659 return perf_event_set_filter(event, (void __user *)arg); in _perf_ioctl()
5670 err = perf_event_set_bpf_prog(event, prog, 0); in _perf_ioctl()
5683 rb = rcu_dereference(event->rb); in _perf_ioctl()
5694 return perf_event_query_prog_array(event, (void __user *)arg); in _perf_ioctl()
5704 return perf_event_modify_attr(event, &new_attr); in _perf_ioctl()
5711 perf_event_for_each(event, func); in _perf_ioctl()
5713 perf_event_for_each_child(event, func); in _perf_ioctl()
5720 struct perf_event *event = file->private_data; in perf_ioctl() local
5725 ret = security_perf_event_write(event); in perf_ioctl()
5729 ctx = perf_event_ctx_lock(event); in perf_ioctl()
5730 ret = _perf_ioctl(event, cmd, arg); in perf_ioctl()
5731 perf_event_ctx_unlock(event, ctx); in perf_ioctl()
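The PERF_IOC_FLAG_GROUP check at the tail of _perf_ioctl() above chooses perf_event_for_each() (the whole group) over perf_event_for_each_child() (a single event plus its inherited children). A hedged userspace sketch of driving a whole group through its leader fd; the helper name is illustrative:

```c
#include <linux/perf_event.h>
#include <sys/ioctl.h>

/* Reset and enable/disable every member of a group by issuing the ioctl
 * on the group leader with PERF_IOC_FLAG_GROUP; without the flag only
 * the leader (and its inherited children) is affected. */
static void toggle_group(int leader_fd, int enable)
{
	ioctl(leader_fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
	ioctl(leader_fd,
	      enable ? PERF_EVENT_IOC_ENABLE : PERF_EVENT_IOC_DISABLE,
	      PERF_IOC_FLAG_GROUP);
}
```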
5761 struct perf_event *event; in perf_event_task_enable() local
5764 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_enable()
5765 ctx = perf_event_ctx_lock(event); in perf_event_task_enable()
5766 perf_event_for_each_child(event, _perf_event_enable); in perf_event_task_enable()
5767 perf_event_ctx_unlock(event, ctx); in perf_event_task_enable()
5777 struct perf_event *event; in perf_event_task_disable() local
5780 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_disable()
5781 ctx = perf_event_ctx_lock(event); in perf_event_task_disable()
5782 perf_event_for_each_child(event, _perf_event_disable); in perf_event_task_disable()
5783 perf_event_ctx_unlock(event, ctx); in perf_event_task_disable()
5790 static int perf_event_index(struct perf_event *event) in perf_event_index() argument
5792 if (event->hw.state & PERF_HES_STOPPED) in perf_event_index()
5795 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_event_index()
5798 return event->pmu->event_idx(event); in perf_event_index()
5801 static void calc_timer_values(struct perf_event *event, in calc_timer_values() argument
5809 ctx_time = event->shadow_ctx_time + *now; in calc_timer_values()
5810 __perf_update_times(event, ctx_time, enabled, running); in calc_timer_values()
5813 static void perf_event_init_userpage(struct perf_event *event) in perf_event_init_userpage() argument
5819 rb = rcu_dereference(event->rb); in perf_event_init_userpage()
5836 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) in arch_perf_update_userpage() argument
5845 void perf_event_update_userpage(struct perf_event *event) in perf_event_update_userpage() argument
5852 rb = rcu_dereference(event->rb); in perf_event_update_userpage()
5858 * based on snapshot values taken when the event in perf_event_update_userpage()
5865 calc_timer_values(event, &now, &enabled, &running); in perf_event_update_userpage()
5875 userpg->index = perf_event_index(event); in perf_event_update_userpage()
5876 userpg->offset = perf_event_count(event); in perf_event_update_userpage()
5878 userpg->offset -= local64_read(&event->hw.prev_count); in perf_event_update_userpage()
5881 atomic64_read(&event->child_total_time_enabled); in perf_event_update_userpage()
5884 atomic64_read(&event->child_total_time_running); in perf_event_update_userpage()
5886 arch_perf_update_userpage(event, userpg, now); in perf_event_update_userpage()
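perf_event_update_userpage() above publishes index, offset and the time_enabled/time_running pair through the userpg->lock seqcount. A reader of the first mmap'ed page retries until the count is stable, along the lines of the loop described in the uapi <linux/perf_event.h> comments; this sketch leaves out the rdpmc and cap_user_time refinements:

```c
#include <linux/perf_event.h>
#include <stdint.h>

/* Take a consistent snapshot of the counter state exported in the
 * mmap'ed control page.  pc->lock is bumped around every update, so
 * re-read until it is stable (classic seqcount pattern). */
static void read_userpage(volatile struct perf_event_mmap_page *pc,
			  uint64_t *offset, uint64_t *enabled, uint64_t *running)
{
	uint32_t seq;

	do {
		seq = pc->lock;
		__sync_synchronize();		/* read barrier */
		*offset  = pc->offset;		/* count at last update */
		*enabled = pc->time_enabled;
		*running = pc->time_running;
		__sync_synchronize();
	} while (pc->lock != seq);

	/* A full self-monitoring reader would also add the hardware delta
	 * via pc->index and rdpmc when pc->cap_user_rdpmc is set. */
}
```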
5898 struct perf_event *event = vmf->vma->vm_file->private_data; in perf_mmap_fault() local
5909 rb = rcu_dereference(event->rb); in perf_mmap_fault()
5931 static void ring_buffer_attach(struct perf_event *event, in ring_buffer_attach() argument
5937 if (event->rb) { in ring_buffer_attach()
5940 * event->rb_entry and wait/clear when adding event->rb_entry. in ring_buffer_attach()
5942 WARN_ON_ONCE(event->rcu_pending); in ring_buffer_attach()
5944 old_rb = event->rb; in ring_buffer_attach()
5946 list_del_rcu(&event->rb_entry); in ring_buffer_attach()
5949 event->rcu_batches = get_state_synchronize_rcu(); in ring_buffer_attach()
5950 event->rcu_pending = 1; in ring_buffer_attach()
5954 if (event->rcu_pending) { in ring_buffer_attach()
5955 cond_synchronize_rcu(event->rcu_batches); in ring_buffer_attach()
5956 event->rcu_pending = 0; in ring_buffer_attach()
5960 list_add_rcu(&event->rb_entry, &rb->event_list); in ring_buffer_attach()
5965 * Avoid racing with perf_mmap_close(AUX): stop the event in ring_buffer_attach()
5966 * before swizzling the event::rb pointer; if it's getting in ring_buffer_attach()
5974 if (has_aux(event)) in ring_buffer_attach()
5975 perf_event_stop(event, 0); in ring_buffer_attach()
5977 rcu_assign_pointer(event->rb, rb); in ring_buffer_attach()
5986 wake_up_all(&event->waitq); in ring_buffer_attach()
5990 static void ring_buffer_wakeup(struct perf_event *event) in ring_buffer_wakeup() argument
5995 rb = rcu_dereference(event->rb); in ring_buffer_wakeup()
5997 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) in ring_buffer_wakeup()
5998 wake_up_all(&event->waitq); in ring_buffer_wakeup()
6003 struct perf_buffer *ring_buffer_get(struct perf_event *event) in ring_buffer_get() argument
6008 rb = rcu_dereference(event->rb); in ring_buffer_get()
6030 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_open() local
6032 atomic_inc(&event->mmap_count); in perf_mmap_open()
6033 atomic_inc(&event->rb->mmap_count); in perf_mmap_open()
6036 atomic_inc(&event->rb->aux_mmap_count); in perf_mmap_open()
6038 if (event->pmu->event_mapped) in perf_mmap_open()
6039 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap_open()
6042 static void perf_pmu_output_stop(struct perf_event *event);
6046 * event, or through other events by use of perf_event_set_output().
6054 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_close() local
6055 struct perf_buffer *rb = ring_buffer_get(event); in perf_mmap_close()
6061 if (event->pmu->event_unmapped) in perf_mmap_close()
6062 event->pmu->event_unmapped(event, vma->vm_mm); in perf_mmap_close()
6066 * event->mmap_count, so it is ok to use event->mmap_mutex to in perf_mmap_close()
6070 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { in perf_mmap_close()
6077 perf_pmu_output_stop(event); in perf_mmap_close()
6087 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6093 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) in perf_mmap_close()
6096 ring_buffer_attach(event, NULL); in perf_mmap_close()
6097 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6110 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { in perf_mmap_close()
6111 if (!atomic_long_inc_not_zero(&event->refcount)) { in perf_mmap_close()
6113 * This event is en-route to free_event() which will in perf_mmap_close()
6120 mutex_lock(&event->mmap_mutex); in perf_mmap_close()
6126 * If we find a different rb; ignore this event, a next in perf_mmap_close()
6131 if (event->rb == rb) in perf_mmap_close()
6132 ring_buffer_attach(event, NULL); in perf_mmap_close()
6134 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6135 put_event(event); in perf_mmap_close()
6172 struct perf_event *event = file->private_data; in perf_mmap() local
6187 if (event->cpu == -1 && event->attr.inherit) in perf_mmap()
6193 ret = security_perf_event_read(event); in perf_mmap()
6209 if (!event->rb) in perf_mmap()
6214 mutex_lock(&event->mmap_mutex); in perf_mmap()
6217 rb = event->rb; in perf_mmap()
6269 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_mmap()
6271 mutex_lock(&event->mmap_mutex); in perf_mmap()
6272 if (event->rb) { in perf_mmap()
6273 if (event->rb->nr_pages != nr_pages) { in perf_mmap()
6278 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { in perf_mmap()
6284 mutex_unlock(&event->mmap_mutex); in perf_mmap()
6330 WARN_ON(!rb && event->rb); in perf_mmap()
6337 event->attr.watermark ? event->attr.wakeup_watermark : 0, in perf_mmap()
6338 event->cpu, flags); in perf_mmap()
6349 ring_buffer_attach(event, rb); in perf_mmap()
6351 perf_event_update_time(event); in perf_mmap()
6352 perf_set_shadow_time(event, event->ctx); in perf_mmap()
6353 perf_event_init_userpage(event); in perf_mmap()
6354 perf_event_update_userpage(event); in perf_mmap()
6356 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, in perf_mmap()
6357 event->attr.aux_watermark, flags); in perf_mmap()
6367 atomic_inc(&event->mmap_count); in perf_mmap()
6372 mutex_unlock(&event->mmap_mutex); in perf_mmap()
6381 if (event->pmu->event_mapped) in perf_mmap()
6382 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap()
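perf_mmap() above only accepts a mapping of 1 + 2^n pages: one struct perf_event_mmap_page control page followed by a power-of-two data area. A hedged sketch of the matching userspace mapping plus a poll()-driven drain loop; record parsing is elided and DATA_PAGES is an arbitrary choice:

```c
#include <linux/perf_event.h>
#include <poll.h>
#include <sys/mman.h>
#include <unistd.h>
#include <stdint.h>

#define DATA_PAGES 8			/* must be a power of two */

static void drain_ring(int fd)
{
	size_t page = sysconf(_SC_PAGESIZE);
	struct perf_event_mmap_page *mp;
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	void *base;

	/* 1 control page + 2^n data pages, matching the perf_mmap() check. */
	base = mmap(NULL, (1 + DATA_PAGES) * page,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED)
		return;
	mp = base;

	while (poll(&pfd, 1, -1) > 0 && !(pfd.revents & POLLHUP)) {
		uint64_t head = mp->data_head;

		__sync_synchronize();	/* order head read before data reads */
		/* ... parse struct perf_event_header records living in
		 * base + page .. base + (1 + DATA_PAGES) * page, between
		 * data_tail and data_head (the offsets wrap) ... */
		__sync_synchronize();	/* finish reads before freeing space */

		mp->data_tail = head;	/* tell the kernel we consumed them */
	}
	munmap(base, (1 + DATA_PAGES) * page);
}
```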
6390 struct perf_event *event = filp->private_data; in perf_fasync() local
6394 retval = fasync_helper(fd, filp, on, &event->fasync); in perf_fasync()
6415 * Perf event wakeup
6421 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) in perf_event_fasync() argument
6424 if (event->parent) in perf_event_fasync()
6425 event = event->parent; in perf_event_fasync()
6426 return &event->fasync; in perf_event_fasync()
6429 void perf_event_wakeup(struct perf_event *event) in perf_event_wakeup() argument
6431 ring_buffer_wakeup(event); in perf_event_wakeup()
6433 if (event->pending_kill) { in perf_event_wakeup()
6434 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); in perf_event_wakeup()
6435 event->pending_kill = 0; in perf_event_wakeup()
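Besides waking poll() waiters, perf_event_wakeup() above calls kill_fasync(), so a fd owner can receive SIGIO instead of blocking. A minimal sketch of arming that path from userspace; the handler body is a placeholder:

```c
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static volatile sig_atomic_t got_wakeup;

static void sigio_handler(int sig)
{
	(void)sig;
	got_wakeup = 1;		/* real code would drain the ring buffer */
}

/* Ask for SIGIO on ring-buffer wakeups/overflows; this is the userspace
 * side of the fasync_helper()/kill_fasync() pair shown above. */
static void arm_sigio(int fd)
{
	signal(SIGIO, sigio_handler);
	fcntl(fd, F_SETOWN, getpid());
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
}
```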
6439 static void perf_sigtrap(struct perf_event *event) in perf_sigtrap() argument
6446 if (WARN_ON_ONCE(event->ctx->task != current)) in perf_sigtrap()
6455 force_sig_perf((void __user *)event->pending_addr, in perf_sigtrap()
6456 event->attr.type, event->attr.sig_data); in perf_sigtrap()
6459 static void perf_pending_event_disable(struct perf_event *event) in perf_pending_event_disable() argument
6461 int cpu = READ_ONCE(event->pending_disable); in perf_pending_event_disable()
6467 WRITE_ONCE(event->pending_disable, -1); in perf_pending_event_disable()
6469 if (event->attr.sigtrap) { in perf_pending_event_disable()
6470 perf_sigtrap(event); in perf_pending_event_disable()
6471 atomic_set_release(&event->event_limit, 1); /* rearm event */ in perf_pending_event_disable()
6475 perf_event_disable_local(event); in perf_pending_event_disable()
6497 * But the event runs on CPU-B and wants disabling there. in perf_pending_event_disable()
6499 irq_work_queue_on(&event->pending, cpu); in perf_pending_event_disable()
6504 struct perf_event *event = container_of(entry, struct perf_event, pending); in perf_pending_event() local
6513 perf_pending_event_disable(event); in perf_pending_event()
6515 if (event->pending_wakeup) { in perf_pending_event()
6516 event->pending_wakeup = 0; in perf_pending_event()
6517 perf_event_wakeup(event); in perf_pending_event()
6681 static unsigned long perf_prepare_sample_aux(struct perf_event *event, in perf_prepare_sample_aux() argument
6685 struct perf_event *sampler = event->aux_event; in perf_prepare_sample_aux()
6720 struct perf_event *event, in perf_pmu_snapshot_aux() argument
6730 * the IRQ ones, that is, for example, re-starting an event that's just in perf_pmu_snapshot_aux()
6732 * doesn't change the event state. in perf_pmu_snapshot_aux()
6744 ret = event->pmu->snapshot_aux(event, handle, size); in perf_pmu_snapshot_aux()
6753 static void perf_aux_sample_output(struct perf_event *event, in perf_aux_sample_output() argument
6757 struct perf_event *sampler = event->aux_event; in perf_aux_sample_output()
6799 struct perf_event *event) in __perf_event_header__init_id() argument
6801 u64 sample_type = event->attr.sample_type; in __perf_event_header__init_id()
6804 header->size += event->id_header_size; in __perf_event_header__init_id()
6808 data->tid_entry.pid = perf_event_pid(event, current); in __perf_event_header__init_id()
6809 data->tid_entry.tid = perf_event_tid(event, current); in __perf_event_header__init_id()
6813 data->time = perf_event_clock(event); in __perf_event_header__init_id()
6816 data->id = primary_event_id(event); in __perf_event_header__init_id()
6819 data->stream_id = event->id; in __perf_event_header__init_id()
6829 struct perf_event *event) in perf_event_header__init_id() argument
6831 if (event->attr.sample_id_all) in perf_event_header__init_id()
6832 __perf_event_header__init_id(header, data, event); in perf_event_header__init_id()
6859 void perf_event__output_id_sample(struct perf_event *event, in perf_event__output_id_sample() argument
6863 if (event->attr.sample_id_all) in perf_event__output_id_sample()
6868 struct perf_event *event, in perf_output_read_one() argument
6871 u64 read_format = event->attr.read_format; in perf_output_read_one()
6875 values[n++] = perf_event_count(event); in perf_output_read_one()
6878 atomic64_read(&event->child_total_time_enabled); in perf_output_read_one()
6882 atomic64_read(&event->child_total_time_running); in perf_output_read_one()
6885 values[n++] = primary_event_id(event); in perf_output_read_one()
6891 struct perf_event *event, in perf_output_read_group() argument
6894 struct perf_event *leader = event->group_leader, *sub; in perf_output_read_group()
6895 u64 read_format = event->attr.read_format; in perf_output_read_group()
6907 if ((leader != event) && in perf_output_read_group()
6920 if ((sub != event) && in perf_output_read_group()
6943 struct perf_event *event) in perf_output_read() argument
6946 u64 read_format = event->attr.read_format; in perf_output_read()
6950 * based on snapshot values taken when the event in perf_output_read()
6958 calc_timer_values(event, &now, &enabled, &running); in perf_output_read()
6960 if (event->attr.read_format & PERF_FORMAT_GROUP) in perf_output_read()
6961 perf_output_read_group(handle, event, enabled, running); in perf_output_read()
6963 perf_output_read_one(handle, event, enabled, running); in perf_output_read()
6966 static inline bool perf_sample_save_hw_index(struct perf_event *event) in perf_sample_save_hw_index() argument
6968 return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX; in perf_sample_save_hw_index()
6974 struct perf_event *event) in perf_output_sample() argument
7008 perf_output_read(handle, event); in perf_output_sample()
7059 if (perf_sample_save_hw_index(event)) in perf_output_sample()
7081 u64 mask = event->attr.sample_regs_user; in perf_output_sample()
7112 u64 mask = event->attr.sample_regs_intr; in perf_output_sample()
7136 perf_aux_sample_output(event, handle, data); in perf_output_sample()
7139 if (!event->attr.watermark) { in perf_output_sample()
7140 int wakeup_events = event->attr.wakeup_events; in perf_output_sample()
7279 perf_callchain(struct perf_event *event, struct pt_regs *regs) in perf_callchain() argument
7281 bool kernel = !event->attr.exclude_callchain_kernel; in perf_callchain()
7282 bool user = !event->attr.exclude_callchain_user; in perf_callchain()
7284 bool crosstask = event->ctx->task && event->ctx->task != current; in perf_callchain()
7285 const u32 max_stack = event->attr.sample_max_stack; in perf_callchain()
7298 struct perf_event *event, in perf_prepare_sample() argument
7301 u64 sample_type = event->attr.sample_type; in perf_prepare_sample()
7304 header->size = sizeof(*header) + event->header_size; in perf_prepare_sample()
7309 __perf_event_header__init_id(header, data, event); in perf_prepare_sample()
7318 data->callchain = perf_callchain(event, regs); in perf_prepare_sample()
7353 if (perf_sample_save_hw_index(event)) in perf_prepare_sample()
7370 u64 mask = event->attr.sample_regs_user; in perf_prepare_sample()
7384 u16 stack_size = event->attr.sample_stack_user; in perf_prepare_sample()
7409 u64 mask = event->attr.sample_regs_intr; in perf_prepare_sample()
7453 event->attr.aux_sample_size); in perf_prepare_sample()
7455 size = perf_prepare_sample_aux(event, data, size); in perf_prepare_sample()
7472 __perf_event_output(struct perf_event *event, in __perf_event_output() argument
7487 perf_prepare_sample(&header, data, event, regs); in __perf_event_output()
7489 err = output_begin(&handle, data, event, header.size); in __perf_event_output()
7493 perf_output_sample(&handle, &header, data, event); in __perf_event_output()
7503 perf_event_output_forward(struct perf_event *event, in perf_event_output_forward() argument
7507 __perf_event_output(event, data, regs, perf_output_begin_forward); in perf_event_output_forward()
7511 perf_event_output_backward(struct perf_event *event, in perf_event_output_backward() argument
7515 __perf_event_output(event, data, regs, perf_output_begin_backward); in perf_event_output_backward()
7519 perf_event_output(struct perf_event *event, in perf_event_output() argument
7523 return __perf_event_output(event, data, regs, perf_output_begin); in perf_event_output()
7538 perf_event_read_event(struct perf_event *event, in perf_event_read_event() argument
7547 .size = sizeof(read_event) + event->read_size, in perf_event_read_event()
7549 .pid = perf_event_pid(event, task), in perf_event_read_event()
7550 .tid = perf_event_tid(event, task), in perf_event_read_event()
7554 perf_event_header__init_id(&read_event.header, &sample, event); in perf_event_read_event()
7555 ret = perf_output_begin(&handle, &sample, event, read_event.header.size); in perf_event_read_event()
7560 perf_output_read(&handle, event); in perf_event_read_event()
7561 perf_event__output_id_sample(event, &handle, &sample); in perf_event_read_event()
7566 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
7573 struct perf_event *event; in perf_iterate_ctx() local
7575 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_iterate_ctx()
7577 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_ctx()
7579 if (!event_filter_match(event)) in perf_iterate_ctx()
7583 output(event, data); in perf_iterate_ctx()
7590 struct perf_event *event; in perf_iterate_sb_cpu() local
7592 list_for_each_entry_rcu(event, &pel->list, sb_list) { in perf_iterate_sb_cpu()
7595 * if we observe event->ctx, both event and ctx will be in perf_iterate_sb_cpu()
7598 if (!smp_load_acquire(&event->ctx)) in perf_iterate_sb_cpu()
7601 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_sb_cpu()
7603 if (!event_filter_match(event)) in perf_iterate_sb_cpu()
7605 output(event, data); in perf_iterate_sb_cpu()
7613 * your event, otherwise it might not get delivered.
7651 static void perf_event_addr_filters_exec(struct perf_event *event, void *data) in perf_event_addr_filters_exec() argument
7653 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_exec()
7658 if (!has_addr_filter(event)) in perf_event_addr_filters_exec()
7664 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_exec()
7665 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_exec()
7673 event->addr_filters_gen++; in perf_event_addr_filters_exec()
7677 perf_event_stop(event, 1); in perf_event_addr_filters_exec()
7704 static void __perf_event_output_stop(struct perf_event *event, void *data) in __perf_event_output_stop() argument
7706 struct perf_event *parent = event->parent; in __perf_event_output_stop()
7710 .event = event, in __perf_event_output_stop()
7713 if (!has_aux(event)) in __perf_event_output_stop()
7717 parent = event; in __perf_event_output_stop()
7723 * We are using event::rb to determine if the event should be stopped, in __perf_event_output_stop()
7725 * which will make us skip the event that actually needs to be stopped. in __perf_event_output_stop()
7726 * So ring_buffer_attach() has to stop an aux event before re-assigning in __perf_event_output_stop()
7735 struct perf_event *event = info; in __perf_pmu_output_stop() local
7736 struct pmu *pmu = event->ctx->pmu; in __perf_pmu_output_stop()
7739 .rb = event->rb, in __perf_pmu_output_stop()
7752 static void perf_pmu_output_stop(struct perf_event *event) in perf_pmu_output_stop() argument
7759 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) { in perf_pmu_output_stop()
7763 * sufficient to stop the event itself if it's active, since in perf_pmu_output_stop()
7773 err = cpu_function_call(cpu, __perf_pmu_output_stop, event); in perf_pmu_output_stop()
7803 static int perf_event_task_match(struct perf_event *event) in perf_event_task_match() argument
7805 return event->attr.comm || event->attr.mmap || in perf_event_task_match()
7806 event->attr.mmap2 || event->attr.mmap_data || in perf_event_task_match()
7807 event->attr.task; in perf_event_task_match()
7810 static void perf_event_task_output(struct perf_event *event, in perf_event_task_output() argument
7819 if (!perf_event_task_match(event)) in perf_event_task_output()
7822 perf_event_header__init_id(&task_event->event_id.header, &sample, event); in perf_event_task_output()
7824 ret = perf_output_begin(&handle, &sample, event, in perf_event_task_output()
7829 task_event->event_id.pid = perf_event_pid(event, task); in perf_event_task_output()
7830 task_event->event_id.tid = perf_event_tid(event, task); in perf_event_task_output()
7833 task_event->event_id.ppid = perf_event_pid(event, in perf_event_task_output()
7835 task_event->event_id.ptid = perf_event_pid(event, in perf_event_task_output()
7838 task_event->event_id.ppid = perf_event_pid(event, current); in perf_event_task_output()
7839 task_event->event_id.ptid = perf_event_tid(event, current); in perf_event_task_output()
7842 task_event->event_id.time = perf_event_clock(event); in perf_event_task_output()
7846 perf_event__output_id_sample(event, &handle, &sample); in perf_event_task_output()
7909 static int perf_event_comm_match(struct perf_event *event) in perf_event_comm_match() argument
7911 return event->attr.comm; in perf_event_comm_match()
7914 static void perf_event_comm_output(struct perf_event *event, in perf_event_comm_output() argument
7923 if (!perf_event_comm_match(event)) in perf_event_comm_output()
7926 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); in perf_event_comm_output()
7927 ret = perf_output_begin(&handle, &sample, event, in perf_event_comm_output()
7933 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); in perf_event_comm_output()
7934 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); in perf_event_comm_output()
7940 perf_event__output_id_sample(event, &handle, &sample); in perf_event_comm_output()
8008 static int perf_event_namespaces_match(struct perf_event *event) in perf_event_namespaces_match() argument
8010 return event->attr.namespaces; in perf_event_namespaces_match()
8013 static void perf_event_namespaces_output(struct perf_event *event, in perf_event_namespaces_output() argument
8022 if (!perf_event_namespaces_match(event)) in perf_event_namespaces_output()
8026 &sample, event); in perf_event_namespaces_output()
8027 ret = perf_output_begin(&handle, &sample, event, in perf_event_namespaces_output()
8032 namespaces_event->event_id.pid = perf_event_pid(event, in perf_event_namespaces_output()
8034 namespaces_event->event_id.tid = perf_event_tid(event, in perf_event_namespaces_output()
8039 perf_event__output_id_sample(event, &handle, &sample); in perf_event_namespaces_output()
8136 static int perf_event_cgroup_match(struct perf_event *event) in perf_event_cgroup_match() argument
8138 return event->attr.cgroup; in perf_event_cgroup_match()
8141 static void perf_event_cgroup_output(struct perf_event *event, void *data) in perf_event_cgroup_output() argument
8149 if (!perf_event_cgroup_match(event)) in perf_event_cgroup_output()
8153 &sample, event); in perf_event_cgroup_output()
8154 ret = perf_output_begin(&handle, &sample, event, in perf_event_cgroup_output()
8162 perf_event__output_id_sample(event, &handle, &sample); in perf_event_cgroup_output()
8247 static int perf_event_mmap_match(struct perf_event *event, in perf_event_mmap_match() argument
8254 return (!executable && event->attr.mmap_data) || in perf_event_mmap_match()
8255 (executable && (event->attr.mmap || event->attr.mmap2)); in perf_event_mmap_match()
8258 static void perf_event_mmap_output(struct perf_event *event, in perf_event_mmap_output() argument
8269 if (!perf_event_mmap_match(event, data)) in perf_event_mmap_output()
8272 if (event->attr.mmap2) { in perf_event_mmap_output()
8282 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); in perf_event_mmap_output()
8283 ret = perf_output_begin(&handle, &sample, event, in perf_event_mmap_output()
8288 mmap_event->event_id.pid = perf_event_pid(event, current); in perf_event_mmap_output()
8289 mmap_event->event_id.tid = perf_event_tid(event, current); in perf_event_mmap_output()
8291 use_build_id = event->attr.build_id && mmap_event->build_id_size; in perf_event_mmap_output()
8293 if (event->attr.mmap2 && use_build_id) in perf_event_mmap_output()
8298 if (event->attr.mmap2) { in perf_event_mmap_output()
8317 perf_event__output_id_sample(event, &handle, &sample); in perf_event_mmap_output()
8489 static void __perf_addr_filters_adjust(struct perf_event *event, void *data) in __perf_addr_filters_adjust() argument
8491 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in __perf_addr_filters_adjust()
8497 if (!has_addr_filter(event)) in __perf_addr_filters_adjust()
8506 &event->addr_filter_ranges[count])) in __perf_addr_filters_adjust()
8513 event->addr_filters_gen++; in __perf_addr_filters_adjust()
8517 perf_event_stop(event, 1); in __perf_addr_filters_adjust()
8581 void perf_event_aux_event(struct perf_event *event, unsigned long head, in perf_event_aux_event() argument
8603 perf_event_header__init_id(&rec.header, &sample, event); in perf_event_aux_event()
8604 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_event_aux_event()
8610 perf_event__output_id_sample(event, &handle, &sample); in perf_event_aux_event()
8618 void perf_log_lost_samples(struct perf_event *event, u64 lost) in perf_log_lost_samples() argument
8636 perf_event_header__init_id(&lost_samples_event.header, &sample, event); in perf_log_lost_samples()
8638 ret = perf_output_begin(&handle, &sample, event, in perf_log_lost_samples()
8644 perf_event__output_id_sample(event, &handle, &sample); in perf_log_lost_samples()
8663 static int perf_event_switch_match(struct perf_event *event) in perf_event_switch_match() argument
8665 return event->attr.context_switch; in perf_event_switch_match()
8668 static void perf_event_switch_output(struct perf_event *event, void *data) in perf_event_switch_output() argument
8675 if (!perf_event_switch_match(event)) in perf_event_switch_output()
8679 if (event->ctx->task) { in perf_event_switch_output()
8686 perf_event_pid(event, se->next_prev); in perf_event_switch_output()
8688 perf_event_tid(event, se->next_prev); in perf_event_switch_output()
8691 perf_event_header__init_id(&se->event_id.header, &sample, event); in perf_event_switch_output()
8693 ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size); in perf_event_switch_output()
8697 if (event->ctx->task) in perf_event_switch_output()
8702 perf_event__output_id_sample(event, &handle, &sample); in perf_event_switch_output()
8740 static void perf_log_throttle(struct perf_event *event, int enable) in perf_log_throttle() argument
8757 .time = perf_event_clock(event), in perf_log_throttle()
8758 .id = primary_event_id(event), in perf_log_throttle()
8759 .stream_id = event->id, in perf_log_throttle()
8765 perf_event_header__init_id(&throttle_event.header, &sample, event); in perf_log_throttle()
8767 ret = perf_output_begin(&handle, &sample, event, in perf_log_throttle()
8773 perf_event__output_id_sample(event, &handle, &sample); in perf_log_throttle()
8793 static int perf_event_ksymbol_match(struct perf_event *event) in perf_event_ksymbol_match() argument
8795 return event->attr.ksymbol; in perf_event_ksymbol_match()
8798 static void perf_event_ksymbol_output(struct perf_event *event, void *data) in perf_event_ksymbol_output() argument
8805 if (!perf_event_ksymbol_match(event)) in perf_event_ksymbol_output()
8809 &sample, event); in perf_event_ksymbol_output()
8810 ret = perf_output_begin(&handle, &sample, event, in perf_event_ksymbol_output()
8817 perf_event__output_id_sample(event, &handle, &sample); in perf_event_ksymbol_output()
8883 static int perf_event_bpf_match(struct perf_event *event) in perf_event_bpf_match() argument
8885 return event->attr.bpf_event; in perf_event_bpf_match()
8888 static void perf_event_bpf_output(struct perf_event *event, void *data) in perf_event_bpf_output() argument
8895 if (!perf_event_bpf_match(event)) in perf_event_bpf_output()
8899 &sample, event); in perf_event_bpf_output()
8900 ret = perf_output_begin(&handle, data, event, in perf_event_bpf_output()
8906 perf_event__output_id_sample(event, &handle, &sample); in perf_event_bpf_output()
8991 static int perf_event_text_poke_match(struct perf_event *event) in perf_event_text_poke_match() argument
8993 return event->attr.text_poke; in perf_event_text_poke_match()
8996 static void perf_event_text_poke_output(struct perf_event *event, void *data) in perf_event_text_poke_output() argument
9004 if (!perf_event_text_poke_match(event)) in perf_event_text_poke_output()
9007 perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event); in perf_event_text_poke_output()
9009 ret = perf_output_begin(&handle, &sample, event, in perf_event_text_poke_output()
9024 perf_event__output_id_sample(event, &handle, &sample); in perf_event_text_poke_output()
9061 void perf_event_itrace_started(struct perf_event *event) in perf_event_itrace_started() argument
9063 event->attach_state |= PERF_ATTACH_ITRACE; in perf_event_itrace_started()
9066 static void perf_log_itrace_start(struct perf_event *event) in perf_log_itrace_start() argument
9077 if (event->parent) in perf_log_itrace_start()
9078 event = event->parent; in perf_log_itrace_start()
9080 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || in perf_log_itrace_start()
9081 event->attach_state & PERF_ATTACH_ITRACE) in perf_log_itrace_start()
9087 rec.pid = perf_event_pid(event, current); in perf_log_itrace_start()
9088 rec.tid = perf_event_tid(event, current); in perf_log_itrace_start()
9090 perf_event_header__init_id(&rec.header, &sample, event); in perf_log_itrace_start()
9091 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_log_itrace_start()
9097 perf_event__output_id_sample(event, &handle, &sample); in perf_log_itrace_start()
9103 __perf_event_account_interrupt(struct perf_event *event, int throttle) in __perf_event_account_interrupt() argument
9105 struct hw_perf_event *hwc = &event->hw; in __perf_event_account_interrupt()
9120 perf_log_throttle(event, 0); in __perf_event_account_interrupt()
9125 if (event->attr.freq) { in __perf_event_account_interrupt()
9132 perf_adjust_period(event, delta, hwc->last_period, true); in __perf_event_account_interrupt()
9138 int perf_event_account_interrupt(struct perf_event *event) in perf_event_account_interrupt() argument
9140 return __perf_event_account_interrupt(event, 1); in perf_event_account_interrupt()
9144 * Generic event overflow handling, sampling.
9147 static int __perf_event_overflow(struct perf_event *event, in __perf_event_overflow() argument
9151 int events = atomic_read(&event->event_limit); in __perf_event_overflow()
9158 if (unlikely(!is_sampling_event(event))) in __perf_event_overflow()
9161 ret = __perf_event_account_interrupt(event, throttle); in __perf_event_overflow()
9168 event->pending_kill = POLL_IN; in __perf_event_overflow()
9169 if (events && atomic_dec_and_test(&event->event_limit)) { in __perf_event_overflow()
9171 event->pending_kill = POLL_HUP; in __perf_event_overflow()
9172 event->pending_addr = data->addr; in __perf_event_overflow()
9174 perf_event_disable_inatomic(event); in __perf_event_overflow()
9177 READ_ONCE(event->overflow_handler)(event, data, regs); in __perf_event_overflow() local
9179 if (*perf_event_fasync(event) && event->pending_kill) { in __perf_event_overflow()
9180 event->pending_wakeup = 1; in __perf_event_overflow()
9181 irq_work_queue(&event->pending); in __perf_event_overflow()
9187 int perf_event_overflow(struct perf_event *event, in perf_event_overflow() argument
9191 return __perf_event_overflow(event, 1, data, regs); in perf_event_overflow()
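__perf_event_overflow() above decrements event_limit and, once it reaches zero, disables the event and signals POLL_HUP. PERF_EVENT_IOC_REFRESH (handled via _perf_event_refresh() in the ioctl path earlier) is how userspace arms that limit. A hedged one-liner, assuming a sampling event opened with attr.disabled = 1; the helper name is illustrative:

```c
#include <linux/perf_event.h>
#include <sys/ioctl.h>

/* Arm the event for exactly n overflows: each overflow delivers a
 * wakeup/SIGIO, and after the n-th the kernel disables the event and
 * signals POLL_HUP.  Rejected for inherited or non-sampling events. */
static int arm_n_overflows(int fd, int n)
{
	return ioctl(fd, PERF_EVENT_IOC_REFRESH, n);
}
```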
9195 * Generic software event infrastructure
9210 * We directly increment event->count and keep a second value in
9211 * event->hw.period_left to count intervals. This period event
9216 u64 perf_swevent_set_period(struct perf_event *event) in perf_swevent_set_period() argument
9218 struct hw_perf_event *hwc = &event->hw; in perf_swevent_set_period()
9239 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, in perf_swevent_overflow() argument
9243 struct hw_perf_event *hwc = &event->hw; in perf_swevent_overflow()
9247 overflow = perf_swevent_set_period(event); in perf_swevent_overflow()
9253 if (__perf_event_overflow(event, throttle, in perf_swevent_overflow()
9265 static void perf_swevent_event(struct perf_event *event, u64 nr, in perf_swevent_event() argument
9269 struct hw_perf_event *hwc = &event->hw; in perf_swevent_event()
9271 local64_add(nr, &event->count); in perf_swevent_event()
9276 if (!is_sampling_event(event)) in perf_swevent_event()
9279 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { in perf_swevent_event()
9281 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
9283 data->period = event->hw.last_period; in perf_swevent_event()
9285 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) in perf_swevent_event()
9286 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
9291 perf_swevent_overflow(event, 0, data, regs); in perf_swevent_event()
9294 static int perf_exclude_event(struct perf_event *event, in perf_exclude_event() argument
9297 if (event->hw.state & PERF_HES_STOPPED) in perf_exclude_event()
9301 if (event->attr.exclude_user && user_mode(regs)) in perf_exclude_event()
9304 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_exclude_event()
9311 static int perf_swevent_match(struct perf_event *event, in perf_swevent_match() argument
9317 if (event->attr.type != type) in perf_swevent_match()
9320 if (event->attr.config != event_id) in perf_swevent_match()
9323 if (perf_exclude_event(event, regs)) in perf_swevent_match()
9357 /* For the event head insertion and removal in the hlist */
9359 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) in find_swevent_head() argument
9362 u32 event_id = event->attr.config; in find_swevent_head()
9363 u64 type = event->attr.type; in find_swevent_head()
9366 * Event scheduling is always serialized against hlist allocation in find_swevent_head()
9371 lockdep_is_held(&event->ctx->lock)); in find_swevent_head()
9384 struct perf_event *event; in do_perf_sw_event() local
9392 hlist_for_each_entry_rcu(event, head, hlist_entry) { in do_perf_sw_event()
9393 if (perf_swevent_match(event, type, event_id, data, regs)) in do_perf_sw_event()
9394 perf_swevent_event(event, nr, data, regs); in do_perf_sw_event()
9444 static void perf_swevent_read(struct perf_event *event) in perf_swevent_read() argument
9448 static int perf_swevent_add(struct perf_event *event, int flags) in perf_swevent_add() argument
9451 struct hw_perf_event *hwc = &event->hw; in perf_swevent_add()
9454 if (is_sampling_event(event)) { in perf_swevent_add()
9456 perf_swevent_set_period(event); in perf_swevent_add()
9461 head = find_swevent_head(swhash, event); in perf_swevent_add()
9465 hlist_add_head_rcu(&event->hlist_entry, head); in perf_swevent_add()
9466 perf_event_update_userpage(event); in perf_swevent_add()
9471 static void perf_swevent_del(struct perf_event *event, int flags) in perf_swevent_del() argument
9473 hlist_del_rcu(&event->hlist_entry); in perf_swevent_del()
9476 static void perf_swevent_start(struct perf_event *event, int flags) in perf_swevent_start() argument
9478 event->hw.state = 0; in perf_swevent_start()
9481 static void perf_swevent_stop(struct perf_event *event, int flags) in perf_swevent_stop() argument
9483 event->hw.state = PERF_HES_STOPPED; in perf_swevent_stop()
9575 static void sw_perf_event_destroy(struct perf_event *event) in sw_perf_event_destroy() argument
9577 u64 event_id = event->attr.config; in sw_perf_event_destroy()
9579 WARN_ON(event->parent); in sw_perf_event_destroy()
9585 static int perf_swevent_init(struct perf_event *event) in perf_swevent_init() argument
9587 u64 event_id = event->attr.config; in perf_swevent_init()
9589 if (event->attr.type != PERF_TYPE_SOFTWARE) in perf_swevent_init()
9595 if (has_branch_stack(event)) in perf_swevent_init()
9610 if (!event->parent) { in perf_swevent_init()
9618 event->destroy = sw_perf_event_destroy; in perf_swevent_init()
9639 static int perf_tp_filter_match(struct perf_event *event, in perf_tp_filter_match() argument
9645 if (event->parent) in perf_tp_filter_match()
9646 event = event->parent; in perf_tp_filter_match()
9648 if (likely(!event->filter) || filter_match_preds(event->filter, record)) in perf_tp_filter_match()
9653 static int perf_tp_event_match(struct perf_event *event, in perf_tp_event_match() argument
9657 if (event->hw.state & PERF_HES_STOPPED) in perf_tp_event_match()
9662 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_tp_event_match()
9665 if (!perf_tp_filter_match(event, data)) in perf_tp_event_match()
9683 perf_tp_event(call->event.type, count, raw_data, size, regs, head, in perf_trace_run_bpf_submit()
9693 struct perf_event *event; in perf_tp_event() local
9707 hlist_for_each_entry_rcu(event, head, hlist_entry) { in perf_tp_event()
9708 if (perf_tp_event_match(event, &data, regs)) in perf_tp_event()
9709 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
9714 * deliver this event there too. in perf_tp_event()
9725 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_tp_event()
9726 if (event->cpu != smp_processor_id()) in perf_tp_event()
9728 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event()
9730 if (event->attr.config != entry->type) in perf_tp_event()
9732 if (perf_tp_event_match(event, &data, regs)) in perf_tp_event()
9733 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
9743 static void tp_perf_event_destroy(struct perf_event *event) in tp_perf_event_destroy() argument
9745 perf_trace_destroy(event); in tp_perf_event_destroy()
9748 static int perf_tp_event_init(struct perf_event *event) in perf_tp_event_init() argument
9752 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event_init()
9758 if (has_branch_stack(event)) in perf_tp_event_init()
9761 err = perf_trace_init(event); in perf_tp_event_init()
9765 event->destroy = tp_perf_event_destroy; in perf_tp_event_init()
9821 static int perf_kprobe_event_init(struct perf_event *event);
9833 static int perf_kprobe_event_init(struct perf_event *event) in perf_kprobe_event_init() argument
9838 if (event->attr.type != perf_kprobe.type) in perf_kprobe_event_init()
9847 if (has_branch_stack(event)) in perf_kprobe_event_init()
9850 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_kprobe_event_init()
9851 err = perf_kprobe_init(event, is_retprobe); in perf_kprobe_event_init()
9855 event->destroy = perf_kprobe_destroy; in perf_kprobe_event_init()
9880 static int perf_uprobe_event_init(struct perf_event *event);
9892 static int perf_uprobe_event_init(struct perf_event *event) in perf_uprobe_event_init() argument
9898 if (event->attr.type != perf_uprobe.type) in perf_uprobe_event_init()
9907 if (has_branch_stack(event)) in perf_uprobe_event_init()
9910 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_uprobe_event_init()
9911 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT; in perf_uprobe_event_init()
9912 err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe); in perf_uprobe_event_init()
9916 event->destroy = perf_uprobe_destroy; in perf_uprobe_event_init()
9933 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
9935 ftrace_profile_free_filter(event); in perf_event_free_filter()
9939 static void bpf_overflow_handler(struct perf_event *event, in bpf_overflow_handler() argument
9945 .event = event, in bpf_overflow_handler()
9954 prog = READ_ONCE(event->prog); in bpf_overflow_handler()
9963 event->orig_overflow_handler(event, data, regs); in bpf_overflow_handler()
9966 static int perf_event_set_bpf_handler(struct perf_event *event, in perf_event_set_bpf_handler() argument
9970 if (event->overflow_handler_context) in perf_event_set_bpf_handler()
9974 if (event->prog) in perf_event_set_bpf_handler()
9980 if (event->attr.precise_ip && in perf_event_set_bpf_handler()
9982 (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY) || in perf_event_set_bpf_handler()
9983 event->attr.exclude_callchain_kernel || in perf_event_set_bpf_handler()
9984 event->attr.exclude_callchain_user)) { in perf_event_set_bpf_handler()
9997 event->prog = prog; in perf_event_set_bpf_handler()
9998 event->bpf_cookie = bpf_cookie; in perf_event_set_bpf_handler()
9999 event->orig_overflow_handler = READ_ONCE(event->overflow_handler); in perf_event_set_bpf_handler()
10000 WRITE_ONCE(event->overflow_handler, bpf_overflow_handler); in perf_event_set_bpf_handler()
10004 static void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
10006 struct bpf_prog *prog = event->prog; in perf_event_free_bpf_handler()
10011 WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler); in perf_event_free_bpf_handler()
10012 event->prog = NULL; in perf_event_free_bpf_handler()
10016 static int perf_event_set_bpf_handler(struct perf_event *event, in perf_event_set_bpf_handler() argument
10022 static void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
10028 * returns true if the event is a tracepoint, or a kprobe/uprobe created
10031 static inline bool perf_event_is_tracing(struct perf_event *event) in perf_event_is_tracing() argument
10033 if (event->pmu == &perf_tracepoint) in perf_event_is_tracing()
10036 if (event->pmu == &perf_kprobe) in perf_event_is_tracing()
10040 if (event->pmu == &perf_uprobe) in perf_event_is_tracing()
10046 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, in perf_event_set_bpf_prog() argument
10051 if (!perf_event_is_tracing(event)) in perf_event_set_bpf_prog()
10052 return perf_event_set_bpf_handler(event, prog, bpf_cookie); in perf_event_set_bpf_prog()
10054 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE; in perf_event_set_bpf_prog()
10055 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; in perf_event_set_bpf_prog()
10056 is_syscall_tp = is_syscall_trace_event(event->tp_event); in perf_event_set_bpf_prog()
10068 !(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) in perf_event_set_bpf_prog()
10072 int off = trace_event_get_offsets(event->tp_event); in perf_event_set_bpf_prog()
10078 return perf_event_attach_bpf_prog(event, prog, bpf_cookie); in perf_event_set_bpf_prog()
10081 void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
10083 if (!perf_event_is_tracing(event)) { in perf_event_free_bpf_prog()
10084 perf_event_free_bpf_handler(event); in perf_event_free_bpf_prog()
10087 perf_event_detach_bpf_prog(event); in perf_event_free_bpf_prog()
10096 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
10100 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, in perf_event_set_bpf_prog() argument
10106 void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
10128 perf_addr_filter_new(struct perf_event *event, struct list_head *filters) in perf_addr_filter_new() argument
10130 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu); in perf_addr_filter_new()
10157 static void perf_addr_filters_splice(struct perf_event *event, in perf_addr_filters_splice() argument
10163 if (!has_addr_filter(event)) in perf_addr_filters_splice()
10167 if (event->parent) in perf_addr_filters_splice()
10170 raw_spin_lock_irqsave(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
10172 list_splice_init(&event->addr_filters.list, &list); in perf_addr_filters_splice()
10174 list_splice(head, &event->addr_filters.list); in perf_addr_filters_splice()
10176 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
10202 * Update event's address range filters based on the
10205 static void perf_event_addr_filters_apply(struct perf_event *event) in perf_event_addr_filters_apply() argument
10207 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_apply()
10208 struct task_struct *task = READ_ONCE(event->ctx->task); in perf_event_addr_filters_apply()
10215 * We may observe TASK_TOMBSTONE, which means that the event tear-down in perf_event_addr_filters_apply()
10236 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_apply()
10237 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_apply()
10239 perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]); in perf_event_addr_filters_apply()
10241 event->addr_filter_ranges[count].start = filter->offset; in perf_event_addr_filters_apply()
10242 event->addr_filter_ranges[count].size = filter->size; in perf_event_addr_filters_apply()
10248 event->addr_filters_gen++; in perf_event_addr_filters_apply()
10258 perf_event_stop(event, 1); in perf_event_addr_filters_apply()
10312 perf_event_parse_addr_filter(struct perf_event *event, char *fstr, in perf_event_parse_addr_filter() argument
10339 filter = perf_addr_filter_new(event, filters); in perf_event_parse_addr_filter()
10398 * Make sure that it doesn't contradict itself or the event's in perf_event_parse_addr_filter()
10403 if (kernel && event->attr.exclude_kernel) in perf_event_parse_addr_filter()
10427 if (!event->ctx->task) in perf_event_parse_addr_filter()
10442 event->addr_filters.nr_file_filters++; in perf_event_parse_addr_filter()
10468 perf_event_set_addr_filter(struct perf_event *event, char *filter_str) in perf_event_set_addr_filter() argument
10477 lockdep_assert_held(&event->ctx->mutex); in perf_event_set_addr_filter()
10479 if (WARN_ON_ONCE(event->parent)) in perf_event_set_addr_filter()
10482 ret = perf_event_parse_addr_filter(event, filter_str, &filters); in perf_event_set_addr_filter()
10486 ret = event->pmu->addr_filters_validate(&filters); in perf_event_set_addr_filter()
10491 perf_addr_filters_splice(event, &filters); in perf_event_set_addr_filter()
10494 perf_event_for_each_child(event, perf_event_addr_filters_apply); in perf_event_set_addr_filter()
10502 event->addr_filters.nr_file_filters = 0; in perf_event_set_addr_filter()
10507 static int perf_event_set_filter(struct perf_event *event, void __user *arg) in perf_event_set_filter() argument
10517 if (perf_event_is_tracing(event)) { in perf_event_set_filter()
10518 struct perf_event_context *ctx = event->ctx; in perf_event_set_filter()
10528 * This can result in event getting moved to a different ctx, in perf_event_set_filter()
10532 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); in perf_event_set_filter()
10536 if (has_addr_filter(event)) in perf_event_set_filter()
10537 ret = perf_event_set_addr_filter(event, filter_str); in perf_event_set_filter()
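perf_event_set_filter() above accepts one string for two different backends: ftrace event filters for tracing events and address-range filters for PMUs that support them. A hedged tracepoint example; the field name, threshold and the tracefs path in the comment are illustrative and depend on the event:

```c
#include <linux/perf_event.h>
#include <sys/ioctl.h>

/* Only count kmalloc tracepoint hits whose request size exceeds 256
 * bytes.  Assumes the event was opened with PERF_TYPE_TRACEPOINT and
 * attr.config set to the id read from something like
 * /sys/kernel/tracing/events/kmem/kmalloc/id (path varies by setup). */
static int filter_large_kmalloc(int fd)
{
	return ioctl(fd, PERF_EVENT_IOC_SET_FILTER, "bytes_req > 256");
}
```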
10552 struct perf_event *event; in perf_swevent_hrtimer() local
10555 event = container_of(hrtimer, struct perf_event, hw.hrtimer); in perf_swevent_hrtimer()
10557 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_swevent_hrtimer()
10560 event->pmu->read(event); in perf_swevent_hrtimer()
10562 perf_sample_data_init(&data, 0, event->hw.last_period); in perf_swevent_hrtimer()
10565 if (regs && !perf_exclude_event(event, regs)) { in perf_swevent_hrtimer()
10566 if (!(event->attr.exclude_idle && is_idle_task(current))) in perf_swevent_hrtimer()
10567 if (__perf_event_overflow(event, 1, &data, regs)) in perf_swevent_hrtimer()
10571 period = max_t(u64, 10000, event->hw.sample_period); in perf_swevent_hrtimer()
10577 static void perf_swevent_start_hrtimer(struct perf_event *event) in perf_swevent_start_hrtimer() argument
10579 struct hw_perf_event *hwc = &event->hw; in perf_swevent_start_hrtimer()
10582 if (!is_sampling_event(event)) in perf_swevent_start_hrtimer()
10598 static void perf_swevent_cancel_hrtimer(struct perf_event *event) in perf_swevent_cancel_hrtimer() argument
10600 struct hw_perf_event *hwc = &event->hw; in perf_swevent_cancel_hrtimer()
10602 if (is_sampling_event(event)) { in perf_swevent_cancel_hrtimer()
10610 static void perf_swevent_init_hrtimer(struct perf_event *event) in perf_swevent_init_hrtimer() argument
10612 struct hw_perf_event *hwc = &event->hw; in perf_swevent_init_hrtimer()
10614 if (!is_sampling_event(event)) in perf_swevent_init_hrtimer()
10624 if (event->attr.freq) { in perf_swevent_init_hrtimer()
10625 long freq = event->attr.sample_freq; in perf_swevent_init_hrtimer()
10627 event->attr.sample_period = NSEC_PER_SEC / freq; in perf_swevent_init_hrtimer()
10628 hwc->sample_period = event->attr.sample_period; in perf_swevent_init_hrtimer()
10631 event->attr.freq = 0; in perf_swevent_init_hrtimer()
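perf_swevent_init_hrtimer() shows how hrtimer-backed software events handle frequency mode: the requested sample_freq is converted once into a fixed period of NSEC_PER_SEC / sample_freq and attr.freq is cleared, so the period is not re-adjusted afterwards. A sketch of the matching user-space attr setup (the field values are illustrative):

#include <string.h>
#include <linux/perf_event.h>

/* Request 4000 Hz sampling on an hrtimer-driven software event; the
 * kernel converts this to a fixed 250000 ns timer period. */
static void init_freq_sampling(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->type = PERF_TYPE_SOFTWARE;
	attr->config = PERF_COUNT_SW_CPU_CLOCK;
	attr->size = sizeof(*attr);
	attr->freq = 1;
	attr->sample_freq = 4000;
	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
}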
10636 * Software event: cpu wall time clock
10639 static void cpu_clock_event_update(struct perf_event *event) in cpu_clock_event_update() argument
10645 prev = local64_xchg(&event->hw.prev_count, now); in cpu_clock_event_update()
10646 local64_add(now - prev, &event->count); in cpu_clock_event_update()
10649 static void cpu_clock_event_start(struct perf_event *event, int flags) in cpu_clock_event_start() argument
10651 local64_set(&event->hw.prev_count, local_clock()); in cpu_clock_event_start()
10652 perf_swevent_start_hrtimer(event); in cpu_clock_event_start()
10655 static void cpu_clock_event_stop(struct perf_event *event, int flags) in cpu_clock_event_stop() argument
10657 perf_swevent_cancel_hrtimer(event); in cpu_clock_event_stop()
10658 cpu_clock_event_update(event); in cpu_clock_event_stop()
10661 static int cpu_clock_event_add(struct perf_event *event, int flags) in cpu_clock_event_add() argument
10664 cpu_clock_event_start(event, flags); in cpu_clock_event_add()
10665 perf_event_update_userpage(event); in cpu_clock_event_add()
10670 static void cpu_clock_event_del(struct perf_event *event, int flags) in cpu_clock_event_del() argument
10672 cpu_clock_event_stop(event, flags); in cpu_clock_event_del()
10675 static void cpu_clock_event_read(struct perf_event *event) in cpu_clock_event_read() argument
10677 cpu_clock_event_update(event); in cpu_clock_event_read()
10680 static int cpu_clock_event_init(struct perf_event *event) in cpu_clock_event_init() argument
10682 if (event->attr.type != PERF_TYPE_SOFTWARE) in cpu_clock_event_init()
10685 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) in cpu_clock_event_init()
10691 if (has_branch_stack(event)) in cpu_clock_event_init()
10694 perf_swevent_init_hrtimer(event); in cpu_clock_event_init()
10713 * Software event: task time clock
10716 static void task_clock_event_update(struct perf_event *event, u64 now) in task_clock_event_update() argument
10721 prev = local64_xchg(&event->hw.prev_count, now); in task_clock_event_update()
10723 local64_add(delta, &event->count); in task_clock_event_update()
10726 static void task_clock_event_start(struct perf_event *event, int flags) in task_clock_event_start() argument
10728 local64_set(&event->hw.prev_count, event->ctx->time); in task_clock_event_start()
10729 perf_swevent_start_hrtimer(event); in task_clock_event_start()
10732 static void task_clock_event_stop(struct perf_event *event, int flags) in task_clock_event_stop() argument
10734 perf_swevent_cancel_hrtimer(event); in task_clock_event_stop()
10735 task_clock_event_update(event, event->ctx->time); in task_clock_event_stop()
10738 static int task_clock_event_add(struct perf_event *event, int flags) in task_clock_event_add() argument
10741 task_clock_event_start(event, flags); in task_clock_event_add()
10742 perf_event_update_userpage(event); in task_clock_event_add()
10747 static void task_clock_event_del(struct perf_event *event, int flags) in task_clock_event_del() argument
10749 task_clock_event_stop(event, PERF_EF_UPDATE); in task_clock_event_del()
10752 static void task_clock_event_read(struct perf_event *event) in task_clock_event_read() argument
10755 u64 delta = now - event->ctx->timestamp; in task_clock_event_read()
10756 u64 time = event->ctx->time + delta; in task_clock_event_read()
10758 task_clock_event_update(event, time); in task_clock_event_read()
10761 static int task_clock_event_init(struct perf_event *event) in task_clock_event_init() argument
10763 if (event->attr.type != PERF_TYPE_SOFTWARE) in task_clock_event_init()
10766 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) in task_clock_event_init()
10772 if (has_branch_stack(event)) in task_clock_event_init()
10775 perf_swevent_init_hrtimer(event); in task_clock_event_init()
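Both clock events are ordinary counting events from user space: the cpu clock tracks local_clock() wall time while the event is scheduled, and the task clock accumulates ctx->time, i.e. time the task actually spent on a CPU. A minimal counting sketch using the raw syscall (there is no glibc wrapper); the busy loop is just an illustrative workload:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long ns = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;	/* or PERF_COUNT_SW_CPU_CLOCK */
	attr.size = sizeof(attr);
	attr.disabled = 1;

	/* pid = 0, cpu = -1: measure this task on any CPU. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile long i = 0; i < 100000000L; i++)
		;
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &ns, sizeof(ns));
	printf("task clock: %lld ns\n", ns);
	return 0;
}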
10806 static int perf_event_nop_int(struct perf_event *event, u64 value) in perf_event_nop_int() argument
10848 static int perf_event_idx_default(struct perf_event *event) in perf_event_idx_default() argument
11134 * is fast, provided a valid software event is supplied. in perf_pmu_register()
11188 static inline bool has_extended_regs(struct perf_event *event) in has_extended_regs() argument
11190 return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) || in has_extended_regs()
11191 (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK); in has_extended_regs()
11194 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) in perf_try_init_event() argument
11205 * if this is a sibling event, acquire the ctx->mutex to protect in perf_try_init_event()
11208 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) { in perf_try_init_event()
11213 ctx = perf_event_ctx_lock_nested(event->group_leader, in perf_try_init_event()
11218 event->pmu = pmu; in perf_try_init_event()
11219 ret = pmu->event_init(event); in perf_try_init_event()
11222 perf_event_ctx_unlock(event->group_leader, ctx); in perf_try_init_event()
11226 has_extended_regs(event)) in perf_try_init_event()
11230 event_has_any_exclude_flag(event)) in perf_try_init_event()
11233 if (ret && event->destroy) in perf_try_init_event()
11234 event->destroy(event); in perf_try_init_event()
11243 static struct pmu *perf_init_event(struct perf_event *event) in perf_init_event() argument
11252 if (event->parent && event->parent->pmu) { in perf_init_event()
11253 pmu = event->parent->pmu; in perf_init_event()
11254 ret = perf_try_init_event(pmu, event); in perf_init_event()
11263 type = event->attr.type; in perf_init_event()
11265 type = event->attr.config >> PERF_PMU_TYPE_SHIFT; in perf_init_event()
11270 event->attr.config &= PERF_HW_EVENT_MASK; in perf_init_event()
11279 if (event->attr.type != type && type != PERF_TYPE_RAW && in perf_init_event()
11283 ret = perf_try_init_event(pmu, event); in perf_init_event()
11284 if (ret == -ENOENT && event->attr.type != type && !extended_type) { in perf_init_event()
11285 type = event->attr.type; in perf_init_event()
11296 ret = perf_try_init_event(pmu, event); in perf_init_event()
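perf_init_event() resolves the PMU from attr.type; for PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE it also accepts an extended encoding in which the upper 32 bits of attr.config carry a specific PMU type (used on hybrid systems), with the low bits masked by PERF_HW_EVENT_MASK. A hedged sketch of building such a config from sysfs: the cpu_atom PMU name is an assumption, and PERF_PMU_TYPE_SHIFT / PERF_HW_EVENT_MASK are taken from recent <linux/perf_event.h> uapi headers.

#include <stdio.h>
#include <linux/perf_event.h>

/* Read a dynamic PMU type (e.g. /sys/bus/event_source/devices/cpu_atom/type)
 * and fold it into the top 32 bits of attr.config so a generic hardware
 * event is bound to that PMU. */
static __u64 hw_event_for_pmu(const char *type_path, __u64 hw_id)
{
	unsigned int pmu_type = 0;
	FILE *f = fopen(type_path, "r");

	if (f) {
		if (fscanf(f, "%u", &pmu_type) != 1)
			pmu_type = 0;
		fclose(f);
	}
	return ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT) |
	       (hw_id & PERF_HW_EVENT_MASK);
}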
11313 static void attach_sb_event(struct perf_event *event) in attach_sb_event() argument
11315 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in attach_sb_event()
11318 list_add_rcu(&event->sb_list, &pel->list); in attach_sb_event()
11329 static void account_pmu_sb_event(struct perf_event *event) in account_pmu_sb_event() argument
11331 if (is_sb_event(event)) in account_pmu_sb_event()
11332 attach_sb_event(event); in account_pmu_sb_event()
11335 static void account_event_cpu(struct perf_event *event, int cpu) in account_event_cpu() argument
11337 if (event->parent) in account_event_cpu()
11340 if (is_cgroup_event(event)) in account_event_cpu()
11365 static void account_event(struct perf_event *event) in account_event() argument
11369 if (event->parent) in account_event()
11372 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in account_event()
11374 if (event->attr.mmap || event->attr.mmap_data) in account_event()
11376 if (event->attr.build_id) in account_event()
11378 if (event->attr.comm) in account_event()
11380 if (event->attr.namespaces) in account_event()
11382 if (event->attr.cgroup) in account_event()
11384 if (event->attr.task) in account_event()
11386 if (event->attr.freq) in account_event()
11388 if (event->attr.context_switch) { in account_event()
11392 if (has_branch_stack(event)) in account_event()
11394 if (is_cgroup_event(event)) in account_event()
11396 if (event->attr.ksymbol) in account_event()
11398 if (event->attr.bpf_event) in account_event()
11400 if (event->attr.text_poke) in account_event()
11431 account_event_cpu(event, event->cpu); in account_event()
11433 account_pmu_sb_event(event); in account_event()
11437 * Allocate and initialize an event structure
11448 struct perf_event *event; in perf_event_alloc() local
11463 event = kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO, in perf_event_alloc()
11465 if (!event) in perf_event_alloc()
11473 group_leader = event; in perf_event_alloc()
11475 mutex_init(&event->child_mutex); in perf_event_alloc()
11476 INIT_LIST_HEAD(&event->child_list); in perf_event_alloc()
11478 INIT_LIST_HEAD(&event->event_entry); in perf_event_alloc()
11479 INIT_LIST_HEAD(&event->sibling_list); in perf_event_alloc()
11480 INIT_LIST_HEAD(&event->active_list); in perf_event_alloc()
11481 init_event_group(event); in perf_event_alloc()
11482 INIT_LIST_HEAD(&event->rb_entry); in perf_event_alloc()
11483 INIT_LIST_HEAD(&event->active_entry); in perf_event_alloc()
11484 INIT_LIST_HEAD(&event->addr_filters.list); in perf_event_alloc()
11485 INIT_HLIST_NODE(&event->hlist_entry); in perf_event_alloc()
11488 init_waitqueue_head(&event->waitq); in perf_event_alloc()
11489 event->pending_disable = -1; in perf_event_alloc()
11490 init_irq_work(&event->pending, perf_pending_event); in perf_event_alloc()
11492 mutex_init(&event->mmap_mutex); in perf_event_alloc()
11493 raw_spin_lock_init(&event->addr_filters.lock); in perf_event_alloc()
11495 atomic_long_set(&event->refcount, 1); in perf_event_alloc()
11496 event->cpu = cpu; in perf_event_alloc()
11497 event->attr = *attr; in perf_event_alloc()
11498 event->group_leader = group_leader; in perf_event_alloc()
11499 event->pmu = NULL; in perf_event_alloc()
11500 event->oncpu = -1; in perf_event_alloc()
11502 event->parent = parent_event; in perf_event_alloc()
11504 event->ns = get_pid_ns(task_active_pid_ns(current)); in perf_event_alloc()
11505 event->id = atomic64_inc_return(&perf_event_id); in perf_event_alloc()
11507 event->state = PERF_EVENT_STATE_INACTIVE; in perf_event_alloc()
11509 if (event->attr.sigtrap) in perf_event_alloc()
11510 atomic_set(&event->event_limit, 1); in perf_event_alloc()
11513 event->attach_state = PERF_ATTACH_TASK; in perf_event_alloc()
11519 event->hw.target = get_task_struct(task); in perf_event_alloc()
11522 event->clock = &local_clock; in perf_event_alloc()
11524 event->clock = parent_event->clock; in perf_event_alloc()
11534 event->prog = prog; in perf_event_alloc()
11535 event->orig_overflow_handler = in perf_event_alloc()
11542 event->overflow_handler = overflow_handler; in perf_event_alloc()
11543 event->overflow_handler_context = context; in perf_event_alloc()
11544 } else if (is_write_backward(event)) { in perf_event_alloc()
11545 event->overflow_handler = perf_event_output_backward; in perf_event_alloc()
11546 event->overflow_handler_context = NULL; in perf_event_alloc()
11548 event->overflow_handler = perf_event_output_forward; in perf_event_alloc()
11549 event->overflow_handler_context = NULL; in perf_event_alloc()
11552 perf_event__state_init(event); in perf_event_alloc()
11556 hwc = &event->hw; in perf_event_alloc()
11571 if (!has_branch_stack(event)) in perf_event_alloc()
11572 event->attr.branch_sample_type = 0; in perf_event_alloc()
11574 pmu = perf_init_event(event); in perf_event_alloc()
11589 if (event->attr.aux_output && in perf_event_alloc()
11596 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); in perf_event_alloc()
11601 err = exclusive_event_init(event); in perf_event_alloc()
11605 if (has_addr_filter(event)) { in perf_event_alloc()
11606 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters, in perf_event_alloc()
11609 if (!event->addr_filter_ranges) { in perf_event_alloc()
11618 if (event->parent) { in perf_event_alloc()
11619 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_alloc()
11622 memcpy(event->addr_filter_ranges, in perf_event_alloc()
11623 event->parent->addr_filter_ranges, in perf_event_alloc()
11629 event->addr_filters_gen = 1; in perf_event_alloc()
11632 if (!event->parent) { in perf_event_alloc()
11633 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { in perf_event_alloc()
11640 err = security_perf_event_alloc(event); in perf_event_alloc()
11645 account_event(event); in perf_event_alloc()
11647 return event; in perf_event_alloc()
11650 if (!event->parent) { in perf_event_alloc()
11651 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in perf_event_alloc()
11655 kfree(event->addr_filter_ranges); in perf_event_alloc()
11658 exclusive_event_destroy(event); in perf_event_alloc()
11661 if (is_cgroup_event(event)) in perf_event_alloc()
11662 perf_detach_cgroup(event); in perf_event_alloc()
11663 if (event->destroy) in perf_event_alloc()
11664 event->destroy(event); in perf_event_alloc()
11667 if (event->ns) in perf_event_alloc()
11668 put_pid_ns(event->ns); in perf_event_alloc()
11669 if (event->hw.target) in perf_event_alloc()
11670 put_task_struct(event->hw.target); in perf_event_alloc()
11671 kmem_cache_free(perf_event_cache, event); in perf_event_alloc()
11803 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) in perf_event_set_output() argument
11812 if (event == output_event) in perf_event_set_output()
11818 if (output_event->cpu != event->cpu) in perf_event_set_output()
11824 if (output_event->cpu == -1 && output_event->ctx != event->ctx) in perf_event_set_output()
11830 if (output_event->clock != event->clock) in perf_event_set_output()
11837 if (is_write_backward(output_event) != is_write_backward(event)) in perf_event_set_output()
11843 if (has_aux(event) && has_aux(output_event) && in perf_event_set_output()
11844 event->pmu != output_event->pmu) in perf_event_set_output()
11848 mutex_lock(&event->mmap_mutex); in perf_event_set_output()
11850 if (atomic_read(&event->mmap_count)) in perf_event_set_output()
11860 ring_buffer_attach(event, rb); in perf_event_set_output()
11864 mutex_unlock(&event->mmap_mutex); in perf_event_set_output()
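perf_event_set_output() backs the PERF_EVENT_IOC_SET_OUTPUT ioctl, which lets several events share one ring buffer subject to the checks above (same CPU or context, same clock, same buffer direction, and the redirected event must not have mmap()ed its own buffer). A sketch, assuming both fds are already open and the leader has been mmap()ed:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int share_ring_buffer(int leader_fd, int other_fd)
{
	/* other_fd now writes its records into leader_fd's ring buffer. */
	if (ioctl(other_fd, PERF_EVENT_IOC_SET_OUTPUT, leader_fd)) {
		perror("PERF_EVENT_IOC_SET_OUTPUT");
		return -1;
	}
	return 0;
}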
11879 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) in perf_event_set_clock() argument
11885 event->clock = &ktime_get_mono_fast_ns; in perf_event_set_clock()
11890 event->clock = &ktime_get_raw_fast_ns; in perf_event_set_clock()
11895 event->clock = &ktime_get_real_ns; in perf_event_set_clock()
11899 event->clock = &ktime_get_boottime_ns; in perf_event_set_clock()
11903 event->clock = &ktime_get_clocktai_ns; in perf_event_set_clock()
11910 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) in perf_event_set_clock()
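perf_event_set_clock() honors attr.use_clockid / attr.clockid: CLOCK_MONOTONIC, CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME and CLOCK_TAI are accepted, and the non-NMI-safe ones are rejected unless the PMU advertises PERF_PMU_CAP_NO_NMI. A sketch of requesting raw-monotonic timestamps so samples can be correlated with clock_gettime(CLOCK_MONOTONIC_RAW) readings:

#include <time.h>
#include <linux/perf_event.h>

static void use_raw_monotonic_clock(struct perf_event_attr *attr)
{
	attr->use_clockid = 1;
	attr->clockid = CLOCK_MONOTONIC_RAW;	/* mapped to ktime_get_raw_fast_ns() */
	attr->sample_type |= PERF_SAMPLE_TIME;	/* timestamp each sample */
}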
11979 * sys_perf_event_open - open a performance event, associate it to a task/cpu
11984 * @group_fd: group leader event fd
11985 * @flags: perf event open flags
11992 struct perf_event *event, *sibling; in SYSCALL_DEFINE5() local
12095 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, in SYSCALL_DEFINE5()
12097 if (IS_ERR(event)) { in SYSCALL_DEFINE5()
12098 err = PTR_ERR(event); in SYSCALL_DEFINE5()
12102 if (is_sampling_event(event)) { in SYSCALL_DEFINE5()
12103 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { in SYSCALL_DEFINE5()
12113 pmu = event->pmu; in SYSCALL_DEFINE5()
12116 err = perf_event_set_clock(event, attr.clockid); in SYSCALL_DEFINE5()
12122 event->event_caps |= PERF_EV_CAP_SOFTWARE; in SYSCALL_DEFINE5()
12125 if (is_software_event(event) && in SYSCALL_DEFINE5()
12128 * If the event is a sw event, but the group_leader in SYSCALL_DEFINE5()
12136 } else if (!is_software_event(event) && in SYSCALL_DEFINE5()
12141 * try to add a hardware event, move the whole group to in SYSCALL_DEFINE5()
12151 ctx = find_get_context(pmu, task, event); in SYSCALL_DEFINE5()
12158 * Look up the group leader (we will attach this event to it): in SYSCALL_DEFINE5()
12171 if (group_leader->clock != event->clock) in SYSCALL_DEFINE5()
12179 if (group_leader->cpu != event->cpu) in SYSCALL_DEFINE5()
12205 err = perf_event_set_output(event, output_event); in SYSCALL_DEFINE5()
12210 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, in SYSCALL_DEFINE5()
12225 * perf_install_in_context() call for this new event to in SYSCALL_DEFINE5()
12249 * if this new event wound up on the same ctx, if so in SYSCALL_DEFINE5()
12281 if (!perf_event_validate_size(event)) { in SYSCALL_DEFINE5()
12288 * Check if the @cpu we're creating an event for is online. in SYSCALL_DEFINE5()
12302 if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) { in SYSCALL_DEFINE5()
12309 * because we need to serialize with concurrent event creation. in SYSCALL_DEFINE5()
12311 if (!exclusive_event_installable(event, ctx)) { in SYSCALL_DEFINE5()
12360 * event. What we want here is event in the initial in SYSCALL_DEFINE5()
12371 * perf_install_in_context() which is the point the event is active and in SYSCALL_DEFINE5()
12374 perf_event__header_size(event); in SYSCALL_DEFINE5()
12375 perf_event__id_header_size(event); in SYSCALL_DEFINE5()
12377 event->owner = current; in SYSCALL_DEFINE5()
12379 perf_install_in_context(ctx, event, event->cpu); in SYSCALL_DEFINE5()
12392 list_add_tail(&event->owner_entry, &current->perf_event_list); in SYSCALL_DEFINE5()
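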
12397 * new event on the sibling_list. This ensures destruction in SYSCALL_DEFINE5()
12420 * and that will take care of freeing the event. in SYSCALL_DEFINE5()
12423 free_event(event); in SYSCALL_DEFINE5()
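The group_fd argument to sys_perf_event_open() attaches the new event to an existing leader; the special case visible above is a hardware event joining a group whose leader is a software event, which moves the whole group over to the hardware context. A sketch of creating such a group with the raw syscall (the event choices are illustrative):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_event(__u32 type, __u64 config, int group_fd)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.size = sizeof(attr);
	attr.config = config;

	return syscall(__NR_perf_event_open, &attr, 0 /* this task */,
		       -1 /* any cpu */, group_fd, PERF_FLAG_FD_CLOEXEC);
}

/* Software leader, then a hardware sibling that triggers the group move: */
/*   int leader = open_event(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS, -1); */
/*   int cycles = open_event(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES, leader); */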
12440 * @overflow_handler: callback to trigger when we hit the event
12450 struct perf_event *event; in perf_event_create_kernel_counter() local
12460 event = perf_event_alloc(attr, cpu, task, NULL, NULL, in perf_event_create_kernel_counter()
12462 if (IS_ERR(event)) { in perf_event_create_kernel_counter()
12463 err = PTR_ERR(event); in perf_event_create_kernel_counter()
12468 event->owner = TASK_TOMBSTONE; in perf_event_create_kernel_counter()
12473 ctx = find_get_context(event->pmu, task, event); in perf_event_create_kernel_counter()
12488 * Check if the @cpu we're creating an event for is online. in perf_event_create_kernel_counter()
12501 if (!exclusive_event_installable(event, ctx)) { in perf_event_create_kernel_counter()
12506 perf_install_in_context(ctx, event, event->cpu); in perf_event_create_kernel_counter()
12510 return event; in perf_event_create_kernel_counter()
12517 free_event(event); in perf_event_create_kernel_counter()
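perf_event_create_kernel_counter() is the in-kernel counterpart of the syscall: it allocates and installs an event whose owner is TASK_TOMBSTONE, i.e. a kernel event as seen earlier. A hedged module sketch (hypothetical, not part of this file) counting instructions on CPU 0:

#include <linux/module.h>
#include <linux/err.h>
#include <linux/perf_event.h>

static struct perf_event *kev;

static int __init kcounter_init(void)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_INSTRUCTIONS,
		.size	= sizeof(attr),
		.pinned	= 1,
	};

	/* cpu 0, all tasks, no overflow handler: a plain counting event. */
	kev = perf_event_create_kernel_counter(&attr, 0, NULL, NULL, NULL);
	return IS_ERR(kev) ? PTR_ERR(kev) : 0;
}

static void __exit kcounter_exit(void)
{
	u64 enabled, running;

	pr_info("instructions: %llu\n",
		perf_event_read_value(kev, &enabled, &running));
	perf_event_release_kernel(kev);
}

module_init(kcounter_init);
module_exit(kcounter_exit);
MODULE_LICENSE("GPL");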
12527 struct perf_event *event, *tmp; in perf_pmu_migrate_context() local
12538 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, in perf_pmu_migrate_context()
12540 perf_remove_from_context(event, 0); in perf_pmu_migrate_context()
12541 unaccount_event_cpu(event, src_cpu); in perf_pmu_migrate_context()
12543 list_add(&event->migrate_entry, &events); in perf_pmu_migrate_context()
12559 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { in perf_pmu_migrate_context()
12560 if (event->group_leader == event) in perf_pmu_migrate_context()
12563 list_del(&event->migrate_entry); in perf_pmu_migrate_context()
12564 if (event->state >= PERF_EVENT_STATE_OFF) in perf_pmu_migrate_context()
12565 event->state = PERF_EVENT_STATE_INACTIVE; in perf_pmu_migrate_context()
12566 account_event_cpu(event, dst_cpu); in perf_pmu_migrate_context()
12567 perf_install_in_context(dst_ctx, event, dst_cpu); in perf_pmu_migrate_context()
12575 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { in perf_pmu_migrate_context()
12576 list_del(&event->migrate_entry); in perf_pmu_migrate_context()
12577 if (event->state >= PERF_EVENT_STATE_OFF) in perf_pmu_migrate_context()
12578 event->state = PERF_EVENT_STATE_INACTIVE; in perf_pmu_migrate_context()
12579 account_event_cpu(event, dst_cpu); in perf_pmu_migrate_context()
12580 perf_install_in_context(dst_ctx, event, dst_cpu); in perf_pmu_migrate_context()
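perf_pmu_migrate_context() exists for PMUs whose events are tied to one management CPU (uncore-style PMUs): on CPU hot-unplug the driver moves every event from the outgoing CPU's context to a surviving CPU. A hedged sketch of such a hotplug teardown callback; the example_uncore structure and its active_cpu field are hypothetical:

#include <linux/cpumask.h>
#include <linux/perf_event.h>

struct example_uncore {
	struct pmu pmu;
	unsigned int active_cpu;
};

static struct example_uncore example_uncore_pmu;

static int example_uncore_cpu_offline(unsigned int cpu)
{
	unsigned int target;

	if (cpu != example_uncore_pmu.active_cpu)
		return 0;

	/* Pick any other online CPU and hand the events over to it. */
	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&example_uncore_pmu.pmu, cpu, target);
	example_uncore_pmu.active_cpu = target;
	return 0;
}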
12613 perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx) in perf_event_exit_event() argument
12615 struct perf_event *parent_event = event->parent; in perf_event_exit_event()
12635 perf_remove_from_context(event, detach_flags); in perf_event_exit_event()
12638 if (event->state > PERF_EVENT_STATE_EXIT) in perf_event_exit_event()
12639 perf_event_set_state(event, PERF_EVENT_STATE_EXIT); in perf_event_exit_event()
12651 free_event(event); in perf_event_exit_event()
12659 perf_event_wakeup(event); in perf_event_exit_event()
12724 * When a child task exits, feed back event values to parent events.
12731 struct perf_event *event, *tmp; in perf_event_exit_task() local
12735 list_for_each_entry_safe(event, tmp, &child->perf_event_list, in perf_event_exit_task()
12737 list_del_init(&event->owner_entry); in perf_event_exit_task()
12744 smp_store_release(&event->owner, NULL); in perf_event_exit_task()
12760 static void perf_free_event(struct perf_event *event, in perf_free_event() argument
12763 struct perf_event *parent = event->parent; in perf_free_event()
12769 list_del_init(&event->child_list); in perf_free_event()
12775 perf_group_detach(event); in perf_free_event()
12776 list_del_event(event, ctx); in perf_free_event()
12778 free_event(event); in perf_free_event()
12791 struct perf_event *event, *tmp; in perf_event_free_task() local
12812 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) in perf_event_free_task()
12813 perf_free_event(event, ctx); in perf_event_free_task()
12826 * _free_event()'s put_task_struct(event->hw.target) will be a in perf_event_free_task()
12866 const struct perf_event_attr *perf_event_attrs(struct perf_event *event) in perf_event_attrs() argument
12868 if (!event) in perf_event_attrs()
12871 return &event->attr; in perf_event_attrs()
12875 * Inherit an event from parent task to child task.
12941 * Make the child state follow the state of the parent event, in inherit_event()
12980 * Link this into the parent event's child list in inherit_event()
12989 * Inherits an event group.
13031 * Creates the child task context and tries to inherit the event-group.
13034 * inherited_all set when we 'fail' to inherit an orphaned event; this is
13042 inherit_task_group(struct perf_event *event, struct task_struct *parent, in inherit_task_group() argument
13050 if (!event->attr.inherit || in inherit_task_group()
13051 (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) || in inherit_task_group()
13053 (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) { in inherit_task_group()
13073 ret = inherit_group(event, parent, parent_ctx, in inherit_task_group()
13090 struct perf_event *event; in perf_event_init_context() local
13124 perf_event_groups_for_each(event, &parent_ctx->pinned_groups) { in perf_event_init_context()
13125 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
13141 perf_event_groups_for_each(event, &parent_ctx->flexible_groups) { in perf_event_init_context()
13142 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
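The inheritance code above is what makes attr.inherit work: counters are copied to children created by fork() (subject to the inherit_thread and sigtrap checks in inherit_task_group()), and when a child exits its values are fed back into the parent event, so a single read() on the parent fd covers the whole subtree. A sketch, reusing the raw-syscall pattern from the earlier clock example:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.size = sizeof(attr);
	attr.inherit = 1;		/* children created after this are counted too */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	if (fork() == 0) {		/* child: inherited counter keeps running */
		for (volatile long i = 0; i < 50000000L; i++)
			;
		_exit(0);
	}
	wait(NULL);			/* child's count is folded back on exit */

	read(fd, &count, sizeof(count));
	printf("parent + child task clock: %lld ns\n", count);
	return 0;
}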
13247 struct perf_event *event; in __perf_event_exit_context() local
13251 list_for_each_entry(event, &ctx->event_list, event_entry) in __perf_event_exit_context()
13252 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); in __perf_event_exit_context()