Lines matching full:event in arch/powerpc/perf/core-fsl-emb.c

3  * Performance event support - Freescale Embedded Performance Monitor
23 struct perf_event *event[MAX_HWEVENTS]; member
175 static void fsl_emb_pmu_read(struct perf_event *event) in fsl_emb_pmu_read() argument
179 if (event->hw.state & PERF_HES_STOPPED) in fsl_emb_pmu_read()
188 prev = local64_read(&event->hw.prev_count); in fsl_emb_pmu_read()
190 val = read_pmc(event->hw.idx); in fsl_emb_pmu_read()
191 } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); in fsl_emb_pmu_read()
195 local64_add(delta, &event->count); in fsl_emb_pmu_read()
196 local64_sub(delta, &event->hw.period_left); in fsl_emb_pmu_read()
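
The fragments from fsl_emb_pmu_read() show the standard lock-free counter update: keep re-reading the hardware counter until prev_count can be swapped atomically, then fold the wrapped delta into both the event count and the remaining sample period. A sketch reconstructed from the matched lines; the 32-bit mask on the delta is an assumption based on the counter width used elsewhere in the file, and all sketches below assume the file's kernel context (<linux/perf_event.h> and the driver's own read_pmc()/write_pmc() helpers).

static void fsl_emb_pmu_read(struct perf_event *event)
{
    s64 val, delta, prev;

    if (event->hw.state & PERF_HES_STOPPED)
        return;

    /*
     * Re-read until prev_count can be published atomically; this
     * tolerates racing with the overflow interrupt on the same CPU.
     */
    do {
        prev = local64_read(&event->hw.prev_count);
        val = read_pmc(event->hw.idx);
    } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

    /* Counters are 32 bits wide (assumed), so mask the wrapped difference. */
    delta = (val - prev) & 0xfffffffful;
    local64_add(delta, &event->count);
    local64_sub(delta, &event->hw.period_left);
}

The cmpxchg loop is what lets read() run without freezing the PMU: if an interrupt updates prev_count between the read and the swap, the swap fails and the loop retries.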
268 struct perf_event *event; in collect_events() local
276 for_each_sibling_event(event, group) { in collect_events()
277 if (!is_software_event(event) && in collect_events()
278 event->state != PERF_EVENT_STATE_OFF) { in collect_events()
281 ctrs[n] = event; in collect_events()
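
collect_events() gathers the group leader's hardware siblings that are not OFF into ctrs[], so event_init can check whether the whole group fits on the counters. A sketch consistent with the fragments; the max_count bound and the handling of the leader itself are assumptions:

static int collect_events(struct perf_event *group, int max_count,
                          struct perf_event *ctrs[])
{
    int n = 0;
    struct perf_event *event;

    /* Count the leader too if it is a hardware event (assumption). */
    if (!is_software_event(group)) {
        if (n >= max_count)
            return -1;
        ctrs[n++] = group;
    }

    for_each_sibling_event(event, group) {
        if (!is_software_event(event) &&
            event->state != PERF_EVENT_STATE_OFF) {
            if (n >= max_count)
                return -1;
            ctrs[n] = event;
            n++;
        }
    }
    return n;
}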
289 static int fsl_emb_pmu_add(struct perf_event *event, int flags) in fsl_emb_pmu_add() argument
297 perf_pmu_disable(event->pmu); in fsl_emb_pmu_add()
300 if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) in fsl_emb_pmu_add()
308 if (cpuhw->event[i]) in fsl_emb_pmu_add()
317 event->hw.idx = i; in fsl_emb_pmu_add()
318 cpuhw->event[i] = event; in fsl_emb_pmu_add()
322 if (event->hw.sample_period) { in fsl_emb_pmu_add()
323 s64 left = local64_read(&event->hw.period_left); in fsl_emb_pmu_add()
327 local64_set(&event->hw.prev_count, val); in fsl_emb_pmu_add()
330 event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; in fsl_emb_pmu_add()
333 event->hw.state &= ~(PERF_HES_STOPPED | PERF_HES_UPTODATE); in fsl_emb_pmu_add()
337 perf_event_update_userpage(event); in fsl_emb_pmu_add()
339 write_pmlcb(i, event->hw.config >> 32); in fsl_emb_pmu_add()
340 write_pmlca(i, event->hw.config_base); in fsl_emb_pmu_add()
345 perf_pmu_enable(event->pmu); in fsl_emb_pmu_add()
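
fsl_emb_pmu_add() claims a counter slot, seeds the counter from period_left, and programs the per-counter control registers (PMLCA/PMLCB). Restricted events may only use the first n_restricted counters, which is why the slot search is capped when FSL_EMB_EVENT_RESTRICTED is set. A condensed sketch; the top-down search direction, the per-CPU accessor, and the -EAGAIN error code are assumptions:

static int fsl_emb_pmu_add(struct perf_event *event, int flags)
{
    struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
    int num_counters = ppmu->n_counter;
    int ret = -EAGAIN;    /* assumed error code */
    u64 val = 0;
    int i;

    perf_pmu_disable(event->pmu);

    if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
        num_counters = ppmu->n_restricted;

    /* Search top-down so restricted-capable counters stay free longer. */
    for (i = num_counters - 1; i >= 0; i--)
        if (!cpuhw->event[i])
            break;
    if (i < 0)
        goto out;

    event->hw.idx = i;
    cpuhw->event[i] = event;

    /* Seed the counter so it overflows after period_left more events. */
    if (event->hw.sample_period) {
        s64 left = local64_read(&event->hw.period_left);
        if (left < 0x80000000L)
            val = 0x80000000L - left;
    }
    local64_set(&event->hw.prev_count, val);

    if (!(flags & PERF_EF_START)) {
        event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        val = 0;
    } else {
        event->hw.state &= ~(PERF_HES_STOPPED | PERF_HES_UPTODATE);
    }

    write_pmc(i, val);
    perf_event_update_userpage(event);

    write_pmlcb(i, event->hw.config >> 32);    /* upper half of config */
    write_pmlca(i, event->hw.config_base);     /* event select + filters */

    ret = 0;
out:
    perf_pmu_enable(event->pmu);
    return ret;
}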
350 static void fsl_emb_pmu_del(struct perf_event *event, int flags) in fsl_emb_pmu_del() argument
353 int i = event->hw.idx; in fsl_emb_pmu_del()
355 perf_pmu_disable(event->pmu); in fsl_emb_pmu_del()
359 fsl_emb_pmu_read(event); in fsl_emb_pmu_del()
363 WARN_ON(event != cpuhw->event[event->hw.idx]); in fsl_emb_pmu_del()
369 cpuhw->event[i] = NULL; in fsl_emb_pmu_del()
370 event->hw.idx = -1; in fsl_emb_pmu_del()
373 * TODO: if at least one restricted event exists, and we in fsl_emb_pmu_del()
376 * a non-restricted event, migrate that event to the in fsl_emb_pmu_del()
383 perf_pmu_enable(event->pmu); in fsl_emb_pmu_del()
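
Deletion is the mirror image: fold in the final count with fsl_emb_pmu_read(), quiesce the counter's control registers, and release the slot by clearing cpuhw->event[i] and setting idx to -1. The TODO fragments record a known scheduling gap: a stopped restricted event is not migrated onto a counter freed here. A sketch; the register-clearing order is an assumption:

static void fsl_emb_pmu_del(struct perf_event *event, int flags)
{
    struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
    int i = event->hw.idx;

    perf_pmu_disable(event->pmu);
    if (i < 0)
        goto out;

    fsl_emb_pmu_read(event);    /* capture the final delta */

    WARN_ON(event != cpuhw->event[event->hw.idx]);

    /* Quiesce the counter before handing the slot back. */
    write_pmlca(i, 0);
    write_pmlcb(i, 0);
    write_pmc(i, 0);

    cpuhw->event[i] = NULL;
    event->hw.idx = -1;
out:
    perf_pmu_enable(event->pmu);
}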
387 static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags) in fsl_emb_pmu_start() argument
393 if (event->hw.idx < 0 || !event->hw.sample_period) in fsl_emb_pmu_start()
396 if (!(event->hw.state & PERF_HES_STOPPED)) in fsl_emb_pmu_start()
400 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); in fsl_emb_pmu_start()
403 perf_pmu_disable(event->pmu); in fsl_emb_pmu_start()
405 event->hw.state = 0; in fsl_emb_pmu_start()
406 left = local64_read(&event->hw.period_left); in fsl_emb_pmu_start()
410 write_pmc(event->hw.idx, val); in fsl_emb_pmu_start()
412 perf_event_update_userpage(event); in fsl_emb_pmu_start()
413 perf_pmu_enable(event->pmu); in fsl_emb_pmu_start()
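
Start applies only to sampling events that already own a counter. It clears the STOPPED/UPTODATE bits and reprograms the counter so the next overflow fires after period_left events: with a 32-bit counter that interrupts on the sign bit, that means writing 0x80000000 - left. Sketch; the irq save/restore bracket is an assumption:

static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
{
    unsigned long flags;
    unsigned long val = 0;
    s64 left;

    if (event->hw.idx < 0 || !event->hw.sample_period)
        return;
    if (!(event->hw.state & PERF_HES_STOPPED))
        return;
    if (ef_flags & PERF_EF_RELOAD)
        WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

    local_irq_save(flags);
    perf_pmu_disable(event->pmu);

    event->hw.state = 0;
    left = local64_read(&event->hw.period_left);
    if (left < 0x80000000L)
        val = 0x80000000L - left;    /* overflow after 'left' events */
    write_pmc(event->hw.idx, val);

    perf_event_update_userpage(event);
    perf_pmu_enable(event->pmu);
    local_irq_restore(flags);
}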
417 static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags) in fsl_emb_pmu_stop() argument
421 if (event->hw.idx < 0 || !event->hw.sample_period) in fsl_emb_pmu_stop()
424 if (event->hw.state & PERF_HES_STOPPED) in fsl_emb_pmu_stop()
428 perf_pmu_disable(event->pmu); in fsl_emb_pmu_stop()
430 fsl_emb_pmu_read(event); in fsl_emb_pmu_stop()
431 event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; in fsl_emb_pmu_stop()
432 write_pmc(event->hw.idx, 0); in fsl_emb_pmu_stop()
434 perf_event_update_userpage(event); in fsl_emb_pmu_stop()
435 perf_pmu_enable(event->pmu); in fsl_emb_pmu_stop()
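
Stop is simpler: fold the current hardware value into the event via fsl_emb_pmu_read(), mark it STOPPED and UPTODATE, and zero the counter so it cannot reach the overflow threshold while parked. Sketch, with the same assumed irq bracket as above:

static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
{
    unsigned long flags;

    if (event->hw.idx < 0 || !event->hw.sample_period)
        return;
    if (event->hw.state & PERF_HES_STOPPED)
        return;

    local_irq_save(flags);
    perf_pmu_disable(event->pmu);

    fsl_emb_pmu_read(event);    /* final count is now up to date */
    event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
    write_pmc(event->hw.idx, 0);    /* cannot overflow while stopped */

    perf_event_update_userpage(event);
    perf_pmu_enable(event->pmu);
    local_irq_restore(flags);
}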
442 static void hw_perf_event_destroy(struct perf_event *event) in hw_perf_event_destroy() argument
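
hw_perf_event_destroy() is installed as event->destroy by event_init (see line 581 below) and releases the PMU hardware once the last event is torn down. The powerpc perf drivers typically pair a refcount with a reservation mutex for this; a sketch under that assumption (num_events, pmc_reserve_mutex, and release_pmc_hardware() are assumed names following that convention):

static void hw_perf_event_destroy(struct perf_event *event)
{
    /* Drop the reference; take the mutex only for the 1 -> 0 edge. */
    if (!atomic_add_unless(&num_events, -1, 1)) {
        mutex_lock(&pmc_reserve_mutex);
        if (atomic_dec_return(&num_events) == 0)
            release_pmc_hardware();
        mutex_unlock(&pmc_reserve_mutex);
    }
}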
482 static int fsl_emb_pmu_event_init(struct perf_event *event) in fsl_emb_pmu_event_init() argument
497 switch (event->attr.type) { in fsl_emb_pmu_event_init()
499 ev = event->attr.config; in fsl_emb_pmu_event_init()
506 err = hw_perf_cache_event(event->attr.config, &ev); in fsl_emb_pmu_event_init()
512 ev = event->attr.config; in fsl_emb_pmu_event_init()
519 event->hw.config = ppmu->xlate_event(ev); in fsl_emb_pmu_event_init()
520 if (!(event->hw.config & FSL_EMB_EVENT_VALID)) in fsl_emb_pmu_event_init()
525 * other hardware events in the group. We assume the event in fsl_emb_pmu_event_init()
529 if (event->group_leader != event) { in fsl_emb_pmu_event_init()
530 n = collect_events(event->group_leader, in fsl_emb_pmu_event_init()
536 if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) { in fsl_emb_pmu_event_init()
547 event->hw.idx = -1; in fsl_emb_pmu_event_init()
549 event->hw.config_base = PMLCA_CE | PMLCA_FCM1 | in fsl_emb_pmu_event_init()
552 if (event->attr.exclude_user) in fsl_emb_pmu_event_init()
553 event->hw.config_base |= PMLCA_FCU; in fsl_emb_pmu_event_init()
554 if (event->attr.exclude_kernel) in fsl_emb_pmu_event_init()
555 event->hw.config_base |= PMLCA_FCS; in fsl_emb_pmu_event_init()
556 if (event->attr.exclude_idle) in fsl_emb_pmu_event_init()
559 event->hw.last_period = event->hw.sample_period; in fsl_emb_pmu_event_init()
560 local64_set(&event->hw.period_left, event->hw.last_period); in fsl_emb_pmu_event_init()
581 event->destroy = hw_perf_event_destroy; in fsl_emb_pmu_event_init()
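
event_init maps the generic attr onto a hardware event code (generic table, cache map, or raw), validates it through ppmu->xlate_event(), checks that the group still fits on the counters, and assembles config_base from the PMLCA control bits. Note the exclude handling visible in the fragments: user and kernel filtering map to PMLCA freeze bits, while exclude_idle cannot be honoured in hardware, so the source rejects it after line 556. A condensed sketch; the ppmu field names and the error codes are assumptions where the fragments do not show them, and the restricted-counter feasibility check at line 536 plus the PMU reservation refcounting are elided for brevity:

static int fsl_emb_pmu_event_init(struct perf_event *event)
{
    struct perf_event *events[MAX_HWEVENTS];
    int n = 0, err;
    u64 ev;

    switch (event->attr.type) {
    case PERF_TYPE_HARDWARE:
        ev = event->attr.config;
        if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
            return -EOPNOTSUPP;
        ev = ppmu->generic_events[ev];
        break;
    case PERF_TYPE_HW_CACHE:
        err = hw_perf_cache_event(event->attr.config, &ev);
        if (err)
            return err;
        break;
    case PERF_TYPE_RAW:
        ev = event->attr.config;
        break;
    default:
        return -ENOENT;
    }

    event->hw.config = ppmu->xlate_event(ev);
    if (!(event->hw.config & FSL_EMB_EVENT_VALID))
        return -EINVAL;

    /* The event is not yet on its leader's sibling list. */
    if (event->group_leader != event) {
        n = collect_events(event->group_leader,
                           ppmu->n_counter - 1, events);
        if (n < 0)
            return -EINVAL;
    }

    event->hw.idx = -1;
    event->hw.config_base = PMLCA_CE | PMLCA_FCM1 |
                            (u32)((ev << 16) & PMLCA_EVENT_MASK);

    if (event->attr.exclude_user)
        event->hw.config_base |= PMLCA_FCU;    /* freeze in user mode */
    if (event->attr.exclude_kernel)
        event->hw.config_base |= PMLCA_FCS;    /* freeze in supervisor */
    if (event->attr.exclude_idle)
        return -ENOTSUPP;                      /* cannot filter idle */

    event->hw.last_period = event->hw.sample_period;
    local64_set(&event->hw.period_left, event->hw.last_period);

    event->destroy = hw_perf_event_destroy;
    return 0;
}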
602 static void record_and_restart(struct perf_event *event, unsigned long val, in record_and_restart() argument
605 u64 period = event->hw.sample_period; in record_and_restart()
609 if (event->hw.state & PERF_HES_STOPPED) { in record_and_restart()
610 write_pmc(event->hw.idx, 0); in record_and_restart()
615 prev = local64_read(&event->hw.prev_count); in record_and_restart()
617 local64_add(delta, &event->count); in record_and_restart()
620 * See if the total period for this event has expired, in record_and_restart()
624 left = local64_read(&event->hw.period_left) - delta; in record_and_restart()
631 event->hw.last_period = event->hw.sample_period; in record_and_restart()
637 write_pmc(event->hw.idx, val); in record_and_restart()
638 local64_set(&event->hw.prev_count, val); in record_and_restart()
639 local64_set(&event->hw.period_left, left); in record_and_restart()
640 perf_event_update_userpage(event); in record_and_restart()
648 perf_sample_data_init(&data, 0, event->hw.last_period); in record_and_restart()
650 if (perf_event_overflow(event, &data, regs)) in record_and_restart()
651 fsl_emb_pmu_stop(event, 0); in record_and_restart()
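
record_and_restart() is the overflow path: compute the wrapped delta since prev_count, decide whether the sampling period has expired, reload the counter for the next period, and only then emit a sample; if perf_event_overflow() asks for throttling, the event is stopped. A sketch reconstructed from the fragments (the 32-bit delta mask is the same assumption as in fsl_emb_pmu_read()):

static void record_and_restart(struct perf_event *event, unsigned long val,
                               struct pt_regs *regs)
{
    u64 period = event->hw.sample_period;
    s64 prev, delta, left;
    int record = 0;

    if (event->hw.state & PERF_HES_STOPPED) {
        write_pmc(event->hw.idx, 0);    /* spurious: keep it quiet */
        return;
    }

    prev = local64_read(&event->hw.prev_count);
    delta = (val - prev) & 0xfffffffful;
    local64_add(delta, &event->count);

    /*
     * See if the total period for this event has expired,
     * and update for the next period.
     */
    val = 0;
    left = local64_read(&event->hw.period_left) - delta;
    if (period) {
        if (left <= 0) {
            left += period;
            if (left <= 0)
                left = period;
            record = 1;
            event->hw.last_period = event->hw.sample_period;
        }
        if (left < 0x80000000LL)
            val = 0x80000000LL - left;
    }

    write_pmc(event->hw.idx, val);
    local64_set(&event->hw.prev_count, val);
    local64_set(&event->hw.period_left, left);
    perf_event_update_userpage(event);

    if (record) {
        struct perf_sample_data data;

        perf_sample_data_init(&data, 0, event->hw.last_period);
        if (perf_event_overflow(event, &data, regs))
            fsl_emb_pmu_stop(event, 0);
    }
}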
659 struct perf_event *event; in perf_event_interrupt() local
671 event = cpuhw->event[i]; in perf_event_interrupt()
675 if (event) { in perf_event_interrupt()
676 /* event has overflowed */ in perf_event_interrupt()
678 record_and_restart(event, val, regs); in perf_event_interrupt()
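
The interrupt handler scans every counter and dispatches each overflowed one (sign bit set, per the reload scheme above) to record_and_restart(); an overflowed counter with no owner is simply reset. Sketch; the sign-bit test and the reset of ownerless counters are inferences from the reload scheme, and the end-of-interrupt unfreeze (MSR[PMM] handling on these cores) is elided:

static void perf_event_interrupt(struct pt_regs *regs)
{
    struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
    struct perf_event *event;
    unsigned long val;
    int i;

    for (i = 0; i < ppmu->n_counter; ++i) {
        event = cpuhw->event[i];

        val = read_pmc(i);
        if ((int)val < 0) {    /* sign bit set: counter overflowed */
            if (event) {
                /* event has overflowed */
                record_and_restart(event, val, regs);
            } else {
                /* No owner: reset the counter just in case. */
                write_pmc(i, 0);
            }
        }
    }
}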