Lines matching "event" and "name" in drivers/perf/thunderx2_pmu.c (ThunderX2 uncore PMU driver)
1 // SPDX-License-Identifier: GPL-2.0
28 #define GET_EVENTID(ev, mask) ((ev->hw.config) & mask)
29 #define GET_COUNTERID(ev, mask) ((ev->hw.idx) & mask)
31 * Event id is encoded in bits [5:1] of a byte,
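/*
 * Illustrative example, not part of this file: with the masks this driver
 * uses for L3C/DMC (events_mask = 0x1f, counters_mask = 0x3, set further
 * below), an event with hw.config = 0x0a running on counter hw.idx = 2 gives
 *   GET_EVENTID(event, 0x1f)  == 0x0a   (5-bit event id)
 *   GET_COUNTERID(event, 0x3) == 0x2    (counter index 0..3)
 */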
49 /* L3C event IDs */
60 /* DMC event IDs */
88 * L3C have 4 32-bit counters and the CCPI2 has 8 64-bit counters.
94 char *name;
111 void (*init_cntr_base)(struct perf_event *event,
113 void (*stop_event)(struct perf_event *event);
114 void (*start_event)(struct perf_event *event, int flags);
137 TX2_PMU_FORMAT_ATTR(event, event, "config:0-4");
138 TX2_PMU_FORMAT_ATTR(event_ccpi2, event, "config:0-9");
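/*
 * Illustrative usage, with an assumed PMU instance name: "config:0-4" tells
 * perf that the event id lives in attr.config bits [4:0] (5 bits for
 * L3C/DMC); "config:0-9" widens it to 10 bits for CCPI2. From userspace,
 * something like:
 *   perf stat -a -e uncore_l3c_0/event=0x1/ sleep 1
 */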
156 .name = "format",
161 .name = "format",
166 .name = "format",
171 * sysfs event attributes
179 return sysfs_emit(buf, "event=0x%lx\n", (unsigned long) eattr->var);
182 #define TX2_EVENT_ATTR(name, config) \
183 PMU_EVENT_ATTR(name, tx2_pmu_event_attr_##name, \
234 .name = "events",
239 .name = "events",
244 .name = "events",
257 return cpumap_print_to_pagebuf(true, buf, cpumask_of(tx2_pmu->cpu));
308 counter = find_first_zero_bit(tx2_pmu->active_counters,
309 tx2_pmu->max_counters);
310 if (counter == tx2_pmu->max_counters)
311 return -ENOSPC;
313 set_bit(counter, tx2_pmu->active_counters);
319 clear_bit(counter, tx2_pmu->active_counters);
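/*
 * A minimal sketch, not from this file, of the bitmap-allocator idiom that
 * alloc_counter()/free_counter() above implement; all names here are
 * hypothetical:
 */
#include <linux/bitmap.h>
#include <linux/errno.h>

#define NR_CNTRS	4
static DECLARE_BITMAP(example_active, NR_CNTRS);

static int example_alloc_counter(void)
{
	/* find the lowest free counter and claim it, or report exhaustion */
	int idx = find_first_zero_bit(example_active, NR_CNTRS);

	if (idx == NR_CNTRS)
		return -ENOSPC;
	set_bit(idx, example_active);
	return idx;
}

static void example_free_counter(int idx)
{
	clear_bit(idx, example_active);
}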
322 static void init_cntr_base_l3c(struct perf_event *event,
325 struct hw_perf_event *hwc = &event->hw;
328 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
329 cmask = tx2_pmu->counters_mask;
332 hwc->config_base = (unsigned long)tx2_pmu->base
333 + L3C_COUNTER_CTL + (8 * GET_COUNTERID(event, cmask));
334 hwc->event_base = (unsigned long)tx2_pmu->base
335 + L3C_COUNTER_DATA + (8 * GET_COUNTERID(event, cmask));
338 static void init_cntr_base_dmc(struct perf_event *event,
341 struct hw_perf_event *hwc = &event->hw;
344 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
345 cmask = tx2_pmu->counters_mask;
347 hwc->config_base = (unsigned long)tx2_pmu->base
350 hwc->event_base = (unsigned long)tx2_pmu->base
351 + DMC_COUNTER_DATA + (0xc * GET_COUNTERID(event, cmask));
354 static void init_cntr_base_ccpi2(struct perf_event *event,
357 struct hw_perf_event *hwc = &event->hw;
360 cmask = tx2_pmu->counters_mask;
362 hwc->config_base = (unsigned long)tx2_pmu->base
363 + CCPI2_COUNTER_CTL + (4 * GET_COUNTERID(event, cmask));
364 hwc->event_base = (unsigned long)tx2_pmu->base;
367 static void uncore_start_event_l3c(struct perf_event *event, int flags)
370 struct hw_perf_event *hwc = &event->hw;
373 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
374 emask = tx2_pmu->events_mask;
376 /* event id encoded in bits [07:03] */
377 val = GET_EVENTID(event, emask) << 3;
378 reg_writel(val, hwc->config_base);
379 local64_set(&hwc->prev_count, 0);
380 reg_writel(0, hwc->event_base);
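/*
 * Worked example for the L3C start sequence above: a 5-bit event id of
 * 0x14 is written as 0x14 << 3 = 0xa0, placing it in bits [07:03] of the
 * per-counter control register; zeroing event_base then resets the 32-bit
 * counter so the hardware agrees with the prev_count of 0.
 */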
383 static inline void uncore_stop_event_l3c(struct perf_event *event)
385 reg_writel(0, event->hw.config_base);
388 static void uncore_start_event_dmc(struct perf_event *event, int flags)
391 struct hw_perf_event *hwc = &event->hw;
395 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
396 cmask = tx2_pmu->counters_mask;
397 emask = tx2_pmu->events_mask;
399 idx = GET_COUNTERID(event, cmask);
400 event_id = GET_EVENTID(event, emask);
403 * 8 bits for each counter; bits [05:01] of each counter's field set the event type.
405 val = reg_readl(hwc->config_base);
408 reg_writel(val, hwc->config_base);
409 local64_set(&hwc->prev_count, 0);
410 reg_writel(0, hwc->event_base);
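/*
 * A sketch, under stated assumptions, of the read-modify-write elided
 * between the reg_readl() and reg_writel() above. The helper macro below is
 * hypothetical, derived only from the "8 bits for each counter, event type
 * in bits [05:01]" layout described in the comment:
 */
#define EXAMPLE_DMC_EVENT_CFG(idx, v)	((v) << (((idx) * 8) + 1))

	val &= ~EXAMPLE_DMC_EVENT_CFG(idx, 0x1f);	/* clear this counter's event field */
	val |= EXAMPLE_DMC_EVENT_CFG(idx, event_id);	/* program the 5-bit event id */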
413 static void uncore_stop_event_dmc(struct perf_event *event)
416 struct hw_perf_event *hwc = &event->hw;
420 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
421 cmask = tx2_pmu->counters_mask;
422 idx = GET_COUNTERID(event, cmask);
424 /* clear the event type (bits [05:01]) to stop the counter */
425 val = reg_readl(hwc->config_base);
427 reg_writel(val, hwc->config_base);
430 static void uncore_start_event_ccpi2(struct perf_event *event, int flags)
433 struct hw_perf_event *hwc = &event->hw;
436 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
437 emask = tx2_pmu->events_mask;
439 /* Bits [09:00] set the event id.
445 GET_EVENTID(event, emask)), hwc->config_base);
451 hwc->event_base + CCPI2_PERF_CTL);
452 local64_set(&event->hw.prev_count, 0ULL);
455 static void uncore_stop_event_ccpi2(struct perf_event *event)
457 struct hw_perf_event *hwc = &event->hw;
460 reg_writel(0, hwc->event_base + CCPI2_PERF_CTL);
463 static void tx2_uncore_event_update(struct perf_event *event)
466 struct hw_perf_event *hwc = &event->hw;
472 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
473 type = tx2_pmu->type;
474 cmask = tx2_pmu->counters_mask;
475 emask = tx2_pmu->events_mask;
476 prorate_factor = tx2_pmu->prorate_factor;
479 GET_COUNTERID(event, cmask),
480 hwc->event_base + CCPI2_COUNTER_SEL);
481 new = reg_readl(hwc->event_base + CCPI2_COUNTER_DATA_H);
483 reg_readl(hwc->event_base + CCPI2_COUNTER_DATA_L);
484 prev = local64_xchg(&hwc->prev_count, new);
485 delta = new - prev;
487 new = reg_readl(hwc->event_base);
488 prev = local64_xchg(&hwc->prev_count, new);
490 delta = (u32)(((1UL << 32) - prev) + new);
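/*
 * Worked example of the 32-bit wraparound handling above: with
 * prev = 0xfffffff0 and a post-wrap read of new = 0x10,
 * ((1UL << 32) - prev) + new = 0x20 (32 events), whereas a naive
 * new - prev would underflow. The hrtimer further below guarantees
 * counters are sampled at most one wrap apart.
 */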
493 /* DMC event data_transfers granularity is 16 bytes; convert it to 64-byte units */
495 GET_EVENTID(event, emask) == DMC_EVENT_DATA_TRANSFERS)
502 local64_add(delta * prorate_factor, &event->count);
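/*
 * Worked example of the final accumulation: a raw DMC DATA_TRANSFERS delta
 * of 1024 16-byte granules is first scaled to 1024 / 4 = 256 64-byte units
 * (the conversion named in the comment above), then multiplied by
 * prorate_factor (TX2_PMU_DMC_CHANNELS for DMC, TX2_PMU_L3_TILES for L3C)
 * before being added to event->count.
 */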
528 struct perf_event *event, int *counters)
530 if (is_software_event(event))
533 if (event->pmu != pmu)
544 static bool tx2_uncore_validate_event_group(struct perf_event *event,
547 struct perf_event *sibling, *leader = event->group_leader;
550 if (event->group_leader == event)
553 if (!tx2_uncore_validate_event(event->pmu, leader, &counters))
557 if (!tx2_uncore_validate_event(event->pmu, sibling, &counters))
561 if (!tx2_uncore_validate_event(event->pmu, event, &counters))
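/*
 * Illustrative usage (PMU instance name assumed): the checks above admit a
 * group only if the leader, its hardware siblings, and the new event all
 * fit within max_counters. For the 4-counter DMC, e.g.:
 *   perf stat -a -e '{uncore_dmc_0/event=0x1/,uncore_dmc_0/event=0x2/}' sleep 1
 * succeeds, while a group of five DMC events is rejected at init time.
 */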
572 static int tx2_uncore_event_init(struct perf_event *event)
574 struct hw_perf_event *hwc = &event->hw;
577 /* Test the event attr type for PMU enumeration */
578 if (event->attr.type != event->pmu->type)
579 return -ENOENT;
583 * Therefore, it does not support per-process mode.
584 * Also, it does not support event sampling mode.
586 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
587 return -EINVAL;
589 if (event->cpu < 0)
590 return -EINVAL;
592 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
593 if (tx2_pmu->cpu >= nr_cpu_ids)
594 return -EINVAL;
595 event->cpu = tx2_pmu->cpu;
597 if (event->attr.config >= tx2_pmu->max_events)
598 return -EINVAL;
600 /* store event id */
601 hwc->config = event->attr.config;
604 if (!tx2_uncore_validate_event_group(event, tx2_pmu->max_counters))
605 return -EINVAL;
610 static void tx2_uncore_event_start(struct perf_event *event, int flags)
612 struct hw_perf_event *hwc = &event->hw;
615 hwc->state = 0;
616 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
618 tx2_pmu->start_event(event, flags);
619 perf_event_update_userpage(event);
621 /* No hrtimer needed for CCPI2, 64-bit counters */
622 if (!tx2_pmu->hrtimer_callback)
625 /* Start timer for first event */
626 if (bitmap_weight(tx2_pmu->active_counters,
627 tx2_pmu->max_counters) == 1) {
628 hrtimer_start(&tx2_pmu->hrtimer,
629 ns_to_ktime(tx2_pmu->hrtimer_interval),
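/*
 * Why the timer: L3C/DMC counters are only 32 bit and are sampled by
 * software rather than by an overflow interrupt. A counter ticking at
 * 1 GHz wraps in roughly 2^32 / 1e9 ~ 4.3 s, so polling every
 * hrtimer_interval keeps successive samples within one wrap and lets the
 * wraparound arithmetic in tx2_uncore_event_update() reconstruct each delta.
 */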
634 static void tx2_uncore_event_stop(struct perf_event *event, int flags)
636 struct hw_perf_event *hwc = &event->hw;
639 if (hwc->state & PERF_HES_UPTODATE)
642 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
643 tx2_pmu->stop_event(event);
644 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
645 hwc->state |= PERF_HES_STOPPED;
647 tx2_uncore_event_update(event);
648 hwc->state |= PERF_HES_UPTODATE;
652 static int tx2_uncore_event_add(struct perf_event *event, int flags)
654 struct hw_perf_event *hwc = &event->hw;
657 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
660 hwc->idx = alloc_counter(tx2_pmu);
661 if (hwc->idx < 0)
662 return -EAGAIN;
664 tx2_pmu->events[hwc->idx] = event;
666 tx2_pmu->init_cntr_base(event, tx2_pmu);
668 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
670 tx2_uncore_event_start(event, flags);
675 static void tx2_uncore_event_del(struct perf_event *event, int flags)
677 struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
678 struct hw_perf_event *hwc = &event->hw;
681 cmask = tx2_pmu->counters_mask;
682 tx2_uncore_event_stop(event, PERF_EF_UPDATE);
685 free_counter(tx2_pmu, GET_COUNTERID(event, cmask));
687 perf_event_update_userpage(event);
688 tx2_pmu->events[hwc->idx] = NULL;
689 hwc->idx = -1;
691 if (!tx2_pmu->hrtimer_callback)
694 if (bitmap_empty(tx2_pmu->active_counters, tx2_pmu->max_counters))
695 hrtimer_cancel(&tx2_pmu->hrtimer);
698 static void tx2_uncore_event_read(struct perf_event *event)
700 tx2_uncore_event_update(event);
709 max_counters = tx2_pmu->max_counters;
711 if (bitmap_empty(tx2_pmu->active_counters, max_counters))
714 for_each_set_bit(idx, tx2_pmu->active_counters, max_counters) {
715 struct perf_event *event = tx2_pmu->events[idx];
717 tx2_uncore_event_update(event);
719 hrtimer_forward_now(timer, ns_to_ktime(tx2_pmu->hrtimer_interval));
726 struct device *dev = tx2_pmu->dev;
727 char *name = tx2_pmu->name;
729 /* Perf event registration */
730 tx2_pmu->pmu = (struct pmu) {
732 .attr_groups = tx2_pmu->attr_groups,
743 tx2_pmu->pmu.name = devm_kasprintf(dev, GFP_KERNEL,
744 "%s", name);
746 return perf_pmu_register(&tx2_pmu->pmu, tx2_pmu->pmu.name, -1);
753 cpu = cpumask_any_and(cpumask_of_node(tx2_pmu->node),
756 tx2_pmu->cpu = cpu;
758 if (tx2_pmu->hrtimer_callback) {
759 hrtimer_init(&tx2_pmu->hrtimer,
761 tx2_pmu->hrtimer.function = tx2_pmu->hrtimer_callback;
766 dev_err(tx2_pmu->dev, "%s PMU: Failed to init driver\n",
767 tx2_pmu->name);
768 return -ENODEV;
774 &tx2_pmu->hpnode);
776 dev_err(tx2_pmu->dev, "Error %d registering hotplug", ret);
781 list_add(&tx2_pmu->entry, &tx2_pmus);
783 dev_dbg(tx2_pmu->dev, "%s PMU UNCORE registered\n",
784 tx2_pmu->pmu.name);
806 if (resource_type(rentry->res) == IORESOURCE_MEM) {
807 res = *rentry->res;
827 tx2_pmu->dev = dev;
828 tx2_pmu->type = type;
829 tx2_pmu->base = base;
830 tx2_pmu->node = dev_to_node(dev);
831 INIT_LIST_HEAD(&tx2_pmu->entry);
833 switch (tx2_pmu->type) {
835 tx2_pmu->max_counters = TX2_PMU_DMC_L3C_MAX_COUNTERS;
836 tx2_pmu->counters_mask = 0x3;
837 tx2_pmu->prorate_factor = TX2_PMU_L3_TILES;
838 tx2_pmu->max_events = L3_EVENT_MAX;
839 tx2_pmu->events_mask = 0x1f;
840 tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
841 tx2_pmu->hrtimer_callback = tx2_hrtimer_callback;
842 tx2_pmu->attr_groups = l3c_pmu_attr_groups;
843 tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
844 "uncore_l3c_%d", tx2_pmu->node);
845 tx2_pmu->init_cntr_base = init_cntr_base_l3c;
846 tx2_pmu->start_event = uncore_start_event_l3c;
847 tx2_pmu->stop_event = uncore_stop_event_l3c;
850 tx2_pmu->max_counters = TX2_PMU_DMC_L3C_MAX_COUNTERS;
851 tx2_pmu->counters_mask = 0x3;
852 tx2_pmu->prorate_factor = TX2_PMU_DMC_CHANNELS;
853 tx2_pmu->max_events = DMC_EVENT_MAX;
854 tx2_pmu->events_mask = 0x1f;
855 tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
856 tx2_pmu->hrtimer_callback = tx2_hrtimer_callback;
857 tx2_pmu->attr_groups = dmc_pmu_attr_groups;
858 tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
859 "uncore_dmc_%d", tx2_pmu->node);
860 tx2_pmu->init_cntr_base = init_cntr_base_dmc;
861 tx2_pmu->start_event = uncore_start_event_dmc;
862 tx2_pmu->stop_event = uncore_stop_event_dmc;
866 tx2_pmu->max_counters = TX2_PMU_CCPI2_MAX_COUNTERS;
867 tx2_pmu->counters_mask = 0x7;
868 tx2_pmu->prorate_factor = 1;
869 tx2_pmu->max_events = CCPI2_EVENT_MAX;
870 tx2_pmu->events_mask = 0x1ff;
871 tx2_pmu->attr_groups = ccpi2_pmu_attr_groups;
872 tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
873 "uncore_ccpi2_%d", tx2_pmu->node);
874 tx2_pmu->init_cntr_base = init_cntr_base_ccpi2;
875 tx2_pmu->start_event = uncore_start_event_ccpi2;
876 tx2_pmu->stop_event = uncore_stop_event_ccpi2;
877 tx2_pmu->hrtimer_callback = NULL;
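/*
 * Per-type parameters chosen in the switch above, side by side:
 *
 *   type   counters     counters_mask  events_mask  prorate_factor         hrtimer
 *   L3C    4 x 32-bit   0x3            0x1f         TX2_PMU_L3_TILES       yes
 *   DMC    4 x 32-bit   0x3            0x1f         TX2_PMU_DMC_CHANNELS   yes
 *   CCPI2  8 x 64-bit   0x7            0x1ff        1                      no
 */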
896 if (acpi_bus_get_status(adev) || !adev->status.present)
927 if ((tx2_pmu->cpu >= nr_cpu_ids) &&
928 (tx2_pmu->node == cpu_to_node(cpu)))
929 tx2_pmu->cpu = cpu;
944 if (cpu != tx2_pmu->cpu)
947 if (tx2_pmu->hrtimer_callback)
948 hrtimer_cancel(&tx2_pmu->hrtimer);
953 cpumask_of_node(tx2_pmu->node),
956 tx2_pmu->cpu = new_cpu;
959 perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);
972 struct device *dev = &pdev->dev;
979 return -ENODEV;
983 return -EINVAL;
1001 struct device *dev = &pdev->dev;
1005 if (tx2_pmu->node == dev_to_node(dev)) {
1008 &tx2_pmu->hpnode);
1009 perf_pmu_unregister(&tx2_pmu->pmu);
1010 list_del(&tx2_pmu->entry);
1019 .name = "tx2-uncore-pmu",