Lines Matching full:box
116 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) in uncore_msr_read_counter() argument
125 void uncore_mmio_exit_box(struct intel_uncore_box *box) in uncore_mmio_exit_box() argument
127 if (box->io_addr) in uncore_mmio_exit_box()
128 iounmap(box->io_addr); in uncore_mmio_exit_box()
131 u64 uncore_mmio_read_counter(struct intel_uncore_box *box, in uncore_mmio_read_counter() argument
134 if (!box->io_addr) in uncore_mmio_read_counter()
137 if (!uncore_mmio_is_valid_offset(box, event->hw.event_base)) in uncore_mmio_read_counter()
140 return readq(box->io_addr + event->hw.event_base); in uncore_mmio_read_counter()
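These matches appear to come from the Linux kernel's Intel uncore PMU driver (arch/x86/events/intel/uncore.c). The uncore_mmio_read_counter() hits show the defensive pattern for MMIO counter reads: bail out when the box was never mapped, reject an out-of-range register offset, then do a single 64-bit read. A minimal user-space sketch of that pattern, with a simplified stand-in struct and bounds check rather than the kernel's real types:

```c
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-in for the kernel structure (assumption, not the real type). */
struct mmio_box {
	void   *io_addr;    /* base of the mapped counter bank, NULL if unmapped */
	size_t  mmio_size;  /* size of the mapped region */
};

/* Read a 64-bit counter at 'offset', mirroring the checks listed above:
 * no mapping -> 0, out-of-range offset -> 0, otherwise read the register. */
static uint64_t mmio_read_counter(const struct mmio_box *box, size_t offset)
{
	if (!box->io_addr)
		return 0;
	if (offset + sizeof(uint64_t) > box->mmio_size)
		return 0;
	return *(volatile const uint64_t *)((const char *)box->io_addr + offset);
}
```

In the driver itself the read goes through readq() and the range check through uncore_mmio_is_valid_offset(), as the matched lines show.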
147 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event) in uncore_get_constraint() argument
156 * reg->alloc can be set due to existing state, so for fake box we in uncore_get_constraint()
161 (!uncore_box_is_fake(box) && reg1->alloc)) in uncore_get_constraint()
164 er = &box->shared_regs[reg1->idx]; in uncore_get_constraint()
176 if (!uncore_box_is_fake(box)) in uncore_get_constraint()
184 void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event) in uncore_put_constraint() argument
193 * Also, if this is a fake box we shouldn't touch any event state in uncore_put_constraint()
194 * (reg->alloc) and we don't care about leaving inconsistent box in uncore_put_constraint()
197 if (uncore_box_is_fake(box) || !reg1->alloc) in uncore_put_constraint()
200 er = &box->shared_regs[reg1->idx]; in uncore_put_constraint()
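The uncore_get_constraint()/uncore_put_constraint() hits revolve around box->shared_regs[]: registers shared by several counters, each with its own lock and reference count, while reg1->alloc tracks whether this event currently owns its slot (fake boxes used for validation skip that bookkeeping). A rough user-space sketch of the acquire/release idea, with pthread locks and illustrative field names standing in for the kernel's raw spinlock and atomics:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

/* Simplified shared-register slot (assumption: the field names are illustrative). */
struct shared_reg {
	pthread_mutex_t lock;
	int             ref;     /* events currently using this slot */
	uint64_t        config;  /* value programmed while ref > 0 */
};

/* Acquire: succeed if the slot is free or already programmed to the same value. */
static bool shared_reg_get(struct shared_reg *er, uint64_t config)
{
	bool ok = false;

	pthread_mutex_lock(&er->lock);
	if (!er->ref || er->config == config) {
		er->ref++;
		er->config = config;
		ok = true;
	}
	pthread_mutex_unlock(&er->lock);
	return ok;
}

/* Release: drop the reference so another event may reprogram the slot. */
static void shared_reg_put(struct shared_reg *er)
{
	pthread_mutex_lock(&er->lock);
	er->ref--;
	pthread_mutex_unlock(&er->lock);
}
```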
205 u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx) in uncore_shared_reg_config() argument
211 er = &box->shared_regs[idx]; in uncore_shared_reg_config()
220 static void uncore_assign_hw_event(struct intel_uncore_box *box, in uncore_assign_hw_event() argument
226 hwc->last_tag = ++box->tags[idx]; in uncore_assign_hw_event()
229 hwc->event_base = uncore_fixed_ctr(box); in uncore_assign_hw_event()
230 hwc->config_base = uncore_fixed_ctl(box); in uncore_assign_hw_event()
234 hwc->config_base = uncore_event_ctl(box, hwc->idx); in uncore_assign_hw_event()
235 hwc->event_base = uncore_perf_ctr(box, hwc->idx); in uncore_assign_hw_event()
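uncore_assign_hw_event() maps the assigned counter index to hardware registers: the fixed counter gets the box's fixed control/counter registers, any other index gets the per-counter control and counter registers. A compressed sketch of that branch; the helper name and the base-plus-stride layout are illustrative assumptions, since the driver takes the real addresses from per-type accessors such as uncore_fixed_ctl() and uncore_event_ctl():

```c
#include <stdint.h>

struct hw_regs { uint64_t config_base, event_base; };

/* Map a counter index to its control/counter registers: the fixed counter has
 * dedicated registers, general counters sit at base + stride * idx. */
static struct hw_regs assign_regs(int idx, int fixed_idx,
				  uint64_t fixed_ctl, uint64_t fixed_ctr,
				  uint64_t ctl_base, uint64_t ctr_base,
				  uint64_t stride)
{
	struct hw_regs r;

	if (idx == fixed_idx) {
		r.config_base = fixed_ctl;
		r.event_base  = fixed_ctr;
	} else {
		r.config_base = ctl_base + stride * (uint64_t)idx;
		r.event_base  = ctr_base + stride * (uint64_t)idx;
	}
	return r;
}
```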
238 void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event) in uncore_perf_event_update() argument
244 shift = 64 - uncore_freerunning_bits(box, event); in uncore_perf_event_update()
246 shift = 64 - uncore_fixed_ctr_bits(box); in uncore_perf_event_update()
248 shift = 64 - uncore_perf_ctr_bits(box); in uncore_perf_event_update()
253 new_count = uncore_read_counter(box, event); in uncore_perf_event_update()
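uncore_perf_event_update() picks the counter width (free-running, fixed, or generic), reads the new raw value, and folds the wrapped difference from the previous read into the event count. The width-limited delta uses the usual shift trick, sketched here in plain C (the real code also retries with a compare-and-swap on prev_count, omitted for brevity):

```c
#include <stdint.h>

/* Fold the wrapped difference between two raw reads of a 'bits'-wide counter.
 * Shifting both values up to bit 63 and the difference back down discards
 * anything above the hardware counter width. */
static uint64_t counter_delta(uint64_t prev, uint64_t now, int bits)
{
	int shift = 64 - bits;

	return ((now << shift) - (prev << shift)) >> shift;
}
```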
270 struct intel_uncore_box *box; in uncore_pmu_hrtimer() local
275 box = container_of(hrtimer, struct intel_uncore_box, hrtimer); in uncore_pmu_hrtimer()
276 if (!box->n_active || box->cpu != smp_processor_id()) in uncore_pmu_hrtimer()
288 list_for_each_entry(event, &box->active_list, active_entry) { in uncore_pmu_hrtimer()
289 uncore_perf_event_update(box, event); in uncore_pmu_hrtimer()
292 for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX) in uncore_pmu_hrtimer()
293 uncore_perf_event_update(box, box->events[bit]); in uncore_pmu_hrtimer()
297 hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration)); in uncore_pmu_hrtimer()
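uncore_pmu_hrtimer() is the periodic poll that keeps the wide uncore counters from losing wraps: if the box still has active events and is owned by this CPU, every active event is updated, either by walking box->active_list or the active_mask bitmap, and the timer is re-armed with hrtimer_forward_now() for another hrtimer_duration. A simplified user-space illustration of one such poll tick over a bitmap of active counters (the timer plumbing itself is left out, and the struct is an assumption):

```c
#include <stdint.h>

#define MAX_COUNTERS 64

/* Simplified polling state for one box (not the kernel layout). */
struct poll_box {
	uint64_t  active_mask;          /* one bit per live counter */
	uint64_t  prev[MAX_COUNTERS];   /* last raw value seen */
	uint64_t  total[MAX_COUNTERS];  /* accumulated count */
	uint64_t (*read)(int idx);      /* raw hardware read */
	int       bits;                 /* hardware counter width */
};

/* One timer tick: fold the wrapped delta of every active counter into its
 * running total, which is the job the hrtimer callback does for a box before
 * re-arming itself. */
static void poll_box_tick(struct poll_box *box)
{
	int shift = 64 - box->bits;

	for (int idx = 0; idx < MAX_COUNTERS; idx++) {
		uint64_t now;

		if (!(box->active_mask & (1ULL << idx)))
			continue;
		now = box->read(idx);
		box->total[idx] += ((now << shift) - (box->prev[idx] << shift)) >> shift;
		box->prev[idx] = now;
	}
}
```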
301 void uncore_pmu_start_hrtimer(struct intel_uncore_box *box) in uncore_pmu_start_hrtimer() argument
303 hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration), in uncore_pmu_start_hrtimer()
307 void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box) in uncore_pmu_cancel_hrtimer() argument
309 hrtimer_cancel(&box->hrtimer); in uncore_pmu_cancel_hrtimer()
312 static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box) in uncore_pmu_init_hrtimer() argument
314 hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in uncore_pmu_init_hrtimer()
315 box->hrtimer.function = uncore_pmu_hrtimer; in uncore_pmu_init_hrtimer()
322 struct intel_uncore_box *box; in uncore_alloc_box() local
324 size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg); in uncore_alloc_box()
326 box = kzalloc_node(size, GFP_KERNEL, node); in uncore_alloc_box()
327 if (!box) in uncore_alloc_box()
331 raw_spin_lock_init(&box->shared_regs[i].lock); in uncore_alloc_box()
333 uncore_pmu_init_hrtimer(box); in uncore_alloc_box()
334 box->cpu = -1; in uncore_alloc_box()
335 box->pci_phys_id = -1; in uncore_alloc_box()
336 box->dieid = -1; in uncore_alloc_box()
339 box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL; in uncore_alloc_box()
341 INIT_LIST_HEAD(&box->active_list); in uncore_alloc_box()
343 return box; in uncore_alloc_box()
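uncore_alloc_box() allocates the box together with a trailing array of shared registers in one kzalloc_node() call, initializes each shared-register lock, and marks cpu, pci_phys_id and dieid as -1 ("unassigned") before setting up the hrtimer and active list. A minimal user-space sketch of the flexible-array allocation pattern it relies on (calloc and pthread locks stand in for kzalloc_node() and raw spinlocks):

```c
#include <pthread.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel structures. */
struct extra_reg {
	pthread_mutex_t lock;
};

struct box {
	int              cpu;           /* -1 until a CPU is assigned */
	int              dieid;         /* -1 until known */
	int              n_shared;
	struct extra_reg shared_regs[]; /* flexible array, sized at allocation */
};

static struct box *alloc_box(int numshared)
{
	/* One allocation covers the box and its trailing shared registers. */
	struct box *b = calloc(1, sizeof(*b) + numshared * sizeof(struct extra_reg));

	if (!b)
		return NULL;
	for (int i = 0; i < numshared; i++)
		pthread_mutex_init(&b->shared_regs[i].lock, NULL);
	b->cpu = -1;
	b->dieid = -1;
	b->n_shared = numshared;
	return b;
}
```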
352 static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event) in is_box_event() argument
354 return &box->pmu->pmu == event->pmu; in is_box_event()
358 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, in uncore_collect_events() argument
364 max_count = box->pmu->type->num_counters; in uncore_collect_events()
365 if (box->pmu->type->fixed_ctl) in uncore_collect_events()
368 if (box->n_events >= max_count) in uncore_collect_events()
371 n = box->n_events; in uncore_collect_events()
373 if (is_box_event(box, leader)) { in uncore_collect_events()
374 box->event_list[n] = leader; in uncore_collect_events()
382 if (!is_box_event(box, event) || in uncore_collect_events()
389 box->event_list[n] = event; in uncore_collect_events()
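uncore_collect_events() gathers the group leader and, optionally, its siblings into box->event_list[], keeping only events that belong to this box's PMU (is_box_event()) and capping the total at num_counters, plus one when the type has a fixed counter. A condensed sketch of that collect-or-reject step for a single event (types are simplified stand-ins; the kernel returns -EINVAL on overflow):

```c
struct evt {
	const void *pmu;   /* which PMU this event was opened on */
};

/* Append 'ev' to 'list' if it belongs to this box's PMU and a counter is still
 * free. Returns the new count, or -1 when the box would be over-committed. */
static int collect_one(struct evt **list, int n, int max_count,
		       const void *box_pmu, struct evt *ev)
{
	if (ev->pmu != box_pmu)
		return n;       /* not ours: skip, as is_box_event() does */
	if (n >= max_count)
		return -1;      /* no room left on this box */
	list[n] = ev;
	return n + 1;
}
```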
396 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event) in uncore_get_event_constraint() argument
398 struct intel_uncore_type *type = box->pmu->type; in uncore_get_event_constraint()
402 c = type->ops->get_constraint(box, event); in uncore_get_event_constraint()
420 static void uncore_put_event_constraint(struct intel_uncore_box *box, in uncore_put_event_constraint() argument
423 if (box->pmu->type->ops->put_constraint) in uncore_put_event_constraint()
424 box->pmu->type->ops->put_constraint(box, event); in uncore_put_event_constraint()
427 static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n) in uncore_assign_events() argument
437 c = uncore_get_event_constraint(box, box->event_list[i]); in uncore_assign_events()
438 box->event_constraint[i] = c; in uncore_assign_events()
445 hwc = &box->event_list[i]->hw; in uncore_assign_events()
446 c = box->event_constraint[i]; in uncore_assign_events()
466 ret = perf_assign_events(box->event_constraint, n, in uncore_assign_events()
471 uncore_put_event_constraint(box, box->event_list[i]); in uncore_assign_events()
478 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_start() local
491 list_add_tail(&event->active_entry, &box->active_list); in uncore_pmu_event_start()
493 uncore_read_counter(box, event)); in uncore_pmu_event_start()
494 if (box->n_active++ == 0) in uncore_pmu_event_start()
495 uncore_pmu_start_hrtimer(box); in uncore_pmu_event_start()
503 box->events[idx] = event; in uncore_pmu_event_start()
504 box->n_active++; in uncore_pmu_event_start()
505 __set_bit(idx, box->active_mask); in uncore_pmu_event_start()
507 local64_set(&event->hw.prev_count, uncore_read_counter(box, event)); in uncore_pmu_event_start()
508 uncore_enable_event(box, event); in uncore_pmu_event_start()
510 if (box->n_active == 1) in uncore_pmu_event_start()
511 uncore_pmu_start_hrtimer(box); in uncore_pmu_event_start()
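The uncore_pmu_event_start() hits show two paths: a free-running event is linked onto box->active_list, its raw value snapshotted into prev_count, and the hrtimer started with the first active event; a programmed event is additionally recorded in box->events[idx], has its bit set in active_mask, and is enabled in hardware. A tiny sketch of the bookkeeping on the programmed path (the structure is a simplified stand-in, and the hardware enable is only a comment):

```c
#include <stdbool.h>
#include <stdint.h>

#define MAX_COUNTERS 64

/* Simplified per-box state (assumption: not the kernel's struct layout). */
struct sbox {
	int       n_active;
	uint64_t  active_mask;
	void     *events[MAX_COUNTERS];
	bool      hrtimer_running;
};

/* Start an event on counter 'idx': record it, mark it active, and kick the
 * polling timer when it is the first active event on the box. */
static void box_event_start(struct sbox *box, void *ev, int idx)
{
	box->events[idx] = ev;
	box->n_active++;
	box->active_mask |= 1ULL << idx;

	/* The driver also snapshots prev_count and enables the counter here. */

	if (box->n_active == 1)
		box->hrtimer_running = true;   /* uncore_pmu_start_hrtimer() */
}
```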
516 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_stop() local
522 if (--box->n_active == 0) in uncore_pmu_event_stop()
523 uncore_pmu_cancel_hrtimer(box); in uncore_pmu_event_stop()
524 uncore_perf_event_update(box, event); in uncore_pmu_event_stop()
528 if (__test_and_clear_bit(hwc->idx, box->active_mask)) { in uncore_pmu_event_stop()
529 uncore_disable_event(box, event); in uncore_pmu_event_stop()
530 box->n_active--; in uncore_pmu_event_stop()
531 box->events[hwc->idx] = NULL; in uncore_pmu_event_stop()
535 if (box->n_active == 0) in uncore_pmu_event_stop()
536 uncore_pmu_cancel_hrtimer(box); in uncore_pmu_event_stop()
544 uncore_perf_event_update(box, event); in uncore_pmu_event_stop()
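uncore_pmu_event_stop() undoes that in the same order: clear the active_mask bit, disable the counter, drop n_active, forget box->events[idx], cancel the hrtimer once nothing is active, and fold in a final delta for PERF_EF_UPDATE. A sketch of the programmed-counter teardown, reusing struct sbox from the start sketch above:

```c
/* Counterpart of box_event_start() above; reuses struct sbox from that sketch. */
static void box_event_stop(struct sbox *box, int idx)
{
	if (box->active_mask & (1ULL << idx)) {
		/* The driver disables the hardware counter first. */
		box->active_mask &= ~(1ULL << idx);
		box->n_active--;
		box->events[idx] = NULL;
	}

	if (box->n_active == 0)
		box->hrtimer_running = false;  /* uncore_pmu_cancel_hrtimer() */
}
```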
551 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_add() local
556 if (!box) in uncore_pmu_event_add()
570 ret = n = uncore_collect_events(box, event, false); in uncore_pmu_event_add()
578 ret = uncore_assign_events(box, assign, n); in uncore_pmu_event_add()
583 for (i = 0; i < box->n_events; i++) { in uncore_pmu_event_add()
584 event = box->event_list[i]; in uncore_pmu_event_add()
588 hwc->last_tag == box->tags[assign[i]]) in uncore_pmu_event_add()
602 event = box->event_list[i]; in uncore_pmu_event_add()
606 hwc->last_tag != box->tags[assign[i]]) in uncore_pmu_event_add()
607 uncore_assign_hw_event(box, event, assign[i]); in uncore_pmu_event_add()
608 else if (i < box->n_events) in uncore_pmu_event_add()
616 box->n_events = n; in uncore_pmu_event_add()
623 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_del() local
636 for (i = 0; i < box->n_events; i++) { in uncore_pmu_event_del()
637 if (event == box->event_list[i]) { in uncore_pmu_event_del()
638 uncore_put_event_constraint(box, event); in uncore_pmu_event_del()
640 for (++i; i < box->n_events; i++) in uncore_pmu_event_del()
641 box->event_list[i - 1] = box->event_list[i]; in uncore_pmu_event_del()
643 --box->n_events; in uncore_pmu_event_del()
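uncore_pmu_event_del() locates the event in box->event_list[], releases its constraint, and closes the gap by sliding the remaining entries down before decrementing n_events. A tiny sketch of that compaction:

```c
/* Remove 'ev' from a dense event list of 'n' entries; returns the new count. */
static int event_list_del(void **list, int n, void *ev)
{
	for (int i = 0; i < n; i++) {
		if (list[i] != ev)
			continue;
		for (i++; i < n; i++)      /* slide the tail down by one slot */
			list[i - 1] = list[i];
		return n - 1;
	}
	return n;                          /* not found: list unchanged */
}
```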
654 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_read() local
655 uncore_perf_event_update(box, event); in uncore_pmu_event_read()
704 struct intel_uncore_box *box; in uncore_pmu_event_init() local
726 box = uncore_pmu_to_box(pmu, event->cpu); in uncore_pmu_event_init()
727 if (!box || box->cpu < 0) in uncore_pmu_event_init()
729 event->cpu = box->cpu; in uncore_pmu_event_init()
730 event->pmu_private = box; in uncore_pmu_event_init()
754 if (!check_valid_freerunning_event(box, event)) in uncore_pmu_event_init()
763 event->hw.event_base = uncore_freerunning_counter(box, event); in uncore_pmu_event_init()
768 ret = pmu->type->ops->hw_config(box, event); in uncore_pmu_event_init()
785 struct intel_uncore_box *box; in uncore_pmu_enable() local
791 box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); in uncore_pmu_enable()
792 if (!box) in uncore_pmu_enable()
796 uncore_pmu->type->ops->enable_box(box); in uncore_pmu_enable()
802 struct intel_uncore_box *box; in uncore_pmu_disable() local
808 box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); in uncore_pmu_disable()
809 if (!box) in uncore_pmu_disable()
813 uncore_pmu->type->ops->disable_box(box); in uncore_pmu_disable()
1057 struct intel_uncore_box *box; in uncore_pci_pmu_register() local
1063 box = uncore_alloc_box(type, NUMA_NO_NODE); in uncore_pci_pmu_register()
1064 if (!box) in uncore_pci_pmu_register()
1072 atomic_inc(&box->refcnt); in uncore_pci_pmu_register()
1073 box->pci_phys_id = phys_id; in uncore_pci_pmu_register()
1074 box->dieid = die; in uncore_pci_pmu_register()
1075 box->pci_dev = pdev; in uncore_pci_pmu_register()
1076 box->pmu = pmu; in uncore_pci_pmu_register()
1077 uncore_box_init(box); in uncore_pci_pmu_register()
1079 pmu->boxes[die] = box; in uncore_pci_pmu_register()
1083 /* First active box registers the pmu */ in uncore_pci_pmu_register()
1087 uncore_box_exit(box); in uncore_pci_pmu_register()
1088 kfree(box); in uncore_pci_pmu_register()
1119 * PCI slot and func to indicate the uncore box. in uncore_pci_probe()
1130 * each box has a different function id. in uncore_pci_probe()
1151 struct intel_uncore_box *box = pmu->boxes[die]; in uncore_pci_pmu_unregister() local
1153 if (WARN_ON_ONCE(phys_id != box->pci_phys_id)) in uncore_pci_pmu_unregister()
1159 uncore_box_exit(box); in uncore_pci_pmu_unregister()
1160 kfree(box); in uncore_pci_pmu_unregister()
1165 struct intel_uncore_box *box; in uncore_pci_remove() local
1172 box = pci_get_drvdata(pdev); in uncore_pci_remove()
1173 if (!box) { in uncore_pci_remove()
1184 pmu = box->pmu; in uncore_pci_remove()
1321 struct intel_uncore_box *box; in uncore_change_type_ctx() local
1326 box = pmu->boxes[die]; in uncore_change_type_ctx()
1327 if (!box) in uncore_change_type_ctx()
1331 WARN_ON_ONCE(box->cpu != -1); in uncore_change_type_ctx()
1332 box->cpu = new_cpu; in uncore_change_type_ctx()
1336 WARN_ON_ONCE(box->cpu != old_cpu); in uncore_change_type_ctx()
1337 box->cpu = -1; in uncore_change_type_ctx()
1341 uncore_pmu_cancel_hrtimer(box); in uncore_change_type_ctx()
1343 box->cpu = new_cpu; in uncore_change_type_ctx()
1358 struct intel_uncore_box *box; in uncore_box_unref() local
1365 box = pmu->boxes[id]; in uncore_box_unref()
1366 if (box && atomic_dec_return(&box->refcnt) == 0) in uncore_box_unref()
1367 uncore_box_exit(box); in uncore_box_unref()
1403 struct intel_uncore_box *box, *tmp; in allocate_boxes() local
1416 box = uncore_alloc_box(type, cpu_to_node(cpu)); in allocate_boxes()
1417 if (!box) in allocate_boxes()
1419 box->pmu = pmu; in allocate_boxes()
1420 box->dieid = die; in allocate_boxes()
1421 list_add(&box->active_list, &allocated); in allocate_boxes()
1425 list_for_each_entry_safe(box, tmp, &allocated, active_list) { in allocate_boxes()
1426 list_del_init(&box->active_list); in allocate_boxes()
1427 box->pmu->boxes[die] = box; in allocate_boxes()
1432 list_for_each_entry_safe(box, tmp, &allocated, active_list) { in allocate_boxes()
1433 list_del_init(&box->active_list); in allocate_boxes()
1434 kfree(box); in allocate_boxes()
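allocate_boxes() builds every missing box for the incoming CPU on a temporary list first and only installs them into pmu->boxes[die] once all allocations have succeeded; on any failure the whole temporary list is freed, so a die is never left half provisioned. A small sketch of that all-or-nothing pattern, simplified to a flat array instead of the kernel's list:

```c
#include <stdlib.h>

/* Allocate 'count' objects of 'size' bytes all-or-nothing: either every slot
 * in 'out' is filled, or everything is rolled back and 0 is returned. */
static int allocate_all(void **out, int count, size_t size)
{
	int i;

	for (i = 0; i < count; i++) {
		out[i] = calloc(1, size);
		if (!out[i])
			goto cleanup;
	}
	return 1;          /* success: the caller may now install the objects */

cleanup:
	while (i--)        /* free everything allocated before the failure */
		free(out[i]);
	return 0;
}
```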
1444 struct intel_uncore_box *box; in uncore_box_ref() local
1455 box = pmu->boxes[id]; in uncore_box_ref()
1456 if (box && atomic_inc_return(&box->refcnt) == 1) in uncore_box_ref()
1457 uncore_box_init(box); in uncore_box_ref()
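uncore_box_ref() and uncore_box_unref() wrap the per-box reference count: the first reference initializes the box (uncore_box_init()), the last dropped reference shuts it down (uncore_box_exit()). A compact user-space sketch of that first/last pattern, with C11 atomics standing in for the kernel's atomic_t:

```c
#include <stdatomic.h>
#include <stdio.h>

struct refbox {
	atomic_int refcnt;
};

static void box_hw_init(struct refbox *box) { (void)box; puts("init hardware"); }
static void box_hw_exit(struct refbox *box) { (void)box; puts("exit hardware"); }

/* First reference brings the hardware up, as uncore_box_ref() does via uncore_box_init(). */
static void box_ref(struct refbox *box)
{
	if (atomic_fetch_add(&box->refcnt, 1) == 0)
		box_hw_init(box);
}

/* Last dropped reference shuts it down, as uncore_box_unref() does via uncore_box_exit(). */
static void box_unref(struct refbox *box)
{
	if (atomic_fetch_sub(&box->refcnt, 1) == 1)
		box_hw_exit(box);
}
```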