Lines matching full-text search for "ignore-power-on-sel"

1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2016-2020 Arm Limited
3 // CMN-600 Coherent Mesh Network PMU driver
11 #include <linux/io-64-nonatomic-lo-hi.h>
33 #define CMN_NODEID_Y(reg, bits) (((reg) >> 3) & ((1U << (bits)) - 1))
44 #define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)
75 /* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */
78 /* HN-Ps are weird... */
122 /* DTC counters are paired in 64-bit registers on a 16-byte stride. Yuck */
152 /* Similarly for the 40-bit cycle counter */
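An aside on the layout the comment above describes: with two 32-bit event counters packed into each 64-bit register and the registers placed on a 16-byte stride, the byte offset of counter n from the start of the counter array works out to (n / 2) * 16 + (n % 2) * 4. A minimal userspace sketch of that arithmetic (the helper name and the demo offsets are illustrative, not the driver's actual macros):

#include <assert.h>
#include <stdio.h>

/* Illustrative only: offset of 32-bit event counter n, assuming two
 * counters per 64-bit register and registers on a 16-byte stride. */
static unsigned int dt_counter_offset(unsigned int n)
{
        return (n / 2) * 16 + (n % 2) * 4;
}

int main(void)
{
        /* Counters 0 and 1 share the first 64-bit register... */
        assert(dt_counter_offset(0) == 0x00);
        assert(dt_counter_offset(1) == 0x04);
        /* ...and the next pair starts 16 bytes later. */
        assert(dt_counter_offset(2) == 0x10);
        assert(dt_counter_offset(3) == 0x14);
        printf("counter layout as expected\n");
        return 0;
}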
163 #define CMN_EVENT_TYPE(event) FIELD_GET(CMN_CONFIG_TYPE, (event)->attr.config)
164 #define CMN_EVENT_EVENTID(event) FIELD_GET(CMN_CONFIG_EVENTID, (event)->attr.config)
165 #define CMN_EVENT_OCCUPID(event) FIELD_GET(CMN_CONFIG_OCCUPID, (event)->attr.config)
166 #define CMN_EVENT_BYNODEID(event) FIELD_GET(CMN_CONFIG_BYNODEID, (event)->attr.config)
167 #define CMN_EVENT_NODEID(event) FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config)
172 /* Note that we don't yet support the tertiary match group on newer IPs */
178 #define CMN_EVENT_WP_COMBINE(event) FIELD_GET(CMN_CONFIG_WP_COMBINE, (event)->attr.config)
179 #define CMN_EVENT_WP_DEV_SEL(event) FIELD_GET(CMN_CONFIG_WP_DEV_SEL, (event)->attr.config)
180 #define CMN_EVENT_WP_CHN_SEL(event) FIELD_GET(CMN_CONFIG_WP_CHN_SEL, (event)->attr.config)
181 #define CMN_EVENT_WP_GRP(event) FIELD_GET(CMN_CONFIG_WP_GRP, (event)->attr.config)
182 #define CMN_EVENT_WP_EXCLUSIVE(event) FIELD_GET(CMN_CONFIG_WP_EXCLUSIVE, (event)->attr.config)
183 #define CMN_EVENT_WP_VAL(event) FIELD_GET(CMN_CONFIG1_WP_VAL, (event)->attr.config1)
184 #define CMN_EVENT_WP_MASK(event) FIELD_GET(CMN_CONFIG2_WP_MASK, (event)->attr.config2)
186 /* Made-up event IDs for watchpoint direction */
197 CMN_ANY = -1,
198 NOT_CMN600 = -2,
202 /* CMN-600 r0px shouldn't exist in silicon, thankfully */
252 SEL_NONE = -1,
266 /* DN/HN-F/CXHA */
352 return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1) | 2); in arm_cmn_xyidbits()
359 if (cmn->num_xps == 1) { in arm_cmn_nid()
369 if (cmn->ports_used & 0xc) { in arm_cmn_nid()
383 struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); in arm_cmn_node_to_xp()
384 int xp_idx = cmn->mesh_x * nid.y + nid.x; in arm_cmn_node_to_xp()
386 return cmn->xps + xp_idx; in arm_cmn_node_to_xp()
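The coordinate handling visible above fits together as follows: arm_cmn_xyidbits() picks the number of bits used per X/Y coordinate (at least 2, which is what the "| 2" ensures), CMN_NODEID_Y() extracts Y from above the low 3 port/device bits, X sits above Y, and an XP's index in the array is mesh_x * y + x. A hedged userspace sketch of that decode, assuming the simple layout where X is directly above Y (the driver has further variants for 1x1 meshes and for ports 2/3, per the ports_used check above):

#include <stdio.h>

/* fls() equivalent: 1-based index of the highest set bit, 0 for x == 0 */
static int fls_u32(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

/* Bits per X/Y coordinate for a given mesh, minimum 2 (the "| 2") */
static int xyidbits(int mesh_x, int mesh_y)
{
        return fls_u32((mesh_x - 1) | (mesh_y - 1) | 2);
}

int main(void)
{
        int mesh_x = 8, mesh_y = 6;          /* illustrative mesh size */
        unsigned int id = 0x6b;              /* illustrative node ID */
        int bits = xyidbits(mesh_x, mesh_y); /* 3 for an 8x6 mesh */

        unsigned int y = (id >> 3) & ((1U << bits) - 1); /* CMN_NODEID_Y */
        unsigned int x = id >> (3 + bits);               /* X above Y */

        printf("bits=%d -> (x=%u, y=%u), xp index %u\n",
               bits, x, y, mesh_x * y + x);
        return 0;
}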
393 for (dn = cmn->dns; dn->type; dn++) in arm_cmn_node()
394 if (dn->type == type) in arm_cmn_node()
406 case 0x01: return " RN-I |"; in arm_cmn_device_type()
407 case 0x02: return " RN-D |"; in arm_cmn_device_type()
408 case 0x04: return " RN-F_B |"; in arm_cmn_device_type()
409 case 0x05: return "RN-F_B_E|"; in arm_cmn_device_type()
410 case 0x06: return " RN-F_A |"; in arm_cmn_device_type()
411 case 0x07: return "RN-F_A_E|"; in arm_cmn_device_type()
412 case 0x08: return " HN-T |"; in arm_cmn_device_type()
413 case 0x09: return " HN-I |"; in arm_cmn_device_type()
414 case 0x0a: return " HN-D |"; in arm_cmn_device_type()
415 case 0x0b: return " HN-P |"; in arm_cmn_device_type()
416 case 0x0c: return " SN-F |"; in arm_cmn_device_type()
418 case 0x0e: return " HN-F |"; in arm_cmn_device_type()
419 case 0x0f: return " SN-F_E |"; in arm_cmn_device_type()
420 case 0x10: return " SN-F_D |"; in arm_cmn_device_type()
424 case 0x14: return " RN-F_D |"; in arm_cmn_device_type()
425 case 0x15: return "RN-F_D_E|"; in arm_cmn_device_type()
426 case 0x16: return " RN-F_C |"; in arm_cmn_device_type()
427 case 0x17: return "RN-F_C_E|"; in arm_cmn_device_type()
428 case 0x18: return " RN-F_E |"; in arm_cmn_device_type()
429 case 0x19: return "RN-F_E_E|"; in arm_cmn_device_type()
431 case 0x1d: return " HN-V |"; in arm_cmn_device_type()
439 struct arm_cmn *cmn = s->private; in arm_cmn_show_logid()
442 for (dn = cmn->dns; dn->type; dn++) { in arm_cmn_show_logid()
443 struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); in arm_cmn_show_logid()
445 if (dn->type == CMN_TYPE_XP) in arm_cmn_show_logid()
447 /* Ignore the extra components that will overlap on some ports */ in arm_cmn_show_logid()
448 if (dn->type < CMN_TYPE_HNI) in arm_cmn_show_logid()
454 seq_printf(s, " #%-2d |", dn->logid); in arm_cmn_show_logid()
462 struct arm_cmn *cmn = s->private; in arm_cmn_map_show()
463 int x, y, p, pmax = fls(cmn->ports_used); in arm_cmn_map_show()
466 for (x = 0; x < cmn->mesh_x; x++) in arm_cmn_map_show()
469 y = cmn->mesh_y; in arm_cmn_map_show()
470 while (y--) { in arm_cmn_map_show()
471 int xp_base = cmn->mesh_x * y; in arm_cmn_map_show()
474 for (x = 0; x < cmn->mesh_x; x++) in arm_cmn_map_show()
475 seq_puts(s, "--------+"); in arm_cmn_map_show()
478 for (x = 0; x < cmn->mesh_x; x++) { in arm_cmn_map_show()
479 struct arm_cmn_node *xp = cmn->xps + xp_base + x; in arm_cmn_map_show()
480 void __iomem *base = xp->pmu_base - CMN_PMU_OFFSET; in arm_cmn_map_show()
488 seq_printf(s, " XP #%-2d |", xp_base + x); in arm_cmn_map_show()
492 for (x = 0; x < cmn->mesh_x; x++) { in arm_cmn_map_show()
493 u8 dtc = cmn->xps[xp_base + x].dtc; in arm_cmn_map_show()
495 if (dtc & (dtc - 1)) in arm_cmn_map_show()
501 for (x = 0; x < cmn->mesh_x; x++) in arm_cmn_map_show()
506 for (x = 0; x < cmn->mesh_x; x++) in arm_cmn_map_show()
509 for (x = 0; x < cmn->mesh_x; x++) in arm_cmn_map_show()
512 for (x = 0; x < cmn->mesh_x; x++) in arm_cmn_map_show()
515 seq_puts(s, "\n-----+"); in arm_cmn_map_show()
517 for (x = 0; x < cmn->mesh_x; x++) in arm_cmn_map_show()
518 seq_puts(s, "--------+"); in arm_cmn_map_show()
529 name = devm_kasprintf(cmn->dev, GFP_KERNEL, "map_%d", id); in arm_cmn_debugfs_init()
533 cmn->debug = debugfs_create_file(name, 0444, arm_cmn_debugfs, cmn, &arm_cmn_map_fops); in arm_cmn_debugfs_init()
551 for (i = 0, dn = hw->dn; i < hw->num_dns; i++, dn++)
556 return (struct arm_cmn_hw_event *)&event->hw; in to_cmn_hw()
603 if (eattr->type == CMN_TYPE_DTC) in arm_cmn_event_show()
604 return sysfs_emit(buf, "type=0x%x\n", eattr->type); in arm_cmn_event_show()
606 if (eattr->type == CMN_TYPE_WP) in arm_cmn_event_show()
609 eattr->type, eattr->eventid); in arm_cmn_event_show()
611 if (eattr->fsel > SEL_NONE) in arm_cmn_event_show()
613 eattr->type, eattr->eventid, eattr->occupid); in arm_cmn_event_show()
615 return sysfs_emit(buf, "type=0x%x,eventid=0x%x\n", eattr->type, in arm_cmn_event_show()
616 eattr->eventid); in arm_cmn_event_show()
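These sysfs strings are exactly what perf consumes, so a named event ultimately expands into a raw invocation along the lines of perf stat -e arm_cmn_0/type=0x5,eventid=0x1/ (the PMU instance name comes from the arm_cmn_%d registration further down; the type/eventid values here are purely illustrative).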
631 if (!(eattr->model & cmn->model)) in arm_cmn_event_attr_is_visible()
634 type = eattr->type; in arm_cmn_event_attr_is_visible()
635 eventid = eattr->eventid; in arm_cmn_event_attr_is_visible()
639 return attr->mode; in arm_cmn_event_attr_is_visible()
646 if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3))) in arm_cmn_event_attr_is_visible()
649 if (chan == 4 && cmn->model == CMN600) in arm_cmn_event_attr_is_visible()
652 if ((chan == 5 && cmn->rsp_vc_num < 2) || in arm_cmn_event_attr_is_visible()
653 (chan == 6 && cmn->dat_vc_num < 2) || in arm_cmn_event_attr_is_visible()
654 (chan == 7 && cmn->snp_vc_num < 2) || in arm_cmn_event_attr_is_visible()
655 (chan == 8 && cmn->req_vc_num < 2)) in arm_cmn_event_attr_is_visible()
659 /* Revision-specific differences */ in arm_cmn_event_attr_is_visible()
660 if (cmn->model == CMN600) { in arm_cmn_event_attr_is_visible()
661 if (cmn->rev < CMN600_R1P3) { in arm_cmn_event_attr_is_visible()
665 if (cmn->rev < CMN600_R1P2) { in arm_cmn_event_attr_is_visible()
671 } else if (cmn->model == CMN650) { in arm_cmn_event_attr_is_visible()
672 if (cmn->rev < CMN650_R2P0 || cmn->rev == CMN650_R1P2) { in arm_cmn_event_attr_is_visible()
680 } else if (cmn->model == CMN700) { in arm_cmn_event_attr_is_visible()
681 if (cmn->rev < CMN700_R2P0) { in arm_cmn_event_attr_is_visible()
689 if (cmn->rev < CMN700_R1P0) { in arm_cmn_event_attr_is_visible()
698 return attr->mode; in arm_cmn_event_attr_is_visible()
781 * DVM node events conflict with HN-I events in the equivalent PMU
782 * slot, but our lazy short-cut of using the DTM counter index for
884 * HN-P events squat on top of the HN-I similarly to DVM events, except
907 /* We treat watchpoints as a special made-up class of XP events */
1108 int lo = __ffs(fmt->field), hi = __fls(fmt->field); in arm_cmn_format_show()
1113 if (!fmt->config) in arm_cmn_format_show()
1114 return sysfs_emit(buf, "config:%d-%d\n", lo, hi); in arm_cmn_format_show()
1116 return sysfs_emit(buf, "config%d:%d-%d\n", fmt->config, lo, hi); in arm_cmn_format_show()
1156 return cpumap_print_to_pagebuf(true, buf, cpumask_of(cmn->cpu)); in arm_cmn_cpumask_show()
1191 bool is_cmn600 = to_cmn(event->pmu)->model == CMN600; in arm_cmn_wp_config()
1208 if (!cmn->state) in arm_cmn_set_state()
1209 writel_relaxed(0, cmn->dtc[0].base + CMN_DT_PMCR); in arm_cmn_set_state()
1210 cmn->state |= state; in arm_cmn_set_state()
1215 cmn->state &= ~state; in arm_cmn_clear_state()
1216 if (!cmn->state) in arm_cmn_clear_state()
1218 cmn->dtc[0].base + CMN_DT_PMCR); in arm_cmn_clear_state()
1241 if (dtm != &cmn->dtms[dn->dtm]) { in arm_cmn_read_dtm()
1242 dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset; in arm_cmn_read_dtm()
1243 reg = readq_relaxed(dtm->base + offset); in arm_cmn_read_dtm()
1245 dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); in arm_cmn_read_dtm()
1253 u64 val = readq_relaxed(dtc->base + CMN_DT_PMCCNTR); in arm_cmn_read_cc()
1255 writeq_relaxed(CMN_CC_INIT, dtc->base + CMN_DT_PMCCNTR); in arm_cmn_read_cc()
1256 return (val - CMN_CC_INIT) & ((CMN_CC_INIT << 1) - 1); in arm_cmn_read_cc()
1263 val = readl_relaxed(dtc->base + pmevcnt); in arm_cmn_read_counter()
1264 writel_relaxed(CMN_COUNTER_INIT, dtc->base + pmevcnt); in arm_cmn_read_counter()
1265 return val - CMN_COUNTER_INIT; in arm_cmn_read_counter()
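Both read paths above follow a read-then-reseed pattern: each counter is programmed back to its mid-range init value on every read, and the elapsed count is recovered by subtracting that init value modulo the counter width. For the 40-bit cycle counter, (val - CMN_CC_INIT) & ((CMN_CC_INIT << 1) - 1) is exactly that modulo-2^40 subtraction, assuming CMN_CC_INIT is bit 39 (mirroring the 32-bit event counters, which get the same treatment via plain u32 wraparound). A small sketch of the arithmetic under that assumption:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CC_INIT  0x8000000000ULL        /* assumed: bit 39, mid-range of a 40-bit counter */
#define CC_MASK  ((CC_INIT << 1) - 1)   /* 2^40 - 1 */

/* Delta since the counter was last re-seeded to CC_INIT, modulo 2^40 */
static uint64_t cc_delta(uint64_t val)
{
        return (val - CC_INIT) & CC_MASK;
}

int main(void)
{
        /* Normal case: counter advanced by 1000 ticks since the re-seed */
        assert(cc_delta(CC_INIT + 1000) == 1000);
        /* Wrap case: the 40-bit counter rolled over past zero before being read */
        assert(cc_delta(123) == (1ULL << 39) + 123);
        printf("40-bit delta arithmetic checks out\n");
        return 0;
}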
1270 struct arm_cmn *cmn = to_cmn(event->pmu); in arm_cmn_init_counter()
1272 unsigned int i, pmevcnt = CMN_DT_PMEVCNT(hw->dtc_idx); in arm_cmn_init_counter()
1275 for (i = 0; hw->dtcs_used & (1U << i); i++) { in arm_cmn_init_counter()
1276 writel_relaxed(CMN_COUNTER_INIT, cmn->dtc[i].base + pmevcnt); in arm_cmn_init_counter()
1277 cmn->dtc[i].counters[hw->dtc_idx] = event; in arm_cmn_init_counter()
1281 local64_set(&event->hw.prev_count, count); in arm_cmn_init_counter()
1286 struct arm_cmn *cmn = to_cmn(event->pmu); in arm_cmn_event_read()
1292 if (hw->dtc_idx == CMN_DT_NUM_COUNTERS) { in arm_cmn_event_read()
1293 i = __ffs(hw->dtcs_used); in arm_cmn_event_read()
1294 delta = arm_cmn_read_cc(cmn->dtc + i); in arm_cmn_event_read()
1295 local64_add(delta, &event->count); in arm_cmn_event_read()
1299 prev = local64_xchg(&event->hw.prev_count, new); in arm_cmn_event_read()
1301 delta = new - prev; in arm_cmn_event_read()
1304 for (i = 0; hw->dtcs_used & (1U << i); i++) { in arm_cmn_event_read()
1305 new = arm_cmn_read_counter(cmn->dtc + i, hw->dtc_idx); in arm_cmn_event_read()
1309 local64_add(delta, &event->count); in arm_cmn_event_read()
1320 if (!dn->occupid[fsel].count) { in arm_cmn_set_event_sel_hi()
1321 dn->occupid[fsel].val = occupid; in arm_cmn_set_event_sel_hi()
1323 dn->occupid[SEL_CBUSY_SNTHROTTLE_SEL].val) | in arm_cmn_set_event_sel_hi()
1325 dn->occupid[SEL_CLASS_OCCUP_ID].val) | in arm_cmn_set_event_sel_hi()
1327 dn->occupid[SEL_OCCUP1ID].val); in arm_cmn_set_event_sel_hi()
1328 writel_relaxed(reg >> 32, dn->pmu_base + CMN_PMU_EVENT_SEL + 4); in arm_cmn_set_event_sel_hi()
1329 } else if (dn->occupid[fsel].val != occupid) { in arm_cmn_set_event_sel_hi()
1330 return -EBUSY; in arm_cmn_set_event_sel_hi()
1332 dn->occupid[fsel].count++; in arm_cmn_set_event_sel_hi()
1340 dn->event_w[dtm_idx] = eventid; in arm_cmn_set_event_sel_lo()
1341 writeq_relaxed(le64_to_cpu(dn->event_sel_w), dn->pmu_base + CMN_PMU_EVENT_SEL); in arm_cmn_set_event_sel_lo()
1343 dn->event[dtm_idx] = eventid; in arm_cmn_set_event_sel_lo()
1344 writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL); in arm_cmn_set_event_sel_lo()
1350 struct arm_cmn *cmn = to_cmn(event->pmu); in arm_cmn_event_start()
1357 i = __ffs(hw->dtcs_used); in arm_cmn_event_start()
1358 writeq_relaxed(CMN_CC_INIT, cmn->dtc[i].base + CMN_DT_PMCCNTR); in arm_cmn_event_start()
1359 cmn->dtc[i].cc_active = true; in arm_cmn_event_start()
1366 void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset); in arm_cmn_event_start()
1372 int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); in arm_cmn_event_start()
1375 hw->wide_sel); in arm_cmn_event_start()
1381 struct arm_cmn *cmn = to_cmn(event->pmu); in arm_cmn_event_stop()
1388 i = __ffs(hw->dtcs_used); in arm_cmn_event_stop()
1389 cmn->dtc[i].cc_active = false; in arm_cmn_event_stop()
1394 void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset); in arm_cmn_event_stop()
1400 int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); in arm_cmn_event_stop()
1402 arm_cmn_set_event_sel_lo(dn, dtm_idx, 0, hw->wide_sel); in arm_cmn_event_stop()
1429 val->cycles = true; in arm_cmn_val_add_event()
1433 val->dtc_count++; in arm_cmn_val_add_event()
1436 int wp_idx, dtm = dn->dtm, sel = hw->filter_sel; in arm_cmn_val_add_event()
1438 val->dtm_count[dtm]++; in arm_cmn_val_add_event()
1440 if (sel > SEL_NONE) in arm_cmn_val_add_event()
1441 val->occupid[dtm][sel] = CMN_EVENT_OCCUPID(event) + 1; in arm_cmn_val_add_event()
1447 val->wp[dtm][wp_idx] = CMN_EVENT_WP_COMBINE(event) + 1; in arm_cmn_val_add_event()
1455 struct perf_event *sibling, *leader = event->group_leader; in arm_cmn_validate_group()
1458 int i, ret = -EINVAL; in arm_cmn_validate_group()
1463 if (event->pmu != leader->pmu && !is_software_event(leader)) in arm_cmn_validate_group()
1464 return -EINVAL; in arm_cmn_validate_group()
1468 return -ENOMEM; in arm_cmn_validate_group()
1476 ret = val->cycles ? -EINVAL : 0; in arm_cmn_validate_group()
1480 if (val->dtc_count == CMN_DT_NUM_COUNTERS) in arm_cmn_validate_group()
1484 int wp_idx, wp_cmb, dtm = dn->dtm, sel = hw->filter_sel; in arm_cmn_validate_group()
1486 if (val->dtm_count[dtm] == CMN_DTM_NUM_COUNTERS) in arm_cmn_validate_group()
1489 if (sel > SEL_NONE && val->occupid[dtm][sel] && in arm_cmn_validate_group()
1490 val->occupid[dtm][sel] != CMN_EVENT_OCCUPID(event) + 1) in arm_cmn_validate_group()
1497 if (val->wp[dtm][wp_idx]) in arm_cmn_validate_group()
1500 wp_cmb = val->wp[dtm][wp_idx ^ 1]; in arm_cmn_validate_group()
1518 for (i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs) - 1; i++) { in arm_cmn_filter_sel()
1520 if (e->model & model && e->type == type && e->eventid == eventid) in arm_cmn_filter_sel()
1521 return e->fsel; in arm_cmn_filter_sel()
1529 struct arm_cmn *cmn = to_cmn(event->pmu); in arm_cmn_event_init()
1536 if (event->attr.type != event->pmu->type) in arm_cmn_event_init()
1537 return -ENOENT; in arm_cmn_event_init()
1539 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) in arm_cmn_event_init()
1540 return -EINVAL; in arm_cmn_event_init()
1542 event->cpu = cmn->cpu; in arm_cmn_event_init()
1543 if (event->cpu < 0) in arm_cmn_event_init()
1544 return -EINVAL; in arm_cmn_event_init()
1557 return -EINVAL; in arm_cmn_event_init()
1558 /* ...but the DTM may depend on which port we're watching */ in arm_cmn_event_init()
1559 if (cmn->multi_dtm) in arm_cmn_event_init()
1560 hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2; in arm_cmn_event_init()
1561 } else if (type == CMN_TYPE_XP && cmn->model == CMN700) { in arm_cmn_event_init()
1562 hw->wide_sel = true; in arm_cmn_event_init()
1566 hw->filter_sel = arm_cmn_filter_sel(cmn->model, type, eventid); in arm_cmn_event_init()
1571 hw->dn = arm_cmn_node(cmn, type); in arm_cmn_event_init()
1572 if (!hw->dn) in arm_cmn_event_init()
1573 return -EINVAL; in arm_cmn_event_init()
1574 for (dn = hw->dn; dn->type == type; dn++) { in arm_cmn_event_init()
1575 if (bynodeid && dn->id != nodeid) { in arm_cmn_event_init()
1576 hw->dn++; in arm_cmn_event_init()
1579 hw->dtcs_used |= arm_cmn_node_to_xp(cmn, dn)->dtc; in arm_cmn_event_init()
1580 hw->num_dns++; in arm_cmn_event_init()
1585 if (!hw->num_dns) { in arm_cmn_event_init()
1588 dev_dbg(cmn->dev, "invalid node 0x%x (%d,%d,%d,%d) type 0x%x\n", in arm_cmn_event_init()
1590 return -EINVAL; in arm_cmn_event_init()
1602 while (i--) { in arm_cmn_event_clear()
1603 struct arm_cmn_dtm *dtm = &cmn->dtms[hw->dn[i].dtm] + hw->dtm_offset; in arm_cmn_event_clear()
1604 unsigned int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); in arm_cmn_event_clear()
1607 dtm->wp_event[arm_cmn_wp_idx(event)] = -1; in arm_cmn_event_clear()
1609 if (hw->filter_sel > SEL_NONE) in arm_cmn_event_clear()
1610 hw->dn[i].occupid[hw->filter_sel].count--; in arm_cmn_event_clear()
1612 dtm->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx); in arm_cmn_event_clear()
1613 writel_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG); in arm_cmn_event_clear()
1615 memset(hw->dtm_idx, 0, sizeof(hw->dtm_idx)); in arm_cmn_event_clear()
1617 for (i = 0; hw->dtcs_used & (1U << i); i++) in arm_cmn_event_clear()
1618 cmn->dtc[i].counters[hw->dtc_idx] = NULL; in arm_cmn_event_clear()
1623 struct arm_cmn *cmn = to_cmn(event->pmu); in arm_cmn_event_add()
1625 struct arm_cmn_dtc *dtc = &cmn->dtc[0]; in arm_cmn_event_add()
1632 while (cmn->dtc[i].cycles) in arm_cmn_event_add()
1633 if (++i == cmn->num_dtcs) in arm_cmn_event_add()
1634 return -ENOSPC; in arm_cmn_event_add()
1636 cmn->dtc[i].cycles = event; in arm_cmn_event_add()
1637 hw->dtc_idx = CMN_DT_NUM_COUNTERS; in arm_cmn_event_add()
1638 hw->dtcs_used = 1U << i; in arm_cmn_event_add()
1647 while (dtc->counters[dtc_idx]) in arm_cmn_event_add()
1649 return -ENOSPC; in arm_cmn_event_add()
1651 hw->dtc_idx = dtc_idx; in arm_cmn_event_add()
1655 struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset; in arm_cmn_event_add()
1660 while (dtm->pmu_config_low & CMN__PMEVCNT_PAIRED(dtm_idx)) in arm_cmn_event_add()
1670 if (dtm->wp_event[wp_idx] >= 0) in arm_cmn_event_add()
1673 tmp = dtm->wp_event[wp_idx ^ 1]; in arm_cmn_event_add()
1675 CMN_EVENT_WP_COMBINE(dtc->counters[tmp])) in arm_cmn_event_add()
1679 dtm->wp_event[wp_idx] = dtc_idx; in arm_cmn_event_add()
1680 writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx)); in arm_cmn_event_add()
1682 struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); in arm_cmn_event_add()
1684 if (cmn->multi_dtm) in arm_cmn_event_add()
1690 if (arm_cmn_set_event_sel_hi(dn, hw->filter_sel, CMN_EVENT_OCCUPID(event))) in arm_cmn_event_add()
1694 arm_cmn_set_index(hw->dtm_idx, i, dtm_idx); in arm_cmn_event_add()
1696 dtm->input_sel[dtm_idx] = input_sel; in arm_cmn_event_add()
1698 dtm->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift); in arm_cmn_event_add()
1699 dtm->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, dtc_idx) << shift; in arm_cmn_event_add()
1700 dtm->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx); in arm_cmn_event_add()
1701 reg = (u64)le32_to_cpu(dtm->pmu_config_high) << 32 | dtm->pmu_config_low; in arm_cmn_event_add()
1702 writeq_relaxed(reg, dtm->base + CMN_DTM_PMU_CONFIG); in arm_cmn_event_add()
1715 return -ENOSPC; in arm_cmn_event_add()
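The hw->dtm_idx array manipulated via arm_cmn_get_index()/arm_cmn_set_index() throughout the add/start/stop/clear paths stores one small per-node counter index rather than a full integer per node. Since a DTM only has four event counters, two bits per entry are enough; the sketch below shows one plausible packing into 64-bit words (the exact encoding is an assumption, but it is consistent with the 0-3 index range used here):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* 2-bit fields, 32 per 64-bit word: enough for a DTM counter index of 0-3 */
static unsigned int get_index(const uint64_t x[], unsigned int pos)
{
        return (x[pos / 32] >> ((pos % 32) * 2)) & 3;
}

static void set_index(uint64_t x[], unsigned int pos, unsigned int val)
{
        x[pos / 32] |= (uint64_t)val << ((pos % 32) * 2);
}

int main(void)
{
        uint64_t idx[4] = { 0 };        /* covers up to 128 target nodes */

        set_index(idx, 0, 3);
        set_index(idx, 40, 2);          /* lands in the second word */
        assert(get_index(idx, 0) == 3);
        assert(get_index(idx, 40) == 2);
        assert(get_index(idx, 1) == 0);
        printf("packed DTM indices behave as expected\n");
        return 0;
}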
1720 struct arm_cmn *cmn = to_cmn(event->pmu); in arm_cmn_event_del()
1727 cmn->dtc[__ffs(hw->dtcs_used)].cycles = NULL; in arm_cmn_event_del()
1729 arm_cmn_event_clear(cmn, event, hw->num_dns); in arm_cmn_event_del()
1736 * plus it seems they don't work properly on some hardware anyway :(
1758 perf_pmu_migrate_context(&cmn->pmu, cmn->cpu, cpu); in arm_cmn_migrate()
1759 for (i = 0; i < cmn->num_dtcs; i++) in arm_cmn_migrate()
1760 irq_set_affinity(cmn->dtc[i].irq, cpumask_of(cpu)); in arm_cmn_migrate()
1761 cmn->cpu = cpu; in arm_cmn_migrate()
1770 node = dev_to_node(cmn->dev); in arm_cmn_pmu_online_cpu()
1771 if (node != NUMA_NO_NODE && cpu_to_node(cmn->cpu) != node && cpu_to_node(cpu) == node) in arm_cmn_pmu_online_cpu()
1784 if (cpu != cmn->cpu) in arm_cmn_pmu_offline_cpu()
1787 node = dev_to_node(cmn->dev); in arm_cmn_pmu_offline_cpu()
1804 u32 status = readl_relaxed(dtc->base + CMN_DT_PMOVSR); in arm_cmn_handle_irq()
1811 if (WARN_ON(!dtc->counters[i])) in arm_cmn_handle_irq()
1814 local64_add(delta, &dtc->counters[i]->count); in arm_cmn_handle_irq()
1820 if (dtc->cc_active && !WARN_ON(!dtc->cycles)) { in arm_cmn_handle_irq()
1822 local64_add(delta, &dtc->cycles->count); in arm_cmn_handle_irq()
1826 writel_relaxed(status, dtc->base + CMN_DT_PMOVSR_CLR); in arm_cmn_handle_irq()
1828 if (!dtc->irq_friend) in arm_cmn_handle_irq()
1830 dtc += dtc->irq_friend; in arm_cmn_handle_irq()
1839 for (i = 0; i < cmn->num_dtcs; i++) { in arm_cmn_init_irqs()
1840 irq = cmn->dtc[i].irq; in arm_cmn_init_irqs()
1841 for (j = i; j--; ) { in arm_cmn_init_irqs()
1842 if (cmn->dtc[j].irq == irq) { in arm_cmn_init_irqs()
1843 cmn->dtc[j].irq_friend = i - j; in arm_cmn_init_irqs()
1847 err = devm_request_irq(cmn->dev, irq, arm_cmn_handle_irq, in arm_cmn_init_irqs()
1849 dev_name(cmn->dev), &cmn->dtc[i]); in arm_cmn_init_irqs()
1853 err = irq_set_affinity(irq, cpumask_of(cmn->cpu)); in arm_cmn_init_irqs()
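The irq_friend scheme above lets several DTCs share one interrupt line without registering duplicate handlers: each later DTC that reuses an IRQ is linked from its closest earlier sharer by a forward offset, and the handler then walks that chain from the DTC it was registered for. A userspace mock of the linking and the chain walk (struct name and IRQ numbers illustrative):

#include <stdio.h>

struct fake_dtc {
        int irq;
        int irq_friend; /* forward distance to the next DTC sharing this IRQ */
};

int main(void)
{
        /* Illustrative: four DTCs, where 0, 1 and 3 happen to share IRQ 40 */
        struct fake_dtc dtc[] = {
                { .irq = 40 }, { .irq = 40 }, { .irq = 41 }, { .irq = 40 },
        };
        int i, j, n = sizeof(dtc) / sizeof(dtc[0]);

        /* Link each later sharer from its closest earlier sharer */
        for (i = 0; i < n; i++)
                for (j = i; j--; )
                        if (dtc[j].irq == dtc[i].irq) {
                                dtc[j].irq_friend = i - j;
                                break;
                        }

        /* The registered handler starts at the first sharer and walks the chain */
        for (i = 0; ; i += dtc[i].irq_friend) {
                printf("servicing DTC %d on IRQ %d\n", i, dtc[i].irq);
                if (!dtc[i].irq_friend)
                        break;
        }
        return 0;
}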
1866 dtm->base = xp->pmu_base + CMN_DTM_OFFSET(idx); in arm_cmn_init_dtm()
1867 dtm->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN; in arm_cmn_init_dtm()
1869 dtm->wp_event[i] = -1; in arm_cmn_init_dtm()
1870 writeq_relaxed(0, dtm->base + CMN_DTM_WPn_MASK(i)); in arm_cmn_init_dtm()
1871 writeq_relaxed(~0ULL, dtm->base + CMN_DTM_WPn_VAL(i)); in arm_cmn_init_dtm()
1877 struct arm_cmn_dtc *dtc = cmn->dtc + idx; in arm_cmn_init_dtc()
1879 dtc->base = dn->pmu_base - CMN_PMU_OFFSET; in arm_cmn_init_dtc()
1880 dtc->irq = platform_get_irq(to_platform_device(cmn->dev), idx); in arm_cmn_init_dtc()
1881 if (dtc->irq < 0) in arm_cmn_init_dtc()
1882 return dtc->irq; in arm_cmn_init_dtc()
1884 writel_relaxed(0, dtc->base + CMN_DT_PMCR); in arm_cmn_init_dtc()
1885 writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR); in arm_cmn_init_dtc()
1886 writel_relaxed(CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR); in arm_cmn_init_dtc()
1896 cmp = dna->type - dnb->type; in arm_cmn_node_cmp()
1898 cmp = dna->logid - dnb->logid; in arm_cmn_node_cmp()
1906 u8 dtcs_present = (1 << cmn->num_dtcs) - 1; in arm_cmn_init_dtcs()
1908 cmn->dtc = devm_kcalloc(cmn->dev, cmn->num_dtcs, sizeof(cmn->dtc[0]), GFP_KERNEL); in arm_cmn_init_dtcs()
1909 if (!cmn->dtc) in arm_cmn_init_dtcs()
1910 return -ENOMEM; in arm_cmn_init_dtcs()
1912 sort(cmn->dns, cmn->num_dns, sizeof(cmn->dns[0]), arm_cmn_node_cmp, NULL); in arm_cmn_init_dtcs()
1914 cmn->xps = arm_cmn_node(cmn, CMN_TYPE_XP); in arm_cmn_init_dtcs()
1916 for (dn = cmn->dns; dn->type; dn++) { in arm_cmn_init_dtcs()
1917 if (dn->type == CMN_TYPE_XP) { in arm_cmn_init_dtcs()
1918 dn->dtc &= dtcs_present; in arm_cmn_init_dtcs()
1923 dn->dtm = xp->dtm; in arm_cmn_init_dtcs()
1924 if (cmn->multi_dtm) in arm_cmn_init_dtcs()
1925 dn->dtm += arm_cmn_nid(cmn, dn->id).port / 2; in arm_cmn_init_dtcs()
1927 if (dn->type == CMN_TYPE_DTC) { in arm_cmn_init_dtcs()
1930 if (xp->dtc == 0xf) in arm_cmn_init_dtcs()
1931 xp->dtc = 1 << dtc_idx; in arm_cmn_init_dtcs()
1937 /* To the PMU, RN-Ds don't add anything over RN-Is, so smoosh them together */ in arm_cmn_init_dtcs()
1938 if (dn->type == CMN_TYPE_RND) in arm_cmn_init_dtcs()
1939 dn->type = CMN_TYPE_RNI; in arm_cmn_init_dtcs()
1941 /* We split the RN-I off already, so let the CCLA part match CCLA events */ in arm_cmn_init_dtcs()
1942 if (dn->type == CMN_TYPE_CCLA_RNI) in arm_cmn_init_dtcs()
1943 dn->type = CMN_TYPE_CCLA; in arm_cmn_init_dtcs()
1946 writel_relaxed(CMN_DT_DTC_CTL_DT_EN, cmn->dtc[0].base + CMN_DT_DTC_CTL); in arm_cmn_init_dtcs()
1954 u64 reg = readq_relaxed(cmn->base + offset + CMN_NODE_INFO); in arm_cmn_init_node_info()
1956 node->type = FIELD_GET(CMN_NI_NODE_TYPE, reg); in arm_cmn_init_node_info()
1957 node->id = FIELD_GET(CMN_NI_NODE_ID, reg); in arm_cmn_init_node_info()
1958 node->logid = FIELD_GET(CMN_NI_LOGICAL_ID, reg); in arm_cmn_init_node_info()
1960 node->pmu_base = cmn->base + offset + CMN_PMU_OFFSET; in arm_cmn_init_node_info()
1962 if (node->type == CMN_TYPE_CFG) in arm_cmn_init_node_info()
1964 else if (node->type == CMN_TYPE_XP) in arm_cmn_init_node_info()
1969 dev_dbg(cmn->dev, "node%*c%#06hx%*ctype:%-#6x id:%-4hd off:%#x\n", in arm_cmn_init_node_info()
1970 (level * 2) + 1, ' ', node->id, 5 - (level * 2), ' ', in arm_cmn_init_node_info()
1971 node->type, node->logid, offset); in arm_cmn_init_node_info()
1999 return -ENODEV; in arm_cmn_discover()
2001 cfg_region = cmn->base + rgn_offset; in arm_cmn_discover()
2003 cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg); in arm_cmn_discover()
2006 cmn->multi_dtm = reg & CMN_INFO_MULTIPLE_DTM_EN; in arm_cmn_discover()
2007 cmn->rsp_vc_num = FIELD_GET(CMN_INFO_RSP_VC_NUM, reg); in arm_cmn_discover()
2008 cmn->dat_vc_num = FIELD_GET(CMN_INFO_DAT_VC_NUM, reg); in arm_cmn_discover()
2011 cmn->snp_vc_num = FIELD_GET(CMN_INFO_SNP_VC_NUM, reg); in arm_cmn_discover()
2012 cmn->req_vc_num = FIELD_GET(CMN_INFO_REQ_VC_NUM, reg); in arm_cmn_discover()
2018 cmn->num_xps = child_count; in arm_cmn_discover()
2019 cmn->num_dns = cmn->num_xps; in arm_cmn_discover()
2022 for (i = 0; i < cmn->num_xps; i++) { in arm_cmn_discover()
2026 reg = readq_relaxed(cmn->base + xp_offset[i] + CMN_CHILD_INFO); in arm_cmn_discover()
2027 cmn->num_dns += FIELD_GET(CMN_CI_CHILD_COUNT, reg); in arm_cmn_discover()
2033 * bound, account for double the number of non-XP nodes. in arm_cmn_discover()
2035 dn = devm_kcalloc(cmn->dev, cmn->num_dns * 2 - cmn->num_xps, in arm_cmn_discover()
2038 return -ENOMEM; in arm_cmn_discover()
2040 /* Initial safe upper bound on DTMs for any possible mesh layout */ in arm_cmn_discover()
2041 i = cmn->num_xps; in arm_cmn_discover()
2042 if (cmn->multi_dtm) in arm_cmn_discover()
2043 i += cmn->num_xps + 1; in arm_cmn_discover()
2044 dtm = devm_kcalloc(cmn->dev, i, sizeof(*dtm), GFP_KERNEL); in arm_cmn_discover()
2046 return -ENOMEM; in arm_cmn_discover()
2049 cmn->dns = dn; in arm_cmn_discover()
2050 cmn->dtms = dtm; in arm_cmn_discover()
2051 for (i = 0; i < cmn->num_xps; i++) { in arm_cmn_discover()
2052 void __iomem *xp_region = cmn->base + xp_offset[i]; in arm_cmn_discover()
2063 if (xp->id == (1 << 3)) in arm_cmn_discover()
2064 cmn->mesh_x = xp->logid; in arm_cmn_discover()
2066 if (cmn->model == CMN600) in arm_cmn_discover()
2067 xp->dtc = 0xf; in arm_cmn_discover()
2069 xp->dtc = 1 << readl_relaxed(xp_region + CMN_DTM_UNIT_INFO); in arm_cmn_discover()
2071 xp->dtm = dtm - cmn->dtms; in arm_cmn_discover()
2078 * with port 2 connected, for the HN-D. in arm_cmn_discover()
2093 if (cmn->multi_dtm && (xp_ports & 0xc)) in arm_cmn_discover()
2095 if (cmn->multi_dtm && (xp_ports & 0x30)) in arm_cmn_discover()
2098 cmn->ports_used |= xp_ports; in arm_cmn_discover()
2108 * we haven't a clue how to power up arbitrary CHI requesters. in arm_cmn_discover()
2109 * As of CMN-600r1 these could only be RN-SAMs or CXLAs, in arm_cmn_discover()
2112 * but they don't go to regular XP DTMs, and they depend on in arm_cmn_discover()
2116 dev_dbg(cmn->dev, "ignoring external node %llx\n", reg); in arm_cmn_discover()
2122 switch (dn->type) { in arm_cmn_discover()
2124 cmn->num_dtcs++; in arm_cmn_discover()
2158 dn[1].type = arm_cmn_subtype(dn->type); in arm_cmn_discover()
2163 dev_err(cmn->dev, "invalid device node type: 0x%x\n", dn->type); in arm_cmn_discover()
2164 return -ENODEV; in arm_cmn_discover()
2170 cmn->num_dns = dn - cmn->dns; in arm_cmn_discover()
2172 /* Cheeky +1 to help terminate pointer-based iteration later */ in arm_cmn_discover()
2173 sz = (void *)(dn + 1) - (void *)cmn->dns; in arm_cmn_discover()
2174 dn = devm_krealloc(cmn->dev, cmn->dns, sz, GFP_KERNEL); in arm_cmn_discover()
2176 cmn->dns = dn; in arm_cmn_discover()
2178 sz = (void *)dtm - (void *)cmn->dtms; in arm_cmn_discover()
2179 dtm = devm_krealloc(cmn->dev, cmn->dtms, sz, GFP_KERNEL); in arm_cmn_discover()
2181 cmn->dtms = dtm; in arm_cmn_discover()
2187 if (!cmn->mesh_x) in arm_cmn_discover()
2188 cmn->mesh_x = cmn->num_xps; in arm_cmn_discover()
2189 cmn->mesh_y = cmn->num_xps / cmn->mesh_x; in arm_cmn_discover()
2192 if (cmn->num_xps == 1) in arm_cmn_discover()
2193 dev_warn(cmn->dev, "1x1 config not fully supported, translate XP events manually\n"); in arm_cmn_discover()
2195 dev_dbg(cmn->dev, "model %d, periph_id_2 revision %d\n", cmn->model, cmn->rev); in arm_cmn_discover()
2196 reg = cmn->ports_used; in arm_cmn_discover()
2197 dev_dbg(cmn->dev, "mesh %dx%d, ID width %d, ports %6pbl%s\n", in arm_cmn_discover()
2198 cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn), &reg, in arm_cmn_discover()
2199 cmn->multi_dtm ? ", multi-DTM" : ""); in arm_cmn_discover()
2210 return -EINVAL; in arm_cmn600_acpi_probe()
2214 return -EINVAL; in arm_cmn600_acpi_probe()
2224 cmn->base = devm_ioremap(cmn->dev, cfg->start, resource_size(cfg)); in arm_cmn600_acpi_probe()
2225 if (!cmn->base) in arm_cmn600_acpi_probe()
2226 return -ENOMEM; in arm_cmn600_acpi_probe()
2228 return root->start - cfg->start; in arm_cmn600_acpi_probe()
2235 return of_property_read_u32(np, "arm,root-node", &rootnode) ?: rootnode; in arm_cmn600_of_probe()
2245 cmn = devm_kzalloc(&pdev->dev, sizeof(*cmn), GFP_KERNEL); in arm_cmn_probe()
2247 return -ENOMEM; in arm_cmn_probe()
2249 cmn->dev = &pdev->dev; in arm_cmn_probe()
2250 cmn->model = (unsigned long)device_get_match_data(cmn->dev); in arm_cmn_probe()
2253 if (cmn->model == CMN600 && has_acpi_companion(cmn->dev)) { in arm_cmn_probe()
2257 cmn->base = devm_platform_ioremap_resource(pdev, 0); in arm_cmn_probe()
2258 if (IS_ERR(cmn->base)) in arm_cmn_probe()
2259 return PTR_ERR(cmn->base); in arm_cmn_probe()
2260 if (cmn->model == CMN600) in arm_cmn_probe()
2261 rootnode = arm_cmn600_of_probe(pdev->dev.of_node); in arm_cmn_probe()
2278 cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev)); in arm_cmn_probe()
2279 cmn->pmu = (struct pmu) { in arm_cmn_probe()
2298 name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", this_id); in arm_cmn_probe()
2300 return -ENOMEM; in arm_cmn_probe()
2302 err = cpuhp_state_add_instance(arm_cmn_hp_state, &cmn->cpuhp_node); in arm_cmn_probe()
2306 err = perf_pmu_register(&cmn->pmu, name, -1); in arm_cmn_probe()
2308 cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node); in arm_cmn_probe()
2319 writel_relaxed(0, cmn->dtc[0].base + CMN_DT_DTC_CTL); in arm_cmn_remove()
2321 perf_pmu_unregister(&cmn->pmu); in arm_cmn_remove()
2322 cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node); in arm_cmn_remove()
2323 debugfs_remove(cmn->debug); in arm_cmn_remove()
2329 { .compatible = "arm,cmn-600", .data = (void *)CMN600 },
2330 { .compatible = "arm,cmn-650", .data = (void *)CMN650 },
2331 { .compatible = "arm,cmn-700", .data = (void *)CMN700 },
2332 { .compatible = "arm,ci-700", .data = (void *)CI700 },
2350 .name = "arm-cmn",
2370 arm_cmn_debugfs = debugfs_create_dir("arm-cmn", NULL); in arm_cmn_init()
2391 MODULE_DESCRIPTION("Arm CMN-600 PMU driver");