Lines Matching +full:micro +full:-volt (drivers/perf/xgene_pmu.c)

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * APM X-Gene SoC PMU (Performance Monitor Unit)
5 * Copyright (c) 2016, Applied Micro Circuits Corporation
81 #define GET_CNTR(ev) (ev->hw.idx)
82 #define GET_EVENTID(ev) (ev->hw.config & 0xFFULL)
83 #define GET_AGENTID(ev) (ev->hw.config_base & 0xFFFFFFFFUL)
84 #define GET_AGENT1ID(ev) ((ev->hw.config_base >> 32) & 0xFFFFFFFFUL)
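
Taken with the format attributes below, these macros define the field layout the driver expects from perf: the low byte of hw.config (filled from the event's config) is the event ID, the low 32 bits of hw.config_base (filled from config1 in xgene_perf_event_init() further down) select the agents, and the upper 32 bits carry the second agent mask that only the IOB PMU uses. A standalone sketch of that decoding, with made-up values and plain integers standing in for the driver's perf_event fields:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustration only: plain integers stand in for ev->hw.config and
 * ev->hw.config_base; the masks and shifts mirror the macros above.
 */
int main(void)
{
	uint64_t config      = 0x02;                  /* event ID (e.g. an L3C event)     */
	uint64_t config_base = 0xffffffff00000001ULL; /* agent masks, copied from config1 */

	uint64_t eventid  = config & 0xFFULL;                   /* GET_EVENTID  */
	uint64_t agentid  = config_base & 0xFFFFFFFFUL;         /* GET_AGENTID  */
	uint64_t agent1id = (config_base >> 32) & 0xFFFFFFFFUL; /* GET_AGENT1ID */

	printf("event=0x%02llx agent0=0x%08llx agent1=0x%08llx\n",
	       (unsigned long long)eventid,
	       (unsigned long long)agentid,
	       (unsigned long long)agent1id);
	return 0;
}
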
173 return sysfs_emit(buf, "%s\n", (char *) eattr->var); in xgene_pmu_format_show()
183 XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-7"),
184 XGENE_PMU_FORMAT_ATTR(l3c_agentid, "config1:0-9"),
189 XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-7"),
190 XGENE_PMU_FORMAT_ATTR(iob_agentid, "config1:0-63"),
195 XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-5"),
196 XGENE_PMU_FORMAT_ATTR(mcb_agentid, "config1:0-9"),
201 XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-28"),
226 XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-39"),
231 XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-47"),
236 XGENE_PMU_FORMAT_ATTR(iob_slow_eventid, "config:0-16"),
241 XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-35"),
246 XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-44"),
284 return sysfs_emit(buf, "config=0x%llx\n", pmu_attr->id); in xgene_pmu_event_show()
291 XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
292 XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
293 XGENE_PMU_EVENT_ATTR(read-hit, 0x02),
294 XGENE_PMU_EVENT_ATTR(read-miss, 0x03),
295 XGENE_PMU_EVENT_ATTR(write-need-replacement, 0x06),
296 XGENE_PMU_EVENT_ATTR(write-not-need-replacement, 0x07),
297 XGENE_PMU_EVENT_ATTR(tq-full, 0x08),
298 XGENE_PMU_EVENT_ATTR(ackq-full, 0x09),
299 XGENE_PMU_EVENT_ATTR(wdb-full, 0x0a),
300 XGENE_PMU_EVENT_ATTR(bank-fifo-full, 0x0b),
301 XGENE_PMU_EVENT_ATTR(odb-full, 0x0c),
302 XGENE_PMU_EVENT_ATTR(wbq-full, 0x0d),
303 XGENE_PMU_EVENT_ATTR(bank-conflict-fifo-issue, 0x0e),
304 XGENE_PMU_EVENT_ATTR(bank-fifo-issue, 0x0f),
309 XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
310 XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
311 XGENE_PMU_EVENT_ATTR(axi0-read, 0x02),
312 XGENE_PMU_EVENT_ATTR(axi0-read-partial, 0x03),
313 XGENE_PMU_EVENT_ATTR(axi1-read, 0x04),
314 XGENE_PMU_EVENT_ATTR(axi1-read-partial, 0x05),
315 XGENE_PMU_EVENT_ATTR(csw-read-block, 0x06),
316 XGENE_PMU_EVENT_ATTR(csw-read-partial, 0x07),
317 XGENE_PMU_EVENT_ATTR(axi0-write, 0x10),
318 XGENE_PMU_EVENT_ATTR(axi0-write-partial, 0x11),
319 XGENE_PMU_EVENT_ATTR(axi1-write, 0x13),
320 XGENE_PMU_EVENT_ATTR(axi1-write-partial, 0x14),
321 XGENE_PMU_EVENT_ATTR(csw-inbound-dirty, 0x16),
326 XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
327 XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
328 XGENE_PMU_EVENT_ATTR(csw-read, 0x02),
329 XGENE_PMU_EVENT_ATTR(csw-write-request, 0x03),
330 XGENE_PMU_EVENT_ATTR(mcb-csw-stall, 0x04),
331 XGENE_PMU_EVENT_ATTR(cancel-read-gack, 0x05),
336 XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
337 XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
338 XGENE_PMU_EVENT_ATTR(act-cmd-sent, 0x02),
339 XGENE_PMU_EVENT_ATTR(pre-cmd-sent, 0x03),
340 XGENE_PMU_EVENT_ATTR(rd-cmd-sent, 0x04),
341 XGENE_PMU_EVENT_ATTR(rda-cmd-sent, 0x05),
342 XGENE_PMU_EVENT_ATTR(wr-cmd-sent, 0x06),
343 XGENE_PMU_EVENT_ATTR(wra-cmd-sent, 0x07),
344 XGENE_PMU_EVENT_ATTR(pde-cmd-sent, 0x08),
345 XGENE_PMU_EVENT_ATTR(sre-cmd-sent, 0x09),
346 XGENE_PMU_EVENT_ATTR(prea-cmd-sent, 0x0a),
347 XGENE_PMU_EVENT_ATTR(ref-cmd-sent, 0x0b),
348 XGENE_PMU_EVENT_ATTR(rd-rda-cmd-sent, 0x0c),
349 XGENE_PMU_EVENT_ATTR(wr-wra-cmd-sent, 0x0d),
350 XGENE_PMU_EVENT_ATTR(in-rd-collision, 0x0e),
351 XGENE_PMU_EVENT_ATTR(in-wr-collision, 0x0f),
352 XGENE_PMU_EVENT_ATTR(collision-queue-not-empty, 0x10),
353 XGENE_PMU_EVENT_ATTR(collision-queue-full, 0x11),
354 XGENE_PMU_EVENT_ATTR(mcu-request, 0x12),
355 XGENE_PMU_EVENT_ATTR(mcu-rd-request, 0x13),
356 XGENE_PMU_EVENT_ATTR(mcu-hp-rd-request, 0x14),
357 XGENE_PMU_EVENT_ATTR(mcu-wr-request, 0x15),
358 XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-all, 0x16),
359 XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-cancel, 0x17),
360 XGENE_PMU_EVENT_ATTR(mcu-rd-response, 0x18),
361 XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-all, 0x19),
362 XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-cancel, 0x1a),
363 XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-all, 0x1b),
364 XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-cancel, 0x1c),
389 XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
390 XGENE_PMU_EVENT_ATTR(read-hit, 0x01),
391 XGENE_PMU_EVENT_ATTR(read-miss, 0x02),
392 XGENE_PMU_EVENT_ATTR(index-flush-eviction, 0x03),
393 XGENE_PMU_EVENT_ATTR(write-caused-replacement, 0x04),
394 XGENE_PMU_EVENT_ATTR(write-not-caused-replacement, 0x05),
395 XGENE_PMU_EVENT_ATTR(clean-eviction, 0x06),
396 XGENE_PMU_EVENT_ATTR(dirty-eviction, 0x07),
400 XGENE_PMU_EVENT_ATTR(tq-bank-conflict-issue-stall, 0x0b),
401 XGENE_PMU_EVENT_ATTR(tq-full, 0x0c),
402 XGENE_PMU_EVENT_ATTR(ackq-full, 0x0d),
403 XGENE_PMU_EVENT_ATTR(wdb-full, 0x0e),
404 XGENE_PMU_EVENT_ATTR(odb-full, 0x10),
405 XGENE_PMU_EVENT_ATTR(wbq-full, 0x11),
406 XGENE_PMU_EVENT_ATTR(input-req-async-fifo-stall, 0x12),
407 XGENE_PMU_EVENT_ATTR(output-req-async-fifo-stall, 0x13),
408 XGENE_PMU_EVENT_ATTR(output-data-async-fifo-stall, 0x14),
409 XGENE_PMU_EVENT_ATTR(total-insertion, 0x15),
410 XGENE_PMU_EVENT_ATTR(sip-insertions-r-set, 0x16),
411 XGENE_PMU_EVENT_ATTR(sip-insertions-r-clear, 0x17),
412 XGENE_PMU_EVENT_ATTR(dip-insertions-r-set, 0x18),
413 XGENE_PMU_EVENT_ATTR(dip-insertions-r-clear, 0x19),
414 XGENE_PMU_EVENT_ATTR(dip-insertions-force-r-set, 0x1a),
417 XGENE_PMU_EVENT_ATTR(old-replacement, 0x1d),
418 XGENE_PMU_EVENT_ATTR(young-replacement, 0x1e),
419 XGENE_PMU_EVENT_ATTR(r-set-replacement, 0x1f),
420 XGENE_PMU_EVENT_ATTR(r-clear-replacement, 0x20),
421 XGENE_PMU_EVENT_ATTR(old-r-replacement, 0x21),
422 XGENE_PMU_EVENT_ATTR(old-nr-replacement, 0x22),
423 XGENE_PMU_EVENT_ATTR(young-r-replacement, 0x23),
424 XGENE_PMU_EVENT_ATTR(young-nr-replacement, 0x24),
425 XGENE_PMU_EVENT_ATTR(bloomfilter-clearing, 0x25),
426 XGENE_PMU_EVENT_ATTR(generation-flip, 0x26),
427 XGENE_PMU_EVENT_ATTR(vcc-droop-detected, 0x27),
432 XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
433 XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-all, 0x01),
434 XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-rd, 0x02),
435 XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-wr, 0x03),
436 XGENE_PMU_EVENT_ATTR(pa-all-cp-req, 0x04),
437 XGENE_PMU_EVENT_ATTR(pa-cp-blk-req, 0x05),
438 XGENE_PMU_EVENT_ATTR(pa-cp-ptl-req, 0x06),
439 XGENE_PMU_EVENT_ATTR(pa-cp-rd-req, 0x07),
440 XGENE_PMU_EVENT_ATTR(pa-cp-wr-req, 0x08),
441 XGENE_PMU_EVENT_ATTR(ba-all-req, 0x09),
442 XGENE_PMU_EVENT_ATTR(ba-rd-req, 0x0a),
443 XGENE_PMU_EVENT_ATTR(ba-wr-req, 0x0b),
444 XGENE_PMU_EVENT_ATTR(pa-rd-shared-req-issued, 0x10),
445 XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-req-issued, 0x11),
446 XGENE_PMU_EVENT_ATTR(pa-wr-invalidate-req-issued-stashable, 0x12),
447 XGENE_PMU_EVENT_ATTR(pa-wr-invalidate-req-issued-nonstashable, 0x13),
448 XGENE_PMU_EVENT_ATTR(pa-wr-back-req-issued-stashable, 0x14),
449 XGENE_PMU_EVENT_ATTR(pa-wr-back-req-issued-nonstashable, 0x15),
450 XGENE_PMU_EVENT_ATTR(pa-ptl-wr-req, 0x16),
451 XGENE_PMU_EVENT_ATTR(pa-ptl-rd-req, 0x17),
452 XGENE_PMU_EVENT_ATTR(pa-wr-back-clean-data, 0x18),
453 XGENE_PMU_EVENT_ATTR(pa-wr-back-cancelled-on-SS, 0x1b),
454 XGENE_PMU_EVENT_ATTR(pa-barrier-occurrence, 0x1c),
455 XGENE_PMU_EVENT_ATTR(pa-barrier-cycles, 0x1d),
456 XGENE_PMU_EVENT_ATTR(pa-total-cp-snoops, 0x20),
457 XGENE_PMU_EVENT_ATTR(pa-rd-shared-snoop, 0x21),
458 XGENE_PMU_EVENT_ATTR(pa-rd-shared-snoop-hit, 0x22),
459 XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-snoop, 0x23),
460 XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-snoop-hit, 0x24),
461 XGENE_PMU_EVENT_ATTR(pa-rd-wr-invalid-snoop, 0x25),
462 XGENE_PMU_EVENT_ATTR(pa-rd-wr-invalid-snoop-hit, 0x26),
463 XGENE_PMU_EVENT_ATTR(pa-req-buffer-full, 0x28),
464 XGENE_PMU_EVENT_ATTR(cswlf-outbound-req-fifo-full, 0x29),
465 XGENE_PMU_EVENT_ATTR(cswlf-inbound-snoop-fifo-backpressure, 0x2a),
466 XGENE_PMU_EVENT_ATTR(cswlf-outbound-lack-fifo-full, 0x2b),
467 XGENE_PMU_EVENT_ATTR(cswlf-inbound-gack-fifo-backpressure, 0x2c),
468 XGENE_PMU_EVENT_ATTR(cswlf-outbound-data-fifo-full, 0x2d),
469 XGENE_PMU_EVENT_ATTR(cswlf-inbound-data-fifo-backpressure, 0x2e),
470 XGENE_PMU_EVENT_ATTR(cswlf-inbound-req-backpressure, 0x2f),
475 XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
476 XGENE_PMU_EVENT_ATTR(pa-axi0-rd-req, 0x01),
477 XGENE_PMU_EVENT_ATTR(pa-axi0-wr-req, 0x02),
478 XGENE_PMU_EVENT_ATTR(pa-axi1-rd-req, 0x03),
479 XGENE_PMU_EVENT_ATTR(pa-axi1-wr-req, 0x04),
480 XGENE_PMU_EVENT_ATTR(ba-all-axi-req, 0x07),
481 XGENE_PMU_EVENT_ATTR(ba-axi-rd-req, 0x08),
482 XGENE_PMU_EVENT_ATTR(ba-axi-wr-req, 0x09),
483 XGENE_PMU_EVENT_ATTR(ba-free-list-empty, 0x10),
488 XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
489 XGENE_PMU_EVENT_ATTR(req-receive, 0x01),
490 XGENE_PMU_EVENT_ATTR(rd-req-recv, 0x02),
491 XGENE_PMU_EVENT_ATTR(rd-req-recv-2, 0x03),
492 XGENE_PMU_EVENT_ATTR(wr-req-recv, 0x04),
493 XGENE_PMU_EVENT_ATTR(wr-req-recv-2, 0x05),
494 XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu, 0x06),
495 XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu-2, 0x07),
496 XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu, 0x08),
497 XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu-2, 0x09),
498 XGENE_PMU_EVENT_ATTR(glbl-ack-recv-for-rd-sent-to-spec-mcu, 0x0a),
499 XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-for-rd-sent-to-spec-mcu, 0x0b),
500 XGENE_PMU_EVENT_ATTR(glbl-ack-nogo-recv-for-rd-sent-to-spec-mcu, 0x0c),
501 XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-any-rd-req, 0x0d),
502 XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-any-rd-req-2, 0x0e),
503 XGENE_PMU_EVENT_ATTR(wr-req-sent-to-mcu, 0x0f),
504 XGENE_PMU_EVENT_ATTR(gack-recv, 0x10),
505 XGENE_PMU_EVENT_ATTR(rd-gack-recv, 0x11),
506 XGENE_PMU_EVENT_ATTR(wr-gack-recv, 0x12),
507 XGENE_PMU_EVENT_ATTR(cancel-rd-gack, 0x13),
508 XGENE_PMU_EVENT_ATTR(cancel-wr-gack, 0x14),
509 XGENE_PMU_EVENT_ATTR(mcb-csw-req-stall, 0x15),
510 XGENE_PMU_EVENT_ATTR(mcu-req-intf-blocked, 0x16),
511 XGENE_PMU_EVENT_ATTR(mcb-mcu-rd-intf-stall, 0x17),
512 XGENE_PMU_EVENT_ATTR(csw-rd-intf-blocked, 0x18),
513 XGENE_PMU_EVENT_ATTR(csw-local-ack-intf-blocked, 0x19),
514 XGENE_PMU_EVENT_ATTR(mcu-req-table-full, 0x1a),
515 XGENE_PMU_EVENT_ATTR(mcu-stat-table-full, 0x1b),
516 XGENE_PMU_EVENT_ATTR(mcu-wr-table-full, 0x1c),
517 XGENE_PMU_EVENT_ATTR(mcu-rdreceipt-resp, 0x1d),
518 XGENE_PMU_EVENT_ATTR(mcu-wrcomplete-resp, 0x1e),
519 XGENE_PMU_EVENT_ATTR(mcu-retryack-resp, 0x1f),
520 XGENE_PMU_EVENT_ATTR(mcu-pcrdgrant-resp, 0x20),
521 XGENE_PMU_EVENT_ATTR(mcu-req-from-lastload, 0x21),
522 XGENE_PMU_EVENT_ATTR(mcu-req-from-bypass, 0x22),
523 XGENE_PMU_EVENT_ATTR(volt-droop-detect, 0x23),
528 XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
529 XGENE_PMU_EVENT_ATTR(act-sent, 0x01),
530 XGENE_PMU_EVENT_ATTR(pre-sent, 0x02),
531 XGENE_PMU_EVENT_ATTR(rd-sent, 0x03),
532 XGENE_PMU_EVENT_ATTR(rda-sent, 0x04),
533 XGENE_PMU_EVENT_ATTR(wr-sent, 0x05),
534 XGENE_PMU_EVENT_ATTR(wra-sent, 0x06),
535 XGENE_PMU_EVENT_ATTR(pd-entry-vld, 0x07),
536 XGENE_PMU_EVENT_ATTR(sref-entry-vld, 0x08),
537 XGENE_PMU_EVENT_ATTR(prea-sent, 0x09),
538 XGENE_PMU_EVENT_ATTR(ref-sent, 0x0a),
539 XGENE_PMU_EVENT_ATTR(rd-rda-sent, 0x0b),
540 XGENE_PMU_EVENT_ATTR(wr-wra-sent, 0x0c),
541 XGENE_PMU_EVENT_ATTR(raw-hazard, 0x0d),
542 XGENE_PMU_EVENT_ATTR(war-hazard, 0x0e),
543 XGENE_PMU_EVENT_ATTR(waw-hazard, 0x0f),
544 XGENE_PMU_EVENT_ATTR(rar-hazard, 0x10),
545 XGENE_PMU_EVENT_ATTR(raw-war-waw-hazard, 0x11),
546 XGENE_PMU_EVENT_ATTR(hprd-lprd-wr-req-vld, 0x12),
547 XGENE_PMU_EVENT_ATTR(lprd-req-vld, 0x13),
548 XGENE_PMU_EVENT_ATTR(hprd-req-vld, 0x14),
549 XGENE_PMU_EVENT_ATTR(hprd-lprd-req-vld, 0x15),
550 XGENE_PMU_EVENT_ATTR(wr-req-vld, 0x16),
551 XGENE_PMU_EVENT_ATTR(partial-wr-req-vld, 0x17),
552 XGENE_PMU_EVENT_ATTR(rd-retry, 0x18),
553 XGENE_PMU_EVENT_ATTR(wr-retry, 0x19),
554 XGENE_PMU_EVENT_ATTR(retry-gnt, 0x1a),
555 XGENE_PMU_EVENT_ATTR(rank-change, 0x1b),
556 XGENE_PMU_EVENT_ATTR(dir-change, 0x1c),
557 XGENE_PMU_EVENT_ATTR(rank-dir-change, 0x1d),
558 XGENE_PMU_EVENT_ATTR(rank-active, 0x1e),
559 XGENE_PMU_EVENT_ATTR(rank-idle, 0x1f),
560 XGENE_PMU_EVENT_ATTR(rank-pd, 0x20),
561 XGENE_PMU_EVENT_ATTR(rank-sref, 0x21),
562 XGENE_PMU_EVENT_ATTR(queue-fill-gt-thresh, 0x22),
563 XGENE_PMU_EVENT_ATTR(queue-rds-gt-thresh, 0x23),
564 XGENE_PMU_EVENT_ATTR(queue-wrs-gt-thresh, 0x24),
565 XGENE_PMU_EVENT_ATTR(phy-updt-complt, 0x25),
566 XGENE_PMU_EVENT_ATTR(tz-fail, 0x26),
567 XGENE_PMU_EVENT_ATTR(dram-errc, 0x27),
568 XGENE_PMU_EVENT_ATTR(dram-errd, 0x28),
569 XGENE_PMU_EVENT_ATTR(rd-enq, 0x29),
570 XGENE_PMU_EVENT_ATTR(wr-enq, 0x2a),
571 XGENE_PMU_EVENT_ATTR(tmac-limit-reached, 0x2b),
572 XGENE_PMU_EVENT_ATTR(tmaw-tracker-full, 0x2c),
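
The event IDs tabulated above are what userspace ultimately places in perf_event_attr::config, with the agent selection in config1 as described by the format attributes. Below is a minimal counting sketch under stated assumptions: the PMU type number is a placeholder that would really be read from /sys/bus/event_source/devices/<pmu-instance>/type, the instance name depends on how the driver enumerates the blocks, and 0x02 is the first-generation L3C "read-hit" event from the table above. The driver only accepts system-wide, non-sampling events (pid = -1, cpu >= 0), as enforced in xgene_perf_event_init() further down.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr = {
		.size    = sizeof(attr),
		.type    = 18,      /* placeholder: read the real value from
				     * /sys/bus/event_source/devices/<pmu>/type */
		.config  = 0x02,    /* "read-hit" in the first L3C table above */
		.config1 = ~0ULL,   /* do not restrict the agent selection */
	};
	uint64_t count;
	int fd;

	/* System-wide counting on CPU 0: pid = -1, cpu = 0, no sampling. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("events counted: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}
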
609 return cpumap_print_to_pagebuf(true, buf, &pmu_dev->parent->cpu); in cpumask_show()
696 cntr = find_first_zero_bit(pmu_dev->cntr_assign_mask, in get_next_avail_cntr()
697 pmu_dev->max_counters); in get_next_avail_cntr()
698 if (cntr == pmu_dev->max_counters) in get_next_avail_cntr()
699 return -ENOSPC; in get_next_avail_cntr()
700 set_bit(cntr, pmu_dev->cntr_assign_mask); in get_next_avail_cntr()
707 clear_bit(cntr, pmu_dev->cntr_assign_mask); in clear_avail_cntr()
712 writel(PCPPMU_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG); in xgene_pmu_mask_int()
717 writel(PCPPMU_V3_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG); in xgene_pmu_v3_mask_int()
722 writel(PCPPMU_INTCLRMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG); in xgene_pmu_unmask_int()
728 xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG); in xgene_pmu_v3_unmask_int()
734 return readl(pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx)); in xgene_pmu_read_counter32()
743 * v3 has 64-bit counter registers composed of two 32-bit registers in xgene_pmu_read_counter64()
759 writel(val, pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx)); in xgene_pmu_write_counter32()
770 /* v3 has 64-bit counter registers composed of two 32-bit registers */ in xgene_pmu_write_counter64()
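
The two comments above note that PMU v3 exposes each 64-bit counter as a pair of 32-bit registers. The usual way to read such a split counter consistently is to re-check the high half until it is stable, since the low half can carry over between the two reads. The driver's actual helper bodies are elided from this listing; the following is only a standalone sketch of that pattern, with a stub standing in for the 32-bit MMIO reads and an assumed lo/hi register pairing per counter index:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a 32-bit MMIO counter-register read (sketch only). */
static volatile uint32_t fake_regs[4];

static uint32_t readl32(int reg)
{
	return fake_regs[reg];
}

/*
 * Read a 64-bit counter built from two 32-bit halves.  The counter can
 * carry out of bit 31 between the two reads, so the high half is
 * re-read until it is stable.  The (2*idx, 2*idx + 1) register pairing
 * is an assumption made for this sketch.
 */
static uint64_t read_counter64(int idx)
{
	uint32_t lo, hi;

	do {
		hi = readl32(2 * idx + 1);
		lo = readl32(2 * idx);
	} while (hi != readl32(2 * idx + 1));

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	fake_regs[0] = 0xdeadbeef;	/* low half of counter 0  */
	fake_regs[1] = 0x1;		/* high half of counter 0 */
	printf("counter0 = 0x%llx\n",
	       (unsigned long long)read_counter64(0));
	return 0;
}
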
778 writel(val, pmu_dev->inf->csr + PMU_PMEVTYPER0 + (4 * idx)); in xgene_pmu_write_evttype()
784 writel(val, pmu_dev->inf->csr + PMU_PMAMR0); in xgene_pmu_write_agentmsk()
793 writel(val, pmu_dev->inf->csr + PMU_PMAMR1); in xgene_pmu_write_agent1msk()
804 val = readl(pmu_dev->inf->csr + PMU_PMCNTENSET); in xgene_pmu_enable_counter()
806 writel(val, pmu_dev->inf->csr + PMU_PMCNTENSET); in xgene_pmu_enable_counter()
814 val = readl(pmu_dev->inf->csr + PMU_PMCNTENCLR); in xgene_pmu_disable_counter()
816 writel(val, pmu_dev->inf->csr + PMU_PMCNTENCLR); in xgene_pmu_disable_counter()
824 val = readl(pmu_dev->inf->csr + PMU_PMINTENSET); in xgene_pmu_enable_counter_int()
826 writel(val, pmu_dev->inf->csr + PMU_PMINTENSET); in xgene_pmu_enable_counter_int()
834 val = readl(pmu_dev->inf->csr + PMU_PMINTENCLR); in xgene_pmu_disable_counter_int()
836 writel(val, pmu_dev->inf->csr + PMU_PMINTENCLR); in xgene_pmu_disable_counter_int()
843 val = readl(pmu_dev->inf->csr + PMU_PMCR); in xgene_pmu_reset_counters()
845 writel(val, pmu_dev->inf->csr + PMU_PMCR); in xgene_pmu_reset_counters()
852 val = readl(pmu_dev->inf->csr + PMU_PMCR); in xgene_pmu_start_counters()
854 writel(val, pmu_dev->inf->csr + PMU_PMCR); in xgene_pmu_start_counters()
861 val = readl(pmu_dev->inf->csr + PMU_PMCR); in xgene_pmu_stop_counters()
863 writel(val, pmu_dev->inf->csr + PMU_PMCR); in xgene_pmu_stop_counters()
869 struct xgene_pmu *xgene_pmu = pmu_dev->parent; in xgene_perf_pmu_enable()
870 bool enabled = !bitmap_empty(pmu_dev->cntr_assign_mask, in xgene_perf_pmu_enable()
871 pmu_dev->max_counters); in xgene_perf_pmu_enable()
876 xgene_pmu->ops->start_counters(pmu_dev); in xgene_perf_pmu_enable()
882 struct xgene_pmu *xgene_pmu = pmu_dev->parent; in xgene_perf_pmu_disable()
884 xgene_pmu->ops->stop_counters(pmu_dev); in xgene_perf_pmu_disable()
889 struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); in xgene_perf_event_init()
890 struct hw_perf_event *hw = &event->hw; in xgene_perf_event_init()
894 if (event->attr.type != event->pmu->type) in xgene_perf_event_init()
895 return -ENOENT; in xgene_perf_event_init()
899 * Therefore, it does not support per-process mode. in xgene_perf_event_init()
902 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) in xgene_perf_event_init()
903 return -EINVAL; in xgene_perf_event_init()
905 if (event->cpu < 0) in xgene_perf_event_init()
906 return -EINVAL; in xgene_perf_event_init()
911 * but can lead to issues for off-core PMUs, where each in xgene_perf_event_init()
916 event->cpu = cpumask_first(&pmu_dev->parent->cpu); in xgene_perf_event_init()
918 hw->config = event->attr.config; in xgene_perf_event_init()
925 hw->config_base = event->attr.config1; in xgene_perf_event_init()
931 if (event->group_leader->pmu != event->pmu && in xgene_perf_event_init()
932 !is_software_event(event->group_leader)) in xgene_perf_event_init()
933 return -EINVAL; in xgene_perf_event_init()
935 for_each_sibling_event(sibling, event->group_leader) { in xgene_perf_event_init()
936 if (sibling->pmu != event->pmu && in xgene_perf_event_init()
938 return -EINVAL; in xgene_perf_event_init()
946 struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); in xgene_perf_enable_event()
947 struct xgene_pmu *xgene_pmu = pmu_dev->parent; in xgene_perf_enable_event()
949 xgene_pmu->ops->write_evttype(pmu_dev, GET_CNTR(event), in xgene_perf_enable_event()
951 xgene_pmu->ops->write_agentmsk(pmu_dev, ~((u32)GET_AGENTID(event))); in xgene_perf_enable_event()
952 if (pmu_dev->inf->type == PMU_TYPE_IOB) in xgene_perf_enable_event()
953 xgene_pmu->ops->write_agent1msk(pmu_dev, in xgene_perf_enable_event()
956 xgene_pmu->ops->enable_counter(pmu_dev, GET_CNTR(event)); in xgene_perf_enable_event()
957 xgene_pmu->ops->enable_counter_int(pmu_dev, GET_CNTR(event)); in xgene_perf_enable_event()
962 struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); in xgene_perf_disable_event()
963 struct xgene_pmu *xgene_pmu = pmu_dev->parent; in xgene_perf_disable_event()
965 xgene_pmu->ops->disable_counter(pmu_dev, GET_CNTR(event)); in xgene_perf_disable_event()
966 xgene_pmu->ops->disable_counter_int(pmu_dev, GET_CNTR(event)); in xgene_perf_disable_event()
971 struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); in xgene_perf_event_set_period()
972 struct xgene_pmu *xgene_pmu = pmu_dev->parent; in xgene_perf_event_set_period()
973 struct hw_perf_event *hw = &event->hw; in xgene_perf_event_set_period()
983 local64_set(&hw->prev_count, val); in xgene_perf_event_set_period()
984 xgene_pmu->ops->write_counter(pmu_dev, hw->idx, val); in xgene_perf_event_set_period()
989 struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); in xgene_perf_event_update()
990 struct xgene_pmu *xgene_pmu = pmu_dev->parent; in xgene_perf_event_update()
991 struct hw_perf_event *hw = &event->hw; in xgene_perf_event_update()
995 prev_raw_count = local64_read(&hw->prev_count); in xgene_perf_event_update()
996 new_raw_count = xgene_pmu->ops->read_counter(pmu_dev, GET_CNTR(event)); in xgene_perf_event_update()
998 if (local64_cmpxchg(&hw->prev_count, prev_raw_count, in xgene_perf_event_update()
1002 delta = (new_raw_count - prev_raw_count) & pmu_dev->max_period; in xgene_perf_event_update()
1004 local64_add(delta, &event->count); in xgene_perf_event_update()
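
xgene_perf_event_update() above computes delta = (new_raw_count - prev_raw_count) & max_period, which makes counter wrap-around come out right because the unsigned subtraction is truncated to the counter width. A standalone check of that arithmetic, assuming the 32-bit counters of PMU v1/v2 (i.e. a max_period mask of 0xffffffff):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_period = 0xffffffffULL;	/* 32-bit counter width (assumed) */
	uint64_t prev = 0xfffffff0ULL;		/* last value written back        */
	uint64_t curr = 0x00000010ULL;		/* value read after a wrap-around */

	/* Unsigned subtract, then mask to the counter width: the wrap is
	 * absorbed and the true number of increments (32 here) survives. */
	uint64_t delta = (curr - prev) & max_period;

	printf("delta = %llu\n", (unsigned long long)delta);
	return 0;
}
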
1014 struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); in xgene_perf_start()
1015 struct xgene_pmu *xgene_pmu = pmu_dev->parent; in xgene_perf_start()
1016 struct hw_perf_event *hw = &event->hw; in xgene_perf_start()
1018 if (WARN_ON_ONCE(!(hw->state & PERF_HES_STOPPED))) in xgene_perf_start()
1021 WARN_ON_ONCE(!(hw->state & PERF_HES_UPTODATE)); in xgene_perf_start()
1022 hw->state = 0; in xgene_perf_start()
1027 u64 prev_raw_count = local64_read(&hw->prev_count); in xgene_perf_start()
1029 xgene_pmu->ops->write_counter(pmu_dev, GET_CNTR(event), in xgene_perf_start()
1039 struct hw_perf_event *hw = &event->hw; in xgene_perf_stop()
1041 if (hw->state & PERF_HES_UPTODATE) in xgene_perf_stop()
1045 WARN_ON_ONCE(hw->state & PERF_HES_STOPPED); in xgene_perf_stop()
1046 hw->state |= PERF_HES_STOPPED; in xgene_perf_stop()
1048 if (hw->state & PERF_HES_UPTODATE) in xgene_perf_stop()
1052 hw->state |= PERF_HES_UPTODATE; in xgene_perf_stop()
1057 struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); in xgene_perf_add()
1058 struct hw_perf_event *hw = &event->hw; in xgene_perf_add()
1060 hw->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; in xgene_perf_add()
1063 hw->idx = get_next_avail_cntr(pmu_dev); in xgene_perf_add()
1064 if (hw->idx < 0) in xgene_perf_add()
1065 return -EAGAIN; in xgene_perf_add()
1068 pmu_dev->pmu_counter_event[hw->idx] = event; in xgene_perf_add()
1078 struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); in xgene_perf_del()
1079 struct hw_perf_event *hw = &event->hw; in xgene_perf_del()
1087 pmu_dev->pmu_counter_event[hw->idx] = NULL; in xgene_perf_del()
1094 if (pmu_dev->parent->version == PCP_PMU_V3) in xgene_init_perf()
1095 pmu_dev->max_period = PMU_V3_CNT_MAX_PERIOD; in xgene_init_perf()
1097 pmu_dev->max_period = PMU_CNT_MAX_PERIOD; in xgene_init_perf()
1099 xgene_pmu = pmu_dev->parent; in xgene_init_perf()
1100 if (xgene_pmu->version == PCP_PMU_V1) in xgene_init_perf()
1101 pmu_dev->max_counters = 1; in xgene_init_perf()
1103 pmu_dev->max_counters = PMU_MAX_COUNTERS; in xgene_init_perf()
1106 pmu_dev->pmu = (struct pmu) { in xgene_init_perf()
1107 .attr_groups = pmu_dev->attr_groups, in xgene_init_perf()
1121 xgene_pmu->ops->stop_counters(pmu_dev); in xgene_init_perf()
1122 xgene_pmu->ops->reset_counters(pmu_dev); in xgene_init_perf()
1124 return perf_pmu_register(&pmu_dev->pmu, name, -1); in xgene_init_perf()
1130 struct device *dev = xgene_pmu->dev; in xgene_pmu_dev_add()
1135 return -ENOMEM; in xgene_pmu_dev_add()
1136 pmu->parent = xgene_pmu; in xgene_pmu_dev_add()
1137 pmu->inf = &ctx->inf; in xgene_pmu_dev_add()
1138 ctx->pmu_dev = pmu; in xgene_pmu_dev_add()
1140 switch (pmu->inf->type) { in xgene_pmu_dev_add()
1142 if (!(xgene_pmu->l3c_active_mask & pmu->inf->enable_mask)) in xgene_pmu_dev_add()
1143 return -ENODEV; in xgene_pmu_dev_add()
1144 if (xgene_pmu->version == PCP_PMU_V3) in xgene_pmu_dev_add()
1145 pmu->attr_groups = l3c_pmu_v3_attr_groups; in xgene_pmu_dev_add()
1147 pmu->attr_groups = l3c_pmu_attr_groups; in xgene_pmu_dev_add()
1150 if (xgene_pmu->version == PCP_PMU_V3) in xgene_pmu_dev_add()
1151 pmu->attr_groups = iob_fast_pmu_v3_attr_groups; in xgene_pmu_dev_add()
1153 pmu->attr_groups = iob_pmu_attr_groups; in xgene_pmu_dev_add()
1156 if (xgene_pmu->version == PCP_PMU_V3) in xgene_pmu_dev_add()
1157 pmu->attr_groups = iob_slow_pmu_v3_attr_groups; in xgene_pmu_dev_add()
1160 if (!(xgene_pmu->mcb_active_mask & pmu->inf->enable_mask)) in xgene_pmu_dev_add()
1161 return -ENODEV; in xgene_pmu_dev_add()
1162 if (xgene_pmu->version == PCP_PMU_V3) in xgene_pmu_dev_add()
1163 pmu->attr_groups = mcb_pmu_v3_attr_groups; in xgene_pmu_dev_add()
1165 pmu->attr_groups = mcb_pmu_attr_groups; in xgene_pmu_dev_add()
1168 if (!(xgene_pmu->mc_active_mask & pmu->inf->enable_mask)) in xgene_pmu_dev_add()
1169 return -ENODEV; in xgene_pmu_dev_add()
1170 if (xgene_pmu->version == PCP_PMU_V3) in xgene_pmu_dev_add()
1171 pmu->attr_groups = mc_pmu_v3_attr_groups; in xgene_pmu_dev_add()
1173 pmu->attr_groups = mc_pmu_attr_groups; in xgene_pmu_dev_add()
1176 return -EINVAL; in xgene_pmu_dev_add()
1179 if (xgene_init_perf(pmu, ctx->name)) { in xgene_pmu_dev_add()
1180 dev_err(dev, "%s PMU: Failed to init perf driver\n", ctx->name); in xgene_pmu_dev_add()
1181 return -ENODEV; in xgene_pmu_dev_add()
1184 dev_info(dev, "%s PMU registered\n", ctx->name); in xgene_pmu_dev_add()
1191 struct xgene_pmu *xgene_pmu = pmu_dev->parent; in _xgene_pmu_isr()
1192 void __iomem *csr = pmu_dev->inf->csr; in _xgene_pmu_isr()
1196 xgene_pmu->ops->stop_counters(pmu_dev); in _xgene_pmu_isr()
1198 if (xgene_pmu->version == PCP_PMU_V3) in _xgene_pmu_isr()
1207 if (xgene_pmu->version == PCP_PMU_V1) in _xgene_pmu_isr()
1209 else if (xgene_pmu->version == PCP_PMU_V2) in _xgene_pmu_isr()
1215 struct perf_event *event = pmu_dev->pmu_counter_event[idx]; in _xgene_pmu_isr()
1226 xgene_pmu->ops->start_counters(pmu_dev); in _xgene_pmu_isr()
1236 raw_spin_lock(&xgene_pmu->lock); in xgene_pmu_isr()
1239 val = readl(xgene_pmu->pcppmu_csr + PCPPMU_INTSTATUS_REG); in xgene_pmu_isr()
1240 if (xgene_pmu->version == PCP_PMU_V3) { in xgene_pmu_isr()
1252 list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) { in xgene_pmu_isr()
1253 _xgene_pmu_isr(irq, ctx->pmu_dev); in xgene_pmu_isr()
1257 list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) { in xgene_pmu_isr()
1258 _xgene_pmu_isr(irq, ctx->pmu_dev); in xgene_pmu_isr()
1262 list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) { in xgene_pmu_isr()
1263 _xgene_pmu_isr(irq, ctx->pmu_dev); in xgene_pmu_isr()
1267 list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) { in xgene_pmu_isr()
1268 _xgene_pmu_isr(irq, ctx->pmu_dev); in xgene_pmu_isr()
1272 raw_spin_unlock(&xgene_pmu->lock); in xgene_pmu_isr()
1285 dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n"); in acpi_pmu_probe_active_mcb_mcu_l3c()
1291 dev_err(&pdev->dev, "ioremap failed for MCBA CSR resource\n"); in acpi_pmu_probe_active_mcb_mcu_l3c()
1297 dev_err(&pdev->dev, "ioremap failed for MCBB CSR resource\n"); in acpi_pmu_probe_active_mcb_mcu_l3c()
1301 xgene_pmu->l3c_active_mask = 0x1; in acpi_pmu_probe_active_mcb_mcu_l3c()
1306 xgene_pmu->mcb_active_mask = 0x3; in acpi_pmu_probe_active_mcb_mcu_l3c()
1309 xgene_pmu->mc_active_mask = in acpi_pmu_probe_active_mcb_mcu_l3c()
1313 xgene_pmu->mcb_active_mask = 0x1; in acpi_pmu_probe_active_mcb_mcu_l3c()
1316 xgene_pmu->mc_active_mask = in acpi_pmu_probe_active_mcb_mcu_l3c()
1333 dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n"); in acpi_pmu_v3_probe_active_mcb_mcu_l3c()
1342 xgene_pmu->mcb_active_mask = 0x3; in acpi_pmu_v3_probe_active_mcb_mcu_l3c()
1344 xgene_pmu->l3c_active_mask = 0xFF; in acpi_pmu_v3_probe_active_mcb_mcu_l3c()
1347 xgene_pmu->mc_active_mask = 0xFF; in acpi_pmu_v3_probe_active_mcb_mcu_l3c()
1349 xgene_pmu->mc_active_mask = 0x33; in acpi_pmu_v3_probe_active_mcb_mcu_l3c()
1351 xgene_pmu->mc_active_mask = 0x11; in acpi_pmu_v3_probe_active_mcb_mcu_l3c()
1354 xgene_pmu->mcb_active_mask = 0x1; in acpi_pmu_v3_probe_active_mcb_mcu_l3c()
1356 xgene_pmu->l3c_active_mask = 0x0F; in acpi_pmu_v3_probe_active_mcb_mcu_l3c()
1359 xgene_pmu->mc_active_mask = 0x0F; in acpi_pmu_v3_probe_active_mcb_mcu_l3c()
1361 xgene_pmu->mc_active_mask = 0x03; in acpi_pmu_v3_probe_active_mcb_mcu_l3c()
1363 xgene_pmu->mc_active_mask = 0x01; in acpi_pmu_v3_probe_active_mcb_mcu_l3c()
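
The v3 ACPI probe above selects its active masks from a small set of constants depending on whether one or two MCBs are populated and how many memory controllers sit behind them. The register reads that drive those decisions are elided from this listing, so the sketch below uses hypothetical dual_mcb and mc_per_mcb flags purely to show how the visible mask values pair up:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical inputs: dual_mcb and mc_per_mcb stand in for whatever
 * the real probe derives from the CSW/MCB registers.  The constants
 * are exactly the ones visible in the listing above.
 */
static void pick_v3_masks(int dual_mcb, int mc_per_mcb,
			  uint32_t *mcb, uint32_t *l3c, uint32_t *mc)
{
	if (dual_mcb) {
		*mcb = 0x3;
		*l3c = 0xFF;
		*mc  = (mc_per_mcb == 4) ? 0xFF :
		       (mc_per_mcb == 2) ? 0x33 : 0x11;
	} else {
		*mcb = 0x1;
		*l3c = 0x0F;
		*mc  = (mc_per_mcb == 4) ? 0x0F :
		       (mc_per_mcb == 2) ? 0x03 : 0x01;
	}
}

int main(void)
{
	uint32_t mcb, l3c, mc;

	pick_v3_masks(1, 2, &mcb, &l3c, &mc);
	printf("mcb=0x%x l3c=0x%x mc=0x%x\n", mcb, l3c, mc);
	return 0;
}
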
1373 struct device_node *np = pdev->dev.of_node; in fdt_pmu_probe_active_mcb_mcu_l3c()
1376 csw_map = syscon_regmap_lookup_by_phandle(np, "regmap-csw"); in fdt_pmu_probe_active_mcb_mcu_l3c()
1378 dev_err(&pdev->dev, "unable to get syscon regmap csw\n"); in fdt_pmu_probe_active_mcb_mcu_l3c()
1382 mcba_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcba"); in fdt_pmu_probe_active_mcb_mcu_l3c()
1384 dev_err(&pdev->dev, "unable to get syscon regmap mcba\n"); in fdt_pmu_probe_active_mcb_mcu_l3c()
1388 mcbb_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcbb"); in fdt_pmu_probe_active_mcb_mcu_l3c()
1390 dev_err(&pdev->dev, "unable to get syscon regmap mcbb\n"); in fdt_pmu_probe_active_mcb_mcu_l3c()
1394 xgene_pmu->l3c_active_mask = 0x1; in fdt_pmu_probe_active_mcb_mcu_l3c()
1396 return -EINVAL; in fdt_pmu_probe_active_mcb_mcu_l3c()
1400 xgene_pmu->mcb_active_mask = 0x3; in fdt_pmu_probe_active_mcb_mcu_l3c()
1404 xgene_pmu->mc_active_mask = in fdt_pmu_probe_active_mcb_mcu_l3c()
1408 xgene_pmu->mcb_active_mask = 0x1; in fdt_pmu_probe_active_mcb_mcu_l3c()
1412 xgene_pmu->mc_active_mask = in fdt_pmu_probe_active_mcb_mcu_l3c()
1422 if (has_acpi_companion(&pdev->dev)) { in xgene_pmu_probe_active_mcb_mcu_l3c()
1423 if (xgene_pmu->version == PCP_PMU_V3) in xgene_pmu_probe_active_mcb_mcu_l3c()
1456 struct device *dev = xgene_pmu->dev; in acpi_get_pmu_hw_inf()
1479 if (resource_type(rentry->res) == IORESOURCE_MEM) { in acpi_get_pmu_hw_inf()
1480 res = *rentry->res; in acpi_get_pmu_hw_inf()
1498 /* A PMU device node without enable-bit-index is always enabled */ in acpi_get_pmu_hw_inf()
1499 rc = acpi_dev_get_property(adev, "enable-bit-index", in acpi_get_pmu_hw_inf()
1504 enable_bit = (int) obj->integer.value; in acpi_get_pmu_hw_inf()
1506 ctx->name = xgene_pmu_dev_name(dev, type, enable_bit); in acpi_get_pmu_hw_inf()
1507 if (!ctx->name) { in acpi_get_pmu_hw_inf()
1511 inf = &ctx->inf; in acpi_get_pmu_hw_inf()
1512 inf->type = type; in acpi_get_pmu_hw_inf()
1513 inf->csr = dev_csr; in acpi_get_pmu_hw_inf()
1514 inf->enable_mask = 1 << enable_bit; in acpi_get_pmu_hw_inf()
1539 for (id = ids; id->id[0] || id->cls; id++) { in xgene_pmu_acpi_match_type()
1557 if (!adev || acpi_bus_get_status(adev) || !adev->status.present) in acpi_pmu_dev_add()
1564 ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, (u32)acpi_id->driver_data); in acpi_pmu_dev_add()
1570 devm_kfree(xgene_pmu->dev, ctx); in acpi_pmu_dev_add()
1574 switch (ctx->inf.type) { in acpi_pmu_dev_add()
1576 list_add(&ctx->next, &xgene_pmu->l3cpmus); in acpi_pmu_dev_add()
1579 list_add(&ctx->next, &xgene_pmu->iobpmus); in acpi_pmu_dev_add()
1582 list_add(&ctx->next, &xgene_pmu->iobpmus); in acpi_pmu_dev_add()
1585 list_add(&ctx->next, &xgene_pmu->mcbpmus); in acpi_pmu_dev_add()
1588 list_add(&ctx->next, &xgene_pmu->mcpmus); in acpi_pmu_dev_add()
1597 struct device *dev = xgene_pmu->dev; in acpi_pmu_probe_pmu_dev()
1603 return -EINVAL; in acpi_pmu_probe_pmu_dev()
1609 return -ENODEV; in acpi_pmu_probe_pmu_dev()
1626 struct device *dev = xgene_pmu->dev; in fdt_get_pmu_hw_inf()
1648 /* A PMU device node without enable-bit-index is always enabled */ in fdt_get_pmu_hw_inf()
1649 if (of_property_read_u32(np, "enable-bit-index", &enable_bit)) in fdt_get_pmu_hw_inf()
1652 ctx->name = xgene_pmu_dev_name(dev, type, enable_bit); in fdt_get_pmu_hw_inf()
1653 if (!ctx->name) { in fdt_get_pmu_hw_inf()
1658 inf = &ctx->inf; in fdt_get_pmu_hw_inf()
1659 inf->type = type; in fdt_get_pmu_hw_inf()
1660 inf->csr = dev_csr; in fdt_get_pmu_hw_inf()
1661 inf->enable_mask = 1 << enable_bit; in fdt_get_pmu_hw_inf()
1672 for_each_child_of_node(pdev->dev.of_node, np) { in fdt_pmu_probe_pmu_dev()
1676 if (of_device_is_compatible(np, "apm,xgene-pmu-l3c")) in fdt_pmu_probe_pmu_dev()
1678 else if (of_device_is_compatible(np, "apm,xgene-pmu-iob")) in fdt_pmu_probe_pmu_dev()
1680 else if (of_device_is_compatible(np, "apm,xgene-pmu-mcb")) in fdt_pmu_probe_pmu_dev()
1682 else if (of_device_is_compatible(np, "apm,xgene-pmu-mc")) in fdt_pmu_probe_pmu_dev()
1692 devm_kfree(xgene_pmu->dev, ctx); in fdt_pmu_probe_pmu_dev()
1696 switch (ctx->inf.type) { in fdt_pmu_probe_pmu_dev()
1698 list_add(&ctx->next, &xgene_pmu->l3cpmus); in fdt_pmu_probe_pmu_dev()
1701 list_add(&ctx->next, &xgene_pmu->iobpmus); in fdt_pmu_probe_pmu_dev()
1704 list_add(&ctx->next, &xgene_pmu->iobpmus); in fdt_pmu_probe_pmu_dev()
1707 list_add(&ctx->next, &xgene_pmu->mcbpmus); in fdt_pmu_probe_pmu_dev()
1710 list_add(&ctx->next, &xgene_pmu->mcpmus); in fdt_pmu_probe_pmu_dev()
1721 if (has_acpi_companion(&pdev->dev)) in xgene_pmu_probe_pmu_dev()
1769 { .compatible = "apm,xgene-pmu", .data = &xgene_pmu_data },
1770 { .compatible = "apm,xgene-pmu-v2", .data = &xgene_pmu_v2_data },
1789 if (cpumask_empty(&xgene_pmu->cpu)) in xgene_pmu_online_cpu()
1790 cpumask_set_cpu(cpu, &xgene_pmu->cpu); in xgene_pmu_online_cpu()
1793 WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu)); in xgene_pmu_online_cpu()
1805 if (!cpumask_test_and_clear_cpu(cpu, &xgene_pmu->cpu)) in xgene_pmu_offline_cpu()
1811 list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) { in xgene_pmu_offline_cpu()
1812 perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target); in xgene_pmu_offline_cpu()
1814 list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) { in xgene_pmu_offline_cpu()
1815 perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target); in xgene_pmu_offline_cpu()
1817 list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) { in xgene_pmu_offline_cpu()
1818 perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target); in xgene_pmu_offline_cpu()
1820 list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) { in xgene_pmu_offline_cpu()
1821 perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target); in xgene_pmu_offline_cpu()
1824 cpumask_set_cpu(target, &xgene_pmu->cpu); in xgene_pmu_offline_cpu()
1826 WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu)); in xgene_pmu_offline_cpu()
1848 xgene_pmu = devm_kzalloc(&pdev->dev, sizeof(*xgene_pmu), GFP_KERNEL); in xgene_pmu_probe()
1850 return -ENOMEM; in xgene_pmu_probe()
1851 xgene_pmu->dev = &pdev->dev; in xgene_pmu_probe()
1854 version = -EINVAL; in xgene_pmu_probe()
1855 of_id = of_match_device(xgene_pmu_of_match, &pdev->dev); in xgene_pmu_probe()
1857 dev_data = (const struct xgene_pmu_data *) of_id->data; in xgene_pmu_probe()
1858 version = dev_data->id; in xgene_pmu_probe()
1862 if (ACPI_COMPANION(&pdev->dev)) { in xgene_pmu_probe()
1865 acpi_id = acpi_match_device(xgene_pmu_acpi_match, &pdev->dev); in xgene_pmu_probe()
1867 version = (int) acpi_id->driver_data; in xgene_pmu_probe()
1871 return -ENODEV; in xgene_pmu_probe()
1874 xgene_pmu->ops = &xgene_pmu_v3_ops; in xgene_pmu_probe()
1876 xgene_pmu->ops = &xgene_pmu_ops; in xgene_pmu_probe()
1878 INIT_LIST_HEAD(&xgene_pmu->l3cpmus); in xgene_pmu_probe()
1879 INIT_LIST_HEAD(&xgene_pmu->iobpmus); in xgene_pmu_probe()
1880 INIT_LIST_HEAD(&xgene_pmu->mcbpmus); in xgene_pmu_probe()
1881 INIT_LIST_HEAD(&xgene_pmu->mcpmus); in xgene_pmu_probe()
1883 xgene_pmu->version = version; in xgene_pmu_probe()
1884 dev_info(&pdev->dev, "X-Gene PMU version %d\n", xgene_pmu->version); in xgene_pmu_probe()
1887 xgene_pmu->pcppmu_csr = devm_ioremap_resource(&pdev->dev, res); in xgene_pmu_probe()
1888 if (IS_ERR(xgene_pmu->pcppmu_csr)) { in xgene_pmu_probe()
1889 dev_err(&pdev->dev, "ioremap failed for PCP PMU resource\n"); in xgene_pmu_probe()
1890 return PTR_ERR(xgene_pmu->pcppmu_csr); in xgene_pmu_probe()
1895 return -EINVAL; in xgene_pmu_probe()
1897 rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr, in xgene_pmu_probe()
1899 dev_name(&pdev->dev), xgene_pmu); in xgene_pmu_probe()
1901 dev_err(&pdev->dev, "Could not request IRQ %d\n", irq); in xgene_pmu_probe()
1905 xgene_pmu->irq = irq; in xgene_pmu_probe()
1907 raw_spin_lock_init(&xgene_pmu->lock); in xgene_pmu_probe()
1912 dev_warn(&pdev->dev, "Unknown MCB/MCU active status\n"); in xgene_pmu_probe()
1913 xgene_pmu->mcb_active_mask = 0x1; in xgene_pmu_probe()
1914 xgene_pmu->mc_active_mask = 0x1; in xgene_pmu_probe()
1919 &xgene_pmu->node); in xgene_pmu_probe()
1921 dev_err(&pdev->dev, "Error %d registering hotplug", rc); in xgene_pmu_probe()
1928 dev_err(&pdev->dev, "No PMU perf devices found!\n"); in xgene_pmu_probe()
1933 xgene_pmu->ops->unmask_int(xgene_pmu); in xgene_pmu_probe()
1939 &xgene_pmu->node); in xgene_pmu_probe()
1949 perf_pmu_unregister(&ctx->pmu_dev->pmu); in xgene_pmu_dev_cleanup()
1955 struct xgene_pmu *xgene_pmu = dev_get_drvdata(&pdev->dev); in xgene_pmu_remove()
1957 xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->l3cpmus); in xgene_pmu_remove()
1958 xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->iobpmus); in xgene_pmu_remove()
1959 xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcbpmus); in xgene_pmu_remove()
1960 xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus); in xgene_pmu_remove()
1962 &xgene_pmu->node); in xgene_pmu_remove()
1971 .name = "xgene-pmu",