/Linux-v6.1/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/

  base.c
      32  struct nvkm_pmu *pmu = device->pmu;    [in nvkm_pmu_fan_controlled(), local]
      34  /* Internal PMU FW does not currently control fans in any way,    [in nvkm_pmu_fan_controlled()]
      37  if (pmu && pmu->func->code.size)    [in nvkm_pmu_fan_controlled()]
      40  /* Default (board-loaded, or VBIOS PMU/PREOS) PMU FW on Fermi    [in nvkm_pmu_fan_controlled()]
      48  nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)    [in nvkm_pmu_pgob(), argument]
      50  if (pmu && pmu->func->pgob)    [in nvkm_pmu_pgob()]
      51  pmu->func->pgob(pmu, enable);    [in nvkm_pmu_pgob()]
      57  struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work);    [in nvkm_pmu_recv(), local]
      58  return pmu->func->recv(pmu);    [in nvkm_pmu_recv()]
      62  nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],    [in nvkm_pmu_send(), argument]
    [all …]
  gk20a.c
      51  gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state)    [in gk20a_pmu_dvfs_target(), argument]
      53  struct nvkm_clk *clk = pmu->base.subdev.device->clk;    [in gk20a_pmu_dvfs_target()]
      59  gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state)    [in gk20a_pmu_dvfs_get_cur_state(), argument]
      61  struct nvkm_clk *clk = pmu->base.subdev.device->clk;    [in gk20a_pmu_dvfs_get_cur_state()]
      67  gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,    [in gk20a_pmu_dvfs_get_target_state(), argument]
      70  struct gk20a_pmu_dvfs_data *data = pmu->data;    [in gk20a_pmu_dvfs_get_target_state()]
      71  struct nvkm_clk *clk = pmu->base.subdev.device->clk;    [in gk20a_pmu_dvfs_get_target_state()]
      86  nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n",    [in gk20a_pmu_dvfs_get_target_state()]
      95  gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,    [in gk20a_pmu_dvfs_get_dev_status(), argument]
      98  struct nvkm_falcon *falcon = &pmu->base.falcon;    [in gk20a_pmu_dvfs_get_dev_status()]
    [all …]
  gt215.c
      30  gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],    [in gt215_pmu_send(), argument]
      33  struct nvkm_subdev *subdev = &pmu->subdev;    [in gt215_pmu_send()]
      37  mutex_lock(&pmu->send.mutex);    [in gt215_pmu_send()]
      45  mutex_unlock(&pmu->send.mutex);    [in gt215_pmu_send()]
      50  * on a synchronous reply, take the PMU mutex and tell the    [in gt215_pmu_send()]
      54  pmu->recv.message = message;    [in gt215_pmu_send()]
      55  pmu->recv.process = process;    [in gt215_pmu_send()]
      65  pmu->send.base));    [in gt215_pmu_send()]
      77  wait_event(pmu->recv.wait, (pmu->recv.process == 0));    [in gt215_pmu_send()]
      78  reply[0] = pmu->recv.data[0];    [in gt215_pmu_send()]
    [all …]
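The gt215.c hits above outline a synchronous mailbox exchange with the PMU firmware: the sender takes a mutex, records which process/message it expects a reply for, pokes the firmware, then sleeps on a wait queue until the receive handler clears the marker and copies the payload. A minimal, generic sketch of that pattern follows; struct my_mbox and my_mbox_kick() are illustrative stand-ins, not the nouveau API.

    #include <linux/mutex.h>
    #include <linux/types.h>
    #include <linux/wait.h>

    struct my_mbox {
            struct mutex lock;          /* serialises senders */
            wait_queue_head_t wait;     /* woken by the receive handler */
            u32 process, message;       /* reply currently being waited for */
            u32 data[2];                /* reply payload filled in by the handler */
    };

    /* Hypothetical helper that writes the request to the device. */
    void my_mbox_kick(struct my_mbox *mbox, u32 process, u32 message);

    static int my_mbox_send_sync(struct my_mbox *mbox, u32 reply[2],
                                 u32 process, u32 message)
    {
            mutex_lock(&mbox->lock);

            /* Tell the receive path which reply we expect. */
            mbox->process = process;
            mbox->message = message;

            my_mbox_kick(mbox, process, message);

            /* The IRQ/receive handler copies data[] and zeroes ->process. */
            wait_event(mbox->wait, mbox->process == 0);
            reply[0] = mbox->data[0];
            reply[1] = mbox->data[1];

            mutex_unlock(&mbox->lock);
            return 0;
    }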
/Linux-v6.1/Documentation/devicetree/bindings/arm/

  pmu.yaml
       4  $id: http://devicetree.org/schemas/arm/pmu.yaml#
      14  ARM cores often have a PMU for counting cpu and cache events like cache misses
      15  and hits. The interface to the PMU is part of the ARM ARM. The ARM PMU
      22  - apm,potenza-pmu
      23  - apple,firestorm-pmu
      24  - apple,icestorm-pmu
      26  - arm,arm1136-pmu
      27  - arm,arm1176-pmu
      28  - arm,arm11mpcore-pmu
      29  - arm,cortex-a5-pmu
    [all …]
/Linux-v6.1/arch/x86/kvm/vmx/

  pmu_intel.c
       3  * KVM PMU support for Intel CPUs
      19  #include "pmu.h"
      38  static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)    [in reprogram_fixed_counters(), argument]
      41  u8 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;    [in reprogram_fixed_counters()]
      44  pmu->fixed_ctr_ctrl = data;    [in reprogram_fixed_counters()]
      45  for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {    [in reprogram_fixed_counters()]
      52  pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);    [in reprogram_fixed_counters()]
      54  __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);    [in reprogram_fixed_counters()]
      59  static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)    [in intel_pmc_idx_to_pmc(), argument]
      62  return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,    [in intel_pmc_idx_to_pmc()]
    [all …]
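reprogram_fixed_counters() above walks the guest's IA32_FIXED_CTR_CTRL image, in which every fixed counter owns a 4-bit control nibble. A standalone sketch of pulling a per-counter field out of that packed MSR value; the helper names are hypothetical, the bit meanings follow the Intel SDM's description of IA32_FIXED_CTR_CTRL.

    #include <stdint.h>

    /*
     * IA32_FIXED_CTR_CTRL packs one 4-bit nibble per fixed counter:
     * bit 0 = count in ring 0, bit 1 = count in ring 3,
     * bit 2 = any-thread, bit 3 = raise a PMI on overflow.
     */
    static inline uint8_t fixed_ctrl_nibble(uint64_t fixed_ctr_ctrl, int counter)
    {
            return (fixed_ctr_ctrl >> (counter * 4)) & 0xf;
    }

    /* A fixed counter is active if its OS and/or USR enable bit is set. */
    static inline int fixed_counter_enabled(uint64_t fixed_ctr_ctrl, int counter)
    {
            return fixed_ctrl_nibble(fixed_ctr_ctrl, counter) & 0x3;
    }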
/Linux-v6.1/drivers/gpu/drm/i915/

  i915_pmu.c
     107  static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)    [in pmu_needs_timer(), argument]
     109  struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);    [in pmu_needs_timer()]
     117  enable = pmu->enable;    [in pmu_needs_timer()]
     170  struct i915_pmu *pmu = &i915->pmu;    [in get_rc6(), local]
     181  spin_lock_irqsave(&pmu->lock, flags);    [in get_rc6()]
     184  pmu->sample[__I915_SAMPLE_RC6].cur = val;    [in get_rc6()]
     193  val = ktime_since_raw(pmu->sleep_last);    [in get_rc6()]
     194  val += pmu->sample[__I915_SAMPLE_RC6].cur;    [in get_rc6()]
     197  if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)    [in get_rc6()]
     198  val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;    [in get_rc6()]
    [all …]
/Linux-v6.1/drivers/soc/dove/

  pmu.c
       3  * Marvell Dove PMU support
      17  #include <linux/soc/dove/pmu.h>
      42  * The PMU contains a register to reset various subsystems within the
      50  struct pmu_data *pmu = rcdev_to_pmu(rc);    [in pmu_reset_reset(), local]
      54  spin_lock_irqsave(&pmu->lock, flags);    [in pmu_reset_reset()]
      55  val = readl_relaxed(pmu->pmc_base + PMC_SW_RST);    [in pmu_reset_reset()]
      56  writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST);    [in pmu_reset_reset()]
      57  writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST);    [in pmu_reset_reset()]
      58  spin_unlock_irqrestore(&pmu->lock, flags);    [in pmu_reset_reset()]
      65  struct pmu_data *pmu = rcdev_to_pmu(rc);    [in pmu_reset_assert(), local]
    [all …]
/Linux-v6.1/drivers/perf/

  fsl_imx8_ddr_perf.c
      40  #define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
      53  const char *identifier; /* system PMU identifier for userspace */
      83  { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
      84  { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
      85  { .compatible = "fsl,imx8mq-ddr-pmu", .data = &imx8mq_devtype_data},
      86  { .compatible = "fsl,imx8mm-ddr-pmu", .data = &imx8mm_devtype_data},
      87  { .compatible = "fsl,imx8mn-ddr-pmu", .data = &imx8mn_devtype_data},
      88  { .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
      94  struct pmu pmu;    [member]
     111  struct ddr_pmu *pmu = dev_get_drvdata(dev);    [in ddr_perf_identifier_show(), local]
    [all …]
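The to_ddr_pmu() hit shows the usual way perf drivers recover their private state: embed the core struct pmu inside a driver structure and convert back with container_of() in each callback. A minimal sketch of that idiom, with an invented my_ddr_pmu standing in for the driver's real ddr_pmu:

    #include <linux/container_of.h>
    #include <linux/perf_event.h>

    /* Driver-private state with the generic 'struct pmu' embedded in it. */
    struct my_ddr_pmu {
            struct pmu pmu;
            void __iomem *base;
            int irq;
    };

    /* perf hands callbacks a 'struct pmu *'; step back to the wrapper. */
    #define to_my_ddr_pmu(p) container_of(p, struct my_ddr_pmu, pmu)

    static void my_ddr_pmu_enable(struct pmu *pmu)
    {
            struct my_ddr_pmu *ddr = to_my_ddr_pmu(pmu);

            /* ... program counters through ddr->base here ... */
            (void)ddr;
    }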
  marvell_cn10k_ddr_pmu.c
     125  struct pmu pmu;    [member]
     135  #define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)
     233  struct cn10k_ddr_pmu *pmu = dev_get_drvdata(dev);    [in cn10k_ddr_perf_cpumask_show(), local]
     235  return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));    [in cn10k_ddr_perf_cpumask_show()]
     289  static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu,    [in cn10k_ddr_perf_alloc_counter(), argument]
     297  pmu->events[DDRC_PERF_READ_COUNTER_IDX] = event;    [in cn10k_ddr_perf_alloc_counter()]
     303  pmu->events[DDRC_PERF_WRITE_COUNTER_IDX] = event;    [in cn10k_ddr_perf_alloc_counter()]
     309  if (pmu->events[i] == NULL) {    [in cn10k_ddr_perf_alloc_counter()]
     310  pmu->events[i] = event;    [in cn10k_ddr_perf_alloc_counter()]
     318  static void cn10k_ddr_perf_free_counter(struct cn10k_ddr_pmu *pmu, int counter)    [in cn10k_ddr_perf_free_counter(), argument]
    [all …]
  Kconfig
      10  tristate "ARM CCI PMU driver"
      14  Support for PMU events monitoring on the ARM CCI (Cache Coherent
      41  PMU (perf) driver supporting the ARM CCN (Cache Coherent Network)
      45  tristate "Arm CMN-600 PMU support"
      48  Support for PMU events monitoring on the Arm CMN-600 Coherent Mesh
      53  bool "ARM PMU framework"
      61  bool "RISC-V PMU framework"
      65  systems. This provides the core PMU framework that abstracts common
      66  PMU functionalities in a core library so that different PMU drivers
      71  bool "RISC-V legacy PMU implementation"
    [all …]
  arm_pmu_platform.c
      26  static int probe_current_pmu(struct arm_pmu *pmu,    [in probe_current_pmu(), argument]
      33  pr_info("probing PMU on CPU %d\n", cpu);    [in probe_current_pmu()]
      38  ret = info->init(pmu);    [in probe_current_pmu()]
      46  static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq)    [in pmu_parse_percpu_irq(), argument]
      49  struct pmu_hw_events __percpu *hw_events = pmu->hw_events;    [in pmu_parse_percpu_irq()]
      51  ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);    [in pmu_parse_percpu_irq()]
      55  for_each_cpu(cpu, &pmu->supported_cpus)    [in pmu_parse_percpu_irq()]
      96  static int pmu_parse_irqs(struct arm_pmu *pmu)    [in pmu_parse_irqs(), argument]
      99  struct platform_device *pdev = pmu->plat_device;    [in pmu_parse_irqs()]
     100  struct pmu_hw_events __percpu *hw_events = pmu->hw_events;    [in pmu_parse_irqs()]
    [all …]
  arm_pmu_acpi.c
      31  * Per the ACPI spec, the MADT cannot describe a PMU that doesn't    [in arm_pmu_acpi_register_irq()]
      87  * For lack of a better place, hook the normal PMU MADT walk
     149  pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",    [in arm_pmu_acpi_parse_irqs()]
     153  pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);    [in arm_pmu_acpi_parse_irqs()]
     193  struct arm_pmu *pmu;    [in arm_pmu_acpi_find_alloc_pmu(), local]
     197  pmu = per_cpu(probed_pmus, cpu);    [in arm_pmu_acpi_find_alloc_pmu()]
     198  if (!pmu || pmu->acpi_cpuid != cpuid)    [in arm_pmu_acpi_find_alloc_pmu()]
     201  return pmu;    [in arm_pmu_acpi_find_alloc_pmu()]
     204  pmu = armpmu_alloc_atomic();    [in arm_pmu_acpi_find_alloc_pmu()]
     205  if (!pmu) {    [in arm_pmu_acpi_find_alloc_pmu()]
    [all …]
  arm_pmu.c
     183  if (type == event->pmu->type)    [in armpmu_map_event()]
     200  struct arm_pmu *armpmu = to_arm_pmu(event->pmu);    [in armpmu_event_set_period()]
     242  struct arm_pmu *armpmu = to_arm_pmu(event->pmu);    [in armpmu_event_update()]
     272  struct arm_pmu *armpmu = to_arm_pmu(event->pmu);    [in armpmu_stop()]
     276  * ARM pmu always has to update the counter, so ignore    [in armpmu_stop()]
     288  struct arm_pmu *armpmu = to_arm_pmu(event->pmu);    [in armpmu_start()]
     292  * ARM pmu always has to reprogram the period, so ignore    [in armpmu_start()]
     313  struct arm_pmu *armpmu = to_arm_pmu(event->pmu);    [in armpmu_del()]
     329  struct arm_pmu *armpmu = to_arm_pmu(event->pmu);    [in armpmu_add()]
     362  validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,    [in validate_event(), argument]
    [all …]
/Linux-v6.1/Documentation/devicetree/bindings/soc/samsung/

  exynos-pmu.yaml
       4  $id: http://devicetree.org/schemas/soc/samsung/exynos-pmu.yaml#
       7  title: Samsung Exynos SoC series Power Management Unit (PMU)
      18  - samsung,exynos3250-pmu
      19  - samsung,exynos4210-pmu
      20  - samsung,exynos4412-pmu
      21  - samsung,exynos5250-pmu
      22  - samsung,exynos5260-pmu
      23  - samsung,exynos5410-pmu
      24  - samsung,exynos5420-pmu
      25  - samsung,exynos5433-pmu
    [all …]
/Linux-v6.1/arch/x86/kvm/svm/

  pmu.c
       3  * KVM PMU support for AMD
      18  #include "pmu.h"
      26  static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)    [in amd_pmc_idx_to_pmc(), argument]
      28  unsigned int num_counters = pmu->nr_arch_gp_counters;    [in amd_pmc_idx_to_pmc()]
      33  return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];    [in amd_pmc_idx_to_pmc()]
      36  static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,    [in get_gp_pmc_amd(), argument]
      39  struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);    [in get_gp_pmc_amd()]
      50  * Each PMU counter has a pair of CTL and CTR MSRs. CTLn    [in get_gp_pmc_amd()]
      71  return amd_pmc_idx_to_pmc(pmu, idx);    [in get_gp_pmc_amd()]
      89  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);    [in amd_is_valid_rdpmc_ecx(), local]
    [all …]
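The line-50 comment in get_gp_pmc_amd() notes that each AMD core performance counter is addressed through a CTL/CTR MSR pair. Under the documented Family-15h-and-later layout the pairs are interleaved, so the counter index is simple arithmetic on the MSR number; the sketch below assumes that layout (verify the constants against msr-index.h or the APM before relying on them).

    #include <stdint.h>

    /* AMD Family 15h+ core PMU: CTLn at base + 2*n, CTRn at base + 2*n + 1. */
    #define MSR_F15H_PERF_CTL0  0xc0010200
    #define MSR_F15H_PERF_CTR0  0xc0010201

    static inline unsigned int amd_core_msr_to_counter(uint32_t msr)
    {
            return (msr - MSR_F15H_PERF_CTL0) / 2;
    }

    static inline int amd_core_msr_is_ctl(uint32_t msr)
    {
            return ((msr - MSR_F15H_PERF_CTL0) & 1) == 0;
    }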
/Linux-v6.1/Documentation/devicetree/bindings/pinctrl/

  marvell,dove-pinctrl.txt
       9  - reg: register specifiers of MPP, MPP4, and PMU MPP registers
      14  Note: pmu* also allows for Power Management functions listed below
      18  mpp0    0    gpio, pmu, uart2(rts), sdio0(cd), lcd0(pwm), pmu*
      19  mpp1    1    gpio, pmu, uart2(cts), sdio0(wp), lcd1(pwm), pmu*
      20  mpp2    2    gpio, pmu, uart2(txd), sdio0(buspwr), sata(prsnt),
      21               uart1(rts), pmu*
      22  mpp3    3    gpio, pmu, uart2(rxd), sdio0(ledctrl), sata(act),
      23               uart1(cts), lcd-spi(cs1), pmu*
      24  mpp4    4    gpio, pmu, uart3(rts), sdio1(cd), spi1(miso), pmu*
      25  mpp5    5    gpio, pmu, uart3(cts), sdio1(wp), spi1(cs), pmu*
    [all …]
/Linux-v6.1/Documentation/devicetree/bindings/arm/rockchip/

  pmu.yaml
       4  $id: http://devicetree.org/schemas/arm/rockchip/pmu.yaml#
       7  title: Rockchip Power Management Unit (PMU)
      14  The PMU is used to turn on and off different power domains of the SoCs.
      22  - rockchip,px30-pmu
      23  - rockchip,rk3066-pmu
      24  - rockchip,rk3128-pmu
      25  - rockchip,rk3288-pmu
      26  - rockchip,rk3368-pmu
      27  - rockchip,rk3399-pmu
      28  - rockchip,rk3568-pmu
    [all …]
/Linux-v6.1/Documentation/devicetree/bindings/perf/

  apm-xgene-pmu.txt
       1  * APM X-Gene SoC PMU bindings
       3  This is APM X-Gene SoC PMU (Performance Monitoring Unit) module.
       4  The following PMU devices are supported:
      11  The following section describes the SoC PMU DT node binding.
      14  - compatible : Shall be "apm,xgene-pmu" for revision 1 or
      15                 "apm,xgene-pmu-v2" for revision 2.
      19  - reg : First resource shall be the CPU bus PMU resource.
      20  - interrupts : Interrupt-specifier for PMU IRQ.
      23  - compatible : Shall be "apm,xgene-pmu-l3c".
      24  - reg : First resource shall be the L3C PMU resource.
    [all …]
/Linux-v6.1/tools/perf/util/

  pmu.c
      24  #include "pmu.h"
      30  #include "pmu-hybrid.h"
      86  * Reading/parsing the default pmu format definition, which should be
     337  pmu_name = (char *)pe->pmu;    [in __perf_pmu__new_alias()]
     360  * event=0x0091 (read from files ../<PMU>/events/<FILE>    [in __perf_pmu__new_alias()]
     493  * Reading the pmu event aliases definition, which should be located at:
     541  * Reading/parsing the default pmu type value, which should be
     572  /* Add all pmus in sysfs to pmu list: */
     675  * PMU CORE devices have different name other than cpu in sysfs on some
     693  char *perf_pmu__getcpuid(struct perf_pmu *pmu)    [in perf_pmu__getcpuid(), argument]
    [all …]
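Several of the comments above point at the sysfs layout that perf parses: each PMU exports its dynamic type number and its event/format aliases under /sys/bus/event_source/devices/<pmu>/. The userspace sketch below does the same thing by hand, reading the "cpu" PMU's type file and opening one counter with perf_event_open(2); the PMU name and the raw config value are placeholders for whatever your system actually provides.

    /* Sketch: gcc -o open_pmu open_pmu.c  (needs perf_event_paranoid access) */
    #include <linux/perf_event.h>
    #include <limits.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Read /sys/bus/event_source/devices/<name>/type, as perf itself does. */
    static int read_pmu_type(const char *name)
    {
            char path[PATH_MAX];
            FILE *f;
            int type = -1;

            snprintf(path, sizeof(path),
                     "/sys/bus/event_source/devices/%s/type", name);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            if (fscanf(f, "%d", &type) != 1)
                    type = -1;
            fclose(f);
            return type;
    }

    int main(void)
    {
            struct perf_event_attr attr;
            long long count;
            int type, fd;

            type = read_pmu_type("cpu");    /* core PMU on most non-hybrid x86 boxes */
            if (type < 0)
                    return 1;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = type;
            attr.config = 0x003c;           /* placeholder raw event; see the PMU's events/ dir */
            attr.disabled = 1;

            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0)
                    return 1;

            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            /* ... run the workload to be measured ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("count: %lld\n", count);
            close(fd);
            return 0;
    }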
  parse-events-hybrid.c
      14  #include "pmu.h"
      15  #include "pmu-hybrid.h"
      27  * EEEEEEEE: PMU type ID    [in config_hybrid_attr()]
      32  * EEEEEEEE: PMU type ID    [in config_hybrid_attr()]
      33  * If the PMU type ID is 0, the PERF_TYPE_RAW will be applied.    [in config_hybrid_attr()]
      45  struct perf_pmu *pmu)    [in create_event_hybrid(), argument]
      51  config_hybrid_attr(attr, config_type, pmu->type);    [in create_event_hybrid()]
      55  * PMU. For example, the 'L1-dcache-load-misses' is only available    [in create_event_hybrid()]
      64  pmu, config_terms);    [in create_event_hybrid()]
      66  evsel->pmu_name = strdup(pmu->name);    [in create_event_hybrid()]
    [all …]
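The config_hybrid_attr() comments describe how a generic hardware event is steered to one core PMU on hybrid systems: the PMU's type ID goes into the upper 32 bits of attr.config (0xEEEEEEEE000000AA), and a type ID of 0 falls back to PERF_TYPE_RAW handling. A small sketch of that encoding; the example type value 8 is purely illustrative, the real number comes from the PMU's sysfs type file.

    #include <linux/perf_event.h>
    #include <stdint.h>
    #include <string.h>

    /* Encode "hardware event AA on the PMU whose type ID is EEEEEEEE". */
    static void config_hw_event_for_pmu(struct perf_event_attr *attr,
                                        uint64_t hw_event, uint32_t pmu_type)
    {
            memset(attr, 0, sizeof(*attr));
            attr->size = sizeof(*attr);
            attr->type = PERF_TYPE_HARDWARE;
            attr->config = ((uint64_t)pmu_type << 32) | hw_event;
    }

    /* e.g. cycles on the PMU whose sysfs "type" file reads 8 (illustrative):
     *   config_hw_event_for_pmu(&attr, PERF_COUNT_HW_CPU_CYCLES, 8);
     */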
/Linux-v6.1/arch/arm64/kvm/

  pmu-emul.c
      44  WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);    [in kvm_pmu_event_mask()]
      62  struct kvm_pmu *pmu;    [in kvm_pmc_to_vcpu(), local]
      66  pmu = container_of(pmc, struct kvm_pmu, pmc[0]);    [in kvm_pmc_to_vcpu()]
      67  vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);    [in kvm_pmc_to_vcpu()]
      73  * @pmc: The PMU counter pointer
      79  return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);    [in kvm_pmu_pmc_is_chained()]
      93  * @pmc: The PMU counter pointer
     135  * kvm_pmu_get_pair_counter_value - get PMU counter value
     137  * @pmc: The PMU counter pointer
     170  * kvm_pmu_get_counter_value - get PMU counter value
    [all …]
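kvm_pmu_pmc_is_chained() tests a per-pair bitmap, hence the idx >> 1: an AArch64 chain event ties an even 32-bit counter (low half) to the next odd counter (high half), so chaining is a property of the pair rather than of either counter alone. A tiny sketch of that index and value arithmetic:

    #include <stdint.h>

    /* Counters 0/1 form pair 0, 2/3 form pair 1, and so on. */
    static inline unsigned int pmc_pair_index(unsigned int idx)
    {
            return idx >> 1;
    }

    /* A chained pair reads as one 64-bit value: the odd counter is the high half. */
    static inline uint64_t chained_counter_value(uint32_t even_half, uint32_t odd_half)
    {
            return ((uint64_t)odd_half << 32) | even_half;
    }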
/Linux-v6.1/arch/x86/events/intel/

  uncore.c
     122  struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)    [in uncore_pmu_to_box(), argument]
     130  return dieid < uncore_max_dies() ? pmu->boxes[dieid] : NULL;    [in uncore_pmu_to_box()]
     363  * Using uncore_pmu_event_init pmu event_init callback
     370  return &box->pmu->pmu == event->pmu;    [in is_box_event()]
     380  max_count = box->pmu->type->num_counters;    [in uncore_collect_events()]
     381  if (box->pmu->type->fixed_ctl)    [in uncore_collect_events()]
     414  struct intel_uncore_type *type = box->pmu->type;    [in uncore_get_event_constraint()]
     439  if (box->pmu->type->ops->put_constraint)    [in uncore_put_event_constraint()]
     440  box->pmu->type->ops->put_constraint(box, event);    [in uncore_put_event_constraint()]
     676  * PMU if it was the only group available.
    [all …]
  uncore.h
      85  struct pmu *pmu; /* for custom pmu ops */    [member]
      87  * Uncore PMU would store relevant platform topology configuration here
     119  struct pmu pmu;    [member]
     148  struct intel_uncore_pmu *pmu;    [member]
     201  return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);    [in dev_to_uncore_pmu()]
     241  if (offset < box->pmu->type->mmio_map_size)    [in uncore_mmio_is_valid_offset()]
     245  offset, box->pmu->type->name);    [in uncore_mmio_is_valid_offset()]
     253  return box->pmu->type->box_ctl +    [in uncore_mmio_box_ctl()]
     254  box->pmu->type->mmio_offset * box->pmu->pmu_idx;    [in uncore_mmio_box_ctl()]
     259  return box->pmu->type->box_ctl;    [in uncore_pci_box_ctl()]
    [all …]
/Linux-v6.1/arch/x86/events/

  rapl.c
      56  #define pr_fmt(fmt) "RAPL PMU: " fmt
     110  struct pmu *pmu;    [member]
     116  struct pmu pmu;    [member]
     210  static void rapl_start_hrtimer(struct rapl_pmu *pmu)    [in rapl_start_hrtimer(), argument]
     212  hrtimer_start(&pmu->hrtimer, pmu->timer_interval,    [in rapl_start_hrtimer()]
     218  struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);    [in rapl_hrtimer_handle(), local]
     222  if (!pmu->n_active)    [in rapl_hrtimer_handle()]
     225  raw_spin_lock_irqsave(&pmu->lock, flags);    [in rapl_hrtimer_handle()]
     227  list_for_each_entry(event, &pmu->active_list, active_entry)    [in rapl_hrtimer_handle()]
     230  raw_spin_unlock_irqrestore(&pmu->lock, flags);    [in rapl_hrtimer_handle()]
    [all …]
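The rapl.c hits show a PMU that refreshes its counters from a periodic hrtimer rather than an overflow interrupt. A generic sketch of that periodic-poll pattern; struct my_poll_pmu and the empty critical section are placeholders, not the rapl driver's actual types.

    #include <linux/container_of.h>
    #include <linux/hrtimer.h>
    #include <linux/ktime.h>
    #include <linux/spinlock.h>

    /* Illustrative state; only the fields the timer needs are shown. */
    struct my_poll_pmu {
            struct hrtimer hrtimer;
            ktime_t timer_interval;
            raw_spinlock_t lock;
            int n_active;
    };

    static enum hrtimer_restart my_poll_handle(struct hrtimer *hrtimer)
    {
            struct my_poll_pmu *pmu = container_of(hrtimer, struct my_poll_pmu, hrtimer);
            unsigned long flags;

            if (!pmu->n_active)
                    return HRTIMER_NORESTART;

            raw_spin_lock_irqsave(&pmu->lock, flags);
            /* ... fold the hardware counters into each active event here ... */
            raw_spin_unlock_irqrestore(&pmu->lock, flags);

            /* Re-arm relative to now so the polling period stays fixed. */
            hrtimer_forward_now(hrtimer, pmu->timer_interval);
            return HRTIMER_RESTART;
    }

    static void my_poll_start(struct my_poll_pmu *pmu)
    {
            hrtimer_init(&pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            pmu->hrtimer.function = my_poll_handle;
            hrtimer_start(&pmu->hrtimer, pmu->timer_interval, HRTIMER_MODE_REL);
    }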
/Linux-v6.1/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/

  gm200.c
      28  #include <subdev/bios/pmu.h>
      32  pmu_code(struct nv50_devinit *init, u32 pmu, u32 img, u32 len, bool sec)    [in pmu_code(), argument]
      38  nvkm_wr32(device, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu);    [in pmu_code()]
      41  nvkm_wr32(device, 0x10a188, (pmu + i) >> 8);    [in pmu_code()]
      52  pmu_data(struct nv50_devinit *init, u32 pmu, u32 img, u32 len)    [in pmu_data(), argument]
      58  nvkm_wr32(device, 0x10a1c0, 0x01000000 | pmu);    [in pmu_data()]
      87  struct nvbios_pmuR pmu;    [in pmu_load(), local]
      89  if (!nvbios_pmuRm(bios, type, &pmu))    [in pmu_load()]
      95  pmu_code(init, pmu.boot_addr_pmu, pmu.boot_addr, pmu.boot_size, false);    [in pmu_load()]
      96  pmu_code(init, pmu.code_addr_pmu, pmu.code_addr, pmu.code_size, true);    [in pmu_load()]
    [all …]