Searched full:pmu (Results 1 – 25 of 881) sorted by relevance

/Linux-v5.10/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/
base.c
32 struct nvkm_pmu *pmu = device->pmu; in nvkm_pmu_fan_controlled() local
34 /* Internal PMU FW does not currently control fans in any way, in nvkm_pmu_fan_controlled()
37 if (pmu && pmu->func->code.size) in nvkm_pmu_fan_controlled()
40 /* Default (board-loaded, or VBIOS PMU/PREOS) PMU FW on Fermi in nvkm_pmu_fan_controlled()
48 nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable) in nvkm_pmu_pgob() argument
50 if (pmu && pmu->func->pgob) in nvkm_pmu_pgob()
51 pmu->func->pgob(pmu, enable); in nvkm_pmu_pgob()
57 struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work); in nvkm_pmu_recv() local
58 return pmu->func->recv(pmu); in nvkm_pmu_recv()
62 nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2], in nvkm_pmu_send() argument
[all …]
gk20a.c
51 gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state) in gk20a_pmu_dvfs_target() argument
53 struct nvkm_clk *clk = pmu->base.subdev.device->clk; in gk20a_pmu_dvfs_target()
59 gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state) in gk20a_pmu_dvfs_get_cur_state() argument
61 struct nvkm_clk *clk = pmu->base.subdev.device->clk; in gk20a_pmu_dvfs_get_cur_state()
67 gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu, in gk20a_pmu_dvfs_get_target_state() argument
70 struct gk20a_pmu_dvfs_data *data = pmu->data; in gk20a_pmu_dvfs_get_target_state()
71 struct nvkm_clk *clk = pmu->base.subdev.device->clk; in gk20a_pmu_dvfs_get_target_state()
86 nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n", in gk20a_pmu_dvfs_get_target_state()
95 gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu, in gk20a_pmu_dvfs_get_dev_status() argument
98 struct nvkm_falcon *falcon = &pmu->base.falcon; in gk20a_pmu_dvfs_get_dev_status()
[all …]
gt215.c
30 gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2], in gt215_pmu_send() argument
33 struct nvkm_subdev *subdev = &pmu->subdev; in gt215_pmu_send()
50 * on a synchronous reply, take the PMU mutex and tell the in gt215_pmu_send()
54 pmu->recv.message = message; in gt215_pmu_send()
55 pmu->recv.process = process; in gt215_pmu_send()
65 pmu->send.base)); in gt215_pmu_send()
77 wait_event(pmu->recv.wait, (pmu->recv.process == 0)); in gt215_pmu_send()
78 reply[0] = pmu->recv.data[0]; in gt215_pmu_send()
79 reply[1] = pmu->recv.data[1]; in gt215_pmu_send()
87 gt215_pmu_recv(struct nvkm_pmu *pmu) in gt215_pmu_recv() argument
[all …]
Kbuild
2 nvkm-y += nvkm/subdev/pmu/base.o
3 nvkm-y += nvkm/subdev/pmu/memx.o
4 nvkm-y += nvkm/subdev/pmu/gt215.o
5 nvkm-y += nvkm/subdev/pmu/gf100.o
6 nvkm-y += nvkm/subdev/pmu/gf119.o
7 nvkm-y += nvkm/subdev/pmu/gk104.o
8 nvkm-y += nvkm/subdev/pmu/gk110.o
9 nvkm-y += nvkm/subdev/pmu/gk208.o
10 nvkm-y += nvkm/subdev/pmu/gk20a.o
11 nvkm-y += nvkm/subdev/pmu/gm107.o
[all …]
gm20b.c
28 #include <nvfw/pmu.h>
42 struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon); in gm20b_pmu_acr_bootstrap_falcon() local
52 ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr, in gm20b_pmu_acr_bootstrap_falcon()
54 &pmu->subdev, msecs_to_jiffies(1000)); in gm20b_pmu_acr_bootstrap_falcon()
140 struct nvkm_pmu *pmu = priv; in gm20b_pmu_acr_init_wpr_callback() local
141 struct nvkm_subdev *subdev = &pmu->subdev; in gm20b_pmu_acr_init_wpr_callback()
150 complete_all(&pmu->wpr_ready); in gm20b_pmu_acr_init_wpr_callback()
155 gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu) in gm20b_pmu_acr_init_wpr() argument
165 return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr, in gm20b_pmu_acr_init_wpr()
166 gm20b_pmu_acr_init_wpr_callback, pmu, 0); in gm20b_pmu_acr_init_wpr()
[all …]
/Linux-v5.10/arch/x86/kvm/vmx/
pmu_intel.c
3 * KVM PMU support for Intel CPUs
19 #include "pmu.h"
38 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data) in reprogram_fixed_counters() argument
42 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { in reprogram_fixed_counters()
44 u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i); in reprogram_fixed_counters()
47 pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i); in reprogram_fixed_counters()
52 __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use); in reprogram_fixed_counters()
56 pmu->fixed_ctr_ctrl = data; in reprogram_fixed_counters()
60 static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data) in global_ctrl_changed() argument
63 u64 diff = pmu->global_ctrl ^ data; in global_ctrl_changed()
[all …]
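
The reprogram_fixed_counters() loop above peels one control field per fixed counter out of pmu->fixed_ctr_ctrl. As a point of reference, here is a minimal standalone sketch of that extraction, assuming the usual IA32_FIXED_CTR_CTRL layout of 4 control bits per fixed counter (the kernel's own fixed_ctrl_field() macro is not reproduced here, and the sample value is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout: fixed counter i owns bits [4*i+3 : 4*i] of FIXED_CTR_CTRL. */
    static uint8_t fixed_ctrl_field(uint64_t fixed_ctr_ctrl, int idx)
    {
            return (fixed_ctr_ctrl >> (idx * 4)) & 0xf;
    }

    int main(void)
    {
            uint64_t ctrl = 0x0b0;  /* hypothetical: only fixed counter 1 programmed (OS+USR+PMI) */
            int i;

            for (i = 0; i < 3; i++)
                    printf("fixed counter %d: ctrl field 0x%x\n", i, fixed_ctrl_field(ctrl, i));
            return 0;
    }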
/Linux-v5.10/drivers/gpu/drm/i915/
i915_pmu.c
82 static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active) in pmu_needs_timer() argument
84 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); in pmu_needs_timer()
92 enable = pmu->enable; in pmu_needs_timer()
150 struct i915_pmu *pmu = &i915->pmu; in get_rc6() local
161 spin_lock_irqsave(&pmu->lock, flags); in get_rc6()
164 pmu->sample[__I915_SAMPLE_RC6].cur = val; in get_rc6()
173 val = ktime_since(pmu->sleep_last); in get_rc6()
174 val += pmu->sample[__I915_SAMPLE_RC6].cur; in get_rc6()
177 if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur) in get_rc6()
178 val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur; in get_rc6()
[all …]
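
The tail of get_rc6() above clamps the freshly computed residency so it never drops below the last value already reported. A minimal sketch of that clamp in isolation (names are illustrative, not i915's own):

    #include <stdint.h>

    /* Return a residency sample that never goes backwards: the last reported
     * value acts as a floor, and the floor is advanced on every call. */
    static uint64_t rc6_report_monotonic(uint64_t sample, uint64_t *last_reported)
    {
            if (sample < *last_reported)
                    sample = *last_reported;
            *last_reported = sample;
            return sample;
    }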
/Linux-v5.10/drivers/soc/dove/
pmu.c
3 * Marvell Dove PMU support
17 #include <linux/soc/dove/pmu.h>
42 * The PMU contains a register to reset various subsystems within the
50 struct pmu_data *pmu = rcdev_to_pmu(rc); in pmu_reset_reset() local
54 spin_lock_irqsave(&pmu->lock, flags); in pmu_reset_reset()
55 val = readl_relaxed(pmu->pmc_base + PMC_SW_RST); in pmu_reset_reset()
56 writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST); in pmu_reset_reset()
57 writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST); in pmu_reset_reset()
58 spin_unlock_irqrestore(&pmu->lock, flags); in pmu_reset_reset()
65 struct pmu_data *pmu = rcdev_to_pmu(rc); in pmu_reset_assert() local
[all …]
/Linux-v5.10/drivers/perf/
fsl_imx8_ddr_perf.c
40 #define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
66 { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
67 { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
68 { .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
74 struct pmu pmu; member
93 static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap) in ddr_perf_filter_cap_get() argument
95 u32 quirks = pmu->devtype_data->quirks; in ddr_perf_filter_cap_get()
114 struct ddr_pmu *pmu = dev_get_drvdata(dev); in ddr_perf_filter_cap_show() local
120 ddr_perf_filter_cap_get(pmu, cap)); in ddr_perf_filter_cap_show()
145 struct ddr_pmu *pmu = dev_get_drvdata(dev); in ddr_perf_cpumask_show() local
[all …]
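
The to_ddr_pmu() macro and the embedded `struct pmu pmu` member above show the pattern most of the drivers in this listing share: the core struct pmu lives inside a driver-private structure and is recovered with container_of() from event->pmu in the callbacks. A minimal kernel-style sketch of that pattern, using hypothetical names rather than the imx8 driver's own:

    #include <linux/kernel.h>
    #include <linux/perf_event.h>

    struct example_ddr_pmu {                /* hypothetical driver state */
            struct pmu pmu;                 /* embedded core PMU object */
            void __iomem *base;             /* counter registers */
    };

    #define to_example_pmu(p) container_of((p), struct example_ddr_pmu, pmu)

    static int example_event_init(struct perf_event *event)
    {
            /* event->pmu points at the embedded member, so container_of()
             * recovers the surrounding driver structure. */
            struct example_ddr_pmu *dp = to_example_pmu(event->pmu);

            return dp->base ? 0 : -ENODEV;
    }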
arm_pmu_platform.c
25 static int probe_current_pmu(struct arm_pmu *pmu, in probe_current_pmu() argument
32 pr_info("probing PMU on CPU %d\n", cpu); in probe_current_pmu()
37 ret = info->init(pmu); in probe_current_pmu()
45 static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq) in pmu_parse_percpu_irq() argument
48 struct pmu_hw_events __percpu *hw_events = pmu->hw_events; in pmu_parse_percpu_irq()
50 ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus); in pmu_parse_percpu_irq()
54 for_each_cpu(cpu, &pmu->supported_cpus) in pmu_parse_percpu_irq()
96 static int pmu_parse_irqs(struct arm_pmu *pmu) in pmu_parse_irqs() argument
99 struct platform_device *pdev = pmu->plat_device; in pmu_parse_irqs()
100 struct pmu_hw_events __percpu *hw_events = pmu->hw_events; in pmu_parse_irqs()
[all …]
arm_pmu_acpi.c
31 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't in arm_pmu_acpi_register_irq()
87 * For lack of a better place, hook the normal PMU MADT walk
149 pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n", in arm_pmu_acpi_parse_irqs()
153 pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu); in arm_pmu_acpi_parse_irqs()
191 struct arm_pmu *pmu; in arm_pmu_acpi_find_alloc_pmu() local
195 pmu = per_cpu(probed_pmus, cpu); in arm_pmu_acpi_find_alloc_pmu()
196 if (!pmu || pmu->acpi_cpuid != cpuid) in arm_pmu_acpi_find_alloc_pmu()
199 return pmu; in arm_pmu_acpi_find_alloc_pmu()
202 pmu = armpmu_alloc_atomic(); in arm_pmu_acpi_find_alloc_pmu()
203 if (!pmu) { in arm_pmu_acpi_find_alloc_pmu()
[all …]
arm_pmu.c
181 if (type == event->pmu->type) in armpmu_map_event()
198 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in armpmu_event_set_period()
240 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in armpmu_event_update()
270 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in armpmu_stop()
274 * ARM pmu always has to update the counter, so ignore in armpmu_stop()
286 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in armpmu_start()
290 * ARM pmu always has to reprogram the period, so ignore in armpmu_start()
311 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in armpmu_del()
327 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in armpmu_add()
360 validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events, in validate_event() argument
[all …]
/Linux-v5.10/Documentation/devicetree/bindings/arm/
pmu.yaml
4 $id: http://devicetree.org/schemas/arm/pmu.yaml#
14 ARM cores often have a PMU for counting cpu and cache events like cache misses
15 and hits. The interface to the PMU is part of the ARM ARM. The ARM PMU
22 - apm,potenza-pmu
24 - arm,arm1136-pmu
25 - arm,arm1176-pmu
26 - arm,arm11mpcore-pmu
27 - arm,cortex-a5-pmu
28 - arm,cortex-a7-pmu
29 - arm,cortex-a8-pmu
[all …]
/Linux-v5.10/Documentation/devicetree/bindings/arm/samsung/
pmu.yaml
4 $id: http://devicetree.org/schemas/arm/samsung/pmu.yaml#
7 title: Samsung Exynos SoC series Power Management Unit (PMU)
18 - samsung,exynos3250-pmu
19 - samsung,exynos4210-pmu
20 - samsung,exynos4412-pmu
21 - samsung,exynos5250-pmu
22 - samsung,exynos5260-pmu
23 - samsung,exynos5410-pmu
24 - samsung,exynos5420-pmu
25 - samsung,exynos5433-pmu
[all …]
/Linux-v5.10/Documentation/devicetree/bindings/pinctrl/
marvell,dove-pinctrl.txt
9 - reg: register specifiers of MPP, MPP4, and PMU MPP registers
14 Note: pmu* also allows for Power Management functions listed below
18 mpp0 0 gpio, pmu, uart2(rts), sdio0(cd), lcd0(pwm), pmu*
19 mpp1 1 gpio, pmu, uart2(cts), sdio0(wp), lcd1(pwm), pmu*
20 mpp2 2 gpio, pmu, uart2(txd), sdio0(buspwr), sata(prsnt),
21 uart1(rts), pmu*
22 mpp3 3 gpio, pmu, uart2(rxd), sdio0(ledctrl), sata(act),
23 uart1(cts), lcd-spi(cs1), pmu*
24 mpp4 4 gpio, pmu, uart3(rts), sdio1(cd), spi1(miso), pmu*
25 mpp5 5 gpio, pmu, uart3(cts), sdio1(wp), spi1(cs), pmu*
[all …]
/Linux-v5.10/arch/x86/kvm/svm/
pmu.c
3 * KVM PMU support for AMD
18 #include "pmu.h"
47 static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type) in get_msr_base() argument
49 struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu); in get_msr_base()
98 static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr, in get_gp_pmc_amd() argument
126 return &pmu->gp_counters[msr_to_index(msr)]; in get_gp_pmc_amd()
129 static unsigned amd_find_arch_event(struct kvm_pmu *pmu, in amd_find_arch_event() argument
160 static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx) in amd_pmc_idx_to_pmc() argument
162 unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER); in amd_pmc_idx_to_pmc()
163 struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu); in amd_pmc_idx_to_pmc()
[all …]
/Linux-v5.10/Documentation/devicetree/bindings/perf/
apm-xgene-pmu.txt
1 * APM X-Gene SoC PMU bindings
3 This is APM X-Gene SoC PMU (Performance Monitoring Unit) module.
4 The following PMU devices are supported:
11 The following section describes the SoC PMU DT node binding.
14 - compatible : Shall be "apm,xgene-pmu" for revision 1 or
15 "apm,xgene-pmu-v2" for revision 2.
19 - reg : First resource shall be the CPU bus PMU resource.
20 - interrupts : Interrupt-specifier for PMU IRQ.
23 - compatible : Shall be "apm,xgene-pmu-l3c".
24 - reg : First resource shall be the L3C PMU resource.
[all …]
/Linux-v5.10/arch/arm64/kvm/
pmu-emul.c
33 WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver); in kvm_pmu_event_mask()
51 struct kvm_pmu *pmu; in kvm_pmc_to_vcpu() local
55 pmu = container_of(pmc, struct kvm_pmu, pmc[0]); in kvm_pmc_to_vcpu()
56 vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu); in kvm_pmc_to_vcpu()
62 * @pmc: The PMU counter pointer
68 return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_pmc_is_chained()
82 * @pmc: The PMU counter pointer
124 * kvm_pmu_get_pair_counter_value - get PMU counter value
126 * @pmc: The PMU counter pointer
159 * kvm_pmu_get_counter_value - get PMU counter value
[all …]
/Linux-v5.10/arch/x86/events/intel/
uncore.c
105 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu) in uncore_pmu_to_box() argument
113 return dieid < uncore_max_dies() ? pmu->boxes[dieid] : NULL; in uncore_pmu_to_box()
347 * Using uncore_pmu_event_init pmu event_init callback
354 return &box->pmu->pmu == event->pmu; in is_box_event()
364 max_count = box->pmu->type->num_counters; in uncore_collect_events()
365 if (box->pmu->type->fixed_ctl) in uncore_collect_events()
398 struct intel_uncore_type *type = box->pmu->type; in uncore_get_event_constraint()
423 if (box->pmu->type->ops->put_constraint) in uncore_put_event_constraint()
424 box->pmu->type->ops->put_constraint(box, event); in uncore_put_event_constraint()
660 * PMU if it was the only group available.
[all …]
uncore.h
77 struct pmu *pmu; /* for custom pmu ops */ member
79 * Uncore PMU would store relevant platform topology configuration here
110 struct pmu pmu; member
140 struct intel_uncore_pmu *pmu; member
187 return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu); in dev_to_uncore_pmu()
227 if (offset < box->pmu->type->mmio_map_size) in uncore_mmio_is_valid_offset()
231 offset, box->pmu->type->name); in uncore_mmio_is_valid_offset()
239 return box->pmu->type->box_ctl + in uncore_mmio_box_ctl()
240 box->pmu->type->mmio_offset * box->pmu->pmu_idx; in uncore_mmio_box_ctl()
245 return box->pmu->type->box_ctl; in uncore_pci_box_ctl()
[all …]
/Linux-v5.10/arch/x86/events/
rapl.c
56 #define pr_fmt(fmt) "RAPL PMU: " fmt
110 struct pmu *pmu; member
116 struct pmu pmu; member
210 static void rapl_start_hrtimer(struct rapl_pmu *pmu) in rapl_start_hrtimer() argument
212 hrtimer_start(&pmu->hrtimer, pmu->timer_interval, in rapl_start_hrtimer()
218 struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer); in rapl_hrtimer_handle() local
222 if (!pmu->n_active) in rapl_hrtimer_handle()
225 raw_spin_lock_irqsave(&pmu->lock, flags); in rapl_hrtimer_handle()
227 list_for_each_entry(event, &pmu->active_list, active_entry) in rapl_hrtimer_handle()
230 raw_spin_unlock_irqrestore(&pmu->lock, flags); in rapl_hrtimer_handle()
[all …]
/Linux-v5.10/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/
gm200.c
28 #include <subdev/bios/pmu.h>
32 pmu_code(struct nv50_devinit *init, u32 pmu, u32 img, u32 len, bool sec) in pmu_code() argument
38 nvkm_wr32(device, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu); in pmu_code()
41 nvkm_wr32(device, 0x10a188, (pmu + i) >> 8); in pmu_code()
52 pmu_data(struct nv50_devinit *init, u32 pmu, u32 img, u32 len) in pmu_data() argument
58 nvkm_wr32(device, 0x10a1c0, 0x01000000 | pmu); in pmu_data()
87 struct nvbios_pmuR pmu; in pmu_load() local
89 if (!nvbios_pmuRm(bios, type, &pmu)) in pmu_load()
95 pmu_code(init, pmu.boot_addr_pmu, pmu.boot_addr, pmu.boot_size, false); in pmu_load()
96 pmu_code(init, pmu.code_addr_pmu, pmu.code_addr, pmu.code_size, true); in pmu_load()
[all …]
/Linux-v5.10/tools/perf/util/
pmu.c
22 #include "pmu.h"
82 * Reading/parsing the default pmu format definition, which should be
342 * event=0x0091 (read from files ../<PMU>/events/<FILE> in __perf_pmu__new_alias()
475 * Reading the pmu event aliases definition, which should be located at:
523 * Reading/parsing the default pmu type value, which should be
554 /* Add all pmus in sysfs to pmu list: */
639 * PMU CORE devices have different name other than cpu in sysfs on some
657 static char *perf_pmu__getcpuid(struct perf_pmu *pmu) in perf_pmu__getcpuid() argument
666 cpuid = get_cpuid_str(pmu); in perf_pmu__getcpuid()
677 struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu) in perf_pmu__find_map() argument
[all …]
/Linux-v5.10/arch/powerpc/perf/
Makefile
9 obj64-$(CONFIG_PPC_PERF_CTRS) += ppc970-pmu.o power5-pmu.o \
10 power5+-pmu.o power6-pmu.o power7-pmu.o \
11 isa207-common.o power8-pmu.o power9-pmu.o \
12 generic-compat-pmu.o power10-pmu.o
13 obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
15 obj-$(CONFIG_PPC_POWERNV) += imc-pmu.o
17 obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o e6500-pmu.o
21 obj-$(CONFIG_PPC_8xx) += 8xx-pmu.o
imc-pmu.c
12 #include <asm/imc-pmu.h>
20 * Used to avoid races in counting the nest-pmu units during hotplug
59 return container_of(event->pmu, struct imc_pmu, pmu); in imc_event_to_pmu()
103 struct pmu *pmu = dev_get_drvdata(dev); in imc_pmu_cpumask_get_attr() local
104 struct imc_pmu *imc_pmu = container_of(pmu, struct imc_pmu, pmu); in imc_pmu_cpumask_get_attr()
217 * and assign the attr_group to the pmu "pmu".
219 static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu) in update_events_in_group() argument
256 pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL); in update_events_in_group()
257 if (!pmu->events) in update_events_in_group()
263 ret = imc_parse_event(np, g_scale, g_unit, prefix, base_reg, &pmu->events[ct]); in update_events_in_group()
[all …]
