
Searched full:gmu (Results 1 – 25 of 43) sorted by relevance


/Linux-v6.6/drivers/gpu/drm/msm/adreno/
a6xx_gmu.c
   19  static void a6xx_gmu_fault(struct a6xx_gmu *gmu)  in a6xx_gmu_fault() argument
   21  struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);  in a6xx_gmu_fault()
   26  gmu->hung = true;  in a6xx_gmu_fault()
   37  struct a6xx_gmu *gmu = data;  in a6xx_gmu_irq() local
   40  status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);  in a6xx_gmu_irq()
   41  gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);  in a6xx_gmu_irq()
   44  dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");  in a6xx_gmu_irq()
   46  a6xx_gmu_fault(gmu);  in a6xx_gmu_irq()
   50  dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");  in a6xx_gmu_irq()
   53  dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",  in a6xx_gmu_irq()
  [all …]
a6xx_gmu.h
   22  * These define the different GMU wake up options - these define how both the
   23  * CPU and the GMU bring up the hardware
   26  /* The GMU has already been booted and the retention registers are active */
   29  /* the GMU is coming up for the first time or back from a power collapse */
   33  * These define the level of control that the GMU has - the higher the number
   34  * the more things that the GMU hardware controls on its own.
   37  /* The GMU does not do any idle state management */
   40  /* The GMU manages SPTP power collapse */
   43  /* The GMU does automatic IFPC (intra-frame power collapse) */
   49  /* For serializing communication with the GMU: */
  [all …]
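
The a6xx_gmu.h hits above describe two GMU wake-up options (warm vs. cold boot) and a ladder of idle-control levels. A minimal sketch of how those options look as constants follows; the macro names and values are illustrative assumptions, only the descriptions come from the comments above.

/* Sketch only: boot modes and idle levels as constants; names and values assumed. */

/* How the CPU and the GMU bring the hardware up */
#define GMU_WARM_BOOT 0         /* GMU already booted, retention registers still active */
#define GMU_COLD_BOOT 1         /* first boot, or returning from a power collapse */

/* How much idle-state management the GMU does on its own (higher = more) */
#define GMU_IDLE_STATE_ACTIVE 0 /* no idle-state management by the GMU */
#define GMU_IDLE_STATE_SPTP   2 /* GMU manages SPTP power collapse */
#define GMU_IDLE_STATE_IFPC   3 /* GMU does automatic intra-frame power collapse */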
a6xx_hfi.c
   26  static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,  in a6xx_hfi_queue_read() argument
   42  * If we are to assume that the GMU firmware is in fact a rational actor  in a6xx_hfi_queue_read()
   57  if (!gmu->legacy)  in a6xx_hfi_queue_read()
   64  static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,  in a6xx_hfi_queue_write() argument
   88  if (!gmu->legacy) {  in a6xx_hfi_queue_write()
   96  gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);  in a6xx_hfi_queue_write()
  100  static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,  in a6xx_hfi_wait_for_ack() argument
  103  struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];  in a6xx_hfi_wait_for_ack()
  108  ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,  in a6xx_hfi_wait_for_ack()
  112  DRM_DEV_ERROR(gmu->dev,  in a6xx_hfi_wait_for_ack()
  [all …]
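
The a6xx_hfi.c hits sketch the host-to-GMU HFI path: write the message into an outgoing queue, ring the GMU via REG_A6XX_GMU_HOST2GMU_INTR_SET, then poll REG_A6XX_GMU_GMU2HOST_INTR_INFO for the acknowledgement. A simplified sketch of that send-and-wait flow follows; the wrapper name, the a6xx_hfi_queue_write() signature, the HFI_COMMAND_QUEUE index, and the poll mask/timeouts are assumptions for illustration.

static int hfi_send_and_wait(struct a6xx_gmu *gmu, u32 id, u32 *msg, u32 dwords)
{
        u32 val;
        int ret;

        /* 1. Put the message into the outgoing command queue (signature assumed) */
        ret = a6xx_hfi_queue_write(gmu, &gmu->queues[HFI_COMMAND_QUEUE], msg, dwords);
        if (ret)
                return ret;

        /* 2. Ring the doorbell so the GMU notices the new message */
        gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);

        /* 3. Poll the GMU-to-host interrupt register for a response (mask and timeouts illustrative) */
        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
                               val & 0x01, 100, 5000);
        if (ret)
                DRM_DEV_ERROR(gmu->dev, "Timed out waiting for ack of HFI message %u\n", id);

        return ret;
}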
a6xx_gpu.c
     23  /* Check that the GMU is idle */  in _a6xx_check_idle()
     24  if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_isidle(&a6xx_gpu->gmu))  in _a6xx_check_idle()
    186  * For PM4 the GMU register offsets are calculated from the base of the  in a6xx_submit()
    702  struct a6xx_gmu *gmu = &a6xx_gpu->gmu;  in a6xx_set_hwcg() local
    725  gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);  in a6xx_set_hwcg()
    732  gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);  in a6xx_set_hwcg()
   1199  struct a6xx_gmu *gmu = &a6xx_gpu->gmu;  in hw_init() local
   1203  /* Make sure the GMU keeps the GPU on while we set it up */  in hw_init()
   1204  ret = a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);  in hw_init()
   1225  a6xx_sptprac_enable(gmu);  in hw_init()
  [all …]
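
The hw_init() hits show GPU setup running under an out-of-band (OOB) vote so the GMU keeps the GPU powered while registers are programmed. A minimal sketch of that bracket, using the a6xx_gmu_set_oob()/a6xx_gmu_clear_oob() API listed in a6xx_gpu.h below; the wrapper function name is hypothetical and the real hw_init() does much more.

static int example_hw_init(struct a6xx_gpu *a6xx_gpu)
{
        struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
        int ret;

        /* Ask the GMU to keep the GPU powered while we program it */
        ret = a6xx_gmu_set_oob(gmu, GMU_OOB_GPU_SET);
        if (ret)
                return ret;

        /* ... program GPU registers, ring buffers, clock gating ... */

        /* Drop the vote; the GMU may power-manage the GPU again */
        a6xx_gmu_clear_oob(gmu, GMU_OOB_GPU_SET);

        return 0;
}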
a6xx_gpu.h
   23  struct a6xx_gmu gmu;  member
   86  int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu);
   88  bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
   90  int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
   91  void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
a6xx_gpu_state.c
  144  if (!a6xx_gmu_sptprac_is_on(&a6xx_gpu->gmu))  in a6xx_crashdumper_run()
  775  /* Read a block of GMU registers */
  784  struct a6xx_gmu *gmu = &a6xx_gpu->gmu;  in _a6xx_get_gmu_registers() local
  804  val = gmu_read_rscc(gmu, offset);  in _a6xx_get_gmu_registers()
  806  val = gmu_read(gmu, offset);  in _a6xx_get_gmu_registers()
  827  /* Get the CX GMU registers from AHB */  in a6xx_get_gmu_registers()
  833  if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))  in a6xx_get_gmu_registers()
  871  struct a6xx_gmu *gmu = &a6xx_gpu->gmu;  in a6xx_snapshot_gmu_hfi_history() local
  874  BUILD_BUG_ON(ARRAY_SIZE(gmu->queues) != ARRAY_SIZE(a6xx_state->hfi_queue_history));  in a6xx_snapshot_gmu_hfi_history()
  876  for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {  in a6xx_snapshot_gmu_hfi_history()
  [all …]
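
The a6xx_gpu_state.c hits read blocks of GMU registers for the crash dump, switching between the RSCC aperture and plain AHB reads, and skipping GX-side registers unless a6xx_gmu_gx_is_on() reports the GX domain powered. A rough sketch of such a block-read loop; the descriptor struct and helper name are hypothetical, only gmu_read()/gmu_read_rscc() come from the listing.

struct gmu_reg_block {          /* hypothetical block descriptor */
        u32 start;              /* first register offset in the block */
        u32 count;              /* number of registers to read */
        bool rscc;              /* read through the RSCC aperture instead of plain AHB */
};

static void snapshot_gmu_block(struct a6xx_gmu *gmu,
                               const struct gmu_reg_block *block, u32 *out)
{
        u32 i;

        for (i = 0; i < block->count; i++) {
                u32 offset = block->start + i;

                out[i] = block->rscc ? gmu_read_rscc(gmu, offset) :
                                       gmu_read(gmu, offset);
        }
}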
a6xx_hfi.h
   49  /* This is the outgoing queue to the GMU */
   52  /* This is the incoming response queue from the GMU */
a6xx_gpu_state.h
  342  /* GMU GX */
  351  /* GMU CX */
  361  /* GMU AO */
adreno_gpu.c
  520  /* Skip loading GMU firmware with GMU Wrapper */  in adreno_load_fw()
 1075  /* Only handle the core clock when GMU is not in use (or is absent). */  in adreno_gpu_init()
/Linux-v6.6/Documentation/devicetree/bindings/display/msm/
gmu.yaml
    6  $id: http://devicetree.org/schemas/display/msm/gmu.yaml#
    9  title: GMU attached to certain Adreno GPUs
   15  These bindings describe the Graphics Management Unit (GMU) that is attached
   16  to members of the Adreno A6xx GPU family. The GMU provides on-device power
   24  - pattern: '^qcom,adreno-gmu-6[0-9][0-9]\.[0-9]$'
   25  - const: qcom,adreno-gmu
   26  - const: qcom,adreno-gmu-wrapper
   46  - description: GMU HFI interrupt
   47  - description: GMU interrupt
   52  - const: gmu
  [all …]
gpu.yaml
  111  qcom,gmu:
  114  For GMU attached devices a phandle to the GMU device that will
  187  - const: gmu
  188  description: CX GMU clock
  208  then: # Starting with A6xx, the clocks are usually defined in the GMU node
  272  // Example a6xx (with GMU):
  309  qcom,gmu = <&gmu>;
/Linux-v6.6/arch/arm64/boot/dts/qcom/
msm8992.dtsi
     31  gmu-sram@0 {
sm6350.dtsi
   1335  qcom,gmu = <&gmu>;
   1418  gmu: gmu@3d6a000 {  label
   1419  compatible = "qcom,adreno-gmu-619.0", "qcom,adreno-gmu";
   1423  reg-names = "gmu",
   1430  "gmu";
   1438  "gmu",
sc7180.dtsi
   2093  qcom,gmu = <&gmu>;
   2187  gmu: gmu@506a000 {  label
   2188  compatible = "qcom,adreno-gmu-618.0", "qcom,adreno-gmu";
   2191  reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
   2194  interrupt-names = "hfi", "gmu";
   2199  clock-names = "gmu", "cxo", "axi", "memnoc";
sc8180x.dtsi
   2212  qcom,gmu = <&gmu>;
   2255  gmu: gmu@2c6a000 {  label
   2256  compatible = "qcom,adreno-gmu-680.1", "qcom,adreno-gmu";
   2261  reg-names = "gmu",
   2267  interrupt-names = "hfi", "gmu";
   2274  clock-names = "ahb", "gmu", "cxo", "axi", "memnoc";
sm8350.dtsi
   1850  qcom,gmu = <&gmu>;
   1914  gmu: gmu@3d6a000 {  label
   1915  compatible = "qcom,adreno-gmu-660.1", "qcom,adreno-gmu";
   1920  reg-names = "gmu", "rscc", "gmu_pdc";
   1924  interrupt-names = "hfi", "gmu";
   1933  clock-names = "gmu",
sm6115.dtsi
   1333  /* There's no (real) GMU, so we have to handle quite a bunch of clocks! */
   1344  "gmu",
   1352  qcom,gmu = <&gmu_wrapper>;
   1417  gmu_wrapper: gmu@596a000 {
   1418  compatible = "qcom,adreno-gmu-wrapper";
   1420  reg-names = "gmu";
sm8150-hdk.dts
    358  &gmu {
sm8150-mtp.dts
    353  &gmu {
sm8150.dtsi
   2213  qcom,gmu = <&gmu>;
   2265  gmu: gmu@2c6a000 {  label
   2266  compatible = "qcom,adreno-gmu-640.1", "qcom,adreno-gmu";
   2271  reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
   2275  interrupt-names = "hfi", "gmu";
   2282  clock-names = "ahb", "gmu", "cxo", "axi", "memnoc";
sdm845.dtsi
   4778  * controlled entirely by the GMU
   4787  qcom,gmu = <&gmu>;
   4863  gmu: gmu@506a000 {  label
   4864  compatible = "qcom,adreno-gmu-630.2", "qcom,adreno-gmu";
   4869  reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
   4873  interrupt-names = "hfi", "gmu";
   4879  clock-names = "gmu", "cxo", "axi", "memnoc";
sm8250-hdk.dts
    368  &gmu {
sc8280xp.dtsi
   2359  qcom,gmu = <&gmu>;
   2419  gmu: gmu@3d6a000 {  label
   2420  compatible = "qcom,adreno-gmu-690.0", "qcom,adreno-gmu";
   2424  reg-names = "gmu", "rscc", "gmu_pdc";
   2427  interrupt-names = "hfi", "gmu";
   2435  clock-names = "gmu",
/Linux-v6.6/Documentation/devicetree/bindings/sram/
qcom,ocmem.yaml
    120  gmu-sram@0 {
/Linux-v6.6/drivers/clk/qcom/
gdsc.c
    540  * On SDM845+ the GPU GX domain is *almost* entirely controlled by the GMU
    545  * the GMU crashes it could leave the GX on. In order to successfully bring back
    554  * driver. During power up, nothing will happen from the CPU (and the GMU will
