Lines Matching full:gmu

17 static void a6xx_gmu_fault(struct a6xx_gmu *gmu)  in a6xx_gmu_fault()  argument
19 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_fault()
26 gmu->hung = true; in a6xx_gmu_fault()
37 struct a6xx_gmu *gmu = data; in a6xx_gmu_irq() local
40 status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS); in a6xx_gmu_irq()
41 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status); in a6xx_gmu_irq()
44 dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n"); in a6xx_gmu_irq()
46 a6xx_gmu_fault(gmu); in a6xx_gmu_irq()
50 dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n"); in a6xx_gmu_irq()
53 dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n", in a6xx_gmu_irq()
54 gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS)); in a6xx_gmu_irq()
61 struct a6xx_gmu *gmu = data; in a6xx_hfi_irq() local
64 status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO); in a6xx_hfi_irq()
65 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status); in a6xx_hfi_irq()
68 dev_err_ratelimited(gmu->dev, "GMU firmware fault\n"); in a6xx_hfi_irq()
70 a6xx_gmu_fault(gmu); in a6xx_hfi_irq()
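Both interrupt handlers above share the same latch-and-acknowledge pattern. A minimal sketch, assuming the driver-local gmu_read()/gmu_write() accessors from a6xx_gmu.h; the watchdog bit name follows the generated register headers and the recovery detail follows the a6xx_gmu_fault() fragment above:

	static irqreturn_t sketch_gmu_irq(int irq, void *data)
	{
		struct a6xx_gmu *gmu = data;
		u32 status;

		/* Latch the pending sources and acknowledge them (write-1-to-clear) */
		status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
		gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

		if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
			dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
			/* a6xx_gmu_fault() marks gmu->hung = true and kicks recovery */
			a6xx_gmu_fault(gmu);
		}

		return IRQ_HANDLED;
	}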
76 bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu) in a6xx_gmu_sptprac_is_on() argument
80 /* This can be called from gpu state code so make sure GMU is valid */ in a6xx_gmu_sptprac_is_on()
81 if (!gmu->initialized) in a6xx_gmu_sptprac_is_on()
84 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); in a6xx_gmu_sptprac_is_on()
92 bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu) in a6xx_gmu_gx_is_on() argument
96 /* This can be called from gpu state code so make sure GMU is valid */ in a6xx_gmu_gx_is_on()
97 if (!gmu->initialized) in a6xx_gmu_gx_is_on()
100 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); in a6xx_gmu_gx_is_on()
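The two *_is_on() helpers above decide liveness from the power/clock status register. A hedged sketch of the GX check; the POWER_OFF/CLK_OFF bit names follow the generated a6xx register header and are an assumption here:

	static bool sketch_gx_is_on(struct a6xx_gmu *gmu)
	{
		u32 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

		/* GX is on only when neither its GDSC nor its clock reports "off" */
		return !(val & (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
				A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
	}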
111 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_set_freq() local
118 if (gpu_freq == gmu->freq) in a6xx_gmu_set_freq()
121 for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++) in a6xx_gmu_set_freq()
122 if (gpu_freq == gmu->gpu_freqs[perf_index]) in a6xx_gmu_set_freq()
125 gmu->current_perf_index = perf_index; in a6xx_gmu_set_freq()
126 gmu->freq = gmu->gpu_freqs[perf_index]; in a6xx_gmu_set_freq()
128 trace_msm_gmu_freq_change(gmu->freq, perf_index); in a6xx_gmu_set_freq()
134 if (pm_runtime_get_if_in_use(gmu->dev) == 0) in a6xx_gmu_set_freq()
137 if (!gmu->legacy) { in a6xx_gmu_set_freq()
138 a6xx_hfi_set_freq(gmu, perf_index); in a6xx_gmu_set_freq()
140 pm_runtime_put(gmu->dev); in a6xx_gmu_set_freq()
144 gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0); in a6xx_gmu_set_freq()
146 gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING, in a6xx_gmu_set_freq()
153 gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff); in a6xx_gmu_set_freq()
155 /* Set and clear the OOB for DCVS to trigger the GMU */ in a6xx_gmu_set_freq()
156 a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET); in a6xx_gmu_set_freq()
157 a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET); in a6xx_gmu_set_freq()
159 ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN); in a6xx_gmu_set_freq()
161 dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); in a6xx_gmu_set_freq()
164 pm_runtime_put(gmu->dev); in a6xx_gmu_set_freq()
171 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_get_freq() local
173 return gmu->freq; in a6xx_gmu_get_freq()
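a6xx_gmu_set_freq() above has two request paths. A condensed, hedged sketch based on the fragments in this listing; the PERF_SETTING encoding is illustrative:

	if (!gmu->legacy) {
		/* Newer firmware: a single HFI DCVS message does everything */
		a6xx_hfi_set_freq(gmu, perf_index);
	} else {
		/* Legacy firmware: program the DCVS registers ... */
		gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
		gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
			  ((3 & 0xf) << 28) | perf_index);
		/* ... vote for maximum bus bandwidth ... */
		gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

		/* ... and kick the GMU with the DCVS out-of-band handshake */
		a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
		a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

		ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
		if (ret)
			dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
	}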
176 static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu) in a6xx_gmu_check_idle_level() argument
179 int local = gmu->idle_level; in a6xx_gmu_check_idle_level()
182 if (gmu->idle_level == GMU_IDLE_STATE_SPTP) in a6xx_gmu_check_idle_level()
185 val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); in a6xx_gmu_check_idle_level()
188 if (gmu->idle_level != GMU_IDLE_STATE_IFPC || in a6xx_gmu_check_idle_level()
189 !a6xx_gmu_gx_is_on(gmu)) in a6xx_gmu_check_idle_level()
196 /* Wait for the GMU to get to its most idle state */
197 int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu) in a6xx_gmu_wait_for_idle() argument
199 return spin_until(a6xx_gmu_check_idle_level(gmu)); in a6xx_gmu_wait_for_idle()
202 static int a6xx_gmu_start(struct a6xx_gmu *gmu) in a6xx_gmu_start() argument
208 val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8); in a6xx_gmu_start()
217 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); in a6xx_gmu_start()
222 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0); in a6xx_gmu_start()
224 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0); in a6xx_gmu_start()
226 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val, in a6xx_gmu_start()
230 DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); in a6xx_gmu_start()
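gmu_poll_timeout(), used throughout this file, is a thin wrapper around readl_poll_timeout(); the listing truncates its condition and timeout arguments. A hedged sketch of the macro, not the verbatim a6xx_gmu.h definition:

	/* Register offsets are in dwords, hence the << 2 to get a byte offset;
	 * 'val' receives the last value read, 'cond' is the exit condition,
	 * and the last two arguments are the poll interval and timeout in us. */
	#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
		readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
				   interval, timeout)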
235 static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu) in a6xx_gmu_hfi_start() argument
240 gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1); in a6xx_gmu_hfi_start()
242 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val, in a6xx_gmu_hfi_start()
245 DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n"); in a6xx_gmu_hfi_start()
250 /* Trigger an OOB (out of band) request to the GMU */
251 int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) in a6xx_gmu_set_oob() argument
260 if (gmu->legacy) { in a6xx_gmu_set_oob()
284 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request); in a6xx_gmu_set_oob()
287 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, in a6xx_gmu_set_oob()
291 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_set_oob()
292 "Timeout waiting for GMU OOB set %s: 0x%x\n", in a6xx_gmu_set_oob()
294 gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO)); in a6xx_gmu_set_oob()
297 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack); in a6xx_gmu_set_oob()
302 /* Clear a pending OOB state in the GMU */
303 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) in a6xx_gmu_clear_oob() argument
305 if (!gmu->legacy) { in a6xx_gmu_clear_oob()
307 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, in a6xx_gmu_clear_oob()
314 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, in a6xx_gmu_clear_oob()
318 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, in a6xx_gmu_clear_oob()
322 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, in a6xx_gmu_clear_oob()
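The OOB set/clear pair above implements a simple doorbell handshake. A hedged sketch of the set path; the request/ack bit numbers come from the OOB state and are not shown in this listing, and the poll interval/timeout are illustrative:

	/* Raise the request bit towards the GMU ... */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* ... wait for the GMU to raise the matching ack bit ... */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Timeout waiting for GMU OOB set\n");

	/* ... and acknowledge it so the next OOB request starts clean */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);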
329 static int a6xx_sptprac_enable(struct a6xx_gmu *gmu) in a6xx_sptprac_enable() argument
334 if (!gmu->legacy) in a6xx_sptprac_enable()
337 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000); in a6xx_sptprac_enable()
339 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val, in a6xx_sptprac_enable()
343 DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n", in a6xx_sptprac_enable()
344 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS)); in a6xx_sptprac_enable()
351 static void a6xx_sptprac_disable(struct a6xx_gmu *gmu) in a6xx_sptprac_disable() argument
356 if (!gmu->legacy) in a6xx_sptprac_disable()
360 gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11)); in a6xx_sptprac_disable()
362 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001); in a6xx_sptprac_disable()
364 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val, in a6xx_sptprac_disable()
368 DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n", in a6xx_sptprac_disable()
369 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS)); in a6xx_sptprac_disable()
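gmu_rmw(), used above to set bit 11 of GX_GDSCR, is the driver's read-modify-write helper. Roughly, a sketch of the a6xx_gmu.h inline, assuming it mirrors the usual msm accessor pattern:

	static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
	{
		u32 val = gmu_read(gmu, reg);

		val &= ~mask;			/* clear the bits named by 'mask' ... */
		gmu_write(gmu, reg, val | or);	/* ... then OR in the new value */
	}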
372 /* Let the GMU know we are starting a boot sequence */
373 static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu) in a6xx_gmu_gfx_rail_on() argument
377 /* Let the GMU know we are getting ready for boot */ in a6xx_gmu_gfx_rail_on()
378 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0); in a6xx_gmu_gfx_rail_on()
381 vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1]; in a6xx_gmu_gfx_rail_on()
383 gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff); in a6xx_gmu_gfx_rail_on()
384 gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff); in a6xx_gmu_gfx_rail_on()
386 /* Let the GMU know the boot sequence has started */ in a6xx_gmu_gfx_rail_on()
387 return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER); in a6xx_gmu_gfx_rail_on()
390 /* Let the GMU know that we are about to go into slumber */
391 static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu) in a6xx_gmu_notify_slumber() argument
395 /* Disable the power counter so the GMU isn't busy */ in a6xx_gmu_notify_slumber()
396 gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0); in a6xx_gmu_notify_slumber()
399 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) in a6xx_gmu_notify_slumber()
400 a6xx_sptprac_disable(gmu); in a6xx_gmu_notify_slumber()
402 if (!gmu->legacy) { in a6xx_gmu_notify_slumber()
403 ret = a6xx_hfi_send_prep_slumber(gmu); in a6xx_gmu_notify_slumber()
407 /* Tell the GMU to get ready to slumber */ in a6xx_gmu_notify_slumber()
408 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1); in a6xx_gmu_notify_slumber()
410 ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER); in a6xx_gmu_notify_slumber()
411 a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER); in a6xx_gmu_notify_slumber()
414 /* Check to see if the GMU really did slumber */ in a6xx_gmu_notify_slumber()
415 if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE) in a6xx_gmu_notify_slumber()
417 DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n"); in a6xx_gmu_notify_slumber()
424 gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); in a6xx_gmu_notify_slumber()
428 static int a6xx_rpmh_start(struct a6xx_gmu *gmu) in a6xx_rpmh_start() argument
433 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1); in a6xx_rpmh_start()
437 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val, in a6xx_rpmh_start()
440 DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n"); in a6xx_rpmh_start()
444 ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val, in a6xx_rpmh_start()
448 DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n"); in a6xx_rpmh_start()
452 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); in a6xx_rpmh_start()
454 /* Set up CX GMU counter 0 to count busy ticks */ in a6xx_rpmh_start()
455 gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000); in a6xx_rpmh_start()
456 gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20); in a6xx_rpmh_start()
459 gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1); in a6xx_rpmh_start()
463 static void a6xx_rpmh_stop(struct a6xx_gmu *gmu) in a6xx_rpmh_stop() argument
468 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1); in a6xx_rpmh_stop()
470 ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, in a6xx_rpmh_stop()
473 DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n"); in a6xx_rpmh_stop()
475 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); in a6xx_rpmh_stop()
486 static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) in a6xx_gmu_rpmh_init() argument
488 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_rpmh_init()
490 struct platform_device *pdev = to_platform_device(gmu->dev); in a6xx_gmu_rpmh_init()
506 gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24)); in a6xx_gmu_rpmh_init()
509 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1); in a6xx_gmu_rpmh_init()
510 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0); in a6xx_gmu_rpmh_init()
511 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0); in a6xx_gmu_rpmh_init()
512 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0); in a6xx_gmu_rpmh_init()
513 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0); in a6xx_gmu_rpmh_init()
514 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000); in a6xx_gmu_rpmh_init()
515 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0); in a6xx_gmu_rpmh_init()
516 gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0); in a6xx_gmu_rpmh_init()
517 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520); in a6xx_gmu_rpmh_init()
518 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510); in a6xx_gmu_rpmh_init()
519 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514); in a6xx_gmu_rpmh_init()
523 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0); in a6xx_gmu_rpmh_init()
524 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab); in a6xx_gmu_rpmh_init()
525 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581); in a6xx_gmu_rpmh_init()
526 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2); in a6xx_gmu_rpmh_init()
527 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad); in a6xx_gmu_rpmh_init()
529 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0); in a6xx_gmu_rpmh_init()
530 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7); in a6xx_gmu_rpmh_init()
531 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1); in a6xx_gmu_rpmh_init()
532 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2); in a6xx_gmu_rpmh_init()
533 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8); in a6xx_gmu_rpmh_init()
597 /* Set up the idle state for the GMU */
598 static void a6xx_gmu_power_config(struct a6xx_gmu *gmu) in a6xx_gmu_power_config() argument
600 /* Disable GMU WB/RB buffer */ in a6xx_gmu_power_config()
601 gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1); in a6xx_gmu_power_config()
602 gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1); in a6xx_gmu_power_config()
603 gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1); in a6xx_gmu_power_config()
605 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400); in a6xx_gmu_power_config()
607 switch (gmu->idle_level) { in a6xx_gmu_power_config()
609 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST, in a6xx_gmu_power_config()
611 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0, in a6xx_gmu_power_config()
616 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST, in a6xx_gmu_power_config()
618 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0, in a6xx_gmu_power_config()
624 gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0, in a6xx_gmu_power_config()
656 static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu) in a6xx_gmu_fw_load() argument
658 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_fw_load()
670 if (gmu->legacy) { in a6xx_gmu_fw_load()
673 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_fw_load()
674 "GMU firmware is bigger than the available region\n"); in a6xx_gmu_fw_load()
678 gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START, in a6xx_gmu_fw_load()
692 gmu_write_bulk(gmu, in a6xx_gmu_fw_load()
697 gmu_write_bulk(gmu, in a6xx_gmu_fw_load()
700 } else if (!fw_block_mem(&gmu->icache, blk) && in a6xx_gmu_fw_load()
701 !fw_block_mem(&gmu->dcache, blk) && in a6xx_gmu_fw_load()
702 !fw_block_mem(&gmu->dummy, blk)) { in a6xx_gmu_fw_load()
703 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_fw_load()
712 static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) in a6xx_gmu_fw_start() argument
715 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_fw_start()
721 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1); in a6xx_gmu_fw_start()
724 ret = a6xx_rpmh_start(gmu); in a6xx_gmu_fw_start()
729 "GMU firmware is not loaded\n")) in a6xx_gmu_fw_start()
733 gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1); in a6xx_gmu_fw_start()
737 a6xx_gmu_rpmh_init(gmu); in a6xx_gmu_fw_start()
740 ret = a6xx_rpmh_start(gmu); in a6xx_gmu_fw_start()
745 ret = a6xx_gmu_fw_load(gmu); in a6xx_gmu_fw_start()
750 gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0); in a6xx_gmu_fw_start()
751 gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02); in a6xx_gmu_fw_start()
754 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova); in a6xx_gmu_fw_start()
755 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1); in a6xx_gmu_fw_start()
757 gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0, in a6xx_gmu_fw_start()
765 gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid); in a6xx_gmu_fw_start()
767 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG, in a6xx_gmu_fw_start()
768 gmu->log.iova | (gmu->log.size / SZ_4K - 1)); in a6xx_gmu_fw_start()
770 /* Set up the lowest idle level on the GMU */ in a6xx_gmu_fw_start()
771 a6xx_gmu_power_config(gmu); in a6xx_gmu_fw_start()
773 ret = a6xx_gmu_start(gmu); in a6xx_gmu_fw_start()
777 if (gmu->legacy) { in a6xx_gmu_fw_start()
778 ret = a6xx_gmu_gfx_rail_on(gmu); in a6xx_gmu_fw_start()
784 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) { in a6xx_gmu_fw_start()
785 ret = a6xx_sptprac_enable(gmu); in a6xx_gmu_fw_start()
790 ret = a6xx_gmu_hfi_start(gmu); in a6xx_gmu_fw_start()
808 static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu) in a6xx_gmu_irq_disable() argument
810 disable_irq(gmu->gmu_irq); in a6xx_gmu_irq_disable()
811 disable_irq(gmu->hfi_irq); in a6xx_gmu_irq_disable()
813 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0); in a6xx_gmu_irq_disable()
814 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0); in a6xx_gmu_irq_disable()
817 static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu) in a6xx_gmu_rpmh_off() argument
822 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val, in a6xx_gmu_rpmh_off()
824 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val, in a6xx_gmu_rpmh_off()
826 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val, in a6xx_gmu_rpmh_off()
828 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val, in a6xx_gmu_rpmh_off()
832 /* Force the GMU off in case it isn't responsive */
833 static void a6xx_gmu_force_off(struct a6xx_gmu *gmu) in a6xx_gmu_force_off() argument
836 a6xx_hfi_stop(gmu); in a6xx_gmu_force_off()
839 a6xx_gmu_irq_disable(gmu); in a6xx_gmu_force_off()
841 /* Force off SPTP in case the GMU is managing it */ in a6xx_gmu_force_off()
842 a6xx_sptprac_disable(gmu); in a6xx_gmu_force_off()
845 a6xx_gmu_rpmh_off(gmu); in a6xx_gmu_force_off()
848 static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu) in a6xx_gmu_set_initial_freq() argument
851 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; in a6xx_gmu_set_initial_freq()
857 gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */ in a6xx_gmu_set_initial_freq()
862 static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu) in a6xx_gmu_set_initial_bw() argument
865 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; in a6xx_gmu_set_initial_bw()
879 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_resume() local
882 if (WARN(!gmu->initialized, "The GMU is not set up yet\n")) in a6xx_gmu_resume()
885 gmu->hung = false; in a6xx_gmu_resume()
888 pm_runtime_get_sync(gmu->dev); in a6xx_gmu_resume()
893 * bring down the GX after a GMU failure in a6xx_gmu_resume()
895 if (!IS_ERR_OR_NULL(gmu->gxpd)) in a6xx_gmu_resume()
896 pm_runtime_get_sync(gmu->gxpd); in a6xx_gmu_resume()
898 /* Use a known rate to bring up the GMU */ in a6xx_gmu_resume()
899 clk_set_rate(gmu->core_clk, 200000000); in a6xx_gmu_resume()
900 ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); in a6xx_gmu_resume()
902 pm_runtime_put(gmu->gxpd); in a6xx_gmu_resume()
903 pm_runtime_put(gmu->dev); in a6xx_gmu_resume()
908 a6xx_gmu_set_initial_bw(gpu, gmu); in a6xx_gmu_resume()
910 /* Enable the GMU interrupt */ in a6xx_gmu_resume()
911 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0); in a6xx_gmu_resume()
912 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK); in a6xx_gmu_resume()
913 enable_irq(gmu->gmu_irq); in a6xx_gmu_resume()
916 status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ? in a6xx_gmu_resume()
923 if (!gmu->legacy) in a6xx_gmu_resume()
926 ret = a6xx_gmu_fw_start(gmu, status); in a6xx_gmu_resume()
930 ret = a6xx_hfi_start(gmu, status); in a6xx_gmu_resume()
935 * Turn on the GMU firmware fault interrupt after we know the boot in a6xx_gmu_resume()
938 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0); in a6xx_gmu_resume()
939 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK); in a6xx_gmu_resume()
940 enable_irq(gmu->hfi_irq); in a6xx_gmu_resume()
943 a6xx_gmu_set_initial_freq(gpu, gmu); in a6xx_gmu_resume()
946 /* On failure, shut down the GMU to leave it in a good state */ in a6xx_gmu_resume()
948 disable_irq(gmu->gmu_irq); in a6xx_gmu_resume()
949 a6xx_rpmh_stop(gmu); in a6xx_gmu_resume()
950 pm_runtime_put(gmu->gxpd); in a6xx_gmu_resume()
951 pm_runtime_put(gmu->dev); in a6xx_gmu_resume()
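For orientation, a hedged summary of the a6xx_gmu_resume() ordering visible in the fragments above (warm/cold boot constant names are assumptions):

	/*
	 * 1. pm_runtime_get_sync() on the GMU device, then on the GX domain
	 * 2. clk_set_rate(core_clk, 200 MHz) and clk_bulk_prepare_enable()
	 * 3. program the initial bandwidth vote
	 * 4. clear + unmask the AO interrupt and enable_irq(gmu->gmu_irq)
	 * 5. pick warm vs cold boot from REG_A6XX_GMU_GENERAL_7, then
	 *    a6xx_gmu_fw_start() and a6xx_hfi_start()
	 * 6. clear + unmask the HFI interrupt and enable_irq(gmu->hfi_irq)
	 * 7. request the initial GPU frequency
	 * On any failure: disable gmu_irq, a6xx_rpmh_stop(), drop the PM refs.
	 */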
957 bool a6xx_gmu_isidle(struct a6xx_gmu *gmu) in a6xx_gmu_isidle() argument
961 if (!gmu->initialized) in a6xx_gmu_isidle()
964 reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS); in a6xx_gmu_isidle()
1002 /* Gracefully try to shut down the GMU and by extension the GPU */
1003 static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) in a6xx_gmu_shutdown() argument
1005 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_shutdown()
1010 * The GMU may still be in slumber unless the GPU started so check and in a6xx_gmu_shutdown()
1013 val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); in a6xx_gmu_shutdown()
1016 int ret = a6xx_gmu_wait_for_idle(gmu); in a6xx_gmu_shutdown()
1018 /* If the GMU isn't responding, assume it is hung */ in a6xx_gmu_shutdown()
1020 a6xx_gmu_force_off(gmu); in a6xx_gmu_shutdown()
1026 /* tell the GMU we want to slumber */ in a6xx_gmu_shutdown()
1027 a6xx_gmu_notify_slumber(gmu); in a6xx_gmu_shutdown()
1029 ret = gmu_poll_timeout(gmu, in a6xx_gmu_shutdown()
1040 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_shutdown()
1041 "Unable to slumber GMU: status = 0%x/0%x\n", in a6xx_gmu_shutdown()
1042 gmu_read(gmu, in a6xx_gmu_shutdown()
1044 gmu_read(gmu, in a6xx_gmu_shutdown()
1049 a6xx_hfi_stop(gmu); in a6xx_gmu_shutdown()
1052 a6xx_gmu_irq_disable(gmu); in a6xx_gmu_shutdown()
1055 a6xx_rpmh_stop(gmu); in a6xx_gmu_shutdown()
1061 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_stop() local
1064 if (!pm_runtime_active(gmu->dev)) in a6xx_gmu_stop()
1068 * Force the GMU off if we detected a hang, otherwise try to shut it in a6xx_gmu_stop()
1071 if (gmu->hung) in a6xx_gmu_stop()
1072 a6xx_gmu_force_off(gmu); in a6xx_gmu_stop()
1074 a6xx_gmu_shutdown(gmu); in a6xx_gmu_stop()
1080 * Make sure the GX domain is off before turning off the GMU (CX) in a6xx_gmu_stop()
1081 * domain. Usually the GMU does this but only if the shutdown sequence in a6xx_gmu_stop()
1084 if (!IS_ERR_OR_NULL(gmu->gxpd)) in a6xx_gmu_stop()
1085 pm_runtime_put_sync(gmu->gxpd); in a6xx_gmu_stop()
1087 clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); in a6xx_gmu_stop()
1089 pm_runtime_put_sync(gmu->dev); in a6xx_gmu_stop()
1094 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu) in a6xx_gmu_memory_free() argument
1096 msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace, false); in a6xx_gmu_memory_free()
1097 msm_gem_kernel_put(gmu->debug.obj, gmu->aspace, false); in a6xx_gmu_memory_free()
1098 msm_gem_kernel_put(gmu->icache.obj, gmu->aspace, false); in a6xx_gmu_memory_free()
1099 msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace, false); in a6xx_gmu_memory_free()
1100 msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace, false); in a6xx_gmu_memory_free()
1101 msm_gem_kernel_put(gmu->log.obj, gmu->aspace, false); in a6xx_gmu_memory_free()
1103 gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu); in a6xx_gmu_memory_free()
1104 msm_gem_address_space_put(gmu->aspace); in a6xx_gmu_memory_free()
1107 static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo, in a6xx_gmu_memory_alloc() argument
1110 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_memory_alloc()
1118 /* no fixed address - use GMU's uncached range */ in a6xx_gmu_memory_alloc()
1133 ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova, in a6xx_gmu_memory_alloc()
1146 static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu) in a6xx_gmu_memory_probe() argument
1155 mmu = msm_iommu_new(gmu->dev, domain); in a6xx_gmu_memory_probe()
1156 gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000); in a6xx_gmu_memory_probe()
1157 if (IS_ERR(gmu->aspace)) { in a6xx_gmu_memory_probe()
1159 return PTR_ERR(gmu->aspace); in a6xx_gmu_memory_probe()
1259 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
1264 static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu) in a6xx_gmu_rpmh_votes_init() argument
1266 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_rpmh_votes_init()
1272 ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes, in a6xx_gmu_rpmh_votes_init()
1273 gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl"); in a6xx_gmu_rpmh_votes_init()
1276 ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes, in a6xx_gmu_rpmh_votes_init()
1277 gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl"); in a6xx_gmu_rpmh_votes_init()
1296 "The GMU frequency table is being truncated\n")) in a6xx_gmu_build_freq_table()
1314 static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu) in a6xx_gmu_pwrlevels_probe() argument
1316 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_pwrlevels_probe()
1323 * The GMU handles its own frequency switching so build a list of in a6xx_gmu_pwrlevels_probe()
1326 ret = dev_pm_opp_of_add_table(gmu->dev); in a6xx_gmu_pwrlevels_probe()
1328 DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n"); in a6xx_gmu_pwrlevels_probe()
1332 gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev, in a6xx_gmu_pwrlevels_probe()
1333 gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs)); in a6xx_gmu_pwrlevels_probe()
1336 * The GMU also handles GPU frequency switching so build a list in a6xx_gmu_pwrlevels_probe()
1339 gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev, in a6xx_gmu_pwrlevels_probe()
1340 gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs)); in a6xx_gmu_pwrlevels_probe()
1342 gmu->current_perf_index = gmu->nr_gpu_freqs - 1; in a6xx_gmu_pwrlevels_probe()
1344 /* Build the list of RPMh votes that we'll send to the GMU */ in a6xx_gmu_pwrlevels_probe()
1345 return a6xx_gmu_rpmh_votes_init(gmu); in a6xx_gmu_pwrlevels_probe()
1348 static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu) in a6xx_gmu_clocks_probe() argument
1350 int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks); in a6xx_gmu_clocks_probe()
1355 gmu->nr_clocks = ret; in a6xx_gmu_clocks_probe()
1357 gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks, in a6xx_gmu_clocks_probe()
1358 gmu->nr_clocks, "gmu"); in a6xx_gmu_clocks_probe()
1384 static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev, in a6xx_gmu_get_irq() argument
1391 ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu); in a6xx_gmu_get_irq()
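a6xx_gmu_get_irq() above wires up one named interrupt. A hedged reconstruction of the usual shape (error-message text is illustrative): look the IRQ up by name, install the handler, and leave it disabled until resume enables it:

	static int sketch_gmu_get_irq(struct a6xx_gmu *gmu,
			struct platform_device *pdev,
			const char *name, irq_handler_t handler)
	{
		int irq, ret;

		irq = platform_get_irq_byname(pdev, name);

		ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
		if (ret) {
			DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s: %d\n",
				      name, ret);
			return ret;
		}

		/* Leave it masked; a6xx_gmu_resume() enables it once the GMU is up */
		disable_irq(irq);

		return irq;
	}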
1405 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_remove() local
1406 struct platform_device *pdev = to_platform_device(gmu->dev); in a6xx_gmu_remove()
1408 if (!gmu->initialized) in a6xx_gmu_remove()
1411 pm_runtime_force_suspend(gmu->dev); in a6xx_gmu_remove()
1413 if (!IS_ERR_OR_NULL(gmu->gxpd)) { in a6xx_gmu_remove()
1414 pm_runtime_disable(gmu->gxpd); in a6xx_gmu_remove()
1415 dev_pm_domain_detach(gmu->gxpd, false); in a6xx_gmu_remove()
1418 iounmap(gmu->mmio); in a6xx_gmu_remove()
1420 iounmap(gmu->rscc); in a6xx_gmu_remove()
1421 gmu->mmio = NULL; in a6xx_gmu_remove()
1422 gmu->rscc = NULL; in a6xx_gmu_remove()
1424 a6xx_gmu_memory_free(gmu); in a6xx_gmu_remove()
1426 free_irq(gmu->gmu_irq, gmu); in a6xx_gmu_remove()
1427 free_irq(gmu->hfi_irq, gmu); in a6xx_gmu_remove()
1430 put_device(gmu->dev); in a6xx_gmu_remove()
1432 gmu->initialized = false; in a6xx_gmu_remove()
1438 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_init() local
1445 gmu->dev = &pdev->dev; in a6xx_gmu_init()
1447 of_dma_configure(gmu->dev, node, true); in a6xx_gmu_init()
1450 gmu->idle_level = GMU_IDLE_STATE_ACTIVE; in a6xx_gmu_init()
1452 pm_runtime_enable(gmu->dev); in a6xx_gmu_init()
1455 ret = a6xx_gmu_clocks_probe(gmu); in a6xx_gmu_init()
1459 ret = a6xx_gmu_memory_probe(gmu); in a6xx_gmu_init()
1463 /* Allocate memory for the GMU dummy page */ in a6xx_gmu_init()
1464 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, SZ_4K, 0x60000000); in a6xx_gmu_init()
1469 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, in a6xx_gmu_init()
1474 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, in a6xx_gmu_init()
1479 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache, in a6xx_gmu_init()
1485 gmu->legacy = true; in a6xx_gmu_init()
1487 /* Allocate memory for the GMU debug region */ in a6xx_gmu_init()
1488 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0); in a6xx_gmu_init()
1494 ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0); in a6xx_gmu_init()
1498 /* Allocate memory for the GMU log region */ in a6xx_gmu_init()
1499 ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0); in a6xx_gmu_init()
1503 /* Map the GMU registers */ in a6xx_gmu_init()
1504 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); in a6xx_gmu_init()
1505 if (IS_ERR(gmu->mmio)) { in a6xx_gmu_init()
1506 ret = PTR_ERR(gmu->mmio); in a6xx_gmu_init()
1511 gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc"); in a6xx_gmu_init()
1512 if (IS_ERR(gmu->rscc)) in a6xx_gmu_init()
1515 gmu->rscc = gmu->mmio + 0x23000; in a6xx_gmu_init()
1518 /* Get the HFI and GMU interrupts */ in a6xx_gmu_init()
1519 gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq); in a6xx_gmu_init()
1520 gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq); in a6xx_gmu_init()
1522 if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) in a6xx_gmu_init()
1526 * Get a link to the GX power domain to reset the GPU in case of GMU in a6xx_gmu_init()
1529 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); in a6xx_gmu_init()
1531 /* Get the power levels for the GMU and GPU */ in a6xx_gmu_init()
1532 a6xx_gmu_pwrlevels_probe(gmu); in a6xx_gmu_init()
1535 a6xx_hfi_init(gmu); in a6xx_gmu_init()
1537 gmu->initialized = true; in a6xx_gmu_init()
1542 iounmap(gmu->mmio); in a6xx_gmu_init()
1544 iounmap(gmu->rscc); in a6xx_gmu_init()
1545 free_irq(gmu->gmu_irq, gmu); in a6xx_gmu_init()
1546 free_irq(gmu->hfi_irq, gmu); in a6xx_gmu_init()
1551 a6xx_gmu_memory_free(gmu); in a6xx_gmu_init()
1554 put_device(gmu->dev); in a6xx_gmu_init()