Lines Matching +full:tcs +full:- +full:wait in drivers/gpu/drm/msm/adreno/a6xx_gmu.c

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
8 #include <soc/qcom/cmd-db.h>
20 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_fault()
21 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_fault()
22 struct drm_device *dev = gpu->dev; in a6xx_gmu_fault()
23 struct msm_drm_private *priv = dev->dev_private; in a6xx_gmu_fault()
26 gmu->hung = true; in a6xx_gmu_fault()
29 del_timer(&gpu->hangcheck_timer); in a6xx_gmu_fault()
32 queue_work(priv->wq, &gpu->recover_work); in a6xx_gmu_fault()
44 dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n"); in a6xx_gmu_irq()
50 dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n"); in a6xx_gmu_irq()
53 dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n", in a6xx_gmu_irq()
68 dev_err_ratelimited(gmu->dev, "GMU firmware fault\n"); in a6xx_hfi_irq()
81 if (!gmu->initialized) in a6xx_gmu_sptprac_is_on()
97 if (!gmu->initialized) in a6xx_gmu_gx_is_on()
111 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_set_freq()
118 if (gpu_freq == gmu->freq) in a6xx_gmu_set_freq()
121 for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++) in a6xx_gmu_set_freq()
122 if (gpu_freq == gmu->gpu_freqs[perf_index]) in a6xx_gmu_set_freq()
125 gmu->current_perf_index = perf_index; in a6xx_gmu_set_freq()
126 gmu->freq = gmu->gpu_freqs[perf_index]; in a6xx_gmu_set_freq()
128 trace_msm_gmu_freq_change(gmu->freq, perf_index); in a6xx_gmu_set_freq()
134 if (pm_runtime_get_if_in_use(gmu->dev) == 0) in a6xx_gmu_set_freq()
137 if (!gmu->legacy) { in a6xx_gmu_set_freq()
139 dev_pm_opp_set_bw(&gpu->pdev->dev, opp); in a6xx_gmu_set_freq()
140 pm_runtime_put(gmu->dev); in a6xx_gmu_set_freq()
161 dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); in a6xx_gmu_set_freq()
163 dev_pm_opp_set_bw(&gpu->pdev->dev, opp); in a6xx_gmu_set_freq()
164 pm_runtime_put(gmu->dev); in a6xx_gmu_set_freq()
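
Note on the lookup loop above (source lines 121-126): it scans only the first nr_gpu_freqs - 1 entries, so a frequency that matches no table entry leaves perf_index at the last slot, i.e. the highest frequency. A minimal compilable sketch of that behavior; the table values are hypothetical:

#include <stdio.h>

/* Sketch of the perf-index lookup in a6xx_gmu_set_freq(): scan all but
 * the last entry; a miss falls through to the final index, which is the
 * highest frequency. */
static int find_perf_index(const unsigned long *freqs, int nr, unsigned long target)
{
	int i;

	for (i = 0; i < nr - 1; i++)
		if (freqs[i] == target)
			break;

	return i; /* nr - 1 if no entry matched */
}

int main(void)
{
	unsigned long freqs[] = { 257000000, 342000000, 414000000, 515000000 };

	printf("%d\n", find_perf_index(freqs, 4, 342000000)); /* 1 */
	printf("%d\n", find_perf_index(freqs, 4, 999999999)); /* 3: miss -> highest */
	return 0;
}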
171 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_get_freq()
173 return gmu->freq; in a6xx_gmu_get_freq()
179 int local = gmu->idle_level; in a6xx_gmu_check_idle_level()
182 if (gmu->idle_level == GMU_IDLE_STATE_SPTP) in a6xx_gmu_check_idle_level()
188 if (gmu->idle_level != GMU_IDLE_STATE_IFPC || in a6xx_gmu_check_idle_level()
196 /* Wait for the GMU to get to its most idle state */
230 DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); in a6xx_gmu_start()
245 DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n"); in a6xx_gmu_hfi_start()
260 if (gmu->legacy) { in a6xx_gmu_set_oob()
280 return -EINVAL; in a6xx_gmu_set_oob()
286 /* Wait for the acknowledge interrupt */ in a6xx_gmu_set_oob()
291 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_set_oob()
305 if (!gmu->legacy) { in a6xx_gmu_clear_oob()
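
The OOB ("out of band") helpers above follow a request/ack/clear handshake with the GMU: raise a request bit, poll for the matching acknowledge bit with a timeout, then clear the request. A compilable toy sketch of that pattern; the register variables, stub accessors, and bit positions are illustrative stand-ins, not the driver's real layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the real HOST2GMU/GMU2HOST registers. */
static uint32_t host2gmu_intr_set;
static uint32_t gmu2host_intr_info;

static void gmu_write_stub(uint32_t *reg, uint32_t v) { *reg = v; }
static uint32_t gmu_read_stub(const uint32_t *reg) { return *reg; }

/* Request, poll for ack, clear: the shape of a6xx_gmu_set_oob() and
 * a6xx_gmu_clear_oob(). The driver uses a timed register poll where
 * this sketch uses a simple loop budget. */
static int oob_set(unsigned request_bit, unsigned ack_bit, int budget)
{
	gmu_write_stub(&host2gmu_intr_set, 1u << request_bit);
	gmu2host_intr_info |= 1u << ack_bit; /* stub: pretend the GMU acked */

	while (budget--) {
		if (gmu_read_stub(&gmu2host_intr_info) & (1u << ack_bit)) {
			/* the driver then clears its request bit */
			gmu_write_stub(&host2gmu_intr_set, 0);
			return 0;
		}
	}
	return -1; /* -ETIMEDOUT in the driver */
}

int main(void)
{
	printf("oob_set: %d\n", oob_set(0, 24, 100));
	return 0;
}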
334 if (!gmu->legacy) in a6xx_sptprac_enable()
343 DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n", in a6xx_sptprac_enable()
356 if (!gmu->legacy) in a6xx_sptprac_disable()
368 DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n", in a6xx_sptprac_disable()
381 vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1]; in a6xx_gmu_gfx_rail_on()
399 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) in a6xx_gmu_notify_slumber()
402 if (!gmu->legacy) { in a6xx_gmu_notify_slumber()
417 DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n"); in a6xx_gmu_notify_slumber()
418 ret = -ETIMEDOUT; in a6xx_gmu_notify_slumber()
434 /* Wait for the register to finish posting */ in a6xx_rpmh_start()
440 DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n"); in a6xx_rpmh_start()
448 DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n"); in a6xx_rpmh_start()
473 DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n"); in a6xx_rpmh_stop()
489 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_rpmh_init()
490 struct platform_device *pdev = to_platform_device(gmu->dev); in a6xx_gmu_rpmh_init()
543 /* Set TCS commands used by PDC sequence for low power modes */ in a6xx_gmu_rpmh_init()
592 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
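
The PDC/TCS programming above is what the query terms "tcs" and "wait" hit: RPMh requests are built from triplets of resource address, data, and a wait flag, where wait asks the RSC to complete this command before issuing the next one in the sequence. The kernel defines this slot format in include/soc/qcom/tcs.h; the example values below are made up:

#include <linux/types.h>

/* One slot in a Trigger Command Set (from include/soc/qcom/tcs.h). */
struct tcs_cmd {
	u32 addr; /* slave address + offset of the resource */
	u32 data; /* the vote to write */
	u32 wait; /* wait for completion before the next command */
};

/* Hypothetical example: a single blocking vote. */
static const struct tcs_cmd example_cmd = {
	.addr = 0x30010, /* made-up resource address */
	.data = 0x1,
	.wait = 1,
};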
607 switch (gmu->idle_level) { in a6xx_gmu_power_config()
649 if (!in_range(blk->addr, bo->iova, bo->size)) in fw_block_mem()
652 memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size); in fw_block_mem()
659 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_fw_load()
660 const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU]; in a6xx_gmu_fw_load()
670 if (gmu->legacy) { in a6xx_gmu_fw_load()
672 if (fw_image->size > 0x8000) { in a6xx_gmu_fw_load()
673 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_fw_load()
675 return -EINVAL; in a6xx_gmu_fw_load()
679 (u32*) fw_image->data, fw_image->size); in a6xx_gmu_fw_load()
684 for (blk = (const struct block_header *) fw_image->data; in a6xx_gmu_fw_load()
685 (const u8*) blk < fw_image->data + fw_image->size; in a6xx_gmu_fw_load()
686 blk = (const struct block_header *) &blk->data[blk->size >> 2]) { in a6xx_gmu_fw_load()
687 if (blk->size == 0) in a6xx_gmu_fw_load()
690 if (in_range(blk->addr, itcm_base, SZ_16K)) { in a6xx_gmu_fw_load()
691 reg_offset = (blk->addr - itcm_base) >> 2; in a6xx_gmu_fw_load()
694 blk->data, blk->size); in a6xx_gmu_fw_load()
695 } else if (in_range(blk->addr, dtcm_base, SZ_16K)) { in a6xx_gmu_fw_load()
696 reg_offset = (blk->addr - dtcm_base) >> 2; in a6xx_gmu_fw_load()
699 blk->data, blk->size); in a6xx_gmu_fw_load()
700 } else if (!fw_block_mem(&gmu->icache, blk) && in a6xx_gmu_fw_load()
701 !fw_block_mem(&gmu->dcache, blk) && in a6xx_gmu_fw_load()
702 !fw_block_mem(&gmu->dummy, blk)) { in a6xx_gmu_fw_load()
703 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_fw_load()
705 blk->addr, blk->size, blk->data[0]); in a6xx_gmu_fw_load()
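
The firmware walker above (source lines 684-705) iterates variable-length [addr, size, payload] records; the next header starts right after the current payload, hence the &blk->data[blk->size >> 2] step (size is in bytes, data[] is u32). A compilable sketch of the same traversal over an in-memory buffer; the record contents are fabricated:

#include <stdint.h>
#include <stdio.h>

/* Same shape as the driver's block_header: a two-word header followed
 * by size bytes of payload, then the next header. */
struct block_header {
	uint32_t addr;
	uint32_t size;
	uint32_t data[];
};

static void walk(const uint8_t *image, size_t len)
{
	const struct block_header *blk;

	for (blk = (const struct block_header *)image;
	     (const uint8_t *)blk < image + len;
	     /* advance past this block's payload: size is in bytes,
	      * data[] is u32, so index by size >> 2 */
	     blk = (const struct block_header *)&blk->data[blk->size >> 2]) {
		if (blk->size == 0)
			continue; /* the driver skips empty blocks */
		printf("block @0x%x, %u bytes\n",
		       (unsigned)blk->addr, (unsigned)blk->size);
	}
}

int main(void)
{
	/* Two fabricated records: (0x100, 8 bytes), (0x200, 4 bytes). */
	uint32_t image[] = { 0x100, 8, 0xdead, 0xbeef, 0x200, 4, 0xcafe };

	walk((const uint8_t *)image, sizeof(image));
	return 0;
}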
716 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_fw_start()
728 if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU], in a6xx_gmu_fw_start()
730 return -ENOENT; in a6xx_gmu_fw_start()
754 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova); in a6xx_gmu_fw_start()
760 chipid = adreno_gpu->rev.core << 24; in a6xx_gmu_fw_start()
761 chipid |= adreno_gpu->rev.major << 16; in a6xx_gmu_fw_start()
762 chipid |= adreno_gpu->rev.minor << 12; in a6xx_gmu_fw_start()
763 chipid |= adreno_gpu->rev.patchid << 8; in a6xx_gmu_fw_start()
768 gmu->log.iova | (gmu->log.size / SZ_4K - 1)); in a6xx_gmu_fw_start()
777 if (gmu->legacy) { in a6xx_gmu_fw_start()
784 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) { in a6xx_gmu_fw_start()
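
Two bit-packing idioms appear in a6xx_gmu_fw_start() above: the chip id handed to the GMU packs core/major/minor/patchid into one word (source lines 760-763), and the log descriptor ORs the buffer iova with its size in 4K pages minus one (line 768), which works because the iova is page aligned and the low bits are free. A standalone sketch with hypothetical input values:

#include <stdint.h>
#include <stdio.h>

#define SZ_4K 4096

/* chipid layout used by the driver: core[31:24] major[23:16]
 * minor[15:12] patchid[11:8]. */
static uint32_t pack_chipid(uint32_t core, uint32_t major,
			    uint32_t minor, uint32_t patchid)
{
	return (core << 24) | (major << 16) | (minor << 12) | (patchid << 8);
}

/* Log descriptor: page-aligned iova with (pages - 1) in the low bits. */
static uint32_t pack_log(uint32_t iova, uint32_t size)
{
	return iova | (size / SZ_4K - 1);
}

int main(void)
{
	/* Hypothetical values: a 6.3.0.0 part and a single-page log buffer. */
	printf("chipid 0x%08x\n", (unsigned)pack_chipid(6, 3, 0, 0)); /* 0x06030000 */
	printf("log    0x%08x\n", (unsigned)pack_log(0x60001000, SZ_4K));
	return 0;
}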
810 disable_irq(gmu->gmu_irq); in a6xx_gmu_irq_disable()
811 disable_irq(gmu->hfi_irq); in a6xx_gmu_irq_disable()
851 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; in a6xx_gmu_set_initial_freq()
853 gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); in a6xx_gmu_set_initial_freq()
857 gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */ in a6xx_gmu_set_initial_freq()
865 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; in a6xx_gmu_set_initial_bw()
867 gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); in a6xx_gmu_set_initial_bw()
871 dev_pm_opp_set_bw(&gpu->pdev->dev, gpu_opp); in a6xx_gmu_set_initial_bw()
877 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_resume()
878 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_resume()
879 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_resume()
882 if (WARN(!gmu->initialized, "The GMU is not set up yet\n")) in a6xx_gmu_resume()
885 gmu->hung = false; in a6xx_gmu_resume()
888 pm_runtime_get_sync(gmu->dev); in a6xx_gmu_resume()
895 if (!IS_ERR_OR_NULL(gmu->gxpd)) in a6xx_gmu_resume()
896 pm_runtime_get_sync(gmu->gxpd); in a6xx_gmu_resume()
899 clk_set_rate(gmu->core_clk, 200000000); in a6xx_gmu_resume()
900 ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); in a6xx_gmu_resume()
902 pm_runtime_put(gmu->gxpd); in a6xx_gmu_resume()
903 pm_runtime_put(gmu->dev); in a6xx_gmu_resume()
913 enable_irq(gmu->gmu_irq); in a6xx_gmu_resume()
923 if (!gmu->legacy) in a6xx_gmu_resume()
940 enable_irq(gmu->hfi_irq); in a6xx_gmu_resume()
948 disable_irq(gmu->gmu_irq); in a6xx_gmu_resume()
950 pm_runtime_put(gmu->gxpd); in a6xx_gmu_resume()
951 pm_runtime_put(gmu->dev); in a6xx_gmu_resume()
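
The resume path above acquires in a fixed order (GMU runtime PM, then the GX power domain, then clocks and IRQs), and its failure paths release in reverse order, as the pm_runtime_put() pairs at source lines 902-903 and 950-951 show. A compilable toy sketch of that acquire/release mirroring; every function body is a stub, and the simulated clock failure exists only to drive the error path:

#include <stdio.h>

/* Stubs standing in for pm_runtime_get_sync(), clk enable, etc.
 * Names mirror the driver's steps; bodies are fake. */
static void get_gmu(void)     { puts("get gmu"); }
static void get_gx(void)      { puts("get gx"); }
static int  enable_clks(void) { puts("clks on"); return -1; /* simulate failure */ }
static void put_gx(void)      { puts("put gx"); }
static void put_gmu(void)     { puts("put gmu"); }

/* Acquire in order, release in reverse: the shape of a6xx_gmu_resume(). */
static int resume_sketch(void)
{
	int ret;

	get_gmu();
	get_gx();

	ret = enable_clks();
	if (ret)
		goto err_put;

	return 0;

err_put:
	put_gx();
	put_gmu();
	return ret;
}

int main(void)
{
	printf("resume: %d\n", resume_sketch());
	return 0;
}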
961 if (!gmu->initialized) in a6xx_gmu_isidle()
977 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_bus_clear_pending_transactions()
1006 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_shutdown()
1040 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_shutdown()
1061 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_stop()
1062 struct msm_gpu *gpu = &a6xx_gpu->base.base; in a6xx_gmu_stop()
1064 if (!pm_runtime_active(gmu->dev)) in a6xx_gmu_stop()
1071 if (gmu->hung) in a6xx_gmu_stop()
1077 dev_pm_opp_set_bw(&gpu->pdev->dev, NULL); in a6xx_gmu_stop()
1084 if (!IS_ERR_OR_NULL(gmu->gxpd)) in a6xx_gmu_stop()
1085 pm_runtime_put_sync(gmu->gxpd); in a6xx_gmu_stop()
1087 clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); in a6xx_gmu_stop()
1089 pm_runtime_put_sync(gmu->dev); in a6xx_gmu_stop()
1096 msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace, false); in a6xx_gmu_memory_free()
1097 msm_gem_kernel_put(gmu->debug.obj, gmu->aspace, false); in a6xx_gmu_memory_free()
1098 msm_gem_kernel_put(gmu->icache.obj, gmu->aspace, false); in a6xx_gmu_memory_free()
1099 msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace, false); in a6xx_gmu_memory_free()
1100 msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace, false); in a6xx_gmu_memory_free()
1101 msm_gem_kernel_put(gmu->log.obj, gmu->aspace, false); in a6xx_gmu_memory_free()
1103 gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu); in a6xx_gmu_memory_free()
1104 msm_gem_address_space_put(gmu->aspace); in a6xx_gmu_memory_free()
1111 struct drm_device *dev = a6xx_gpu->base.base.dev; in a6xx_gmu_memory_alloc()
1118 /* no fixed address - use GMU's uncached range */ in a6xx_gmu_memory_alloc()
1129 bo->obj = msm_gem_new(dev, size, flags); in a6xx_gmu_memory_alloc()
1130 if (IS_ERR(bo->obj)) in a6xx_gmu_memory_alloc()
1131 return PTR_ERR(bo->obj); in a6xx_gmu_memory_alloc()
1133 ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova, in a6xx_gmu_memory_alloc()
1136 drm_gem_object_put(bo->obj); in a6xx_gmu_memory_alloc()
1140 bo->virt = msm_gem_get_vaddr(bo->obj); in a6xx_gmu_memory_alloc()
1141 bo->size = size; in a6xx_gmu_memory_alloc()
1153 return -ENODEV; in a6xx_gmu_memory_probe()
1155 mmu = msm_iommu_new(gmu->dev, domain); in a6xx_gmu_memory_probe()
1156 gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000); in a6xx_gmu_memory_probe()
1157 if (IS_ERR(gmu->aspace)) { in a6xx_gmu_memory_probe()
1159 return PTR_ERR(gmu->aspace); in a6xx_gmu_memory_probe()
1165 /* Return the 'arc-level' for the given frequency */
1202 return -EINVAL; in a6xx_gmu_rpmh_arc_votes_init()
1210 return -EINVAL; in a6xx_gmu_rpmh_arc_votes_init()
1233 return -EINVAL; in a6xx_gmu_rpmh_arc_votes_init()
1267 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_rpmh_votes_init()
1268 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_rpmh_votes_init()
1272 ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes, in a6xx_gmu_rpmh_votes_init()
1273 gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl"); in a6xx_gmu_rpmh_votes_init()
1276 ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes, in a6xx_gmu_rpmh_votes_init()
1277 gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl"); in a6xx_gmu_rpmh_votes_init()
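
The "arc-level" lookup referenced at source line 1165 works off cmd-db (see the cmd-db.h include at the top): the aux data describes the levels an RPMh arc supports, and for each OPP the driver roughly picks the index of the lowest level that satisfies it, returning -EINVAL when nothing does, which matches the error returns in the excerpt. A compilable sketch of that selection; the level values are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Pick the lowest arc level that is >= the level the OPP asks for,
 * in the spirit of a6xx_gmu_rpmh_arc_votes_init(). */
static int find_arc_vote(const uint16_t *levels, int count, uint16_t wanted)
{
	int i;

	for (i = 0; i < count; i++)
		if (levels[i] >= wanted)
			return i;

	return -1; /* the driver returns -EINVAL here */
}

int main(void)
{
	/* Hypothetical arc table (RPMH_REGULATOR_LEVEL_* style values). */
	const uint16_t levels[] = { 16, 48, 64, 128, 192, 256 };

	printf("%d\n", find_arc_vote(levels, 6, 64));  /* 2 */
	printf("%d\n", find_arc_vote(levels, 6, 300)); /* -1 */
	return 0;
}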
1297 count = size - 1; in a6xx_gmu_build_freq_table()
1317 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_pwrlevels_probe()
1318 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_pwrlevels_probe()
1326 ret = dev_pm_opp_of_add_table(gmu->dev); in a6xx_gmu_pwrlevels_probe()
1328 DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n"); in a6xx_gmu_pwrlevels_probe()
1332 gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev, in a6xx_gmu_pwrlevels_probe()
1333 gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs)); in a6xx_gmu_pwrlevels_probe()
1339 gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev, in a6xx_gmu_pwrlevels_probe()
1340 gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs)); in a6xx_gmu_pwrlevels_probe()
1342 gmu->current_perf_index = gmu->nr_gpu_freqs - 1; in a6xx_gmu_pwrlevels_probe()
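
a6xx_gmu_build_freq_table(), called twice in the excerpt above, fills its table by walking the device's OPPs in ascending order. A hedged kernel-style sketch of that idiom using the standard moving-floor iteration with dev_pm_opp_find_freq_ceil(); this is the general shape, not the driver's exact body, and the "count = size - 1" clamp from source line 1297 is read here as leaving room for the implicit "off" (0 Hz) level the OPP table does not describe:

#include <linux/pm_opp.h>

/* Record up to 'size' OPP frequencies in ascending order. */
static int build_freq_table_sketch(struct device *dev,
				   unsigned long *freqs, int size)
{
	int count = dev_pm_opp_get_opp_count(dev);
	unsigned long freq = 1;
	int i, index = 0;

	/* Reserve one slot for the implicit "off" level. */
	if (count + 1 > size)
		count = size - 1;

	for (i = 0; i < count; i++) {
		struct dev_pm_opp *opp = dev_pm_opp_find_freq_ceil(dev, &freq);

		if (IS_ERR(opp))
			break;

		dev_pm_opp_put(opp);
		freqs[index++] = freq;
		freq++; /* next iteration finds the next-higher OPP */
	}

	return index;
}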
1350 int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks); in a6xx_gmu_clocks_probe()
1355 gmu->nr_clocks = ret; in a6xx_gmu_clocks_probe()
1357 gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks, in a6xx_gmu_clocks_probe()
1358 gmu->nr_clocks, "gmu"); in a6xx_gmu_clocks_probe()
1371 DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name); in a6xx_gmu_get_mmio()
1372 return ERR_PTR(-EINVAL); in a6xx_gmu_get_mmio()
1375 ret = ioremap(res->start, resource_size(res)); in a6xx_gmu_get_mmio()
1377 DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name); in a6xx_gmu_get_mmio()
1378 return ERR_PTR(-EINVAL); in a6xx_gmu_get_mmio()
1393 DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n", in a6xx_gmu_get_irq()
1405 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_remove()
1406 struct platform_device *pdev = to_platform_device(gmu->dev); in a6xx_gmu_remove()
1408 if (!gmu->initialized) in a6xx_gmu_remove()
1411 pm_runtime_force_suspend(gmu->dev); in a6xx_gmu_remove()
1413 if (!IS_ERR_OR_NULL(gmu->gxpd)) { in a6xx_gmu_remove()
1414 pm_runtime_disable(gmu->gxpd); in a6xx_gmu_remove()
1415 dev_pm_domain_detach(gmu->gxpd, false); in a6xx_gmu_remove()
1418 iounmap(gmu->mmio); in a6xx_gmu_remove()
1420 iounmap(gmu->rscc); in a6xx_gmu_remove()
1421 gmu->mmio = NULL; in a6xx_gmu_remove()
1422 gmu->rscc = NULL; in a6xx_gmu_remove()
1426 free_irq(gmu->gmu_irq, gmu); in a6xx_gmu_remove()
1427 free_irq(gmu->hfi_irq, gmu); in a6xx_gmu_remove()
1430 put_device(gmu->dev); in a6xx_gmu_remove()
1432 gmu->initialized = false; in a6xx_gmu_remove()
1437 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_init()
1438 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_init()
1443 return -ENODEV; in a6xx_gmu_init()
1445 gmu->dev = &pdev->dev; in a6xx_gmu_init()
1447 of_dma_configure(gmu->dev, node, true); in a6xx_gmu_init()
1450 gmu->idle_level = GMU_IDLE_STATE_ACTIVE; in a6xx_gmu_init()
1452 pm_runtime_enable(gmu->dev); in a6xx_gmu_init()
1464 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, SZ_4K, 0x60000000); in a6xx_gmu_init()
1469 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, in a6xx_gmu_init()
1470 SZ_16M - SZ_16K, 0x04000); in a6xx_gmu_init()
1474 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, in a6xx_gmu_init()
1475 SZ_256K - SZ_16K, 0x04000); in a6xx_gmu_init()
1479 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache, in a6xx_gmu_init()
1480 SZ_256K - SZ_16K, 0x44000); in a6xx_gmu_init()
1485 gmu->legacy = true; in a6xx_gmu_init()
1488 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0); in a6xx_gmu_init()
1494 ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0); in a6xx_gmu_init()
1499 ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0); in a6xx_gmu_init()
1504 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); in a6xx_gmu_init()
1505 if (IS_ERR(gmu->mmio)) { in a6xx_gmu_init()
1506 ret = PTR_ERR(gmu->mmio); in a6xx_gmu_init()
1511 gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc"); in a6xx_gmu_init()
1512 if (IS_ERR(gmu->rscc)) in a6xx_gmu_init()
1515 gmu->rscc = gmu->mmio + 0x23000; in a6xx_gmu_init()
1519 gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq); in a6xx_gmu_init()
1520 gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq); in a6xx_gmu_init()
1522 if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) in a6xx_gmu_init()
1529 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); in a6xx_gmu_init()
1537 gmu->initialized = true; in a6xx_gmu_init()
1542 iounmap(gmu->mmio); in a6xx_gmu_init()
1544 iounmap(gmu->rscc); in a6xx_gmu_init()
1545 free_irq(gmu->gmu_irq, gmu); in a6xx_gmu_init()
1546 free_irq(gmu->hfi_irq, gmu); in a6xx_gmu_init()
1548 ret = -ENODEV; in a6xx_gmu_init()
1554 put_device(gmu->dev); in a6xx_gmu_init()