Lines matching +full:mmu +full:-500 ("mmu-500") in drivers/gpu/drm/msm/adreno/a6xx_gpu.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
14 #include <linux/soc/qcom/llcc-qcom.h>
24 if (!a6xx_gmu_isidle(&a6xx_gpu->gmu)) in _a6xx_check_idle()
44 gpu->name, __builtin_return_address(0), in a6xx_idle()
61 if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) { in update_shadow_rptr()
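The body elided from update_shadow_rptr() above presumably just emits a CP_WHERE_AM_I packet aimed at the per-ring slot of the shadow buffer (the same shadowptr() target that hw_init() programs further down). A minimal sketch, assuming the PM4 helpers used elsewhere in this file:

	/* ask the CP to write its current read pointer into the shadow slot */
	OUT_PKT7(ring, CP_WHERE_AM_I, 2);
	OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
	OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));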
75 spin_lock_irqsave(&ring->preempt_lock, flags); in a6xx_flush()
78 ring->cur = ring->next; in a6xx_flush()
83 spin_unlock_irqrestore(&ring->preempt_lock, flags); in a6xx_flush()
105 bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1; in a6xx_set_pagetable()
110 if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno) in a6xx_set_pagetable()
113 if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid)) in a6xx_set_pagetable()
166 /* Re-enable protected mode: */ in a6xx_set_pagetable()
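The "Re-enable protected mode" comment at line 166 implies the bracket that surrounds it: the CP has to drop out of protected mode before the SMMU table update can be emitted, then protection is switched back on. A hedged sketch of that bracket (the TTBR0/ASID update packet itself is elided here):

	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 0);	/* leave protected mode for the pagetable switch */

	/* ... emit the SMMU table update with the new TTBR0/ASID ... */

	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 1);	/* re-enable protected mode */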
174 unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT; in a6xx_submit()
177 struct msm_ringbuffer *ring = submit->ring; in a6xx_submit()
180 a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx); in a6xx_submit()
201 for (i = 0; i < submit->nr_cmds; i++) { in a6xx_submit()
202 switch (submit->cmd[i].type) { in a6xx_submit()
206 if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) in a6xx_submit()
211 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); in a6xx_submit()
212 OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); in a6xx_submit()
213 OUT_RING(ring, submit->cmd[i].size); in a6xx_submit()
219 * Periodically update shadow-wptr if needed, so that we in a6xx_submit()
236 OUT_RING(ring, submit->seqno); in a6xx_submit()
247 OUT_RING(ring, submit->seqno); in a6xx_submit()
596 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_set_hwcg()
601 if (!adreno_gpu->info->hwcg) in a6xx_set_hwcg()
611 /* Don't re-program the registers if they are already correct */ in a6xx_set_hwcg()
618 for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++) in a6xx_set_hwcg()
619 gpu_write(gpu, reg->offset, state ? reg->value : 0); in a6xx_set_hwcg()
781 for (i = 0; i < count - 1; i++) in a6xx_set_cp_protect()
784 gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]); in a6xx_set_cp_protect()
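Lines 781 and 784 presumably bracket the usual protect-register loop: every entry but the last is written at its natural index, then the final entry is parked in the highest protect register so its range stays in force however many entries the variant defines. Roughly:

	for (i = 0; i < count - 1; i++)
		gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
	/* last CP_PROTECT gets the "infinite" range from the final entry */
	gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);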
827 struct msm_ringbuffer *ring = gpu->rb[0]; in a6xx_cp_init()
851 return a6xx_idle(gpu, ring) ? 0 : -EINVAL; in a6xx_cp_init()
861 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_ucode_check_version()
862 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_ucode_check_version()
863 const char *sqe_name = adreno_gpu->info->fw[ADRENO_FW_SQE]; in a6xx_ucode_check_version()
893 a6xx_gpu->has_whereami = true; in a6xx_ucode_check_version()
898 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_check_version()
907 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_check_version()
913 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_check_version()
926 if (!a6xx_gpu->sqe_bo) { in a6xx_ucode_init()
927 a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu, in a6xx_ucode_init()
928 adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova); in a6xx_ucode_init()
930 if (IS_ERR(a6xx_gpu->sqe_bo)) { in a6xx_ucode_init()
931 int ret = PTR_ERR(a6xx_gpu->sqe_bo); in a6xx_ucode_init()
933 a6xx_gpu->sqe_bo = NULL; in a6xx_ucode_init()
934 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_init()
940 msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw"); in a6xx_ucode_init()
941 if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) { in a6xx_ucode_init()
942 msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace); in a6xx_ucode_init()
943 drm_gem_object_put(a6xx_gpu->sqe_bo); in a6xx_ucode_init()
945 a6xx_gpu->sqe_bo = NULL; in a6xx_ucode_init()
946 return -EPERM; in a6xx_ucode_init()
951 REG_A6XX_CP_SQE_INSTR_BASE+1, a6xx_gpu->sqe_iova); in a6xx_ucode_init()
989 a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); in hw_init()
 998		 * Disable the trusted memory range - we don't actually support secure in hw_init()
1051 /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */ in hw_init()
1057 0x00100000 + adreno_gpu->gmem - 1); in hw_init()
1131 if (gpu->hw_apriv) { in hw_init()
1149 gpu->rb[0]->iova); in hw_init()
1155 if (adreno_gpu->base.hw_apriv) in hw_init()
1166 if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) { in hw_init()
1167 if (!a6xx_gpu->shadow_bo) { in hw_init()
1168 a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev, in hw_init()
1169 sizeof(u32) * gpu->nr_rings, in hw_init()
1171 gpu->aspace, &a6xx_gpu->shadow_bo, in hw_init()
1172 &a6xx_gpu->shadow_iova); in hw_init()
1174 if (IS_ERR(a6xx_gpu->shadow)) in hw_init()
1175 return PTR_ERR(a6xx_gpu->shadow); in hw_init()
1177 msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow"); in hw_init()
1182 shadowptr(a6xx_gpu, gpu->rb[0])); in hw_init()
1186 a6xx_gpu->cur_ring = gpu->rb[0]; in hw_init()
1188 gpu->cur_ctx_seqno = 0; in hw_init()
1206 OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1); in hw_init()
1207 OUT_RING(gpu->rb[0], 0x00000000); in hw_init()
1209 a6xx_flush(gpu, gpu->rb[0]); in hw_init()
1210 if (!a6xx_idle(gpu, gpu->rb[0])) in hw_init()
1211 return -EINVAL; in hw_init()
1212 } else if (ret == -ENODEV) { in hw_init()
1219 dev_warn_once(gpu->dev->dev, in hw_init()
1220 "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n"); in hw_init()
1232 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); in hw_init()
1234 if (a6xx_gpu->gmu.legacy) { in hw_init()
1236 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER); in hw_init()
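The shadow read-pointer address written at line 1182 presumably comes from a small helper in a6xx_gpu.h that indexes the shadow BO by ring id; a sketch of what that macro likely looks like:

	#define shadowptr(_a6xx_gpu, _ring) ((_a6xx_gpu)->shadow_iova + \
			((_ring)->id * sizeof(u32)))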
1248 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_hw_init()
1250 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_hw_init()
1257 DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n", in a6xx_dump()
1274 DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i, in a6xx_recover()
1287 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0); in a6xx_recover()
1289 pm_runtime_dont_use_autosuspend(&gpu->pdev->dev); in a6xx_recover()
1292 mutex_lock(&gpu->active_lock); in a6xx_recover()
1293 active_submits = gpu->active_submits; in a6xx_recover()
1299 gpu->active_submits = 0; in a6xx_recover()
1303 pm_runtime_put(&gpu->pdev->dev); in a6xx_recover()
1306 pm_runtime_put_sync(&gpu->pdev->dev); in a6xx_recover()
1309 reset_control_reset(gpu->cx_collapse); in a6xx_recover()
1311 pm_runtime_use_autosuspend(&gpu->pdev->dev); in a6xx_recover()
1314 pm_runtime_get(&gpu->pdev->dev); in a6xx_recover()
1316 pm_runtime_get_sync(&gpu->pdev->dev); in a6xx_recover()
1318 gpu->active_submits = active_submits; in a6xx_recover()
1319 mutex_unlock(&gpu->active_lock); in a6xx_recover()
1374 bool do_devcoredump = info && !READ_ONCE(gpu->crashstate); in a6xx_fault_handler()
1381 gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu); in a6xx_fault_handler()
1386 * adreno-smmu-priv in a6xx_fault_handler()
1399 if (info->fsr & ARM_SMMU_FSR_TF) in a6xx_fault_handler()
1401 else if (info->fsr & ARM_SMMU_FSR_PF) in a6xx_fault_handler()
1403 else if (info->fsr & ARM_SMMU_FSR_EF) in a6xx_fault_handler()
1406 block = a6xx_fault_block(gpu, info->fsynr1 & 0xff); in a6xx_fault_handler()
1409 info->ttbr0, iova, in a6xx_fault_handler()
1419 del_timer(&gpu->hangcheck_timer); in a6xx_fault_handler()
1421 gpu->fault_info.ttbr0 = info->ttbr0; in a6xx_fault_handler()
1422 gpu->fault_info.iova = iova; in a6xx_fault_handler()
1423 gpu->fault_info.flags = flags; in a6xx_fault_handler()
1424 gpu->fault_info.type = type; in a6xx_fault_handler()
1425 gpu->fault_info.block = block; in a6xx_fault_handler()
1427 kthread_queue_work(gpu->worker, &gpu->fault_work); in a6xx_fault_handler()
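The fsr tests at lines 1399-1403 presumably just translate the SMMU fault status bits into a human-readable type for the error message and the saved fault_info; something along these lines:

	const char *type = "UNKNOWN";

	if (info->fsr & ARM_SMMU_FSR_TF)
		type = "TRANSLATION";
	else if (info->fsr & ARM_SMMU_FSR_PF)
		type = "PERMISSION";
	else if (info->fsr & ARM_SMMU_FSR_EF)
		type = "EXTERNAL";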
1442 dev_err_ratelimited(&gpu->pdev->dev, in a6xx_cp_hw_err_irq()
1448 dev_err_ratelimited(&gpu->pdev->dev, in a6xx_cp_hw_err_irq()
1452 dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n", in a6xx_cp_hw_err_irq()
1458 dev_err_ratelimited(&gpu->pdev->dev, in a6xx_cp_hw_err_irq()
1465 dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n"); in a6xx_cp_hw_err_irq()
1468 dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n"); in a6xx_cp_hw_err_irq()
1471 dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n"); in a6xx_cp_hw_err_irq()
1479 struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); in a6xx_fault_detect_irq()
1494 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1); in a6xx_fault_detect_irq()
1496 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_fault_detect_irq()
1498 ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0, in a6xx_fault_detect_irq()
1508 del_timer(&gpu->hangcheck_timer); in a6xx_fault_detect_irq()
1510 kthread_queue_work(gpu->worker, &gpu->recover_work); in a6xx_fault_detect_irq()
1515 struct msm_drm_private *priv = gpu->dev->dev_private; in a6xx_irq()
1520 if (priv->disable_err_irq) in a6xx_irq()
1527 dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n"); in a6xx_irq()
1533 dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n"); in a6xx_irq()
1536 dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n"); in a6xx_irq()
1539 dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n"); in a6xx_irq()
1549 return msm_rmw(a6xx_gpu->llc_mmio + (reg << 2), mask, or); in a6xx_llc_rmw()
1554 msm_writel(value, a6xx_gpu->llc_mmio + (reg << 2)); in a6xx_llc_write()
1559 llcc_slice_deactivate(a6xx_gpu->llc_slice); in a6xx_llc_deactivate()
1560 llcc_slice_deactivate(a6xx_gpu->htw_llc_slice); in a6xx_llc_deactivate()
1565 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_llc_activate()
1566 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_llc_activate()
1569 if (IS_ERR(a6xx_gpu->llc_mmio)) in a6xx_llc_activate()
1572 if (!llcc_slice_activate(a6xx_gpu->llc_slice)) { in a6xx_llc_activate()
1573 u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); in a6xx_llc_activate()
1591 if (!llcc_slice_activate(a6xx_gpu->htw_llc_slice)) { in a6xx_llc_activate()
1592 if (!a6xx_gpu->have_mmu500) { in a6xx_llc_activate()
1593 u32 gpuhtw_scid = llcc_get_slice_id(a6xx_gpu->htw_llc_slice); in a6xx_llc_activate()
1604 * Program the slice IDs for the various GPU blocks and GPU MMU in a6xx_llc_activate()
1607 if (!a6xx_gpu->have_mmu500) { in a6xx_llc_activate()
1625 llcc_slice_putd(a6xx_gpu->llc_slice); in a6xx_llc_slices_destroy()
1626 llcc_slice_putd(a6xx_gpu->htw_llc_slice); in a6xx_llc_slices_destroy()
1638 phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0); in a6xx_llc_slices_init()
1639 a6xx_gpu->have_mmu500 = (phandle && in a6xx_llc_slices_init()
1640 of_device_is_compatible(phandle, "arm,mmu-500")); in a6xx_llc_slices_init()
1643 if (a6xx_gpu->have_mmu500) in a6xx_llc_slices_init()
1644 a6xx_gpu->llc_mmio = NULL; in a6xx_llc_slices_init()
1646 a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem"); in a6xx_llc_slices_init()
1648 a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU); in a6xx_llc_slices_init()
1649 a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW); in a6xx_llc_slices_init()
1651 if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice)) in a6xx_llc_slices_init()
1652 a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL); in a6xx_llc_slices_init()
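The MMU-500 check at lines 1638-1640 is what the "mmu-500" query actually hits: the driver follows the GPU's "iommus" phandle and tests the SMMU node's compatible string, then skips mapping the GPU-side LLC MMIO region, since MMU-500 targets take a different LLC programming path (visible in a6xx_llc_activate() above). A self-contained sketch of that check; gpu_behind_mmu500() and np are illustrative names, not driver API:

	#include <linux/of.h>

	static bool gpu_behind_mmu500(struct device_node *np)
	{
		struct device_node *smmu = of_parse_phandle(np, "iommus", 0);
		bool ret = smmu && of_device_is_compatible(smmu, "arm,mmu-500");

		of_node_put(smmu);	/* of_parse_phandle() takes a reference */
		return ret;
	}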
1661 gpu->needs_hw_init = true; in a6xx_pm_resume()
1665 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_pm_resume()
1667 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_pm_resume()
1690 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_pm_suspend()
1692 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_pm_suspend()
1696 if (a6xx_gpu->shadow_bo) in a6xx_pm_suspend()
1697 for (i = 0; i < gpu->nr_rings; i++) in a6xx_pm_suspend()
1698 a6xx_gpu->shadow[i] = 0; in a6xx_pm_suspend()
1700 gpu->suspend_count++; in a6xx_pm_suspend()
1710 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_get_timestamp()
1713 a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); in a6xx_get_timestamp()
1718 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); in a6xx_get_timestamp()
1720 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_get_timestamp()
1730 return a6xx_gpu->cur_ring; in a6xx_active_ring()
1738 if (a6xx_gpu->sqe_bo) { in a6xx_destroy()
1739 msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace); in a6xx_destroy()
1740 drm_gem_object_put(a6xx_gpu->sqe_bo); in a6xx_destroy()
1743 if (a6xx_gpu->shadow_bo) { in a6xx_destroy()
1744 msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace); in a6xx_destroy()
1745 drm_gem_object_put(a6xx_gpu->shadow_bo); in a6xx_destroy()
1766 busy_cycles = gmu_read64(&a6xx_gpu->gmu, in a6xx_gpu_busy()
1779 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_gpu_set_freq()
1781 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_gpu_set_freq()
1790 struct msm_mmu *mmu; in a6xx_create_address_space() local
1802 if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice)) in a6xx_create_address_space()
1805 mmu = msm_iommu_new(&pdev->dev, iommu); in a6xx_create_address_space()
1806 if (IS_ERR(mmu)) { in a6xx_create_address_space()
1808 return ERR_CAST(mmu); in a6xx_create_address_space()
1816 start = max_t(u64, SZ_16M, iommu->geometry.aperture_start); in a6xx_create_address_space()
1817 size = iommu->geometry.aperture_end - start + 1; in a6xx_create_address_space()
1819 aspace = msm_gem_address_space_create(mmu, "gpu", in a6xx_create_address_space()
1822 if (IS_ERR(aspace) && !IS_ERR(mmu)) in a6xx_create_address_space()
1823 mmu->funcs->destroy(mmu); in a6xx_create_address_space()
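The aperture math at lines 1816-1817 clamps the start of the GEM VA range to at least 16MB, presumably so that low addresses such as the GMEM window at 0x100000 (line 1051 above) stay outside the range handed to the allocator, then sizes the space up to the end of the IOMMU aperture. Condensed:

	u64 start, size;

	/* never allocate below 16M; otherwise follow the IOMMU aperture */
	start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
	size = iommu->geometry.aperture_end - start + 1;

	aspace = msm_gem_address_space_create(mmu, "gpu", start, size);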
1831 struct msm_mmu *mmu; in a6xx_create_private_address_space() local
1833 mmu = msm_iommu_pagetable_create(gpu->aspace->mmu); in a6xx_create_private_address_space()
1835 if (IS_ERR(mmu)) in a6xx_create_private_address_space()
1836 return ERR_CAST(mmu); in a6xx_create_private_address_space()
1838 return msm_gem_address_space_create(mmu, in a6xx_create_private_address_space()
1848 if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) in a6xx_get_rptr()
1849 return a6xx_gpu->shadow[ring->id]; in a6xx_get_rptr()
1851 return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR); in a6xx_get_rptr()
1909 "missing support for speed-bin: %u. Some OPPs may not be supported by hardware", in fuse_to_supp_hw()
1925 * -ENOENT means that the platform doesn't support speedbin which is in a6xx_set_supported_hw()
1928 if (ret == -ENOENT) { in a6xx_set_supported_hw()
1932 "failed to read speed-bin (%d). Some OPPs may not be supported by hardware", in a6xx_set_supported_hw()
1978 struct msm_drm_private *priv = dev->dev_private; in a6xx_gpu_init()
1979 struct platform_device *pdev = priv->gpu_pdev; in a6xx_gpu_init()
1980 struct adreno_platform_config *config = pdev->dev.platform_data; in a6xx_gpu_init()
1990 return ERR_PTR(-ENOMEM); in a6xx_gpu_init()
1992 adreno_gpu = &a6xx_gpu->base; in a6xx_gpu_init()
1993 gpu = &adreno_gpu->base; in a6xx_gpu_init()
1995 adreno_gpu->registers = NULL; in a6xx_gpu_init()
2002 info = adreno_info(config->rev); in a6xx_gpu_init()
2004 if (info && (info->revn == 650 || info->revn == 660 || in a6xx_gpu_init()
2005 adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev))) in a6xx_gpu_init()
2006 adreno_gpu->base.hw_apriv = true; in a6xx_gpu_init()
2012 if (info && (info->revn == 618)) in a6xx_gpu_init()
2013 gpu->clamp_to_idle = true; in a6xx_gpu_init()
2017 ret = a6xx_set_supported_hw(&pdev->dev, config->rev); in a6xx_gpu_init()
2019 a6xx_destroy(&(a6xx_gpu->base.base)); in a6xx_gpu_init()
2025 a6xx_destroy(&(a6xx_gpu->base.base)); in a6xx_gpu_init()
2030 node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0); in a6xx_gpu_init()
2038 a6xx_destroy(&(a6xx_gpu->base.base)); in a6xx_gpu_init()
2042 if (gpu->aspace) in a6xx_gpu_init()
2043 msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, in a6xx_gpu_init()