Lines matching full:gpu (see the accessor sketch after the listing)
18 static inline bool _a6xx_check_idle(struct msm_gpu *gpu) in _a6xx_check_idle() argument
20 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in _a6xx_check_idle()
28 if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) & in _a6xx_check_idle()
32 return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) & in _a6xx_check_idle()
36 static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in a6xx_idle() argument
39 if (!adreno_idle(gpu, ring)) in a6xx_idle()
42 if (spin_until(_a6xx_check_idle(gpu))) { in a6xx_idle()
43 DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n", in a6xx_idle()
44 gpu->name, __builtin_return_address(0), in a6xx_idle()
45 gpu_read(gpu, REG_A6XX_RBBM_STATUS), in a6xx_idle()
46 gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS), in a6xx_idle()
47 gpu_read(gpu, REG_A6XX_CP_RB_RPTR), in a6xx_idle()
48 gpu_read(gpu, REG_A6XX_CP_RB_WPTR)); in a6xx_idle()
55 static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in update_shadow_rptr() argument
57 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in update_shadow_rptr()
68 static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in a6xx_flush() argument
73 update_shadow_rptr(gpu, ring); in a6xx_flush()
88 gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr); in a6xx_flush()
146 * lingering in that part of the GPU in a6xx_set_pagetable()
172 static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a6xx_submit() argument
175 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_submit()
187 * GPU registers so we need to add 0x1a800 to the register value on A630 in a6xx_submit()
206 if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) in a6xx_submit()
226 update_shadow_rptr(gpu, ring); in a6xx_submit()
250 gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO, in a6xx_submit()
253 a6xx_flush(gpu, ring); in a6xx_submit()
592 static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state) in a6xx_set_hwcg() argument
594 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_set_hwcg()
609 val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL); in a6xx_set_hwcg()
619 gpu_write(gpu, reg->offset, state ? reg->value : 0); in a6xx_set_hwcg()
624 gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0); in a6xx_set_hwcg()
751 static void a6xx_set_cp_protect(struct msm_gpu *gpu) in a6xx_set_cp_protect() argument
753 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_set_cp_protect()
779 gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, BIT(0) | BIT(1) | BIT(3)); in a6xx_set_cp_protect()
782 gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]); in a6xx_set_cp_protect()
784 gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]); in a6xx_set_cp_protect()
787 static void a6xx_set_ubwc_config(struct msm_gpu *gpu) in a6xx_set_ubwc_config() argument
789 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_set_ubwc_config()
817 gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, in a6xx_set_ubwc_config()
819 gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1); in a6xx_set_ubwc_config()
820 gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, in a6xx_set_ubwc_config()
822 gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21); in a6xx_set_ubwc_config()
825 static int a6xx_cp_init(struct msm_gpu *gpu) in a6xx_cp_init() argument
827 struct msm_ringbuffer *ring = gpu->rb[0]; in a6xx_cp_init()
850 a6xx_flush(gpu, ring); in a6xx_cp_init()
851 return a6xx_idle(gpu, ring) ? 0 : -EINVAL; in a6xx_cp_init()
862 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_ucode_check_version() local
898 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_check_version()
907 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_check_version()
913 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_check_version()
914 "unknown GPU, add it to a6xx_ucode_check_version()!!\n"); in a6xx_ucode_check_version()
921 static int a6xx_ucode_init(struct msm_gpu *gpu) in a6xx_ucode_init() argument
923 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_ucode_init()
927 a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu, in a6xx_ucode_init()
934 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_init()
942 msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace); in a6xx_ucode_init()
950 gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, in a6xx_ucode_init()
956 static int a6xx_zap_shader_init(struct msm_gpu *gpu) in a6xx_zap_shader_init() argument
964 ret = adreno_zap_shader_load(gpu, GPU_PAS_ID); in a6xx_zap_shader_init()
982 static int hw_init(struct msm_gpu *gpu) in hw_init() argument
984 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in hw_init()
988 /* Make sure the GMU keeps the GPU on while we set it up */ in hw_init()
993 gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0); in hw_init()
995 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0); in hw_init()
1002 gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, in hw_init()
1004 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000); in hw_init()
1007 gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1); in hw_init()
1008 gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1); in hw_init()
1009 gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1); in hw_init()
1010 gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1); in hw_init()
1011 gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1); in hw_init()
1012 gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1); in hw_init()
1013 gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1); in hw_init()
1014 gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1); in hw_init()
1015 gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1); in hw_init()
1016 gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1); in hw_init()
1017 gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1); in hw_init()
1018 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1); in hw_init()
1021 a6xx_set_hwcg(gpu, true); in hw_init()
1026 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620); in hw_init()
1027 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620); in hw_init()
1028 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620); in hw_init()
1029 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620); in hw_init()
1030 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620); in hw_init()
1031 gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3); in hw_init()
1033 gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3); in hw_init()
1037 gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009); in hw_init()
1039 /* Make all blocks contribute to the GPU BUSY perf counter */ in hw_init()
1040 gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff); in hw_init()
1043 gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0); in hw_init()
1044 gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff); in hw_init()
1045 gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_LO, 0xfffff000); in hw_init()
1046 gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff); in hw_init()
1047 gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000); in hw_init()
1048 gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff); in hw_init()
1051 /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */ in hw_init()
1052 gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO, in hw_init()
1055 gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO, in hw_init()
1060 gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804); in hw_init()
1061 gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4); in hw_init()
1065 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140); in hw_init()
1067 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0); in hw_init()
1068 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c); in hw_init()
1071 gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020); in hw_init()
1074 gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128); in hw_init()
1080 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200); in hw_init()
1082 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200); in hw_init()
1084 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200); in hw_init()
1086 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000); in hw_init()
1089 gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1); in hw_init()
1092 gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1); in hw_init()
1095 gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL(0), PERF_CP_ALWAYS_COUNT); in hw_init()
1097 a6xx_set_ubwc_config(gpu); in hw_init()
1100 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, in hw_init()
1103 gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1); in hw_init()
1107 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0); in hw_init()
1108 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1, in hw_init()
1110 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2, in hw_init()
1112 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3, in hw_init()
1114 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4, in hw_init()
1119 a6xx_set_cp_protect(gpu); in hw_init()
1122 gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1); in hw_init()
1123 gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0); in hw_init()
1126 /* Set dualQ + disable afull for A660 GPU */ in hw_init()
1128 gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906); in hw_init()
1131 if (gpu->hw_apriv) { in hw_init()
1132 gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL, in hw_init()
1137 gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK); in hw_init()
1139 ret = adreno_hw_init(gpu); in hw_init()
1143 ret = a6xx_ucode_init(gpu); in hw_init()
1148 gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI, in hw_init()
1149 gpu->rb[0]->iova); in hw_init()
1156 gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT); in hw_init()
1158 gpu_write(gpu, REG_A6XX_CP_RB_CNTL, in hw_init()
1168 a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev, in hw_init()
1169 sizeof(u32) * gpu->nr_rings, in hw_init()
1171 gpu->aspace, &a6xx_gpu->shadow_bo, in hw_init()
1180 gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO, in hw_init()
1182 shadowptr(a6xx_gpu, gpu->rb[0])); in hw_init()
1186 a6xx_gpu->cur_ring = gpu->rb[0]; in hw_init()
1188 gpu->cur_ctx_seqno = 0; in hw_init()
1191 gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1); in hw_init()
1193 ret = a6xx_cp_init(gpu); in hw_init()
1204 ret = a6xx_zap_shader_init(gpu); in hw_init()
1206 OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1); in hw_init()
1207 OUT_RING(gpu->rb[0], 0x00000000); in hw_init()
1209 a6xx_flush(gpu, gpu->rb[0]); in hw_init()
1210 if (!a6xx_idle(gpu, gpu->rb[0])) in hw_init()
1219 dev_warn_once(gpu->dev->dev, in hw_init()
1221 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0); in hw_init()
1229 * Tell the GMU that we are done touching the GPU and it can start power in hw_init()
1242 static int a6xx_hw_init(struct msm_gpu *gpu) in a6xx_hw_init() argument
1244 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_hw_init()
1249 ret = hw_init(gpu); in a6xx_hw_init()
1255 static void a6xx_dump(struct msm_gpu *gpu) in a6xx_dump() argument
1257 DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n", in a6xx_dump()
1258 gpu_read(gpu, REG_A6XX_RBBM_STATUS)); in a6xx_dump()
1259 adreno_dump(gpu); in a6xx_dump()
1265 static void a6xx_recover(struct msm_gpu *gpu) in a6xx_recover() argument
1267 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_recover()
1271 adreno_dump_info(gpu); in a6xx_recover()
1274 DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i, in a6xx_recover()
1275 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i))); in a6xx_recover()
1278 a6xx_dump(gpu); in a6xx_recover()
1281 gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3); in a6xx_recover()
1289 pm_runtime_dont_use_autosuspend(&gpu->pdev->dev); in a6xx_recover()
1292 mutex_lock(&gpu->active_lock); in a6xx_recover()
1293 active_submits = gpu->active_submits; in a6xx_recover()
1299 gpu->active_submits = 0; in a6xx_recover()
1303 pm_runtime_put(&gpu->pdev->dev); in a6xx_recover()
1306 pm_runtime_put_sync(&gpu->pdev->dev); in a6xx_recover()
1309 reset_control_reset(gpu->cx_collapse); in a6xx_recover()
1311 pm_runtime_use_autosuspend(&gpu->pdev->dev); in a6xx_recover()
1314 pm_runtime_get(&gpu->pdev->dev); in a6xx_recover()
1316 pm_runtime_get_sync(&gpu->pdev->dev); in a6xx_recover()
1318 gpu->active_submits = active_submits; in a6xx_recover()
1319 mutex_unlock(&gpu->active_lock); in a6xx_recover()
1321 msm_gpu_hw_init(gpu); in a6xx_recover()
1324 static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid) in a6xx_uche_fault_block() argument
1338 val = gpu_read(gpu, REG_A6XX_UCHE_CLIENT_PF); in a6xx_uche_fault_block()
1352 static const char *a6xx_fault_block(struct msm_gpu *gpu, u32 id) in a6xx_fault_block() argument
1361 return a6xx_uche_fault_block(gpu, id); in a6xx_fault_block()
1370 struct msm_gpu *gpu = arg; in a6xx_fault_handler() local
1374 bool do_devcoredump = info && !READ_ONCE(gpu->crashstate); in a6xx_fault_handler()
1381 gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu); in a6xx_fault_handler()
1389 pr_warn_ratelimited("*** gpu fault: iova=%.16lx flags=%d (%u,%u,%u,%u)\n", in a6xx_fault_handler()
1391 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)), in a6xx_fault_handler()
1392 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)), in a6xx_fault_handler()
1393 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)), in a6xx_fault_handler()
1394 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7))); in a6xx_fault_handler()
1406 block = a6xx_fault_block(gpu, info->fsynr1 & 0xff); in a6xx_fault_handler()
1408 pr_warn_ratelimited("*** gpu fault: ttbr0=%.16llx iova=%.16lx dir=%s type=%s source=%s (%u,%u,%u,%u)\n", in a6xx_fault_handler()
1412 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)), in a6xx_fault_handler()
1413 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)), in a6xx_fault_handler()
1414 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)), in a6xx_fault_handler()
1415 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7))); in a6xx_fault_handler()
1419 del_timer(&gpu->hangcheck_timer); in a6xx_fault_handler()
1421 gpu->fault_info.ttbr0 = info->ttbr0; in a6xx_fault_handler()
1422 gpu->fault_info.iova = iova; in a6xx_fault_handler()
1423 gpu->fault_info.flags = flags; in a6xx_fault_handler()
1424 gpu->fault_info.type = type; in a6xx_fault_handler()
1425 gpu->fault_info.block = block; in a6xx_fault_handler()
1427 kthread_queue_work(gpu->worker, &gpu->fault_work); in a6xx_fault_handler()
1433 static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu) in a6xx_cp_hw_err_irq() argument
1435 u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS); in a6xx_cp_hw_err_irq()
1440 gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1); in a6xx_cp_hw_err_irq()
1441 val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA); in a6xx_cp_hw_err_irq()
1442 dev_err_ratelimited(&gpu->pdev->dev, in a6xx_cp_hw_err_irq()
1448 dev_err_ratelimited(&gpu->pdev->dev, in a6xx_cp_hw_err_irq()
1452 dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n", in a6xx_cp_hw_err_irq()
1453 gpu_read(gpu, REG_A6XX_CP_HW_FAULT)); in a6xx_cp_hw_err_irq()
1456 u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS); in a6xx_cp_hw_err_irq()
1458 dev_err_ratelimited(&gpu->pdev->dev, in a6xx_cp_hw_err_irq()
1465 dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n"); in a6xx_cp_hw_err_irq()
1468 dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n"); in a6xx_cp_hw_err_irq()
1471 dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n"); in a6xx_cp_hw_err_irq()
1475 static void a6xx_fault_detect_irq(struct msm_gpu *gpu) in a6xx_fault_detect_irq() argument
1477 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_fault_detect_irq()
1479 struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); in a6xx_fault_detect_irq()
1482 * If stalled on SMMU fault, we could trip the GPU's hang detection, in a6xx_fault_detect_irq()
1487 if (gpu_read(gpu, REG_A6XX_RBBM_STATUS3) & A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT) in a6xx_fault_detect_irq()
1491 * Force the GPU to stay on until after we finish in a6xx_fault_detect_irq()
1496 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_fault_detect_irq()
1497 …"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n", in a6xx_fault_detect_irq()
1499 gpu_read(gpu, REG_A6XX_RBBM_STATUS), in a6xx_fault_detect_irq()
1500 gpu_read(gpu, REG_A6XX_CP_RB_RPTR), in a6xx_fault_detect_irq()
1501 gpu_read(gpu, REG_A6XX_CP_RB_WPTR), in a6xx_fault_detect_irq()
1502 gpu_read64(gpu, REG_A6XX_CP_IB1_BASE, REG_A6XX_CP_IB1_BASE_HI), in a6xx_fault_detect_irq()
1503 gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE), in a6xx_fault_detect_irq()
1504 gpu_read64(gpu, REG_A6XX_CP_IB2_BASE, REG_A6XX_CP_IB2_BASE_HI), in a6xx_fault_detect_irq()
1505 gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE)); in a6xx_fault_detect_irq()
1508 del_timer(&gpu->hangcheck_timer); in a6xx_fault_detect_irq()
1510 kthread_queue_work(gpu->worker, &gpu->recover_work); in a6xx_fault_detect_irq()
1513 static irqreturn_t a6xx_irq(struct msm_gpu *gpu) in a6xx_irq() argument
1515 struct msm_drm_private *priv = gpu->dev->dev_private; in a6xx_irq()
1516 u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS); in a6xx_irq()
1518 gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status); in a6xx_irq()
1524 a6xx_fault_detect_irq(gpu); in a6xx_irq()
1527 dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n"); in a6xx_irq()
1530 a6xx_cp_hw_err_irq(gpu); in a6xx_irq()
1533 dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n"); in a6xx_irq()
1536 dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n"); in a6xx_irq()
1539 dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n"); in a6xx_irq()
1542 msm_gpu_retire(gpu); in a6xx_irq()
1566 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_llc_activate() local
1583 gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) | in a6xx_llc_activate()
1604 * Program the slice IDs for the various GPU blocks and GPU MMU in a6xx_llc_activate()
1620 gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval); in a6xx_llc_activate()
1655 static int a6xx_pm_resume(struct msm_gpu *gpu) in a6xx_pm_resume() argument
1657 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_pm_resume()
1661 gpu->needs_hw_init = true; in a6xx_pm_resume()
1671 msm_devfreq_resume(gpu); in a6xx_pm_resume()
1678 static int a6xx_pm_suspend(struct msm_gpu *gpu) in a6xx_pm_suspend() argument
1680 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_pm_suspend()
1688 msm_devfreq_suspend(gpu); in a6xx_pm_suspend()
1697 for (i = 0; i < gpu->nr_rings; i++) in a6xx_pm_suspend()
1700 gpu->suspend_count++; in a6xx_pm_suspend()
1705 static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) in a6xx_get_timestamp() argument
1707 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_get_timestamp()
1712 /* Force the GPU power on so we can read this register */ in a6xx_get_timestamp()
1715 *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO, in a6xx_get_timestamp()
1725 static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu) in a6xx_active_ring() argument
1727 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_active_ring()
1733 static void a6xx_destroy(struct msm_gpu *gpu) in a6xx_destroy() argument
1735 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_destroy()
1739 msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace); in a6xx_destroy()
1744 msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace); in a6xx_destroy()
1757 static u64 a6xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate) in a6xx_gpu_busy() argument
1759 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_gpu_busy()
1773 static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp, in a6xx_gpu_set_freq() argument
1776 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_gpu_set_freq()
1780 a6xx_gmu_set_freq(gpu, opp, suspended); in a6xx_gpu_set_freq()
1785 a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) in a6xx_create_address_space() argument
1787 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_create_address_space()
1799 * This allows GPU to set the bus attributes required to use system in a6xx_create_address_space()
1819 aspace = msm_gem_address_space_create(mmu, "gpu", in a6xx_create_address_space()
1829 a6xx_create_private_address_space(struct msm_gpu *gpu) in a6xx_create_private_address_space() argument
1833 mmu = msm_iommu_pagetable_create(gpu->aspace->mmu); in a6xx_create_private_address_space()
1839 "gpu", 0x100000000ULL, in a6xx_create_private_address_space()
1840 adreno_private_address_space_size(gpu)); in a6xx_create_private_address_space()
1843 static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in a6xx_get_rptr() argument
1845 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_get_rptr()
1851 return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR); in a6xx_get_rptr()
1985 struct msm_gpu *gpu; in a6xx_gpu_init() local
1993 gpu = &adreno_gpu->base; in a6xx_gpu_init()
2013 gpu->clamp_to_idle = true; in a6xx_gpu_init()
2042 if (gpu->aspace) in a6xx_gpu_init()
2043 msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, in a6xx_gpu_init()
2046 return gpu; in a6xx_gpu_init()
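
For context, every hit above funnels through a handful of MMIO accessors declared in drivers/gpu/drm/msm/msm_gpu.h: gpu_read()/gpu_write() index the 32-bit register window by dword offset (gpu->mmio + (reg << 2)), gpu_read64() stitches a LO/HI register pair into one 64-bit value, and gpu_rmw() does a read-modify-write. What follows is a minimal user-space sketch of that pattern, not the kernel code itself: fake_gpu, NUM_REGS, and the register offsets in main() are invented stand-ins, with a plain array in place of the ioremap()ed MMIO region.

#include <stdint.h>
#include <stdio.h>

#define NUM_REGS 0x10000		/* invented window size */

struct fake_gpu {
	uint32_t mmio[NUM_REGS];	/* stand-in for the ioremap()ed gpu->mmio */
};

/* Registers are addressed by dword index; the kernel accessors compute
 * gpu->mmio + (reg << 2), which this array indexing mirrors. */
static inline uint32_t gpu_read(struct fake_gpu *gpu, uint32_t reg)
{
	return gpu->mmio[reg];
}

static inline void gpu_write(struct fake_gpu *gpu, uint32_t reg, uint32_t data)
{
	gpu->mmio[reg] = data;
}

/* 64-bit counters (e.g. CP_ALWAYS_ON_COUNTER) are exposed as a LO/HI pair */
static inline uint64_t gpu_read64(struct fake_gpu *gpu, uint32_t lo, uint32_t hi)
{
	return (uint64_t)gpu_read(gpu, lo) | ((uint64_t)gpu_read(gpu, hi) << 32);
}

/* Read-modify-write, as used for the GBIF_SCACHE_CNTL hits above */
static inline void gpu_rmw(struct fake_gpu *gpu, uint32_t reg,
			   uint32_t mask, uint32_t or)
{
	gpu_write(gpu, reg, (gpu_read(gpu, reg) & ~mask) | or);
}

int main(void)
{
	static struct fake_gpu gpu;	/* zeroed, kept off the stack */

	gpu_write(&gpu, 0x0800, 0x00071620);	/* like the GBIF_QSB_SIDEn writes */
	gpu_rmw(&gpu, 0x0800, 0xff, 0x3);	/* clear the low byte, set 0x3 */
	printf("reg 0x0800 = 0x%08x\n", (unsigned)gpu_read(&gpu, 0x0800));
	printf("counter   = 0x%llx\n",
	       (unsigned long long)gpu_read64(&gpu, 0x0900, 0x0901));
	return 0;
}

In the real driver these helpers wrap msm_readl()/msm_writel()/msm_rmw() over the mapped register region, and gpu_write64() takes the same LO/HI register pair, which is why calls such as gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI, ...) appear above with two register arguments.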