Lines Matching refs:gpu
23 static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname, in zap_shader_load_mdt() argument
26 struct device *dev = &gpu->pdev->dev; in zap_shader_load_mdt()
61 fw = adreno_request_fw(to_adreno_gpu(gpu), fwname); in zap_shader_load_mdt()
97 if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) { in zap_shader_load_mdt()
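For orientation, a hedged sketch of the overall zap_shader_load_mdt() flow these matches belong to: request the zap firmware, size and allocate a DMA region for it, stream the segments in with the Qualcomm MDT loader, then ask the secure world to authenticate and start it. Local variable names, the trimmed error handling, and the omitted FW_LOCATION_LEGACY special case are assumptions, not verbatim source.

        /* Sketch only: the legacy firmware-name handling and most error
         * cleanup from the real function are omitted or assumed here. */
        const struct firmware *fw;
        phys_addr_t mem_phys;
        ssize_t mem_size;
        void *mem_region;
        int ret;

        fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
        if (IS_ERR(fw))
                return PTR_ERR(fw);

        /* Ask the MDT loader how much contiguous memory the image needs */
        mem_size = qcom_mdt_get_size(fw);
        if (mem_size < 0)
                return mem_size;

        mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL);
        if (!mem_region)
                return -ENOMEM;

        /* Load the segments described by the MDT header into the region */
        ret = qcom_mdt_load(dev, fw, fwname, pasid,
                        mem_region, mem_phys, mem_size, NULL);
        if (ret)
                return ret;

        /* Hand the image to TrustZone for authentication and bring-up */
        return qcom_scm_pas_auth_and_reset(pasid);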
133 int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid) in adreno_zap_shader_load() argument
135 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_zap_shader_load()
136 struct platform_device *pdev = gpu->pdev; in adreno_zap_shader_load()
156 return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid); in adreno_zap_shader_load()
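The exported entry point, as these matches suggest, only sanity-checks the environment before handing the zap firmware name from the device info table to the MDT loader above. A hedged reconstruction; the SCM availability check and error codes are assumptions based on how SCM-backed loaders are normally gated:

int example_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct platform_device *pdev = gpu->pdev;

        /* The secure world (SCM) does the actual authentication, so bail
         * out early if that interface is not up yet. */
        if (!qcom_scm_is_available()) {
                DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
                return -EPROBE_DEFER;
        }

        return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}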
159 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) in adreno_get_param() argument
161 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_get_param()
186 pm_runtime_get_sync(&gpu->pdev->dev); in adreno_get_param()
187 ret = adreno_gpu->funcs->get_timestamp(gpu, value); in adreno_get_param()
188 pm_runtime_put_autosuspend(&gpu->pdev->dev); in adreno_get_param()
194 *value = gpu->nr_rings; in adreno_get_param()
200 *value = gpu->global_faults; in adreno_get_param()
203 DBG("%s: invalid param: %u", gpu->name, param); in adreno_get_param()
313 struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu, in adreno_fw_create_bo() argument
319 ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4, in adreno_fw_create_bo()
320 MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova); in adreno_fw_create_bo()
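The fw->size - 4 in the allocation above reflects the four-byte version header at the front of Adreno firmware images. A sketch of the assumed surrounding body (the memcpy offset, error path, and return value are not part of the matches and are reconstructed):

        struct drm_gem_object *bo;
        void *ptr;

        ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
                MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
        if (IS_ERR(ptr))
                return ERR_CAST(ptr);

        /* Skip the 4-byte header when copying the payload into the BO */
        memcpy(ptr, &fw->data[4], fw->size - 4);

        msm_gem_put_vaddr(bo);
        return bo;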
332 int adreno_hw_init(struct msm_gpu *gpu) in adreno_hw_init() argument
334 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_hw_init()
337 DBG("%s", gpu->name); in adreno_hw_init()
343 for (i = 0; i < gpu->nr_rings; i++) { in adreno_hw_init()
344 struct msm_ringbuffer *ring = gpu->rb[i]; in adreno_hw_init()
369 REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova); in adreno_hw_init()
374 rbmemptr(gpu->rb[0], rptr)); in adreno_hw_init()
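Putting the adreno_hw_init() matches together: every ring is rewound to an empty state, then the CP is pointed at ring 0 and at the location where it should publish its read pointer. A hedged sketch of that body; the cur/next/memptrs field names and the adreno_gpu_write64() call are assumptions beyond what the matches show:

        for (i = 0; i < gpu->nr_rings; i++) {
                struct msm_ringbuffer *ring = gpu->rb[i];

                if (!ring)
                        continue;

                /* Rewind the software pointers so the ring starts out empty
                 * after reset/resume, and clear the shadowed read pointer. */
                ring->cur = ring->start;
                ring->next = ring->start;
                ring->memptrs->rptr = 0;
        }

        /* Tell the CP where ring 0 lives */
        adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
                REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);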
391 struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu) in adreno_active_ring() argument
393 return gpu->rb[0]; in adreno_active_ring()
396 void adreno_recover(struct msm_gpu *gpu) in adreno_recover() argument
398 struct drm_device *dev = gpu->dev; in adreno_recover()
404 gpu->funcs->pm_suspend(gpu); in adreno_recover()
405 gpu->funcs->pm_resume(gpu); in adreno_recover()
407 ret = msm_gpu_hw_init(gpu); in adreno_recover()
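Recovery, per the matches, is just a power cycle followed by a full hardware re-init. A minimal sketch with an assumed error report:

        /* Power cycle the GPU, then bring the hardware back up from scratch */
        gpu->funcs->pm_suspend(gpu);
        gpu->funcs->pm_resume(gpu);

        ret = msm_gpu_hw_init(gpu);
        if (ret)
                DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);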
414 void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, in adreno_submit() argument
417 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_submit()
418 struct msm_drm_private *priv = gpu->dev->dev_private; in adreno_submit()
483 gpu->funcs->flush(gpu, ring); in adreno_submit()
486 void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in adreno_flush() argument
488 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_flush()
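adreno_flush() is the other half of the submit match above: it publishes the ring's software write pointer to the hardware doorbell. A hedged sketch; the cur/next bookkeeping, the barrier, and the REG_ADRENO_CP_RB_WPTR register name are assumptions not visible in the matches:

        uint32_t wptr;

        /* Commit the staged write position, then compute the hw wptr */
        ring->cur = ring->next;
        wptr = get_wptr(ring);

        /* Make sure ringbuffer writes are visible to the GPU before the
         * doorbell write that tells the CP to go fetch them. */
        mb();

        adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);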
507 bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in adreno_idle() argument
509 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_idle()
518 gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr); in adreno_idle()
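Idle detection compares the CP's read pointer against the last published write pointer and spins until they meet. A hedged reconstruction built around the two matches above; spin_until() and get_wptr() are assumed helpers from the adreno/msm headers:

        uint32_t wptr = get_wptr(ring);

        /* Wait for the CP to drain everything submitted so far */
        if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
                return true;

        DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
                gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);
        return false;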
523 int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state) in adreno_gpu_state_get() argument
525 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_gpu_state_get()
532 for (i = 0; i < gpu->nr_rings; i++) { in adreno_gpu_state_get()
535 state->ring[i].fence = gpu->rb[i]->memptrs->fence; in adreno_gpu_state_get()
536 state->ring[i].iova = gpu->rb[i]->iova; in adreno_gpu_state_get()
537 state->ring[i].seqno = gpu->rb[i]->seqno; in adreno_gpu_state_get()
538 state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]); in adreno_gpu_state_get()
539 state->ring[i].wptr = get_wptr(gpu->rb[i]); in adreno_gpu_state_get()
546 if (gpu->rb[i]->start[j]) in adreno_gpu_state_get()
552 memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2); in adreno_gpu_state_get()
578 state->registers[pos++] = gpu_read(gpu, addr); in adreno_gpu_state_get()
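After the per-ring snapshot shown above, adreno_gpu_state_get() walks the register list and stores address/value pairs. A sketch of that walk; treating adreno_gpu->registers as {start, end} range pairs terminated by ~0, and interleaving addresses with values in state->registers, are assumptions beyond the single match:

        for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
                u32 start = adreno_gpu->registers[i];
                u32 end = adreno_gpu->registers[i + 1];
                u32 addr;

                /* Store the register offset followed by its current value */
                for (addr = start; addr <= end; addr++) {
                        state->registers[pos++] = addr;
                        state->registers[pos++] = gpu_read(gpu, addr);
                }
        }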
695 void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state, in adreno_show() argument
698 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_show()
713 for (i = 0; i < gpu->nr_rings; i++) { in adreno_show()
757 void adreno_dump_info(struct msm_gpu *gpu) in adreno_dump_info() argument
759 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_dump_info()
767 for (i = 0; i < gpu->nr_rings; i++) { in adreno_dump_info()
768 struct msm_ringbuffer *ring = gpu->rb[i]; in adreno_dump_info()
780 void adreno_dump(struct msm_gpu *gpu) in adreno_dump() argument
782 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_dump()
789 printk("IO:region %s 00000000 00020000\n", gpu->name); in adreno_dump()
796 uint32_t val = gpu_read(gpu, addr); in adreno_dump()
804 struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu); in ring_freewords()
815 DRM_DEV_ERROR(ring->gpu->dev->dev, in adreno_wait_ring()
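These two matches come from the ringbuffer flow-control pair: ring_freewords() computes how many dwords of circular-buffer space remain between the CP's read pointer and the driver's next write position, and adreno_wait_ring() spins until there is room for a submission. A hedged reconstruction; spin_until() and MSM_GPU_RINGBUFFER_SZ are assumed from the msm headers:

static uint32_t example_ring_freewords(struct msm_ringbuffer *ring)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
        uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;     /* bytes -> dwords */
        uint32_t wptr = ring->next - ring->start;       /* staged write position */
        uint32_t rptr = get_rptr(adreno_gpu, ring);

        /* Classic circular-buffer free space, keeping one slot in reserve */
        return (rptr + (size - 1) - wptr) % size;
}

void example_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
        if (spin_until(example_ring_freewords(ring) >= ndwords))
                DRM_DEV_ERROR(ring->gpu->dev->dev,
                        "timeout waiting for space in ringbuffer %d\n",
                        ring->id);
}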
853 struct msm_gpu *gpu) in adreno_get_pwrlevels() argument
859 gpu->fast_rate = 0; in adreno_get_pwrlevels()
874 gpu->fast_rate = freq; in adreno_get_pwrlevels()
879 if (!gpu->fast_rate) { in adreno_get_pwrlevels()
883 gpu->fast_rate = 200000000; in adreno_get_pwrlevels()
886 DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate); in adreno_get_pwrlevels()
889 gpu->icc_path = of_icc_get(dev, NULL); in adreno_get_pwrlevels()
890 if (IS_ERR(gpu->icc_path)) in adreno_get_pwrlevels()
891 gpu->icc_path = NULL; in adreno_get_pwrlevels()
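The pwrlevel matches outline the clock and bandwidth discovery: take the fastest rate the OPP table defines, fall back to a conservative 200 MHz if no table is usable, then look up an optional interconnect path for bus voting. A hedged sketch using the standard OPP and interconnect APIs; the OPP-table registration step that normally precedes this is assumed:

        unsigned long freq = ULONG_MAX;
        struct dev_pm_opp *opp;

        gpu->fast_rate = 0;

        /* Find the fastest rate defined in the OPP table */
        opp = dev_pm_opp_find_freq_floor(dev, &freq);
        if (!IS_ERR(opp)) {
                gpu->fast_rate = freq;
                dev_pm_opp_put(opp);
        }

        if (!gpu->fast_rate) {
                dev_warn(dev, "could not find clk rate, using a safe default\n");
                gpu->fast_rate = 200000000;
        }

        DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);

        /* The interconnect path to memory is optional; ignore lookup errors */
        gpu->icc_path = of_icc_get(dev, NULL);
        if (IS_ERR(gpu->icc_path))
                gpu->icc_path = NULL;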
902 struct msm_gpu *gpu = &adreno_gpu->base; in adreno_gpu_init() local
920 adreno_get_pwrlevels(&pdev->dev, gpu); in adreno_gpu_init()
933 struct msm_gpu *gpu = &adreno_gpu->base; in adreno_gpu_cleanup() local
939 icc_put(gpu->icc_path); in adreno_gpu_cleanup()
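One note on the cleanup match: because adreno_get_pwrlevels() turned of_icc_get() failures into a NULL handle, and icc_put() tolerates a NULL path, the put can be unconditional:

        /* Pairs with of_icc_get(); icc_put(NULL) is a no-op, so no check needed */
        icc_put(gpu->icc_path);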