Lines matching "full:gpu" in drivers/gpu/drm/msm/msm_gpu.c (Linux kernel, drm/msm GPU core). Each entry gives the source line number, the matched line, and the enclosing function; lines that do not contain the identifier are elided by the search, so function bodies appear fragmentary.
26 static int enable_pwrrail(struct msm_gpu *gpu) in enable_pwrrail() argument
28 struct drm_device *dev = gpu->dev; in enable_pwrrail()
31 if (gpu->gpu_reg) { in enable_pwrrail()
32 ret = regulator_enable(gpu->gpu_reg); in enable_pwrrail()
39 if (gpu->gpu_cx) { in enable_pwrrail()
40 ret = regulator_enable(gpu->gpu_cx); in enable_pwrrail()
50 static int disable_pwrrail(struct msm_gpu *gpu) in disable_pwrrail() argument
52 if (gpu->gpu_cx) in disable_pwrrail()
53 regulator_disable(gpu->gpu_cx); in disable_pwrrail()
54 if (gpu->gpu_reg) in disable_pwrrail()
55 regulator_disable(gpu->gpu_reg); in disable_pwrrail()
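
The two rails bracket everything else: GX (gpu_reg) and CX (gpu_cx) come up before the clocks and go down after them, in reverse order. A minimal sketch of the same pairing, with an explicit unwind added for illustration (the names here are stand-ins, not the driver's):

#include <linux/regulator/consumer.h>

/* Hedged sketch of enable_pwrrail()/disable_pwrrail() above; both
 * regulators are optional, and teardown reverses bring-up order. */
static int demo_rails_on(struct regulator *gx, struct regulator *cx)
{
	int ret;

	if (gx) {
		ret = regulator_enable(gx);
		if (ret)
			return ret;
	}

	if (cx) {
		ret = regulator_enable(cx);
		if (ret) {
			if (gx)
				regulator_disable(gx);	/* unwind on failure */
			return ret;
		}
	}

	return 0;
}

static void demo_rails_off(struct regulator *gx, struct regulator *cx)
{
	if (cx)
		regulator_disable(cx);
	if (gx)
		regulator_disable(gx);
}
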
59 static int enable_clk(struct msm_gpu *gpu) in enable_clk() argument
61 if (gpu->core_clk && gpu->fast_rate) in enable_clk()
62 clk_set_rate(gpu->core_clk, gpu->fast_rate); in enable_clk()
65 if (gpu->rbbmtimer_clk) in enable_clk()
66 clk_set_rate(gpu->rbbmtimer_clk, 19200000); in enable_clk()
68 return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks); in enable_clk()
71 static int disable_clk(struct msm_gpu *gpu) in disable_clk() argument
73 clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks); in disable_clk()
80 if (gpu->core_clk) in disable_clk()
81 clk_set_rate(gpu->core_clk, 27000000); in disable_clk()
83 if (gpu->rbbmtimer_clk) in disable_clk()
84 clk_set_rate(gpu->rbbmtimer_clk, 0); in disable_clk()
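
Note the rate choreography: rates are programmed before the bulk enable, the RBBM timer runs at the SoC's 19.2 MHz reference, and on the way down the core clock is parked at a low but non-zero 27 MHz rather than left at fast_rate. A condensed sketch (the struct is a stand-in for the msm_gpu fields used above):

#include <linux/clk.h>

struct demo_gpu_clks {
	struct clk *core_clk, *rbbmtimer_clk;
	struct clk_bulk_data *grp_clks;
	int nr_clocks;
	unsigned long fast_rate;
};

static int demo_clk_on(struct demo_gpu_clks *c)
{
	/* program rates before ungating, as at lines 61-66 */
	if (c->core_clk && c->fast_rate)
		clk_set_rate(c->core_clk, c->fast_rate);
	if (c->rbbmtimer_clk)
		clk_set_rate(c->rbbmtimer_clk, 19200000);	/* 19.2 MHz */

	return clk_bulk_prepare_enable(c->nr_clocks, c->grp_clks);
}

static void demo_clk_off(struct demo_gpu_clks *c)
{
	clk_bulk_disable_unprepare(c->nr_clocks, c->grp_clks);

	/* park the core at a low-but-valid rate, as at line 81 */
	if (c->core_clk)
		clk_set_rate(c->core_clk, 27000000);
	if (c->rbbmtimer_clk)
		clk_set_rate(c->rbbmtimer_clk, 0);
}
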
89 static int enable_axi(struct msm_gpu *gpu) in enable_axi() argument
91 return clk_prepare_enable(gpu->ebi1_clk); in enable_axi()
94 static int disable_axi(struct msm_gpu *gpu) in disable_axi() argument
96 clk_disable_unprepare(gpu->ebi1_clk); in disable_axi()
100 int msm_gpu_pm_resume(struct msm_gpu *gpu) in msm_gpu_pm_resume() argument
104 DBG("%s", gpu->name); in msm_gpu_pm_resume()
107 ret = enable_pwrrail(gpu); in msm_gpu_pm_resume()
111 ret = enable_clk(gpu); in msm_gpu_pm_resume()
115 ret = enable_axi(gpu); in msm_gpu_pm_resume()
119 msm_devfreq_resume(gpu); in msm_gpu_pm_resume()
121 gpu->needs_hw_init = true; in msm_gpu_pm_resume()
126 int msm_gpu_pm_suspend(struct msm_gpu *gpu) in msm_gpu_pm_suspend() argument
130 DBG("%s", gpu->name); in msm_gpu_pm_suspend()
133 msm_devfreq_suspend(gpu); in msm_gpu_pm_suspend()
135 ret = disable_axi(gpu); in msm_gpu_pm_suspend()
139 ret = disable_clk(gpu); in msm_gpu_pm_suspend()
143 ret = disable_pwrrail(gpu); in msm_gpu_pm_suspend()
147 gpu->suspend_count++; in msm_gpu_pm_suspend()
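
Resume composes the helpers as rails → clocks → AXI → devfreq and sets needs_hw_init so the next submit reinitializes the hardware; suspend runs the same steps in reverse. These are meant to be driven from runtime PM; a hedged glue sketch (demo_dev_to_gpu() is an assumed accessor, sketched after the msm_gpu_init() listing below):

#include <linux/pm_runtime.h>

static struct msm_gpu *demo_dev_to_gpu(struct device *dev);	/* assumed */

static int demo_runtime_suspend(struct device *dev)
{
	return msm_gpu_pm_suspend(demo_dev_to_gpu(dev));
}

static int demo_runtime_resume(struct device *dev)
{
	return msm_gpu_pm_resume(demo_dev_to_gpu(dev));
}

static const struct dev_pm_ops demo_gpu_pm_ops = {
	SET_RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
};
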
152 void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx, in msm_gpu_show_fdinfo() argument
155 drm_printf(p, "drm-driver:\t%s\n", gpu->dev->driver->name); in msm_gpu_show_fdinfo()
157 drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns); in msm_gpu_show_fdinfo()
158 drm_printf(p, "drm-cycles-gpu:\t%llu\n", ctx->cycles); in msm_gpu_show_fdinfo()
159 drm_printf(p, "drm-maxfreq-gpu:\t%u Hz\n", gpu->fast_rate); in msm_gpu_show_fdinfo()
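
These keys follow the common DRM fdinfo conventions (drm-driver, drm-engine-*, drm-cycles-*, drm-maxfreq-*), so generic tools can compute per-file GPU utilization. Read back through /proc/<pid>/fdinfo/<fd>, the output would look like this (values illustrative):

drm-driver:	msm
drm-engine-gpu:	408911463 ns
drm-cycles-gpu:	325473519
drm-maxfreq-gpu:	800000000 Hz
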
162 int msm_gpu_hw_init(struct msm_gpu *gpu) in msm_gpu_hw_init() argument
166 WARN_ON(!mutex_is_locked(&gpu->lock)); in msm_gpu_hw_init()
168 if (!gpu->needs_hw_init) in msm_gpu_hw_init()
171 disable_irq(gpu->irq); in msm_gpu_hw_init()
172 ret = gpu->funcs->hw_init(gpu); in msm_gpu_hw_init()
174 gpu->needs_hw_init = false; in msm_gpu_hw_init()
175 enable_irq(gpu->irq); in msm_gpu_hw_init()
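
Condensed, the function is a one-shot re-init latch: it must be called with gpu->lock held, masks the IRQ so a half-programmed GPU cannot interrupt, and clears needs_hw_init only on success (the guard line itself is elided by the match). A sketch of that shape:

static int demo_hw_init(struct msm_gpu *gpu)
{
	int ret = 0;

	WARN_ON(!mutex_is_locked(&gpu->lock));

	if (gpu->needs_hw_init) {
		disable_irq(gpu->irq);
		ret = gpu->funcs->hw_init(gpu);
		if (!ret)
			gpu->needs_hw_init = false;	/* only clear on success */
		enable_irq(gpu->irq);
	}

	return ret;
}
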
184 struct msm_gpu *gpu = data; in msm_gpu_devcoredump_read() local
189 state = msm_gpu_crashstate_get(gpu); in msm_gpu_devcoredump_read()
210 gpu->funcs->show(gpu, state, &p); in msm_gpu_devcoredump_read()
212 msm_gpu_crashstate_put(gpu); in msm_gpu_devcoredump_read()
219 struct msm_gpu *gpu = data; in msm_gpu_devcoredump_free() local
221 msm_gpu_crashstate_put(gpu); in msm_gpu_devcoredump_free()
260 static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, in msm_gpu_crashstate_capture() argument
266 if (!gpu->funcs->gpu_state_get) in msm_gpu_crashstate_capture()
270 if (gpu->crashstate) in msm_gpu_crashstate_capture()
273 state = gpu->funcs->gpu_state_get(gpu); in msm_gpu_crashstate_capture()
280 state->fault_info = gpu->fault_info; in msm_gpu_crashstate_capture()
296 gpu->crashstate = state; in msm_gpu_crashstate_capture()
299 dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL, in msm_gpu_crashstate_capture()
303 static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, in msm_gpu_crashstate_capture() argument
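
The capture path is a single-shot latch: if gpu->crashstate is already set (line 270), a prior dump is still pending and the new one is dropped. Otherwise the snapshot is handed to the devcoredump framework, which exposes it under /sys/class/devcoredump and calls back into the read/free hooks above. A sketch of the callback shapes dev_coredumpm() expects (names here are placeholders for msm_gpu_devcoredump_read()/_free()):

#include <linux/devcoredump.h>

static ssize_t demo_coredump_read(char *buffer, loff_t offset, size_t count,
				  void *data, size_t datalen)
{
	/* format the held crash state into buffer at offset */
	return 0;
}

static void demo_coredump_free(void *data)
{
	/* drop the crash-state reference taken at capture time */
}

/* registered as at line 299:
 * dev_coredumpm(dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
 *		 demo_coredump_read, demo_coredump_free);
 */
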
310 * Hangcheck detection for locked gpu:
331 static void retire_submits(struct msm_gpu *gpu);
357 struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work); in recover_worker() local
358 struct drm_device *dev = gpu->dev; in recover_worker()
361 struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu); in recover_worker()
365 mutex_lock(&gpu->lock); in recover_worker()
367 DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name); in recover_worker()
380 gpu->name, comm, cmd); in recover_worker()
392 gpu->global_faults++; in recover_worker()
396 pm_runtime_get_sync(&gpu->pdev->dev); in recover_worker()
397 msm_gpu_crashstate_capture(gpu, submit, comm, cmd); in recover_worker()
407 for (i = 0; i < gpu->nr_rings; i++) { in recover_worker()
408 struct msm_ringbuffer *ring = gpu->rb[i]; in recover_worker()
422 if (msm_gpu_active(gpu)) { in recover_worker()
424 retire_submits(gpu); in recover_worker()
426 gpu->funcs->recover(gpu); in recover_worker()
432 for (i = 0; i < gpu->nr_rings; i++) { in recover_worker()
433 struct msm_ringbuffer *ring = gpu->rb[i]; in recover_worker()
438 gpu->funcs->submit(gpu, submit); in recover_worker()
443 pm_runtime_put(&gpu->pdev->dev); in recover_worker()
445 mutex_unlock(&gpu->lock); in recover_worker()
447 msm_gpu_retire(gpu); in recover_worker()
452 struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work); in fault_worker() local
454 struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu); in fault_worker()
457 mutex_lock(&gpu->lock); in fault_worker()
467 * When we get GPU iova faults, we can get 1000s of them, in fault_worker()
474 pm_runtime_get_sync(&gpu->pdev->dev); in fault_worker()
475 msm_gpu_crashstate_capture(gpu, submit, comm, cmd); in fault_worker()
476 pm_runtime_put_sync(&gpu->pdev->dev); in fault_worker()
482 memset(&gpu->fault_info, 0, sizeof(gpu->fault_info)); in fault_worker()
483 gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu); in fault_worker()
485 mutex_unlock(&gpu->lock); in fault_worker()
488 static void hangcheck_timer_reset(struct msm_gpu *gpu) in hangcheck_timer_reset() argument
490 struct msm_drm_private *priv = gpu->dev->dev_private; in hangcheck_timer_reset()
491 mod_timer(&gpu->hangcheck_timer, in hangcheck_timer_reset()
497 struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer); in hangcheck_handler() local
498 struct drm_device *dev = gpu->dev; in hangcheck_handler()
499 struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); in hangcheck_handler()
508 DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n", in hangcheck_handler()
509 gpu->name, ring->id); in hangcheck_handler()
511 gpu->name, fence); in hangcheck_handler()
513 gpu->name, ring->fctx->last_fence); in hangcheck_handler()
515 kthread_queue_work(gpu->worker, &gpu->recover_work); in hangcheck_handler()
520 hangcheck_timer_reset(gpu); in hangcheck_handler()
523 msm_gpu_retire(gpu); in hangcheck_handler()
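
The handler is a classic progress watchdog: each tick compares the ring's completed fence against what it saw last time; progress (or still-pending work) rearms the timer via hangcheck_timer_reset(), while a stall queues recover_work. A minimal self-contained sketch of the pattern (the 500 ms period is an assumption, not necessarily the driver's configured hangcheck period):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_watchdog {
	struct timer_list timer;
	u32 seen_fence;
	u32 completed_fence;	/* updated elsewhere on retire */
};

static void demo_watchdog_tick(struct timer_list *t)
{
	struct demo_watchdog *wd = from_timer(wd, t, timer);

	if (wd->completed_fence == wd->seen_fence) {
		/* no forward progress since the last tick: recover */
		return;
	}

	wd->seen_fence = wd->completed_fence;
	mod_timer(&wd->timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(500)));
}

/* armed with timer_setup(&wd->timer, demo_watchdog_tick, 0) and an
 * initial mod_timer(), mirroring lines 860 and 491 above */
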
531 static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs) in update_hw_cntrs() argument
533 uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)]; in update_hw_cntrs()
534 int i, n = min(ncntrs, gpu->num_perfcntrs); in update_hw_cntrs()
537 for (i = 0; i < gpu->num_perfcntrs; i++) in update_hw_cntrs()
538 current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg); in update_hw_cntrs()
542 cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i]; in update_hw_cntrs()
545 for (i = 0; i < gpu->num_perfcntrs; i++) in update_hw_cntrs()
546 gpu->last_cntrs[i] = current_cntrs[i]; in update_hw_cntrs()
551 static void update_sw_cntrs(struct msm_gpu *gpu) in update_sw_cntrs() argument
557 spin_lock_irqsave(&gpu->perf_lock, flags); in update_sw_cntrs()
558 if (!gpu->perfcntr_active) in update_sw_cntrs()
562 elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time)); in update_sw_cntrs()
564 gpu->totaltime += elapsed; in update_sw_cntrs()
565 if (gpu->last_sample.active) in update_sw_cntrs()
566 gpu->activetime += elapsed; in update_sw_cntrs()
568 gpu->last_sample.active = msm_gpu_active(gpu); in update_sw_cntrs()
569 gpu->last_sample.time = time; in update_sw_cntrs()
572 spin_unlock_irqrestore(&gpu->perf_lock, flags); in update_sw_cntrs()
575 void msm_gpu_perfcntr_start(struct msm_gpu *gpu) in msm_gpu_perfcntr_start() argument
579 pm_runtime_get_sync(&gpu->pdev->dev); in msm_gpu_perfcntr_start()
581 spin_lock_irqsave(&gpu->perf_lock, flags); in msm_gpu_perfcntr_start()
583 gpu->last_sample.active = msm_gpu_active(gpu); in msm_gpu_perfcntr_start()
584 gpu->last_sample.time = ktime_get(); in msm_gpu_perfcntr_start()
585 gpu->activetime = gpu->totaltime = 0; in msm_gpu_perfcntr_start()
586 gpu->perfcntr_active = true; in msm_gpu_perfcntr_start()
587 update_hw_cntrs(gpu, 0, NULL); in msm_gpu_perfcntr_start()
588 spin_unlock_irqrestore(&gpu->perf_lock, flags); in msm_gpu_perfcntr_start()
591 void msm_gpu_perfcntr_stop(struct msm_gpu *gpu) in msm_gpu_perfcntr_stop() argument
593 gpu->perfcntr_active = false; in msm_gpu_perfcntr_stop()
594 pm_runtime_put_sync(&gpu->pdev->dev); in msm_gpu_perfcntr_stop()
598 int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime, in msm_gpu_perfcntr_sample() argument
604 spin_lock_irqsave(&gpu->perf_lock, flags); in msm_gpu_perfcntr_sample()
606 if (!gpu->perfcntr_active) { in msm_gpu_perfcntr_sample()
611 *activetime = gpu->activetime; in msm_gpu_perfcntr_sample()
612 *totaltime = gpu->totaltime; in msm_gpu_perfcntr_sample()
614 gpu->activetime = gpu->totaltime = 0; in msm_gpu_perfcntr_sample()
616 ret = update_hw_cntrs(gpu, ncntrs, cntrs); in msm_gpu_perfcntr_sample()
619 spin_unlock_irqrestore(&gpu->perf_lock, flags); in msm_gpu_perfcntr_sample()
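
Sampling is destructive: activetime/totaltime are returned and then zeroed under perf_lock, so each call reports the interval since the previous one. A hedged usage sketch deriving a busy percentage (ncntrs = 0 skips the hardware counters, as msm_gpu_perfcntr_start() itself does at line 587):

#include <linux/math64.h>

static u32 demo_gpu_busy_percent(struct msm_gpu *gpu)
{
	uint32_t active = 0, total = 0;

	if (msm_gpu_perfcntr_sample(gpu, &active, &total, 0, NULL) < 0)
		return 0;		/* counters not active */
	if (!total)
		return 0;

	return (u32)div_u64((u64)active * 100, total);
}
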
628 static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring, in retire_submit() argument
657 pm_runtime_mark_last_busy(&gpu->pdev->dev); in retire_submit()
664 mutex_lock(&gpu->active_lock); in retire_submit()
665 gpu->active_submits--; in retire_submit()
666 WARN_ON(gpu->active_submits < 0); in retire_submit()
667 if (!gpu->active_submits) { in retire_submit()
668 msm_devfreq_idle(gpu); in retire_submit()
669 pm_runtime_put_autosuspend(&gpu->pdev->dev); in retire_submit()
672 mutex_unlock(&gpu->active_lock); in retire_submit()
677 static void retire_submits(struct msm_gpu *gpu) in retire_submits() argument
682 for (i = 0; i < gpu->nr_rings; i++) { in retire_submits()
683 struct msm_ringbuffer *ring = gpu->rb[i]; in retire_submits()
700 retire_submit(gpu, ring, submit); in retire_submits()
707 wake_up_all(&gpu->retire_event); in retire_submits()
712 struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work); in retire_worker() local
714 retire_submits(gpu); in retire_worker()
718 void msm_gpu_retire(struct msm_gpu *gpu) in msm_gpu_retire() argument
722 for (i = 0; i < gpu->nr_rings; i++) in msm_gpu_retire()
723 msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence); in msm_gpu_retire()
725 kthread_queue_work(gpu->worker, &gpu->retire_work); in msm_gpu_retire()
726 update_sw_cntrs(gpu); in msm_gpu_retire()
729 /* add BOs to the gpu's ring, and kick the gpu: */
730 void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in msm_gpu_submit() argument
732 struct drm_device *dev = gpu->dev; in msm_gpu_submit()
737 WARN_ON(!mutex_is_locked(&gpu->lock)); in msm_gpu_submit()
739 pm_runtime_get_sync(&gpu->pdev->dev); in msm_gpu_submit()
741 msm_gpu_hw_init(gpu); in msm_gpu_submit()
747 update_sw_cntrs(gpu); in msm_gpu_submit()
760 mutex_lock(&gpu->active_lock); in msm_gpu_submit()
761 if (!gpu->active_submits) { in msm_gpu_submit()
762 pm_runtime_get(&gpu->pdev->dev); in msm_gpu_submit()
763 msm_devfreq_active(gpu); in msm_gpu_submit()
765 gpu->active_submits++; in msm_gpu_submit()
766 mutex_unlock(&gpu->active_lock); in msm_gpu_submit()
768 gpu->funcs->submit(gpu, submit); in msm_gpu_submit()
769 gpu->cur_ctx_seqno = submit->queue->ctx->seqno; in msm_gpu_submit()
771 pm_runtime_put(&gpu->pdev->dev); in msm_gpu_submit()
772 hangcheck_timer_reset(gpu); in msm_gpu_submit()
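
Submission and retirement form a matched bracket around active_submits: the first in-flight submit takes a pm_runtime reference and marks devfreq active (lines 760-766), and the last retire signals idle and drops the reference with autosuspend (lines 664-672). Condensed:

static void demo_submit_begin(struct msm_gpu *gpu)
{
	mutex_lock(&gpu->active_lock);
	if (!gpu->active_submits) {
		pm_runtime_get(&gpu->pdev->dev);	/* first in flight */
		msm_devfreq_active(gpu);
	}
	gpu->active_submits++;
	mutex_unlock(&gpu->active_lock);
}

static void demo_submit_end(struct msm_gpu *gpu)
{
	mutex_lock(&gpu->active_lock);
	if (!--gpu->active_submits) {
		msm_devfreq_idle(gpu);			/* last one retired */
		pm_runtime_put_autosuspend(&gpu->pdev->dev);
	}
	mutex_unlock(&gpu->active_lock);
}
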
781 struct msm_gpu *gpu = data; in irq_handler() local
782 return gpu->funcs->irq(gpu); in irq_handler()
785 static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu) in get_clocks() argument
787 int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks); in get_clocks()
790 gpu->nr_clocks = 0; in get_clocks()
794 gpu->nr_clocks = ret; in get_clocks()
796 gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks, in get_clocks()
797 gpu->nr_clocks, "core"); in get_clocks()
799 gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks, in get_clocks()
800 gpu->nr_clocks, "rbbmtimer"); in get_clocks()
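
devm_clk_bulk_get_all() fetches every clock in the device's DT node and returns the count (or a negative errno), which get_clocks() stores in nr_clocks; the named "core" and "rbbmtimer" clocks are then looked up inside that array. A hypothetical equivalent of that lookup (the real msm_clk_bulk_get_clock() lives elsewhere in the driver and may match names differently):

#include <linux/clk.h>
#include <linux/string.h>

static struct clk *demo_pick_clock(struct clk_bulk_data *bulk, int count,
				   const char *name)
{
	int i;

	for (i = 0; bulk && i < count; i++)
		if (!strcmp(bulk[i].id, name))
			return bulk[i].clk;

	return NULL;
}
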
807 msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task) in msm_gpu_create_private_address_space() argument
810 if (!gpu) in msm_gpu_create_private_address_space()
817 if (gpu->funcs->create_private_address_space) { in msm_gpu_create_private_address_space()
818 aspace = gpu->funcs->create_private_address_space(gpu); in msm_gpu_create_private_address_space()
824 aspace = msm_gem_address_space_get(gpu->aspace); in msm_gpu_create_private_address_space()
830 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, in msm_gpu_init() argument
837 if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs))) in msm_gpu_init()
838 gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs); in msm_gpu_init()
840 gpu->dev = drm; in msm_gpu_init()
841 gpu->funcs = funcs; in msm_gpu_init()
842 gpu->name = name; in msm_gpu_init()
844 gpu->worker = kthread_create_worker(0, "gpu-worker"); in msm_gpu_init()
845 if (IS_ERR(gpu->worker)) { in msm_gpu_init()
846 ret = PTR_ERR(gpu->worker); in msm_gpu_init()
847 gpu->worker = NULL; in msm_gpu_init()
851 sched_set_fifo_low(gpu->worker->task); in msm_gpu_init()
853 mutex_init(&gpu->active_lock); in msm_gpu_init()
854 mutex_init(&gpu->lock); in msm_gpu_init()
855 init_waitqueue_head(&gpu->retire_event); in msm_gpu_init()
856 kthread_init_work(&gpu->retire_work, retire_worker); in msm_gpu_init()
857 kthread_init_work(&gpu->recover_work, recover_worker); in msm_gpu_init()
858 kthread_init_work(&gpu->fault_work, fault_worker); in msm_gpu_init()
860 timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0); in msm_gpu_init()
862 spin_lock_init(&gpu->perf_lock); in msm_gpu_init()
866 gpu->mmio = msm_ioremap(pdev, config->ioname); in msm_gpu_init()
867 if (IS_ERR(gpu->mmio)) { in msm_gpu_init()
868 ret = PTR_ERR(gpu->mmio); in msm_gpu_init()
873 gpu->irq = platform_get_irq(pdev, 0); in msm_gpu_init()
874 if (gpu->irq < 0) { in msm_gpu_init()
875 ret = gpu->irq; in msm_gpu_init()
880 ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, in msm_gpu_init()
881 IRQF_TRIGGER_HIGH, "gpu-irq", gpu); in msm_gpu_init()
883 DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret); in msm_gpu_init()
887 ret = get_clocks(pdev, gpu); in msm_gpu_init()
891 gpu->ebi1_clk = msm_clk_get(pdev, "bus"); in msm_gpu_init()
892 DBG("ebi1_clk: %p", gpu->ebi1_clk); in msm_gpu_init()
893 if (IS_ERR(gpu->ebi1_clk)) in msm_gpu_init()
894 gpu->ebi1_clk = NULL; in msm_gpu_init()
897 gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd"); in msm_gpu_init()
898 DBG("gpu_reg: %p", gpu->gpu_reg); in msm_gpu_init()
899 if (IS_ERR(gpu->gpu_reg)) in msm_gpu_init()
900 gpu->gpu_reg = NULL; in msm_gpu_init()
902 gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx"); in msm_gpu_init()
903 DBG("gpu_cx: %p", gpu->gpu_cx); in msm_gpu_init()
904 if (IS_ERR(gpu->gpu_cx)) in msm_gpu_init()
905 gpu->gpu_cx = NULL; in msm_gpu_init()
907 gpu->cx_collapse = devm_reset_control_get_optional_exclusive(&pdev->dev, in msm_gpu_init()
910 gpu->pdev = pdev; in msm_gpu_init()
911 platform_set_drvdata(pdev, &gpu->adreno_smmu); in msm_gpu_init()
913 msm_devfreq_init(gpu); in msm_gpu_init()
916 gpu->aspace = gpu->funcs->create_address_space(gpu, pdev); in msm_gpu_init()
918 if (gpu->aspace == NULL) in msm_gpu_init()
920 else if (IS_ERR(gpu->aspace)) { in msm_gpu_init()
921 ret = PTR_ERR(gpu->aspace); in msm_gpu_init()
927 check_apriv(gpu, MSM_BO_WC), gpu->aspace, &gpu->memptrs_bo, in msm_gpu_init()
936 msm_gem_object_set_name(gpu->memptrs_bo, "memptrs"); in msm_gpu_init()
938 if (nr_rings > ARRAY_SIZE(gpu->rb)) { in msm_gpu_init()
940 ARRAY_SIZE(gpu->rb)); in msm_gpu_init()
941 nr_rings = ARRAY_SIZE(gpu->rb); in msm_gpu_init()
946 gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova); in msm_gpu_init()
948 if (IS_ERR(gpu->rb[i])) { in msm_gpu_init()
949 ret = PTR_ERR(gpu->rb[i]); in msm_gpu_init()
959 gpu->nr_rings = nr_rings; in msm_gpu_init()
961 refcount_set(&gpu->sysprof_active, 1); in msm_gpu_init()
966 for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) { in msm_gpu_init()
967 msm_ringbuffer_destroy(gpu->rb[i]); in msm_gpu_init()
968 gpu->rb[i] = NULL; in msm_gpu_init()
971 msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace); in msm_gpu_init()
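
One subtlety from line 911: the platform drvdata is not the msm_gpu itself but &gpu->adreno_smmu, so code holding only the struct device must convert back with container_of(). An illustrative inverse of that call, and the accessor assumed by the runtime-PM sketch earlier (the driver's real helper is equivalent in spirit):

#include <linux/platform_device.h>
#include <linux/adreno-smmu-priv.h>

static struct msm_gpu *demo_dev_to_gpu(struct device *dev)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);

	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}
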
977 void msm_gpu_cleanup(struct msm_gpu *gpu) in msm_gpu_cleanup() argument
981 DBG("%s", gpu->name); in msm_gpu_cleanup()
983 for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) { in msm_gpu_cleanup()
984 msm_ringbuffer_destroy(gpu->rb[i]); in msm_gpu_cleanup()
985 gpu->rb[i] = NULL; in msm_gpu_cleanup()
988 msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace); in msm_gpu_cleanup()
990 if (!IS_ERR_OR_NULL(gpu->aspace)) { in msm_gpu_cleanup()
991 gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu); in msm_gpu_cleanup()
992 msm_gem_address_space_put(gpu->aspace); in msm_gpu_cleanup()
995 if (gpu->worker) { in msm_gpu_cleanup()
996 kthread_destroy_worker(gpu->worker); in msm_gpu_cleanup()
999 msm_devfreq_cleanup(gpu); in msm_gpu_cleanup()
1001 platform_set_drvdata(gpu->pdev, NULL); in msm_gpu_cleanup()