Lines matching refs: tmp_adev (all matches below are in drivers/gpu/drm/amd/amdgpu/amdgpu_device.c)
4541 struct amdgpu_device *tmp_adev = NULL; in amdgpu_do_asic_reset() local
4546 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, in amdgpu_do_asic_reset()
4548 r = amdgpu_reset_perform_reset(tmp_adev, reset_context); in amdgpu_do_asic_reset()
4565 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { in amdgpu_do_asic_reset()
4567 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_do_asic_reset()
4568 tmp_adev->gmc.xgmi.pending_reset = false; in amdgpu_do_asic_reset()
4569 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work)) in amdgpu_do_asic_reset()
4572 r = amdgpu_asic_reset(tmp_adev); in amdgpu_do_asic_reset()
4575 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s", in amdgpu_do_asic_reset()
4576 r, adev_to_drm(tmp_adev)->unique); in amdgpu_do_asic_reset()
4583 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { in amdgpu_do_asic_reset()
4584 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_do_asic_reset()
4585 flush_work(&tmp_adev->xgmi_reset_work); in amdgpu_do_asic_reset()
4586 r = tmp_adev->asic_reset_res; in amdgpu_do_asic_reset()
4595 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { in amdgpu_do_asic_reset()
4596 if (tmp_adev->mmhub.ras_funcs && in amdgpu_do_asic_reset()
4597 tmp_adev->mmhub.ras_funcs->reset_ras_error_count) in amdgpu_do_asic_reset()
4598 tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev); in amdgpu_do_asic_reset()
4604 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { in amdgpu_do_asic_reset()
4607 r = amdgpu_device_asic_init(tmp_adev); in amdgpu_do_asic_reset()
4609 dev_warn(tmp_adev->dev, "asic atom init failed!"); in amdgpu_do_asic_reset()
4611 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); in amdgpu_do_asic_reset()
4612 r = amdgpu_amdkfd_resume_iommu(tmp_adev); in amdgpu_do_asic_reset()
4616 r = amdgpu_device_ip_resume_phase1(tmp_adev); in amdgpu_do_asic_reset()
4620 vram_lost = amdgpu_device_check_vram_lost(tmp_adev); in amdgpu_do_asic_reset()
4623 amdgpu_inc_vram_lost(tmp_adev); in amdgpu_do_asic_reset()
4626 r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT)); in amdgpu_do_asic_reset()
4630 r = amdgpu_device_fw_loading(tmp_adev); in amdgpu_do_asic_reset()
4634 r = amdgpu_device_ip_resume_phase2(tmp_adev); in amdgpu_do_asic_reset()
4639 amdgpu_device_fill_reset_magic(tmp_adev); in amdgpu_do_asic_reset()
4645 amdgpu_register_gpu_instance(tmp_adev); in amdgpu_do_asic_reset()
4648 tmp_adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_do_asic_reset()
4649 amdgpu_xgmi_add_device(tmp_adev); in amdgpu_do_asic_reset()
4651 r = amdgpu_device_ip_late_init(tmp_adev); in amdgpu_do_asic_reset()
4655 amdgpu_fbdev_set_suspend(tmp_adev, 0); in amdgpu_do_asic_reset()
4667 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) { in amdgpu_do_asic_reset()
4669 amdgpu_ras_resume(tmp_adev); in amdgpu_do_asic_reset()
4677 tmp_adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_do_asic_reset()
4679 reset_context->hive, tmp_adev); in amdgpu_do_asic_reset()
4685 amdgpu_irq_gpu_reset_resume_helper(tmp_adev); in amdgpu_do_asic_reset()
4686 r = amdgpu_ib_ring_tests(tmp_adev); in amdgpu_do_asic_reset()
4688 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r); in amdgpu_do_asic_reset()
4696 r = amdgpu_device_recover_vram(tmp_adev); in amdgpu_do_asic_reset()
4698 tmp_adev->asic_reset_res = r; in amdgpu_do_asic_reset()
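The matches above (lines 4541-4698) are from amdgpu_do_asic_reset(): when the XGMI hive has more than one physical node, each device's reset is queued onto system_unbound_wq through its xgmi_reset_work, and a second list_for_each_entry pass flush_work()s each item and picks up asic_reset_res, so all nodes reset roughly in parallel before the per-device resume sequence runs. Below is a minimal user-space sketch of that queue-then-flush fan-out, not kernel code; fake_adev, do_asic_reset and the pthread plumbing are illustrative stand-ins for the driver's workqueue usage.

/*
 * Sketch of the fan-out in amdgpu_do_asic_reset(): pass 1 "queues" one
 * reset per hive node, pass 2 "flushes" the work and collects results.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_adev {
	int id;
	int asic_reset_res;   /* collected after the flush, like tmp_adev->asic_reset_res */
	pthread_t reset_work; /* stands in for tmp_adev->xgmi_reset_work */
};

static void *do_asic_reset(void *arg)
{
	struct fake_adev *adev = arg;

	/* the real work item ends up calling amdgpu_asic_reset(tmp_adev) */
	printf("resetting node %d\n", adev->id);
	adev->asic_reset_res = 0;
	return NULL;
}

int main(void)
{
	struct fake_adev hive[4] = { {.id = 0}, {.id = 1}, {.id = 2}, {.id = 3} };
	int i, r = 0;

	/* pass 1: "queue_work" one reset per node so the nodes reset in parallel */
	for (i = 0; i < 4; i++)
		pthread_create(&hive[i].reset_work, NULL, do_asic_reset, &hive[i]);

	/* pass 2: "flush_work" each node and pick up its asic_reset_res */
	for (i = 0; i < 4; i++) {
		pthread_join(hive[i].reset_work, NULL);
		if (hive[i].asic_reset_res)
			r = hive[i].asic_reset_res;
	}

	return r;
}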
4752 struct amdgpu_device *tmp_adev = NULL; in amdgpu_device_lock_hive_adev() local
4759 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_device_lock_hive_adev()
4760 if (!amdgpu_device_lock_adev(tmp_adev, hive)) in amdgpu_device_lock_hive_adev()
4768 if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) { in amdgpu_device_lock_hive_adev()
4776 dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock"); in amdgpu_device_lock_hive_adev()
4777 list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_device_lock_hive_adev()
4778 amdgpu_device_unlock_adev(tmp_adev); in amdgpu_device_lock_hive_adev()
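The lines above from amdgpu_device_lock_hive_adev() show its rollback path: every device in the hive is locked in list order, and if an amdgpu_device_lock_adev() call fails part-way through, list_for_each_entry_continue_reverse walks back over the entries that were already locked and unlocks them. A minimal user-space sketch of that lock-or-roll-back pattern follows; try_lock_device and unlock_device are made-up stand-ins for the driver calls, and an index-based loop replaces the kernel list iterators.

/*
 * Sketch of the rollback in amdgpu_device_lock_hive_adev(): lock each
 * node in order, and on failure undo only the locks already taken,
 * walking backwards from the failing entry.
 */
#include <stdbool.h>
#include <stdio.h>

#define HIVE_SIZE 4

static bool try_lock_device(int idx)
{
	/* stands in for amdgpu_device_lock_adev(tmp_adev, hive) */
	return idx != 2; /* pretend node 2 cannot be locked */
}

static void unlock_device(int idx)
{
	/* stands in for amdgpu_device_unlock_adev(tmp_adev) */
	printf("rolled back lock on node %d\n", idx);
}

static bool lock_hive(void)
{
	int i;

	for (i = 0; i < HIVE_SIZE; i++) {
		if (!try_lock_device(i))
			goto roll_back;
	}
	return true;

roll_back:
	/* undo the locks taken before the failure, in reverse order */
	for (i--; i >= 0; i--)
		unlock_device(i);
	return false;
}

int main(void)
{
	return lock_hive() ? 0 : 1;
}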
4927 struct amdgpu_device *tmp_adev = NULL; in amdgpu_device_gpu_recover() local
5004 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) in amdgpu_device_gpu_recover()
5005 list_add_tail(&tmp_adev->reset_list, &device_list); in amdgpu_device_gpu_recover()
5015 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { in amdgpu_device_gpu_recover()
5026 if (!amdgpu_device_suspend_display_audio(tmp_adev)) in amdgpu_device_gpu_recover()
5029 amdgpu_ras_set_error_query_ready(tmp_adev, false); in amdgpu_device_gpu_recover()
5031 cancel_delayed_work_sync(&tmp_adev->delayed_init_work); in amdgpu_device_gpu_recover()
5033 if (!amdgpu_sriov_vf(tmp_adev)) in amdgpu_device_gpu_recover()
5034 amdgpu_amdkfd_pre_reset(tmp_adev); in amdgpu_device_gpu_recover()
5040 amdgpu_unregister_gpu_instance(tmp_adev); in amdgpu_device_gpu_recover()
5042 amdgpu_fbdev_set_suspend(tmp_adev, 1); in amdgpu_device_gpu_recover()
5046 amdgpu_device_ip_need_full_reset(tmp_adev)) in amdgpu_device_gpu_recover()
5047 amdgpu_ras_suspend(tmp_adev); in amdgpu_device_gpu_recover()
5050 struct amdgpu_ring *ring = tmp_adev->rings[i]; in amdgpu_device_gpu_recover()
5060 atomic_inc(&tmp_adev->gpu_reset_counter); in amdgpu_device_gpu_recover()
5080 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { in amdgpu_device_gpu_recover()
5081 r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context); in amdgpu_device_gpu_recover()
5084 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ", in amdgpu_device_gpu_recover()
5085 r, adev_to_drm(tmp_adev)->unique); in amdgpu_device_gpu_recover()
5086 tmp_adev->asic_reset_res = r; in amdgpu_device_gpu_recover()
5106 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { in amdgpu_device_gpu_recover()
5118 tmp_adev, device_list_handle, &reset_context); in amdgpu_device_gpu_recover()
5121 struct amdgpu_ring *ring = tmp_adev->rings[i]; in amdgpu_device_gpu_recover()
5127 if (!tmp_adev->asic_reset_res && !job_signaled) in amdgpu_device_gpu_recover()
5130 drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res); in amdgpu_device_gpu_recover()
5133 if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) { in amdgpu_device_gpu_recover()
5134 drm_helper_resume_force_mode(adev_to_drm(tmp_adev)); in amdgpu_device_gpu_recover()
5137 tmp_adev->asic_reset_res = 0; in amdgpu_device_gpu_recover()
5141 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter)); in amdgpu_device_gpu_recover()
5142 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); in amdgpu_device_gpu_recover()
5144 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter)); in amdgpu_device_gpu_recover()
5145 if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0)) in amdgpu_device_gpu_recover()
5151 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { in amdgpu_device_gpu_recover()
5153 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev)) in amdgpu_device_gpu_recover()
5154 amdgpu_amdkfd_post_reset(tmp_adev); in amdgpu_device_gpu_recover()
5163 amdgpu_device_resume_display_audio(tmp_adev); in amdgpu_device_gpu_recover()
5164 amdgpu_device_unlock_adev(tmp_adev); in amdgpu_device_gpu_recover()
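The remaining matches are in amdgpu_device_gpu_recover(), which builds device_list (the single adev, or every device in the XGMI hive) and then makes several complete list_for_each_entry passes over that same list: pre-reset preparation, the reset itself via amdgpu_do_asic_reset(), restarting the drm schedulers, and finally post-reset cleanup, audio resume and unlocking. A minimal user-space sketch of that multi-pass structure is below; the helper names and the array standing in for the list are illustrative, not the driver's API.

/*
 * Sketch of the multi-pass recovery loop in amdgpu_device_gpu_recover():
 * the same device list is traversed once per recovery phase.
 */
#include <stdio.h>

struct fake_adev { int id; int asic_reset_res; };

static void pre_reset(struct fake_adev *d)  { printf("pre-reset node %d\n", d->id); }
static void do_reset(struct fake_adev *d)   { printf("reset node %d\n", d->id); d->asic_reset_res = 0; }
static void restart(struct fake_adev *d)    { printf("restart schedulers on node %d\n", d->id); }
static void post_reset(struct fake_adev *d) { printf("post-reset/unlock node %d\n", d->id); }

int main(void)
{
	struct fake_adev hive[2] = { {.id = 0}, {.id = 1} };
	int i, n = 2;

	for (i = 0; i < n; i++)   /* pass 1: cancel delayed work, pre-reset KFD, suspend RAS */
		pre_reset(&hive[i]);
	for (i = 0; i < n; i++)   /* pass 2: the ASIC reset and IP resume */
		do_reset(&hive[i]);
	for (i = 0; i < n; i++)   /* pass 3: restart the drm schedulers if reset succeeded */
		restart(&hive[i]);
	for (i = 0; i < n; i++)   /* pass 4: post-reset KFD, resume display audio, unlock adev */
		post_reset(&hive[i]);

	return 0;
}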