Lines Matching full:gt
178 static int i915_do_reset(struct intel_gt *gt, in i915_do_reset() argument
182 struct pci_dev *pdev = gt->i915->drm.pdev; in i915_do_reset()
207 static int g33_do_reset(struct intel_gt *gt, in g33_do_reset() argument
211 struct pci_dev *pdev = gt->i915->drm.pdev; in g33_do_reset()
217 static int g4x_do_reset(struct intel_gt *gt, in g4x_do_reset() argument
221 struct pci_dev *pdev = gt->i915->drm.pdev; in g4x_do_reset()
222 struct intel_uncore *uncore = gt->uncore; in g4x_do_reset()
233 drm_dbg(&gt->i915->drm, "Wait for media reset failed\n"); in g4x_do_reset()
241 drm_dbg(&gt->i915->drm, "Wait for render reset failed\n"); in g4x_do_reset()
254 static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, in ilk_do_reset() argument
257 struct intel_uncore *uncore = gt->uncore; in ilk_do_reset()
267 drm_dbg(&gt->i915->drm, "Wait for render reset failed\n"); in ilk_do_reset()
278 drm_dbg(&gt->i915->drm, "Wait for media reset failed\n"); in ilk_do_reset()
289 static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask) in gen6_hw_domain_reset() argument
291 struct intel_uncore *uncore = gt->uncore; in gen6_hw_domain_reset()
295 * GEN6_GDRST is not in the gt power well, no need to check in gen6_hw_domain_reset()
307 drm_dbg(&gt->i915->drm, in gen6_hw_domain_reset()
314 static int gen6_reset_engines(struct intel_gt *gt, in gen6_reset_engines() argument
334 for_each_engine_masked(engine, gt, engine_mask, tmp) { in gen6_reset_engines()
340 return gen6_hw_domain_reset(gt, hw_mask); in gen6_reset_engines()
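
Aside: everything from gen6 onward funnels into the GEN6_GDRST write-and-poll seen above. A minimal sketch of that pattern, under a hypothetical name; the register helpers are the driver's real ones, but the timeout values here are assumptions:

	static int gdrst_reset_sketch(struct intel_gt *gt, u32 hw_domain_mask)
	{
		struct intel_uncore *uncore = gt->uncore;
		int err;

		/* Request reset of the selected hardware domains. */
		intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);

		/* Hardware clears the domain bits once the reset completes. */
		err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
						   hw_domain_mask, 0,
						   500, 0, NULL);
		if (err)
			drm_dbg(&gt->i915->drm,
				"Wait for 0x%08x engines reset failed\n",
				hw_domain_mask);

		return err;
	}
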
346 u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access; in gen11_lock_sfc()
421 u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access; in gen11_unlock_sfc()
446 static int gen11_reset_engines(struct intel_gt *gt, in gen11_reset_engines() argument
469 for_each_engine_masked(engine, gt, engine_mask, tmp) { in gen11_reset_engines()
478 ret = gen6_hw_domain_reset(gt, hw_mask); in gen11_reset_engines()
488 for_each_engine_masked(engine, gt, engine_mask, tmp) in gen11_reset_engines()
539 static int gen8_reset_engines(struct intel_gt *gt, in gen8_reset_engines() argument
548 for_each_engine_masked(engine, gt, engine_mask, tmp) { in gen8_reset_engines()
568 if (INTEL_GEN(gt->i915) >= 11) in gen8_reset_engines()
569 ret = gen11_reset_engines(gt, engine_mask, retry); in gen8_reset_engines()
571 ret = gen6_reset_engines(gt, engine_mask, retry); in gen8_reset_engines()
574 for_each_engine_masked(engine, gt, engine_mask, tmp) in gen8_reset_engines()
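
The gen8 matches above show the shape: prepare each engine, dispatch to the gen11 or gen6 domain reset, then cancel the prepare request. A hedged reconstruction follows; the prepare/cancel helper names and the skip label are assumed, and the real retry handling is simplified away:

	static int gen8_reset_engines_sketch(struct intel_gt *gt,
					     intel_engine_mask_t engine_mask,
					     unsigned int retry)
	{
		struct intel_engine_cs *engine;
		intel_engine_mask_t tmp;
		int ret;

		/* Ask each engine to quiesce before yanking it. */
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			ret = gen8_engine_reset_prepare(engine);
			if (ret)
				goto skip_reset;
		}

		if (INTEL_GEN(gt->i915) >= 11)
			ret = gen11_reset_engines(gt, engine_mask, retry);
		else
			ret = gen6_reset_engines(gt, engine_mask, retry);

	skip_reset:
		/* Always undo the prepare request, reset or not. */
		for_each_engine_masked(engine, gt, engine_mask, tmp)
			gen8_engine_reset_cancel(engine);

		return ret;
	}
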
580 static int mock_reset(struct intel_gt *gt, in mock_reset() argument
591 static reset_func intel_get_gpu_reset(const struct intel_gt *gt) in intel_get_gpu_reset() argument
593 struct drm_i915_private *i915 = gt->i915; in intel_get_gpu_reset()
595 if (is_mock_gt(gt)) in intel_get_gpu_reset()
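
For orientation, the dispatch tying all of the *_do_reset/*_reset_engines callbacks above together is a per-platform ladder; a reconstruction along these lines, where the exact platform predicates are assumptions:

	static reset_func get_gpu_reset_sketch(const struct intel_gt *gt)
	{
		struct drm_i915_private *i915 = gt->i915;

		if (is_mock_gt(gt))
			return mock_reset;
		else if (INTEL_GEN(i915) >= 8)
			return gen8_reset_engines;
		else if (INTEL_GEN(i915) >= 6)
			return gen6_reset_engines;
		else if (INTEL_GEN(i915) >= 5)
			return ilk_do_reset;
		else if (IS_G4X(i915))
			return g4x_do_reset;
		else if (IS_G33(i915) || IS_PINEVIEW(i915))
			return g33_do_reset;
		else if (INTEL_GEN(i915) >= 3)
			return i915_do_reset;
		else
			return NULL;	/* no reset support on this platform */
	}
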
613 int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask) in __intel_gt_reset() argument
620 reset = intel_get_gpu_reset(gt); in __intel_gt_reset()
628 intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); in __intel_gt_reset()
630 GT_TRACE(gt, "engine_mask=%x\n", engine_mask); in __intel_gt_reset()
632 ret = reset(gt, engine_mask, retry); in __intel_gt_reset()
635 intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); in __intel_gt_reset()
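
The __intel_gt_reset matches fit together as a forcewake-guarded retry loop around the callback chosen above; a sketch filling in the control flow, with the retry policy an assumption:

	int gt_reset_retry_sketch(struct intel_gt *gt, intel_engine_mask_t engine_mask)
	{
		reset_func reset = intel_get_gpu_reset(gt);
		int ret = -ETIMEDOUT;
		int retry;

		if (!reset)
			return -ENODEV;

		/* Hold forcewake across every reset attempt. */
		intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
		for (retry = 0; ret == -ETIMEDOUT && retry < 3; retry++) {
			GT_TRACE(gt, "engine_mask=%x\n", engine_mask);
			ret = reset(gt, engine_mask, retry);
		}
		intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

		return ret;
	}
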
640 bool intel_has_gpu_reset(const struct intel_gt *gt) in intel_has_gpu_reset() argument
642 if (!gt->i915->params.reset) in intel_has_gpu_reset()
645 return intel_get_gpu_reset(gt); in intel_has_gpu_reset()
648 bool intel_has_reset_engine(const struct intel_gt *gt) in intel_has_reset_engine() argument
650 if (gt->i915->params.reset < 2) in intel_has_reset_engine()
653 return INTEL_INFO(gt->i915)->has_reset_engine; in intel_has_reset_engine()
656 int intel_reset_guc(struct intel_gt *gt) in intel_reset_guc() argument
659 INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC; in intel_reset_guc()
662 GEM_BUG_ON(!HAS_GT_UC(gt->i915)); in intel_reset_guc()
664 intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); in intel_reset_guc()
665 ret = gen6_hw_domain_reset(gt, guc_domain); in intel_reset_guc()
666 intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); in intel_reset_guc()
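
Stitched together, the intel_reset_guc matches above are nearly the whole function; only the declarations and the return are filled in below:

	int intel_reset_guc(struct intel_gt *gt)
	{
		u32 guc_domain =
			INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
		int ret;

		GEM_BUG_ON(!HAS_GT_UC(gt->i915));

		/* GuC has its own GDRST domain; reuse the gen6+ domain reset. */
		intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
		ret = gen6_hw_domain_reset(gt, guc_domain);
		intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

		return ret;
	}
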
689 static void revoke_mmaps(struct intel_gt *gt) in revoke_mmaps() argument
693 for (i = 0; i < gt->ggtt->num_fences; i++) { in revoke_mmaps()
698 vma = READ_ONCE(gt->ggtt->fence_regs[i].vma); in revoke_mmaps()
705 GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]); in revoke_mmaps()
713 unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping, in revoke_mmaps()
720 static intel_engine_mask_t reset_prepare(struct intel_gt *gt) in reset_prepare() argument
726 for_each_engine(engine, gt, id) { in reset_prepare()
732 intel_uc_reset_prepare(&gt->uc); in reset_prepare()
737 static void gt_revoke(struct intel_gt *gt) in gt_revoke() argument
739 revoke_mmaps(gt); in gt_revoke()
742 static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) in gt_reset() argument
752 err = i915_ggtt_enable_hw(gt->i915); in gt_reset()
756 for_each_engine(engine, gt, id) in gt_reset()
759 intel_ggtt_restore_fences(gt->ggtt); in gt_reset()
773 static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake) in reset_finish() argument
778 for_each_engine(engine, gt, id) { in reset_finish()
801 static void __intel_gt_set_wedged(struct intel_gt *gt) in __intel_gt_set_wedged() argument
807 if (test_bit(I915_WEDGED, &gt->reset.flags)) in __intel_gt_set_wedged()
810 GT_TRACE(gt, "start\n"); in __intel_gt_set_wedged()
817 awake = reset_prepare(gt); in __intel_gt_set_wedged()
820 if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) in __intel_gt_set_wedged()
821 __intel_gt_reset(gt, ALL_ENGINES); in __intel_gt_set_wedged()
823 for_each_engine(engine, gt, id) in __intel_gt_set_wedged()
832 set_bit(I915_WEDGED, &gt->reset.flags); in __intel_gt_set_wedged()
835 for_each_engine(engine, gt, id) in __intel_gt_set_wedged()
839 reset_finish(gt, awake); in __intel_gt_set_wedged()
841 GT_TRACE(gt, "end\n"); in __intel_gt_set_wedged()
844 void intel_gt_set_wedged(struct intel_gt *gt) in intel_gt_set_wedged() argument
848 if (test_bit(I915_WEDGED, &gt->reset.flags)) in intel_gt_set_wedged()
851 wakeref = intel_runtime_pm_get(gt->uncore->rpm); in intel_gt_set_wedged()
852 mutex_lock(&gt->reset.mutex); in intel_gt_set_wedged()
860 for_each_engine(engine, gt, id) { in intel_gt_set_wedged()
868 __intel_gt_set_wedged(gt); in intel_gt_set_wedged()
870 mutex_unlock(&gt->reset.mutex); in intel_gt_set_wedged()
871 intel_runtime_pm_put(gt->uncore->rpm, wakeref); in intel_gt_set_wedged()
874 static bool __intel_gt_unset_wedged(struct intel_gt *gt) in __intel_gt_unset_wedged() argument
876 struct intel_gt_timelines *timelines = &gt->timelines; in __intel_gt_unset_wedged()
880 if (!test_bit(I915_WEDGED, &gt->reset.flags)) in __intel_gt_unset_wedged()
884 if (intel_gt_has_unrecoverable_error(gt)) in __intel_gt_unset_wedged()
887 GT_TRACE(gt, "start\n"); in __intel_gt_unset_wedged()
926 ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */ in __intel_gt_unset_wedged()
927 if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) in __intel_gt_unset_wedged()
928 ok = __intel_gt_reset(gt, ALL_ENGINES) == 0; in __intel_gt_unset_wedged()
934 add_taint_for_CI(gt->i915, TAINT_WARN); in __intel_gt_unset_wedged()
947 intel_engines_reset_default_submission(gt); in __intel_gt_unset_wedged()
949 GT_TRACE(gt, "end\n"); in __intel_gt_unset_wedged()
952 clear_bit(I915_WEDGED, &gt->reset.flags); in __intel_gt_unset_wedged()
957 bool intel_gt_unset_wedged(struct intel_gt *gt) in intel_gt_unset_wedged() argument
961 mutex_lock(&gt->reset.mutex); in intel_gt_unset_wedged()
962 result = __intel_gt_unset_wedged(gt); in intel_gt_unset_wedged()
963 mutex_unlock(&gt->reset.mutex); in intel_gt_unset_wedged()
968 static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) in do_reset() argument
972 gt_revoke(gt); in do_reset()
974 err = __intel_gt_reset(gt, ALL_ENGINES); in do_reset()
977 err = __intel_gt_reset(gt, ALL_ENGINES); in do_reset()
982 return gt_reset(gt, stalled_mask); in do_reset()
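
do_reset matching __intel_gt_reset twice (source lines 974 and 977) reflects a retry with backoff after revoking mmaps; a reconstruction, with the retry count and sleep scaling assumed:

	static int do_reset_sketch(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
	{
		int err, i;

		/* Kick userspace GGTT mmaps out before the hardware resets. */
		gt_revoke(gt);

		err = __intel_gt_reset(gt, ALL_ENGINES);
		for (i = 0; err && i < 3; i++) {
			msleep(10 * (i + 1));	/* back off between attempts */
			err = __intel_gt_reset(gt, ALL_ENGINES);
		}
		if (err)
			return err;

		return gt_reset(gt, stalled_mask);
	}
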
985 static int resume(struct intel_gt *gt) in resume() argument
991 for_each_engine(engine, gt, id) { in resume()
1002 * @gt: #intel_gt to reset
1017 void intel_gt_reset(struct intel_gt *gt, in intel_gt_reset() argument
1024 GT_TRACE(gt, "flags=%lx\n", gt->reset.flags); in intel_gt_reset()
1027 GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags)); in intel_gt_reset()
1028 mutex_lock(&gt->reset.mutex); in intel_gt_reset()
1031 if (!__intel_gt_unset_wedged(gt)) in intel_gt_reset()
1035 drm_notice(&gt->i915->drm, in intel_gt_reset()
1037 atomic_inc(&gt->i915->gpu_error.reset_count); in intel_gt_reset()
1039 awake = reset_prepare(gt); in intel_gt_reset()
1041 if (!intel_has_gpu_reset(gt)) { in intel_gt_reset()
1042 if (gt->i915->params.reset) in intel_gt_reset()
1043 drm_err(&gt->i915->drm, "GPU reset not supported\n"); in intel_gt_reset()
1045 drm_dbg(&gt->i915->drm, "GPU reset disabled\n"); in intel_gt_reset()
1049 if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) in intel_gt_reset()
1050 intel_runtime_pm_disable_interrupts(gt->i915); in intel_gt_reset()
1052 if (do_reset(gt, stalled_mask)) { in intel_gt_reset()
1053 drm_err(&gt->i915->drm, "Failed to reset chip\n"); in intel_gt_reset()
1057 if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) in intel_gt_reset()
1058 intel_runtime_pm_enable_interrupts(gt->i915); in intel_gt_reset()
1060 intel_overlay_reset(gt->i915); in intel_gt_reset()
1070 ret = intel_gt_init_hw(gt); in intel_gt_reset()
1072 drm_err(&gt->i915->drm, in intel_gt_reset()
1078 ret = resume(gt); in intel_gt_reset()
1083 reset_finish(gt, awake); in intel_gt_reset()
1085 mutex_unlock(&gt->reset.mutex); in intel_gt_reset()
1101 add_taint_for_CI(gt->i915, TAINT_WARN); in intel_gt_reset()
1103 __intel_gt_set_wedged(gt); in intel_gt_reset()
1109 return __intel_gt_reset(engine->gt, engine->mask); in intel_gt_reset_engine()
1127 struct intel_gt *gt = engine->gt; in intel_engine_reset() local
1131 ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags); in intel_engine_reset()
1132 GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags)); in intel_engine_reset()
1147 ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine); in intel_engine_reset()
1150 drm_dbg(&gt->i915->drm, "%sFailed to reset %s, ret=%d\n", in intel_engine_reset()
1176 static void intel_gt_reset_global(struct intel_gt *gt, in intel_gt_reset_global() argument
1180 struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj; in intel_gt_reset_global()
1188 drm_dbg(&gt->i915->drm, "resetting chip, engines=%x\n", engine_mask); in intel_gt_reset_global()
1192 intel_wedge_on_timeout(&w, gt, 5 * HZ) { in intel_gt_reset_global()
1193 intel_prepare_reset(gt->i915); in intel_gt_reset_global()
1196 synchronize_srcu_expedited(&gt->reset.backoff_srcu); in intel_gt_reset_global()
1198 intel_gt_reset(gt, engine_mask, reason); in intel_gt_reset_global()
1200 intel_finish_reset(gt->i915); in intel_gt_reset_global()
1203 if (!test_bit(I915_WEDGED, &gt->reset.flags)) in intel_gt_reset_global()
1209 * @gt: the intel_gt
1220 void intel_gt_handle_error(struct intel_gt *gt, in intel_gt_handle_error() argument
1248 wakeref = intel_runtime_pm_get(gt->uncore->rpm); in intel_gt_handle_error()
1250 engine_mask &= gt->info.engine_mask; in intel_gt_handle_error()
1253 i915_capture_error_state(gt->i915); in intel_gt_handle_error()
1254 intel_gt_clear_error_registers(gt, engine_mask); in intel_gt_handle_error()
1261 if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) { in intel_gt_handle_error()
1262 for_each_engine_masked(engine, gt, engine_mask, tmp) { in intel_gt_handle_error()
1265 &gt->reset.flags)) in intel_gt_handle_error()
1272 &gt->reset.flags); in intel_gt_handle_error()
1280 if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) { in intel_gt_handle_error()
1281 wait_event(gt->reset.queue, in intel_gt_handle_error()
1282 !test_bit(I915_RESET_BACKOFF, &gt->reset.flags)); in intel_gt_handle_error()
1290 for_each_engine(engine, gt, tmp) { in intel_gt_handle_error()
1292 &gt->reset.flags)) in intel_gt_handle_error()
1293 wait_on_bit(&gt->reset.flags, in intel_gt_handle_error()
1298 intel_gt_reset_global(gt, engine_mask, msg); in intel_gt_handle_error()
1300 for_each_engine(engine, gt, tmp) in intel_gt_handle_error()
1302 &gt->reset.flags); in intel_gt_handle_error()
1303 clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags); in intel_gt_handle_error()
1305 wake_up_all(&gt->reset.queue); in intel_gt_handle_error()
1308 intel_runtime_pm_put(gt->uncore->rpm, wakeref); in intel_gt_handle_error()
1311 int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu) in intel_gt_reset_trylock() argument
1313 might_lock(&gt->reset.backoff_srcu); in intel_gt_reset_trylock()
1317 while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) { in intel_gt_reset_trylock()
1320 if (wait_event_interruptible(gt->reset.queue, in intel_gt_reset_trylock()
1322 &gt->reset.flags))) in intel_gt_reset_trylock()
1327 *srcu = srcu_read_lock(&gt->reset.backoff_srcu); in intel_gt_reset_trylock()
1333 void intel_gt_reset_unlock(struct intel_gt *gt, int tag) in intel_gt_reset_unlock() argument
1334 __releases(&gt->reset.backoff_srcu) in intel_gt_reset_unlock()
1336 srcu_read_unlock(&gt->reset.backoff_srcu, tag); in intel_gt_reset_unlock()
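
A hedged usage sketch for the trylock/unlock pair above (the caller is hypothetical): take the backoff SRCU read lock unless a reset is pending, do the hardware access, then drop the lock with the returned tag:

	/* Hypothetical caller: keep a hardware access from racing a GPU reset. */
	static int example_access_hw(struct intel_gt *gt)
	{
		int srcu, err;

		err = intel_gt_reset_trylock(gt, &srcu);
		if (err)	/* interrupted while a reset was in backoff */
			return err;

		/* ... hardware access that must not overlap a reset ... */

		intel_gt_reset_unlock(gt, srcu);
		return 0;
	}
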
1339 int intel_gt_terminally_wedged(struct intel_gt *gt) in intel_gt_terminally_wedged() argument
1343 if (!intel_gt_is_wedged(gt)) in intel_gt_terminally_wedged()
1346 if (intel_gt_has_unrecoverable_error(gt)) in intel_gt_terminally_wedged()
1350 if (wait_event_interruptible(gt->reset.queue, in intel_gt_terminally_wedged()
1352 &gt->reset.flags))) in intel_gt_terminally_wedged()
1355 return intel_gt_is_wedged(gt) ? -EIO : 0; in intel_gt_terminally_wedged()
1358 void intel_gt_set_wedged_on_init(struct intel_gt *gt) in intel_gt_set_wedged_on_init() argument
1362 intel_gt_set_wedged(gt); in intel_gt_set_wedged_on_init()
1363 set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags); in intel_gt_set_wedged_on_init()
1366 add_taint_for_CI(gt->i915, TAINT_WARN); in intel_gt_set_wedged_on_init()
1369 void intel_gt_set_wedged_on_fini(struct intel_gt *gt) in intel_gt_set_wedged_on_fini() argument
1371 intel_gt_set_wedged(gt); in intel_gt_set_wedged_on_fini()
1372 set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags); in intel_gt_set_wedged_on_fini()
1375 void intel_gt_init_reset(struct intel_gt *gt) in intel_gt_init_reset() argument
1377 init_waitqueue_head(&gt->reset.queue); in intel_gt_init_reset()
1378 mutex_init(&gt->reset.mutex); in intel_gt_init_reset()
1379 init_srcu_struct(&gt->reset.backoff_srcu); in intel_gt_init_reset()
1382 __set_bit(I915_WEDGED, &gt->reset.flags); in intel_gt_init_reset()
1385 void intel_gt_fini_reset(struct intel_gt *gt) in intel_gt_fini_reset() argument
1387 cleanup_srcu_struct(&gt->reset.backoff_srcu); in intel_gt_fini_reset()
1394 drm_err(&w->gt->i915->drm, in intel_wedge_me()
1397 intel_gt_set_wedged(w->gt); in intel_wedge_me()
1401 struct intel_gt *gt, in __intel_init_wedge() argument
1405 w->gt = gt; in __intel_init_wedge()
1416 w->gt = NULL; in __intel_fini_wedge()