Lines Matching full:engine
42 struct intel_engine_cs *engine = rq->engine; in engine_skip_context() local
48 lockdep_assert_held(&engine->active.lock); in engine_skip_context()
49 list_for_each_entry_continue(rq, &engine->active.requests, sched.link) in engine_skip_context()
120 rq->engine->name, in __i915_request_reset()
285 struct intel_engine_cs *engine; in gen6_reset_engines() local
301 for_each_engine_masked(engine, gt->i915, engine_mask, tmp) { in gen6_reset_engines()
302 GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); in gen6_reset_engines()
303 hw_mask |= hw_engine_mask[engine->id]; in gen6_reset_engines()
310 static u32 gen11_lock_sfc(struct intel_engine_cs *engine) in gen11_lock_sfc() argument
312 struct intel_uncore *uncore = engine->uncore; in gen11_lock_sfc()
313 u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access; in gen11_lock_sfc()
320 switch (engine->class) { in gen11_lock_sfc()
322 if ((BIT(engine->instance) & vdbox_sfc_access) == 0) in gen11_lock_sfc()
325 sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine); in gen11_lock_sfc()
328 sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine); in gen11_lock_sfc()
331 sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine); in gen11_lock_sfc()
333 sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance); in gen11_lock_sfc()
337 sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine); in gen11_lock_sfc()
340 sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine); in gen11_lock_sfc()
343 sfc_usage = GEN11_VECS_SFC_USAGE(engine); in gen11_lock_sfc()
345 sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance); in gen11_lock_sfc()
353 * Tell the engine that a software reset is going to happen. The engine in gen11_lock_sfc()
355 * remain so until we tell the engine it is safe to unlock; if currently in gen11_lock_sfc()
357 * ends up being locked to the engine we want to reset, we have to reset in gen11_lock_sfc()
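For context on the forced-lock handshake described in the comment above (source lines 353-357), here is a minimal sketch of the sequence. It assumes the usual *_FORCED_LOCK_BIT / *_LOCK_ACK_BIT / *_USAGE_BIT masks and the driver's rmw_set_fw() / __intel_wait_for_register_fw() helpers, none of which appear in these matches; treat it as an illustration of the flow, not the in-tree implementation.

	/* Ask the SFC arbiter for a forced lock on behalf of this engine. */
	rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);

	/* Wait for the hardware to acknowledge the lock request. */
	if (__intel_wait_for_register_fw(uncore, sfc_forced_lock_ack,
					 sfc_lock_ack_bit, sfc_lock_ack_bit,
					 1000, 0, NULL))
		return 0;	/* lock not granted: leave the SFC out of the reset mask */

	/* Only include the SFC in the reset mask if this engine was using it. */
	if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
		return sfc_reset_bit;

	return 0;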
377 static void gen11_unlock_sfc(struct intel_engine_cs *engine) in gen11_unlock_sfc() argument
379 struct intel_uncore *uncore = engine->uncore; in gen11_unlock_sfc()
380 u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access; in gen11_unlock_sfc()
384 switch (engine->class) { in gen11_unlock_sfc()
386 if ((BIT(engine->instance) & vdbox_sfc_access) == 0) in gen11_unlock_sfc()
389 sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine); in gen11_unlock_sfc()
394 sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine); in gen11_unlock_sfc()
419 struct intel_engine_cs *engine; in gen11_reset_engines() local
428 for_each_engine_masked(engine, gt->i915, engine_mask, tmp) { in gen11_reset_engines()
429 GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); in gen11_reset_engines()
430 hw_mask |= hw_engine_mask[engine->id]; in gen11_reset_engines()
431 hw_mask |= gen11_lock_sfc(engine); in gen11_reset_engines()
438 for_each_engine_masked(engine, gt->i915, engine_mask, tmp) in gen11_reset_engines()
439 gen11_unlock_sfc(engine); in gen11_reset_engines()
444 static int gen8_engine_reset_prepare(struct intel_engine_cs *engine) in gen8_engine_reset_prepare() argument
446 struct intel_uncore *uncore = engine->uncore; in gen8_engine_reset_prepare()
447 const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base); in gen8_engine_reset_prepare()
475 engine->name, request, in gen8_engine_reset_prepare()
481 static void gen8_engine_reset_cancel(struct intel_engine_cs *engine) in gen8_engine_reset_cancel() argument
483 intel_uncore_write_fw(engine->uncore, in gen8_engine_reset_cancel()
484 RING_RESET_CTL(engine->mmio_base), in gen8_engine_reset_cancel()
492 struct intel_engine_cs *engine; in gen8_reset_engines() local
497 for_each_engine_masked(engine, gt->i915, engine_mask, tmp) { in gen8_reset_engines()
498 ret = gen8_engine_reset_prepare(engine); in gen8_reset_engines()
523 for_each_engine_masked(engine, gt->i915, engine_mask, tmp) in gen8_reset_engines()
524 gen8_engine_reset_cancel(engine); in gen8_reset_engines()
610 static void reset_prepare_engine(struct intel_engine_cs *engine) in reset_prepare_engine() argument
613 * During the reset sequence, we must prevent the engine from in reset_prepare_engine()
615 * the engine, if it does enter RC6 during the reset, the state in reset_prepare_engine()
619 intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); in reset_prepare_engine()
620 engine->reset.prepare(engine); in reset_prepare_engine()
651 struct intel_engine_cs *engine; in reset_prepare() local
655 for_each_engine(engine, gt->i915, id) { in reset_prepare()
656 if (intel_engine_pm_get_if_awake(engine)) in reset_prepare()
657 awake |= engine->mask; in reset_prepare()
658 reset_prepare_engine(engine); in reset_prepare()
673 struct intel_engine_cs *engine; in gt_reset() local
685 for_each_engine(engine, gt->i915, id) in gt_reset()
686 __intel_engine_reset(engine, stalled_mask & engine->mask); in gt_reset()
693 static void reset_finish_engine(struct intel_engine_cs *engine) in reset_finish_engine() argument
695 engine->reset.finish(engine); in reset_finish_engine()
696 intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); in reset_finish_engine()
698 intel_engine_signal_breadcrumbs(engine); in reset_finish_engine()
703 struct intel_engine_cs *engine; in reset_finish() local
706 for_each_engine(engine, gt->i915, id) { in reset_finish()
707 reset_finish_engine(engine); in reset_finish()
708 if (awake & engine->mask) in reset_finish()
709 intel_engine_pm_put(engine); in reset_finish()
715 struct intel_engine_cs *engine = request->engine; in nop_submit_request() local
719 engine->name, request->fence.context, request->fence.seqno); in nop_submit_request()
722 spin_lock_irqsave(&engine->active.lock, flags); in nop_submit_request()
725 spin_unlock_irqrestore(&engine->active.lock, flags); in nop_submit_request()
727 intel_engine_queue_breadcrumbs(engine); in nop_submit_request()
732 struct intel_engine_cs *engine; in __intel_gt_set_wedged() local
742 for_each_engine(engine, gt->i915, id) in __intel_gt_set_wedged()
743 intel_engine_dump(engine, &p, "%s\n", engine->name); in __intel_gt_set_wedged()
759 for_each_engine(engine, gt->i915, id) in __intel_gt_set_wedged()
760 engine->submit_request = nop_submit_request; in __intel_gt_set_wedged()
771 for_each_engine(engine, gt->i915, id) in __intel_gt_set_wedged()
772 engine->cancel_requests(engine); in __intel_gt_set_wedged()
846 * engine->submit_request() as we swap over. So unlike installing in __intel_gt_unset_wedged()
890 struct intel_engine_cs *engine; in resume() local
894 for_each_engine(engine, gt->i915, id) { in resume()
895 ret = engine->resume(engine); in resume()
1011 static inline int intel_gt_reset_engine(struct intel_engine_cs *engine) in intel_gt_reset_engine() argument
1013 return __intel_gt_reset(engine->gt, engine->mask); in intel_gt_reset_engine()
1017 * intel_engine_reset - reset GPU engine to recover from a hang
1018 * @engine: engine to reset
1021 * Reset a specific GPU engine. Useful if a hang is detected.
1026 * - reset engine (which will force the engine to idle)
1027 * - re-init/configure engine
1029 int intel_engine_reset(struct intel_engine_cs *engine, const char *msg) in intel_engine_reset() argument
1031 struct intel_gt *gt = engine->gt; in intel_engine_reset()
1034 GEM_TRACE("%s flags=%lx\n", engine->name, gt->reset.flags); in intel_engine_reset()
1035 GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags)); in intel_engine_reset()
1037 if (!intel_engine_pm_get_if_awake(engine)) in intel_engine_reset()
1040 reset_prepare_engine(engine); in intel_engine_reset()
1043 dev_notice(engine->i915->drm.dev, in intel_engine_reset()
1044 "Resetting %s for %s\n", engine->name, msg); in intel_engine_reset()
1045 atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]); in intel_engine_reset()
1047 if (!engine->gt->uc.guc.execbuf_client) in intel_engine_reset()
1048 ret = intel_gt_reset_engine(engine); in intel_engine_reset()
1050 ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine); in intel_engine_reset()
1054 engine->gt->uc.guc.execbuf_client ? "GuC " : "", in intel_engine_reset()
1055 engine->name, ret); in intel_engine_reset()
1064 __intel_engine_reset(engine, true); in intel_engine_reset()
1067 * The engine and its registers (and workarounds in case of render) in intel_engine_reset()
1071 ret = engine->resume(engine); in intel_engine_reset()
1074 intel_engine_cancel_stop_cs(engine); in intel_engine_reset()
1075 reset_finish_engine(engine); in intel_engine_reset()
1076 intel_engine_pm_put(engine); in intel_engine_reset()
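The kernel-doc above (source lines 1017-1027) describes intel_engine_reset() as a self-contained recovery path, and the GEM_BUG_ON at line 1035 shows the caller must already hold the per-engine reset bit. A minimal caller sketch, mirroring the pattern visible in intel_gt_handle_error() below (lines 1166-1175) and therefore illustrative rather than authoritative; the message string is whatever the caller wants logged:

	if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
			      &gt->reset.flags)) {
		/* We now own the reset bit for this engine. */
		err = intel_engine_reset(engine, "example");

		/* Release the bit and wake anyone waiting to reset this engine. */
		clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
				      &gt->reset.flags);
	}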
1129 struct intel_engine_cs *engine; in intel_gt_handle_error() local
1162 * Try engine reset when available. We fall back to full reset if in intel_gt_handle_error()
1166 for_each_engine_masked(engine, gt->i915, engine_mask, tmp) { in intel_gt_handle_error()
1168 if (test_and_set_bit(I915_RESET_ENGINE + engine->id, in intel_gt_handle_error()
1172 if (intel_engine_reset(engine, msg) == 0) in intel_gt_handle_error()
1173 engine_mask &= ~engine->mask; in intel_gt_handle_error()
1175 clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, in intel_gt_handle_error()
1193 /* Prevent any other reset-engine attempt. */ in intel_gt_handle_error()
1194 for_each_engine(engine, gt->i915, tmp) { in intel_gt_handle_error()
1195 while (test_and_set_bit(I915_RESET_ENGINE + engine->id, in intel_gt_handle_error()
1198 I915_RESET_ENGINE + engine->id, in intel_gt_handle_error()
1204 for_each_engine(engine, gt->i915, tmp) in intel_gt_handle_error()
1205 clear_bit_unlock(I915_RESET_ENGINE + engine->id, in intel_gt_handle_error()