Lines matching "engine" in drivers/gpu/drm/i915/gt/intel_engine_pm.c (i915 engine runtime power management), grouped by function:
In dbg_poison_ce():

        int type = i915_coherent_map_type(ce->engine->i915, obj, true);
In __engine_unpark():

        struct intel_engine_cs *engine =
                container_of(wf, typeof(*engine), wakeref);
        ...
        ENGINE_TRACE(engine, "\n");

        intel_gt_pm_get(engine->gt);

        /* Discard stale context state from across idling */
        ce = engine->kernel_context;
        ...
                /* Flush all pending HW writes before we touch the context */
                intel_engine_flush_submission(engine);
        ...
        if (engine->unpark)
                engine->unpark(engine);

        intel_breadcrumbs_unpark(engine->breadcrumbs);
        intel_engine_unpark_heartbeat(engine);
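The park and unpark callbacks receive only the embedded intel_wakeref and recover the engine with container_of(). A self-contained userspace sketch of that idiom, with invented stand-in types:

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct wakeref { int count; };           /* stands in for intel_wakeref */

struct engine {                          /* stands in for intel_engine_cs */
        const char *name;
        struct wakeref wakeref;          /* embedded, as in the driver */
};

int main(void)
{
        struct engine e = { .name = "rcs0" };
        struct wakeref *wf = &e.wakeref; /* all a callback would receive */

        /* Subtract the member offset to find the containing struct. */
        struct engine *engine = container_of(wf, struct engine, wakeref);
        printf("%s\n", engine->name);    /* prints "rcs0" */
        return 0;
}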
In duration():

        ewma__engine_latency_add(&rq->engine->latency, ...);
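duration() is a completion callback that folds the measured request latency into a per-engine moving average; the ewma__engine_latency_add() helper is generated by the kernel's DECLARE_EWMA() macro. Below is a standalone sketch of the same fixed-point EWMA technique; the constants are illustrative, not taken from the driver:

#include <stdio.h>

/*
 * Fixed-point EWMA: keep the average scaled by 2^PRECISION and fold
 * each new sample in with weight 1/WEIGHT_RCP, in the spirit of the
 * helpers that linux/average.h generates.
 */
#define PRECISION       6       /* fractional bits */
#define WEIGHT_RCP      4       /* new sample weight = 1/4 */

static unsigned long ewma_add(unsigned long avg, unsigned long val)
{
        if (!avg)               /* first sample seeds the average */
                return val << PRECISION;
        return avg - (avg / WEIGHT_RCP) + ((val << PRECISION) / WEIGHT_RCP);
}

int main(void)
{
        unsigned long avg = 0, samples[] = { 100, 120, 80, 110 };

        for (int i = 0; i < 4; i++)
                avg = ewma_add(avg, samples[i]);
        printf("avg ~= %lu us\n", avg >> PRECISION); /* descale to read */
        return 0;
}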
static void
__queue_and_release_pm(struct i915_request *rq, struct intel_timeline *tl,
                       struct intel_engine_cs *engine)
{
        struct intel_gt_timelines *timelines = &engine->gt->timelines;

        ENGINE_TRACE(engine, "parking\n");
        /*
         * Serialise every retirement path with our submission: we must not
         * underflow either the engine->wakeref.counter or our
         * timeline->active_count.
         */
        ...
        __intel_wakeref_defer_park(&engine->wakeref);
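The underflow the comment worries about is an ordering bug: the parking request must not become visible to a retirement path before the reference that retirement will drop has been taken. A loose, single-threaded sketch of the acquire-before-publish rule with C11 atomics (all names invented):

#include <stdatomic.h>
#include <assert.h>

static atomic_int wakeref_count = 1;    /* we still hold one reference */

/* What a retirement path does once it can observe the request. */
static void retire(void)
{
        int old = atomic_fetch_sub(&wakeref_count, 1);
        assert(old > 0);                /* an underflow would trip here */
}

static void queue_and_release(void)
{
        /*
         * Take the reference *before* publishing; publishing first would
         * let retire() drop a count we have not yet taken.
         */
        atomic_fetch_add(&wakeref_count, 1);
        /* publish(request): from here on, retire() may fire at any time */
        retire();                               /* simulate prompt completion */
        atomic_fetch_sub(&wakeref_count, 1);    /* drop our own reference */
}

int main(void)
{
        queue_and_release();
        return 0;
}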
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
        struct intel_context *ce = engine->kernel_context;

        /* GPU is pointing to the void, as good as in the kernel context. */
        if (intel_gt_is_wedged(engine->gt))
                return true;

        GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);

        /* Already inside the kernel context, safe to power down. */
        if (engine->wakeref_serial == engine->serial)
                return true;

        /*
         * We rely on the exclusive property of __engine_park(): nothing
         * else can create a request on this engine while we park, since
         * we can only park after retiring the last request.
         *
         * A new gpu user will be waiting on the engine-pm to start their
         * engine_unpark. New waiters are predicated on engine->wakeref.count,
         * so intel_wakeref_defer_park() acts like a mutex_unlock of the
         * engine->wakeref. If we joined the timelines.active_list before
         * incrementing engine->wakeref.count, we could see the request
         * completion and retire it, causing an underflow of the
         * engine->wakeref.
         */
        engine->wakeref_serial = engine->serial + 1;
        ...
        if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
                ...     /* use a fence callback to measure the parking duration */
        }
        __queue_and_release_pm(rq, ce->timeline, engine);
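The wakeref_serial bookkeeping above implements a cheap idleness test: engine->serial counts requests emitted on the engine, and wakeref_serial records the value expected once the parking request itself has run, so equality at the next park attempt means nothing else was submitted. A sketch of the idiom with invented types:

#include <stdbool.h>
#include <stdio.h>

struct engine {
        unsigned int serial;            /* bumped on every submission */
        unsigned int wakeref_serial;    /* serial at our last park switch */
};

static bool already_idle(struct engine *e)
{
        /* Nothing submitted since the parking request: safe to park. */
        return e->wakeref_serial == e->serial;
}

static void submit(struct engine *e) { e->serial++; }

static void queue_parking_request(struct engine *e)
{
        /*
         * The parking request itself bumps serial when emitted, so record
         * serial + 1 as the "idle" watermark before submitting it.
         */
        e->wakeref_serial = e->serial + 1;
        submit(e);      /* the switch-to-kernel-context request */
}

int main(void)
{
        struct engine e = { 0 };

        submit(&e);                     /* user work */
        queue_parking_request(&e);
        printf("%s\n", already_idle(&e) ? "idle" : "busy"); /* idle */
        submit(&e);                     /* new user work wakes the engine */
        printf("%s\n", already_idle(&e) ? "idle" : "busy"); /* busy */
        return 0;
}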
static void call_idle_barriers(struct intel_engine_cs *engine)
{
        ...
        llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
        ...
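llist_del_all() detaches the entire lock-free list in one atomic exchange, and llist_for_each_safe() then walks the now-private chain, caching each next pointer so a node can be consumed mid-walk. A userspace analogue using C11 atomics (types invented for the sketch):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;
        int id;
};

static _Atomic(struct node *) barrier_tasks;    /* list head */

/* Lock-free push, as llist_add() does. */
static void push(struct node *n)
{
        n->next = atomic_load(&barrier_tasks);
        while (!atomic_compare_exchange_weak(&barrier_tasks, &n->next, n))
                ;       /* failed CAS refreshed n->next with the new head */
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));
                n->id = i;
                push(n);
        }

        /* llist_del_all() analogue: take the whole chain at once. */
        struct node *node = atomic_exchange(&barrier_tasks, NULL);

        /* llist_for_each_safe() analogue: 'next' survives freeing. */
        for (struct node *next; node; node = next) {
                next = node->next;
                printf("barrier %d\n", node->id);
                free(node);
        }
        return 0;
}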
In __engine_park():

        struct intel_engine_cs *engine =
                container_of(wf, typeof(*engine), wakeref);

        engine->saturated = 0;
        ...
        if (!switch_to_kernel_context(engine))
                return -EBUSY;

        ENGINE_TRACE(engine, "parked\n");

        call_idle_barriers(engine); /* cleanup after wedging */

        intel_engine_park_heartbeat(engine);
        intel_breadcrumbs_park(engine->breadcrumbs);

        /* Must be reset upon idling, or we may miss the busy wakeup. */
        GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);

        if (engine->park)
                engine->park(engine);

        intel_gt_pm_put_async(engine->gt);
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
        struct intel_runtime_pm *rpm = engine->uncore->rpm;

        intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
        intel_engine_init_heartbeat(engine);
}
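wf_ops ties the pieces together: the wakeref invokes __engine_unpark() on the first get and __engine_park() on the last put. A minimal sketch of that first-get/last-put callback pattern; the real intel_wakeref uses an atomic count plus a mutex and can defer the final put, all of which this omits:

#include <stdio.h>

struct wakeref_ops {
        int (*get)(void);       /* first reference: unpark */
        int (*put)(void);       /* last reference: park */
};

struct wakeref {
        int count;              /* the driver uses an atomic + mutex */
        const struct wakeref_ops *ops;
};

static void wakeref_get(struct wakeref *wf)
{
        if (wf->count++ == 0)   /* 0 -> 1: power up */
                wf->ops->get();
}

static void wakeref_put(struct wakeref *wf)
{
        if (--wf->count == 0)   /* 1 -> 0: power down */
                wf->ops->put();
}

static int unpark(void) { puts("unpark"); return 0; }
static int park(void)   { puts("park");   return 0; }

static const struct wakeref_ops wf_ops = { .get = unpark, .put = park };

int main(void)
{
        struct wakeref wf = { .count = 0, .ops = &wf_ops };

        wakeref_get(&wf);       /* unpark */
        wakeref_get(&wf);       /* nested: no callback */
        wakeref_put(&wf);
        wakeref_put(&wf);       /* park */
        return 0;
}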