Lines Matching full:engine

49 * shouldn't we just need a set of those per engine command streamer? This is
51 * rings, the engine cs shifts to a new "ring buffer" with every context
66 * Now that ringbuffers belong per-context (and not per-engine, like before)
67 * and that contexts are uniquely tied to a given engine (and not reusable,
70 * - One ringbuffer per-engine inside each context.
71 * - One backing object per-engine inside each context.
75 * more complex, because we don't know at creation time which engine is going
80 * gets populated for a given engine once we receive an execbuffer. If later
82 * engine, we allocate/populate a new ringbuffer and context backing object and
99 * for the appropriate engine: this structure contains a copy of the context's
103 * If the engine's request queue was empty before the request was added, the
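
The matched comment lines above describe the execlists layout: each context owns one ring buffer and one backing (state) object per engine, and those per-engine pieces are only allocated and populated once the first execbuffer for that engine arrives. The toy C model below restates that layout for illustration only; all names here (toy_context, toy_context_pin, TOY_MAX_ENGINES) are hypothetical and this is not the driver's real intel_context code.

#include <stdbool.h>
#include <stdlib.h>

#define TOY_MAX_ENGINES 8

struct toy_engine_state {
        void *ring;        /* per-engine ring buffer owned by the context */
        void *state_obj;   /* per-engine backing object (context image) */
        bool populated;
};

struct toy_context {
        struct toy_engine_state per_engine[TOY_MAX_ENGINES];
};

/*
 * Lazily populate the per-engine pieces the first time an execbuffer
 * arrives for that engine; later submissions on the same engine reuse
 * the same ring and backing object.
 */
static struct toy_engine_state *
toy_context_pin(struct toy_context *ctx, unsigned int engine_id)
{
        struct toy_engine_state *ce = &ctx->per_engine[engine_id];

        if (!ce->populated) {
                ce->ring = malloc(4096);
                ce->state_obj = malloc(4096);
                ce->populated = true;
        }
        return ce;
}
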
186 * We allow only a single request through the virtual engine at a time
191 * scheduling -- each real engine takes the next available request
198 * engine, sorted by priority. Here we preallocate the nodes we need
199 * for the virtual engine, indexed by physical_engine->id.
209 * If we receive a submit-fence from a master engine, we will only
218 /* And finally, which physical engines this virtual engine maps onto. */
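
The virtual-engine comments matched above (a single request in flight, preallocated queue nodes indexed by physical_engine->id, bonds keyed by the master engine of a submit-fence, and the sibling list) describe a piece of bookkeeping. A rough standalone sketch of that shape follows; every name in it (toy_virtual_engine, toy_ve_node, toy_ve_bond, and so on) is an assumption for illustration, not the driver's actual definition.

struct toy_engine;                      /* stand-in for a physical engine */
struct toy_request;

struct toy_rb_node { struct toy_rb_node *parent, *left, *right; };

#define TOY_MAX_ENGINES 8

struct toy_virtual_engine {
        struct toy_request *request;    /* only one request in flight at a time */

        /*
         * One queue node per physical engine, indexed by engine id, so the
         * virtual engine can sit in every sibling's priority tree at once.
         */
        struct toy_ve_node {
                struct toy_rb_node rb;
                int prio;
        } nodes[TOY_MAX_ENGINES];

        /*
         * Bonds: when a submit-fence arrives from a given master engine,
         * only the siblings in sibling_mask may execute the request.
         */
        struct toy_ve_bond {
                const struct toy_engine *master;
                unsigned long sibling_mask;
        } *bonds;
        unsigned int num_bonds;

        /* And finally, the physical engines this virtual engine maps onto. */
        unsigned int num_siblings;
        struct toy_engine *siblings[];
};
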
223 static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine) in to_virtual_engine() argument
225 GEM_BUG_ON(!intel_engine_is_virtual(engine)); in to_virtual_engine()
226 return container_of(engine, struct virtual_engine, base); in to_virtual_engine()
230 struct intel_engine_cs *engine);
234 struct intel_engine_cs *engine,
244 static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine) in intel_hws_preempt_address() argument
246 return (i915_ggtt_offset(engine->status_page.vma) + in intel_hws_preempt_address()
251 ring_set_paused(const struct intel_engine_cs *engine, int state) in ring_set_paused() argument
255 * engine->emit_fini_breadcrumb. If the dword is true, in ring_set_paused()
259 engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state; in ring_set_paused()
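
The lines above are the CPU side of the pause mechanism: ring_set_paused() writes a dword in the HW status page (I915_GEM_HWS_PREEMPT), and the fragment suggests the semaphore emitted in engine->emit_fini_breadcrumb busy-waits while that dword is set. A minimal CPU-only model of that handshake, with hypothetical toy_* names, is sketched below.

#include <stdatomic.h>
#include <stdbool.h>

struct toy_status_page {
        _Atomic int preempt;    /* stands in for addr[I915_GEM_HWS_PREEMPT] */
};

/* CPU side: set or clear the pause flag in the status page. */
static void toy_ring_set_paused(struct toy_status_page *hwsp, int state)
{
        atomic_store(&hwsp->preempt, state);
}

/*
 * What the semaphore in the final breadcrumb effectively polls: the ring
 * may only complete the breadcrumb once the flag has been cleared again.
 */
static bool toy_breadcrumb_may_complete(struct toy_status_page *hwsp)
{
        return atomic_load(&hwsp->preempt) == 0;
}
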
320 static inline bool need_preempt(const struct intel_engine_cs *engine, in need_preempt() argument
326 if (!intel_engine_has_semaphores(engine)) in need_preempt()
342 if (!i915_scheduler_need_preempt(engine->execlists.queue_priority_hint, in need_preempt()
350 if (!list_is_last(&rq->sched.link, &engine->active.requests) && in need_preempt()
356 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); in need_preempt()
359 if (engine == ve->siblings[0]) { /* only preempt one sibling */ in need_preempt()
383 return queue_prio(&engine->execlists) > last_prio; in need_preempt()
421 * bits 48-53: engine instance
424 * bits 61-63: engine class
426 * engine info, SW context ID and SW counter need to form a unique number
430 lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) in lrc_descriptor() argument
444 if (IS_GEN(engine->i915, 8)) in lrc_descriptor()
454 if (INTEL_GEN(engine->i915) >= 11) { in lrc_descriptor()
459 desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT; in lrc_descriptor()
464 desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT; in lrc_descriptor()
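
Per the format comment matched above (engine instance in bits 48-53, engine class in bits 61-63 on Gen11+), the two OR statements in lrc_descriptor() simply shift those fields into the upper dword. A self-contained sketch of that packing follows; the TOY_* shift constants are assumptions derived from the comment, not the driver's GEN11_*_SHIFT definitions.

#include <stdint.h>

#define TOY_ENGINE_INSTANCE_SHIFT 48    /* bits 48-53 */
#define TOY_ENGINE_CLASS_SHIFT    61    /* bits 61-63 */

/* Pack engine class/instance into the upper dword of a 64-bit descriptor. */
static uint64_t toy_lrc_descriptor(uint64_t desc, uint8_t class, uint8_t instance)
{
        desc |= (uint64_t)instance << TOY_ENGINE_INSTANCE_SHIFT;
        desc |= (uint64_t)class << TOY_ENGINE_CLASS_SHIFT;
        return desc;
}
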
481 __unwind_incomplete_requests(struct intel_engine_cs *engine) in __unwind_incomplete_requests() argument
487 lockdep_assert_held(&engine->active.lock); in __unwind_incomplete_requests()
490 &engine->active.requests, in __unwind_incomplete_requests()
502 * If this request is not native to this physical engine (i.e. in __unwind_incomplete_requests()
504 * engine so that it can be moved across onto another physical in __unwind_incomplete_requests()
505 * engine as load dictates. in __unwind_incomplete_requests()
507 owner = rq->hw_context->engine; in __unwind_incomplete_requests()
508 if (likely(owner == engine)) { in __unwind_incomplete_requests()
512 pl = i915_sched_lookup_priolist(engine, prio); in __unwind_incomplete_requests()
514 GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); in __unwind_incomplete_requests()
521 * back to the virtual engine -- we don't want the in __unwind_incomplete_requests()
523 * and cancel the breadcrumb on the virtual engine in __unwind_incomplete_requests()
524 * (instead of the old engine where it is linked)! in __unwind_incomplete_requests()
532 rq->engine = owner; in __unwind_incomplete_requests()
544 struct intel_engine_cs *engine = in execlists_unwind_incomplete_requests() local
545 container_of(execlists, typeof(*engine), execlists); in execlists_unwind_incomplete_requests()
547 return __unwind_incomplete_requests(engine); in execlists_unwind_incomplete_requests()
560 atomic_notifier_call_chain(&rq->engine->context_status_notifier, in execlists_context_status_change()
567 struct intel_engine_cs * const engine = rq->engine; in __execlists_schedule_in() local
572 intel_gt_pm_get(engine->gt); in __execlists_schedule_in()
574 intel_engine_context_in(engine); in __execlists_schedule_in()
576 return engine; in __execlists_schedule_in()
585 GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine)); in execlists_schedule_in()
596 GEM_BUG_ON(intel_context_inflight(ce) != rq->engine); in execlists_schedule_in()
611 struct intel_engine_cs * const engine) in __execlists_schedule_out() argument
615 intel_engine_context_out(engine); in __execlists_schedule_out()
617 intel_gt_pm_put(engine->gt); in __execlists_schedule_out()
620 * If this is part of a virtual engine, its next request may in __execlists_schedule_out()
624 * engine). Hopefully, we will already have submitted the next in __execlists_schedule_out()
628 if (ce->engine != engine) in __execlists_schedule_out()
699 const struct intel_engine_cs *engine = in trace_ports() local
700 container_of(execlists, typeof(*engine), execlists); in trace_ports()
703 engine->name, msg, in trace_ports()
746 static void execlists_submit_ports(struct intel_engine_cs *engine) in execlists_submit_ports() argument
748 struct intel_engine_execlists *execlists = &engine->execlists; in execlists_submit_ports()
761 GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); in execlists_submit_ports()
824 struct intel_engine_cs *engine) in virtual_update_register_offsets() argument
826 u32 base = engine->mmio_base; in virtual_update_register_offsets()
856 if (engine->class == RENDER_CLASS) { in virtual_update_register_offsets()
871 const struct intel_engine_cs *engine) in virtual_matches() argument
875 if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */ in virtual_matches()
882 * then. This restricts us to only using the active engine in virtual_matches()
888 if (inflight && inflight != engine) in virtual_matches()
895 struct intel_engine_cs *engine) in virtual_xfer_breadcrumbs() argument
899 /* All unattached (rq->engine == old) must already be completed */ in virtual_xfer_breadcrumbs()
904 &engine->breadcrumbs.signalers); in virtual_xfer_breadcrumbs()
905 intel_engine_queue_breadcrumbs(engine); in virtual_xfer_breadcrumbs()
943 if (w->engine != rq->engine) in defer_request()
965 static void defer_active(struct intel_engine_cs *engine) in defer_active() argument
969 rq = __unwind_incomplete_requests(engine); in defer_active()
973 defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq))); in defer_active()
977 need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq) in need_timeslice() argument
981 if (!intel_engine_has_semaphores(engine)) in need_timeslice()
984 if (list_is_last(&rq->sched.link, &engine->active.requests)) in need_timeslice()
988 engine->execlists.queue_priority_hint); in need_timeslice()
994 switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq) in switch_prio() argument
996 if (list_is_last(&rq->sched.link, &engine->active.requests)) in switch_prio()
1018 static void execlists_dequeue(struct intel_engine_cs *engine) in execlists_dequeue() argument
1020 struct intel_engine_execlists * const execlists = &engine->execlists; in execlists_dequeue()
1051 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); in execlists_dequeue()
1054 if (!rq) { /* lazily clean up after another engine handled rq */ in execlists_dequeue()
1061 if (!virtual_matches(ve, rq, engine)) { in execlists_dequeue()
1079 if (need_preempt(engine, last, rb)) { in execlists_dequeue()
1081 engine->name, in execlists_dequeue()
1093 ring_set_paused(engine, 1); in execlists_dequeue()
1102 __unwind_incomplete_requests(engine); in execlists_dequeue()
1113 } else if (need_timeslice(engine, last) && in execlists_dequeue()
1114 !timer_pending(&engine->execlists.timer)) { in execlists_dequeue()
1116 engine->name, in execlists_dequeue()
1122 ring_set_paused(engine, 1); in execlists_dequeue()
1123 defer_active(engine); in execlists_dequeue()
1150 &engine->active.requests)) in execlists_dequeue()
1167 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); in execlists_dequeue()
1182 GEM_BUG_ON(rq->engine != &ve->base); in execlists_dequeue()
1186 if (!virtual_matches(ve, rq, engine)) { in execlists_dequeue()
1197 GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n", in execlists_dequeue()
1198 engine->name, in execlists_dequeue()
1204 yesno(engine != ve->siblings[0])); in execlists_dequeue()
1211 GEM_BUG_ON(!(rq->execution_mask & engine->mask)); in execlists_dequeue()
1212 rq->engine = engine; in execlists_dequeue()
1214 if (engine != ve->siblings[0]) { in execlists_dequeue()
1219 virtual_update_register_offsets(regs, engine); in execlists_dequeue()
1222 virtual_xfer_breadcrumbs(ve, engine); in execlists_dequeue()
1225 * Move the bound engine to the top of the list in execlists_dequeue()
1232 if (ve->siblings[n] == engine) { in execlists_dequeue()
1239 GEM_BUG_ON(ve->siblings[0] != engine); in execlists_dequeue()
1249 * Hmm, we have a bunch of virtual engine requests, in execlists_dequeue()
1355 engine->name, execlists->queue_priority_hint, in execlists_dequeue()
1362 switch_prio(engine, *execlists->pending); in execlists_dequeue()
1363 execlists_submit_ports(engine); in execlists_dequeue()
1365 ring_set_paused(engine, 0); in execlists_dequeue()
1411 * bits 3-5: engine class
1412 * bits 6-11: engine instance
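
The two bit-field lines above place the engine class in bits 3-5 and the engine instance in bits 6-11 of the status dword. Purely as an illustration of that layout, a pair of decode helpers with hypothetical toy_* names is sketched below.

#include <stdint.h>

static inline unsigned int toy_csb_engine_class(uint32_t status)
{
        return (status >> 3) & 0x7;     /* bits 3-5 */
}

static inline unsigned int toy_csb_engine_instance(uint32_t status)
{
        return (status >> 6) & 0x3f;    /* bits 6-11 */
}
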
1484 static void process_csb(struct intel_engine_cs *engine) in process_csb() argument
1486 struct intel_engine_execlists * const execlists = &engine->execlists; in process_csb()
1491 GEM_BUG_ON(USES_GUC_SUBMISSION(engine->i915)); in process_csb()
1505 GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail); in process_csb()
1544 engine->name, head, in process_csb()
1547 if (INTEL_GEN(engine->i915) >= 12) in process_csb()
1573 ring_set_paused(engine, 0); in process_csb()
1616 static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) in __execlists_submission_tasklet() argument
1618 lockdep_assert_held(&engine->active.lock); in __execlists_submission_tasklet()
1619 if (!engine->execlists.pending[0]) { in __execlists_submission_tasklet()
1621 execlists_dequeue(engine); in __execlists_submission_tasklet()
1632 struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; in execlists_submission_tasklet() local
1635 process_csb(engine); in execlists_submission_tasklet()
1636 if (!READ_ONCE(engine->execlists.pending[0])) { in execlists_submission_tasklet()
1637 spin_lock_irqsave(&engine->active.lock, flags); in execlists_submission_tasklet()
1638 __execlists_submission_tasklet(engine); in execlists_submission_tasklet()
1639 spin_unlock_irqrestore(&engine->active.lock, flags); in execlists_submission_tasklet()
1645 struct intel_engine_cs *engine = in execlists_submission_timer() local
1646 from_timer(engine, timer, execlists.timer); in execlists_submission_timer()
1649 tasklet_hi_schedule(&engine->execlists.tasklet); in execlists_submission_timer()
1652 static void queue_request(struct intel_engine_cs *engine, in queue_request() argument
1657 list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio)); in queue_request()
1660 static void __submit_queue_imm(struct intel_engine_cs *engine) in __submit_queue_imm() argument
1662 struct intel_engine_execlists * const execlists = &engine->execlists; in __submit_queue_imm()
1665 return; /* defer until we restart the engine following reset */ in __submit_queue_imm()
1668 __execlists_submission_tasklet(engine); in __submit_queue_imm()
1673 static void submit_queue(struct intel_engine_cs *engine, in submit_queue() argument
1676 struct intel_engine_execlists *execlists = &engine->execlists; in submit_queue()
1682 __submit_queue_imm(engine); in submit_queue()
1687 struct intel_engine_cs *engine = request->engine; in execlists_submit_request() local
1691 spin_lock_irqsave(&engine->active.lock, flags); in execlists_submit_request()
1693 queue_request(engine, &request->sched, rq_prio(request)); in execlists_submit_request()
1695 GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); in execlists_submit_request()
1698 submit_queue(engine, request); in execlists_submit_request()
1700 spin_unlock_irqrestore(&engine->active.lock, flags); in execlists_submit_request()
1724 set_redzone(void *vaddr, const struct intel_engine_cs *engine) in set_redzone() argument
1730 vaddr += engine->context_size; in set_redzone()
1736 check_redzone(const void *vaddr, const struct intel_engine_cs *engine) in check_redzone() argument
1742 vaddr += engine->context_size; in check_redzone()
1745 dev_err_once(engine->i915->drm.dev, in check_redzone()
1747 engine->name); in check_redzone()
1753 ce->engine); in execlists_context_unpin()
1762 struct intel_engine_cs *engine) in __execlists_update_reg_state() argument
1775 if (engine->class == RENDER_CLASS) { in __execlists_update_reg_state()
1777 intel_sseu_make_rpcs(engine->i915, &ce->sseu); in __execlists_update_reg_state()
1779 i915_oa_init_reg_state(engine, ce, regs); in __execlists_update_reg_state()
1785 struct intel_engine_cs *engine) in __execlists_context_pin() argument
1798 i915_coherent_map_type(engine->i915) | in __execlists_context_pin()
1809 ce->lrc_desc = lrc_descriptor(ce, engine); in __execlists_context_pin()
1811 __execlists_update_reg_state(ce, engine); in __execlists_context_pin()
1825 return __execlists_context_pin(ce, ce->engine); in execlists_context_pin()
1830 return __execlists_context_alloc(ce, ce->engine); in execlists_context_alloc()
1844 * kernel, and are local to each engine. All other contexts will in execlists_context_reset()
1852 __execlists_update_reg_state(ce, ce->engine); in execlists_context_reset()
1902 const struct intel_engine_cs * const engine = rq->engine; in emit_pdps() local
1917 err = engine->emit_flush(rq, EMIT_FLUSH); in emit_pdps()
1922 err = engine->emit_flush(rq, EMIT_INVALIDATE); in emit_pdps()
1934 u32 base = engine->mmio_base; in emit_pdps()
1946 err = engine->emit_flush(rq, EMIT_FLUSH); in emit_pdps()
1951 return engine->emit_flush(rq, EMIT_INVALIDATE); in emit_pdps()
1970 * state of engine initialisation and liveness of the in execlists_request_alloc()
1977 ret = request->engine->emit_flush(request, EMIT_INVALIDATE); in execlists_request_alloc()
2004 gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) in gen8_emit_flush_coherentl3_wa() argument
2009 *batch++ = intel_gt_scratch_offset(engine->gt, in gen8_emit_flush_coherentl3_wa()
2024 *batch++ = intel_gt_scratch_offset(engine->gt, in gen8_emit_flush_coherentl3_wa()
2031 static u32 slm_offset(struct intel_engine_cs *engine) in slm_offset() argument
2033 return intel_gt_scratch_offset(engine->gt, in slm_offset()
2052 static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) in gen8_init_indirectctx_bb() argument
2058 if (IS_BROADWELL(engine->i915)) in gen8_init_indirectctx_bb()
2059 batch = gen8_emit_flush_coherentl3_wa(engine, batch); in gen8_init_indirectctx_bb()
2068 slm_offset(engine)); in gen8_init_indirectctx_bb()
2104 static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) in gen9_init_indirectctx_bb() argument
2132 batch = gen8_emit_flush_coherentl3_wa(engine, batch); in gen9_init_indirectctx_bb()
2137 if (HAS_POOLED_EU(engine->i915)) { in gen9_init_indirectctx_bb()
2169 gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) in gen10_init_indirectctx_bb() argument
2176 * Ensure the engine is idle prior to programming a in gen10_init_indirectctx_bb()
2204 static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) in lrc_setup_wa_ctx() argument
2210 obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_OBJ_SIZE); in lrc_setup_wa_ctx()
2214 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); in lrc_setup_wa_ctx()
2224 engine->wa_ctx.vma = vma; in lrc_setup_wa_ctx()
2232 static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine) in lrc_destroy_wa_ctx() argument
2234 i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0); in lrc_destroy_wa_ctx()
2237 typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
2239 static int intel_init_workaround_bb(struct intel_engine_cs *engine) in intel_init_workaround_bb() argument
2241 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; in intel_init_workaround_bb()
2250 if (engine->class != RENDER_CLASS) in intel_init_workaround_bb()
2253 switch (INTEL_GEN(engine->i915)) { in intel_init_workaround_bb()
2270 MISSING_CASE(INTEL_GEN(engine->i915)); in intel_init_workaround_bb()
2274 ret = lrc_setup_wa_ctx(engine); in intel_init_workaround_bb()
2296 batch_ptr = wa_bb_fn[i](engine, batch_ptr); in intel_init_workaround_bb()
2304 lrc_destroy_wa_ctx(engine); in intel_init_workaround_bb()
2309 static void enable_execlists(struct intel_engine_cs *engine) in enable_execlists() argument
2313 assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL); in enable_execlists()
2315 intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */ in enable_execlists()
2317 if (INTEL_GEN(engine->i915) >= 11) in enable_execlists()
2321 ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode); in enable_execlists()
2323 ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); in enable_execlists()
2325 ENGINE_WRITE_FW(engine, in enable_execlists()
2327 i915_ggtt_offset(engine->status_page.vma)); in enable_execlists()
2328 ENGINE_POSTING_READ(engine, RING_HWS_PGA); in enable_execlists()
2331 static bool unexpected_starting_state(struct intel_engine_cs *engine) in unexpected_starting_state() argument
2335 if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) { in unexpected_starting_state()
2343 static int execlists_resume(struct intel_engine_cs *engine) in execlists_resume() argument
2345 intel_engine_apply_workarounds(engine); in execlists_resume()
2346 intel_engine_apply_whitelist(engine); in execlists_resume()
2348 intel_mocs_init_engine(engine); in execlists_resume()
2350 intel_engine_reset_breadcrumbs(engine); in execlists_resume()
2352 if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) { in execlists_resume()
2355 intel_engine_dump(engine, &p, NULL); in execlists_resume()
2358 enable_execlists(engine); in execlists_resume()
2363 static void execlists_reset_prepare(struct intel_engine_cs *engine) in execlists_reset_prepare() argument
2365 struct intel_engine_execlists * const execlists = &engine->execlists; in execlists_reset_prepare()
2368 GEM_TRACE("%s: depth<-%d\n", engine->name, in execlists_reset_prepare()
2374 * is completed by one engine, it may then queue a request in execlists_reset_prepare()
2376 * calling engine->resume() and also writing the ELSP. in execlists_reset_prepare()
2384 spin_lock_irqsave(&engine->active.lock, flags); in execlists_reset_prepare()
2385 spin_unlock_irqrestore(&engine->active.lock, flags); in execlists_reset_prepare()
2399 intel_engine_stop_cs(engine); in execlists_reset_prepare()
2402 static void reset_csb_pointers(struct intel_engine_cs *engine) in reset_csb_pointers() argument
2404 struct intel_engine_execlists * const execlists = &engine->execlists; in reset_csb_pointers()
2407 ring_set_paused(engine, 0); in reset_csb_pointers()
2449 static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) in __execlists_reset() argument
2451 struct intel_engine_execlists * const execlists = &engine->execlists; in __execlists_reset()
2456 process_csb(engine); /* drain preemption events */ in __execlists_reset()
2459 reset_csb_pointers(engine); in __execlists_reset()
2520 if (engine->pinned_default_state) { in __execlists_reset()
2522 engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE, in __execlists_reset()
2523 engine->context_size - PAGE_SIZE); in __execlists_reset()
2525 execlists_init_reg_state(regs, ce, engine, ce->ring); in __execlists_reset()
2529 engine->name, ce->ring->head, ce->ring->tail); in __execlists_reset()
2531 __execlists_update_reg_state(ce, engine); in __execlists_reset()
2536 __unwind_incomplete_requests(engine); in __execlists_reset()
2539 static void execlists_reset(struct intel_engine_cs *engine, bool stalled) in execlists_reset() argument
2543 GEM_TRACE("%s\n", engine->name); in execlists_reset()
2545 spin_lock_irqsave(&engine->active.lock, flags); in execlists_reset()
2547 __execlists_reset(engine, stalled); in execlists_reset()
2549 spin_unlock_irqrestore(&engine->active.lock, flags); in execlists_reset()
2557 static void execlists_cancel_requests(struct intel_engine_cs *engine) in execlists_cancel_requests() argument
2559 struct intel_engine_execlists * const execlists = &engine->execlists; in execlists_cancel_requests()
2564 GEM_TRACE("%s\n", engine->name); in execlists_cancel_requests()
2567 * Before we call engine->cancel_requests(), we should have exclusive in execlists_cancel_requests()
2580 spin_lock_irqsave(&engine->active.lock, flags); in execlists_cancel_requests()
2582 __execlists_reset(engine, true); in execlists_cancel_requests()
2585 list_for_each_entry(rq, &engine->active.requests, sched.link) in execlists_cancel_requests()
2605 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); in execlists_cancel_requests()
2615 rq->engine = engine; in execlists_cancel_requests()
2632 spin_unlock_irqrestore(&engine->active.lock, flags); in execlists_cancel_requests()
2635 static void execlists_reset_finish(struct intel_engine_cs *engine) in execlists_reset_finish() argument
2637 struct intel_engine_execlists * const execlists = &engine->execlists; in execlists_reset_finish()
2651 GEM_TRACE("%s: depth->%d\n", engine->name, in execlists_reset_finish()
2676 * (engine->emit_fini_breadcrumb). in gen8_emit_bb_start()
2716 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine) in gen8_logical_ring_enable_irq() argument
2718 ENGINE_WRITE(engine, RING_IMR, in gen8_logical_ring_enable_irq()
2719 ~(engine->irq_enable_mask | engine->irq_keep_mask)); in gen8_logical_ring_enable_irq()
2720 ENGINE_POSTING_READ(engine, RING_IMR); in gen8_logical_ring_enable_irq()
2723 static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine) in gen8_logical_ring_disable_irq() argument
2725 ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask); in gen8_logical_ring_disable_irq()
2747 if (request->engine->class == VIDEO_DECODE_CLASS) in gen8_emit_flush()
2763 struct intel_engine_cs *engine = request->engine; in gen8_emit_flush_render() local
2765 intel_gt_scratch_offset(engine->gt, in gen8_emit_flush_render()
2834 struct intel_engine_cs *engine = request->engine; in gen11_emit_flush_render() local
2836 intel_gt_scratch_offset(engine->gt, in gen11_emit_flush_render()
2910 *cs++ = intel_hws_preempt_address(request->engine); in emit_preempt_busywait()
2923 if (intel_engine_has_semaphores(request->engine)) in gen8_emit_fini_breadcrumb_footer()
2976 static void execlists_park(struct intel_engine_cs *engine) in execlists_park() argument
2978 del_timer(&engine->execlists.timer); in execlists_park()
2981 void intel_execlists_set_default_submission(struct intel_engine_cs *engine) in intel_execlists_set_default_submission() argument
2983 engine->submit_request = execlists_submit_request; in intel_execlists_set_default_submission()
2984 engine->cancel_requests = execlists_cancel_requests; in intel_execlists_set_default_submission()
2985 engine->schedule = i915_schedule; in intel_execlists_set_default_submission()
2986 engine->execlists.tasklet.func = execlists_submission_tasklet; in intel_execlists_set_default_submission()
2988 engine->reset.prepare = execlists_reset_prepare; in intel_execlists_set_default_submission()
2989 engine->reset.reset = execlists_reset; in intel_execlists_set_default_submission()
2990 engine->reset.finish = execlists_reset_finish; in intel_execlists_set_default_submission()
2992 engine->park = execlists_park; in intel_execlists_set_default_submission()
2993 engine->unpark = NULL; in intel_execlists_set_default_submission()
2995 engine->flags |= I915_ENGINE_SUPPORTS_STATS; in intel_execlists_set_default_submission()
2996 if (!intel_vgpu_active(engine->i915)) { in intel_execlists_set_default_submission()
2997 engine->flags |= I915_ENGINE_HAS_SEMAPHORES; in intel_execlists_set_default_submission()
2998 if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) in intel_execlists_set_default_submission()
2999 engine->flags |= I915_ENGINE_HAS_PREEMPTION; in intel_execlists_set_default_submission()
3003 static void execlists_destroy(struct intel_engine_cs *engine) in execlists_destroy() argument
3005 intel_engine_cleanup_common(engine); in execlists_destroy()
3006 lrc_destroy_wa_ctx(engine); in execlists_destroy()
3007 kfree(engine); in execlists_destroy()
3011 logical_ring_default_vfuncs(struct intel_engine_cs *engine) in logical_ring_default_vfuncs() argument
3013 /* Default vfuncs which can be overridden by each engine. */ in logical_ring_default_vfuncs()
3015 engine->destroy = execlists_destroy; in logical_ring_default_vfuncs()
3016 engine->resume = execlists_resume; in logical_ring_default_vfuncs()
3018 engine->reset.prepare = execlists_reset_prepare; in logical_ring_default_vfuncs()
3019 engine->reset.reset = execlists_reset; in logical_ring_default_vfuncs()
3020 engine->reset.finish = execlists_reset_finish; in logical_ring_default_vfuncs()
3022 engine->cops = &execlists_context_ops; in logical_ring_default_vfuncs()
3023 engine->request_alloc = execlists_request_alloc; in logical_ring_default_vfuncs()
3025 engine->emit_flush = gen8_emit_flush; in logical_ring_default_vfuncs()
3026 engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb; in logical_ring_default_vfuncs()
3027 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb; in logical_ring_default_vfuncs()
3029 engine->set_default_submission = intel_execlists_set_default_submission; in logical_ring_default_vfuncs()
3031 if (INTEL_GEN(engine->i915) < 11) { in logical_ring_default_vfuncs()
3032 engine->irq_enable = gen8_logical_ring_enable_irq; in logical_ring_default_vfuncs()
3033 engine->irq_disable = gen8_logical_ring_disable_irq; in logical_ring_default_vfuncs()
3042 if (IS_GEN(engine->i915, 8)) in logical_ring_default_vfuncs()
3043 engine->emit_bb_start = gen8_emit_bb_start; in logical_ring_default_vfuncs()
3045 engine->emit_bb_start = gen9_emit_bb_start; in logical_ring_default_vfuncs()
3049 logical_ring_default_irqs(struct intel_engine_cs *engine) in logical_ring_default_irqs() argument
3053 if (INTEL_GEN(engine->i915) < 11) { in logical_ring_default_irqs()
3062 shift = irq_shifts[engine->id]; in logical_ring_default_irqs()
3065 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; in logical_ring_default_irqs()
3066 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; in logical_ring_default_irqs()
3069 static void rcs_submission_override(struct intel_engine_cs *engine) in rcs_submission_override() argument
3071 switch (INTEL_GEN(engine->i915)) { in rcs_submission_override()
3074 engine->emit_flush = gen11_emit_flush_render; in rcs_submission_override()
3075 engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs; in rcs_submission_override()
3078 engine->emit_flush = gen8_emit_flush_render; in rcs_submission_override()
3079 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs; in rcs_submission_override()
3084 int intel_execlists_submission_setup(struct intel_engine_cs *engine) in intel_execlists_submission_setup() argument
3086 tasklet_init(&engine->execlists.tasklet, in intel_execlists_submission_setup()
3087 execlists_submission_tasklet, (unsigned long)engine); in intel_execlists_submission_setup()
3088 timer_setup(&engine->execlists.timer, execlists_submission_timer, 0); in intel_execlists_submission_setup()
3090 logical_ring_default_vfuncs(engine); in intel_execlists_submission_setup()
3091 logical_ring_default_irqs(engine); in intel_execlists_submission_setup()
3093 if (engine->class == RENDER_CLASS) in intel_execlists_submission_setup()
3094 rcs_submission_override(engine); in intel_execlists_submission_setup()
3099 int intel_execlists_submission_init(struct intel_engine_cs *engine) in intel_execlists_submission_init() argument
3101 struct intel_engine_execlists * const execlists = &engine->execlists; in intel_execlists_submission_init()
3102 struct drm_i915_private *i915 = engine->i915; in intel_execlists_submission_init()
3103 struct intel_uncore *uncore = engine->uncore; in intel_execlists_submission_init()
3104 u32 base = engine->mmio_base; in intel_execlists_submission_init()
3107 ret = intel_engine_init_common(engine); in intel_execlists_submission_init()
3111 if (intel_init_workaround_bb(engine)) in intel_execlists_submission_init()
3130 &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; in intel_execlists_submission_init()
3133 &engine->status_page.addr[intel_hws_csb_write_index(i915)]; in intel_execlists_submission_init()
3140 reset_csb_pointers(engine); in intel_execlists_submission_init()
3145 static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) in intel_lr_indirect_ctx_offset() argument
3149 switch (INTEL_GEN(engine->i915)) { in intel_lr_indirect_ctx_offset()
3151 MISSING_CASE(INTEL_GEN(engine->i915)); in intel_lr_indirect_ctx_offset()
3180 struct intel_engine_cs *engine, in execlists_init_reg_state() argument
3184 bool rcs = engine->class == RENDER_CLASS; in execlists_init_reg_state()
3185 u32 base = engine->mmio_base; in execlists_init_reg_state()
3203 if (INTEL_GEN(engine->i915) < 11) { in execlists_init_reg_state()
3220 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; in execlists_init_reg_state()
3233 intel_lr_indirect_ctx_offset(engine) << 6; in execlists_init_reg_state()
3277 if (INTEL_GEN(engine->i915) >= 10) in execlists_init_reg_state()
3284 struct intel_engine_cs *engine, in populate_lr_context() argument
3298 set_redzone(vaddr, engine); in populate_lr_context()
3300 if (engine->default_state) { in populate_lr_context()
3309 defaults = i915_gem_object_pin_map(engine->default_state, in populate_lr_context()
3316 memcpy(vaddr + start, defaults + start, engine->context_size); in populate_lr_context()
3317 i915_gem_object_unpin_map(engine->default_state); in populate_lr_context()
3323 execlists_init_reg_state(regs, ce, engine, ring); in populate_lr_context()
3324 if (!engine->default_state) in populate_lr_context()
3332 engine->context_size); in populate_lr_context()
3338 struct intel_engine_cs *engine) in __execlists_context_alloc() argument
3347 context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE); in __execlists_context_alloc()
3357 ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size); in __execlists_context_alloc()
3361 vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL); in __execlists_context_alloc()
3370 tl = intel_timeline_create(engine->gt, NULL); in __execlists_context_alloc()
3379 ring = intel_engine_create_ring(engine, (unsigned long)ce->ring); in __execlists_context_alloc()
3385 ret = populate_lr_context(ce, ctx_obj, engine, ring); in __execlists_context_alloc()
3455 * first engine we inspect being different each time. in virtual_engine_initial_hint()
3457 * NB This does not force us to execute on this engine, it will just in virtual_engine_initial_hint()
3474 /* Note: we must use a real engine class for setting up reg state */ in virtual_context_pin()
3527 /* Invalid selection, submit to a random engine in error */ in virtual_submission_mask()
3622 struct virtual_engine *ve = to_virtual_engine(rq->engine); in virtual_submit_request()
3677 struct virtual_engine *ve = to_virtual_engine(rq->engine); in virtual_bond_execute()
3681 allowed = ~to_request(signal)->engine->mask; in virtual_bond_execute()
3683 bond = virtual_find_bond(ve, to_request(signal)->engine); in virtual_bond_execute()
3724 * depends on the saturated state of the engine. We only compute in intel_execlists_create_virtual()
3727 * to this engine. Virtual engines encompass more than one physical in intel_execlists_create_virtual()
3728 * engine and so we cannot accurately tell in advance if one of those in intel_execlists_create_virtual()
3770 * The virtual engine implementation is tightly coupled to in intel_execlists_create_virtual()
3772 * into a tree inside each physical engine. We could support in intel_execlists_create_virtual()
3792 * on the physical engine). We use the engine class as a guide in intel_execlists_create_virtual()
3797 DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n", in intel_execlists_create_virtual()
3850 struct virtual_engine *de = to_virtual_engine(dst->engine); in intel_execlists_clone_virtual()
3866 int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine, in intel_virtual_engine_attach_bond() argument
3870 struct virtual_engine *ve = to_virtual_engine(engine); in intel_virtual_engine_attach_bond()
3874 /* Sanity check the sibling is part of the virtual engine */ in intel_virtual_engine_attach_bond()
3902 void intel_execlists_show_requests(struct intel_engine_cs *engine, in intel_execlists_show_requests() argument
3909 const struct intel_engine_execlists *execlists = &engine->execlists; in intel_execlists_show_requests()
3915 spin_lock_irqsave(&engine->active.lock, flags); in intel_execlists_show_requests()
3919 list_for_each_entry(rq, &engine->active.requests, sched.link) { in intel_execlists_show_requests()
3963 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); in intel_execlists_show_requests()
3982 spin_unlock_irqrestore(&engine->active.lock, flags); in intel_execlists_show_requests()
3985 void intel_lr_context_reset(struct intel_engine_cs *engine, in intel_lr_context_reset() argument
4001 if (engine->pinned_default_state) { in intel_lr_context_reset()
4003 engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE, in intel_lr_context_reset()
4004 engine->context_size - PAGE_SIZE); in intel_lr_context_reset()
4006 execlists_init_reg_state(regs, ce, engine, ce->ring); in intel_lr_context_reset()
4013 __execlists_update_reg_state(ce, engine); in intel_lr_context_reset()