Lines Matching refs:execlists
265 struct intel_engine_execlists * const execlists = &engine->execlists; in lookup_priolist() local
270 if (unlikely(execlists->no_priolist)) in lookup_priolist()
276 parent = &execlists->queue.rb_root.rb_node; in lookup_priolist()
291 p = &execlists->default_priolist; in lookup_priolist()
306 execlists->no_priolist = true; in lookup_priolist()
314 rb_insert_color_cached(&p->node, &execlists->queue, first); in lookup_priolist()
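
The lookup_priolist() hits above are the per-priority queue lookup: walk the cached rb-tree for a bucket of the requested priority, reuse the embedded default_priolist for the common case, and fall back to collapsing everything to normal priority (no_priolist) if an allocation ever fails. A condensed sketch of that pattern; the tree-walk locals, the priolist slab allocation and the I915_PRIORITY_NORMAL handling are filled in from context rather than taken from the listing:

static struct i915_priolist *
lookup_priolist(struct intel_engine_cs *engine, int prio)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct rb_node **parent, *rb;
        struct i915_priolist *p;
        bool first = true;

        if (unlikely(execlists->no_priolist))   /* a previous alloc failed */
                prio = I915_PRIORITY_NORMAL;

find_priolist:
        /* Most positive priority first; equal priorities run in FIFO order. */
        rb = NULL;
        parent = &execlists->queue.rb_root.rb_node;
        while (*parent) {
                rb = *parent;
                p = rb_entry(rb, typeof(*p), node);
                if (prio > p->priority) {
                        parent = &rb->rb_left;
                } else if (prio < p->priority) {
                        parent = &rb->rb_right;
                        first = false;
                } else {
                        return p;               /* existing bucket */
                }
        }

        if (prio == I915_PRIORITY_NORMAL) {
                p = &execlists->default_priolist;
        } else {
                p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
                if (unlikely(!p)) {
                        /* Degrade gracefully: funnel everything through the
                         * preallocated default bucket from now on. */
                        prio = I915_PRIORITY_NORMAL;
                        execlists->no_priolist = true;
                        goto find_priolist;
                }
        }

        p->priority = prio;
        INIT_LIST_HEAD(&p->requests);
        rb_link_node(&p->node, rb, parent);
        rb_insert_color_cached(&p->node, &execlists->queue, first);

        return p;
}
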
354 execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists) in execlists_unwind_incomplete_requests() argument
357 container_of(execlists, typeof(*engine), execlists); in execlists_unwind_incomplete_requests()
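
execlists_unwind_incomplete_requests() itself is only a thin wrapper: recover the engine from the embedded execlists structure and unwind. A sketch, with the timeline locking assumed rather than shown in the listing:

void
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
{
        struct intel_engine_cs *engine =
                container_of(execlists, typeof(*engine), execlists);

        /* Assumed: serialise against the submission/interrupt path. */
        spin_lock_irq(&engine->timeline.lock);
        __unwind_incomplete_requests(engine);
        spin_unlock_irq(&engine->timeline.lock);
}
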
382 execlists_user_begin(struct intel_engine_execlists *execlists, in execlists_user_begin() argument
385 execlists_set_active_once(execlists, EXECLISTS_ACTIVE_USER); in execlists_user_begin()
389 execlists_user_end(struct intel_engine_execlists *execlists) in execlists_user_end() argument
391 execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER); in execlists_user_end()
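
execlists_user_begin()/execlists_user_end() only toggle the EXECLISTS_ACTIVE_USER bit around the window in which user contexts occupy the ports; the port argument and the bool return of begin are inferred from the call sites at lines 770 and 1041/1043:

static inline bool
execlists_user_begin(struct intel_engine_execlists *execlists,
                     const struct execlist_port *port)
{
        /* Returns true only on the 0 -> 1 transition of the USER bit. */
        return execlists_set_active_once(execlists, EXECLISTS_ACTIVE_USER);
}

static inline void
execlists_user_end(struct intel_engine_execlists *execlists)
{
        execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
}
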
438 static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port) in write_desc() argument
440 if (execlists->ctrl_reg) { in write_desc()
441 writel(lower_32_bits(desc), execlists->submit_reg + port * 2); in write_desc()
442 writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1); in write_desc()
444 writel(upper_32_bits(desc), execlists->submit_reg); in write_desc()
445 writel(lower_32_bits(desc), execlists->submit_reg); in write_desc()
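
Those six lines are essentially the whole of write_desc(): a set ctrl_reg selects the Gen11-style submit queue (one pair of dwords per port, latched later by EL_CTRL_LOAD), otherwise the legacy ELSP is written upper dword first. Reassembled:

static inline void
write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
{
        if (execlists->ctrl_reg) {
                /* Gen11+: per-port submit queue entry, lower dword first;
                 * nothing is loaded until EL_CTRL_LOAD is written. */
                writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
                writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
        } else {
                /* Gen8-10 ELSP: upper dword first, then lower. */
                writel(upper_32_bits(desc), execlists->submit_reg);
                writel(lower_32_bits(desc), execlists->submit_reg);
        }
}
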
451 struct intel_engine_execlists *execlists = &engine->execlists; in execlists_submit_ports() local
452 struct execlist_port *port = execlists->port; in execlists_submit_ports()
471 for (n = execlists_num_ports(execlists); n--; ) { in execlists_submit_ports()
497 write_desc(execlists, desc, n); in execlists_submit_ports()
501 if (execlists->ctrl_reg) in execlists_submit_ports()
502 writel(EL_CTRL_LOAD, execlists->ctrl_reg); in execlists_submit_ports()
504 execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK); in execlists_submit_ports()
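
execlists_submit_ports() walks the ports from the highest index down so that, on Gen8-10, the final ELSP write (port 0) is the one that latches the submission; on Gen11+ the queue is latched explicitly via EL_CTRL_LOAD. A trimmed sketch; the per-port request bookkeeping and the context-image (tail) update done by the real function are abbreviated to a comment:

static void execlists_submit_ports(struct intel_engine_cs *engine)
{
        struct intel_engine_execlists *execlists = &engine->execlists;
        struct execlist_port *port = execlists->port;
        unsigned int n;

        for (n = execlists_num_ports(execlists); n--; ) {
                struct i915_request *rq = port_request(&port[n]);
                u64 desc = 0;

                if (rq) {
                        /* Assumed/abbreviated: the real code bumps the
                         * per-port submission count and updates the context
                         * tail (execlists_update_context()), yielding desc. */
                        desc = rq->hw_context->lrc_desc;
                }

                write_desc(execlists, desc, n);
        }

        /* Gen11+ must be told explicitly to load the submit queue. */
        if (execlists->ctrl_reg)
                writel(EL_CTRL_LOAD, execlists->ctrl_reg);

        /* Cleared here, set again once the HW acks the new ELSP. */
        execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
}
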
537 struct intel_engine_execlists *execlists = &engine->execlists; in inject_preempt_context() local
542 GEM_BUG_ON(execlists->preempt_complete_status != in inject_preempt_context()
555 for (n = execlists_num_ports(execlists); --n; ) in inject_preempt_context()
556 write_desc(execlists, 0, n); in inject_preempt_context()
558 write_desc(execlists, ce->lrc_desc, n); in inject_preempt_context()
561 if (execlists->ctrl_reg) in inject_preempt_context()
562 writel(EL_CTRL_LOAD, execlists->ctrl_reg); in inject_preempt_context()
564 execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK); in inject_preempt_context()
565 execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT); in inject_preempt_context()
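
inject_preempt_context() forces a switch to the kernel's empty preempt context: zero descriptors into every port except port 0, the preempt context's descriptor into port 0, then the usual load/ack dance, and finally EXECLISTS_ACTIVE_PREEMPT so the CSB handler knows why the ports emptied. A sketch; how ce is obtained from the driver's preempt_context is assumed:

static void inject_preempt_context(struct intel_engine_cs *engine)
{
        struct intel_engine_execlists *execlists = &engine->execlists;
        struct intel_context *ce =
                to_intel_context(engine->i915->preempt_context, engine);
        unsigned int n;

        /* The CSB completion check relies on recognising this context. */
        GEM_BUG_ON(execlists->preempt_complete_status !=
                   upper_32_bits(ce->lrc_desc));

        /* Switch to the empty preempt context so the GPU goes idle. */
        for (n = execlists_num_ports(execlists); --n; )
                write_desc(execlists, 0, n);

        write_desc(execlists, ce->lrc_desc, n);      /* n == 0 here */

        if (execlists->ctrl_reg)
                writel(EL_CTRL_LOAD, execlists->ctrl_reg);

        execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
        execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
}
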
568 static void complete_preempt_context(struct intel_engine_execlists *execlists) in complete_preempt_context() argument
570 GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)); in complete_preempt_context()
572 if (inject_preempt_hang(execlists)) in complete_preempt_context()
575 execlists_cancel_port_requests(execlists); in complete_preempt_context()
576 __unwind_incomplete_requests(container_of(execlists, in complete_preempt_context()
578 execlists)); in complete_preempt_context()
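
complete_preempt_context() is short enough that the listing almost spells it out: once the HW reports the preempt context as complete, drop everything on the ports and unwind the preempted requests back onto the priority queue (inject_preempt_hang() is a selftest hook). Reassembled:

static void complete_preempt_context(struct intel_engine_execlists *execlists)
{
        GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));

        if (inject_preempt_hang(execlists))
                return;

        execlists_cancel_port_requests(execlists);
        __unwind_incomplete_requests(container_of(execlists,
                                                  struct intel_engine_cs,
                                                  execlists));
}
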
583 struct intel_engine_execlists * const execlists = &engine->execlists; in execlists_dequeue() local
584 struct execlist_port *port = execlists->port; in execlists_dequeue()
586 &execlists->port[execlists->port_mask]; in execlists_dequeue()
620 GEM_BUG_ON(!execlists_is_active(execlists, in execlists_dequeue()
631 if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK)) in execlists_dequeue()
634 if (need_preempt(engine, last, execlists->queue_priority)) { in execlists_dequeue()
674 while ((rb = rb_first_cached(&execlists->queue))) { in execlists_dequeue()
728 trace_i915_request_in(rq, port_index(port, execlists)); in execlists_dequeue()
733 rb_erase_cached(&p->node, &execlists->queue); in execlists_dequeue()
756 execlists->queue_priority = in execlists_dequeue()
757 port != execlists->port ? rq_prio(last) : INT_MIN; in execlists_dequeue()
765 GEM_BUG_ON(rb_first_cached(&execlists->queue) && in execlists_dequeue()
766 !port_isset(execlists->port)); in execlists_dequeue()
770 execlists_user_begin(execlists, execlists->port); in execlists_dequeue()
773 GEM_BUG_ON(execlists_is_active(&engine->execlists, in execlists_dequeue()
775 !port_isset(engine->execlists.port)); in execlists_dequeue()
779 execlists_cancel_port_requests(struct intel_engine_execlists * const execlists) in execlists_cancel_port_requests() argument
781 struct execlist_port *port = execlists->port; in execlists_cancel_port_requests()
782 unsigned int num_ports = execlists_num_ports(execlists); in execlists_cancel_port_requests()
789 (unsigned int)(port - execlists->port), in execlists_cancel_port_requests()
794 GEM_BUG_ON(!execlists->active); in execlists_cancel_port_requests()
806 execlists_clear_all_active(execlists); in execlists_cancel_port_requests()
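
execlists_cancel_port_requests() empties the port array and, via execlists_clear_all_active(), wipes every active flag (including PREEMPT, which is why complete_preempt_context() above does not clear it itself). A trimmed sketch; the schedule-out accounting on each request is summarised in a comment:

static void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
{
        struct execlist_port *port = execlists->port;
        unsigned int num_ports = execlists_num_ports(execlists);

        while (num_ports-- && port_isset(port)) {
                struct i915_request *rq = port_request(port);

                GEM_BUG_ON(!execlists->active);

                /* Assumed/abbreviated: the request is marked as scheduled
                 * out (preempted) before the port's reference is dropped. */
                i915_request_put(rq);

                memset(port, 0, sizeof(*port));
                port++;
        }

        execlists_clear_all_active(execlists);
}
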
809 static void reset_csb_pointers(struct intel_engine_execlists *execlists) in reset_csb_pointers() argument
820 execlists->csb_head = execlists->csb_write_reset; in reset_csb_pointers()
821 WRITE_ONCE(*execlists->csb_write, execlists->csb_write_reset); in reset_csb_pointers()
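
reset_csb_pointers() is exactly the two assignments shown: after a reset the HW starts writing at CSB entry 0 again, so both the cached head and the in-memory write pointer are wound back to the reset value (one slot before entry 0):

static void reset_csb_pointers(struct intel_engine_execlists *execlists)
{
        /* The HW restarts at entry 0; pretend its last write was the final
         * slot so that the next walk starts from entry 0 onwards. */
        execlists->csb_head = execlists->csb_write_reset;
        WRITE_ONCE(*execlists->csb_write, execlists->csb_write_reset);
}
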
831 struct intel_engine_execlists * const execlists = &engine->execlists; in execlists_cancel_requests() local
856 execlists_cancel_port_requests(execlists); in execlists_cancel_requests()
857 execlists_user_end(execlists); in execlists_cancel_requests()
867 while ((rb = rb_first_cached(&execlists->queue))) { in execlists_cancel_requests()
877 rb_erase_cached(&p->node, &execlists->queue); in execlists_cancel_requests()
885 execlists->queue_priority = INT_MIN; in execlists_cancel_requests()
886 execlists->queue = RB_ROOT_CACHED; in execlists_cancel_requests()
887 GEM_BUG_ON(port_isset(execlists->port)); in execlists_cancel_requests()
889 GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet)); in execlists_cancel_requests()
890 execlists->tasklet.func = nop_submission_tasklet; in execlists_cancel_requests()
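
execlists_cancel_requests() is the wedge path: cancel the ports, fail and flush everything still sitting in the priority queue, reset the scheduler bookkeeping, and point the (already disabled) tasklet at nop_submission_tasklet so nothing gets resubmitted. A trimmed sketch; the locking, the -EIO marking of each request and the priolist freeing are assumptions/abbreviations:

static void execlists_cancel_requests(struct intel_engine_cs *engine)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_request *rq, *rn;
        struct rb_node *rb;

        /* Assumed: runs under the engine timeline lock, irqs disabled. */

        /* Drop whatever the HW still holds on the ports. */
        execlists_cancel_port_requests(execlists);
        execlists_user_end(execlists);

        /* Fail and submit every queued request so it can be retired. */
        while ((rb = rb_first_cached(&execlists->queue))) {
                struct i915_priolist *p = rb_entry(rb, typeof(*p), node);

                list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
                        INIT_LIST_HEAD(&rq->sched.link);
                        dma_fence_set_error(&rq->fence, -EIO);  /* assumed */
                        __i915_request_submit(rq);              /* assumed */
                }

                rb_erase_cached(&p->node, &execlists->queue);
                /* (freeing of dynamically allocated priolists elided) */
        }

        /* Reset scheduler state and park submission on a no-op tasklet. */
        execlists->queue_priority = INT_MIN;
        execlists->queue = RB_ROOT_CACHED;
        GEM_BUG_ON(port_isset(execlists->port));

        GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
        execlists->tasklet.func = nop_submission_tasklet;
}
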
896 reset_in_progress(const struct intel_engine_execlists *execlists) in reset_in_progress() argument
898 return unlikely(!__tasklet_is_enabled(&execlists->tasklet)); in reset_in_progress()
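
reset_in_progress() is a one-liner: the tasklet is only ever disabled around a reset, so a disabled tasklet doubles as the reset-in-progress flag:

static bool reset_in_progress(const struct intel_engine_execlists *execlists)
{
        return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
}
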
903 struct intel_engine_execlists * const execlists = &engine->execlists; in process_csb() local
904 struct execlist_port *port = execlists->port; in process_csb()
905 const u32 * const buf = execlists->csb_status; in process_csb()
918 head = execlists->csb_head; in process_csb()
919 tail = READ_ONCE(*execlists->csb_write); in process_csb()
963 execlists->active); in process_csb()
968 execlists_set_active(execlists, in process_csb()
971 execlists_clear_active(execlists, in process_csb()
981 buf[2*head + 1] == execlists->preempt_complete_status) { in process_csb()
983 complete_preempt_context(execlists); in process_csb()
988 execlists_is_active(execlists, in process_csb()
992 GEM_BUG_ON(!execlists_is_active(execlists, in process_csb()
1039 port = execlists_port_complete(execlists, port); in process_csb()
1041 execlists_user_begin(execlists, port); in process_csb()
1043 execlists_user_end(execlists); in process_csb()
1049 execlists->csb_head = head; in process_csb()
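
process_csb() walks the context-status buffer from the cached csb_head up to the write pointer last published by the HW, updating the HWACK/PREEMPT/USER flags and popping completed requests off the ports. A heavily condensed sketch of that loop; the per-request count handling, lite-restore handling and tracing are abbreviated, and the GEN8_CTX_STATUS_* bit names are taken from the driver headers rather than from this listing:

static void process_csb(struct intel_engine_cs *engine)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct execlist_port *port = execlists->port;
        const u32 * const buf = execlists->csb_status;
        u8 head, tail;

        head = execlists->csb_head;
        tail = READ_ONCE(*execlists->csb_write);
        if (unlikely(head == tail))
                return;

        rmb();  /* order the entry reads after reading the write pointer */

        do {
                unsigned int status;

                if (++head == GEN8_CSB_ENTRIES)
                        head = 0;

                status = buf[2 * head];

                /* Track the HW acknowledging, then retiring, our ELSP write. */
                if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
                              GEN8_CTX_STATUS_PREEMPTED))
                        execlists_set_active(execlists, EXECLISTS_ACTIVE_HWACK);
                if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
                        execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);

                if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
                        continue;

                /* Our injected preempt context finished: unwind and requeue. */
                if (status & GEN8_CTX_STATUS_COMPLETE &&
                    buf[2 * head + 1] == execlists->preempt_complete_status) {
                        complete_preempt_context(execlists);
                        continue;
                }

                /* Events caused by the preemption itself carry no request. */
                if (status & GEN8_CTX_STATUS_PREEMPTED &&
                    execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
                        continue;

                GEM_BUG_ON(!execlists_is_active(execlists,
                                                EXECLISTS_ACTIVE_USER));

                /* Assumed/abbreviated: a user request completed; pop it off
                 * the port and keep the USER flag in sync with occupancy. */
                port = execlists_port_complete(execlists, port);
                if (port_isset(port))
                        execlists_user_begin(execlists, port);
                else
                        execlists_user_end(execlists);
        } while (head != tail);

        execlists->csb_head = head;
}
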
1057 if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT)) in __execlists_submission_tasklet()
1073 engine->execlists.active); in execlists_submission_tasklet()
1090 engine->execlists.queue_priority = prio; in __update_queue()
1095 struct intel_engine_execlists * const execlists = &engine->execlists; in __submit_queue_imm() local
1097 if (reset_in_progress(execlists)) in __submit_queue_imm()
1100 if (execlists->tasklet.func == execlists_submission_tasklet) in __submit_queue_imm()
1103 tasklet_hi_schedule(&execlists->tasklet); in __submit_queue_imm()
1108 if (prio > engine->execlists.queue_priority) { in submit_queue()
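
__update_queue()/__submit_queue_imm()/submit_queue() together form the scheduling kick: only a request of higher priority than queue_priority needs to (re)run the dequeue, and the kick is skipped entirely while a reset is in progress. A sketch; the direct call into __execlists_submission_tasklet() when the live tasklet is ours is inferred from the func comparison on line 1100:

static void __update_queue(struct intel_engine_cs *engine, int prio)
{
        engine->execlists.queue_priority = prio;
}

static void __submit_queue_imm(struct intel_engine_cs *engine)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;

        if (reset_in_progress(execlists))
                return; /* defer until the engine is restarted after reset */

        if (execlists->tasklet.func == execlists_submission_tasklet)
                __execlists_submission_tasklet(engine); /* assumed shortcut */
        else
                tasklet_hi_schedule(&execlists->tasklet);
}

static void submit_queue(struct intel_engine_cs *engine, int prio)
{
        if (prio > engine->execlists.queue_priority) {
                __update_queue(engine, prio);
                __submit_queue_imm(engine);
        }
}
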
1124 GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); in execlists_submit_request()
1254 if (prio > engine->execlists.queue_priority && in execlists_schedule()
1258 tasklet_hi_schedule(&engine->execlists.tasklet); in execlists_schedule()
1837 struct intel_engine_execlists * const execlists = &engine->execlists; in execlists_reset_prepare() local
1852 __tasklet_disable_sync_once(&execlists->tasklet); in execlists_reset_prepare()
1871 request = port_request(execlists->port); in execlists_reset_prepare()
1898 struct intel_engine_execlists * const execlists = &engine->execlists; in execlists_reset() local
1917 execlists_cancel_port_requests(execlists); in execlists_reset()
1923 reset_csb_pointers(&engine->execlists); in execlists_reset()
1972 struct intel_engine_execlists * const execlists = &engine->execlists; in execlists_reset_finish() local
1975 if (!RB_EMPTY_ROOT(&execlists->queue.rb_root)) in execlists_reset_finish()
1976 tasklet_schedule(&execlists->tasklet); in execlists_reset_finish()
1987 __tasklet_enable_sync_once(&execlists->tasklet); in execlists_reset_finish()
2274 &engine->execlists.tasklet.state))) in intel_logical_ring_cleanup()
2275 tasklet_kill(&engine->execlists.tasklet); in intel_logical_ring_cleanup()
2300 engine->execlists.tasklet.func = execlists_submission_tasklet; in intel_execlists_set_default_submission()
2380 tasklet_init(&engine->execlists.tasklet, in logical_ring_setup()
2396 struct intel_engine_execlists * const execlists = &engine->execlists; in logical_ring_init() local
2404 execlists->submit_reg = i915->regs + in logical_ring_init()
2406 execlists->ctrl_reg = i915->regs + in logical_ring_init()
2409 execlists->submit_reg = i915->regs + in logical_ring_init()
2413 execlists->preempt_complete_status = ~0u; in logical_ring_init()
2418 execlists->preempt_complete_status = in logical_ring_init()
2422 execlists->csb_read = in logical_ring_init()
2425 execlists->csb_status = (u32 __force *) in logical_ring_init()
2428 execlists->csb_write = (u32 __force *)execlists->csb_read; in logical_ring_init()
2429 execlists->csb_write_reset = in logical_ring_init()
2433 execlists->csb_status = in logical_ring_init()
2436 execlists->csb_write = in logical_ring_init()
2438 execlists->csb_write_reset = GEN8_CSB_ENTRIES - 1; in logical_ring_init()
2440 reset_csb_pointers(execlists); in logical_ring_init()