Lines Matching full:engine
142 * intel_engine_context_size() - return the size of the context for an engine
144 * @class: engine class
146 * Each engine class may require a different amount of space for a context
149 * Return: size (in bytes) of an engine class specific context image
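A hedged usage sketch of the helper documented above: an external caller could size a backing object from the per-class context image size. The helper name alloc_context_backing is hypothetical; only intel_engine_context_size() and i915_gem_object_create_internal() come from this listing.

/* Hypothetical caller, for illustration only. */
static struct drm_i915_gem_object *
alloc_context_backing(struct intel_gt *gt, u8 class)
{
	u32 size = intel_engine_context_size(gt->i915, class);

	if (!size)
		return NULL;

	/* Round up to page granularity for the backing allocation. */
	return i915_gem_object_create_internal(gt->i915,
					       round_up(size, PAGE_SIZE));
}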
239 static void __sprint_engine_name(struct intel_engine_cs *engine) in __sprint_engine_name() argument
242 * Before we know what the uABI name for this engine will be, in __sprint_engine_name()
243 * we still would like to keep track of this engine in the debug logs. in __sprint_engine_name()
246 GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u", in __sprint_engine_name()
247 intel_engine_class_repr(engine->class), in __sprint_engine_name()
248 engine->instance) >= sizeof(engine->name)); in __sprint_engine_name()
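The GEM_WARN_ON() above relies on snprintf() reporting the length it would have written; a return value at or beyond the buffer size means the engine name was truncated. A standalone sketch of that idiom (generic C, not driver code):

#include <stdio.h>

/* Returns -1 if the formatted name did not fit into buf[len]. */
static int format_engine_name(char *buf, size_t len,
			      const char *class_repr, unsigned int instance)
{
	/* snprintf() returns the would-be length, so >= len means truncation. */
	return snprintf(buf, len, "%s%u", class_repr, instance) >= (int)len ? -1 : 0;
}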
251 void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask) in intel_engine_set_hwsp_writemask() argument
255 * per-engine HWSTAM until gen6. in intel_engine_set_hwsp_writemask()
257 if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS) in intel_engine_set_hwsp_writemask()
260 if (INTEL_GEN(engine->i915) >= 3) in intel_engine_set_hwsp_writemask()
261 ENGINE_WRITE(engine, RING_HWSTAM, mask); in intel_engine_set_hwsp_writemask()
263 ENGINE_WRITE16(engine, RING_HWSTAM, mask); in intel_engine_set_hwsp_writemask()
266 static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine) in intel_engine_sanitize_mmio() argument
269 intel_engine_set_hwsp_writemask(engine, ~0u); in intel_engine_sanitize_mmio()
275 struct intel_engine_cs *engine; in intel_engine_setup() local
289 engine = kzalloc(sizeof(*engine), GFP_KERNEL); in intel_engine_setup()
290 if (!engine) in intel_engine_setup()
293 BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES); in intel_engine_setup()
295 engine->id = id; in intel_engine_setup()
296 engine->mask = BIT(id); in intel_engine_setup()
297 engine->i915 = gt->i915; in intel_engine_setup()
298 engine->gt = gt; in intel_engine_setup()
299 engine->uncore = gt->uncore; in intel_engine_setup()
300 engine->hw_id = engine->guc_id = info->hw_id; in intel_engine_setup()
301 engine->mmio_base = __engine_mmio_base(gt->i915, info->mmio_bases); in intel_engine_setup()
303 engine->class = info->class; in intel_engine_setup()
304 engine->instance = info->instance; in intel_engine_setup()
305 __sprint_engine_name(engine); in intel_engine_setup()
311 engine->destroy = (typeof(engine->destroy))kfree; in intel_engine_setup()
313 engine->context_size = intel_engine_context_size(gt->i915, in intel_engine_setup()
314 engine->class); in intel_engine_setup()
315 if (WARN_ON(engine->context_size > BIT(20))) in intel_engine_setup()
316 engine->context_size = 0; in intel_engine_setup()
317 if (engine->context_size) in intel_engine_setup()
321 engine->schedule = NULL; in intel_engine_setup()
323 seqlock_init(&engine->stats.lock); in intel_engine_setup()
325 ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier); in intel_engine_setup()
328 intel_engine_sanitize_mmio(engine); in intel_engine_setup()
330 gt->engine_class[info->class][info->instance] = engine; in intel_engine_setup()
332 intel_engine_add_user(engine); in intel_engine_setup()
333 gt->i915->engine[id] = engine; in intel_engine_setup()
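intel_engine_setup() above derives engine->mask from BIT(id) and guards the width of the mask type with BUILD_BUG_ON(). A minimal standalone sketch of that bookkeeping, assuming only standard C (the engine count below is an illustrative stand-in, not I915_NUM_ENGINES):

#include <limits.h>

#define EXAMPLE_BIT(n)		(1u << (n))
#define EXAMPLE_NUM_ENGINES	8	/* illustrative stand-in */

struct example_engine {
	unsigned int id;
	unsigned int mask;	/* single bit: membership in engine sets */
};

static void example_engine_setup(struct example_engine *e, unsigned int id)
{
	/* Compile-time guard in the spirit of the BUILD_BUG_ON() above. */
	_Static_assert(sizeof(e->mask) * CHAR_BIT >= EXAMPLE_NUM_ENGINES,
		       "mask type too narrow for all engines");

	e->id = id;
	e->mask = EXAMPLE_BIT(id);
}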
338 static void __setup_engine_capabilities(struct intel_engine_cs *engine) in __setup_engine_capabilities() argument
340 struct drm_i915_private *i915 = engine->i915; in __setup_engine_capabilities()
342 if (engine->class == VIDEO_DECODE_CLASS) { in __setup_engine_capabilities()
344 * HEVC support is present on first engine instance in __setup_engine_capabilities()
348 (INTEL_GEN(i915) >= 9 && engine->instance == 0)) in __setup_engine_capabilities()
349 engine->uabi_capabilities |= in __setup_engine_capabilities()
353 * SFC block is present only on even logical engine in __setup_engine_capabilities()
357 RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) || in __setup_engine_capabilities()
358 (INTEL_GEN(i915) >= 9 && engine->instance == 0)) in __setup_engine_capabilities()
359 engine->uabi_capabilities |= in __setup_engine_capabilities()
361 } else if (engine->class == VIDEO_ENHANCEMENT_CLASS) { in __setup_engine_capabilities()
363 engine->uabi_capabilities |= in __setup_engine_capabilities()
370 struct intel_engine_cs *engine; in intel_setup_engine_capabilities() local
373 for_each_engine(engine, i915, id) in intel_setup_engine_capabilities()
374 __setup_engine_capabilities(engine); in intel_setup_engine_capabilities()
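for_each_engine() drives most of the loops in this listing. A sketch of that kind of iterator, assuming a fixed array of engine pointers with unpopulated NULL slots; the macro body is an illustration, not the driver's definition:

/* Iterate populated slots only; the empty if/else keeps a following
 * loop body correctly attached to the iteration. */
#define for_each_example_engine(engine__, engines__, id__)		\
	for ((id__) = 0; (id__) < ARRAY_SIZE(engines__); (id__)++)	\
		if (!((engine__) = (engines__)[(id__)])) {} else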
383 struct intel_engine_cs *engine; in intel_engines_cleanup() local
386 for_each_engine(engine, i915, id) { in intel_engines_cleanup()
387 engine->destroy(engine); in intel_engines_cleanup()
388 i915->engine[id] = NULL; in intel_engines_cleanup()
393 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
446 * intel_engines_init() - init the Engine Command Streamers
453 int (*init)(struct intel_engine_cs *engine); in intel_engines_init()
454 struct intel_engine_cs *engine; in intel_engines_init() local
463 for_each_engine(engine, i915, id) { in intel_engines_init()
464 err = init(engine); in intel_engines_init()
476 void intel_engine_init_execlists(struct intel_engine_cs *engine) in intel_engine_init_execlists() argument
478 struct intel_engine_execlists * const execlists = &engine->execlists; in intel_engine_init_execlists()
492 static void cleanup_status_page(struct intel_engine_cs *engine) in cleanup_status_page() argument
497 intel_engine_set_hwsp_writemask(engine, ~0u); in cleanup_status_page()
499 vma = fetch_and_zero(&engine->status_page.vma); in cleanup_status_page()
503 if (!HWS_NEEDS_PHYSICAL(engine->i915)) in cleanup_status_page()
510 static int pin_ggtt_status_page(struct intel_engine_cs *engine, in pin_ggtt_status_page() argument
516 if (!HAS_LLC(engine->i915)) in pin_ggtt_status_page()
535 static int init_status_page(struct intel_engine_cs *engine) in init_status_page() argument
549 obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); in init_status_page()
557 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); in init_status_page()
569 engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE); in init_status_page()
570 engine->status_page.vma = vma; in init_status_page()
572 if (!HWS_NEEDS_PHYSICAL(engine->i915)) { in init_status_page()
573 ret = pin_ggtt_status_page(engine, vma); in init_status_page()
587 static int intel_engine_setup_common(struct intel_engine_cs *engine) in intel_engine_setup_common() argument
591 init_llist_head(&engine->barrier_tasks); in intel_engine_setup_common()
593 err = init_status_page(engine); in intel_engine_setup_common()
597 intel_engine_init_active(engine, ENGINE_PHYSICAL); in intel_engine_setup_common()
598 intel_engine_init_breadcrumbs(engine); in intel_engine_setup_common()
599 intel_engine_init_execlists(engine); in intel_engine_setup_common()
600 intel_engine_init_hangcheck(engine); in intel_engine_setup_common()
601 intel_engine_init_cmd_parser(engine); in intel_engine_setup_common()
602 intel_engine_init__pm(engine); in intel_engine_setup_common()
604 intel_engine_pool_init(&engine->pool); in intel_engine_setup_common()
607 engine->sseu = in intel_engine_setup_common()
608 intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu); in intel_engine_setup_common()
610 intel_engine_init_workarounds(engine); in intel_engine_setup_common()
611 intel_engine_init_whitelist(engine); in intel_engine_setup_common()
612 intel_engine_init_ctx_wa(engine); in intel_engine_setup_common()
618 * intel_engines_setup() - setup engine state not requiring hw access
621 * Initializes engine structure members shared between legacy and execlists
624 * Typically done early in the submission mode specific engine setup stage.
628 int (*setup)(struct intel_engine_cs *engine); in intel_engines_setup()
629 struct intel_engine_cs *engine; in intel_engines_setup() local
638 for_each_engine(engine, i915, id) { in intel_engines_setup()
639 err = intel_engine_setup_common(engine); in intel_engines_setup()
643 err = setup(engine); in intel_engines_setup()
648 GEM_BUG_ON(engine->destroy == (typeof(engine->destroy))kfree); in intel_engines_setup()
650 GEM_BUG_ON(!engine->cops); in intel_engines_setup()
667 static int measure_breadcrumb_dw(struct intel_engine_cs *engine) in measure_breadcrumb_dw() argument
672 GEM_BUG_ON(!engine->gt->scratch); in measure_breadcrumb_dw()
679 engine->gt, in measure_breadcrumb_dw()
680 engine->status_page.vma)) in measure_breadcrumb_dw()
688 frame->rq.i915 = engine->i915; in measure_breadcrumb_dw()
689 frame->rq.engine = engine; in measure_breadcrumb_dw()
697 dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs; in measure_breadcrumb_dw()
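measure_breadcrumb_dw() above sizes the breadcrumb by emitting it into a scratch frame and taking the pointer difference. That trick in isolation (generic sketch, not driver code):

/* An emitter returns the advanced write pointer, so the number of
 * dwords it produced is simply the pointer difference. */
static unsigned int count_emitted_dwords(u32 *(*emit)(u32 *cs), u32 *buf)
{
	u32 *end = emit(buf);

	return end - buf;
}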
710 intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass) in intel_engine_init_active() argument
712 INIT_LIST_HEAD(&engine->active.requests); in intel_engine_init_active()
714 spin_lock_init(&engine->active.lock); in intel_engine_init_active()
715 lockdep_set_subclass(&engine->active.lock, subclass); in intel_engine_init_active()
724 lock_map_acquire(&engine->active.lock.dep_map); in intel_engine_init_active()
725 lock_map_release(&engine->active.lock.dep_map); in intel_engine_init_active()
731 create_kernel_context(struct intel_engine_cs *engine) in create_kernel_context() argument
736 ce = intel_context_create(engine->i915->kernel_context, engine); in create_kernel_context()
753 * @engine: Engine to initialize.
755 * Initializes @engine structure members shared between legacy and execlists
758 * Typically done at later stages of submission mode specific engine setup.
762 int intel_engine_init_common(struct intel_engine_cs *engine) in intel_engine_init_common() argument
767 engine->set_default_submission(engine); in intel_engine_init_common()
777 ce = create_kernel_context(engine); in intel_engine_init_common()
781 engine->kernel_context = ce; in intel_engine_init_common()
783 ret = measure_breadcrumb_dw(engine); in intel_engine_init_common()
787 engine->emit_fini_breadcrumb_dw = ret; in intel_engine_init_common()
798 * intel_engine_cleanup_common - cleans up the engine state created by
800 * @engine: Engine to cleanup.
804 void intel_engine_cleanup_common(struct intel_engine_cs *engine) in intel_engine_cleanup_common() argument
806 GEM_BUG_ON(!list_empty(&engine->active.requests)); in intel_engine_cleanup_common()
808 cleanup_status_page(engine); in intel_engine_cleanup_common()
810 intel_engine_pool_fini(&engine->pool); in intel_engine_cleanup_common()
811 intel_engine_fini_breadcrumbs(engine); in intel_engine_cleanup_common()
812 intel_engine_cleanup_cmd_parser(engine); in intel_engine_cleanup_common()
814 if (engine->default_state) in intel_engine_cleanup_common()
815 i915_gem_object_put(engine->default_state); in intel_engine_cleanup_common()
817 intel_context_unpin(engine->kernel_context); in intel_engine_cleanup_common()
818 intel_context_put(engine->kernel_context); in intel_engine_cleanup_common()
819 GEM_BUG_ON(!llist_empty(&engine->barrier_tasks)); in intel_engine_cleanup_common()
821 intel_wa_list_free(&engine->ctx_wa_list); in intel_engine_cleanup_common()
822 intel_wa_list_free(&engine->wa_list); in intel_engine_cleanup_common()
823 intel_wa_list_free(&engine->whitelist); in intel_engine_cleanup_common()
826 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine) in intel_engine_get_active_head() argument
828 struct drm_i915_private *i915 = engine->i915; in intel_engine_get_active_head()
833 acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW); in intel_engine_get_active_head()
835 acthd = ENGINE_READ(engine, RING_ACTHD); in intel_engine_get_active_head()
837 acthd = ENGINE_READ(engine, ACTHD); in intel_engine_get_active_head()
842 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine) in intel_engine_get_last_batch_head() argument
846 if (INTEL_GEN(engine->i915) >= 8) in intel_engine_get_last_batch_head()
847 bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW); in intel_engine_get_last_batch_head()
849 bbaddr = ENGINE_READ(engine, RING_BBADDR); in intel_engine_get_last_batch_head()
854 int intel_engine_stop_cs(struct intel_engine_cs *engine) in intel_engine_stop_cs() argument
856 struct intel_uncore *uncore = engine->uncore; in intel_engine_stop_cs()
857 const u32 base = engine->mmio_base; in intel_engine_stop_cs()
861 if (INTEL_GEN(engine->i915) < 3) in intel_engine_stop_cs()
864 GEM_TRACE("%s\n", engine->name); in intel_engine_stop_cs()
873 GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name); in intel_engine_stop_cs()
883 void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine) in intel_engine_cancel_stop_cs() argument
885 GEM_TRACE("%s\n", engine->name); in intel_engine_cancel_stop_cs()
887 ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); in intel_engine_cancel_stop_cs()
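RING_MI_MODE is written with _MASKED_BIT_DISABLE() because it is a "masked" register: the upper 16 bits select which of the lower 16 bits the write actually changes. The macros below sketch that convention for illustration; they are not copied from the driver headers:

#define EXAMPLE_MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
#define EXAMPLE_MASKED_BIT_ENABLE(bit)		EXAMPLE_MASKED_FIELD((bit), (bit))
#define EXAMPLE_MASKED_BIT_DISABLE(bit)	EXAMPLE_MASKED_FIELD((bit), 0)	/* clear bit */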
902 read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice, in read_subslice_reg() argument
905 struct drm_i915_private *i915 = engine->i915; in read_subslice_reg()
906 struct intel_uncore *uncore = engine->uncore; in read_subslice_reg()
947 void intel_engine_get_instdone(struct intel_engine_cs *engine, in intel_engine_get_instdone() argument
950 struct drm_i915_private *i915 = engine->i915; in intel_engine_get_instdone()
951 struct intel_uncore *uncore = engine->uncore; in intel_engine_get_instdone()
952 u32 mmio_base = engine->mmio_base; in intel_engine_get_instdone()
963 if (engine->id != RCS0) in intel_engine_get_instdone()
970 read_subslice_reg(engine, slice, subslice, in intel_engine_get_instdone()
973 read_subslice_reg(engine, slice, subslice, in intel_engine_get_instdone()
981 if (engine->id != RCS0) in intel_engine_get_instdone()
997 if (engine->id == RCS0) in intel_engine_get_instdone()
1009 static bool ring_is_idle(struct intel_engine_cs *engine) in ring_is_idle() argument
1013 if (I915_SELFTEST_ONLY(!engine->mmio_base)) in ring_is_idle()
1016 if (!intel_engine_pm_get_if_awake(engine)) in ring_is_idle()
1020 if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) != in ring_is_idle()
1021 (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR)) in ring_is_idle()
1025 if (INTEL_GEN(engine->i915) > 2 && in ring_is_idle()
1026 !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE)) in ring_is_idle()
1029 intel_engine_pm_put(engine); in ring_is_idle()
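The HEAD/TAIL comparison in ring_is_idle() is the usual ring-buffer emptiness test: the hardware read offset has caught up with the software write offset. A generic sketch:

#include <stdbool.h>

struct example_ring {
	unsigned int head;	/* consumer (hardware read) offset */
	unsigned int tail;	/* producer (software write) offset */
};

/* Empty ring: everything queued has been consumed. */
static bool example_ring_is_idle(const struct example_ring *ring)
{
	return ring->head == ring->tail;
}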
1035 * intel_engine_is_idle() - Report if the engine has finished processing all work
1036 * @engine: the intel_engine_cs
1039 * to hardware, and that the engine is idle.
1041 bool intel_engine_is_idle(struct intel_engine_cs *engine) in intel_engine_is_idle() argument
1044 if (intel_gt_is_wedged(engine->gt)) in intel_engine_is_idle()
1047 if (!intel_engine_pm_is_awake(engine)) in intel_engine_is_idle()
1051 if (execlists_active(&engine->execlists)) { in intel_engine_is_idle()
1052 struct tasklet_struct *t = &engine->execlists.tasklet; in intel_engine_is_idle()
1054 synchronize_hardirq(engine->i915->drm.pdev->irq); in intel_engine_is_idle()
1068 if (execlists_active(&engine->execlists)) in intel_engine_is_idle()
1073 if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)) in intel_engine_is_idle()
1077 return ring_is_idle(engine); in intel_engine_is_idle()
1082 struct intel_engine_cs *engine; in intel_engines_are_idle() local
1096 for_each_engine(engine, gt->i915, id) { in intel_engines_are_idle()
1097 if (!intel_engine_is_idle(engine)) in intel_engines_are_idle()
1106 struct intel_engine_cs *engine; in intel_engines_reset_default_submission() local
1109 for_each_engine(engine, gt->i915, id) in intel_engines_reset_default_submission()
1110 engine->set_default_submission(engine); in intel_engines_reset_default_submission()
1113 bool intel_engine_can_store_dword(struct intel_engine_cs *engine) in intel_engine_can_store_dword() argument
1115 switch (INTEL_GEN(engine->i915)) { in intel_engine_can_store_dword()
1120 return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915)); in intel_engine_can_store_dword()
1122 return engine->class != VIDEO_DECODE_CLASS; /* b0rked */ in intel_engine_can_store_dword()
1196 static void intel_engine_print_registers(struct intel_engine_cs *engine, in intel_engine_print_registers() argument
1199 struct drm_i915_private *dev_priv = engine->i915; in intel_engine_print_registers()
1200 struct intel_engine_execlists * const execlists = &engine->execlists; in intel_engine_print_registers()
1203 if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7)) in intel_engine_print_registers()
1204 drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID)); in intel_engine_print_registers()
1206 ENGINE_READ(engine, RING_START)); in intel_engine_print_registers()
1208 ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR); in intel_engine_print_registers()
1210 ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR); in intel_engine_print_registers()
1212 ENGINE_READ(engine, RING_CTL), in intel_engine_print_registers()
1213 ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : ""); in intel_engine_print_registers()
1214 if (INTEL_GEN(engine->i915) > 2) { in intel_engine_print_registers()
1216 ENGINE_READ(engine, RING_MI_MODE), in intel_engine_print_registers()
1217 ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : ""); in intel_engine_print_registers()
1222 ENGINE_READ(engine, RING_IMR)); in intel_engine_print_registers()
1225 addr = intel_engine_get_active_head(engine); in intel_engine_print_registers()
1228 addr = intel_engine_get_last_batch_head(engine); in intel_engine_print_registers()
1232 addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW); in intel_engine_print_registers()
1234 addr = ENGINE_READ(engine, RING_DMA_FADD); in intel_engine_print_registers()
1236 addr = ENGINE_READ(engine, DMA_FADD_I8XX); in intel_engine_print_registers()
1241 ENGINE_READ(engine, RING_IPEIR)); in intel_engine_print_registers()
1243 ENGINE_READ(engine, RING_IPEHR)); in intel_engine_print_registers()
1245 drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR)); in intel_engine_print_registers()
1246 drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR)); in intel_engine_print_registers()
1252 &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; in intel_engine_print_registers()
1258 ENGINE_READ(engine, RING_EXECLIST_STATUS_LO), in intel_engine_print_registers()
1259 ENGINE_READ(engine, RING_EXECLIST_STATUS_HI), in intel_engine_print_registers()
1268 &engine->execlists.tasklet.state)), in intel_engine_print_registers()
1269 enableddisabled(!atomic_read(&engine->execlists.tasklet.count))); in intel_engine_print_registers()
1313 ENGINE_READ(engine, RING_PP_DIR_BASE)); in intel_engine_print_registers()
1315 ENGINE_READ(engine, RING_PP_DIR_BASE_READ)); in intel_engine_print_registers()
1317 ENGINE_READ(engine, RING_PP_DIR_DCLV)); in intel_engine_print_registers()
1354 void intel_engine_dump(struct intel_engine_cs *engine, in intel_engine_dump() argument
1358 struct i915_gpu_error * const error = &engine->i915->gpu_error; in intel_engine_dump()
1371 if (intel_gt_is_wedged(engine->gt)) in intel_engine_dump()
1374 drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count)); in intel_engine_dump()
1376 jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp)); in intel_engine_dump()
1378 i915_reset_engine_count(error, engine), in intel_engine_dump()
1383 spin_lock_irqsave(&engine->active.lock, flags); in intel_engine_dump()
1384 rq = intel_engine_find_active_request(engine); in intel_engine_dump()
1403 spin_unlock_irqrestore(&engine->active.lock, flags); in intel_engine_dump()
1405 drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base); in intel_engine_dump()
1406 wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm); in intel_engine_dump()
1408 intel_engine_print_registers(engine, m); in intel_engine_dump()
1409 intel_runtime_pm_put(&engine->i915->runtime_pm, wakeref); in intel_engine_dump()
1414 intel_execlists_show_requests(engine, m, print_request, 8); in intel_engine_dump()
1417 hexdump(m, engine->status_page.addr, PAGE_SIZE); in intel_engine_dump()
1419 drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine))); in intel_engine_dump()
1421 intel_engine_print_breadcrumbs(engine, m); in intel_engine_dump()
1425 * intel_enable_engine_stats() - Enable engine busy tracking on engine
1426 * @engine: engine to enable stats collection
1428 * Start collecting the engine busyness data for @engine.
1432 int intel_enable_engine_stats(struct intel_engine_cs *engine) in intel_enable_engine_stats() argument
1434 struct intel_engine_execlists *execlists = &engine->execlists; in intel_enable_engine_stats()
1438 if (!intel_engine_supports_stats(engine)) in intel_enable_engine_stats()
1442 write_seqlock_irqsave(&engine->stats.lock, flags); in intel_enable_engine_stats()
1444 if (unlikely(engine->stats.enabled == ~0)) { in intel_enable_engine_stats()
1449 if (engine->stats.enabled++ == 0) { in intel_enable_engine_stats()
1453 engine->stats.enabled_at = ktime_get(); in intel_enable_engine_stats()
1457 engine->stats.active++; in intel_enable_engine_stats()
1462 engine->stats.active++; in intel_enable_engine_stats()
1465 if (engine->stats.active) in intel_enable_engine_stats()
1466 engine->stats.start = engine->stats.enabled_at; in intel_enable_engine_stats()
1470 write_sequnlock_irqrestore(&engine->stats.lock, flags); in intel_enable_engine_stats()
1476 static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine) in __intel_engine_get_busy_time() argument
1478 ktime_t total = engine->stats.total; in __intel_engine_get_busy_time()
1481 * If the engine is executing something at the moment in __intel_engine_get_busy_time()
1484 if (engine->stats.active) in __intel_engine_get_busy_time()
1486 ktime_sub(ktime_get(), engine->stats.start)); in __intel_engine_get_busy_time()
1492 * intel_engine_get_busy_time() - Return current accumulated engine busyness
1493 * @engine: engine to report on
1495 * Returns accumulated time @engine was busy since engine stats were enabled.
1497 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine) in intel_engine_get_busy_time() argument
1503 seq = read_seqbegin(&engine->stats.lock); in intel_engine_get_busy_time()
1504 total = __intel_engine_get_busy_time(engine); in intel_engine_get_busy_time()
1505 } while (read_seqretry(&engine->stats.lock, seq)); in intel_engine_get_busy_time()
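The loop above retries the read side of the seqlock until it observes a consistent snapshot. A hedged usage sketch of the stats API as it appears in this listing (enable, sample twice, disable); the helper name, the msleep()-based interval and the percentage math are illustrative, not driver code:

#include <linux/delay.h>
#include <linux/math64.h>

/* Hypothetical helper: approximate busy percentage over @ms milliseconds. */
static int example_engine_busy_percent(struct intel_engine_cs *engine,
				       unsigned int ms)
{
	ktime_t t0, t1;
	int err;

	err = intel_enable_engine_stats(engine);
	if (err)	/* e.g. the submission backend lacks stats support */
		return err;

	t0 = intel_engine_get_busy_time(engine);
	msleep(ms);
	t1 = intel_engine_get_busy_time(engine);

	intel_disable_engine_stats(engine);

	return div64_u64(100 * ktime_to_ns(ktime_sub(t1, t0)),
			 (u64)ms * NSEC_PER_MSEC);
}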
1511 * intel_disable_engine_stats() - Disable engine busy tracking on engine
1512 * @engine: engine to disable stats collection
1514 * Stops collecting the engine busyness data for @engine.
1516 void intel_disable_engine_stats(struct intel_engine_cs *engine) in intel_disable_engine_stats() argument
1520 if (!intel_engine_supports_stats(engine)) in intel_disable_engine_stats()
1523 write_seqlock_irqsave(&engine->stats.lock, flags); in intel_disable_engine_stats()
1524 WARN_ON_ONCE(engine->stats.enabled == 0); in intel_disable_engine_stats()
1525 if (--engine->stats.enabled == 0) { in intel_disable_engine_stats()
1526 engine->stats.total = __intel_engine_get_busy_time(engine); in intel_disable_engine_stats()
1527 engine->stats.active = 0; in intel_disable_engine_stats()
1529 write_sequnlock_irqrestore(&engine->stats.lock, flags); in intel_disable_engine_stats()
1534 u32 ring = ENGINE_READ(rq->engine, RING_START); in match_ring()
1540 intel_engine_find_active_request(struct intel_engine_cs *engine) in intel_engine_find_active_request() argument
1545 * We are called by the error capture, reset and to dump engine in intel_engine_find_active_request()
1551 * not need an engine->irq_seqno_barrier() before the seqno reads. in intel_engine_find_active_request()
1555 lockdep_assert_held(&engine->active.lock); in intel_engine_find_active_request()
1556 list_for_each_entry(request, &engine->active.requests, sched.link) { in intel_engine_find_active_request()