Lines matching "engines" (full-identifier search); each hit shows the source line number and its enclosing function.

238 		if (!e->engines[count])  in __free_engines()
241 intel_context_put(e->engines[count]); in __free_engines()
253 struct i915_gem_engines *engines = in free_engines_rcu() local
256 i915_sw_fence_fini(&engines->fence); in free_engines_rcu()
257 free_engines(engines); in free_engines_rcu()
263 struct i915_gem_engines *engines = in engines_notify() local
264 container_of(fence, typeof(*engines), fence); in engines_notify()
268 if (!list_empty(&engines->link)) { in engines_notify()
269 struct i915_gem_context *ctx = engines->ctx; in engines_notify()
273 list_del(&engines->link); in engines_notify()
276 i915_gem_context_put(engines->ctx); in engines_notify()
280 init_rcu_head(&engines->rcu); in engines_notify()
281 call_rcu(&engines->rcu, free_engines_rcu); in engines_notify()
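
The matches at 253-281 are the deferred teardown path for an engine set. engines_notify() is the i915_sw_fence callback attached to each i915_gem_engines: on FENCE_COMPLETE it unlinks the set from ctx->stale.engines and drops the context reference, and on FENCE_FREE it punts the actual free through an RCU grace period so lockless readers (see __context_engines_await() further down) never touch freed memory. A minimal sketch of that pattern, reconstructed from the matched lines; the irq-safe ctx->stale.lock locking around list_del() is elided:

static void free_engines_rcu(struct rcu_head *rcu)
{
        struct i915_gem_engines *engines =
                container_of(rcu, struct i915_gem_engines, rcu);

        i915_sw_fence_fini(&engines->fence);
        free_engines(engines);
}

static int __i915_sw_fence_call
engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
        struct i915_gem_engines *engines =
                container_of(fence, typeof(*engines), fence);

        switch (state) {
        case FENCE_COMPLETE: /* all awaited contexts have idled */
                if (!list_empty(&engines->link))
                        list_del(&engines->link); /* under ctx->stale.lock */
                i915_gem_context_put(engines->ctx);
                break;

        case FENCE_FREE: /* last reference on the fence is gone */
                init_rcu_head(&engines->rcu);
                call_rcu(&engines->rcu, free_engines_rcu);
                break;
        }

        return NOTIFY_DONE;
}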
292 e = kzalloc(struct_size(e, engines, count), GFP_KERNEL); in alloc_engines()
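
alloc_engines() makes the whole set a single allocation, header plus flexible engines[] array, sized with struct_size() so the multiply-and-add cannot overflow. A sketch, assuming only the fields visible elsewhere in this listing:

static struct i915_gem_engines *alloc_engines(unsigned int count)
{
        struct i915_gem_engines *e;

        /* header + count * sizeof(e->engines[0]), overflow-checked */
        e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
        if (!e)
                return NULL;

        i915_sw_fence_init(&e->fence, engines_notify);
        return e;
}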
318 GEM_BUG_ON(e->engines[engine->legacy_idx]); in default_engines()
328 e->engines[engine->legacy_idx] = ce; in default_engines()
390 return rcu_dereference_protected(ctx->engines, true); in __context_engines_static()
486 static void kill_engines(struct i915_gem_engines *engines, bool ban) in kill_engines() argument
492 * Map the user's engine back to the actual engines; one virtual in kill_engines()
493 * engine will be mapped to multiple engines, and using ctx->engine[] in kill_engines()
496 * engines on which there are incomplete requests. in kill_engines()
498 for_each_gem_engine(ce, engines, it) { in kill_engines()
520 __reset_context(engines->ctx, engine); in kill_engines()
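
The comment at 492-496 is only partially captured here (the unmatched lines in between are elided by the search); its point is that one user-visible virtual engine fans out to several physical engines, so kill_engines() iterates the resolved intel_contexts rather than the user's map and resets whichever physical engine still has incomplete requests from this context. A hedged sketch of the loop shape; active_engine() is a static helper in this file, and the ban step is paraphrased:

for_each_gem_engine(ce, engines, it) {
        struct intel_engine_cs *engine;

        if (ban && intel_context_set_banned(ce))
                continue; /* already banned: nothing in flight to kill */

        /* Which physical engine, if any, is still running this ce? */
        engine = active_engine(ce);
        if (engine)
                __reset_context(engines->ctx, engine);
}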
532 list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) { in kill_context()
553 struct i915_gem_engines *engines) in engines_idle_release() argument
558 INIT_LIST_HEAD(&engines->link); in engines_idle_release()
560 engines->ctx = i915_gem_context_get(ctx); in engines_idle_release()
562 for_each_gem_engine(ce, engines, it) { in engines_idle_release()
571 err = i915_sw_fence_await_active(&engines->fence, in engines_idle_release()
581 list_add_tail(&engines->link, &ctx->stale.engines); in engines_idle_release()
585 if (list_empty(&engines->link)) /* raced, already closed */ in engines_idle_release()
586 kill_engines(engines, true); in engines_idle_release()
588 i915_sw_fence_commit(&engines->fence); in engines_idle_release()
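
engines_idle_release() is the producer side of the teardown above: the replaced engine set is parked on ctx->stale.engines, and the set's i915_sw_fence is made to await every still-active context so that engines_notify() only fires once the hardware is done with them. Reconstructed shape, with the context-pinning details and exact await flags elided:

static void engines_idle_release(struct i915_gem_context *ctx,
                                 struct i915_gem_engines *engines)
{
        struct i915_gem_engines_iter it;
        struct intel_context *ce;

        INIT_LIST_HEAD(&engines->link);
        engines->ctx = i915_gem_context_get(ctx);

        for_each_gem_engine(ce, engines, it) {
                int err;

                /* keep the fence pending until this context idles */
                err = i915_sw_fence_await_active(&engines->fence,
                                                 &ce->active, 0);
                if (err)
                        goto kill;
        }

        spin_lock_irq(&ctx->stale.lock);
        if (!i915_gem_context_is_closed(ctx))
                list_add_tail(&engines->link, &ctx->stale.engines);
        spin_unlock_irq(&ctx->stale.lock);

kill:
        if (list_empty(&engines->link)) /* raced, already closed */
                kill_engines(engines, true);

        i915_sw_fence_commit(&engines->fence);
}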
614 engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1)); in context_close()
710 INIT_LIST_HEAD(&ctx->stale.engines); in __create_context()
718 RCU_INIT_POINTER(ctx->engines, e); in __create_context()
745 struct i915_gem_engines *engines; in __context_engines_await() local
749 engines = rcu_dereference(ctx->engines); in __context_engines_await()
750 GEM_BUG_ON(!engines); in __context_engines_await()
752 if (unlikely(!i915_sw_fence_await(&engines->fence))) in __context_engines_await()
755 if (likely(engines == rcu_access_pointer(ctx->engines))) in __context_engines_await()
758 i915_sw_fence_complete(&engines->fence); in __context_engines_await()
762 return engines; in __context_engines_await()
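
__context_engines_await() is the reader that makes the RCU/fence dance safe: snapshot ctx->engines under rcu_read_lock(), take an await reference on the snapshot's fence, then re-check that the pointer has not been swapped by a concurrent set_engines(); if it has, drop the fence and retry. Reconstructed almost entirely from the matched lines:

static struct i915_gem_engines *
__context_engines_await(struct i915_gem_context *ctx)
{
        struct i915_gem_engines *engines;

        rcu_read_lock();
        do {
                engines = rcu_dereference(ctx->engines);
                GEM_BUG_ON(!engines);

                /* fence already signalled: set is being torn down */
                if (unlikely(!i915_sw_fence_await(&engines->fence)))
                        continue;

                /* still current? then our await keeps it alive */
                if (likely(engines == rcu_access_pointer(ctx->engines)))
                        break;

                i915_sw_fence_complete(&engines->fence);
        } while (1);
        rcu_read_unlock();

        return engines;
}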
1092 intel_engine_mask_t engines, in context_barrier_task() argument
1134 if (!(ce->engine->mask & engines)) in context_barrier_task()
1580 struct i915_gem_engines *engines; member
1606 if (idx >= set->engines->num_engines) { in set_engines__load_balance()
1608 idx, set->engines->num_engines); in set_engines__load_balance()
1612 idx = array_index_nospec(idx, set->engines->num_engines); in set_engines__load_balance()
1613 if (set->engines->engines[idx]) { in set_engines__load_balance()
1642 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { in set_engines__load_balance()
1667 if (cmpxchg(&set->engines->engines[idx], NULL, ce)) { in set_engines__load_balance()
1696 if (idx >= set->engines->num_engines) { in set_engines__bond()
1699 idx, set->engines->num_engines); in set_engines__bond()
1703 idx = array_index_nospec(idx, set->engines->num_engines); in set_engines__bond()
1704 if (!set->engines->engines[idx]) { in set_engines__bond()
1708 virtual = set->engines->engines[idx]->engine; in set_engines__bond()
1738 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) in set_engines__bond()
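
set_engines__load_balance() and set_engines__bond() service the extensions chained off the engines array. Note the pattern at 1606-1612 and 1696-1703: a bounds check, then array_index_nospec() to clamp the index against speculative execution, plus the cmpxchg() at 1667 so two racing extensions cannot claim the same slot. From userspace, a load-balanced (virtual) engine is requested roughly as below; a hedged sketch against the i915 uAPI headers (<drm/i915_drm.h>, <stdint.h>), where the target slot must be left invalid in the main array so the extension can claim it:

/* Virtual engine in slot 0, balancing across two video engines. */
I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balancer, 2) = {
        .base = { .name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE },
        .engine_index = 0,              /* which slot to fill */
        .num_siblings = 2,
        .engines = {
                { I915_ENGINE_CLASS_VIDEO, 0 },
                { I915_ENGINE_CLASS_VIDEO, 1 },
        },
};

I915_DEFINE_CONTEXT_PARAM_ENGINES(map, 1) = {
        .extensions = (uintptr_t)&balancer,
        /* slot 0 left invalid: the extension's cmpxchg claims it */
        .engines = {
                { I915_ENGINE_CLASS_INVALID,
                  I915_ENGINE_CLASS_INVALID_NONE },
        },
};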
1788 set.engines = default_engines(ctx); in set_engines()
1789 if (IS_ERR(set.engines)) in set_engines()
1790 return PTR_ERR(set.engines); in set_engines()
1795 BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines))); in set_engines()
1797 !IS_ALIGNED(args->size, sizeof(*user->engines))) { in set_engines()
1805 * first 64 engines defined here. in set_engines()
1807 num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines); in set_engines()
1808 set.engines = alloc_engines(num_engines); in set_engines()
1809 if (!set.engines) in set_engines()
1817 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) { in set_engines()
1818 __free_engines(set.engines, n); in set_engines()
1824 set.engines->engines[n] = NULL; in set_engines()
1835 __free_engines(set.engines, n); in set_engines()
1841 __free_engines(set.engines, n); in set_engines()
1847 set.engines->engines[n] = ce; in set_engines()
1849 set.engines->num_engines = num_engines; in set_engines()
1858 free_engines(set.engines); in set_engines()
1866 free_engines(set.engines); in set_engines()
1873 set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1); in set_engines()
1877 engines_idle_release(ctx, set.engines); in set_engines()
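
set_engines() itself is the I915_CONTEXT_PARAM_ENGINES setter: size zero restores the default map (1788-1790), otherwise the size must be an exact multiple of the per-engine element (1795-1797) and each {class, instance} pair is resolved into set.engines->engines[n]. The swap at 1873 hands the old set to engines_idle_release(), tying back to the teardown path above. A hedged userspace example of installing a map such as the one built in the previous sketch (error handling trimmed; the function name is illustrative):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int set_context_engines(int fd, uint32_t ctx_id,
                               const void *map, size_t size)
{
        struct drm_i915_gem_context_param arg = {
                .ctx_id = ctx_id,
                .param = I915_CONTEXT_PARAM_ENGINES,
                .value = (uintptr_t)map,
                .size = size,
        };

        /* slots are then addressed by index in execbuf, not by class */
        return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
}

Usage would be, e.g., set_context_engines(fd, ctx, &map, sizeof(map)).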
1893 if (e->engines[n]) in __copy_engines()
1894 copy->engines[n] = intel_context_get(e->engines[n]); in __copy_engines()
1896 copy->engines[n] = NULL; in __copy_engines()
1928 if (!check_struct_size(user, engines, count, &size)) { in get_engines()
1959 if (e->engines[n]) { in get_engines()
1960 ci.engine_class = e->engines[n]->engine->uabi_class; in get_engines()
1961 ci.engine_instance = e->engines[n]->engine->uabi_instance; in get_engines()
1964 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) { in get_engines()
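
get_engines() is the mirror GETPARAM: check_struct_size() at 1928 computes the exact byte size for count entries, and each populated slot is reported back by its uabi class/instance. Userspace typically probes with size zero first to learn the required buffer size; a hedged fragment:

struct drm_i915_gem_context_param arg = {
        .ctx_id = ctx_id,
        .param = I915_CONTEXT_PARAM_ENGINES,
        .size = 0,                      /* probe: kernel fills in size */
};

if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg) == 0) {
        /* arg.size now holds the bytes needed; allocate that much,
         * point arg.value at the buffer, and call GETPARAM again. */
}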
2151 if (!e->engines[n]) { in clone_engines()
2152 clone->engines[n] = NULL; in clone_engines()
2155 engine = e->engines[n]->engine; in clone_engines()
2158 * Virtual engines are singletons; they can only exist in clone_engines()
2167 clone->engines[n] = in clone_engines()
2170 clone->engines[n] = intel_context_create(engine); in clone_engines()
2171 if (IS_ERR_OR_NULL(clone->engines[n])) { in clone_engines()
2176 intel_context_set_gem(clone->engines[n], dst); in clone_engines()
2179 if (copy_ring_size(clone->engines[n], e->engines[n])) { in clone_engines()
2190 engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1)); in clone_engines()
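
clone_engines() backs the I915_CONTEXT_CLONE_ENGINES flag of extended context creation in kernels of this vintage: every slot of the source map is duplicated, virtual engines are recreated via their clone constructor (2167) because they are singletons (2158), and the new set is installed through the same engines_idle_release() handover (2190) as set_engines(). A hedged userspace example of the corresponding ioctl (same includes as the setter sketch above; the function name is illustrative):

static int clone_context_engines(int fd, uint32_t src_ctx, uint32_t *out)
{
        struct drm_i915_gem_context_create_ext_clone clone = {
                .base = { .name = I915_CONTEXT_CREATE_EXT_CLONE },
                .clone_id = src_ctx,
                .flags = I915_CONTEXT_CLONE_ENGINES,
        };
        struct drm_i915_gem_context_create_ext arg = {
                .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
                .extensions = (uintptr_t)&clone,
        };

        if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &arg))
                return -1;

        *out = arg.ctx_id;
        return 0;
}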
2232 struct intel_context *ce = e->engines[n]; in clone_sseu()
2234 if (clone->engines[n]->engine->class != ce->engine->class) { in clone_sseu()
2245 clone->engines[n]->sseu = ce->sseu; in clone_sseu()
2293 MAP(ENGINES, clone_engines), in create_clone()
2614 /* GEM context-engines iterator: for_each_gem_engine() */
2618 const struct i915_gem_engines *e = it->engines; in i915_gem_engines_iter_next()
2628 ctx = e->engines[it->idx++]; in i915_gem_engines_iter_next()
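
Finally, 2614-2628 implement the iterator behind for_each_gem_engine(): it walks the flexible engines[] array and skips NULL slots, which are legal for indices the user left invalid (see 1824). Kernel-internal walkers bracket the loop with the await/complete pair so the set cannot be freed mid-iteration; the usage pattern, pieced together from the lines above:

struct i915_gem_engines_iter it;
struct i915_gem_engines *e;
struct intel_context *ce;

e = __context_engines_await(ctx);       /* holds e->fence */
for_each_gem_engine(ce, e, it) {
        /* ce is non-NULL for every populated slot */
}
i915_sw_fence_complete(&e->fence);      /* permit teardown again */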