Lines Matching +full:termination +full:- +full:current

Matching lines from the i915 PXP driver (drivers/gpu/drm/i915/pxp/intel_pxp.c), grouped below by enclosing function. Matches within a group are not contiguous in the source; elided lines are marked with "...".

File header:
	// SPDX-License-Identifier: MIT
	...
	 * Objects can opt-in to PXP encryption at creation time via the
	...

intel_pxp_is_enabled():
	return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->ce;

intel_pxp_is_active():
	return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->arb_is_valid;

kcr_pxp_set_status():
	intel_uncore_write(pxp->ctrl_gt->uncore, KCR_INIT(pxp->kcr_base), val);
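
The two predicates above answer different questions: intel_pxp_is_enabled() only says PXP was set up (a backend context exists), while intel_pxp_is_active() additionally requires a valid arbitration session. A minimal sketch of how a caller might gate protected work on them; the wrapper submit_protected_work() is hypothetical, not part of the driver:

/* Hypothetical caller, for illustration only: refuse protected work unless
 * the arb session is currently valid. */
static int submit_protected_work(struct intel_pxp *pxp)
{
	if (!intel_pxp_is_enabled(pxp))
		return -ENODEV;	/* PXP not built in or never initialized */

	if (!intel_pxp_is_active(pxp))
		return -EIO;	/* session torn down; retry after intel_pxp_start() */

	/* ... queue work that references the current pxp->key_instance ... */
	return 0;
}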

create_vcs_context():
	struct intel_gt *gt = pxp->ctrl_gt;
	...
	engine = gt->engine_class[VIDEO_DECODE_CLASS][i];
	...
	GEM_BUG_ON(!engine || engine->class != VIDEO_DECODE_CLASS);
	...
	ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
	...
	drm_err(&gt->i915->drm, "failed to create VCS ctx for PXP\n");
	...
	pxp->ce = ce;

destroy_vcs_context():
	if (pxp->ce)
		intel_engine_destroy_pinned_context(fetch_and_zero(&pxp->ce));
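
fetch_and_zero() is an i915 helper that evaluates to the old value of a variable and clears it in the same expression, so the teardown above both destroys the pinned context and NULLs pxp->ce, leaving a repeat call a harmless no-op. A plain-C sketch of the same idiom without the macro (destroy_once() and its slot parameter are illustrative):

/* Illustrative only: grab the pointer, clear the field, then release, so
 * repeated teardown calls do nothing the second time around. */
static void destroy_once(struct intel_context **slot)
{
	struct intel_context *ce = *slot;

	*slot = NULL;
	if (ce)
		intel_engine_destroy_pinned_context(ce);
}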

pxp_init_full():
	struct intel_gt *gt = pxp->ctrl_gt;
	...
	/*
	 * we'll use the completion to check if there is a termination pending,
	 * so we start it as completed and we reinit it when a termination
	 * ...
	 */
	init_completion(&pxp->termination);
	complete_all(&pxp->termination);
	...
	if (pxp->ctrl_gt->type == GT_MEDIA)
		pxp->kcr_base = MTL_KCR_BASE;
	else
		pxp->kcr_base = GEN12_KCR_BASE;
	...
	if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
	...
	drm_info(&gt->i915->drm, "Protected Xe Path (PXP) protected content support initialized\n");
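
pxp_init_full() initializes the termination completion as already completed, so waiters only block while a termination is actually in flight; intel_pxp_mark_termination_in_progress() later re-arms it with reinit_completion(). A self-contained sketch of that pattern using the generic completion API (struct pxp_like and its helpers are illustrative, not the driver's types):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct pxp_like {
	struct completion termination;
};

static void pxp_like_init(struct pxp_like *p)
{
	init_completion(&p->termination);
	complete_all(&p->termination);		/* nothing pending right after init */
}

static void pxp_like_termination_begin(struct pxp_like *p)
{
	reinit_completion(&p->termination);	/* from now on, waiters block */
}

static void pxp_like_termination_done(struct pxp_like *p)
{
	complete_all(&p->termination);		/* wake every waiter */
}

static int pxp_like_wait(struct pxp_like *p, unsigned int timeout_ms)
{
	if (!wait_for_completion_timeout(&p->termination,
					 msecs_to_jiffies(timeout_ms)))
		return -ETIMEDOUT;

	return 0;
}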

find_gt_for_required_teelink():
	/*
	 * NOTE: Only certain platforms require PXP-tee-backend dependencies
	 * ...
	 */
	...
	    intel_huc_is_loaded_by_gsc(&to_gt(i915)->uc.huc) && intel_uc_uses_huc(&to_gt(i915)->uc))
	...

find_gt_for_required_protected_content():
	if (!IS_ENABLED(CONFIG_DRM_I915_PXP) || !INTEL_INFO(i915)->has_pxp)
	...
	/*
	 * For MTL onwards, PXP-controller-GT needs to have a valid GSC engine
	 * on the media GT. NOTE: if we have a media-tile with a GSC-engine,
	 * ...
	 */
	if (i915->media_gt && HAS_ENGINE(i915->media_gt, GSC0) &&
	    intel_uc_fw_is_loadable(&i915->media_gt->uc.gsc.fw) &&
	    intel_uc_fw_is_loadable(&i915->media_gt->uc.huc.fw))
		return i915->media_gt;
	...
	/*
	 * Else we rely on mei-pxp module but only on legacy platforms
	 * ...
	 */
	if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && !i915->media_gt && VDBOX_MASK(to_gt(i915)))
	...

intel_pxp_init():
	...
	return -ENODEV;
	...
	i915->pxp = kzalloc(sizeof(*i915->pxp), GFP_KERNEL);
	if (!i915->pxp)
		return -ENOMEM;
	...
	/* init common info used by all feature-mode usages */
	i915->pxp->ctrl_gt = gt;
	mutex_init(&i915->pxp->tee_mutex);
	...
	/*
	 * If full PXP feature is not available but HuC is loaded by GSC on pre-MTL
	 * ...
	 */
	...
	pxp_init_full(i915->pxp);
	...
	intel_pxp_tee_component_init(i915->pxp);
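
Only fragments of intel_pxp_init() appear in the matches; the code that ties the two finder helpers to the -ENODEV return is not among them. A hedged sketch of what such a two-tier selection can look like, reusing the helper names from above (pxp_pick_ctrl_gt() and its out-parameters are hypothetical, not the driver's structure):

/* Sketch only: prefer a GT that supports full PXP, otherwise fall back to a
 * GT that merely needs the tee link for HuC authentication, else bail out. */
static int pxp_pick_ctrl_gt(struct drm_i915_private *i915,
			    struct intel_gt **gt_out, bool *full_feature)
{
	struct intel_gt *gt;

	*full_feature = false;

	gt = find_gt_for_required_protected_content(i915);
	if (gt)
		*full_feature = true;
	else
		gt = find_gt_for_required_teelink(i915);

	if (!gt)
		return -ENODEV;

	*gt_out = gt;
	return 0;
}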

intel_pxp_fini():
	if (!i915->pxp)
	...
	i915->pxp->arb_is_valid = false;
	...
	if (HAS_ENGINE(i915->pxp->ctrl_gt, GSC0))
		intel_pxp_gsccs_fini(i915->pxp);
	...
	intel_pxp_tee_component_fini(i915->pxp);
	...
	destroy_vcs_context(i915->pxp);
	...
	kfree(i915->pxp);
	i915->pxp = NULL;

intel_pxp_mark_termination_in_progress():
	pxp->arb_is_valid = false;
	reinit_completion(&pxp->termination);

pxp_queue_termination():
	struct intel_gt *gt = pxp->ctrl_gt;
	...
	/*
	 * We want to get the same effect as if we received a termination
	 * ...
	 */
	spin_lock_irq(gt->irq_lock);
	...
	pxp->session_events |= PXP_TERMINATION_REQUEST;
	queue_work(system_unbound_wq, &pxp->session_work);
	spin_unlock_irq(gt->irq_lock);
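
pxp_queue_termination() fakes the interrupt path: it records PXP_TERMINATION_REQUEST in pxp->session_events under the GT interrupt lock and kicks the session worker on system_unbound_wq. A sketch of what the consuming side of such an event word typically looks like; this is illustrative, not the driver's actual session worker:

#include <linux/spinlock.h>
#include <linux/workqueue.h>

static void example_session_worker(struct work_struct *work)
{
	struct intel_pxp *pxp = container_of(work, struct intel_pxp, session_work);
	struct intel_gt *gt = pxp->ctrl_gt;
	u32 events;

	/* Snapshot and clear the pending events under the same lock that the
	 * interrupt handler (or pxp_queue_termination()) uses to set them. */
	spin_lock_irq(gt->irq_lock);
	events = pxp->session_events;
	pxp->session_events = 0;
	spin_unlock_irq(gt->irq_lock);

	if (events & PXP_TERMINATION_REQUEST) {
		intel_pxp_mark_termination_in_progress(pxp);
		/* ... ask the backend to terminate the arb session, and
		 * complete_all(&pxp->termination) once it is done ... */
	}
}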

pxp_component_bound():
	mutex_lock(&pxp->tee_mutex);
	if (pxp->pxp_component)
	...
	mutex_unlock(&pxp->tee_mutex);

intel_pxp_get_backend_timeout_ms():
	if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
	...

__pxp_global_teardown_final():
	if (!pxp->arb_is_valid)
	...
	if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout)))
		return -ETIMEDOUT;

__pxp_global_teardown_restart():
	if (pxp->arb_is_valid)
	...
	/*
	 * The arb-session is currently inactive and we are doing a reset and restart
	 * ...
	 */
	...
	if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout)))
		return -ETIMEDOUT;
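
Both teardown helpers end the same way: wait on pxp->termination with a backend-dependent timeout and report -ETIMEDOUT on failure. A condensed sketch that combines the pieces shown above (teardown_and_wait() is illustrative; the real helpers also handle arb_is_valid bookkeeping and the error paths):

/* Illustrative: arm the completion, request a termination, then wait for the
 * backend to signal it, bounded by the per-backend timeout. */
static int teardown_and_wait(struct intel_pxp *pxp)
{
	int timeout = intel_pxp_get_backend_timeout_ms(pxp);

	intel_pxp_mark_termination_in_progress(pxp);
	pxp_queue_termination(pxp);

	if (!wait_for_completion_timeout(&pxp->termination,
					 msecs_to_jiffies(timeout)))
		return -ETIMEDOUT;

	return 0;
}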

intel_pxp_end():
	struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
	...
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	...
	mutex_lock(&pxp->arb_mutex);
	...
	drm_dbg(&i915->drm, "PXP end timed out\n");
	...
	mutex_unlock(&pxp->arb_mutex);
	...
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

intel_pxp_get_readiness_status():
	...
	return -ENODEV;
	...
	if (HAS_ENGINE(pxp->ctrl_gt, GSC0)) {
	...

Comment fragment at file scope (not inside a function body):
	 * termination completion interrupt
	...
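
intel_pxp_get_readiness_status() backs the PXP readiness query that user space reaches through the i915 GETPARAM interface, and the intel_pxp_start() fragment below notes that -EIO means "retry later". A userspace sketch of polling that readiness, assuming the I915_PARAM_PXP_STATUS parameter from the i915 uAPI header (query_pxp_status() is illustrative; error handling trimmed):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Illustrative userspace probe: per the uAPI docs, 1 means PXP is ready,
 * 2 means dependencies are still initializing (retry later), and the ioctl
 * fails with ENODEV when PXP is unsupported. 'fd' is an open i915 DRM fd. */
static int query_pxp_status(int fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_PXP_STATUS,
		.value = &value,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;

	return value;
}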

intel_pxp_start():
	...
	return -EIO; /* per UAPI spec, user may retry later */
	...
	mutex_lock(&pxp->arb_mutex);
	...
	if (!pxp->arb_is_valid)
		ret = -EIO;
	...
	mutex_unlock(&pxp->arb_mutex);
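
intel_pxp_start() is what ultimately runs when user space asks for a protected context. A userspace sketch of that request, following the extension-chaining pattern documented in the i915 uAPI header; per those docs a protected context must also be marked non-recoverable (create_protected_context() is illustrative; error handling trimmed):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Illustrative: create a GEM context with PROTECTED_CONTENT enabled and
 * RECOVERABLE disabled, chaining two SETPARAM extensions. Returns the new
 * context id, or 0 on failure (e.g. -EIO while the PXP session is not ready,
 * in which case the caller may retry). */
static uint32_t create_protected_context(int fd)
{
	struct drm_i915_gem_context_create_ext_setparam p_protected = {
		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
		.param = {
			.param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
			.value = 1,
		},
	};
	struct drm_i915_gem_context_create_ext_setparam p_norecover = {
		.base = {
			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
			.next_extension = (uintptr_t)&p_protected,
		},
		.param = {
			.param = I915_CONTEXT_PARAM_RECOVERABLE,
			.value = 0,
		},
	};
	struct drm_i915_gem_context_create_ext create = {
		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
		.extensions = (uintptr_t)&p_norecover,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create))
		return 0;

	return create.ctx_id;
}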

intel_pxp_key_check():
	...
	return -ENODEV;
	...
	return -EINVAL;
	...
	GEM_BUG_ON(!pxp->key_instance);
	...
	/*
	 * ...
	 * encrypted yet; it will be encrypted with the current key, so mark it
	 * ...
	 */
	if (!obj->pxp_key_instance && assign)
		obj->pxp_key_instance = pxp->key_instance;
	...
	if (obj->pxp_key_instance != pxp->key_instance)
		return -ENOEXEC;
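
The pxp_key_instance checks above are the kernel side of the object-level uAPI that the truncated header comment at the top of the listing refers to: buffers opt in to PXP encryption at creation time and are tied to the key instance current at that point. A userspace sketch of creating such a buffer with the PROTECTED_CONTENT create extension from the i915 uAPI (create_protected_bo() is illustrative; error handling trimmed):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Illustrative: allocate a GEM buffer flagged as protected content, so the
 * kernel associates it with the PXP key instance valid at creation time. */
static uint32_t create_protected_bo(int fd, uint64_t size)
{
	struct drm_i915_gem_create_ext_protected_content protected_ext = {
		.base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
		.flags = 0,
	};
	struct drm_i915_gem_create_ext create = {
		.size = size,
		.extensions = (uintptr_t)&protected_ext,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create))
		return 0;

	return create.handle;
}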

intel_pxp_invalidate():
	struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
	...
	spin_lock_irq(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		...
		if (!kref_get_unless_zero(&ctx->ref))
		...
		spin_unlock_irq(&i915->gem.contexts.lock);
		...
		if (ctx->pxp_wakeref) {
			intel_runtime_pm_put(&i915->runtime_pm,
					     ctx->pxp_wakeref);
			ctx->pxp_wakeref = 0;
		}
		...
		spin_lock_irq(&i915->gem.contexts.lock);
		...
	}
	spin_unlock_irq(&i915->gem.contexts.lock);
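
intel_pxp_invalidate() walks the context list under a spinlock but has to drop that lock around the heavy per-context work (banning engines, releasing the PXP runtime-PM reference). A self-contained sketch of that pattern with a hypothetical struct item: the reference taken with kref_get_unless_zero() keeps the current entry alive while the lock is released, and list_safe_reset_next() re-reads the next pointer once the lock is back, since the previously saved next entry may have been freed in the meantime:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical list element, for illustration only. */
struct item {
	struct list_head link;
	struct kref ref;
};

static void item_release(struct kref *ref)
{
	kfree(container_of(ref, struct item, ref));
}

static void revoke_all(spinlock_t *lock, struct list_head *head)
{
	struct item *it, *next;

	spin_lock_irq(lock);
	list_for_each_entry_safe(it, next, head, link) {
		if (!kref_get_unless_zero(&it->ref))
			continue;	/* entry is already being freed, skip it */

		spin_unlock_irq(lock);

		/* ... expensive per-item work that may sleep goes here ... */

		spin_lock_irq(lock);
		list_safe_reset_next(it, next, link);
		kref_put(&it->ref, item_release);
	}
	spin_unlock_irq(lock);
}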