Lines matching refs: ring_id
137 int ring_id = workload->engine->id; in populate_shadow_context() local
188 if (s->last_ctx[ring_id].valid && in populate_shadow_context()
189 (s->last_ctx[ring_id].lrca == in populate_shadow_context()
191 (s->last_ctx[ring_id].ring_context_gpa == in populate_shadow_context()
195 s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca; in populate_shadow_context()
196 s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa; in populate_shadow_context()
201 s->last_ctx[ring_id].valid = false; in populate_shadow_context()
240 s->last_ctx[ring_id].valid = true; in populate_shadow_context()
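
The hits above fall inside populate_shadow_context(), which appears to come from the i915 GVT-g scheduler in the kernel tree. They implement a per-engine "last context" cache: if the incoming workload's lrca and ring_context_gpa match a valid cached entry, the guest-to-shadow context copy can be skipped; otherwise the cache is refreshed, held invalid while population runs, and marked valid only once the copy completes. Below is a minimal, self-contained C sketch of that pattern; all types, sizes, and helper names (NUM_ENGINES, shadow_ctx_cache_hit, shadow_ctx_populate) are simplified placeholders, not the kernel's definitions.

    /* Illustrative sketch only; field and type names are stand-ins. */
    #include <stdbool.h>
    #include <stdint.h>

    #define NUM_ENGINES 4   /* placeholder engine count */

    struct last_ctx_cache {
        bool     valid;             /* population completed successfully */
        uint32_t lrca;              /* logical ring context address      */
        uint64_t ring_context_gpa;  /* guest PA of the ring context      */
    };

    struct shadow_state {
        struct last_ctx_cache last_ctx[NUM_ENGINES];
    };

    struct workload {
        int      ring_id;           /* engine index, as in the listing   */
        uint32_t lrca;
        uint64_t ring_context_gpa;
    };

    /* True if the cached descriptor still matches, so the expensive
     * guest->shadow copy can be skipped. */
    bool shadow_ctx_cache_hit(struct shadow_state *s, struct workload *w)
    {
        struct last_ctx_cache *c = &s->last_ctx[w->ring_id];
        return c->valid && c->lrca == w->lrca &&
               c->ring_context_gpa == w->ring_context_gpa;
    }

    void shadow_ctx_populate(struct shadow_state *s, struct workload *w)
    {
        struct last_ctx_cache *c = &s->last_ctx[w->ring_id];

        if (shadow_ctx_cache_hit(s, w))
            return;                    /* cache hit: skip the copy */

        /* Record the new descriptor and invalidate until the copy is
         * done, mirroring the valid=false ... valid=true bracket in
         * the listing (lines 201 and 240). */
        c->lrca = w->lrca;
        c->ring_context_gpa = w->ring_context_gpa;
        c->valid = false;

        /* ... copy guest context pages into the shadow context ... */

        c->valid = true;               /* mark valid only on success */
    }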
275 enum intel_engine_id ring_id = rq->engine->id; in shadow_context_status_change() local
282 scheduler->engine_owner[ring_id]) { in shadow_context_status_change()
284 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], in shadow_context_status_change()
286 scheduler->engine_owner[ring_id] = NULL; in shadow_context_status_change()
293 workload = scheduler->current_workload[ring_id]; in shadow_context_status_change()
300 if (workload->vgpu != scheduler->engine_owner[ring_id]) { in shadow_context_status_change()
302 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], in shadow_context_status_change()
304 scheduler->engine_owner[ring_id] = workload->vgpu; in shadow_context_status_change()
307 ring_id, workload->vgpu->id); in shadow_context_status_change()
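
The shadow_context_status_change() hits track per-engine MMIO ownership: when a request is scheduled in and the engine's current owner differs from the workload's vGPU, intel_gvt_switch_mmio() hands the render MMIO state over and engine_owner[ring_id] is updated; when the engine idles, ownership is returned to the host (NULL). A hedged sketch of that hand-off follows, with placeholder types and a stand-in switch_mmio() instead of the real intel_gvt_switch_mmio():

    /* Illustrative sketch only; types and helpers are stand-ins. */
    #include <stddef.h>
    #include <stdio.h>

    #define NUM_ENGINES 4   /* placeholder engine count */

    struct vgpu { int id; };

    struct sched {
        struct vgpu *engine_owner[NUM_ENGINES]; /* NULL => host owns it */
    };

    /* Stand-in for intel_gvt_switch_mmio(pre, next, engine): saves and
     * restores engine MMIO state between owners. */
    void switch_mmio(struct vgpu *pre, struct vgpu *next, int ring_id)
    {
        printf("ring %d: mmio owner %d -> %d\n", ring_id,
               pre ? pre->id : -1, next ? next->id : -1);
    }

    /* Called when a workload becomes active on the engine. */
    void on_schedule_in(struct sched *s, struct vgpu *v, int ring_id)
    {
        if (s->engine_owner[ring_id] != v) {
            /* Switch render MMIO from the previous owner (or the
             * host, NULL) to the vGPU owning the incoming workload. */
            switch_mmio(s->engine_owner[ring_id], v, ring_id);
            s->engine_owner[ring_id] = v;
        }
    }

    /* Called when the engine goes idle with no current workload. */
    void on_idle(struct sched *s, int ring_id)
    {
        if (s->engine_owner[ring_id]) {
            /* Hand the engine's MMIO state back to the host. */
            switch_mmio(s->engine_owner[ring_id], NULL, ring_id);
            s->engine_owner[ring_id] = NULL;
        }
    }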
1018 static void complete_current_workload(struct intel_gvt *gvt, int ring_id) in complete_current_workload() argument
1022 scheduler->current_workload[ring_id]; in complete_current_workload()
1052 !(vgpu->resetting_eng & BIT(ring_id))) { in complete_current_workload()
1064 ring_id, workload, workload->status); in complete_current_workload()
1066 scheduler->current_workload[ring_id] = NULL; in complete_current_workload()
1070 if (workload->status || vgpu->resetting_eng & BIT(ring_id)) { in complete_current_workload()
1084 intel_vgpu_clean_workloads(vgpu, BIT(ring_id)); in complete_current_workload()
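
Finally, the complete_current_workload() hits show the completion path: the engine's current_workload slot is cleared, guest-visible completion state is only updated when the vGPU is not resetting this engine (the resetting_eng & BIT(ring_id) test), and on failure or reset the rest of that engine's queue is flushed via intel_vgpu_clean_workloads(). A simplified sketch, again with illustrative placeholder types and a stand-in clean_engine_workloads():

    /* Illustrative sketch only; types and helpers are stand-ins. */
    #include <stdbool.h>
    #include <stddef.h>

    #define NUM_ENGINES 4
    #define BIT(n) (1u << (n))

    struct vgpu { unsigned int resetting_eng; };  /* engine reset mask */

    struct workload {
        struct vgpu *vgpu;
        int status;                 /* 0 on success */
    };

    struct sched {
        struct workload *current_workload[NUM_ENGINES];
    };

    /* Stand-in for intel_vgpu_clean_workloads(): drops any queued
     * workloads for the engines named in engine_mask. */
    void clean_engine_workloads(struct vgpu *v, unsigned int engine_mask)
    {
        (void)v; (void)engine_mask;
    }

    void complete_workload(struct sched *s, int ring_id)
    {
        struct workload *w = s->current_workload[ring_id];
        struct vgpu *v = w->vgpu;

        /* Mirror the BIT(ring_id) check in the listing: skip normal
         * completion bookkeeping while this engine is being reset. */
        bool resetting = v->resetting_eng & BIT(ring_id);

        s->current_workload[ring_id] = NULL;   /* free the slot */

        if (w->status || resetting)
            /* On error or reset, flush the rest of this engine's queue. */
            clean_engine_workloads(v, BIT(ring_id));
    }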