Lines matching refs: workload (intel_vgpu_workload references in the i915 GVT-g scheduler, drivers/gpu/drm/i915/gvt/scheduler.c)

54 static void update_shadow_pdps(struct intel_vgpu_workload *workload)  in update_shadow_pdps()  argument
57 workload->req->hw_context->state->obj; in update_shadow_pdps()
61 if (WARN_ON(!workload->shadow_mm)) in update_shadow_pdps()
64 if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount))) in update_shadow_pdps()
70 (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps); in update_shadow_pdps()
79 static void sr_oa_regs(struct intel_vgpu_workload *workload, in sr_oa_regs() argument
82 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; in sr_oa_regs()
96 if (workload->ring_id != RCS) in sr_oa_regs()
100 workload->oactxctrl = reg_state[ctx_oactxctrl + 1]; in sr_oa_regs()
102 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) { in sr_oa_regs()
105 workload->flex_mmio[i] = reg_state[state_offset + 1]; in sr_oa_regs()
110 reg_state[ctx_oactxctrl + 1] = workload->oactxctrl; in sr_oa_regs()
112 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) { in sr_oa_regs()
117 reg_state[state_offset + 1] = workload->flex_mmio[i]; in sr_oa_regs()
122 static int populate_shadow_context(struct intel_vgpu_workload *workload) in populate_shadow_context() argument
124 struct intel_vgpu *vgpu = workload->vgpu; in populate_shadow_context()
126 int ring_id = workload->ring_id; in populate_shadow_context()
128 workload->req->hw_context->state->obj; in populate_shadow_context()
136 workload->ctx_desc.lrca); in populate_shadow_context()
149 (u32)((workload->ctx_desc.lrca + i) << in populate_shadow_context()
167 sr_oa_regs(workload, (u32 *)shadow_ring_context, true); in populate_shadow_context()
169 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ in populate_shadow_context()
172 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ in populate_shadow_context()
190 workload->ring_context_gpa + in populate_shadow_context()
196 sr_oa_regs(workload, (u32 *)shadow_ring_context, false); in populate_shadow_context()
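
The two line-continued intel_gvt_hypervisor_read_gpa() calls above are the bodies of copy-helper macros used while populating the shadow context. A plausible reconstruction of the plain helper, shown only as a sketch (the COPY_REG macro name and the example register are assumptions; RING_CTX_OFF() appears elsewhere in this listing):

    #define COPY_REG(name) \
            intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
                    + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

    /* used as, e.g., COPY_REG(ctx_ctrl); each invocation pulls one 4-byte
     * register value from the guest's ring context page into the shadow
     * ring context at the same offset */
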
228 struct intel_vgpu_workload *workload; in shadow_context_status_change() local
245 workload = scheduler->current_workload[ring_id]; in shadow_context_status_change()
246 if (unlikely(!workload)) in shadow_context_status_change()
252 if (workload->vgpu != scheduler->engine_owner[ring_id]) { in shadow_context_status_change()
255 workload->vgpu, ring_id); in shadow_context_status_change()
256 scheduler->engine_owner[ring_id] = workload->vgpu; in shadow_context_status_change()
259 ring_id, workload->vgpu->id); in shadow_context_status_change()
261 atomic_set(&workload->shadow_ctx_active, 1); in shadow_context_status_change()
264 save_ring_hw_state(workload->vgpu, ring_id); in shadow_context_status_change()
265 atomic_set(&workload->shadow_ctx_active, 0); in shadow_context_status_change()
268 save_ring_hw_state(workload->vgpu, ring_id); in shadow_context_status_change()
274 wake_up(&workload->shadow_ctx_status_wq); in shadow_context_status_change()
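
The atomic flag and waitqueue touched here form one half of a handshake; the other half is the wait_event() shown later in complete_current_workload(). A minimal sketch of that handshake, assuming the callback is driven by the execlists context status notifier (the INTEL_CONTEXT_SCHEDULE_* codes and the switch shape are assumptions):

    /* notifier side: shadow_context_status_change(), per status event */
    switch (status) {
    case INTEL_CONTEXT_SCHEDULE_IN:
            atomic_set(&workload->shadow_ctx_active, 1);
            break;
    case INTEL_CONTEXT_SCHEDULE_OUT:
            save_ring_hw_state(workload->vgpu, ring_id);
            atomic_set(&workload->shadow_ctx_active, 0);
            break;
    case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
            save_ring_hw_state(workload->vgpu, ring_id);
            break;
    default:
            break;
    }
    wake_up(&workload->shadow_ctx_status_wq);

    /* completion side: complete_current_workload(), further down */
    wait_event(workload->shadow_ctx_status_wq,
               !atomic_read(&workload->shadow_ctx_active));
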
293 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload) in copy_workload_to_ring_buffer() argument
295 struct intel_vgpu *vgpu = workload->vgpu; in copy_workload_to_ring_buffer()
296 struct i915_request *req = workload->req; in copy_workload_to_ring_buffer()
305 cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32)); in copy_workload_to_ring_buffer()
308 workload->rb_len); in copy_workload_to_ring_buffer()
312 shadow_ring_buffer_va = workload->shadow_ring_buffer_va; in copy_workload_to_ring_buffer()
315 workload->shadow_ring_buffer_va = cs; in copy_workload_to_ring_buffer()
318 workload->rb_len); in copy_workload_to_ring_buffer()
320 cs += workload->rb_len / sizeof(u32); in copy_workload_to_ring_buffer()
321 intel_ring_advance(workload->req, cs); in copy_workload_to_ring_buffer()
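
The copy itself sits between the listed lines; a sketch of the full sequence, with the IS_ERR() check and the memcpy() filled in as assumptions: intel_ring_begin() reserves room for workload->rb_len bytes of commands on the shadow request's ring, the previously captured guest ring contents are copied in, and intel_ring_advance() commits the write pointer.

    cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
    if (IS_ERR(cs))
            return PTR_ERR(cs);             /* no ring space */

    shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
    workload->shadow_ring_buffer_va = cs;   /* remember the new location */

    memcpy(cs, shadow_ring_buffer_va, workload->rb_len);
    cs += workload->rb_len / sizeof(u32);
    intel_ring_advance(workload->req, cs);
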
343 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) in intel_gvt_scan_and_shadow_workload() argument
345 struct intel_vgpu *vgpu = workload->vgpu; in intel_gvt_scan_and_shadow_workload()
349 struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id]; in intel_gvt_scan_and_shadow_workload()
356 if (workload->req) in intel_gvt_scan_and_shadow_workload()
373 shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode << in intel_gvt_scan_and_shadow_workload()
376 if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated)) in intel_gvt_scan_and_shadow_workload()
379 ret = intel_gvt_scan_and_shadow_ringbuffer(workload); in intel_gvt_scan_and_shadow_workload()
383 if ((workload->ring_id == RCS) && in intel_gvt_scan_and_shadow_workload()
384 (workload->wa_ctx.indirect_ctx.size != 0)) { in intel_gvt_scan_and_shadow_workload()
385 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); in intel_gvt_scan_and_shadow_workload()
396 workload->req = i915_request_get(rq); in intel_gvt_scan_and_shadow_workload()
398 ret = populate_shadow_context(workload); in intel_gvt_scan_and_shadow_workload()
404 rq = fetch_and_zero(&workload->req); in intel_gvt_scan_and_shadow_workload()
407 release_shadow_wa_ctx(&workload->wa_ctx); in intel_gvt_scan_and_shadow_workload()
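
A control-flow sketch of the scan-and-shadow path above (the goto label names and the elided request allocation are assumptions): the guest ring buffer is scanned and shadowed first, the RCS workaround context is shadowed if present, a shadow i915 request is taken and referenced, and the shadow context is populated; errors unwind in reverse.

    if (workload->req)
            return 0;               /* already scanned and shadowed */

    ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
    if (ret)
            return ret;

    if ((workload->ring_id == RCS) &&
        (workload->wa_ctx.indirect_ctx.size != 0)) {
            ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
            if (ret)
                    goto err_shadow;
    }

    /* rq = allocate an i915 request on this engine's shadow context
     * (allocation call elided); take a reference for the workload */
    workload->req = i915_request_get(rq);

    ret = populate_shadow_context(workload);
    if (ret)
            goto err_req;
    return 0;

    err_req:
    rq = fetch_and_zero(&workload->req);
    i915_request_put(rq);
    err_shadow:
    release_shadow_wa_ctx(&workload->wa_ctx);
    return ret;
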
413 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
415 static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) in prepare_shadow_batch_buffer() argument
417 struct intel_gvt *gvt = workload->vgpu->gvt; in prepare_shadow_batch_buffer()
422 list_for_each_entry(bb, &workload->shadow_bb, list) { in prepare_shadow_batch_buffer()
431 bb->bb_start_cmd_va = workload->shadow_ring_buffer_va in prepare_shadow_batch_buffer()
480 workload->req, in prepare_shadow_batch_buffer()
488 release_shadow_batch_buffer(workload); in prepare_shadow_batch_buffer()
494 struct intel_vgpu_workload *workload = in update_wa_ctx_2_shadow_ctx() local
496 struct i915_request *rq = workload->req; in update_wa_ctx_2_shadow_ctx()
537 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) in release_shadow_batch_buffer() argument
539 struct intel_vgpu *vgpu = workload->vgpu; in release_shadow_batch_buffer()
543 if (list_empty(&workload->shadow_bb)) in release_shadow_batch_buffer()
546 bb = list_first_entry(&workload->shadow_bb, in release_shadow_batch_buffer()
551 list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) { in release_shadow_batch_buffer()
572 static int prepare_workload(struct intel_vgpu_workload *workload) in prepare_workload() argument
574 struct intel_vgpu *vgpu = workload->vgpu; in prepare_workload()
577 ret = intel_vgpu_pin_mm(workload->shadow_mm); in prepare_workload()
583 update_shadow_pdps(workload); in prepare_workload()
585 ret = intel_vgpu_sync_oos_pages(workload->vgpu); in prepare_workload()
591 ret = intel_vgpu_flush_post_shadow(workload->vgpu); in prepare_workload()
597 ret = copy_workload_to_ring_buffer(workload); in prepare_workload()
603 ret = prepare_shadow_batch_buffer(workload); in prepare_workload()
609 ret = prepare_shadow_wa_ctx(&workload->wa_ctx); in prepare_workload()
615 if (workload->prepare) { in prepare_workload()
616 ret = workload->prepare(workload); in prepare_workload()
623 release_shadow_wa_ctx(&workload->wa_ctx); in prepare_workload()
625 release_shadow_batch_buffer(workload); in prepare_workload()
627 intel_vgpu_unpin_mm(workload->shadow_mm); in prepare_workload()
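
prepare_workload() above is a chain of fallible steps; a control-flow sketch of the unwind order implied by the error-path lines (the goto label names are assumptions): each step that fails releases what the earlier steps set up, in reverse order.

    ret = intel_vgpu_pin_mm(workload->shadow_mm);
    if (ret)
            return ret;

    update_shadow_pdps(workload);

    ret = intel_vgpu_sync_oos_pages(workload->vgpu);
    if (ret)
            goto err_unpin_mm;

    ret = intel_vgpu_flush_post_shadow(workload->vgpu);
    if (ret)
            goto err_unpin_mm;

    ret = copy_workload_to_ring_buffer(workload);
    if (ret)
            goto err_unpin_mm;

    ret = prepare_shadow_batch_buffer(workload);
    if (ret)
            goto err_unpin_mm;

    ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
    if (ret)
            goto err_shadow_batch;

    if (workload->prepare) {
            ret = workload->prepare(workload);
            if (ret)
                    goto err_shadow_wa_ctx;
    }
    return 0;

    err_shadow_wa_ctx:
    release_shadow_wa_ctx(&workload->wa_ctx);
    err_shadow_batch:
    release_shadow_batch_buffer(workload);
    err_unpin_mm:
    intel_vgpu_unpin_mm(workload->shadow_mm);
    return ret;
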
631 static int dispatch_workload(struct intel_vgpu_workload *workload) in dispatch_workload() argument
633 struct intel_vgpu *vgpu = workload->vgpu; in dispatch_workload()
635 int ring_id = workload->ring_id; in dispatch_workload()
639 ring_id, workload); in dispatch_workload()
644 ret = intel_gvt_scan_and_shadow_workload(workload); in dispatch_workload()
648 ret = prepare_workload(workload); in dispatch_workload()
652 workload->status = ret; in dispatch_workload()
654 if (!IS_ERR_OR_NULL(workload->req)) { in dispatch_workload()
656 ring_id, workload->req); in dispatch_workload()
657 i915_request_add(workload->req); in dispatch_workload()
658 workload->dispatched = true; in dispatch_workload()
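
Dispatch, as implied above, is thin: shadow, prepare, record any failure in workload->status, and hand the shadow request to i915. A sketch (locking, the "out" label, and the exact condition around the status assignment are assumptions):

    ret = intel_gvt_scan_and_shadow_workload(workload);
    if (ret)
            goto out;

    ret = prepare_workload(workload);

    out:
    if (ret)
            workload->status = ret; /* completion path reports the error */

    if (!IS_ERR_OR_NULL(workload->req)) {
            i915_request_add(workload->req);        /* submit to i915 */
            workload->dispatched = true;
    }
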
670 struct intel_vgpu_workload *workload = NULL; in pick_next_workload() local
696 workload = scheduler->current_workload[ring_id]; in pick_next_workload()
698 ring_id, workload); in pick_next_workload()
712 workload = scheduler->current_workload[ring_id]; in pick_next_workload()
714 gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload); in pick_next_workload()
716 atomic_inc(&workload->vgpu->submission.running_workload_num); in pick_next_workload()
719 return workload; in pick_next_workload()
722 static void update_guest_context(struct intel_vgpu_workload *workload) in update_guest_context() argument
724 struct i915_request *rq = workload->req; in update_guest_context()
725 struct intel_vgpu *vgpu = workload->vgpu; in update_guest_context()
735 workload->ctx_desc.lrca); in update_guest_context()
747 (u32)((workload->ctx_desc.lrca + i) << in update_guest_context()
762 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + in update_guest_context()
763 RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4); in update_guest_context()
769 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \ in update_guest_context()
778 workload->ring_context_gpa + in update_guest_context()
810 struct intel_vgpu_workload *workload = in complete_current_workload() local
812 struct intel_vgpu *vgpu = workload->vgpu; in complete_current_workload()
814 struct i915_request *rq = workload->req; in complete_current_workload()
825 wait_event(workload->shadow_ctx_status_wq, in complete_current_workload()
826 !atomic_read(&workload->shadow_ctx_active)); in complete_current_workload()
833 if (likely(workload->status == -EINPROGRESS)) { in complete_current_workload()
834 if (workload->req->fence.error == -EIO) in complete_current_workload()
835 workload->status = -EIO; in complete_current_workload()
837 workload->status = 0; in complete_current_workload()
840 if (!workload->status && !(vgpu->resetting_eng & in complete_current_workload()
842 update_guest_context(workload); in complete_current_workload()
844 for_each_set_bit(event, workload->pending_events, in complete_current_workload()
854 i915_request_put(fetch_and_zero(&workload->req)); in complete_current_workload()
858 ring_id, workload, workload->status); in complete_current_workload()
862 list_del_init(&workload->list); in complete_current_workload()
864 if (!workload->status) { in complete_current_workload()
865 release_shadow_batch_buffer(workload); in complete_current_workload()
866 release_shadow_wa_ctx(&workload->wa_ctx); in complete_current_workload()
869 if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { in complete_current_workload()
886 workload->complete(workload); in complete_current_workload()
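
A sketch of the completion sequence these lines describe (the conditions come from the listed lines; the elided pieces are noted in comments): wait for the shadow context to go idle, settle the final status, write results back to the guest, then release the per-workload shadow state and notify the submitter.

    wait_event(workload->shadow_ctx_status_wq,
               !atomic_read(&workload->shadow_ctx_active));

    /* settle the final status: -EINPROGRESS becomes 0, or -EIO if the
     * shadow request's fence was signalled with an error */
    if (likely(workload->status == -EINPROGRESS)) {
            if (workload->req->fence.error == -EIO)
                    workload->status = -EIO;
            else
                    workload->status = 0;
    }

    if (!workload->status &&
        !(vgpu->resetting_eng & ENGINE_MASK(ring_id)))
            update_guest_context(workload);
    /* virtual event injection for workload->pending_events elided */

    i915_request_put(fetch_and_zero(&workload->req));

    list_del_init(&workload->list);
    if (!workload->status) {
            release_shadow_batch_buffer(workload);
            release_shadow_wa_ctx(&workload->wa_ctx);
    }
    /* on failure or engine reset, further cleanup happens here (elided) */

    workload->complete(workload);
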
909 struct intel_vgpu_workload *workload = NULL; in workload_thread() local
924 workload = pick_next_workload(gvt, ring_id); in workload_thread()
925 if (workload) in workload_thread()
932 if (!workload) in workload_thread()
936 workload->ring_id, workload, in workload_thread()
937 workload->vgpu->id); in workload_thread()
942 workload->ring_id, workload); in workload_thread()
948 ret = dispatch_workload(workload); in workload_thread()
951 vgpu = workload->vgpu; in workload_thread()
957 workload->ring_id, workload); in workload_thread()
958 i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT); in workload_thread()
962 workload, workload->status); in workload_thread()
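
workload_thread() is the per-ring worker; a simplified sketch of its loop (the waitqueue sleep between picks, error logging, and the "complete" label are assumptions or elided): pick the next queued workload, dispatch its shadow request, wait for the hardware to finish it, then complete it back to the guest.

    while (!kthread_should_stop()) {
            /* the real loop sleeps on the scheduler's per-ring waitqueue
             * between picks; that bookkeeping is elided here */
            workload = pick_next_workload(gvt, ring_id);
            if (!workload)
                    break;

            ret = dispatch_workload(workload);
            if (ret) {
                    vgpu = workload->vgpu;
                    goto complete;          /* dispatch failed, skip wait */
            }

            /* wait for the shadow request to finish on the hardware */
            i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

    complete:
            complete_current_workload(gvt, ring_id);
    }
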
1198 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload) in intel_vgpu_destroy_workload() argument
1200 struct intel_vgpu_submission *s = &workload->vgpu->submission; in intel_vgpu_destroy_workload()
1202 if (workload->shadow_mm) in intel_vgpu_destroy_workload()
1203 intel_vgpu_mm_put(workload->shadow_mm); in intel_vgpu_destroy_workload()
1205 kmem_cache_free(s->workloads, workload); in intel_vgpu_destroy_workload()
1212 struct intel_vgpu_workload *workload; in alloc_workload() local
1214 workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL); in alloc_workload()
1215 if (!workload) in alloc_workload()
1218 INIT_LIST_HEAD(&workload->list); in alloc_workload()
1219 INIT_LIST_HEAD(&workload->shadow_bb); in alloc_workload()
1221 init_waitqueue_head(&workload->shadow_ctx_status_wq); in alloc_workload()
1222 atomic_set(&workload->shadow_ctx_active, 0); in alloc_workload()
1224 workload->status = -EINPROGRESS; in alloc_workload()
1225 workload->vgpu = vgpu; in alloc_workload()
1227 return workload; in alloc_workload()
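
alloc_workload() is almost fully visible above; a reconstruction (the function signature and the ERR_PTR(-ENOMEM) return on allocation failure are assumptions), showing why every workload starts life with status -EINPROGRESS, which complete_current_workload() later resolves to 0 or -EIO:

    static struct intel_vgpu_workload *
    alloc_workload(struct intel_vgpu *vgpu)
    {
            struct intel_vgpu_submission *s = &vgpu->submission;
            struct intel_vgpu_workload *workload;

            workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
            if (!workload)
                    return ERR_PTR(-ENOMEM);        /* assumed convention */

            INIT_LIST_HEAD(&workload->list);        /* queue linkage */
            INIT_LIST_HEAD(&workload->shadow_bb);   /* shadow batch buffers */

            init_waitqueue_head(&workload->shadow_ctx_status_wq);
            atomic_set(&workload->shadow_ctx_active, 0);

            workload->status = -EINPROGRESS;        /* resolved at completion */
            workload->vgpu = vgpu;

            return workload;
    }
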
1246 static int prepare_mm(struct intel_vgpu_workload *workload) in prepare_mm() argument
1248 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc; in prepare_mm()
1250 struct intel_vgpu *vgpu = workload->vgpu; in prepare_mm()
1266 read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps); in prepare_mm()
1268 mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps); in prepare_mm()
1272 workload->shadow_mm = mm; in prepare_mm()
1301 struct intel_vgpu_workload *workload = NULL; in intel_vgpu_create_workload() local
1344 workload = alloc_workload(vgpu); in intel_vgpu_create_workload()
1345 if (IS_ERR(workload)) in intel_vgpu_create_workload()
1346 return workload; in intel_vgpu_create_workload()
1348 workload->ring_id = ring_id; in intel_vgpu_create_workload()
1349 workload->ctx_desc = *desc; in intel_vgpu_create_workload()
1350 workload->ring_context_gpa = ring_context_gpa; in intel_vgpu_create_workload()
1351 workload->rb_head = head; in intel_vgpu_create_workload()
1352 workload->rb_tail = tail; in intel_vgpu_create_workload()
1353 workload->rb_start = start; in intel_vgpu_create_workload()
1354 workload->rb_ctl = ctl; in intel_vgpu_create_workload()
1362 workload->wa_ctx.indirect_ctx.guest_gma = in intel_vgpu_create_workload()
1364 workload->wa_ctx.indirect_ctx.size = in intel_vgpu_create_workload()
1367 workload->wa_ctx.per_ctx.guest_gma = in intel_vgpu_create_workload()
1369 workload->wa_ctx.per_ctx.valid = per_ctx & 1; in intel_vgpu_create_workload()
1373 workload, ring_id, head, tail, start, ctl); in intel_vgpu_create_workload()
1375 ret = prepare_mm(workload); in intel_vgpu_create_workload()
1377 kmem_cache_free(s->workloads, workload); in intel_vgpu_create_workload()
1387 ret = intel_gvt_scan_and_shadow_workload(workload); in intel_vgpu_create_workload()
1394 intel_vgpu_destroy_workload(workload); in intel_vgpu_create_workload()
1398 return workload; in intel_vgpu_create_workload()
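
The creation tail implied above: shadow the guest's page tables via prepare_mm(), then scan and shadow the command stream eagerly, destroying the half-built workload on any failure. A sketch (the ERR_PTR() conversions and any locking around the scan are assumptions):

    ret = prepare_mm(workload);
    if (ret) {
            kmem_cache_free(s->workloads, workload);
            return ERR_PTR(ret);
    }

    /* shadow eagerly at creation so scan errors surface to the guest */
    ret = intel_gvt_scan_and_shadow_workload(workload);
    if (ret) {
            intel_vgpu_destroy_workload(workload);
            return ERR_PTR(ret);
    }

    return workload;
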
1405 void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload) in intel_vgpu_queue_workload() argument
1407 list_add_tail(&workload->list, in intel_vgpu_queue_workload()
1408 workload_q_head(workload->vgpu, workload->ring_id)); in intel_vgpu_queue_workload()
1409 intel_gvt_kick_schedule(workload->vgpu->gvt); in intel_vgpu_queue_workload()
1410 wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]); in intel_vgpu_queue_workload()