Lines matching refs: workload
58 static void update_shadow_pdps(struct intel_vgpu_workload *workload) in update_shadow_pdps() argument
61 workload->req->hw_context->state->obj; in update_shadow_pdps()
65 if (WARN_ON(!workload->shadow_mm)) in update_shadow_pdps()
68 if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount))) in update_shadow_pdps()
74 (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps); in update_shadow_pdps()
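The update_shadow_pdps() lines above copy the shadowed page-directory pointers from workload->shadow_mm into the pinned ring-context image. A minimal sketch of that copy, using hypothetical stand-in types (shadow_mm_sketch, ring_context_sketch) rather than the real i915/GVT structures:

#include <stdint.h>
#include <string.h>

#define NR_PDPS_SKETCH 4   /* legacy 32b PPGTT uses four PDP roots */

/* Hypothetical stand-ins for the shadow mm and the mapped context image. */
struct shadow_mm_sketch {
        uint64_t shadow_pdps[NR_PDPS_SKETCH];
};

struct ring_context_sketch {
        uint64_t pdp[NR_PDPS_SKETCH];   /* PDP register pairs, simplified */
};

/* Copy the shadowed PDP roots into the context image the hardware will load. */
static void update_shadow_pdps_sketch(struct ring_context_sketch *ctx,
                                      const struct shadow_mm_sketch *mm)
{
        memcpy(ctx->pdp, mm->shadow_pdps, sizeof(ctx->pdp));
}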
83 static void sr_oa_regs(struct intel_vgpu_workload *workload, in sr_oa_regs() argument
86 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; in sr_oa_regs()
100 if (workload->ring_id != RCS0) in sr_oa_regs()
104 workload->oactxctrl = reg_state[ctx_oactxctrl + 1]; in sr_oa_regs()
106 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) { in sr_oa_regs()
109 workload->flex_mmio[i] = reg_state[state_offset + 1]; in sr_oa_regs()
114 reg_state[ctx_oactxctrl + 1] = workload->oactxctrl; in sr_oa_regs()
116 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) { in sr_oa_regs()
121 reg_state[state_offset + 1] = workload->flex_mmio[i]; in sr_oa_regs()
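sr_oa_regs() saves the guest's OA context-control and flex-EU register values out of the register state before the shadow context is populated and writes them back afterwards (RCS0 only, per line 100 above). A sketch of that save/restore pattern; FLEX_MMIO_NR and all *_sketch names are assumptions, not the driver's definitions:

#include <stdbool.h>
#include <stdint.h>

#define FLEX_MMIO_NR 7   /* assumed flex-EU register count, for illustration */

/* Hypothetical per-workload OA state and a flattened register-state image. */
struct oa_state_sketch {
        uint32_t oactxctrl;
        uint32_t flex_mmio[FLEX_MMIO_NR];
};

static void sr_oa_regs_sketch(struct oa_state_sketch *oa, uint32_t *reg_state,
                              const uint32_t *flex_offsets,
                              uint32_t ctx_oactxctrl, bool save)
{
        if (save) {
                /* Stash the guest's OA control and flex counter config. */
                oa->oactxctrl = reg_state[ctx_oactxctrl + 1];
                for (int i = 0; i < FLEX_MMIO_NR; i++)
                        oa->flex_mmio[i] = reg_state[flex_offsets[i] + 1];
        } else {
                /* Restore them after the shadow context has been populated. */
                reg_state[ctx_oactxctrl + 1] = oa->oactxctrl;
                for (int i = 0; i < FLEX_MMIO_NR; i++)
                        reg_state[flex_offsets[i] + 1] = oa->flex_mmio[i];
        }
}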
126 static int populate_shadow_context(struct intel_vgpu_workload *workload) in populate_shadow_context() argument
128 struct intel_vgpu *vgpu = workload->vgpu; in populate_shadow_context()
130 int ring_id = workload->ring_id; in populate_shadow_context()
132 workload->req->hw_context->state->obj; in populate_shadow_context()
142 sr_oa_regs(workload, (u32 *)shadow_ring_context, true); in populate_shadow_context()
144 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ in populate_shadow_context()
147 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ in populate_shadow_context()
165 workload->ring_context_gpa + in populate_shadow_context()
171 sr_oa_regs(workload, (u32 *)shadow_ring_context, false); in populate_shadow_context()
178 workload->ctx_desc.lrca); in populate_shadow_context()
190 (u32)((workload->ctx_desc.lrca + i) << in populate_shadow_context()
234 struct intel_vgpu_workload *workload; in shadow_context_status_change() local
251 workload = scheduler->current_workload[ring_id]; in shadow_context_status_change()
252 if (unlikely(!workload)) in shadow_context_status_change()
258 if (workload->vgpu != scheduler->engine_owner[ring_id]) { in shadow_context_status_change()
261 workload->vgpu, ring_id); in shadow_context_status_change()
262 scheduler->engine_owner[ring_id] = workload->vgpu; in shadow_context_status_change()
265 ring_id, workload->vgpu->id); in shadow_context_status_change()
267 atomic_set(&workload->shadow_ctx_active, 1); in shadow_context_status_change()
270 save_ring_hw_state(workload->vgpu, ring_id); in shadow_context_status_change()
271 atomic_set(&workload->shadow_ctx_active, 0); in shadow_context_status_change()
274 save_ring_hw_state(workload->vgpu, ring_id); in shadow_context_status_change()
280 wake_up(&workload->shadow_ctx_status_wq); in shadow_context_status_change()
286 struct intel_vgpu_workload *workload) in shadow_context_descriptor_update() argument
295 desc |= workload->ctx_desc.addressing_mode << in shadow_context_descriptor_update()
301 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload) in copy_workload_to_ring_buffer() argument
303 struct intel_vgpu *vgpu = workload->vgpu; in copy_workload_to_ring_buffer()
304 struct i915_request *req = workload->req; in copy_workload_to_ring_buffer()
331 cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32)); in copy_workload_to_ring_buffer()
334 workload->rb_len); in copy_workload_to_ring_buffer()
338 shadow_ring_buffer_va = workload->shadow_ring_buffer_va; in copy_workload_to_ring_buffer()
341 workload->shadow_ring_buffer_va = cs; in copy_workload_to_ring_buffer()
344 workload->rb_len); in copy_workload_to_ring_buffer()
346 cs += workload->rb_len / sizeof(u32); in copy_workload_to_ring_buffer()
347 intel_ring_advance(workload->req, cs); in copy_workload_to_ring_buffer()
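copy_workload_to_ring_buffer() reserves rb_len / sizeof(u32) dwords in the shadow request's ring, copies the already-scanned guest commands into it, and advances the ring. A userspace-style sketch of the same copy-and-advance step, with a hypothetical shadow_ring_sketch standing in for the i915 ring:

#include <stdint.h>
#include <string.h>

/* Hypothetical shadow ring: a dword array with a write cursor. */
struct shadow_ring_sketch {
        uint32_t *cs;      /* next free dword, as returned by intel_ring_begin() */
        size_t    space;   /* free dwords remaining */
};

/* Copy rb_len bytes of scanned guest commands into the shadow ring. */
static int copy_to_ring_sketch(struct shadow_ring_sketch *ring,
                               const uint32_t *shadow_rb_va, size_t rb_len)
{
        size_t dwords = rb_len / sizeof(uint32_t);

        if (ring->space < dwords)
                return -1;                        /* no space for this workload */

        memcpy(ring->cs, shadow_rb_va, rb_len);   /* guest commands, already audited */
        ring->cs += dwords;                       /* advance, as intel_ring_advance() does */
        ring->space -= dwords;
        return 0;
}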
364 static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, in set_context_ppgtt_from_shadow() argument
367 struct intel_vgpu_mm *mm = workload->shadow_mm; in set_context_ppgtt_from_shadow()
384 intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload) in intel_gvt_workload_req_alloc() argument
386 struct intel_vgpu *vgpu = workload->vgpu; in intel_gvt_workload_req_alloc()
393 if (workload->req) in intel_gvt_workload_req_alloc()
396 rq = i915_request_create(s->shadow[workload->ring_id]); in intel_gvt_workload_req_alloc()
402 workload->req = i915_request_get(rq); in intel_gvt_workload_req_alloc()
414 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) in intel_gvt_scan_and_shadow_workload() argument
416 struct intel_vgpu *vgpu = workload->vgpu; in intel_gvt_scan_and_shadow_workload()
423 if (workload->shadow) in intel_gvt_scan_and_shadow_workload()
426 if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated)) in intel_gvt_scan_and_shadow_workload()
427 shadow_context_descriptor_update(s->shadow[workload->ring_id], in intel_gvt_scan_and_shadow_workload()
428 workload); in intel_gvt_scan_and_shadow_workload()
430 ret = intel_gvt_scan_and_shadow_ringbuffer(workload); in intel_gvt_scan_and_shadow_workload()
434 if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) { in intel_gvt_scan_and_shadow_workload()
435 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); in intel_gvt_scan_and_shadow_workload()
440 workload->shadow = true; in intel_gvt_scan_and_shadow_workload()
443 release_shadow_wa_ctx(&workload->wa_ctx); in intel_gvt_scan_and_shadow_workload()
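intel_gvt_scan_and_shadow_workload() shadows a workload at most once (guarded by workload->shadow) and releases the workaround-context shadow if that second step fails. A sketch of this shadow-once-with-unwind shape; every *_sketch helper is a placeholder, not a real GVT function:

#include <stdbool.h>

/* Hypothetical workload: just the fields the pattern needs. */
struct workload_sketch {
        bool shadow;              /* set once the ring buffer has been shadowed */
        bool has_indirect_ctx;    /* RCS-only workaround context present */
};

/* Placeholder steps standing in for the real scan/shadow helpers. */
static int scan_ring_sketch(struct workload_sketch *w)       { (void)w; return 0; }
static int shadow_wa_ctx_sketch(struct workload_sketch *w)   { (void)w; return 0; }
static void release_wa_ctx_sketch(struct workload_sketch *w) { (void)w; }

static int scan_and_shadow_sketch(struct workload_sketch *w)
{
        int ret;

        if (w->shadow)            /* already shadowed: nothing to do */
                return 0;

        ret = scan_ring_sketch(w);
        if (ret)
                return ret;

        if (w->has_indirect_ctx) {
                ret = shadow_wa_ctx_sketch(w);
                if (ret)
                        goto err_shadow;
        }

        w->shadow = true;
        return 0;

err_shadow:
        release_wa_ctx_sketch(w); /* undo the partial shadow on failure */
        return ret;
}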
447 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
449 static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) in prepare_shadow_batch_buffer() argument
451 struct intel_gvt *gvt = workload->vgpu->gvt; in prepare_shadow_batch_buffer()
456 list_for_each_entry(bb, &workload->shadow_bb, list) { in prepare_shadow_batch_buffer()
465 bb->bb_start_cmd_va = workload->shadow_ring_buffer_va in prepare_shadow_batch_buffer()
511 workload->req, in prepare_shadow_batch_buffer()
522 release_shadow_batch_buffer(workload); in prepare_shadow_batch_buffer()
528 struct intel_vgpu_workload *workload = in update_wa_ctx_2_shadow_ctx() local
530 struct i915_request *rq = workload->req; in update_wa_ctx_2_shadow_ctx()
571 static void update_vreg_in_ctx(struct intel_vgpu_workload *workload) in update_vreg_in_ctx() argument
573 struct intel_vgpu *vgpu = workload->vgpu; in update_vreg_in_ctx()
577 ring_base = dev_priv->engine[workload->ring_id]->mmio_base; in update_vreg_in_ctx()
578 vgpu_vreg_t(vgpu, RING_START(ring_base)) = workload->rb_start; in update_vreg_in_ctx()
581 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) in release_shadow_batch_buffer() argument
583 struct intel_vgpu *vgpu = workload->vgpu; in release_shadow_batch_buffer()
587 if (list_empty(&workload->shadow_bb)) in release_shadow_batch_buffer()
590 bb = list_first_entry(&workload->shadow_bb, in release_shadow_batch_buffer()
595 list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) { in release_shadow_batch_buffer()
616 static int prepare_workload(struct intel_vgpu_workload *workload) in prepare_workload() argument
618 struct intel_vgpu *vgpu = workload->vgpu; in prepare_workload()
620 int ring = workload->ring_id; in prepare_workload()
623 ret = intel_vgpu_pin_mm(workload->shadow_mm); in prepare_workload()
629 if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT || in prepare_workload()
630 !workload->shadow_mm->ppgtt_mm.shadowed) { in prepare_workload()
635 update_shadow_pdps(workload); in prepare_workload()
637 set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context); in prepare_workload()
639 ret = intel_vgpu_sync_oos_pages(workload->vgpu); in prepare_workload()
645 ret = intel_vgpu_flush_post_shadow(workload->vgpu); in prepare_workload()
651 ret = copy_workload_to_ring_buffer(workload); in prepare_workload()
657 ret = prepare_shadow_batch_buffer(workload); in prepare_workload()
663 ret = prepare_shadow_wa_ctx(&workload->wa_ctx); in prepare_workload()
669 if (workload->prepare) { in prepare_workload()
670 ret = workload->prepare(workload); in prepare_workload()
677 release_shadow_wa_ctx(&workload->wa_ctx); in prepare_workload()
679 release_shadow_batch_buffer(workload); in prepare_workload()
681 intel_vgpu_unpin_mm(workload->shadow_mm); in prepare_workload()
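prepare_workload() runs its steps in a fixed order (pin the shadow mm, copy the ring buffer, prepare the batch buffers, prepare the wa_ctx, then the vGPU-specific prepare hook) and unwinds them in reverse via the release/unpin calls seen above. A reduced sketch of that goto ladder with every step collapsed to a stub:

/* Hypothetical step helpers; each returns 0 on success. */
static int pin_mm_sketch(void)            { return 0; }
static int copy_ring_step_sketch(void)    { return 0; }
static int prepare_bb_sketch(void)        { return 0; }
static int prepare_wa_ctx_sketch(void)    { return 0; }
static int vgpu_prepare_hook_sketch(void) { return 0; }
static void release_wa_ctx_step_sketch(void) {}
static void release_bb_sketch(void)          {}
static void unpin_mm_sketch(void)            {}

/* Same goto-ladder shape as the listing's prepare_workload():
 * a failing step unwinds everything the earlier steps set up. */
static int prepare_workload_sketch(void)
{
        int ret;

        ret = pin_mm_sketch();
        if (ret)
                return ret;

        ret = copy_ring_step_sketch();
        if (ret)
                goto err_unpin_mm;

        ret = prepare_bb_sketch();
        if (ret)
                goto err_unpin_mm;

        ret = prepare_wa_ctx_sketch();
        if (ret)
                goto err_shadow_batch;

        ret = vgpu_prepare_hook_sketch();
        if (ret)
                goto err_shadow_wa_ctx;

        return 0;

err_shadow_wa_ctx:
        release_wa_ctx_step_sketch();
err_shadow_batch:
        release_bb_sketch();
err_unpin_mm:
        unpin_mm_sketch();
        return ret;
}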
685 static int dispatch_workload(struct intel_vgpu_workload *workload) in dispatch_workload() argument
687 struct intel_vgpu *vgpu = workload->vgpu; in dispatch_workload()
690 int ring_id = workload->ring_id; in dispatch_workload()
694 ring_id, workload); in dispatch_workload()
699 ret = intel_gvt_workload_req_alloc(workload); in dispatch_workload()
703 ret = intel_gvt_scan_and_shadow_workload(workload); in dispatch_workload()
707 ret = populate_shadow_context(workload); in dispatch_workload()
709 release_shadow_wa_ctx(&workload->wa_ctx); in dispatch_workload()
713 ret = prepare_workload(workload); in dispatch_workload()
719 rq = fetch_and_zero(&workload->req); in dispatch_workload()
723 if (!IS_ERR_OR_NULL(workload->req)) { in dispatch_workload()
725 ring_id, workload->req); in dispatch_workload()
726 i915_request_add(workload->req); in dispatch_workload()
727 workload->dispatched = true; in dispatch_workload()
731 workload->status = ret; in dispatch_workload()
741 struct intel_vgpu_workload *workload = NULL; in pick_next_workload() local
768 workload = scheduler->current_workload[ring_id]; in pick_next_workload()
770 ring_id, workload); in pick_next_workload()
784 workload = scheduler->current_workload[ring_id]; in pick_next_workload()
786 gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload); in pick_next_workload()
788 atomic_inc(&workload->vgpu->submission.running_workload_num); in pick_next_workload()
791 return workload; in pick_next_workload()
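pick_next_workload() either returns the workload already current on the ring or promotes the head of the vGPU's per-ring queue and bumps running_workload_num. A sketch with a plain singly linked list standing in for the kernel list, atomics and locking:

#include <stddef.h>

/* Hypothetical queue node and per-ring scheduler state. */
struct wl_node_sketch {
        struct wl_node_sketch *next;
};

struct ring_sched_sketch {
        struct wl_node_sketch *queue_head;    /* per-vGPU, per-ring submit queue */
        struct wl_node_sketch *current_wl;    /* workload currently being executed */
        int running_workload_num;             /* atomic_t in the real code */
};

/* Pick the queued head as the current workload, if nothing is in flight. */
static struct wl_node_sketch *pick_next_sketch(struct ring_sched_sketch *rs)
{
        if (rs->current_wl)             /* still dispatching/executing one */
                return rs->current_wl;

        if (!rs->queue_head)
                return NULL;

        rs->current_wl = rs->queue_head;
        rs->queue_head = rs->queue_head->next;
        rs->running_workload_num++;     /* atomic_inc() in the real scheduler */
        return rs->current_wl;
}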
794 static void update_guest_context(struct intel_vgpu_workload *workload) in update_guest_context() argument
796 struct i915_request *rq = workload->req; in update_guest_context()
797 struct intel_vgpu *vgpu = workload->vgpu; in update_guest_context()
811 workload->ctx_desc.lrca); in update_guest_context()
813 head = workload->rb_head; in update_guest_context()
814 tail = workload->rb_tail; in update_guest_context()
815 wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF; in update_guest_context()
826 ring_base = dev_priv->engine[workload->ring_id]->mmio_base; in update_guest_context()
840 (u32)((workload->ctx_desc.lrca + i) << in update_guest_context()
855 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + in update_guest_context()
856 RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4); in update_guest_context()
862 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \ in update_guest_context()
871 workload->ring_context_gpa + in update_guest_context()
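update_guest_context() rebuilds the guest-visible RING_HEAD: the low bits carry the head offset and the upper bits a wrap counter (guest_rb_head >> RB_HEAD_WRAP_CNT_OFF in the listing), bumped when the ring wrapped during this workload. A sketch of that recomputation; the bit offset and counter mask used here are assumed values for illustration, not the driver's RB_HEAD_WRAP_CNT_* macros:

#include <stdint.h>

/* Assumed layout: low bits hold the head offset, upper bits a small
 * wrap counter. The real offsets come from the GVT scheduler headers. */
#define WRAP_CNT_OFF_SKETCH 21
#define WRAP_CNT_MAX_SKETCH 0x7ff

/* If the new tail ended up below the old head, the ring wrapped once,
 * so advance the wrap counter (modulo the assumed mask) and fold it
 * back into the value written to the guest's ring-header field. */
static uint32_t update_guest_rb_head_sketch(uint32_t guest_rb_head,
                                            uint32_t old_head, uint32_t new_tail)
{
        uint32_t wrap_count = guest_rb_head >> WRAP_CNT_OFF_SKETCH;

        if (new_tail < old_head)
                wrap_count = (wrap_count + 1) & WRAP_CNT_MAX_SKETCH;

        return (wrap_count << WRAP_CNT_OFF_SKETCH) | new_tail;
}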
903 struct intel_vgpu_workload *workload = in complete_current_workload() local
905 struct intel_vgpu *vgpu = workload->vgpu; in complete_current_workload()
907 struct i915_request *rq = workload->req; in complete_current_workload()
918 wait_event(workload->shadow_ctx_status_wq, in complete_current_workload()
919 !atomic_read(&workload->shadow_ctx_active)); in complete_current_workload()
926 if (likely(workload->status == -EINPROGRESS)) { in complete_current_workload()
927 if (workload->req->fence.error == -EIO) in complete_current_workload()
928 workload->status = -EIO; in complete_current_workload()
930 workload->status = 0; in complete_current_workload()
933 if (!workload->status && in complete_current_workload()
935 update_guest_context(workload); in complete_current_workload()
937 for_each_set_bit(event, workload->pending_events, in complete_current_workload()
942 i915_request_put(fetch_and_zero(&workload->req)); in complete_current_workload()
946 ring_id, workload, workload->status); in complete_current_workload()
950 list_del_init(&workload->list); in complete_current_workload()
952 if (workload->status || vgpu->resetting_eng & BIT(ring_id)) { in complete_current_workload()
969 workload->complete(workload); in complete_current_workload()
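complete_current_workload() resolves a status that is still -EINPROGRESS to either 0 or -EIO depending on the request's fence error, and only a clean completion on a non-resetting engine triggers update_guest_context(). A sketch of that decision, using ordinary errno values:

#include <errno.h>
#include <stdbool.h>

/* Resolve the final workload status and report whether the guest
 * context should be written back, mirroring the branches in the
 * listing's complete_current_workload(). */
static bool finish_workload_sketch(int *status, int fence_error, bool resetting)
{
        if (*status == -EINPROGRESS)
                *status = (fence_error == -EIO) ? -EIO : 0;

        /* true: caller would run update_guest_context() and emit events */
        return *status == 0 && !resetting;
}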
992 struct intel_vgpu_workload *workload = NULL; in workload_thread() local
1006 workload = pick_next_workload(gvt, ring_id); in workload_thread()
1007 if (workload) in workload_thread()
1014 if (!workload) in workload_thread()
1018 workload->ring_id, workload, in workload_thread()
1019 workload->vgpu->id); in workload_thread()
1024 workload->ring_id, workload); in workload_thread()
1035 update_vreg_in_ctx(workload); in workload_thread()
1037 ret = dispatch_workload(workload); in workload_thread()
1040 vgpu = workload->vgpu; in workload_thread()
1046 workload->ring_id, workload); in workload_thread()
1047 i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT); in workload_thread()
1051 workload, workload->status); in workload_thread()
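workload_thread() is the per-ring scheduler loop: pick a workload, dispatch it, wait for its shadow request, then complete it. A skeleton of that loop with all GVT calls reduced to stand-in stubs (stop handling, vGPU error recovery and locking are omitted):

#include <stdbool.h>
#include <stddef.h>

struct wl_sketch;   /* opaque stand-in for struct intel_vgpu_workload */

/* Stand-in helpers, not the real driver entry points. */
static struct wl_sketch *pick_next(void)       { return NULL; }
static void wait_for_event(void)               {}
static int  dispatch(struct wl_sketch *w)      { (void)w; return 0; }
static void wait_request(struct wl_sketch *w)  { (void)w; }
static void complete(struct wl_sketch *w)      { (void)w; }
static bool should_stop(void)                  { return true; }

/* Per-ring scheduler thread body, reduced to its pick/dispatch/wait/complete
 * skeleton (the listing's workload_thread()). */
static void workload_thread_sketch(void)
{
        while (!should_stop()) {
                struct wl_sketch *w = pick_next();

                if (!w) {
                        wait_for_event();    /* sleep until a workload is queued */
                        continue;
                }

                if (dispatch(w) == 0)
                        wait_request(w);     /* i915_request_wait() on the shadow request */

                complete(w);                 /* write back guest state, drop refs */
        }
}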
1374 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload) in intel_vgpu_destroy_workload() argument
1376 struct intel_vgpu_submission *s = &workload->vgpu->submission; in intel_vgpu_destroy_workload()
1378 release_shadow_batch_buffer(workload); in intel_vgpu_destroy_workload()
1379 release_shadow_wa_ctx(&workload->wa_ctx); in intel_vgpu_destroy_workload()
1381 if (workload->shadow_mm) in intel_vgpu_destroy_workload()
1382 intel_vgpu_mm_put(workload->shadow_mm); in intel_vgpu_destroy_workload()
1384 kmem_cache_free(s->workloads, workload); in intel_vgpu_destroy_workload()
1391 struct intel_vgpu_workload *workload; in alloc_workload() local
1393 workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL); in alloc_workload()
1394 if (!workload) in alloc_workload()
1397 INIT_LIST_HEAD(&workload->list); in alloc_workload()
1398 INIT_LIST_HEAD(&workload->shadow_bb); in alloc_workload()
1400 init_waitqueue_head(&workload->shadow_ctx_status_wq); in alloc_workload()
1401 atomic_set(&workload->shadow_ctx_active, 0); in alloc_workload()
1403 workload->status = -EINPROGRESS; in alloc_workload()
1404 workload->vgpu = vgpu; in alloc_workload()
1406 return workload; in alloc_workload()
1425 static int prepare_mm(struct intel_vgpu_workload *workload) in prepare_mm() argument
1427 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc; in prepare_mm()
1429 struct intel_vgpu *vgpu = workload->vgpu; in prepare_mm()
1445 read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps); in prepare_mm()
1447 mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps); in prepare_mm()
1451 workload->shadow_mm = mm; in prepare_mm()
1478 struct intel_vgpu_workload *workload = NULL; in intel_vgpu_create_workload() local
1535 workload = alloc_workload(vgpu); in intel_vgpu_create_workload()
1536 if (IS_ERR(workload)) in intel_vgpu_create_workload()
1537 return workload; in intel_vgpu_create_workload()
1539 workload->ring_id = ring_id; in intel_vgpu_create_workload()
1540 workload->ctx_desc = *desc; in intel_vgpu_create_workload()
1541 workload->ring_context_gpa = ring_context_gpa; in intel_vgpu_create_workload()
1542 workload->rb_head = head; in intel_vgpu_create_workload()
1543 workload->guest_rb_head = guest_head; in intel_vgpu_create_workload()
1544 workload->rb_tail = tail; in intel_vgpu_create_workload()
1545 workload->rb_start = start; in intel_vgpu_create_workload()
1546 workload->rb_ctl = ctl; in intel_vgpu_create_workload()
1554 workload->wa_ctx.indirect_ctx.guest_gma = in intel_vgpu_create_workload()
1556 workload->wa_ctx.indirect_ctx.size = in intel_vgpu_create_workload()
1560 if (workload->wa_ctx.indirect_ctx.size != 0) { in intel_vgpu_create_workload()
1562 workload->wa_ctx.indirect_ctx.guest_gma, in intel_vgpu_create_workload()
1563 workload->wa_ctx.indirect_ctx.size)) { in intel_vgpu_create_workload()
1565 workload->wa_ctx.indirect_ctx.guest_gma); in intel_vgpu_create_workload()
1566 kmem_cache_free(s->workloads, workload); in intel_vgpu_create_workload()
1571 workload->wa_ctx.per_ctx.guest_gma = in intel_vgpu_create_workload()
1573 workload->wa_ctx.per_ctx.valid = per_ctx & 1; in intel_vgpu_create_workload()
1574 if (workload->wa_ctx.per_ctx.valid) { in intel_vgpu_create_workload()
1576 workload->wa_ctx.per_ctx.guest_gma, in intel_vgpu_create_workload()
1579 workload->wa_ctx.per_ctx.guest_gma); in intel_vgpu_create_workload()
1580 kmem_cache_free(s->workloads, workload); in intel_vgpu_create_workload()
1587 workload, ring_id, head, tail, start, ctl); in intel_vgpu_create_workload()
1589 ret = prepare_mm(workload); in intel_vgpu_create_workload()
1591 kmem_cache_free(s->workloads, workload); in intel_vgpu_create_workload()
1601 ret = intel_gvt_scan_and_shadow_workload(workload); in intel_vgpu_create_workload()
1609 intel_vgpu_destroy_workload(workload); in intel_vgpu_create_workload()
1613 return workload; in intel_vgpu_create_workload()
1620 void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload) in intel_vgpu_queue_workload() argument
1622 list_add_tail(&workload->list, in intel_vgpu_queue_workload()
1623 workload_q_head(workload->vgpu, workload->ring_id)); in intel_vgpu_queue_workload()
1624 intel_gvt_kick_schedule(workload->vgpu->gvt); in intel_vgpu_queue_workload()
1625 wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]); in intel_vgpu_queue_workload()
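intel_vgpu_queue_workload() is three steps in the listing: append the workload to the vGPU's per-ring queue, kick the GVT scheduler, and wake that ring's workload thread. A sketch of the same sequence on a plain list, with stand-in stubs for the kick and wake:

#include <stddef.h>

struct queued_wl_sketch {
        struct queued_wl_sketch *next;
};

struct ring_queue_sketch {
        struct queued_wl_sketch *head, *tail;
};

static void kick_schedule_sketch(void)    {}   /* intel_gvt_kick_schedule() stand-in */
static void wake_ring_thread_sketch(void) {}   /* wake_up(&scheduler.waitq[ring]) stand-in */

/* Append to the per-ring submit queue, then nudge the scheduler and thread. */
static void queue_workload_sketch(struct ring_queue_sketch *q,
                                  struct queued_wl_sketch *w)
{
        w->next = NULL;
        if (q->tail)
                q->tail->next = w;
        else
                q->head = w;
        q->tail = w;

        kick_schedule_sketch();
        wake_ring_thread_sketch();
}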