1 /*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Zhi Wang <zhi.a.wang@intel.com>
25 *
26 * Contributors:
27 * Ping Gao <ping.a.gao@intel.com>
28 * Tina Zhang <tina.zhang@intel.com>
29 * Chanbin Du <changbin.du@intel.com>
30 * Min He <min.he@intel.com>
31 * Bing Niu <bing.niu@intel.com>
32 * Zhenyu Wang <zhenyuw@linux.intel.com>
33 *
34 */
35
36 #include <linux/kthread.h>
37
38 #include "i915_drv.h"
39 #include "gvt.h"
40
41 #define RING_CTX_OFF(x) \
42 offsetof(struct execlist_ring_context, x)
43
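/* Write the eight PDP root pointers into the ring context image. The pdp[]
 * array is consumed in reverse order, mirroring read_guest_pdps(), so the
 * entries land in the PDP layout expected by the context image.
 */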
static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
47 {
48 int i;
49
50 for (i = 0; i < 8; i++)
51 ring_context->pdps[i].val = pdp[7 - i];
52 }
53
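/* Refresh the PDP root pointers in the shadow ring context with the shadow
 * PPGTT's PDPs, so the submitted workload walks the shadow page tables.
 * The shadow mm must already be pinned at this point.
 */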
static void update_shadow_pdps(struct intel_vgpu_workload *workload)
55 {
56 struct drm_i915_gem_object *ctx_obj =
57 workload->req->hw_context->state->obj;
58 struct execlist_ring_context *shadow_ring_context;
59 struct page *page;
60
61 if (WARN_ON(!workload->shadow_mm))
62 return;
63
64 if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
65 return;
66
67 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
68 shadow_ring_context = kmap(page);
69 set_context_pdp_root_pointer(shadow_ring_context,
70 (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
71 kunmap(page);
72 }
73
/*
 * When populating the shadow context from the guest, we should not override
 * the OA-related registers, so that they are not clobbered by guest OA
 * configs. This makes it possible to capture OA data from the host for both
 * the host and the guests.
 */
static void sr_oa_regs(struct intel_vgpu_workload *workload,
		u32 *reg_state, bool save)
81 {
82 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
83 u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
84 u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
85 int i = 0;
86 u32 flex_mmio[] = {
87 i915_mmio_reg_offset(EU_PERF_CNTL0),
88 i915_mmio_reg_offset(EU_PERF_CNTL1),
89 i915_mmio_reg_offset(EU_PERF_CNTL2),
90 i915_mmio_reg_offset(EU_PERF_CNTL3),
91 i915_mmio_reg_offset(EU_PERF_CNTL4),
92 i915_mmio_reg_offset(EU_PERF_CNTL5),
93 i915_mmio_reg_offset(EU_PERF_CNTL6),
94 };
95
96 if (workload->ring_id != RCS)
97 return;
98
99 if (save) {
100 workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
101
102 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
103 u32 state_offset = ctx_flexeu0 + i * 2;
104
105 workload->flex_mmio[i] = reg_state[state_offset + 1];
106 }
107 } else {
108 reg_state[ctx_oactxctrl] =
109 i915_mmio_reg_offset(GEN8_OACTXCONTROL);
110 reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
111
112 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
113 u32 state_offset = ctx_flexeu0 + i * 2;
114 u32 mmio = flex_mmio[i];
115
116 reg_state[state_offset] = mmio;
117 reg_state[state_offset + 1] = workload->flex_mmio[i];
118 }
119 }
120 }
121
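/* Copy the guest context image into the shadow context: the additional
 * context pages are read through their guest physical addresses, and the
 * ring-context registers are copied while the host OA configuration is
 * preserved by sr_oa_regs().
 */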
static int populate_shadow_context(struct intel_vgpu_workload *workload)
123 {
124 struct intel_vgpu *vgpu = workload->vgpu;
125 struct intel_gvt *gvt = vgpu->gvt;
126 int ring_id = workload->ring_id;
127 struct drm_i915_gem_object *ctx_obj =
128 workload->req->hw_context->state->obj;
129 struct execlist_ring_context *shadow_ring_context;
130 struct page *page;
131 void *dst;
132 unsigned long context_gpa, context_page_num;
133 int i;
134
135 gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
136 workload->ctx_desc.lrca);
137
138 context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
139
140 context_page_num = context_page_num >> PAGE_SHIFT;
141
142 if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
143 context_page_num = 19;
144
145 i = 2;
146
147 while (i < context_page_num) {
148 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
149 (u32)((workload->ctx_desc.lrca + i) <<
150 I915_GTT_PAGE_SHIFT));
151 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
152 gvt_vgpu_err("Invalid guest context descriptor\n");
153 return -EFAULT;
154 }
155
156 page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
157 dst = kmap(page);
158 intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
159 I915_GTT_PAGE_SIZE);
160 kunmap(page);
161 i++;
162 }
163
164 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
165 shadow_ring_context = kmap(page);
166
167 sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
168 #define COPY_REG(name) \
169 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
170 + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
171 #define COPY_REG_MASKED(name) {\
172 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
173 + RING_CTX_OFF(name.val),\
174 &shadow_ring_context->name.val, 4);\
175 shadow_ring_context->name.val |= 0xffff << 16;\
176 }
177
178 COPY_REG_MASKED(ctx_ctrl);
179 COPY_REG(ctx_timestamp);
180
181 if (ring_id == RCS) {
182 COPY_REG(bb_per_ctx_ptr);
183 COPY_REG(rcs_indirect_ctx);
184 COPY_REG(rcs_indirect_ctx_offset);
185 }
186 #undef COPY_REG
187 #undef COPY_REG_MASKED
188
189 intel_gvt_hypervisor_read_gpa(vgpu,
190 workload->ring_context_gpa +
191 sizeof(*shadow_ring_context),
192 (void *)shadow_ring_context +
193 sizeof(*shadow_ring_context),
194 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
195
196 sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
197 kunmap(page);
198 return 0;
199 }
200
static inline bool is_gvt_request(struct i915_request *req)
202 {
203 return i915_gem_context_force_single_submission(req->gem_context);
204 }
205
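/* Snapshot a few ring registers (INSTDONE, ACTHD) into the vGPU's virtual
 * register file so the guest sees up-to-date values after the workload ran.
 */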
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
207 {
208 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
209 u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
210 i915_reg_t reg;
211
212 reg = RING_INSTDONE(ring_base);
213 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
214 reg = RING_ACTHD(ring_base);
215 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
216 reg = RING_ACTHD_UDW(ring_base);
217 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
218 }
219
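/* Context status notifier callback: switches the engine MMIO context
 * between host and vGPU on schedule-in, saves ring HW state and clears the
 * active flag on schedule-out/preemption, and wakes up anyone waiting on
 * the shadow context status.
 */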
static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
222 {
223 struct i915_request *req = data;
224 struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
225 shadow_ctx_notifier_block[req->engine->id]);
226 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
227 enum intel_engine_id ring_id = req->engine->id;
228 struct intel_vgpu_workload *workload;
229 unsigned long flags;
230
231 if (!is_gvt_request(req)) {
232 spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
233 if (action == INTEL_CONTEXT_SCHEDULE_IN &&
234 scheduler->engine_owner[ring_id]) {
235 /* Switch ring from vGPU to host. */
236 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
237 NULL, ring_id);
238 scheduler->engine_owner[ring_id] = NULL;
239 }
240 spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
241
242 return NOTIFY_OK;
243 }
244
245 workload = scheduler->current_workload[ring_id];
246 if (unlikely(!workload))
247 return NOTIFY_OK;
248
249 switch (action) {
250 case INTEL_CONTEXT_SCHEDULE_IN:
251 spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
252 if (workload->vgpu != scheduler->engine_owner[ring_id]) {
253 /* Switch ring from host to vGPU or vGPU to vGPU. */
254 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
255 workload->vgpu, ring_id);
256 scheduler->engine_owner[ring_id] = workload->vgpu;
257 } else
258 gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
259 ring_id, workload->vgpu->id);
260 spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
261 atomic_set(&workload->shadow_ctx_active, 1);
262 break;
263 case INTEL_CONTEXT_SCHEDULE_OUT:
264 save_ring_hw_state(workload->vgpu, ring_id);
265 atomic_set(&workload->shadow_ctx_active, 0);
266 break;
267 case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
268 save_ring_hw_state(workload->vgpu, ring_id);
269 break;
270 default:
271 WARN_ON(1);
272 return NOTIFY_OK;
273 }
274 wake_up(&workload->shadow_ctx_status_wq);
275 return NOTIFY_OK;
276 }
277
static void shadow_context_descriptor_update(struct intel_context *ce)
279 {
280 u64 desc = 0;
281
282 desc = ce->lrc_desc;
283
284 /* Update bits 0-11 of the context descriptor which includes flags
285 * like GEN8_CTX_* cached in desc_template
286 */
287 desc &= U64_MAX << 12;
288 desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);
289
290 ce->lrc_desc = desc;
291 }
292
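/* Copy the scanned guest ring-buffer contents into the ring of the shadow
 * request. shadow_ring_buffer_va is re-pointed at the real ring so that
 * later fix-ups (e.g. batch-buffer start commands) patch the ring that is
 * actually executed.
 */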
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
294 {
295 struct intel_vgpu *vgpu = workload->vgpu;
296 struct i915_request *req = workload->req;
297 void *shadow_ring_buffer_va;
298 u32 *cs;
299
300 if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915))
301 && is_inhibit_context(req->hw_context))
302 intel_vgpu_restore_inhibit_context(vgpu, req);
303
304 /* allocate shadow ring buffer */
305 cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
306 if (IS_ERR(cs)) {
307 gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
308 workload->rb_len);
309 return PTR_ERR(cs);
310 }
311
312 shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
313
314 /* get shadow ring buffer va */
315 workload->shadow_ring_buffer_va = cs;
316
317 memcpy(cs, shadow_ring_buffer_va,
318 workload->rb_len);
319
320 cs += workload->rb_len / sizeof(u32);
321 intel_ring_advance(workload->req, cs);
322
323 return 0;
324 }
325
static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
327 {
328 if (!wa_ctx->indirect_ctx.obj)
329 return;
330
331 i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
332 i915_gem_object_put(wa_ctx->indirect_ctx.obj);
333 }
334
/**
 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning it and
 * shadowing it as well, including the ring buffer, wa_ctx and context.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure the content of the workload is valid.
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
344 {
345 struct intel_vgpu *vgpu = workload->vgpu;
346 struct intel_vgpu_submission *s = &vgpu->submission;
347 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
348 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
349 struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
350 struct intel_context *ce;
351 struct i915_request *rq;
352 int ret;
353
354 lockdep_assert_held(&dev_priv->drm.struct_mutex);
355
356 if (workload->req)
357 return 0;
358
	/* Pin the shadow context from GVT even though it will also be pinned
	 * when i915 allocates the request. This is because GVT updates the
	 * guest context from the shadow context when the workload completes,
	 * and by that time i915 may already have unpinned the shadow context,
	 * making the shadow_ctx pages invalid. So GVT needs its own pin.
	 * After updating the guest context, GVT can unpin shadow_ctx safely.
	 */
366 ce = intel_context_pin(shadow_ctx, engine);
367 if (IS_ERR(ce)) {
368 gvt_vgpu_err("fail to pin shadow context\n");
369 return PTR_ERR(ce);
370 }
371
372 shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
373 shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
374 GEN8_CTX_ADDRESSING_MODE_SHIFT;
375
376 if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
377 shadow_context_descriptor_update(ce);
378
379 ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
380 if (ret)
381 goto err_unpin;
382
383 if ((workload->ring_id == RCS) &&
384 (workload->wa_ctx.indirect_ctx.size != 0)) {
385 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
386 if (ret)
387 goto err_shadow;
388 }
389
390 rq = i915_request_alloc(engine, shadow_ctx);
391 if (IS_ERR(rq)) {
392 gvt_vgpu_err("fail to allocate gem request\n");
393 ret = PTR_ERR(rq);
394 goto err_shadow;
395 }
396 workload->req = i915_request_get(rq);
397
398 ret = populate_shadow_context(workload);
399 if (ret)
400 goto err_req;
401
402 return 0;
403 err_req:
404 rq = fetch_and_zero(&workload->req);
405 i915_request_put(rq);
406 err_shadow:
407 release_shadow_wa_ctx(&workload->wa_ctx);
408 err_unpin:
409 intel_context_unpin(ce);
410 return ret;
411 }
412
413 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
414
static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
416 {
417 struct intel_gvt *gvt = workload->vgpu->gvt;
418 const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
419 struct intel_vgpu_shadow_bb *bb;
420 int ret;
421
422 list_for_each_entry(bb, &workload->shadow_bb, list) {
		/* For a privileged batch buffer that is not the wa_ctx,
		 * bb_start_cmd_va is only updated in the ring_scan_buffer,
		 * not at the real ring address allocated later in
		 * copy_workload_to_ring_buffer. Note that
		 * shadow_ring_buffer_va points to the real ring buffer VA
		 * after copy_workload_to_ring_buffer.
		 */
429
430 if (bb->bb_offset)
431 bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
432 + bb->bb_offset;
433
434 if (bb->ppgtt) {
			/* For a non-privileged bb, scan & shadow is only for
			 * debugging purposes, so the content of the shadow bb
			 * is the same as the original bb. Therefore, rather
			 * than switching to the shadow bb's gma address, we
			 * use the original batch buffer's gma address and
			 * send the original bb to the hardware directly.
			 */
443 if (bb->clflush & CLFLUSH_AFTER) {
444 drm_clflush_virt_range(bb->va,
445 bb->obj->base.size);
446 bb->clflush &= ~CLFLUSH_AFTER;
447 }
448 i915_gem_obj_finish_shmem_access(bb->obj);
449 bb->accessing = false;
450
451 } else {
452 bb->vma = i915_gem_object_ggtt_pin(bb->obj,
453 NULL, 0, 0, 0);
454 if (IS_ERR(bb->vma)) {
455 ret = PTR_ERR(bb->vma);
456 goto err;
457 }
458
459 /* relocate shadow batch buffer */
460 bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
461 if (gmadr_bytes == 8)
462 bb->bb_start_cmd_va[2] = 0;
463
464 /* No one is going to touch shadow bb from now on. */
465 if (bb->clflush & CLFLUSH_AFTER) {
466 drm_clflush_virt_range(bb->va,
467 bb->obj->base.size);
468 bb->clflush &= ~CLFLUSH_AFTER;
469 }
470
471 ret = i915_gem_object_set_to_gtt_domain(bb->obj,
472 false);
473 if (ret)
474 goto err;
475
476 i915_gem_obj_finish_shmem_access(bb->obj);
477 bb->accessing = false;
478
479 ret = i915_vma_move_to_active(bb->vma,
480 workload->req,
481 0);
482 if (ret)
483 goto err;
484 }
485 }
486 return 0;
487 err:
488 release_shadow_batch_buffer(workload);
489 return ret;
490 }
491
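/* Point the per-context and indirect-context pointers in the shadow ring
 * context at the shadowed wa_ctx buffers.
 */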
static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
493 {
494 struct intel_vgpu_workload *workload =
495 container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
496 struct i915_request *rq = workload->req;
497 struct execlist_ring_context *shadow_ring_context =
498 (struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
499
500 shadow_ring_context->bb_per_ctx_ptr.val =
501 (shadow_ring_context->bb_per_ctx_ptr.val &
502 (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
503 shadow_ring_context->rcs_indirect_ctx.val =
504 (shadow_ring_context->rcs_indirect_ctx.val &
505 (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
506 }
507
static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
509 {
510 struct i915_vma *vma;
511 unsigned char *per_ctx_va =
512 (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
513 wa_ctx->indirect_ctx.size;
514
515 if (wa_ctx->indirect_ctx.size == 0)
516 return 0;
517
518 vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
519 0, CACHELINE_BYTES, 0);
520 if (IS_ERR(vma))
521 return PTR_ERR(vma);
522
523 /* FIXME: we are not tracking our pinned VMA leaving it
524 * up to the core to fix up the stray pin_count upon
525 * free.
526 */
527
528 wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
529
530 wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
531 memset(per_ctx_va, 0, CACHELINE_BYTES);
532
533 update_wa_ctx_2_shadow_ctx(wa_ctx);
534 return 0;
535 }
536
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
538 {
539 struct intel_vgpu *vgpu = workload->vgpu;
540 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
541 struct intel_vgpu_shadow_bb *bb, *pos;
542
543 if (list_empty(&workload->shadow_bb))
544 return;
545
546 bb = list_first_entry(&workload->shadow_bb,
547 struct intel_vgpu_shadow_bb, list);
548
549 mutex_lock(&dev_priv->drm.struct_mutex);
550
551 list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
552 if (bb->obj) {
553 if (bb->accessing)
554 i915_gem_obj_finish_shmem_access(bb->obj);
555
556 if (bb->va && !IS_ERR(bb->va))
557 i915_gem_object_unpin_map(bb->obj);
558
559 if (bb->vma && !IS_ERR(bb->vma)) {
560 i915_vma_unpin(bb->vma);
561 i915_vma_close(bb->vma);
562 }
563 __i915_gem_object_release_unless_active(bb->obj);
564 }
565 list_del(&bb->list);
566 kfree(bb);
567 }
568
569 mutex_unlock(&dev_priv->drm.struct_mutex);
570 }
571
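/* Prepare a scanned workload for submission: pin the shadow mm, refresh the
 * shadow PDPs, flush pending shadow page-table updates, copy the ring
 * buffer and set up the shadow batch buffers and wa_ctx.
 */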
static int prepare_workload(struct intel_vgpu_workload *workload)
573 {
574 struct intel_vgpu *vgpu = workload->vgpu;
575 int ret = 0;
576
577 ret = intel_vgpu_pin_mm(workload->shadow_mm);
578 if (ret) {
579 gvt_vgpu_err("fail to vgpu pin mm\n");
580 return ret;
581 }
582
583 update_shadow_pdps(workload);
584
585 ret = intel_vgpu_sync_oos_pages(workload->vgpu);
586 if (ret) {
587 gvt_vgpu_err("fail to vgpu sync oos pages\n");
588 goto err_unpin_mm;
589 }
590
591 ret = intel_vgpu_flush_post_shadow(workload->vgpu);
592 if (ret) {
593 gvt_vgpu_err("fail to flush post shadow\n");
594 goto err_unpin_mm;
595 }
596
597 ret = copy_workload_to_ring_buffer(workload);
598 if (ret) {
599 gvt_vgpu_err("fail to generate request\n");
600 goto err_unpin_mm;
601 }
602
603 ret = prepare_shadow_batch_buffer(workload);
604 if (ret) {
605 gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
606 goto err_unpin_mm;
607 }
608
609 ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
610 if (ret) {
611 gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
612 goto err_shadow_batch;
613 }
614
615 if (workload->prepare) {
616 ret = workload->prepare(workload);
617 if (ret)
618 goto err_shadow_wa_ctx;
619 }
620
621 return 0;
622 err_shadow_wa_ctx:
623 release_shadow_wa_ctx(&workload->wa_ctx);
624 err_shadow_batch:
625 release_shadow_batch_buffer(workload);
626 err_unpin_mm:
627 intel_vgpu_unpin_mm(workload->shadow_mm);
628 return ret;
629 }
630
static int dispatch_workload(struct intel_vgpu_workload *workload)
632 {
633 struct intel_vgpu *vgpu = workload->vgpu;
634 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
635 int ring_id = workload->ring_id;
636 int ret;
637
638 gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
639 ring_id, workload);
640
641 mutex_lock(&vgpu->vgpu_lock);
642 mutex_lock(&dev_priv->drm.struct_mutex);
643
644 ret = intel_gvt_scan_and_shadow_workload(workload);
645 if (ret)
646 goto out;
647
648 ret = prepare_workload(workload);
649
650 out:
651 if (ret)
652 workload->status = ret;
653
654 if (!IS_ERR_OR_NULL(workload->req)) {
655 gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
656 ring_id, workload->req);
657 i915_request_add(workload->req);
658 workload->dispatched = true;
659 }
660
661 mutex_unlock(&dev_priv->drm.struct_mutex);
662 mutex_unlock(&vgpu->vgpu_lock);
663 return ret;
664 }
665
static struct intel_vgpu_workload *pick_next_workload(
		struct intel_gvt *gvt, int ring_id)
668 {
669 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
670 struct intel_vgpu_workload *workload = NULL;
671
672 mutex_lock(&gvt->sched_lock);
673
674 /*
675 * no current vgpu / will be scheduled out / no workload
676 * bail out
677 */
678 if (!scheduler->current_vgpu) {
679 gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
680 goto out;
681 }
682
683 if (scheduler->need_reschedule) {
684 gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
685 goto out;
686 }
687
688 if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
689 goto out;
690
	/*
	 * still have a current workload; maybe the workload dispatcher
	 * failed to submit it for some reason, so resubmit it.
	 */
695 if (scheduler->current_workload[ring_id]) {
696 workload = scheduler->current_workload[ring_id];
697 gvt_dbg_sched("ring id %d still have current workload %p\n",
698 ring_id, workload);
699 goto out;
700 }
701
	/*
	 * Pick a workload as the current workload.
	 * Once the current workload is set, the scheduling policy routines
	 * will wait for it to finish before trying to schedule out a vGPU.
	 */
708 scheduler->current_workload[ring_id] = container_of(
709 workload_q_head(scheduler->current_vgpu, ring_id)->next,
710 struct intel_vgpu_workload, list);
711
712 workload = scheduler->current_workload[ring_id];
713
714 gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
715
716 atomic_inc(&workload->vgpu->submission.running_workload_num);
717 out:
718 mutex_unlock(&gvt->sched_lock);
719 return workload;
720 }
721
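/* Write the shadow context image back into the guest context once the
 * workload has completed, and advance the guest ring head to the
 * workload's tail.
 */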
static void update_guest_context(struct intel_vgpu_workload *workload)
723 {
724 struct i915_request *rq = workload->req;
725 struct intel_vgpu *vgpu = workload->vgpu;
726 struct intel_gvt *gvt = vgpu->gvt;
727 struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
728 struct execlist_ring_context *shadow_ring_context;
729 struct page *page;
730 void *src;
731 unsigned long context_gpa, context_page_num;
732 int i;
733
734 gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
735 workload->ctx_desc.lrca);
736
737 context_page_num = rq->engine->context_size;
738 context_page_num = context_page_num >> PAGE_SHIFT;
739
740 if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
741 context_page_num = 19;
742
743 i = 2;
744
745 while (i < context_page_num) {
746 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
747 (u32)((workload->ctx_desc.lrca + i) <<
748 I915_GTT_PAGE_SHIFT));
749 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
750 gvt_vgpu_err("invalid guest context descriptor\n");
751 return;
752 }
753
754 page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
755 src = kmap(page);
756 intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
757 I915_GTT_PAGE_SIZE);
758 kunmap(page);
759 i++;
760 }
761
762 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
763 RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
764
765 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
766 shadow_ring_context = kmap(page);
767
768 #define COPY_REG(name) \
769 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
770 RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
771
772 COPY_REG(ctx_ctrl);
773 COPY_REG(ctx_timestamp);
774
775 #undef COPY_REG
776
777 intel_gvt_hypervisor_write_gpa(vgpu,
778 workload->ring_context_gpa +
779 sizeof(*shadow_ring_context),
780 (void *)shadow_ring_context +
781 sizeof(*shadow_ring_context),
782 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
783
784 kunmap(page);
785 }
786
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
		unsigned long engine_mask)
789 {
790 struct intel_vgpu_submission *s = &vgpu->submission;
791 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
792 struct intel_engine_cs *engine;
793 struct intel_vgpu_workload *pos, *n;
794 unsigned int tmp;
795
	/* free the unsubmitted workloads in the queues. */
797 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
798 list_for_each_entry_safe(pos, n,
799 &s->workload_q_head[engine->id], list) {
800 list_del_init(&pos->list);
801 intel_vgpu_destroy_workload(pos);
802 }
803 clear_bit(engine->id, s->shadow_ctx_desc_updated);
804 }
805 }
806
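/* Retire the current workload on a ring: wait for the shadow context to be
 * scheduled out, propagate the request status, write back the guest
 * context and deliver pending events, then release resources and wake up
 * any waiters.
 */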
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
808 {
809 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
810 struct intel_vgpu_workload *workload =
811 scheduler->current_workload[ring_id];
812 struct intel_vgpu *vgpu = workload->vgpu;
813 struct intel_vgpu_submission *s = &vgpu->submission;
814 struct i915_request *rq = workload->req;
815 int event;
816
817 mutex_lock(&vgpu->vgpu_lock);
818 mutex_lock(&gvt->sched_lock);
819
	/* For a workload with a request, we need to wait for the context
	 * switch to make sure the request is completed.
	 * For a workload without a request, complete the workload directly.
	 */
824 if (rq) {
825 wait_event(workload->shadow_ctx_status_wq,
826 !atomic_read(&workload->shadow_ctx_active));
827
		/* If this request caused a GPU hang, req->fence.error will
		 * be set to -EIO. Propagate -EIO to the workload status so
		 * that, when this request caused a GPU hang, we don't
		 * trigger a context switch interrupt to the guest.
		 */
833 if (likely(workload->status == -EINPROGRESS)) {
834 if (workload->req->fence.error == -EIO)
835 workload->status = -EIO;
836 else
837 workload->status = 0;
838 }
839
840 if (!workload->status && !(vgpu->resetting_eng &
841 ENGINE_MASK(ring_id))) {
842 update_guest_context(workload);
843
844 for_each_set_bit(event, workload->pending_events,
845 INTEL_GVT_EVENT_MAX)
846 intel_vgpu_trigger_virtual_event(vgpu, event);
847 }
848
849 /* unpin shadow ctx as the shadow_ctx update is done */
850 mutex_lock(&rq->i915->drm.struct_mutex);
851 intel_context_unpin(rq->hw_context);
852 mutex_unlock(&rq->i915->drm.struct_mutex);
853
854 i915_request_put(fetch_and_zero(&workload->req));
855 }
856
857 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
858 ring_id, workload, workload->status);
859
860 scheduler->current_workload[ring_id] = NULL;
861
862 list_del_init(&workload->list);
863
864 if (!workload->status) {
865 release_shadow_batch_buffer(workload);
866 release_shadow_wa_ctx(&workload->wa_ctx);
867 }
868
869 if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
		/* If workload->status is not successful, it means the HW GPU
		 * hit a GPU hang or something went wrong with i915/GVT, and
		 * GVT won't inject a context switch interrupt to the guest.
		 * To the guest this error is effectively a vGPU hang, so we
		 * should emulate one. If there are pending workloads already
		 * submitted from the guest, we should clean them up the way
		 * the HW GPU would.
		 *
		 * If we are in the middle of an engine reset, the pending
		 * workloads won't be submitted to the HW GPU and will be
		 * cleaned up later during the reset, so doing the workload
		 * cleanup here has no impact.
		 */
883 intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
884 }
885
886 workload->complete(workload);
887
888 atomic_dec(&s->running_workload_num);
889 wake_up(&scheduler->workload_complete_wq);
890
891 if (gvt->scheduler.need_reschedule)
892 intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
893
894 mutex_unlock(&gvt->sched_lock);
895 mutex_unlock(&vgpu->vgpu_lock);
896 }
897
898 struct workload_thread_param {
899 struct intel_gvt *gvt;
900 int ring_id;
901 };
902
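/* Per-ring worker thread: picks the next vGPU workload, dispatches it to
 * i915, waits for the request to complete and then retires the workload.
 */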
static int workload_thread(void *priv)
904 {
905 struct workload_thread_param *p = (struct workload_thread_param *)priv;
906 struct intel_gvt *gvt = p->gvt;
907 int ring_id = p->ring_id;
908 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
909 struct intel_vgpu_workload *workload = NULL;
910 struct intel_vgpu *vgpu = NULL;
911 int ret;
912 bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
913 || IS_KABYLAKE(gvt->dev_priv)
914 || IS_BROXTON(gvt->dev_priv);
915 DEFINE_WAIT_FUNC(wait, woken_wake_function);
916
917 kfree(p);
918
919 gvt_dbg_core("workload thread for ring %d started\n", ring_id);
920
921 while (!kthread_should_stop()) {
922 add_wait_queue(&scheduler->waitq[ring_id], &wait);
923 do {
924 workload = pick_next_workload(gvt, ring_id);
925 if (workload)
926 break;
927 wait_woken(&wait, TASK_INTERRUPTIBLE,
928 MAX_SCHEDULE_TIMEOUT);
929 } while (!kthread_should_stop());
930 remove_wait_queue(&scheduler->waitq[ring_id], &wait);
931
932 if (!workload)
933 break;
934
935 gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
936 workload->ring_id, workload,
937 workload->vgpu->id);
938
939 intel_runtime_pm_get(gvt->dev_priv);
940
941 gvt_dbg_sched("ring id %d will dispatch workload %p\n",
942 workload->ring_id, workload);
943
944 if (need_force_wake)
945 intel_uncore_forcewake_get(gvt->dev_priv,
946 FORCEWAKE_ALL);
947
948 ret = dispatch_workload(workload);
949
950 if (ret) {
951 vgpu = workload->vgpu;
952 gvt_vgpu_err("fail to dispatch workload, skip\n");
953 goto complete;
954 }
955
956 gvt_dbg_sched("ring id %d wait workload %p\n",
957 workload->ring_id, workload);
958 i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
959
960 complete:
961 gvt_dbg_sched("will complete workload %p, status: %d\n",
962 workload, workload->status);
963
964 complete_current_workload(gvt, ring_id);
965
966 if (need_force_wake)
967 intel_uncore_forcewake_put(gvt->dev_priv,
968 FORCEWAKE_ALL);
969
970 intel_runtime_pm_put(gvt->dev_priv);
971 if (ret && (vgpu_is_vm_unhealthy(ret)))
972 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
973 }
974 return 0;
975 }
976
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
978 {
979 struct intel_vgpu_submission *s = &vgpu->submission;
980 struct intel_gvt *gvt = vgpu->gvt;
981 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
982
983 if (atomic_read(&s->running_workload_num)) {
984 gvt_dbg_sched("wait vgpu idle\n");
985
986 wait_event(scheduler->workload_complete_wq,
987 !atomic_read(&s->running_workload_num));
988 }
989 }
990
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
992 {
993 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
994 struct intel_engine_cs *engine;
995 enum intel_engine_id i;
996
997 gvt_dbg_core("clean workload scheduler\n");
998
999 for_each_engine(engine, gvt->dev_priv, i) {
1000 atomic_notifier_chain_unregister(
1001 &engine->context_status_notifier,
1002 &gvt->shadow_ctx_notifier_block[i]);
1003 kthread_stop(scheduler->thread[i]);
1004 }
1005 }
1006
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
1008 {
1009 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1010 struct workload_thread_param *param = NULL;
1011 struct intel_engine_cs *engine;
1012 enum intel_engine_id i;
1013 int ret;
1014
1015 gvt_dbg_core("init workload scheduler\n");
1016
1017 init_waitqueue_head(&scheduler->workload_complete_wq);
1018
1019 for_each_engine(engine, gvt->dev_priv, i) {
1020 init_waitqueue_head(&scheduler->waitq[i]);
1021
1022 param = kzalloc(sizeof(*param), GFP_KERNEL);
1023 if (!param) {
1024 ret = -ENOMEM;
1025 goto err;
1026 }
1027
1028 param->gvt = gvt;
1029 param->ring_id = i;
1030
1031 scheduler->thread[i] = kthread_run(workload_thread, param,
1032 "gvt workload %d", i);
1033 if (IS_ERR(scheduler->thread[i])) {
1034 gvt_err("fail to create workload thread\n");
1035 ret = PTR_ERR(scheduler->thread[i]);
1036 goto err;
1037 }
1038
1039 gvt->shadow_ctx_notifier_block[i].notifier_call =
1040 shadow_context_status_change;
1041 atomic_notifier_chain_register(&engine->context_status_notifier,
1042 &gvt->shadow_ctx_notifier_block[i]);
1043 }
1044 return 0;
1045 err:
1046 intel_gvt_clean_workload_scheduler(gvt);
1047 kfree(param);
1048 param = NULL;
1049 return ret;
1050 }
1051
1052 /**
1053 * intel_vgpu_clean_submission - free submission-related resource for vGPU
1054 * @vgpu: a vGPU
1055 *
1056 * This function is called when a vGPU is being destroyed.
1057 *
1058 */
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
1060 {
1061 struct intel_vgpu_submission *s = &vgpu->submission;
1062
1063 intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
1064 i915_gem_context_put(s->shadow_ctx);
1065 kmem_cache_destroy(s->workloads);
1066 }
1067
1068
/**
 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
 * @vgpu: a vGPU
 * @engine_mask: engines expected to be reset
 *
 * This function is called when a vGPU is being reset.
 *
 */
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
		unsigned long engine_mask)
1079 {
1080 struct intel_vgpu_submission *s = &vgpu->submission;
1081
1082 if (!s->active)
1083 return;
1084
1085 intel_vgpu_clean_workloads(vgpu, engine_mask);
1086 s->ops->reset(vgpu, engine_mask);
1087 }
1088
1089 /**
1090 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
1091 * @vgpu: a vGPU
1092 *
1093 * This function is called when a vGPU is being created.
1094 *
1095 * Returns:
1096 * Zero on success, negative error code if failed.
1097 *
1098 */
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
1100 {
1101 struct intel_vgpu_submission *s = &vgpu->submission;
1102 enum intel_engine_id i;
1103 struct intel_engine_cs *engine;
1104 int ret;
1105
1106 s->shadow_ctx = i915_gem_context_create_gvt(
1107 &vgpu->gvt->dev_priv->drm);
1108 if (IS_ERR(s->shadow_ctx))
1109 return PTR_ERR(s->shadow_ctx);
1110
1111 bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
1112
1113 s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
1114 sizeof(struct intel_vgpu_workload), 0,
1115 SLAB_HWCACHE_ALIGN,
1116 offsetof(struct intel_vgpu_workload, rb_tail),
1117 sizeof_field(struct intel_vgpu_workload, rb_tail),
1118 NULL);
1119
1120 if (!s->workloads) {
1121 ret = -ENOMEM;
1122 goto out_shadow_ctx;
1123 }
1124
1125 for_each_engine(engine, vgpu->gvt->dev_priv, i)
1126 INIT_LIST_HEAD(&s->workload_q_head[i]);
1127
1128 atomic_set(&s->running_workload_num, 0);
1129 bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
1130
1131 return 0;
1132
1133 out_shadow_ctx:
1134 i915_gem_context_put(s->shadow_ctx);
1135 return ret;
1136 }
1137
1138 /**
1139 * intel_vgpu_select_submission_ops - select virtual submission interface
1140 * @vgpu: a vGPU
1141 * @interface: expected vGPU virtual submission interface
1142 *
1143 * This function is called when guest configures submission interface.
1144 *
1145 * Returns:
1146 * Zero on success, negative error code if failed.
1147 *
1148 */
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     unsigned long engine_mask,
				     unsigned int interface)
1152 {
1153 struct intel_vgpu_submission *s = &vgpu->submission;
1154 const struct intel_vgpu_submission_ops *ops[] = {
1155 [INTEL_VGPU_EXECLIST_SUBMISSION] =
1156 &intel_vgpu_execlist_submission_ops,
1157 };
1158 int ret;
1159
1160 if (WARN_ON(interface >= ARRAY_SIZE(ops)))
1161 return -EINVAL;
1162
1163 if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
1164 return -EINVAL;
1165
1166 if (s->active)
1167 s->ops->clean(vgpu, engine_mask);
1168
1169 if (interface == 0) {
1170 s->ops = NULL;
1171 s->virtual_submission_interface = 0;
1172 s->active = false;
1173 gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
1174 return 0;
1175 }
1176
1177 ret = ops[interface]->init(vgpu, engine_mask);
1178 if (ret)
1179 return ret;
1180
1181 s->ops = ops[interface];
1182 s->virtual_submission_interface = interface;
1183 s->active = true;
1184
1185 gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
1186 vgpu->id, s->ops->name);
1187
1188 return 0;
1189 }
1190
/**
 * intel_vgpu_destroy_workload - destroy a vGPU workload
 * @workload: the vGPU workload to destroy
 *
 * This function is called when destroying a vGPU workload.
 *
 */
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
1199 {
1200 struct intel_vgpu_submission *s = &workload->vgpu->submission;
1201
1202 if (workload->shadow_mm)
1203 intel_vgpu_mm_put(workload->shadow_mm);
1204
1205 kmem_cache_free(s->workloads, workload);
1206 }
1207
1208 static struct intel_vgpu_workload *
alloc_workload(struct intel_vgpu *vgpu)
1210 {
1211 struct intel_vgpu_submission *s = &vgpu->submission;
1212 struct intel_vgpu_workload *workload;
1213
1214 workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
1215 if (!workload)
1216 return ERR_PTR(-ENOMEM);
1217
1218 INIT_LIST_HEAD(&workload->list);
1219 INIT_LIST_HEAD(&workload->shadow_bb);
1220
1221 init_waitqueue_head(&workload->shadow_ctx_status_wq);
1222 atomic_set(&workload->shadow_ctx_active, 0);
1223
1224 workload->status = -EINPROGRESS;
1225 workload->vgpu = vgpu;
1226
1227 return workload;
1228 }
1229
1230 #define RING_CTX_OFF(x) \
1231 offsetof(struct execlist_ring_context, x)
1232
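/* Read the eight guest PDP root pointers from the guest ring context. The
 * pdp[] array is filled in reverse order relative to the context-image
 * layout, matching set_context_pdp_root_pointer().
 */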
static void read_guest_pdps(struct intel_vgpu *vgpu,
		u64 ring_context_gpa, u32 pdp[8])
1235 {
1236 u64 gpa;
1237 int i;
1238
1239 gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
1240
1241 for (i = 0; i < 8; i++)
1242 intel_gvt_hypervisor_read_gpa(vgpu,
1243 gpa + i * 8, &pdp[7 - i], 4);
1244 }
1245
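/* Derive the PPGTT root entry type from the context's addressing mode,
 * read the guest PDPs and get (or create) the matching shadow mm for this
 * workload.
 */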
static int prepare_mm(struct intel_vgpu_workload *workload)
1247 {
1248 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
1249 struct intel_vgpu_mm *mm;
1250 struct intel_vgpu *vgpu = workload->vgpu;
1251 intel_gvt_gtt_type_t root_entry_type;
1252 u64 pdps[GVT_RING_CTX_NR_PDPS];
1253
1254 switch (desc->addressing_mode) {
1255 case 1: /* legacy 32-bit */
1256 root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
1257 break;
1258 case 3: /* legacy 64-bit */
1259 root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
1260 break;
1261 default:
1262 gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
1263 return -EINVAL;
1264 }
1265
1266 read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
1267
1268 mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
1269 if (IS_ERR(mm))
1270 return PTR_ERR(mm);
1271
1272 workload->shadow_mm = mm;
1273 return 0;
1274 }
1275
1276 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
1277 ((a)->lrca == (b)->lrca))
1278
1279 #define get_last_workload(q) \
1280 (list_empty(q) ? NULL : container_of(q->prev, \
1281 struct intel_vgpu_workload, list))
/**
 * intel_vgpu_create_workload - create a vGPU workload
 * @vgpu: a vGPU
 * @ring_id: ring index the workload is submitted on
 * @desc: a guest context descriptor
 *
 * This function is called when creating a vGPU workload.
 *
 * Returns:
 * struct intel_vgpu_workload * on success, negative error code in
 * pointer if failed.
 *
 */
1294 struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
			   struct execlist_ctx_descriptor_format *desc)
1297 {
1298 struct intel_vgpu_submission *s = &vgpu->submission;
1299 struct list_head *q = workload_q_head(vgpu, ring_id);
1300 struct intel_vgpu_workload *last_workload = get_last_workload(q);
1301 struct intel_vgpu_workload *workload = NULL;
1302 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
1303 u64 ring_context_gpa;
1304 u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
1305 int ret;
1306
1307 ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
1308 (u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
1309 if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
1310 gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
1311 return ERR_PTR(-EINVAL);
1312 }
1313
1314 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1315 RING_CTX_OFF(ring_header.val), &head, 4);
1316
1317 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1318 RING_CTX_OFF(ring_tail.val), &tail, 4);
1319
1320 head &= RB_HEAD_OFF_MASK;
1321 tail &= RB_TAIL_OFF_MASK;
1322
1323 if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
1324 gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
1325 gvt_dbg_el("ctx head %x real head %lx\n", head,
1326 last_workload->rb_tail);
1327 /*
1328 * cannot use guest context head pointer here,
1329 * as it might not be updated at this time
1330 */
1331 head = last_workload->rb_tail;
1332 }
1333
1334 gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
1335
1336 /* record some ring buffer register values for scan and shadow */
1337 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1338 RING_CTX_OFF(rb_start.val), &start, 4);
1339 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1340 RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
1341 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1342 RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
1343
1344 workload = alloc_workload(vgpu);
1345 if (IS_ERR(workload))
1346 return workload;
1347
1348 workload->ring_id = ring_id;
1349 workload->ctx_desc = *desc;
1350 workload->ring_context_gpa = ring_context_gpa;
1351 workload->rb_head = head;
1352 workload->rb_tail = tail;
1353 workload->rb_start = start;
1354 workload->rb_ctl = ctl;
1355
1356 if (ring_id == RCS) {
1357 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1358 RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
1359 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1360 RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
1361
1362 workload->wa_ctx.indirect_ctx.guest_gma =
1363 indirect_ctx & INDIRECT_CTX_ADDR_MASK;
1364 workload->wa_ctx.indirect_ctx.size =
1365 (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
1366 CACHELINE_BYTES;
1367 workload->wa_ctx.per_ctx.guest_gma =
1368 per_ctx & PER_CTX_ADDR_MASK;
1369 workload->wa_ctx.per_ctx.valid = per_ctx & 1;
1370 }
1371
1372 gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
1373 workload, ring_id, head, tail, start, ctl);
1374
1375 ret = prepare_mm(workload);
1376 if (ret) {
1377 kmem_cache_free(s->workloads, workload);
1378 return ERR_PTR(ret);
1379 }
1380
1381 /* Only scan and shadow the first workload in the queue
1382 * as there is only one pre-allocated buf-obj for shadow.
1383 */
1384 if (list_empty(workload_q_head(vgpu, ring_id))) {
1385 intel_runtime_pm_get(dev_priv);
1386 mutex_lock(&dev_priv->drm.struct_mutex);
1387 ret = intel_gvt_scan_and_shadow_workload(workload);
1388 mutex_unlock(&dev_priv->drm.struct_mutex);
1389 intel_runtime_pm_put(dev_priv);
1390 }
1391
1392 if (ret && (vgpu_is_vm_unhealthy(ret))) {
1393 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1394 intel_vgpu_destroy_workload(workload);
1395 return ERR_PTR(ret);
1396 }
1397
1398 return workload;
1399 }
1400
/**
 * intel_vgpu_queue_workload - queue a vGPU workload
 * @workload: the workload to queue
 */
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
1406 {
1407 list_add_tail(&workload->list,
1408 workload_q_head(workload->vgpu, workload->ring_id));
1409 intel_gvt_kick_schedule(workload->vgpu->gvt);
1410 wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
1411 }
1412