Lines Matching +full:cs +full:- +full:2

1 // SPDX-License-Identifier: MIT
31 GEM_BUG_ON(engine->class != COPY_ENGINE_CLASS); in engine_supports_migration()
42 vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE, in insert_pte()
43 d->is_lmem ? PTE_LM : 0); in insert_pte()
44 d->offset += PAGE_SIZE; in insert_pte()
57 * to pre-allocate the page directories for the migration VM, this in migrate_vm()
64 * fly. Only 2 implicit vma are used for all migration operations. in migrate_vm()
68 * [0, CHUNK_SZ) -> first object in migrate_vm()
69 * [CHUNK_SZ, 2 * CHUNK_SZ) -> second object in migrate_vm()
70 * [2 * CHUNK_SZ, 2 * CHUNK_SZ + 2 * CHUNK_SZ >> 9] -> PTE in migrate_vm()
75 * i.e. within the same non-preemptible window so that we do not switch in migrate_vm()
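To make the window arithmetic in that comment concrete, here is a small standalone C sketch (an editor's illustration, not driver code). It assumes the 8MiB CHUNK_SZ mentioned at line 106 below, 4KiB pages and 8-byte PTEs, which is where the "2 * CHUNK_SZ >> 9" size of the PTE window comes from.

	#include <stdio.h>

	#define SZ_8M     (8ul << 20)
	#define CHUNK_SZ  SZ_8M		/* assumed: the 8MiB chunk from line 106 */
	#define PAGE_SIZE 4096ul

	int main(void)
	{
		unsigned long src = 0;			/* [0, CHUNK_SZ) -> first object */
		unsigned long dst = CHUNK_SZ;		/* [CHUNK_SZ, 2 * CHUNK_SZ) -> second object */
		unsigned long pte = 2 * CHUNK_SZ;	/* PTE window base */

		/* 8 bytes of PTE per 4KiB page => 2 * CHUNK_SZ >> 9 bytes of PTEs */
		unsigned long pte_bytes = 2 * CHUNK_SZ / PAGE_SIZE * 8;

		printf("src window: [0x%lx, 0x%lx)\n", src, src + CHUNK_SZ);
		printf("dst window: [0x%lx, 0x%lx)\n", dst, dst + CHUNK_SZ);
		printf("pte window: [0x%lx, 0x%lx), %lu bytes (== 2 * CHUNK_SZ >> 9)\n",
		       pte, pte + pte_bytes, pte_bytes);
		return 0;
	}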
85 if (!vm->vm.allocate_va_range || !vm->vm.foreach) { in migrate_vm()
86 err = -ENODEV; in migrate_vm()
94 for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) { in migrate_vm()
101 engine = gt->engine_class[COPY_ENGINE_CLASS][i]; in migrate_vm()
106 * We copy in 8MiB chunks. Each PDE covers 2MiB, so we need in migrate_vm()
109 sz = 2 * CHUNK_SZ; in migrate_vm()
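The 2 * CHUNK_SZ allocation above follows directly from that comment: each PDE spans 2MiB, so an 8MiB source window plus an 8MiB destination window need eight PDEs in total. A hypothetical helper (not from the driver) checking that count:

	#include <assert.h>
	#include <stdio.h>

	#define SZ_2M    (2ul << 20)
	#define SZ_8M    (8ul << 20)
	#define CHUNK_SZ SZ_8M			/* assumed 8MiB chunk size from line 106 */

	/* hypothetical helper: how many 2MiB PDEs cover a byte range */
	static unsigned long pde_count(unsigned long bytes)
	{
		return (bytes + SZ_2M - 1) / SZ_2M;
	}

	int main(void)
	{
		assert(pde_count(2 * CHUNK_SZ) == 8);	/* 4 PDEs per 8MiB window, src + dst */
		printf("%lu PDEs for 2 * CHUNK_SZ\n", pde_count(2 * CHUNK_SZ));
		return 0;
	}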
118 err = i915_vm_alloc_pt_stash(&vm->vm, &stash, sz); in migrate_vm()
123 err = i915_vm_lock_objects(&vm->vm, &ww); in migrate_vm()
126 err = i915_vm_map_pt_stash(&vm->vm, &stash); in migrate_vm()
130 vm->vm.allocate_va_range(&vm->vm, &stash, base, sz); in migrate_vm()
132 i915_vm_free_pt_stash(&vm->vm, &stash); in migrate_vm()
137 d.is_lmem = i915_gem_object_is_lmem(vm->vm.scratch[0]); in migrate_vm()
138 vm->vm.foreach(&vm->vm, base, base + sz, insert_pte, &d); in migrate_vm()
141 return &vm->vm; in migrate_vm()
144 i915_vm_put(&vm->vm); in migrate_vm()
153 for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) { in first_copy_engine()
154 engine = gt->engine_class[COPY_ENGINE_CLASS][i]; in first_copy_engine()
171 return ERR_PTR(-ENODEV); in pinned_context()
194 m->context = ce; in intel_migrate_init()
210 for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) { in __migrate_engines()
211 engine = gt->engine_class[COPY_ENGINE_CLASS][i]; in __migrate_engines()
229 * balancing of the virtual-engine. in intel_migrate_create_context()
231 ce = __migrate_engines(m->context->engine->gt); in intel_migrate_create_context()
235 ce->ring = NULL; in intel_migrate_create_context()
236 ce->ring_size = SZ_256K; in intel_migrate_create_context()
238 i915_vm_put(ce->vm); in intel_migrate_create_context()
239 ce->vm = i915_vm_get(m->context->vm); in intel_migrate_create_context()
253 u32 *cs; in emit_no_arbitration() local
255 cs = intel_ring_begin(rq, 2); in emit_no_arbitration()
256 if (IS_ERR(cs)) in emit_no_arbitration()
257 return PTR_ERR(cs); in emit_no_arbitration()
260 *cs++ = MI_ARB_ON_OFF; in emit_no_arbitration()
261 *cs++ = MI_NOOP; in emit_no_arbitration()
262 intel_ring_advance(rq, cs); in emit_no_arbitration()
274 const u64 encode = rq->context->vm->pte_encode(0, cache_level, in emit_pte()
276 struct intel_ring *ring = rq->ring; in emit_pte()
278 u32 *hdr, *cs; in emit_pte() local
281 GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8); in emit_pte()
284 offset += (u64)rq->engine->instance << 32; in emit_pte()
287 offset += 2 * CHUNK_SZ; in emit_pte()
289 cs = intel_ring_begin(rq, 6); in emit_pte()
290 if (IS_ERR(cs)) in emit_pte()
291 return PTR_ERR(cs); in emit_pte()
294 pkt = min_t(int, 0x400, ring->space / sizeof(u32) + 5); in emit_pte()
295 pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5); in emit_pte()
297 hdr = cs; in emit_pte()
298 *cs++ = MI_STORE_DATA_IMM | REG_BIT(21); /* as qword elements */ in emit_pte()
299 *cs++ = lower_32_bits(offset); in emit_pte()
300 *cs++ = upper_32_bits(offset); in emit_pte()
303 if (cs - hdr >= pkt) { in emit_pte()
304 *hdr += cs - hdr - 2; in emit_pte()
305 *cs++ = MI_NOOP; in emit_pte()
307 ring->emit = (void *)cs - ring->vaddr; in emit_pte()
308 intel_ring_advance(rq, cs); in emit_pte()
311 cs = intel_ring_begin(rq, 6); in emit_pte()
312 if (IS_ERR(cs)) in emit_pte()
313 return PTR_ERR(cs); in emit_pte()
315 pkt = min_t(int, 0x400, ring->space / sizeof(u32) + 5); in emit_pte()
316 pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5); in emit_pte()
318 hdr = cs; in emit_pte()
319 *cs++ = MI_STORE_DATA_IMM | REG_BIT(21); in emit_pte()
320 *cs++ = lower_32_bits(offset); in emit_pte()
321 *cs++ = upper_32_bits(offset); in emit_pte()
324 *cs++ = lower_32_bits(encode | it->dma); in emit_pte()
325 *cs++ = upper_32_bits(encode | it->dma); in emit_pte()
330 it->dma += I915_GTT_PAGE_SIZE; in emit_pte()
331 if (it->dma >= it->max) { in emit_pte()
332 it->sg = __sg_next(it->sg); in emit_pte()
333 if (!it->sg || sg_dma_len(it->sg) == 0) in emit_pte()
336 it->dma = sg_dma_address(it->sg); in emit_pte()
337 it->max = it->dma + sg_dma_len(it->sg); in emit_pte()
341 *hdr += cs - hdr - 2; in emit_pte()
342 *cs++ = MI_NOOP; in emit_pte()
344 ring->emit = (void *)cs - ring->vaddr; in emit_pte()
345 intel_ring_advance(rq, cs); in emit_pte()
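The MI_STORE_DATA_IMM emission above relies on a common idiom: the packet length is not known until all PTEs for the current scatterlist run have been written, so the header at hdr is emitted first and its dword-count field is patched afterwards with "cs - hdr - 2" (MI length fields count total dwords minus 2, matching the "(10 - 2)"-style constants elsewhere in this file). A standalone sketch of just that fixup, using a dummy zero opcode instead of the real command encoding:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t ring[8];
		uint32_t *cs = ring, *hdr;

		hdr = cs;
		*cs++ = 0;		/* command opcode placeholder; length patched below */
		*cs++ = 0x1000;		/* lower_32_bits(offset) */
		*cs++ = 0;		/* upper_32_bits(offset) */
		*cs++ = 0xdeadbeef;	/* one qword of PTE payload... */
		*cs++ = 0x00000001;	/* ...low then high halves */

		*hdr += cs - hdr - 2;	/* same fixup as lines 304 and 341 above */

		printf("%td dwords emitted, length field = %u\n", cs - ring, ring[0]);
		return 0;
	}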
363 const int ver = GRAPHICS_VER(rq->engine->i915); in emit_copy()
364 u32 instance = rq->engine->instance; in emit_copy()
365 u32 *cs; in emit_copy() local
367 cs = intel_ring_begin(rq, ver >= 8 ? 10 : 6); in emit_copy()
368 if (IS_ERR(cs)) in emit_copy()
369 return PTR_ERR(cs); in emit_copy()
372 *cs++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2); in emit_copy()
373 *cs++ = BLT_DEPTH_32 | PAGE_SIZE; in emit_copy()
374 *cs++ = 0; in emit_copy()
375 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4; in emit_copy()
376 *cs++ = CHUNK_SZ; /* dst offset */ in emit_copy()
377 *cs++ = instance; in emit_copy()
378 *cs++ = 0; in emit_copy()
379 *cs++ = PAGE_SIZE; in emit_copy()
380 *cs++ = 0; /* src offset */ in emit_copy()
381 *cs++ = instance; in emit_copy()
383 *cs++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2); in emit_copy()
384 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE; in emit_copy()
385 *cs++ = 0; in emit_copy()
386 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4; in emit_copy()
387 *cs++ = CHUNK_SZ; /* dst offset */ in emit_copy()
388 *cs++ = instance; in emit_copy()
389 *cs++ = 0; in emit_copy()
390 *cs++ = PAGE_SIZE; in emit_copy()
391 *cs++ = 0; /* src offset */ in emit_copy()
392 *cs++ = instance; in emit_copy()
395 *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2); in emit_copy()
396 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE; in emit_copy()
397 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE; in emit_copy()
398 *cs++ = CHUNK_SZ; /* dst offset */ in emit_copy()
399 *cs++ = PAGE_SIZE; in emit_copy()
400 *cs++ = 0; /* src offset */ in emit_copy()
403 intel_ring_advance(rq, cs); in emit_copy()
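The dword written at line 375 (and again at 386) packs the blit dimensions: with BLT_DEPTH_32 the copy is treated as a rectangle one page wide, PAGE_SIZE / 4 32-bit pixels per row and one row per page, so the page count lands in the upper 16 bits. A quick standalone check of that encoding for a full 8MiB chunk (editor's sketch, not driver code):

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1u << PAGE_SHIFT)
	#define SZ_8M      (8u << 20)
	#define CHUNK_SZ   SZ_8M		/* assumed 8MiB chunk size from line 106 */

	int main(void)
	{
		unsigned int size = CHUNK_SZ;	/* one full chunk */
		unsigned int dim = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;

		/* 8MiB => 2048 rows of 1024 32-bit pixels, i.e. one 4KiB page per row */
		printf("rows %u, pixels per row %u\n", dim >> 16, dim & 0xffff);
		return 0;
	}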
422 GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm); in intel_context_migrate_copy()
425 GEM_BUG_ON(ce->ring->size < SZ_64K); in intel_context_migrate_copy()
441 if (rq->engine->emit_init_breadcrumb) { in intel_context_migrate_copy()
442 err = rq->engine->emit_init_breadcrumb(rq); in intel_context_migrate_copy()
467 err = -EINVAL; in intel_context_migrate_copy()
471 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_migrate_copy()
477 /* Arbitration is re-enabled between requests. */ in intel_context_migrate_copy()
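The comment at line 477 is the other half of the non-preemptible-window rule from the migrate_vm() comment: within a single request the PTE writes and the blit must not be interrupted, so arbitration is switched off (line 260), and preemption is only possible on the CHUNK_SZ request boundaries. A rough userspace sketch of that chunking policy, using the assumed 8MiB CHUNK_SZ and an arbitrary 100MiB object:

	#include <stdio.h>

	#define SZ_8M    (8ull << 20)
	#define CHUNK_SZ SZ_8M			/* assumed 8MiB chunk size from line 106 */

	int main(void)
	{
		unsigned long long total = 100ull << 20;	/* arbitrary 100MiB object */
		unsigned int requests = 0;

		while (total) {
			unsigned long long len = total < CHUNK_SZ ? total : CHUNK_SZ;

			/* one request: disable arbitration, emit PTEs, flush, blit */
			requests++;
			total -= len;
			/* arbitration is re-enabled between requests */
		}

		printf("%u requests of at most %llu bytes\n", requests, CHUNK_SZ);
		return 0;
	}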
495 const int ver = GRAPHICS_VER(rq->engine->i915); in emit_clear()
496 u32 instance = rq->engine->instance; in emit_clear()
497 u32 *cs; in emit_clear() local
501 cs = intel_ring_begin(rq, ver >= 8 ? 8 : 6); in emit_clear()
502 if (IS_ERR(cs)) in emit_clear()
503 return PTR_ERR(cs); in emit_clear()
506 *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2); in emit_clear()
507 *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE; in emit_clear()
508 *cs++ = 0; in emit_clear()
509 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4; in emit_clear()
510 *cs++ = 0; /* offset */ in emit_clear()
511 *cs++ = instance; in emit_clear()
512 *cs++ = value; in emit_clear()
513 *cs++ = MI_NOOP; in emit_clear()
516 *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2); in emit_clear()
517 *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE; in emit_clear()
518 *cs++ = 0; in emit_clear()
519 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4; in emit_clear()
520 *cs++ = 0; in emit_clear()
521 *cs++ = value; in emit_clear()
524 intel_ring_advance(rq, cs); in emit_clear()
541 GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm); in intel_context_migrate_clear()
544 GEM_BUG_ON(ce->ring->size < SZ_64K); in intel_context_migrate_clear()
560 if (rq->engine->emit_init_breadcrumb) { in intel_context_migrate_clear()
561 err = rq->engine->emit_init_breadcrumb(rq); in intel_context_migrate_clear()
580 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_migrate_clear()
586 /* Arbitration is re-enabled between requests. */ in intel_context_migrate_clear()
617 if (!m->context) in intel_migrate_copy()
618 return -ENODEV; in intel_migrate_copy()
622 ce = intel_context_get(m->context); in intel_migrate_copy()
654 if (!m->context) in intel_migrate_clear()
655 return -ENODEV; in intel_migrate_clear()
659 ce = intel_context_get(m->context); in intel_migrate_clear()
679 ce = fetch_and_zero(&m->context); in intel_migrate_fini()