Lines Matching +full:cs +full:- +full:2
1 // SPDX-License-Identifier: MIT
32 GEM_BUG_ON(engine->class != COPY_ENGINE_CLASS); in engine_supports_migration()
47 vm->insert_page(vm, 0, d->offset, I915_CACHE_NONE, PTE_LM); in xehpsdv_toggle_pdes()
48 GEM_BUG_ON(!pt->is_compact); in xehpsdv_toggle_pdes()
49 d->offset += SZ_2M; in xehpsdv_toggle_pdes()
65 vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE, PTE_LM); in xehpsdv_insert_pte()
66 d->offset += SZ_64K; in xehpsdv_insert_pte()
75 vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE, in insert_pte()
76 i915_gem_object_is_lmem(pt->base) ? PTE_LM : 0); in insert_pte()
77 d->offset += PAGE_SIZE; in insert_pte()
90 * to pre-allocate the page directories for the migration VM, this in migrate_vm()
97 * fly. Only two implicit VMAs are used for all migration operations. in migrate_vm()
101 * [0, CHUNK_SZ) -> first object in migrate_vm()
102 * [CHUNK_SZ, 2 * CHUNK_SZ) -> second object in migrate_vm()
103 * [2 * CHUNK_SZ, 2 * CHUNK_SZ + (2 * CHUNK_SZ >> 9)) -> PTE in migrate_vm()
108 * i.e. within the same non-preemptible window so that we do not switch in migrate_vm()
113 * first is reserved for mapping system-memory, and that just uses the in migrate_vm()
119 * compact layout for each of these page-tables that fall within the in migrate_vm()
124 * [0, CHUNK_SZ) -> first window/object, maps smem in migrate_vm()
125 * [CHUNK_SZ, 2 * CHUNK_SZ) -> second window/object, maps lmem src in migrate_vm()
126 * [2 * CHUNK_SZ, 3 * CHUNK_SZ) -> third window/object, maps lmem dst in migrate_vm()
136 * [3 * CHUNK_SZ, 3 * CHUNK_SZ + ((3 * CHUNK_SZ / SZ_2M) * SZ_64K)) -> PTE in migrate_vm()
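The window arithmetic above can be sanity-checked in isolation. A minimal
standalone sketch, not driver code: it assumes CHUNK_SZ is 8 MiB (per the
chunking comment further down), 8-byte PTEs over 4 KiB pages for the legacy
layout, and one 64 KiB compact page-table per 2 MiB of window for the
64K-pages layout:

	#include <stdio.h>

	#define SZ_64K   (64ull << 10)
	#define SZ_2M    (2ull << 20)
	#define CHUNK_SZ (8ull << 20)	/* assumed: 8 MiB per window */

	int main(void)
	{
		/* Legacy: PTE window at 2 * CHUNK_SZ; 8-byte PTEs for 4 KiB
		 * pages make its length (2 * CHUNK_SZ) >> 9. */
		unsigned long long legacy = 2 * CHUNK_SZ;
		unsigned long long legacy_len = (2 * CHUNK_SZ) >> 9;

		/* 64K pages: three windows, then one 64 KiB page-table per
		 * 2 MiB of window space. */
		unsigned long long compact = 3 * CHUNK_SZ;
		unsigned long long compact_len = (3 * CHUNK_SZ / SZ_2M) * SZ_64K;

		printf("legacy  PTE window: [0x%llx, 0x%llx)\n",
		       legacy, legacy + legacy_len);
		printf("compact PTE window: [0x%llx, 0x%llx)\n",
		       compact, compact + compact_len);
		return 0;
	}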
143 if (!vm->vm.allocate_va_range || !vm->vm.foreach) { in migrate_vm()
144 err = -ENODEV; in migrate_vm()
148 if (HAS_64K_PAGES(gt->i915)) in migrate_vm()
155 for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) { in migrate_vm()
162 engine = gt->engine_class[COPY_ENGINE_CLASS][i]; in migrate_vm()
167 * We copy in 8MiB chunks. Each PDE covers 2MiB, so we need in migrate_vm()
170 if (HAS_64K_PAGES(gt->i915)) in migrate_vm()
173 sz = 2 * CHUNK_SZ; in migrate_vm()
180 if (HAS_64K_PAGES(gt->i915)) in migrate_vm()
185 err = i915_vm_alloc_pt_stash(&vm->vm, &stash, sz); in migrate_vm()
190 err = i915_vm_lock_objects(&vm->vm, &ww); in migrate_vm()
193 err = i915_vm_map_pt_stash(&vm->vm, &stash); in migrate_vm()
197 vm->vm.allocate_va_range(&vm->vm, &stash, base, sz); in migrate_vm()
199 i915_vm_free_pt_stash(&vm->vm, &stash); in migrate_vm()
204 if (HAS_64K_PAGES(gt->i915)) { in migrate_vm()
205 vm->vm.foreach(&vm->vm, base, d.offset - base, in migrate_vm()
208 vm->vm.foreach(&vm->vm, in migrate_vm()
210 2 * CHUNK_SZ, in migrate_vm()
213 vm->vm.foreach(&vm->vm, base, d.offset - base, in migrate_vm()
218 return &vm->vm; in migrate_vm()
221 i915_vm_put(&vm->vm); in migrate_vm()
230 for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) { in first_copy_engine()
231 engine = gt->engine_class[COPY_ENGINE_CLASS][i]; in first_copy_engine()
248 return ERR_PTR(-ENODEV); in pinned_context()
271 m->context = ce; in intel_migrate_init()
287 for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) { in __migrate_engines()
288 engine = gt->engine_class[COPY_ENGINE_CLASS][i]; in __migrate_engines()
306 * balancing of the virtual-engine. in intel_migrate_create_context()
308 ce = __migrate_engines(m->context->engine->gt); in intel_migrate_create_context()
312 ce->ring = NULL; in intel_migrate_create_context()
313 ce->ring_size = SZ_256K; in intel_migrate_create_context()
315 i915_vm_put(ce->vm); in intel_migrate_create_context()
316 ce->vm = i915_vm_get(m->context->vm); in intel_migrate_create_context()
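A hypothetical caller-side sketch of the above: users that want their own
context (rather than sharing the single pinned gt->migrate.context) create
one and fall back to the shared context on failure. Everything here besides
intel_migrate_create_context() and intel_context_get() is illustrative:

	struct intel_context *get_copy_context(struct intel_migrate *m)
	{
		struct intel_context *ce;

		ce = intel_migrate_create_context(m);
		if (IS_ERR(ce))
			ce = intel_context_get(m->context); /* shared fallback */

		return ce;
	}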
330 u32 *cs; in emit_no_arbitration() local
332 cs = intel_ring_begin(rq, 2); in emit_no_arbitration()
333 if (IS_ERR(cs)) in emit_no_arbitration()
334 return PTR_ERR(cs); in emit_no_arbitration()
337 *cs++ = MI_ARB_ON_OFF; in emit_no_arbitration()
338 *cs++ = MI_NOOP; in emit_no_arbitration()
339 intel_ring_advance(rq, cs); in emit_no_arbitration()
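MI_ARB_ON_OFF with the enable bit clear switches off command-streamer
arbitration, so everything emitted afterwards in the request runs without
preemption. A hypothetical sketch of where that matters in this file: the
PTE writes and the blit that consumes them must share one non-preemptible
window (begin_chunk() is an illustrative name, not a real function):

	static int begin_chunk(struct i915_request *rq)
	{
		int err;

		/* No preemption until the copy for this chunk completes. */
		err = emit_no_arbitration(rq);
		if (err)
			return err;

		/* ... emit_pte(), flush, then emit_copy()/emit_clear() ... */
		return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
	}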
351 bool has_64K_pages = HAS_64K_PAGES(rq->engine->i915); in emit_pte()
352 const u64 encode = rq->context->vm->pte_encode(0, cache_level, in emit_pte()
354 struct intel_ring *ring = rq->ring; in emit_pte()
358 u32 *hdr, *cs; in emit_pte() local
360 GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8); in emit_pte()
380 offset += 2 * CHUNK_SZ; in emit_pte()
383 offset += (u64)rq->engine->instance << 32; in emit_pte()
385 cs = intel_ring_begin(rq, 6); in emit_pte()
386 if (IS_ERR(cs)) in emit_pte()
387 return PTR_ERR(cs); in emit_pte()
390 pkt = min_t(int, dword_length, ring->space / sizeof(u32) + 5); in emit_pte()
391 pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5); in emit_pte()
393 hdr = cs; in emit_pte()
394 *cs++ = MI_STORE_DATA_IMM | REG_BIT(21); /* as qword elements */ in emit_pte()
395 *cs++ = lower_32_bits(offset); in emit_pte()
396 *cs++ = upper_32_bits(offset); in emit_pte()
399 if (cs - hdr >= pkt) { in emit_pte()
402 *hdr += cs - hdr - 2; in emit_pte()
403 *cs++ = MI_NOOP; in emit_pte()
405 ring->emit = (void *)cs - ring->vaddr; in emit_pte()
406 intel_ring_advance(rq, cs); in emit_pte()
409 cs = intel_ring_begin(rq, 6); in emit_pte()
410 if (IS_ERR(cs)) in emit_pte()
411 return PTR_ERR(cs); in emit_pte()
418 dword_rem = SZ_2M - (total & (SZ_2M - 1)); in emit_pte()
420 dword_rem *= 2; in emit_pte()
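These two lines sit around an unshown file line 419 (plausibly a conversion
from bytes to pages) and together cap a PTE packet so it never spans a 2 MiB
boundary of the target window: in the compact layout, every 2 MiB of window
has its own 64 KiB page-table. A standalone sketch of the arithmetic,
assuming 64 KiB lmem pages at one qword (two dwords) of PTE data each:

	#include <assert.h>

	#define SZ_64K (64u << 10)
	#define SZ_2M  (2u << 20)

	/* PTE dwords that may still be emitted before crossing into the
	 * next 2 MiB (i.e. the next compact page-table). */
	static unsigned int dwords_to_2M_boundary(unsigned int total)
	{
		unsigned int bytes_left = SZ_2M - (total & (SZ_2M - 1));

		return bytes_left / SZ_64K * 2;
	}

	int main(void)
	{
		assert(dwords_to_2M_boundary(0) == 64);	/* 32 pages x 2 */
		assert(dwords_to_2M_boundary(SZ_2M - SZ_64K) == 2);
		return 0;
	}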
424 pkt = min_t(int, dword_rem, ring->space / sizeof(u32) + 5); in emit_pte()
425 pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5); in emit_pte()
427 hdr = cs; in emit_pte()
428 *cs++ = MI_STORE_DATA_IMM | REG_BIT(21); in emit_pte()
429 *cs++ = lower_32_bits(offset); in emit_pte()
430 *cs++ = upper_32_bits(offset); in emit_pte()
433 GEM_BUG_ON(!IS_ALIGNED(it->dma, page_size)); in emit_pte()
435 *cs++ = lower_32_bits(encode | it->dma); in emit_pte()
436 *cs++ = upper_32_bits(encode | it->dma); in emit_pte()
441 it->dma += page_size; in emit_pte()
442 if (it->dma >= it->max) { in emit_pte()
443 it->sg = __sg_next(it->sg); in emit_pte()
444 if (!it->sg || sg_dma_len(it->sg) == 0) in emit_pte()
447 it->dma = sg_dma_address(it->sg); in emit_pte()
448 it->max = it->dma + sg_dma_len(it->sg); in emit_pte()
452 *hdr += cs - hdr - 2; in emit_pte()
453 *cs++ = MI_NOOP; in emit_pte()
455 ring->emit = (void *)cs - ring->vaddr; in emit_pte()
456 intel_ring_advance(rq, cs); in emit_pte()
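The it-> fields advanced in the loop above belong to i915's scatterlist
cursor. A sketch of its shape, matching the fields used here (the real
definition lives elsewhere in the driver):

	struct sgt_dma {
		struct scatterlist *sg;	/* current sg entry */
		dma_addr_t dma;		/* next DMA address to emit */
		dma_addr_t max;		/* end of the current entry */
	};

Each emitted PTE bumps it->dma by page_size; once dma reaches max, the
cursor steps to the next entry via __sg_next() and reloads dma/max from
sg_dma_address()/sg_dma_len(), as shown above.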
473 * DOC: Flat-CCS - Memory compression for Local memory
475 * On Xe-HP and later devices, we use dedicated compression control state (CCS)
488 * I915 supports Flat-CCS on lmem-only objects. When an object has smem in
490 * content into smem. If the lmem object is Flat-CCS compressed by userspace,
492 * for such decompression. Hence I915 supports Flat-CCS only on lmem-only objects.
494 * When we exhaust the lmem, Flat-CCS capable objects' lmem backing memory can
496 * it can potentially be swapped out at a later point, if required.
499 * and potentially performing any required swap-in.
502 * {lmem, smem}, objects are treated as non-Flat-CCS-capable objects.
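The control state itself is small and fixed-ratio: Flat-CCS keeps one byte
of compression state per 256 bytes of main memory. A standalone sketch of
that sizing rule (the 1:256 ratio is the hardware's; the macro and helper
names are illustrative):

	#include <stdio.h>

	#define NUM_BYTES_PER_CCS_BYTE 256	/* 1 CCS byte per 256 main bytes */

	static unsigned long long ccs_bytes(unsigned long long main_bytes)
	{
		return (main_bytes + NUM_BYTES_PER_CCS_BYTE - 1) /
		       NUM_BYTES_PER_CCS_BYTE;
	}

	int main(void)
	{
		/* An 8 MiB migration chunk carries 32 KiB of CCS state. */
		printf("%llu\n", ccs_bytes(8ull << 20));
		return 0;
	}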
518 struct drm_i915_private *i915 = rq->engine->i915; in emit_copy_ccs()
519 int mocs = rq->engine->gt->mocs.uc_index << 1; in emit_copy_ccs()
521 u32 *cs; in emit_copy_ccs() local
523 cs = intel_ring_begin(rq, 12); in emit_copy_ccs()
524 if (IS_ERR(cs)) in emit_copy_ccs()
525 return PTR_ERR(cs); in emit_copy_ccs()
530 cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS); in emit_copy_ccs()
546 *cs++ = XY_CTRL_SURF_COPY_BLT | in emit_copy_ccs()
549 ((num_ccs_blks - 1) & CCS_SIZE_MASK) << CCS_SIZE_SHIFT; in emit_copy_ccs()
550 *cs++ = src_offset; in emit_copy_ccs()
551 *cs++ = rq->engine->instance | in emit_copy_ccs()
553 *cs++ = dst_offset; in emit_copy_ccs()
554 *cs++ = rq->engine->instance | in emit_copy_ccs()
557 cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS); in emit_copy_ccs()
558 *cs++ = MI_NOOP; in emit_copy_ccs()
560 intel_ring_advance(rq, cs); in emit_copy_ccs()
568 const int ver = GRAPHICS_VER(rq->engine->i915); in emit_copy()
569 u32 instance = rq->engine->instance; in emit_copy()
570 u32 *cs; in emit_copy() local
572 cs = intel_ring_begin(rq, ver >= 8 ? 10 : 6); in emit_copy()
573 if (IS_ERR(cs)) in emit_copy()
574 return PTR_ERR(cs); in emit_copy()
577 *cs++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2); in emit_copy()
578 *cs++ = BLT_DEPTH_32 | PAGE_SIZE; in emit_copy()
579 *cs++ = 0; in emit_copy()
580 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4; in emit_copy()
581 *cs++ = dst_offset; in emit_copy()
582 *cs++ = instance; in emit_copy()
583 *cs++ = 0; in emit_copy()
584 *cs++ = PAGE_SIZE; in emit_copy()
585 *cs++ = src_offset; in emit_copy()
586 *cs++ = instance; in emit_copy()
588 *cs++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2); in emit_copy()
589 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE; in emit_copy()
590 *cs++ = 0; in emit_copy()
591 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4; in emit_copy()
592 *cs++ = dst_offset; in emit_copy()
593 *cs++ = instance; in emit_copy()
594 *cs++ = 0; in emit_copy()
595 *cs++ = PAGE_SIZE; in emit_copy()
596 *cs++ = src_offset; in emit_copy()
597 *cs++ = instance; in emit_copy()
600 *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2); in emit_copy()
601 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE; in emit_copy()
602 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE; in emit_copy()
603 *cs++ = dst_offset; in emit_copy()
604 *cs++ = PAGE_SIZE; in emit_copy()
605 *cs++ = src_offset; in emit_copy()
608 intel_ring_advance(rq, cs); in emit_copy()
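The (10 - 2) and (6 - 2) terms above are the command-streamer length
convention (and are what this search's "-2" pattern keeps matching): the
DWORD-length field of a packet encodes the total packet size minus two.
A trivial, purely illustrative helper:

	/* A 10-dword blit packet encodes 8 in its length field; the command
	 * streamer adds the 2 header dwords back when parsing. */
	static inline unsigned int cmd_len_field(unsigned int total_dwords)
	{
		return total_dwords - 2;
	}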
631 * will be taken for the blt. In Flat-CCS supported in calculate_chunk_sz()
646 GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg)); in get_ccs_sg_sgt()
647 len = it->max - it->dma; in get_ccs_sg_sgt()
649 it->dma += bytes_to_cpy; in get_ccs_sg_sgt()
653 bytes_to_cpy -= len; in get_ccs_sg_sgt()
655 it->sg = __sg_next(it->sg); in get_ccs_sg_sgt()
656 it->dma = sg_dma_address(it->sg); in get_ccs_sg_sgt()
657 it->max = it->dma + sg_dma_len(it->sg); in get_ccs_sg_sgt()
673 struct drm_i915_private *i915 = ce->engine->i915; in intel_context_migrate_copy()
683 GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm); in intel_context_migrate_copy()
684 GEM_BUG_ON(IS_DGFX(ce->engine->i915) && (!src_is_lmem && !dst_is_lmem)); in intel_context_migrate_copy()
687 GEM_BUG_ON(ce->ring->size < SZ_64K); in intel_context_migrate_copy()
712 * TODO: Want to move the size mismatch check to a WARN_ON, in intel_context_migrate_copy()
713 * but we still see some smem->lmem requests with the same size. in intel_context_migrate_copy()
725 if (HAS_64K_PAGES(ce->engine->i915)) { in intel_context_migrate_copy()
731 dst_offset = 2 * CHUNK_SZ; in intel_context_migrate_copy()
748 if (rq->engine->emit_init_breadcrumb) { in intel_context_migrate_copy()
749 err = rq->engine->emit_init_breadcrumb(rq); in intel_context_migrate_copy()
768 err = -EINVAL; in intel_context_migrate_copy()
781 err = -EINVAL; in intel_context_migrate_copy()
785 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_migrate_copy()
793 bytes_to_cpy -= len; in intel_context_migrate_copy()
798 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_migrate_copy()
809 err = -EINVAL; in intel_context_migrate_copy()
813 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_migrate_copy()
822 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_migrate_copy()
825 ccs_bytes_to_cpy -= ccs_sz; in intel_context_migrate_copy()
827 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_migrate_copy()
842 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_migrate_copy()
847 /* Arbitration is re-enabled between requests. */ in intel_context_migrate_copy()
869 err = -EINVAL; in intel_context_migrate_copy()
883 struct drm_i915_private *i915 = rq->engine->i915; in emit_clear()
884 int mocs = rq->engine->gt->mocs.uc_index << 1; in emit_clear()
887 u32 *cs; in emit_clear() local
898 cs = intel_ring_begin(rq, ring_sz); in emit_clear()
899 if (IS_ERR(cs)) in emit_clear()
900 return PTR_ERR(cs); in emit_clear()
903 *cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 | in emit_clear()
904 (XY_FAST_COLOR_BLT_DW - 2); in emit_clear()
905 *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) | in emit_clear()
906 (PAGE_SIZE - 1); in emit_clear()
907 *cs++ = 0; in emit_clear()
908 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4; in emit_clear()
909 *cs++ = offset; in emit_clear()
910 *cs++ = rq->engine->instance; in emit_clear()
911 *cs++ = !is_lmem << XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT; in emit_clear()
913 *cs++ = value; in emit_clear()
914 *cs++ = 0; in emit_clear()
915 *cs++ = 0; in emit_clear()
916 *cs++ = 0; in emit_clear()
918 *cs++ = 0; in emit_clear()
919 *cs++ = 0; in emit_clear()
921 *cs++ = 0; in emit_clear()
922 *cs++ = 0; in emit_clear()
923 *cs++ = 0; in emit_clear()
925 *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2); in emit_clear()
926 *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE; in emit_clear()
927 *cs++ = 0; in emit_clear()
928 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4; in emit_clear()
929 *cs++ = offset; in emit_clear()
930 *cs++ = rq->engine->instance; in emit_clear()
931 *cs++ = value; in emit_clear()
932 *cs++ = MI_NOOP; in emit_clear()
934 *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2); in emit_clear()
935 *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE; in emit_clear()
936 *cs++ = 0; in emit_clear()
937 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4; in emit_clear()
938 *cs++ = offset; in emit_clear()
939 *cs++ = value; in emit_clear()
942 intel_ring_advance(rq, cs); in emit_clear()
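The intel_ring_begin(rq, ring_sz) at the top of this function sizes its
reservation per emit path. A plausible reconstruction of that selection,
inferred from the three paths shown (16 dwords for the XY_FAST_COLOR_BLT
packet, 8 for the gen8+ XY_COLOR_BLT including its MI_NOOP pad, 6 for the
legacy blit), not a verbatim quote of the driver:

	if (HAS_FLAT_CCS(i915) && ver >= 12)
		ring_sz = XY_FAST_COLOR_BLT_DW;	/* 16 */
	else if (ver >= 8)
		ring_sz = 8;
	else
		ring_sz = 6;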
955 struct drm_i915_private *i915 = ce->engine->i915; in intel_context_migrate_clear()
961 GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm); in intel_context_migrate_clear()
964 GEM_BUG_ON(ce->ring->size < SZ_64K); in intel_context_migrate_clear()
984 if (rq->engine->emit_init_breadcrumb) { in intel_context_migrate_clear()
985 err = rq->engine->emit_init_breadcrumb(rq); in intel_context_migrate_clear()
1004 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_migrate_clear()
1023 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_migrate_clear()
1025 /* Arbitration is re-enabled between requests. */ in intel_context_migrate_clear()
1056 if (!m->context) in intel_migrate_copy()
1057 return -ENODEV; in intel_migrate_copy()
1061 ce = intel_context_get(m->context); in intel_migrate_copy()
1093 if (!m->context) in intel_migrate_clear()
1094 return -ENODEV; in intel_migrate_clear()
1098 ce = intel_context_get(m->context); in intel_migrate_clear()
1118 ce = fetch_and_zero(&m->context); in intel_migrate_fini()