Lines matching "+full:1 +full:- +full:eng" in drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c:

 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL

#include <linux/io-64-nonatomic-lo-hi.h>

 * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0

In amdgpu_gmc_pdb0_alloc():

        u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
        uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21;
        uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) - 1) >> pde0_page_shift;
        ...
        bp.size = PAGE_ALIGN((npdes + 1) * 8);
        ...
        r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo);
        ...
        r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false);
        ...
        r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM);
        ...
        r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0);
        ...
        amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
        ...
        amdgpu_bo_unpin(adev->gmc.pdb0_bo);
        ...
        amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
        ...
        amdgpu_bo_unref(&adev->gmc.pdb0_bo);
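
For scale, the same sizing arithmetic as a standalone sketch; the hive geometry
and block size below are hypothetical example values, not queried hardware state:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t node_segment_size = 32ULL << 30;   /* 32 GiB per node (example) */
        uint32_t num_physical_nodes = 4;            /* 4-node hive (example) */
        uint32_t block_size = 9;                    /* vmid0_page_table_block_size (example) */

        uint64_t vram_size = node_segment_size * num_physical_nodes;
        uint32_t pde0_page_shift = block_size + 21; /* each PDE0 covers 2 MiB << 9 = 1 GiB */
        uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) - 1) >> pde0_page_shift;

        /* npdes entries for VRAM plus one trailing entry for the GART PTB,
         * 8 bytes each: here (128 + 1) * 8 = 1032 bytes -> one 4 KiB page. */
        printf("npdes = %u, pdb0 bytes = %u\n", npdes, (npdes + 1) * 8);
        return 0;
}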

 * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO

In amdgpu_gmc_get_pde_for_bo():

        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        ...
        switch (bo->tbo.resource->mem_type) {
        ...
                *addr = bo->tbo.ttm->dma_address[0];
        ...
        *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource);

 * amdgpu_gmc_pd_addr - return the address of the root directory

In amdgpu_gmc_pd_addr():

        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        ...
        if (adev->asic_type >= CHIP_VEGA10) {
        ...
                amdgpu_gmc_get_pde_for_bo(bo, -1, &pd_addr, &flags);

 * amdgpu_gmc_set_pte_pde - update the page tables using CPU

 * amdgpu_gmc_agp_addr - return the address in the AGP address space

In amdgpu_gmc_agp_addr():

        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        ...
        if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
        ...
        if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
        ...
        return adev->gmc.agp_start + bo->ttm->dma_address[0];
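
The three checks above amount to a small pure function: one-page, non-cacheable
buffers map straight through the AGP aperture at agp_start plus their bus
address. A userspace model, with BO_INVALID_OFFSET as a stand-in sentinel for
the driver's "not mappable" return value:

#include <stdbool.h>
#include <stdint.h>

#define BO_INVALID_OFFSET (~0ULL)       /* stand-in for the driver's sentinel */

static uint64_t agp_addr(uint64_t dma0, unsigned long num_pages, bool cached,
                         uint64_t agp_start, uint64_t agp_size,
                         uint64_t page_size)
{
        if (num_pages != 1 || cached)
                return BO_INVALID_OFFSET;       /* only single, uncached pages */
        if (dma0 + page_size >= agp_size)
                return BO_INVALID_OFFSET;       /* must fit inside the aperture */
        return agp_start + dma0;
}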

 * amdgpu_gmc_vram_location - try to find VRAM location

In amdgpu_gmc_vram_location():

        mc->vram_start = base;
        mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
        if (limit < mc->real_vram_size)
                mc->real_vram_size = limit;
        ...
        if (vis_limit && vis_limit < mc->visible_vram_size)
                mc->visible_vram_size = vis_limit;
        ...
        if (mc->real_vram_size < mc->visible_vram_size)
                mc->visible_vram_size = mc->real_vram_size;
        ...
        if (mc->xgmi.num_physical_nodes == 0) {
                mc->fb_start = mc->vram_start;
                mc->fb_end = mc->vram_end;
        ...
        dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
                 mc->mc_vram_size >> 20, mc->vram_start,
                 mc->vram_end, mc->real_vram_size >> 20);
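
Worked through with hypothetical numbers: base = 0 and mc_vram_size = 8 GiB give
vram_start = 0x0 and vram_end = 0x1FFFFFFFF; a 6 GiB MC limit then clamps
real_vram_size down to 6 GiB, a 256 MiB BAR clamps visible_vram_size to 256 MiB,
and the final check only matters when the visible window would otherwise exceed
usable VRAM.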

 * amdgpu_gmc_sysvm_location - place vram and gart in sysvm aperture

In amdgpu_gmc_sysvm_location():

        u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;
        mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
        mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
        mc->gart_start = hive_vram_end + 1;
        mc->gart_end = mc->gart_start + mc->gart_size - 1;
        mc->fb_start = hive_vram_start;
        mc->fb_end = hive_vram_end;
        dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
                 mc->mc_vram_size >> 20, mc->vram_start,
                 mc->vram_end, mc->real_vram_size >> 20);
        dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
                 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
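
A standalone sketch of the resulting layout for a hypothetical 4-node XGMI hive
with 16 GiB segments, viewed from physical node 2 (all values are examples):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t seg = 16ULL << 30;           /* node_segment_size (example) */
        uint64_t nodes = 4, id = 2;           /* hive size / this node (example) */
        uint64_t gart_size = 512ULL << 20;    /* example GART size */

        uint64_t hive_end = seg * nodes - 1;  /* FB spans all hive VRAM: 0..hive_end */
        uint64_t vram_start = seg * id;       /* this node's own segment */
        uint64_t gart_start = hive_end + 1;   /* GART sits right above the hive */

        printf("FB   0x%016llx - 0x%016llx\n", 0ULL, (unsigned long long)hive_end);
        printf("VRAM 0x%016llx - 0x%016llx\n", (unsigned long long)vram_start,
               (unsigned long long)(vram_start + seg - 1));
        printf("GART 0x%016llx - 0x%016llx\n", (unsigned long long)gart_start,
               (unsigned long long)(gart_start + gart_size - 1));
        return 0;
}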

 * amdgpu_gmc_gart_location - try to find GART location

In amdgpu_gmc_gart_location():

        u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);
        ...
        size_bf = mc->fb_start;
        size_af = max_mc_address + 1 - ALIGN(mc->fb_end + 1, four_gb);
        ...
        if (mc->gart_size > max(size_bf, size_af)) {
                dev_warn(adev->dev, "limiting GART\n");
                mc->gart_size = max(size_bf, size_af);
        }

        if ((size_bf >= mc->gart_size && size_bf < size_af) ||
            (size_af < mc->gart_size))
                mc->gart_start = 0;
        else
                mc->gart_start = max_mc_address - mc->gart_size + 1;

        mc->gart_start &= ~(four_gb - 1);
        mc->gart_end = mc->gart_start + mc->gart_size - 1;
        dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
                 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
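
Isolated, the placement rule is: measure the hole below the FB aperture
(size_bf) and the 4 GiB-aligned hole above it (size_af), then start GART at 0
or at the top of the usable address space, 4 GiB aligned. A sketch with
stand-in parameter names:

#include <stdint.h>

#define FOUR_GB 0x100000000ULL

static uint64_t align_up(uint64_t x, uint64_t a)        /* a is a power of two */
{
        return (x + a - 1) & ~(a - 1);
}

static uint64_t place_gart(uint64_t fb_start, uint64_t fb_end,
                           uint64_t max_mc_address, uint64_t gart_size)
{
        uint64_t size_bf = fb_start;    /* hole below the FB aperture */
        uint64_t size_af = max_mc_address + 1 - align_up(fb_end + 1, FOUR_GB);
        uint64_t gart_start;

        if ((size_bf >= gart_size && size_bf < size_af) || size_af < gart_size)
                gart_start = 0;                                 /* below the FB */
        else
                gart_start = max_mc_address - gart_size + 1;    /* at the top */
        return gart_start & ~(FOUR_GB - 1);     /* 4 GiB aligned base */
}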

 * amdgpu_gmc_agp_location - try to find AGP location

In amdgpu_gmc_agp_location():

        const uint64_t sixteen_gb = 1ULL << 34;
        const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
        ...
        mc->agp_start = 0xffffffffffff;
        mc->agp_end = 0x0;
        mc->agp_size = 0;
        ...
        if (mc->fb_start > mc->gart_start) {
                size_bf = (mc->fb_start & sixteen_gb_mask) -
                        ALIGN(mc->gart_end + 1, sixteen_gb);
                size_af = mc->mc_mask + 1 - ALIGN(mc->fb_end + 1, sixteen_gb);
        } else {
                size_bf = mc->fb_start & sixteen_gb_mask;
                size_af = (mc->gart_start & sixteen_gb_mask) -
                        ALIGN(mc->fb_end + 1, sixteen_gb);
        }

        if (size_bf > size_af) {
                mc->agp_start = (mc->fb_start - size_bf) & sixteen_gb_mask;
                mc->agp_size = size_bf;
        } else {
                mc->agp_start = ALIGN(mc->fb_end + 1, sixteen_gb);
                mc->agp_size = size_af;
        }

        mc->agp_end = mc->agp_start + mc->agp_size - 1;
        dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
                 mc->agp_size >> 20, mc->agp_start, mc->agp_end);
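
Same pattern as the GART placement, but with 16 GiB granularity and two
candidate holes whose shape depends on whether GART ended up below or above the
FB aperture. Extracted as a sketch (names are stand-ins):

#include <stdint.h>

#define SIXTEEN_GB      (1ULL << 34)
#define SIXTEEN_GB_MASK (~(SIXTEEN_GB - 1))

static uint64_t align16g(uint64_t x)    /* round up to a 16 GiB boundary */
{
        return (x + SIXTEEN_GB - 1) & SIXTEEN_GB_MASK;
}

static void place_agp(uint64_t fb_start, uint64_t fb_end, uint64_t gart_start,
                      uint64_t gart_end, uint64_t mc_mask,
                      uint64_t *agp_start, uint64_t *agp_size)
{
        uint64_t size_bf, size_af;

        if (fb_start > gart_start) {    /* GART below FB */
                size_bf = (fb_start & SIXTEEN_GB_MASK) - align16g(gart_end + 1);
                size_af = mc_mask + 1 - align16g(fb_end + 1);
        } else {                        /* GART above FB */
                size_bf = fb_start & SIXTEEN_GB_MASK;
                size_af = (gart_start & SIXTEEN_GB_MASK) - align16g(fb_end + 1);
        }

        if (size_bf > size_af) {        /* take the larger aligned hole */
                *agp_start = (fb_start - size_bf) & SIXTEEN_GB_MASK;
                *agp_size = size_bf;
        } else {
                *agp_start = align16g(fb_end + 1);
                *agp_size = size_af;
        }
}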

 * amdgpu_gmc_fault_key - get hash key from vm fault address and pasid
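
A minimal sketch of such a key, folding the faulting page address and the PASID
into one 64-bit value; the exact packing here is an assumption, not a quote of
the in-tree helper:

/* Hypothetical stand-in for the static helper this kerneldoc describes. */
static inline uint64_t fault_key(uint64_t addr, uint16_t pasid)
{
        return addr << 4 | pasid;
}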

 * amdgpu_gmc_filter_faults - filter VM faults

In amdgpu_gmc_filter_faults():

        struct amdgpu_gmc *gmc = &adev->gmc;
        ...
        if (amdgpu_ih_ts_after(timestamp, ih->processed_timestamp))
        ...
        stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
                AMDGPU_GMC_FAULT_TIMEOUT;
        if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
        ...
        fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
        while (fault->timestamp >= stamp) {
        ...
                if (atomic64_read(&fault->key) == key) {
        ...
                        if (fault->timestamp_expiry != 0 &&
                            amdgpu_ih_ts_after(fault->timestamp_expiry,
                                               timestamp))
        ...
                tmp = fault->timestamp;
                fault = &gmc->fault_ring[fault->next];
        ...
                if (fault->timestamp >= tmp)
        ...
        fault = &gmc->fault_ring[gmc->last_fault];
        atomic64_set(&fault->key, key);
        fault->timestamp = timestamp;
        ...
        fault->next = gmc->fault_hash[hash].idx;
        gmc->fault_hash[hash].idx = gmc->last_fault++;
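
Condensed, the filter is a fixed-size ring indexed through a per-key hash, with
chains ordered by decreasing timestamp. A self-contained model under those
assumptions; the sizes and timeout are example values, and the kernel version
additionally handles expiry stamps and atomics:

#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE       256     /* ring/hash sizes and timeout are examples */
#define HASH_SIZE       256
#define FAULT_TIMEOUT   5000

struct fault { uint64_t key; uint64_t timestamp; uint8_t next; };

static struct fault ring[RING_SIZE];
static uint8_t head[HASH_SIZE];
static uint8_t last_fault;

/* Returns true if this fault was already seen within the timeout window. */
static bool filter_fault(uint64_t key, uint64_t timestamp)
{
        uint64_t stamp = timestamp > FAULT_TIMEOUT ? timestamp - FAULT_TIMEOUT : 0;
        struct fault *fault = &ring[head[key % HASH_SIZE]];
        uint64_t tmp;

        while (fault->timestamp >= stamp) {
                if (fault->key == key)
                        return true;    /* duplicate: drop it */
                tmp = fault->timestamp;
                fault = &ring[fault->next];
                if (fault->timestamp >= tmp)
                        break;          /* chain wrapped to a newer entry */
        }

        /* New fault: overwrite the oldest ring slot and link it at the
         * head of its hash chain; the uint8_t index wraps at 256. */
        fault = &ring[last_fault];
        fault->key = key;
        fault->timestamp = timestamp;
        fault->next = head[key % HASH_SIZE];
        head[key % HASH_SIZE] = last_fault++;
        return false;
}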

 * amdgpu_gmc_filter_faults_remove - remove address from VM faults filter

In amdgpu_gmc_filter_faults_remove():

        struct amdgpu_gmc *gmc = &adev->gmc;
        ...
        ih = adev->irq.retry_cam_enabled ? &adev->irq.ih_soft : &adev->irq.ih1;
        ...
        last_ts = amdgpu_ih_decode_iv_ts(adev, ih, last_wptr, -1);
        ...
        fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
        do {
                if (atomic64_read(&fault->key) == key) {
        ...
                        fault->timestamp_expiry = last_ts;
        ...
                tmp = fault->timestamp;
                fault = &gmc->fault_ring[fault->next];
        } while (fault->timestamp < tmp);

 * Engine 0, 1, 4~16: amdgpu ring,

In amdgpu_gmc_allocate_vm_inv_eng():

        /* init the vm inv eng for all vmhubs */
        for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
        ...
                if (adev->enable_mes)
                        vm_inv_engs[i] &= ~(1 << 5);
        ...
        for (i = 0; i < adev->num_rings; ++i) {
                ring = adev->rings[i];
                vmhub = ring->vm_hub;
        ...
                if (ring == &adev->mes.ring)
        ...
                        dev_err(adev->dev, "no VM inv eng for ring %s\n",
                                ring->name);
                        return -EINVAL;
        ...
                ring->vm_inv_eng = inv_eng - 1;
                vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
        ...
                dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
                         ring->name, ring->vm_inv_eng, ring->vm_hub);
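
The allocation itself is a first-fit over a per-hub bitmask of free engines
(with engine 5 carved out when MES is enabled, per the mask tweak above). In
isolation, as a sketch:

#include <strings.h>    /* ffs() */

/* Hand out the lowest free invalidation engine from a per-hub bitmask;
 * returns the 0-based engine number, or -1 if the hub is exhausted. */
static int alloc_inv_eng(unsigned int *free_engs)
{
        int inv_eng = ffs(*free_engs);

        if (!inv_eng)
                return -1;
        *free_engs &= ~(1u << (inv_eng - 1));
        return inv_eng - 1;
}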

 * amdgpu_gmc_tmz_set -- check and set if a device supports TMZ

In amdgpu_gmc_tmz_set():

        switch (adev->ip_versions[GC_HWIP][0]) {
        ...
        case IP_VERSION(9, 1, 0):
        ...
        /* GC 11.0.1 */
        case IP_VERSION(11, 0, 1):
        ...
                        adev->gmc.tmz_enabled = false;
                        dev_info(adev->dev,
        ...
                } else {
                        adev->gmc.tmz_enabled = true;
                        dev_info(adev->dev,
        ...
        case IP_VERSION(10, 1, 10):
        case IP_VERSION(10, 1, 1):
        case IP_VERSION(10, 1, 2):
        case IP_VERSION(10, 1, 3):
        ...
        case IP_VERSION(10, 3, 1):
        ...
                if (amdgpu_tmz < 1) {
                        adev->gmc.tmz_enabled = false;
                        dev_info(adev->dev,
        ...
                } else {
                        adev->gmc.tmz_enabled = true;
                        dev_info(adev->dev,
        ...
        default:
                adev->gmc.tmz_enabled = false;
                dev_info(adev->dev,

 * amdgpu_gmc_noretry_set -- set per asic noretry defaults
 * ...
 * Set a per asic default for the no-retry parameter.

In amdgpu_gmc_noretry_set():

        struct amdgpu_gmc *gmc = &adev->gmc;
        uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
        bool noretry_default = (gc_ver == IP_VERSION(9, 0, 1) ||
        ...
                                gc_ver == IP_VERSION(9, 4, 1) ||
        ...
        gmc->noretry = (amdgpu_noretry == -1) ? noretry_default : amdgpu_noretry;

In amdgpu_gmc_set_vm_fault_masks():

        hub = &adev->vmhub[hub_type];
        ...
                reg = hub->vm_context0_cntl + hub->ctx_distance * i;
        ...
                        tmp |= hub->vm_cntx_cntl_vm_fault;
        ...
                        tmp &= ~hub->vm_cntx_cntl_vm_fault;
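
The per-VMID register pattern above, modeled in userspace: context i's control
register sits at a fixed stride from context 0, and the fault bits are flipped
with a read-modify-write. All names here are stand-ins, and plain memory stands
in for MMIO:

#include <stdbool.h>
#include <stdint.h>

static void set_fault_mask(uint32_t *regs, uint32_t ctx0_off, uint32_t stride,
                           unsigned int vmid, uint32_t fault_bits, bool enable)
{
        uint32_t *reg = &regs[ctx0_off + stride * vmid];        /* ctx i's CNTL */
        uint32_t tmp = *reg;                                    /* read */

        if (enable)
                tmp |= fault_bits;      /* modify: enable fault handling bits */
        else
                tmp &= ~fault_bits;
        *reg = tmp;                     /* write back */
}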

In amdgpu_gmc_get_vbios_allocations():

        adev->mman.stolen_reserved_offset = 0;
        adev->mman.stolen_reserved_size = 0;
        ...
        switch (adev->asic_type) {
        ...
                adev->mman.keep_stolen_vga_memory = true;
        ...
                adev->mman.stolen_reserved_offset = 0x500000;
                adev->mman.stolen_reserved_size = 0x200000;
        ...
                adev->mman.keep_stolen_vga_memory = true;
        ...
                adev->mman.stolen_reserved_offset = 0x1ffb0000;
                adev->mman.stolen_reserved_size = 64 * PAGE_SIZE;
        ...
        default:
                adev->mman.keep_stolen_vga_memory = false;
        ...
        if (adev->mman.keep_stolen_vga_memory)
        ...
        /* set to 0 if the pre-OS buffer uses up most of vram */
        if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
        ...
                adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION;
                adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size;
        } else {
                adev->mman.stolen_vga_size = size;
                adev->mman.stolen_extended_size = 0;
        }
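
The tail of the function reduces to: discard stolen memory entirely when the
pre-OS buffer would leave less than 8 MiB of VRAM, otherwise split it into the
VGA allocation the driver always keeps plus an extended remainder. A sketch;
the 9 MiB constant is assumed to mirror AMDGPU_VBIOS_VGA_ALLOCATION:

#include <stdint.h>

#define VGA_ALLOCATION (9ULL << 20)     /* assumed AMDGPU_VBIOS_VGA_ALLOCATION */

static void split_stolen(uint64_t size, uint64_t real_vram_size,
                         uint64_t *vga_size, uint64_t *extended_size)
{
        /* set to 0 if the pre-OS buffer uses up most of vram */
        if (real_vram_size - size < (8ULL << 20))
                size = 0;

        if (size > VGA_ALLOCATION) {
                *vga_size = VGA_ALLOCATION;
                *extended_size = size - VGA_ALLOCATION;
        } else {
                *vga_size = size;
                *extended_size = 0;
        }
}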

 * amdgpu_gmc_init_pdb0 - initialize PDB0
 * ...
 * a 2-level system VM page table: PDB0->PTB, to cover both
 * ...
 * P bit to 1, pointing to VRAM. The n+1'th entry points

In amdgpu_gmc_init_pdb0():

        uint64_t flags = adev->gart.gart_pte_flags; /* TODO: it is UC; explore NC/RW? */
        ...
        u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
        u64 pde0_page_size = (1ULL << adev->gmc.vmid0_page_table_block_size) << 21;
        u64 vram_addr = adev->vm_manager.vram_base_offset -
                adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
        ...
        u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
        ...
        flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
        ...
                amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags);
        ...
        /* The n+1'th PDE0 entry points to a huge
         * ... */
        amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
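
The loop structure, reduced to its essentials: n PDE0 slots are written as huge
PTEs marching through hive VRAM in pde0_page_size strides, and slot n+1 gets
the GART PTB address. A model with an illustrative valid bit, not the real PTE
flag layout:

#include <stdint.h>

#define F_VALID (1ULL << 0)     /* illustrative "present" bit, not HW layout */

static void init_pdb0_model(uint64_t *pdb0, uint32_t npdes, uint64_t vram_addr,
                            uint64_t pde0_page_size, uint64_t gart_ptb_pa)
{
        uint32_t i;

        /* Entries 0..n-1: PDE0-as-PTE huge mappings covering all hive VRAM. */
        for (i = 0; i < npdes; i++, vram_addr += pde0_page_size)
                pdb0[i] = vram_addr | F_VALID;

        /* Entry n: an ordinary PDE pointing at the GART page table. */
        pdb0[i] = gart_ptb_pa | F_VALID;
}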

 * amdgpu_gmc_vram_mc2pa - calculate vram buffer's physical address from MC

In amdgpu_gmc_vram_mc2pa():

        return mc_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset;

 * amdgpu_gmc_vram_pa - calculate vram buffer object's physical address from

 * amdgpu_gmc_vram_cpu_pa - calculate vram buffer object's physical address

In amdgpu_gmc_vram_cpu_pa():

        return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base;
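
Side by side, the two translations both rebase an MC (GPU aperture) address
onto a physical base; only the target aperture differs. As a sketch:

#include <stdint.h>

/* MC address -> GPU physical address (vram_base_offset is the GPU-side base). */
static uint64_t vram_mc2pa(uint64_t mc_addr, uint64_t vram_start,
                           uint64_t vram_base_offset)
{
        return mc_addr - vram_start + vram_base_offset;
}

/* MC address -> CPU physical address (aper_base is the PCI BAR base). */
static uint64_t vram_cpu_pa(uint64_t mc_addr, uint64_t vram_start,
                            uint64_t aper_base)
{
        return mc_addr - vram_start + aper_base;
}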

In amdgpu_gmc_vram_checking():

        /* Note: checking each byte of the whole 1M BO would cost too many
         * ... */
        ...
        ret = memcmp(vram_ptr + size - 10, cptr, 10);

In current_memory_partition_show():

        mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);

In amdgpu_gmc_sysfs_init():

        if (!adev->gmc.gmc_funcs->query_mem_partition_mode)
        ...
        return device_create_file(adev->dev,
                                  &dev_attr_current_memory_partition);

In amdgpu_gmc_sysfs_fini():

        device_remove_file(adev->dev, &dev_attr_current_memory_partition);