Lines matching refs: adev (drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c)
415 static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev, in gmc_v9_0_ecc_interrupt_state() argument
425 if (adev->asic_type >= CHIP_VEGA20) in gmc_v9_0_ecc_interrupt_state()
466 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, in gmc_v9_0_vm_fault_interrupt_state() argument
484 for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) { in gmc_v9_0_vm_fault_interrupt_state()
485 hub = &adev->vmhub[j]; in gmc_v9_0_vm_fault_interrupt_state()
494 if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0))) in gmc_v9_0_vm_fault_interrupt_state()
512 for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) { in gmc_v9_0_vm_fault_interrupt_state()
513 hub = &adev->vmhub[j]; in gmc_v9_0_vm_fault_interrupt_state()
522 if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0))) in gmc_v9_0_vm_fault_interrupt_state()
546 static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, in gmc_v9_0_process_interrupt() argument
569 hub = &adev->vmhub[AMDGPU_MMHUB0(node_id / 4)]; in gmc_v9_0_process_interrupt()
572 hub = &adev->vmhub[AMDGPU_MMHUB1(0)]; in gmc_v9_0_process_interrupt()
575 if (adev->gfx.funcs->ih_node_to_logical_xcc) { in gmc_v9_0_process_interrupt()
576 xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev, in gmc_v9_0_process_interrupt()
581 hub = &adev->vmhub[xcc_id]; in gmc_v9_0_process_interrupt()
585 if (adev->irq.retry_cam_enabled) { in gmc_v9_0_process_interrupt()
589 if (entry->ih == &adev->irq.ih) { in gmc_v9_0_process_interrupt()
590 amdgpu_irq_delegate(adev, entry, 8); in gmc_v9_0_process_interrupt()
596 ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id, in gmc_v9_0_process_interrupt()
598 WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index); in gmc_v9_0_process_interrupt()
603 if (entry->ih != &adev->irq.ih_soft && in gmc_v9_0_process_interrupt()
604 amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid, in gmc_v9_0_process_interrupt()
611 if (entry->ih == &adev->irq.ih) { in gmc_v9_0_process_interrupt()
612 amdgpu_irq_delegate(adev, entry, 8); in gmc_v9_0_process_interrupt()
619 if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id, in gmc_v9_0_process_interrupt()
630 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); in gmc_v9_0_process_interrupt()
632 dev_err(adev->dev, in gmc_v9_0_process_interrupt()
638 dev_err(adev->dev, " in page starting at address 0x%016llx from IH client 0x%x (%s)\n", in gmc_v9_0_process_interrupt()
642 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) in gmc_v9_0_process_interrupt()
643 dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n", in gmc_v9_0_process_interrupt()
647 if (amdgpu_sriov_vf(adev)) in gmc_v9_0_process_interrupt()
656 (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) in gmc_v9_0_process_interrupt()
664 dev_err(adev->dev, in gmc_v9_0_process_interrupt()
668 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", in gmc_v9_0_process_interrupt()
673 switch (adev->ip_versions[MMHUB_HWIP][0]) { in gmc_v9_0_process_interrupt()
702 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", in gmc_v9_0_process_interrupt()
705 dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", in gmc_v9_0_process_interrupt()
708 dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n", in gmc_v9_0_process_interrupt()
711 dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n", in gmc_v9_0_process_interrupt()
714 dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", in gmc_v9_0_process_interrupt()
717 dev_err(adev->dev, "\t RW: 0x%x\n", rw); in gmc_v9_0_process_interrupt()
732 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_irq_funcs() argument
734 adev->gmc.vm_fault.num_types = 1; in gmc_v9_0_set_irq_funcs()
735 adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs; in gmc_v9_0_set_irq_funcs()
737 if (!amdgpu_sriov_vf(adev) && in gmc_v9_0_set_irq_funcs()
738 !adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_set_irq_funcs()
739 adev->gmc.ecc_irq.num_types = 1; in gmc_v9_0_set_irq_funcs()
740 adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs; in gmc_v9_0_set_irq_funcs()
770 static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, in gmc_v9_0_use_invalidate_semaphore() argument
773 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || in gmc_v9_0_use_invalidate_semaphore()
774 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) in gmc_v9_0_use_invalidate_semaphore()
779 (!amdgpu_sriov_vf(adev)) && in gmc_v9_0_use_invalidate_semaphore()
780 (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) && in gmc_v9_0_use_invalidate_semaphore()
781 (adev->apu_flags & AMD_APU_IS_PICASSO)))); in gmc_v9_0_use_invalidate_semaphore()
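The double negation on lines 780-781 is easy to misread: for the older parts handled by this branch, the invalidation semaphore is skipped only for a Picasso APU that does not also carry the RAVEN2 flag. A self-contained model of just that clause (the flag bit values below are illustrative stand-ins for the driver's AMD_APU_IS_* definitions):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the AMD_APU_IS_* flag bits. */
#define APU_IS_RAVEN2  (1u << 0)
#define APU_IS_PICASSO (1u << 1)

/* Mirrors the clause on lines 780-781: true unless the part is a
 * Picasso without the RAVEN2 flag. */
static bool semaphore_ok(unsigned int apu_flags)
{
	return !(!(apu_flags & APU_IS_RAVEN2) &&
		 (apu_flags & APU_IS_PICASSO));
}

int main(void)
{
	printf("plain Picasso:  %d\n", semaphore_ok(APU_IS_PICASSO));                  /* 0 */
	printf("Raven2 Picasso: %d\n", semaphore_ok(APU_IS_PICASSO | APU_IS_RAVEN2));  /* 1 */
	printf("non-APU:        %d\n", semaphore_ok(0));                               /* 1 */
	return 0;
}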
784 static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, in gmc_v9_0_get_atc_vmid_pasid_mapping_info() argument
813 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, in gmc_v9_0_flush_gpu_tlb() argument
816 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); in gmc_v9_0_flush_gpu_tlb()
823 hub = &adev->vmhub[vmhub]; in gmc_v9_0_flush_gpu_tlb()
824 if (adev->gmc.xgmi.num_physical_nodes && in gmc_v9_0_flush_gpu_tlb()
825 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) { in gmc_v9_0_flush_gpu_tlb()
835 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) && in gmc_v9_0_flush_gpu_tlb()
836 adev->rev_id == 0) { in gmc_v9_0_flush_gpu_tlb()
847 if (adev->gfx.kiq[0].ring.sched.ready && in gmc_v9_0_flush_gpu_tlb()
848 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && in gmc_v9_0_flush_gpu_tlb()
849 down_read_trylock(&adev->reset_domain->sem)) { in gmc_v9_0_flush_gpu_tlb()
853 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, in gmc_v9_0_flush_gpu_tlb()
855 up_read(&adev->reset_domain->sem); in gmc_v9_0_flush_gpu_tlb()
859 spin_lock(&adev->gmc.invalidate_lock); in gmc_v9_0_flush_gpu_tlb()
870 for (j = 0; j < adev->usec_timeout; j++) { in gmc_v9_0_flush_gpu_tlb()
881 if (j >= adev->usec_timeout) in gmc_v9_0_flush_gpu_tlb()
897 (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) in gmc_v9_0_flush_gpu_tlb()
901 for (j = 0; j < adev->usec_timeout; j++) { in gmc_v9_0_flush_gpu_tlb()
927 spin_unlock(&adev->gmc.invalidate_lock); in gmc_v9_0_flush_gpu_tlb()
929 if (j < adev->usec_timeout) in gmc_v9_0_flush_gpu_tlb()
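Lines 859-929 follow the driver's usual request/ack pattern: take the invalidate lock, write the invalidation request, then poll the ack register up to adev->usec_timeout times, with the final j < adev->usec_timeout check distinguishing success from timeout. A minimal user-space model of that loop (the register accessors are fakes; the driver reads and writes the hub's vm_inv_eng registers):

#include <stdint.h>
#include <stdio.h>

/* Fake MMIO: writing the request makes the "hardware" raise the ack bit. */
static uint32_t ack_reg;
static void write_req(uint32_t inv_req) { (void)inv_req; ack_reg |= 1u; }
static uint32_t read_ack(void) { return ack_reg; }

/* Model of the poll in gmc_v9_0_flush_gpu_tlb(): returns 0 on ack,
 * -1 on timeout (the j >= usec_timeout case in the listing). */
static int flush_tlb(uint32_t inv_req, unsigned int usec_timeout)
{
	unsigned int j;

	write_req(inv_req);
	for (j = 0; j < usec_timeout; j++) {
		if (read_ack() & 1u)
			break;
		/* the driver delays ~1us between reads */
	}
	return (j < usec_timeout) ? 0 : -1;
}

int main(void)
{
	printf("flush: %s\n", flush_tlb(0x1, 100) ? "timeout" : "acked");
	return 0;
}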
946 static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, in gmc_v9_0_flush_gpu_tlb_pasid() argument
955 u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout; in gmc_v9_0_flush_gpu_tlb_pasid()
956 struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring; in gmc_v9_0_flush_gpu_tlb_pasid()
957 struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst]; in gmc_v9_0_flush_gpu_tlb_pasid()
959 if (amdgpu_in_reset(adev)) in gmc_v9_0_flush_gpu_tlb_pasid()
962 if (ring->sched.ready && down_read_trylock(&adev->reset_domain->sem)) { in gmc_v9_0_flush_gpu_tlb_pasid()
969 bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes && in gmc_v9_0_flush_gpu_tlb_pasid()
970 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)); in gmc_v9_0_flush_gpu_tlb_pasid()
977 spin_lock(&adev->gfx.kiq[inst].ring_lock); in gmc_v9_0_flush_gpu_tlb_pasid()
985 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) && in gmc_v9_0_flush_gpu_tlb_pasid()
986 adev->rev_id == 0) in gmc_v9_0_flush_gpu_tlb_pasid()
995 spin_unlock(&adev->gfx.kiq[inst].ring_lock); in gmc_v9_0_flush_gpu_tlb_pasid()
996 up_read(&adev->reset_domain->sem); in gmc_v9_0_flush_gpu_tlb_pasid()
1001 spin_unlock(&adev->gfx.kiq[inst].ring_lock); in gmc_v9_0_flush_gpu_tlb_pasid()
1004 dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); in gmc_v9_0_flush_gpu_tlb_pasid()
1005 up_read(&adev->reset_domain->sem); in gmc_v9_0_flush_gpu_tlb_pasid()
1008 up_read(&adev->reset_domain->sem); in gmc_v9_0_flush_gpu_tlb_pasid()
1014 ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid, in gmc_v9_0_flush_gpu_tlb_pasid()
1018 for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) in gmc_v9_0_flush_gpu_tlb_pasid()
1019 gmc_v9_0_flush_gpu_tlb(adev, vmid, in gmc_v9_0_flush_gpu_tlb_pasid()
1022 gmc_v9_0_flush_gpu_tlb(adev, vmid, in gmc_v9_0_flush_gpu_tlb_pasid()
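Lines 1014-1022 show the fallback path when the KIQ ring is unavailable: walk the VMID slots, compare each slot's PASID mapping against the target, and flush every populated hub for a matching VMID. A compact model of that search (the mapping table is a plain array here; the driver reads it from the ATC registers via gmc_v9_0_get_atc_vmid_pasid_mapping_info()):

#include <stdint.h>
#include <stdio.h>

#define NUM_VMIDS 16

/* Stand-in for the ATC VMID-to-PASID mapping registers. */
static uint16_t vmid_to_pasid[NUM_VMIDS];

static void flush_vmid(unsigned int vmid, unsigned int hub)
{
	printf("flush vmid %u on hub %u\n", vmid, hub);
}

/* Model of the loop on lines 1014-1022: flush all populated hubs for
 * every VMID currently mapped to the given PASID. */
static void flush_by_pasid(uint16_t pasid, uint32_t vmhubs_mask)
{
	for (unsigned int vmid = 1; vmid < NUM_VMIDS; vmid++) {
		if (vmid_to_pasid[vmid] != pasid)
			continue;
		for (unsigned int hub = 0; hub < 32; hub++)
			if (vmhubs_mask & (1u << hub))
				flush_vmid(vmid, hub);
	}
}

int main(void)
{
	vmid_to_pasid[3] = 0x1234;
	flush_by_pasid(0x1234, 0x3);	/* two hubs populated */
	return 0;
}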
1036 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub); in gmc_v9_0_emit_flush_gpu_tlb()
1037 struct amdgpu_device *adev = ring->adev; in gmc_v9_0_emit_flush_gpu_tlb() local
1038 struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub]; in gmc_v9_0_emit_flush_gpu_tlb()
1085 struct amdgpu_device *adev = ring->adev; in gmc_v9_0_emit_pasid_mapping() local
1132 static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) in gmc_v9_0_map_mtype() argument
1153 static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, in gmc_v9_0_get_vm_pde() argument
1157 *addr = amdgpu_gmc_vram_mc2pa(adev, *addr); in gmc_v9_0_get_vm_pde()
1160 if (!adev->gmc.translate_further) in gmc_v9_0_get_vm_pde()
1179 static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, in gmc_v9_0_get_coherence_flags() argument
1193 switch (adev->ip_versions[GC_HWIP][0]) { in gmc_v9_0_get_coherence_flags()
1197 if (bo_adev == adev) { in gmc_v9_0_get_coherence_flags()
1207 if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || in gmc_v9_0_get_coherence_flags()
1208 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) && in gmc_v9_0_get_coherence_flags()
1209 adev->gmc.xgmi.connected_to_cpu) in gmc_v9_0_get_coherence_flags()
1247 is_local = (!is_vram && (adev->flags & AMD_IS_APU) && in gmc_v9_0_get_coherence_flags()
1249 (is_vram && adev == bo_adev && in gmc_v9_0_get_coherence_flags()
1250 KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id); in gmc_v9_0_get_coherence_flags()
1254 } else if (adev->flags & AMD_IS_APU) { in gmc_v9_0_get_coherence_flags()
1286 static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, in gmc_v9_0_get_vm_pte() argument
1304 gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo, in gmc_v9_0_get_vm_pte()
1308 static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, in gmc_v9_0_override_vm_pte_flags() argument
1317 if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3)) in gmc_v9_0_override_vm_pte_flags()
1323 if (!adev->ram_is_direct_mapped) { in gmc_v9_0_override_vm_pte_flags()
1324 dev_dbg(adev->dev, "RAM is not direct mapped\n"); in gmc_v9_0_override_vm_pte_flags()
1333 dev_dbg(adev->dev, "MTYPE is not NC\n"); in gmc_v9_0_override_vm_pte_flags()
1341 if (adev->gmc.is_app_apu && vm->mem_id >= 0) { in gmc_v9_0_override_vm_pte_flags()
1342 local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node; in gmc_v9_0_override_vm_pte_flags()
1344 dev_dbg(adev->dev, "Only native mode APU is supported.\n"); in gmc_v9_0_override_vm_pte_flags()
1352 dev_dbg(adev->dev, "Page is not RAM.\n"); in gmc_v9_0_override_vm_pte_flags()
1356 dev_dbg(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n", in gmc_v9_0_override_vm_pte_flags()
1369 dev_dbg(adev->dev, "flags updated from %llx to %llx\n", in gmc_v9_0_override_vm_pte_flags()
1374 static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) in gmc_v9_0_get_vbios_fb_size() argument
1386 switch (adev->ip_versions[DCE_HWIP][0]) { in gmc_v9_0_get_vbios_fb_size()
1417 gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes) in gmc_v9_0_get_memory_partition() argument
1421 if (adev->nbio.funcs->get_memory_partition_mode) in gmc_v9_0_get_memory_partition()
1422 mode = adev->nbio.funcs->get_memory_partition_mode(adev, in gmc_v9_0_get_memory_partition()
1429 gmc_v9_0_query_memory_partition(struct amdgpu_device *adev) in gmc_v9_0_query_memory_partition() argument
1431 if (amdgpu_sriov_vf(adev)) in gmc_v9_0_query_memory_partition()
1434 return gmc_v9_0_get_memory_partition(adev, NULL); in gmc_v9_0_query_memory_partition()
1450 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_gmc_funcs() argument
1452 adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs; in gmc_v9_0_set_gmc_funcs()
1455 static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_umc_funcs() argument
1457 switch (adev->ip_versions[UMC_HWIP][0]) { in gmc_v9_0_set_umc_funcs()
1459 adev->umc.funcs = &umc_v6_0_funcs; in gmc_v9_0_set_umc_funcs()
1462 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; in gmc_v9_0_set_umc_funcs()
1463 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1464 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1465 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20; in gmc_v9_0_set_umc_funcs()
1466 adev->umc.retire_unit = 1; in gmc_v9_0_set_umc_funcs()
1467 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; in gmc_v9_0_set_umc_funcs()
1468 adev->umc.ras = &umc_v6_1_ras; in gmc_v9_0_set_umc_funcs()
1471 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; in gmc_v9_0_set_umc_funcs()
1472 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1473 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1474 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT; in gmc_v9_0_set_umc_funcs()
1475 adev->umc.retire_unit = 1; in gmc_v9_0_set_umc_funcs()
1476 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; in gmc_v9_0_set_umc_funcs()
1477 adev->umc.ras = &umc_v6_1_ras; in gmc_v9_0_set_umc_funcs()
1480 adev->umc.max_ras_err_cnt_per_query = in gmc_v9_0_set_umc_funcs()
1482 adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1483 adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1484 adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET; in gmc_v9_0_set_umc_funcs()
1485 adev->umc.retire_unit = (UMC_V6_7_NA_MAP_PA_NUM * 2); in gmc_v9_0_set_umc_funcs()
1486 if (!adev->gmc.xgmi.connected_to_cpu) in gmc_v9_0_set_umc_funcs()
1487 adev->umc.ras = &umc_v6_7_ras; in gmc_v9_0_set_umc_funcs()
1488 if (1 & adev->smuio.funcs->get_die_id(adev)) in gmc_v9_0_set_umc_funcs()
1489 adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0]; in gmc_v9_0_set_umc_funcs()
1491 adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0]; in gmc_v9_0_set_umc_funcs()
1498 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_mmhub_funcs() argument
1500 switch (adev->ip_versions[MMHUB_HWIP][0]) { in gmc_v9_0_set_mmhub_funcs()
1502 adev->mmhub.funcs = &mmhub_v9_4_funcs; in gmc_v9_0_set_mmhub_funcs()
1505 adev->mmhub.funcs = &mmhub_v1_7_funcs; in gmc_v9_0_set_mmhub_funcs()
1508 adev->mmhub.funcs = &mmhub_v1_8_funcs; in gmc_v9_0_set_mmhub_funcs()
1511 adev->mmhub.funcs = &mmhub_v1_0_funcs; in gmc_v9_0_set_mmhub_funcs()
1516 static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_mmhub_ras_funcs() argument
1518 switch (adev->ip_versions[MMHUB_HWIP][0]) { in gmc_v9_0_set_mmhub_ras_funcs()
1520 adev->mmhub.ras = &mmhub_v1_0_ras; in gmc_v9_0_set_mmhub_ras_funcs()
1523 adev->mmhub.ras = &mmhub_v9_4_ras; in gmc_v9_0_set_mmhub_ras_funcs()
1526 adev->mmhub.ras = &mmhub_v1_7_ras; in gmc_v9_0_set_mmhub_ras_funcs()
1529 adev->mmhub.ras = &mmhub_v1_8_ras; in gmc_v9_0_set_mmhub_ras_funcs()
1537 static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_gfxhub_funcs() argument
1539 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) in gmc_v9_0_set_gfxhub_funcs()
1540 adev->gfxhub.funcs = &gfxhub_v1_2_funcs; in gmc_v9_0_set_gfxhub_funcs()
1542 adev->gfxhub.funcs = &gfxhub_v1_0_funcs; in gmc_v9_0_set_gfxhub_funcs()
1545 static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_hdp_ras_funcs() argument
1547 adev->hdp.ras = &hdp_v4_0_ras; in gmc_v9_0_set_hdp_ras_funcs()
1550 static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_mca_ras_funcs() argument
1552 struct amdgpu_mca *mca = &adev->mca; in gmc_v9_0_set_mca_ras_funcs()
1555 switch (adev->ip_versions[UMC_HWIP][0]) { in gmc_v9_0_set_mca_ras_funcs()
1557 if (!adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_set_mca_ras_funcs()
1568 static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_xgmi_ras_funcs() argument
1570 if (!adev->gmc.xgmi.connected_to_cpu) in gmc_v9_0_set_xgmi_ras_funcs()
1571 adev->gmc.xgmi.ras = &xgmi_ras; in gmc_v9_0_set_xgmi_ras_funcs()
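The block of set_*_funcs helpers above (lines 1450-1571) all follow one pattern: switch on an ip_versions[...] key and point a funcs/ras pointer at the matching per-IP implementation table, so later code dispatches through adev->...->funcs without version checks. A self-contained model of that dispatch; the IP_VERSION packing and the case-to-table mapping below are illustrative, since the listing only shows the assignment lines, not the case labels:

#include <stdio.h>

/* Assumed major/minor/revision packing, for illustration only. */
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

struct mmhub_funcs { const char *name; };

static const struct mmhub_funcs mmhub_v1_0_funcs = { "mmhub 1.0" };
static const struct mmhub_funcs mmhub_v1_7_funcs = { "mmhub 1.7" };
static const struct mmhub_funcs mmhub_v9_4_funcs = { "mmhub 9.4" };

/* Model of gmc_v9_0_set_mmhub_funcs(): pick the vtable once at init. */
static const struct mmhub_funcs *set_mmhub_funcs(unsigned int mmhub_ip)
{
	switch (mmhub_ip) {
	case IP_VERSION(9, 4, 0):
		return &mmhub_v9_4_funcs;
	case IP_VERSION(1, 7, 0):
		return &mmhub_v1_7_funcs;
	default:
		return &mmhub_v1_0_funcs;	/* catch-all, as in the listing */
	}
}

int main(void)
{
	printf("%s\n", set_mmhub_funcs(IP_VERSION(9, 4, 0))->name);
	return 0;
}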
1576 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_early_init() local
1582 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0) || in gmc_v9_0_early_init()
1583 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || in gmc_v9_0_early_init()
1584 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) in gmc_v9_0_early_init()
1585 adev->gmc.xgmi.supported = true; in gmc_v9_0_early_init()
1587 if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) { in gmc_v9_0_early_init()
1588 adev->gmc.xgmi.supported = true; in gmc_v9_0_early_init()
1589 adev->gmc.xgmi.connected_to_cpu = in gmc_v9_0_early_init()
1590 adev->smuio.funcs->is_host_gpu_xgmi_supported(adev); in gmc_v9_0_early_init()
1593 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { in gmc_v9_0_early_init()
1595 adev->smuio.funcs->get_pkg_type(adev); in gmc_v9_0_early_init()
1603 adev->gmc.is_app_apu = (pkg_type == AMDGPU_PKG_TYPE_APU && in gmc_v9_0_early_init()
1604 !pci_resource_len(adev->pdev, 0)); in gmc_v9_0_early_init()
1607 gmc_v9_0_set_gmc_funcs(adev); in gmc_v9_0_early_init()
1608 gmc_v9_0_set_irq_funcs(adev); in gmc_v9_0_early_init()
1609 gmc_v9_0_set_umc_funcs(adev); in gmc_v9_0_early_init()
1610 gmc_v9_0_set_mmhub_funcs(adev); in gmc_v9_0_early_init()
1611 gmc_v9_0_set_mmhub_ras_funcs(adev); in gmc_v9_0_early_init()
1612 gmc_v9_0_set_gfxhub_funcs(adev); in gmc_v9_0_early_init()
1613 gmc_v9_0_set_hdp_ras_funcs(adev); in gmc_v9_0_early_init()
1614 gmc_v9_0_set_mca_ras_funcs(adev); in gmc_v9_0_early_init()
1615 gmc_v9_0_set_xgmi_ras_funcs(adev); in gmc_v9_0_early_init()
1617 adev->gmc.shared_aperture_start = 0x2000000000000000ULL; in gmc_v9_0_early_init()
1618 adev->gmc.shared_aperture_end = in gmc_v9_0_early_init()
1619 adev->gmc.shared_aperture_start + (4ULL << 30) - 1; in gmc_v9_0_early_init()
1620 adev->gmc.private_aperture_start = 0x1000000000000000ULL; in gmc_v9_0_early_init()
1621 adev->gmc.private_aperture_end = in gmc_v9_0_early_init()
1622 adev->gmc.private_aperture_start + (4ULL << 30) - 1; in gmc_v9_0_early_init()
1623 adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF; in gmc_v9_0_early_init()
1630 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_late_init() local
1633 r = amdgpu_gmc_allocate_vm_inv_eng(adev); in gmc_v9_0_late_init()
1641 if (!amdgpu_sriov_vf(adev) && in gmc_v9_0_late_init()
1642 (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) { in gmc_v9_0_late_init()
1643 if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) { in gmc_v9_0_late_init()
1644 if (adev->df.funcs && in gmc_v9_0_late_init()
1645 adev->df.funcs->enable_ecc_force_par_wr_rmw) in gmc_v9_0_late_init()
1646 adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false); in gmc_v9_0_late_init()
1650 if (!amdgpu_persistent_edc_harvesting_supported(adev)) { in gmc_v9_0_late_init()
1651 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops && in gmc_v9_0_late_init()
1652 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count) in gmc_v9_0_late_init()
1653 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev); in gmc_v9_0_late_init()
1655 if (adev->hdp.ras && adev->hdp.ras->ras_block.hw_ops && in gmc_v9_0_late_init()
1656 adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count) in gmc_v9_0_late_init()
1657 adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count(adev); in gmc_v9_0_late_init()
1660 r = amdgpu_gmc_ras_late_init(adev); in gmc_v9_0_late_init()
1664 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); in gmc_v9_0_late_init()
1667 static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, in gmc_v9_0_vram_gtt_location() argument
1670 u64 base = adev->mmhub.funcs->get_fb_location(adev); in gmc_v9_0_vram_gtt_location()
1673 base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; in gmc_v9_0_vram_gtt_location()
1674 if (adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_vram_gtt_location()
1675 amdgpu_gmc_sysvm_location(adev, mc); in gmc_v9_0_vram_gtt_location()
1677 amdgpu_gmc_vram_location(adev, mc, base); in gmc_v9_0_vram_gtt_location()
1678 amdgpu_gmc_gart_location(adev, mc); in gmc_v9_0_vram_gtt_location()
1679 amdgpu_gmc_agp_location(adev, mc); in gmc_v9_0_vram_gtt_location()
1682 adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev); in gmc_v9_0_vram_gtt_location()
1685 adev->vm_manager.vram_base_offset += in gmc_v9_0_vram_gtt_location()
1686 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; in gmc_v9_0_vram_gtt_location()
1698 static int gmc_v9_0_mc_init(struct amdgpu_device *adev) in gmc_v9_0_mc_init() argument
1703 if (!adev->gmc.is_app_apu) { in gmc_v9_0_mc_init()
1704 adev->gmc.mc_vram_size = in gmc_v9_0_mc_init()
1705 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; in gmc_v9_0_mc_init()
1708 adev->gmc.mc_vram_size = 0; in gmc_v9_0_mc_init()
1710 adev->gmc.real_vram_size = adev->gmc.mc_vram_size; in gmc_v9_0_mc_init()
1712 if (!(adev->flags & AMD_IS_APU) && in gmc_v9_0_mc_init()
1713 !adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_mc_init()
1714 r = amdgpu_device_resize_fb_bar(adev); in gmc_v9_0_mc_init()
1718 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); in gmc_v9_0_mc_init()
1719 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); in gmc_v9_0_mc_init()
1734 if ((!amdgpu_sriov_vf(adev) && in gmc_v9_0_mc_init()
1735 (adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) || in gmc_v9_0_mc_init()
1736 (adev->gmc.xgmi.supported && in gmc_v9_0_mc_init()
1737 adev->gmc.xgmi.connected_to_cpu)) { in gmc_v9_0_mc_init()
1738 adev->gmc.aper_base = in gmc_v9_0_mc_init()
1739 adev->gfxhub.funcs->get_mc_fb_offset(adev) + in gmc_v9_0_mc_init()
1740 adev->gmc.xgmi.physical_node_id * in gmc_v9_0_mc_init()
1741 adev->gmc.xgmi.node_segment_size; in gmc_v9_0_mc_init()
1742 adev->gmc.aper_size = adev->gmc.real_vram_size; in gmc_v9_0_mc_init()
1746 adev->gmc.visible_vram_size = adev->gmc.aper_size; in gmc_v9_0_mc_init()
1750 switch (adev->ip_versions[GC_HWIP][0]) { in gmc_v9_0_mc_init()
1758 adev->gmc.gart_size = 512ULL << 20; in gmc_v9_0_mc_init()
1763 adev->gmc.gart_size = 1024ULL << 20; in gmc_v9_0_mc_init()
1767 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; in gmc_v9_0_mc_init()
1770 adev->gmc.gart_size += adev->pm.smu_prv_buffer_size; in gmc_v9_0_mc_init()
1772 gmc_v9_0_vram_gtt_location(adev, &adev->gmc); in gmc_v9_0_mc_init()
1777 static int gmc_v9_0_gart_init(struct amdgpu_device *adev) in gmc_v9_0_gart_init() argument
1781 if (adev->gart.bo) { in gmc_v9_0_gart_init()
1786 if (adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_gart_init()
1787 adev->gmc.vmid0_page_table_depth = 1; in gmc_v9_0_gart_init()
1788 adev->gmc.vmid0_page_table_block_size = 12; in gmc_v9_0_gart_init()
1790 adev->gmc.vmid0_page_table_depth = 0; in gmc_v9_0_gart_init()
1791 adev->gmc.vmid0_page_table_block_size = 0; in gmc_v9_0_gart_init()
1795 r = amdgpu_gart_init(adev); in gmc_v9_0_gart_init()
1798 adev->gart.table_size = adev->gart.num_gpu_pages * 8; in gmc_v9_0_gart_init()
1799 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) | in gmc_v9_0_gart_init()
1802 if (!adev->gmc.real_vram_size) { in gmc_v9_0_gart_init()
1803 dev_info(adev->dev, "Put GART in system memory for APU\n"); in gmc_v9_0_gart_init()
1804 r = amdgpu_gart_table_ram_alloc(adev); in gmc_v9_0_gart_init()
1806 dev_err(adev->dev, "Failed to allocate GART in system memory\n"); in gmc_v9_0_gart_init()
1808 r = amdgpu_gart_table_vram_alloc(adev); in gmc_v9_0_gart_init()
1812 if (adev->gmc.xgmi.connected_to_cpu) in gmc_v9_0_gart_init()
1813 r = amdgpu_gmc_pdb0_alloc(adev); in gmc_v9_0_gart_init()
1827 static void gmc_v9_0_save_registers(struct amdgpu_device *adev) in gmc_v9_0_save_registers() argument
1829 if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || in gmc_v9_0_save_registers()
1830 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) in gmc_v9_0_save_registers()
1831 adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0); in gmc_v9_0_save_registers()
1834 static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev) in gmc_v9_0_validate_partition_info() argument
1840 mode = gmc_v9_0_get_memory_partition(adev, &supp_modes); in gmc_v9_0_validate_partition_info()
1850 valid = (adev->gmc.num_mem_partitions == 1); in gmc_v9_0_validate_partition_info()
1853 valid = (adev->gmc.num_mem_partitions == 2); in gmc_v9_0_validate_partition_info()
1856 valid = (adev->gmc.num_mem_partitions == 3 || in gmc_v9_0_validate_partition_info()
1857 adev->gmc.num_mem_partitions == 4); in gmc_v9_0_validate_partition_info()
1879 gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev, in gmc_v9_0_init_acpi_mem_ranges() argument
1888 num_xcc = NUM_XCC(adev->gfx.xcc_mask); in gmc_v9_0_init_acpi_mem_ranges()
1890 mem_groups = hweight32(adev->aid_mask); in gmc_v9_0_init_acpi_mem_ranges()
1893 ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info); in gmc_v9_0_init_acpi_mem_ranges()
1914 adev->gmc.num_mem_partitions = num_ranges; in gmc_v9_0_init_acpi_mem_ranges()
1917 if (adev->gmc.num_mem_partitions == 1) { in gmc_v9_0_init_acpi_mem_ranges()
1924 gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, in gmc_v9_0_init_sw_mem_ranges() argument
1931 mode = gmc_v9_0_query_memory_partition(adev); in gmc_v9_0_init_sw_mem_ranges()
1936 adev->gmc.num_mem_partitions = 1; in gmc_v9_0_init_sw_mem_ranges()
1939 adev->gmc.num_mem_partitions = 2; in gmc_v9_0_init_sw_mem_ranges()
1942 if (adev->flags & AMD_IS_APU) in gmc_v9_0_init_sw_mem_ranges()
1943 adev->gmc.num_mem_partitions = 3; in gmc_v9_0_init_sw_mem_ranges()
1945 adev->gmc.num_mem_partitions = 4; in gmc_v9_0_init_sw_mem_ranges()
1948 adev->gmc.num_mem_partitions = 1; in gmc_v9_0_init_sw_mem_ranges()
1952 size = adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT; in gmc_v9_0_init_sw_mem_ranges()
1953 size /= adev->gmc.num_mem_partitions; in gmc_v9_0_init_sw_mem_ranges()
1955 for (i = 0; i < adev->gmc.num_mem_partitions; ++i) { in gmc_v9_0_init_sw_mem_ranges()
1963 mem_ranges[adev->gmc.num_mem_partitions - 1].range.lpfn = in gmc_v9_0_init_sw_mem_ranges()
1964 (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1; in gmc_v9_0_init_sw_mem_ranges()
1965 mem_ranges[adev->gmc.num_mem_partitions - 1].size = in gmc_v9_0_init_sw_mem_ranges()
1966 adev->gmc.real_vram_size - in gmc_v9_0_init_sw_mem_ranges()
1967 ((u64)mem_ranges[adev->gmc.num_mem_partitions - 1].range.fpfn in gmc_v9_0_init_sw_mem_ranges()
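Lines 1952-1967 divide the VRAM page range evenly across the partitions and then stretch the last range so integer rounding never leaves trailing pages uncovered. The arithmetic in isolation (page shift, sizes, and the mem_range layout here are example values, not the driver's types):

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT 12

struct mem_range { uint64_t fpfn, lpfn, size; };

/* Model of gmc_v9_0_init_sw_mem_ranges(): equal split by page count,
 * last range stretched to the true end of VRAM (lines 1963-1967). */
static void split_vram(uint64_t vram_size, unsigned int n, struct mem_range *r)
{
	uint64_t pages = vram_size >> GPU_PAGE_SHIFT;
	uint64_t per = pages / n;

	for (unsigned int i = 0; i < n; i++) {
		r[i].fpfn = i * per;
		r[i].lpfn = (i + 1) * per - 1;
		r[i].size = per << GPU_PAGE_SHIFT;
	}
	/* absorb the rounding remainder in the final partition */
	r[n - 1].lpfn = pages - 1;
	r[n - 1].size = vram_size - (r[n - 1].fpfn << GPU_PAGE_SHIFT);
}

int main(void)
{
	struct mem_range r[3];

	split_vram((64ULL << 30) + 4096, 3, r);	/* 64 GiB plus one page */
	for (int i = 0; i < 3; i++)
		printf("range %d: fpfn=%llu lpfn=%llu size=%llu\n", i,
		       (unsigned long long)r[i].fpfn,
		       (unsigned long long)r[i].lpfn,
		       (unsigned long long)r[i].size);
	return 0;
}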
1971 static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev) in gmc_v9_0_init_mem_ranges() argument
1975 adev->gmc.mem_partitions = kzalloc( in gmc_v9_0_init_mem_ranges()
1979 if (!adev->gmc.mem_partitions) in gmc_v9_0_init_mem_ranges()
1983 if (adev->gmc.is_app_apu) in gmc_v9_0_init_mem_ranges()
1984 gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions); in gmc_v9_0_init_mem_ranges()
1986 gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); in gmc_v9_0_init_mem_ranges()
1988 if (amdgpu_sriov_vf(adev)) in gmc_v9_0_init_mem_ranges()
1991 valid = gmc_v9_0_validate_partition_info(adev); in gmc_v9_0_init_mem_ranges()
1994 dev_WARN(adev->dev, in gmc_v9_0_init_mem_ranges()
2001 static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) in gmc_v9_4_3_init_vram_info() argument
2006 if (!amdgpu_sriov_vf(adev)) { in gmc_v9_4_3_init_vram_info()
2008 adev->gmc.vram_vendor = vram_info & 0xF; in gmc_v9_4_3_init_vram_info()
2010 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; in gmc_v9_4_3_init_vram_info()
2011 adev->gmc.vram_width = 128 * 64; in gmc_v9_4_3_init_vram_info()
2017 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_sw_init() local
2018 unsigned long inst_mask = adev->aid_mask; in gmc_v9_0_sw_init()
2020 adev->gfxhub.funcs->init(adev); in gmc_v9_0_sw_init()
2022 adev->mmhub.funcs->init(adev); in gmc_v9_0_sw_init()
2024 spin_lock_init(&adev->gmc.invalidate_lock); in gmc_v9_0_sw_init()
2026 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { in gmc_v9_0_sw_init()
2027 gmc_v9_4_3_init_vram_info(adev); in gmc_v9_0_sw_init()
2028 } else if (!adev->bios) { in gmc_v9_0_sw_init()
2029 if (adev->flags & AMD_IS_APU) { in gmc_v9_0_sw_init()
2030 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4; in gmc_v9_0_sw_init()
2031 adev->gmc.vram_width = 64 * 64; in gmc_v9_0_sw_init()
2033 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; in gmc_v9_0_sw_init()
2034 adev->gmc.vram_width = 128 * 64; in gmc_v9_0_sw_init()
2037 r = amdgpu_atomfirmware_get_vram_info(adev, in gmc_v9_0_sw_init()
2039 if (amdgpu_sriov_vf(adev)) in gmc_v9_0_sw_init()
2044 adev->gmc.vram_width = 2048; in gmc_v9_0_sw_init()
2046 adev->gmc.vram_width = vram_width; in gmc_v9_0_sw_init()
2048 if (!adev->gmc.vram_width) { in gmc_v9_0_sw_init()
2052 if (adev->flags & AMD_IS_APU) in gmc_v9_0_sw_init()
2056 if (adev->df.funcs && in gmc_v9_0_sw_init()
2057 adev->df.funcs->get_hbm_channel_number) { in gmc_v9_0_sw_init()
2058 numchan = adev->df.funcs->get_hbm_channel_number(adev); in gmc_v9_0_sw_init()
2059 adev->gmc.vram_width = numchan * chansize; in gmc_v9_0_sw_init()
2063 adev->gmc.vram_type = vram_type; in gmc_v9_0_sw_init()
2064 adev->gmc.vram_vendor = vram_vendor; in gmc_v9_0_sw_init()
2066 switch (adev->ip_versions[GC_HWIP][0]) { in gmc_v9_0_sw_init()
2069 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); in gmc_v9_0_sw_init()
2070 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); in gmc_v9_0_sw_init()
2072 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { in gmc_v9_0_sw_init()
2073 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v9_0_sw_init()
2076 amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48); in gmc_v9_0_sw_init()
2077 adev->gmc.translate_further = in gmc_v9_0_sw_init()
2078 adev->vm_manager.num_level > 1; in gmc_v9_0_sw_init()
2086 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); in gmc_v9_0_sw_init()
2087 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); in gmc_v9_0_sw_init()
2095 if (amdgpu_sriov_vf(adev)) in gmc_v9_0_sw_init()
2096 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47); in gmc_v9_0_sw_init()
2098 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v9_0_sw_init()
2099 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) in gmc_v9_0_sw_init()
2100 adev->gmc.translate_further = adev->vm_manager.num_level > 1; in gmc_v9_0_sw_init()
2103 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); in gmc_v9_0_sw_init()
2104 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); in gmc_v9_0_sw_init()
2105 set_bit(AMDGPU_MMHUB1(0), adev->vmhubs_mask); in gmc_v9_0_sw_init()
2108 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v9_0_sw_init()
2109 adev->gmc.translate_further = adev->vm_manager.num_level > 1; in gmc_v9_0_sw_init()
2112 bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0), in gmc_v9_0_sw_init()
2113 NUM_XCC(adev->gfx.xcc_mask)); in gmc_v9_0_sw_init()
2116 bitmap_or(adev->vmhubs_mask, adev->vmhubs_mask, &inst_mask, 32); in gmc_v9_0_sw_init()
2118 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v9_0_sw_init()
2119 adev->gmc.translate_further = adev->vm_manager.num_level > 1; in gmc_v9_0_sw_init()
2126 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT, in gmc_v9_0_sw_init()
2127 &adev->gmc.vm_fault); in gmc_v9_0_sw_init()
2131 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) { in gmc_v9_0_sw_init()
2132 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT, in gmc_v9_0_sw_init()
2133 &adev->gmc.vm_fault); in gmc_v9_0_sw_init()
2138 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT, in gmc_v9_0_sw_init()
2139 &adev->gmc.vm_fault); in gmc_v9_0_sw_init()
2144 if (!amdgpu_sriov_vf(adev) && in gmc_v9_0_sw_init()
2145 !adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_sw_init()
2147 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0, in gmc_v9_0_sw_init()
2148 &adev->gmc.ecc_irq); in gmc_v9_0_sw_init()
2157 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ in gmc_v9_0_sw_init()
2159 dma_addr_bits = adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) ? 48:44; in gmc_v9_0_sw_init()
2160 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits)); in gmc_v9_0_sw_init()
2162 dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n"); in gmc_v9_0_sw_init()
2165 adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits); in gmc_v9_0_sw_init()
2167 r = gmc_v9_0_mc_init(adev); in gmc_v9_0_sw_init()
2171 amdgpu_gmc_get_vbios_allocations(adev); in gmc_v9_0_sw_init()
2173 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { in gmc_v9_0_sw_init()
2174 r = gmc_v9_0_init_mem_ranges(adev); in gmc_v9_0_sw_init()
2180 r = amdgpu_bo_init(adev); in gmc_v9_0_sw_init()
2184 r = gmc_v9_0_gart_init(adev); in gmc_v9_0_sw_init()
2198 adev->vm_manager.first_kfd_vmid = in gmc_v9_0_sw_init()
2199 (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || in gmc_v9_0_sw_init()
2200 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || in gmc_v9_0_sw_init()
2201 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) ? 3 : 8; in gmc_v9_0_sw_init()
2203 amdgpu_vm_manager_init(adev); in gmc_v9_0_sw_init()
2205 gmc_v9_0_save_registers(adev); in gmc_v9_0_sw_init()
2207 r = amdgpu_gmc_ras_sw_init(adev); in gmc_v9_0_sw_init()
2211 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) in gmc_v9_0_sw_init()
2212 amdgpu_gmc_sysfs_init(adev); in gmc_v9_0_sw_init()
2219 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_sw_fini() local
2221 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) in gmc_v9_0_sw_fini()
2222 amdgpu_gmc_sysfs_fini(adev); in gmc_v9_0_sw_fini()
2223 adev->gmc.num_mem_partitions = 0; in gmc_v9_0_sw_fini()
2224 kfree(adev->gmc.mem_partitions); in gmc_v9_0_sw_fini()
2226 amdgpu_gmc_ras_fini(adev); in gmc_v9_0_sw_fini()
2227 amdgpu_gem_force_release(adev); in gmc_v9_0_sw_fini()
2228 amdgpu_vm_manager_fini(adev); in gmc_v9_0_sw_fini()
2229 if (!adev->gmc.real_vram_size) { in gmc_v9_0_sw_fini()
2230 dev_info(adev->dev, "Put GART in system memory for APU free\n"); in gmc_v9_0_sw_fini()
2231 amdgpu_gart_table_ram_free(adev); in gmc_v9_0_sw_fini()
2233 amdgpu_gart_table_vram_free(adev); in gmc_v9_0_sw_fini()
2235 amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0); in gmc_v9_0_sw_fini()
2236 amdgpu_bo_fini(adev); in gmc_v9_0_sw_fini()
2241 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) in gmc_v9_0_init_golden_registers() argument
2244 switch (adev->ip_versions[MMHUB_HWIP][0]) { in gmc_v9_0_init_golden_registers()
2246 if (amdgpu_sriov_vf(adev)) in gmc_v9_0_init_golden_registers()
2250 soc15_program_register_sequence(adev, in gmc_v9_0_init_golden_registers()
2253 soc15_program_register_sequence(adev, in gmc_v9_0_init_golden_registers()
2260 soc15_program_register_sequence(adev, in gmc_v9_0_init_golden_registers()
2276 void gmc_v9_0_restore_registers(struct amdgpu_device *adev) in gmc_v9_0_restore_registers() argument
2278 if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || in gmc_v9_0_restore_registers()
2279 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) { in gmc_v9_0_restore_registers()
2280 WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register); in gmc_v9_0_restore_registers()
2281 WARN_ON(adev->gmc.sdpif_register != in gmc_v9_0_restore_registers()
2291 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) in gmc_v9_0_gart_enable() argument
2295 if (adev->gmc.xgmi.connected_to_cpu) in gmc_v9_0_gart_enable()
2296 amdgpu_gmc_init_pdb0(adev); in gmc_v9_0_gart_enable()
2298 if (adev->gart.bo == NULL) { in gmc_v9_0_gart_enable()
2299 dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); in gmc_v9_0_gart_enable()
2303 amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); in gmc_v9_0_gart_enable()
2305 if (!adev->in_s0ix) { in gmc_v9_0_gart_enable()
2306 r = adev->gfxhub.funcs->gart_enable(adev); in gmc_v9_0_gart_enable()
2311 r = adev->mmhub.funcs->gart_enable(adev); in gmc_v9_0_gart_enable()
2316 (unsigned int)(adev->gmc.gart_size >> 20)); in gmc_v9_0_gart_enable()
2317 if (adev->gmc.pdb0_bo) in gmc_v9_0_gart_enable()
2319 (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo)); in gmc_v9_0_gart_enable()
2321 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); in gmc_v9_0_gart_enable()
2328 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_hw_init() local
2333 gmc_v9_0_init_golden_registers(adev); in gmc_v9_0_hw_init()
2335 if (adev->mode_info.num_crtc) { in gmc_v9_0_hw_init()
2342 if (adev->mmhub.funcs->update_power_gating) in gmc_v9_0_hw_init()
2343 adev->mmhub.funcs->update_power_gating(adev, true); in gmc_v9_0_hw_init()
2345 adev->hdp.funcs->init_registers(adev); in gmc_v9_0_hw_init()
2348 adev->hdp.funcs->flush_hdp(adev, NULL); in gmc_v9_0_hw_init()
2355 if (!amdgpu_sriov_vf(adev)) { in gmc_v9_0_hw_init()
2356 if (!adev->in_s0ix) in gmc_v9_0_hw_init()
2357 adev->gfxhub.funcs->set_fault_enable_default(adev, value); in gmc_v9_0_hw_init()
2358 adev->mmhub.funcs->set_fault_enable_default(adev, value); in gmc_v9_0_hw_init()
2360 for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) { in gmc_v9_0_hw_init()
2361 if (adev->in_s0ix && (i == AMDGPU_GFXHUB(0))) in gmc_v9_0_hw_init()
2363 gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0); in gmc_v9_0_hw_init()
2366 if (adev->umc.funcs && adev->umc.funcs->init_registers) in gmc_v9_0_hw_init()
2367 adev->umc.funcs->init_registers(adev); in gmc_v9_0_hw_init()
2369 r = gmc_v9_0_gart_enable(adev); in gmc_v9_0_hw_init()
2374 return amdgpu_gmc_vram_checking(adev); in gmc_v9_0_hw_init()
2386 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev) in gmc_v9_0_gart_disable() argument
2388 if (!adev->in_s0ix) in gmc_v9_0_gart_disable()
2389 adev->gfxhub.funcs->gart_disable(adev); in gmc_v9_0_gart_disable()
2390 adev->mmhub.funcs->gart_disable(adev); in gmc_v9_0_gart_disable()
2395 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_hw_fini() local
2397 gmc_v9_0_gart_disable(adev); in gmc_v9_0_hw_fini()
2399 if (amdgpu_sriov_vf(adev)) { in gmc_v9_0_hw_fini()
2410 if (adev->mmhub.funcs->update_power_gating) in gmc_v9_0_hw_fini()
2411 adev->mmhub.funcs->update_power_gating(adev, false); in gmc_v9_0_hw_fini()
2413 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); in gmc_v9_0_hw_fini()
2420 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_suspend() local
2422 return gmc_v9_0_hw_fini(adev); in gmc_v9_0_suspend()
2428 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_resume() local
2430 r = gmc_v9_0_hw_init(adev); in gmc_v9_0_resume()
2434 amdgpu_vmid_reset_all(adev); in gmc_v9_0_resume()
2460 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_set_clockgating_state() local
2462 adev->mmhub.funcs->set_clockgating(adev, state); in gmc_v9_0_set_clockgating_state()
2464 athub_v1_0_set_clockgating(adev, state); in gmc_v9_0_set_clockgating_state()
2471 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_get_clockgating_state() local
2473 adev->mmhub.funcs->get_clockgating(adev, flags); in gmc_v9_0_get_clockgating_state()
2475 athub_v1_0_get_clockgating(adev, flags); in gmc_v9_0_get_clockgating_state()