Lines matching refs: adev
201 static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev, in gmc_v9_0_ecc_interrupt_state() argument
246 static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev, in gmc_v9_0_process_ras_data_cb() argument
250 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); in gmc_v9_0_process_ras_data_cb()
251 if (adev->umc.funcs->query_ras_error_count) in gmc_v9_0_process_ras_data_cb()
252 adev->umc.funcs->query_ras_error_count(adev, err_data); in gmc_v9_0_process_ras_data_cb()
256 if (adev->umc.funcs->query_ras_error_address) in gmc_v9_0_process_ras_data_cb()
257 adev->umc.funcs->query_ras_error_address(adev, err_data); in gmc_v9_0_process_ras_data_cb()
261 amdgpu_ras_reset_gpu(adev, 0); in gmc_v9_0_process_ras_data_cb()
266 static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev, in gmc_v9_0_process_ecc_irq() argument
270 struct ras_common_if *ras_if = adev->gmc.umc_ras_if; in gmc_v9_0_process_ecc_irq()
280 amdgpu_ras_interrupt_dispatch(adev, &ih_data); in gmc_v9_0_process_ecc_irq()
284 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, in gmc_v9_0_vm_fault_interrupt_state() argument
302 for (j = 0; j < adev->num_vmhubs; j++) { in gmc_v9_0_vm_fault_interrupt_state()
303 hub = &adev->vmhub[j]; in gmc_v9_0_vm_fault_interrupt_state()
313 for (j = 0; j < adev->num_vmhubs; j++) { in gmc_v9_0_vm_fault_interrupt_state()
314 hub = &adev->vmhub[j]; in gmc_v9_0_vm_fault_interrupt_state()
329 static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, in gmc_v9_0_process_interrupt() argument
342 if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid, in gmc_v9_0_process_interrupt()
348 hub = &adev->vmhub[AMDGPU_MMHUB_0]; in gmc_v9_0_process_interrupt()
351 hub = &adev->vmhub[AMDGPU_MMHUB_1]; in gmc_v9_0_process_interrupt()
354 hub = &adev->vmhub[AMDGPU_GFXHUB_0]; in gmc_v9_0_process_interrupt()
358 if (!amdgpu_sriov_vf(adev)) { in gmc_v9_0_process_interrupt()
375 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); in gmc_v9_0_process_interrupt()
377 dev_err(adev->dev, in gmc_v9_0_process_interrupt()
384 dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n", in gmc_v9_0_process_interrupt()
386 if (!amdgpu_sriov_vf(adev)) { in gmc_v9_0_process_interrupt()
387 dev_err(adev->dev, in gmc_v9_0_process_interrupt()
390 dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", in gmc_v9_0_process_interrupt()
393 dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n", in gmc_v9_0_process_interrupt()
396 dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n", in gmc_v9_0_process_interrupt()
399 dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", in gmc_v9_0_process_interrupt()
402 dev_err(adev->dev, "\t RW: 0x%lx\n", in gmc_v9_0_process_interrupt()
423 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_irq_funcs() argument
425 adev->gmc.vm_fault.num_types = 1; in gmc_v9_0_set_irq_funcs()
426 adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs; in gmc_v9_0_set_irq_funcs()
428 adev->gmc.ecc_irq.num_types = 1; in gmc_v9_0_set_irq_funcs()
429 adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs; in gmc_v9_0_set_irq_funcs()
467 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, in gmc_v9_0_flush_gpu_tlb() argument
474 BUG_ON(vmhub >= adev->num_vmhubs); in gmc_v9_0_flush_gpu_tlb()
476 hub = &adev->vmhub[vmhub]; in gmc_v9_0_flush_gpu_tlb()
482 if (adev->gfx.kiq.ring.sched.ready && in gmc_v9_0_flush_gpu_tlb()
483 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && in gmc_v9_0_flush_gpu_tlb()
484 !adev->in_gpu_reset) { in gmc_v9_0_flush_gpu_tlb()
488 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp, in gmc_v9_0_flush_gpu_tlb()
493 spin_lock(&adev->gmc.invalidate_lock); in gmc_v9_0_flush_gpu_tlb()
503 for (j = 0; j < adev->usec_timeout; j++) { in gmc_v9_0_flush_gpu_tlb()
509 spin_unlock(&adev->gmc.invalidate_lock); in gmc_v9_0_flush_gpu_tlb()
510 if (j < adev->usec_timeout) in gmc_v9_0_flush_gpu_tlb()
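The gmc_v9_0_flush_gpu_tlb() lines above show the two invalidation paths: when the KIQ ring is usable and the device is not mid-reset, the request/ack registers are driven through amdgpu_virt_kiq_reg_write_reg_wait(); otherwise the driver takes gmc.invalidate_lock and polls the ack register for up to adev->usec_timeout iterations, with "j < adev->usec_timeout" deciding success. Below is a minimal user-space C sketch of that second, poll-with-timeout path; write_req_reg()/read_ack_reg() are hypothetical stand-ins for the MMIO accessors, not driver APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the MMIO register accessors. */
static uint32_t fake_ack;
static void write_req_reg(uint32_t v) { fake_ack = v; }   /* request "completes" instantly here */
static uint32_t read_ack_reg(void)    { return fake_ack; }

/* Poll-with-timeout pattern: issue the invalidation request, then spin on the
 * ack register until the expected bit appears or the iteration budget runs out. */
static bool flush_tlb_poll(uint32_t req_bit, unsigned int usec_timeout)
{
	unsigned int j;

	write_req_reg(req_bit);
	for (j = 0; j < usec_timeout; j++) {
		if (read_ack_reg() & req_bit)
			break;
		/* the driver delays briefly here between reads */
	}
	return j < usec_timeout;   /* same "j < adev->usec_timeout" success test as in the listing */
}

int main(void)
{
	printf("flush %s\n", flush_tlb_poll(1u << 0, 100) ? "acked" : "timed out");
	return 0;
}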
519 struct amdgpu_device *adev = ring->adev; in gmc_v9_0_emit_flush_gpu_tlb() local
520 struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub]; in gmc_v9_0_emit_flush_gpu_tlb()
540 struct amdgpu_device *adev = ring->adev; in gmc_v9_0_emit_pasid_mapping() local
587 static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev, in gmc_v9_0_get_vm_pte_flags() argument
627 static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, in gmc_v9_0_get_vm_pde() argument
631 *addr = adev->vm_manager.vram_base_offset + *addr - in gmc_v9_0_get_vm_pde()
632 adev->gmc.vram_start; in gmc_v9_0_get_vm_pde()
635 if (!adev->gmc.translate_further) in gmc_v9_0_get_vm_pde()
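The gmc_v9_0_get_vm_pde() lines above rebase a page-directory address: the MC address is shifted into GPU physical space by subtracting gmc.vram_start and adding vm_manager.vram_base_offset. A standalone sketch of that arithmetic with made-up base addresses (the real values come from the hub FB-location registers):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up values for illustration only. */
#define VRAM_START        0x0000008000000000ULL  /* MC address where VRAM begins */
#define VRAM_BASE_OFFSET  0x0000000000000000ULL  /* GPU-physical base of VRAM */

/* Same rebasing as the listed lines: addr = vram_base_offset + addr - vram_start. */
static uint64_t rebase_pde_addr(uint64_t mc_addr)
{
	return VRAM_BASE_OFFSET + mc_addr - VRAM_START;
}

int main(void)
{
	uint64_t mc_addr = VRAM_START + (2ULL << 20);   /* 2 MiB into VRAM */
	printf("rebased PDE address: 0x%016" PRIx64 "\n", rebase_pde_addr(mc_addr));
	return 0;
}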
659 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_gmc_funcs() argument
661 adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs; in gmc_v9_0_set_gmc_funcs()
664 static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_umc_funcs() argument
666 switch (adev->asic_type) { in gmc_v9_0_set_umc_funcs()
668 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; in gmc_v9_0_set_umc_funcs()
669 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
670 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
671 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET; in gmc_v9_0_set_umc_funcs()
672 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; in gmc_v9_0_set_umc_funcs()
673 adev->umc.funcs = &umc_v6_1_funcs; in gmc_v9_0_set_umc_funcs()
680 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_mmhub_funcs() argument
682 switch (adev->asic_type) { in gmc_v9_0_set_mmhub_funcs()
684 adev->mmhub_funcs = &mmhub_v1_0_funcs; in gmc_v9_0_set_mmhub_funcs()
693 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_early_init() local
695 gmc_v9_0_set_gmc_funcs(adev); in gmc_v9_0_early_init()
696 gmc_v9_0_set_irq_funcs(adev); in gmc_v9_0_early_init()
697 gmc_v9_0_set_umc_funcs(adev); in gmc_v9_0_early_init()
698 gmc_v9_0_set_mmhub_funcs(adev); in gmc_v9_0_early_init()
700 adev->gmc.shared_aperture_start = 0x2000000000000000ULL; in gmc_v9_0_early_init()
701 adev->gmc.shared_aperture_end = in gmc_v9_0_early_init()
702 adev->gmc.shared_aperture_start + (4ULL << 30) - 1; in gmc_v9_0_early_init()
703 adev->gmc.private_aperture_start = 0x1000000000000000ULL; in gmc_v9_0_early_init()
704 adev->gmc.private_aperture_end = in gmc_v9_0_early_init()
705 adev->gmc.private_aperture_start + (4ULL << 30) - 1; in gmc_v9_0_early_init()
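The gmc_v9_0_early_init() lines above size the shared and private apertures as 4 GiB windows: end = start + (4ULL << 30) - 1. A tiny standalone check of that arithmetic, using the start addresses from the listing:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Start addresses copied from the listing; each aperture spans 4 GiB. */
	uint64_t shared_start  = 0x2000000000000000ULL;
	uint64_t private_start = 0x1000000000000000ULL;
	uint64_t span          = (4ULL << 30) - 1;

	printf("shared : 0x%016" PRIx64 " - 0x%016" PRIx64 "\n",
	       shared_start, shared_start + span);
	printf("private: 0x%016" PRIx64 " - 0x%016" PRIx64 "\n",
	       private_start, private_start + span);
	return 0;
}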
710 static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev) in gmc_v9_0_keep_stolen_memory() argument
722 switch (adev->asic_type) { in gmc_v9_0_keep_stolen_memory()
735 static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev) in gmc_v9_0_allocate_vm_inv_eng() argument
744 for (i = 0; i < adev->num_rings; ++i) { in gmc_v9_0_allocate_vm_inv_eng()
745 ring = adev->rings[i]; in gmc_v9_0_allocate_vm_inv_eng()
750 dev_err(adev->dev, "no VM inv eng for ring %s\n", in gmc_v9_0_allocate_vm_inv_eng()
758 dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", in gmc_v9_0_allocate_vm_inv_eng()
768 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_ecc_ras_block_late_init() local
776 ras_if = &adev->gmc.umc_ras_if; in gmc_v9_0_ecc_ras_block_late_init()
778 ras_if = &adev->gmc.mmhub_ras_if; in gmc_v9_0_ecc_ras_block_late_init()
782 if (!amdgpu_ras_is_supported(adev, ras_block->block)) { in gmc_v9_0_ecc_ras_block_late_init()
783 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); in gmc_v9_0_ecc_ras_block_late_init()
793 r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); in gmc_v9_0_ecc_ras_block_late_init()
797 amdgpu_ras_request_reset_on_boot(adev, in gmc_v9_0_ecc_ras_block_late_init()
814 r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); in gmc_v9_0_ecc_ras_block_late_init()
817 amdgpu_ras_request_reset_on_boot(adev, in gmc_v9_0_ecc_ras_block_late_init()
828 r = amdgpu_ras_interrupt_add_handler(adev, &ih_info); in gmc_v9_0_ecc_ras_block_late_init()
833 amdgpu_ras_debugfs_create(adev, fs_info); in gmc_v9_0_ecc_ras_block_late_init()
835 r = amdgpu_ras_sysfs_create(adev, fs_info); in gmc_v9_0_ecc_ras_block_late_init()
840 r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0); in gmc_v9_0_ecc_ras_block_late_init()
847 amdgpu_ras_sysfs_remove(adev, *ras_if); in gmc_v9_0_ecc_ras_block_late_init()
849 amdgpu_ras_debugfs_remove(adev, *ras_if); in gmc_v9_0_ecc_ras_block_late_init()
851 amdgpu_ras_interrupt_remove_handler(adev, &ih_info); in gmc_v9_0_ecc_ras_block_late_init()
853 amdgpu_ras_feature_enable(adev, *ras_if, 0); in gmc_v9_0_ecc_ras_block_late_init()
897 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_late_init() local
900 if (!gmc_v9_0_keep_stolen_memory(adev)) in gmc_v9_0_late_init()
901 amdgpu_bo_late_init(adev); in gmc_v9_0_late_init()
903 r = gmc_v9_0_allocate_vm_inv_eng(adev); in gmc_v9_0_late_init()
907 if (!amdgpu_sriov_vf(adev)) { in gmc_v9_0_late_init()
908 switch (adev->asic_type) { in gmc_v9_0_late_init()
911 r = amdgpu_atomfirmware_mem_ecc_supported(adev); in gmc_v9_0_late_init()
914 if (adev->df_funcs->enable_ecc_force_par_wr_rmw) in gmc_v9_0_late_init()
915 adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false); in gmc_v9_0_late_init()
920 r = amdgpu_atomfirmware_sram_ecc_supported(adev); in gmc_v9_0_late_init()
936 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); in gmc_v9_0_late_init()
939 static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, in gmc_v9_0_vram_gtt_location() argument
944 if (adev->asic_type == CHIP_ARCTURUS) in gmc_v9_0_vram_gtt_location()
945 base = mmhub_v9_4_get_fb_location(adev); in gmc_v9_0_vram_gtt_location()
946 else if (!amdgpu_sriov_vf(adev)) in gmc_v9_0_vram_gtt_location()
947 base = mmhub_v1_0_get_fb_location(adev); in gmc_v9_0_vram_gtt_location()
950 base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; in gmc_v9_0_vram_gtt_location()
951 amdgpu_gmc_vram_location(adev, mc, base); in gmc_v9_0_vram_gtt_location()
952 amdgpu_gmc_gart_location(adev, mc); in gmc_v9_0_vram_gtt_location()
953 amdgpu_gmc_agp_location(adev, mc); in gmc_v9_0_vram_gtt_location()
955 adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev); in gmc_v9_0_vram_gtt_location()
958 adev->vm_manager.vram_base_offset += in gmc_v9_0_vram_gtt_location()
959 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; in gmc_v9_0_vram_gtt_location()
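The gmc_v9_0_vram_gtt_location() lines above offset both the FB base and vram_base_offset by physical_node_id * node_segment_size on XGMI systems, so each node's VRAM lands in its own slice of the shared address space. A sketch of that offset math; the base address, segment size, and node id here are invented for the example:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values; the driver reads these from the hub and XGMI topology. */
	uint64_t fb_base          = 0x0000008000000000ULL;  /* mmhub FB location */
	uint64_t node_segment_sz  = 32ULL << 30;            /* 32 GiB of VRAM per node */
	uint32_t physical_node_id = 2;

	uint64_t base = fb_base + (uint64_t)physical_node_id * node_segment_sz;
	printf("node %" PRIu32 " VRAM base: 0x%016" PRIx64 "\n", physical_node_id, base);
	return 0;
}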
971 static int gmc_v9_0_mc_init(struct amdgpu_device *adev) in gmc_v9_0_mc_init() argument
976 if (amdgpu_sriov_vf(adev)) { in gmc_v9_0_mc_init()
981 adev->gmc.vram_width = 2048; in gmc_v9_0_mc_init()
983 adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev); in gmc_v9_0_mc_init()
986 if (!adev->gmc.vram_width) { in gmc_v9_0_mc_init()
988 if (adev->flags & AMD_IS_APU) in gmc_v9_0_mc_init()
993 numchan = adev->df_funcs->get_hbm_channel_number(adev); in gmc_v9_0_mc_init()
994 adev->gmc.vram_width = numchan * chansize; in gmc_v9_0_mc_init()
998 adev->gmc.mc_vram_size = in gmc_v9_0_mc_init()
999 adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL; in gmc_v9_0_mc_init()
1000 adev->gmc.real_vram_size = adev->gmc.mc_vram_size; in gmc_v9_0_mc_init()
1002 if (!(adev->flags & AMD_IS_APU)) { in gmc_v9_0_mc_init()
1003 r = amdgpu_device_resize_fb_bar(adev); in gmc_v9_0_mc_init()
1007 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); in gmc_v9_0_mc_init()
1008 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); in gmc_v9_0_mc_init()
1011 if (adev->flags & AMD_IS_APU) { in gmc_v9_0_mc_init()
1012 adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev); in gmc_v9_0_mc_init()
1013 adev->gmc.aper_size = adev->gmc.real_vram_size; in gmc_v9_0_mc_init()
1017 adev->gmc.visible_vram_size = adev->gmc.aper_size; in gmc_v9_0_mc_init()
1018 if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) in gmc_v9_0_mc_init()
1019 adev->gmc.visible_vram_size = adev->gmc.real_vram_size; in gmc_v9_0_mc_init()
1023 switch (adev->asic_type) { in gmc_v9_0_mc_init()
1029 adev->gmc.gart_size = 512ULL << 20; in gmc_v9_0_mc_init()
1033 adev->gmc.gart_size = 1024ULL << 20; in gmc_v9_0_mc_init()
1037 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; in gmc_v9_0_mc_init()
1040 gmc_v9_0_vram_gtt_location(adev, &adev->gmc); in gmc_v9_0_mc_init()
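The gmc_v9_0_mc_init() lines above derive the memory-controller numbers: VRAM width is channel count times channel width, VRAM size comes from the NBIO memsize value in MiB, visible VRAM is the BAR aperture clamped to real VRAM, and the GART falls back to a fixed default when amdgpu_gart_size is not set. A standalone sketch of that derivation with representative (not asserted) inputs:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Representative inputs; in the driver these come from the DF/NBIO
	 * callbacks and pci_resource_len(), per the listed lines. */
	uint64_t memsize_mib = 16384;               /* nbio get_memsize() result, in MiB */
	uint64_t numchan     = 16, chansize = 128;  /* HBM channel count x width, illustrative */
	uint64_t aper_size   = 256ULL << 20;        /* unresized BAR0 */

	uint64_t vram_width   = numchan * chansize;
	uint64_t mc_vram_size = memsize_mib * 1024ULL * 1024ULL;
	uint64_t visible      = aper_size < mc_vram_size ? aper_size : mc_vram_size;
	uint64_t gart_size    = 512ULL << 20;       /* default when the module parameter is unset */

	printf("vram_width=%" PRIu64 ", vram=%" PRIu64 " MiB, visible=%" PRIu64
	       " MiB, gart=%" PRIu64 " MiB\n",
	       vram_width, mc_vram_size >> 20, visible >> 20, gart_size >> 20);
	return 0;
}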
1045 static int gmc_v9_0_gart_init(struct amdgpu_device *adev) in gmc_v9_0_gart_init() argument
1049 if (adev->gart.bo) { in gmc_v9_0_gart_init()
1054 r = amdgpu_gart_init(adev); in gmc_v9_0_gart_init()
1057 adev->gart.table_size = adev->gart.num_gpu_pages * 8; in gmc_v9_0_gart_init()
1058 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) | in gmc_v9_0_gart_init()
1060 return amdgpu_gart_table_vram_alloc(adev); in gmc_v9_0_gart_init()
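The gmc_v9_0_gart_init() line above sizes the GART page table as num_gpu_pages * 8, i.e. one 8-byte PTE per GPU page. A quick standalone check for a 512 MiB GART with 4 KiB GPU pages (the usual AMDGPU_GPU_PAGE_SIZE):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gart_size     = 512ULL << 20;            /* 512 MiB GART aperture */
	uint64_t gpu_page_size = 4096;                    /* AMDGPU_GPU_PAGE_SIZE */
	uint64_t num_gpu_pages = gart_size / gpu_page_size;
	uint64_t table_size    = num_gpu_pages * 8;       /* 8 bytes per PTE */

	printf("%" PRIu64 " GPU pages -> %" PRIu64 " KiB page table\n",
	       num_gpu_pages, table_size >> 10);
	return 0;
}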
1063 static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) in gmc_v9_0_get_vbios_fb_size() argument
1072 if (gmc_v9_0_keep_stolen_memory(adev)) in gmc_v9_0_get_vbios_fb_size()
1081 switch (adev->asic_type) { in gmc_v9_0_get_vbios_fb_size()
1103 if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) in gmc_v9_0_get_vbios_fb_size()
1112 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_sw_init() local
1114 gfxhub_v1_0_init(adev); in gmc_v9_0_sw_init()
1115 if (adev->asic_type == CHIP_ARCTURUS) in gmc_v9_0_sw_init()
1116 mmhub_v9_4_init(adev); in gmc_v9_0_sw_init()
1118 mmhub_v1_0_init(adev); in gmc_v9_0_sw_init()
1120 spin_lock_init(&adev->gmc.invalidate_lock); in gmc_v9_0_sw_init()
1122 adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev); in gmc_v9_0_sw_init()
1123 switch (adev->asic_type) { in gmc_v9_0_sw_init()
1125 adev->num_vmhubs = 2; in gmc_v9_0_sw_init()
1127 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { in gmc_v9_0_sw_init()
1128 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v9_0_sw_init()
1131 amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48); in gmc_v9_0_sw_init()
1132 adev->gmc.translate_further = in gmc_v9_0_sw_init()
1133 adev->vm_manager.num_level > 1; in gmc_v9_0_sw_init()
1140 adev->num_vmhubs = 2; in gmc_v9_0_sw_init()
1149 if (amdgpu_sriov_vf(adev)) in gmc_v9_0_sw_init()
1150 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47); in gmc_v9_0_sw_init()
1152 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v9_0_sw_init()
1155 adev->num_vmhubs = 3; in gmc_v9_0_sw_init()
1158 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v9_0_sw_init()
1165 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT, in gmc_v9_0_sw_init()
1166 &adev->gmc.vm_fault); in gmc_v9_0_sw_init()
1170 if (adev->asic_type == CHIP_ARCTURUS) { in gmc_v9_0_sw_init()
1171 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT, in gmc_v9_0_sw_init()
1172 &adev->gmc.vm_fault); in gmc_v9_0_sw_init()
1177 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT, in gmc_v9_0_sw_init()
1178 &adev->gmc.vm_fault); in gmc_v9_0_sw_init()
1184 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0, in gmc_v9_0_sw_init()
1185 &adev->gmc.ecc_irq); in gmc_v9_0_sw_init()
1193 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ in gmc_v9_0_sw_init()
1195 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); in gmc_v9_0_sw_init()
1200 adev->need_swiotlb = drm_need_swiotlb(44); in gmc_v9_0_sw_init()
1202 if (adev->gmc.xgmi.supported) { in gmc_v9_0_sw_init()
1203 r = gfxhub_v1_1_get_xgmi_info(adev); in gmc_v9_0_sw_init()
1208 r = gmc_v9_0_mc_init(adev); in gmc_v9_0_sw_init()
1212 adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev); in gmc_v9_0_sw_init()
1215 r = amdgpu_bo_init(adev); in gmc_v9_0_sw_init()
1219 r = gmc_v9_0_gart_init(adev); in gmc_v9_0_sw_init()
1229 adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS; in gmc_v9_0_sw_init()
1230 adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS; in gmc_v9_0_sw_init()
1231 adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS; in gmc_v9_0_sw_init()
1233 amdgpu_vm_manager_init(adev); in gmc_v9_0_sw_init()
1240 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_sw_fini() local
1243 if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) && in gmc_v9_0_sw_fini()
1244 adev->gmc.umc_ras_if) { in gmc_v9_0_sw_fini()
1245 struct ras_common_if *ras_if = adev->gmc.umc_ras_if; in gmc_v9_0_sw_fini()
1251 amdgpu_ras_debugfs_remove(adev, ras_if); in gmc_v9_0_sw_fini()
1252 amdgpu_ras_sysfs_remove(adev, ras_if); in gmc_v9_0_sw_fini()
1254 amdgpu_ras_interrupt_remove_handler(adev, &ih_info); in gmc_v9_0_sw_fini()
1255 amdgpu_ras_feature_enable(adev, ras_if, 0); in gmc_v9_0_sw_fini()
1259 if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) && in gmc_v9_0_sw_fini()
1260 adev->gmc.mmhub_ras_if) { in gmc_v9_0_sw_fini()
1261 struct ras_common_if *ras_if = adev->gmc.mmhub_ras_if; in gmc_v9_0_sw_fini()
1264 amdgpu_ras_debugfs_remove(adev, ras_if); in gmc_v9_0_sw_fini()
1265 amdgpu_ras_sysfs_remove(adev, ras_if); in gmc_v9_0_sw_fini()
1266 amdgpu_ras_feature_enable(adev, ras_if, 0); in gmc_v9_0_sw_fini()
1270 amdgpu_gem_force_release(adev); in gmc_v9_0_sw_fini()
1271 amdgpu_vm_manager_fini(adev); in gmc_v9_0_sw_fini()
1273 if (gmc_v9_0_keep_stolen_memory(adev)) in gmc_v9_0_sw_fini()
1274 amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf); in gmc_v9_0_sw_fini()
1276 amdgpu_gart_table_vram_free(adev); in gmc_v9_0_sw_fini()
1277 amdgpu_bo_fini(adev); in gmc_v9_0_sw_fini()
1278 amdgpu_gart_fini(adev); in gmc_v9_0_sw_fini()
1283 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) in gmc_v9_0_init_golden_registers() argument
1286 switch (adev->asic_type) { in gmc_v9_0_init_golden_registers()
1288 if (amdgpu_sriov_vf(adev)) in gmc_v9_0_init_golden_registers()
1292 soc15_program_register_sequence(adev, in gmc_v9_0_init_golden_registers()
1295 soc15_program_register_sequence(adev, in gmc_v9_0_init_golden_registers()
1303 soc15_program_register_sequence(adev, in gmc_v9_0_init_golden_registers()
1317 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) in gmc_v9_0_gart_enable() argument
1323 amdgpu_device_program_register_sequence(adev, in gmc_v9_0_gart_enable()
1327 if (adev->gart.bo == NULL) { in gmc_v9_0_gart_enable()
1328 dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); in gmc_v9_0_gart_enable()
1331 r = amdgpu_gart_table_vram_pin(adev); in gmc_v9_0_gart_enable()
1335 switch (adev->asic_type) { in gmc_v9_0_gart_enable()
1338 mmhub_v1_0_update_power_gating(adev, true); in gmc_v9_0_gart_enable()
1344 r = gfxhub_v1_0_gart_enable(adev); in gmc_v9_0_gart_enable()
1348 if (adev->asic_type == CHIP_ARCTURUS) in gmc_v9_0_gart_enable()
1349 r = mmhub_v9_4_gart_enable(adev); in gmc_v9_0_gart_enable()
1351 r = mmhub_v1_0_gart_enable(adev); in gmc_v9_0_gart_enable()
1360 WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8)); in gmc_v9_0_gart_enable()
1361 WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40)); in gmc_v9_0_gart_enable()
1364 adev->nbio_funcs->hdp_flush(adev, NULL); in gmc_v9_0_gart_enable()
1371 gfxhub_v1_0_set_fault_enable_default(adev, value); in gmc_v9_0_gart_enable()
1372 if (adev->asic_type == CHIP_ARCTURUS) in gmc_v9_0_gart_enable()
1373 mmhub_v9_4_set_fault_enable_default(adev, value); in gmc_v9_0_gart_enable()
1375 mmhub_v1_0_set_fault_enable_default(adev, value); in gmc_v9_0_gart_enable()
1377 for (i = 0; i < adev->num_vmhubs; ++i) in gmc_v9_0_gart_enable()
1378 gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0); in gmc_v9_0_gart_enable()
1381 (unsigned)(adev->gmc.gart_size >> 20), in gmc_v9_0_gart_enable()
1382 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); in gmc_v9_0_gart_enable()
1383 adev->gart.ready = true; in gmc_v9_0_gart_enable()
1390 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_hw_init() local
1393 gmc_v9_0_init_golden_registers(adev); in gmc_v9_0_hw_init()
1395 if (adev->mode_info.num_crtc) { in gmc_v9_0_hw_init()
1403 r = gmc_v9_0_gart_enable(adev); in gmc_v9_0_hw_init()
1415 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev) in gmc_v9_0_gart_disable() argument
1417 gfxhub_v1_0_gart_disable(adev); in gmc_v9_0_gart_disable()
1418 if (adev->asic_type == CHIP_ARCTURUS) in gmc_v9_0_gart_disable()
1419 mmhub_v9_4_gart_disable(adev); in gmc_v9_0_gart_disable()
1421 mmhub_v1_0_gart_disable(adev); in gmc_v9_0_gart_disable()
1422 amdgpu_gart_table_vram_unpin(adev); in gmc_v9_0_gart_disable()
1427 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_hw_fini() local
1429 if (amdgpu_sriov_vf(adev)) { in gmc_v9_0_hw_fini()
1435 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); in gmc_v9_0_hw_fini()
1436 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); in gmc_v9_0_hw_fini()
1437 gmc_v9_0_gart_disable(adev); in gmc_v9_0_hw_fini()
1444 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_suspend() local
1446 return gmc_v9_0_hw_fini(adev); in gmc_v9_0_suspend()
1452 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_resume() local
1454 r = gmc_v9_0_hw_init(adev); in gmc_v9_0_resume()
1458 amdgpu_vmid_reset_all(adev); in gmc_v9_0_resume()
1484 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_set_clockgating_state() local
1486 if (adev->asic_type == CHIP_ARCTURUS) in gmc_v9_0_set_clockgating_state()
1487 mmhub_v9_4_set_clockgating(adev, state); in gmc_v9_0_set_clockgating_state()
1489 mmhub_v1_0_set_clockgating(adev, state); in gmc_v9_0_set_clockgating_state()
1491 athub_v1_0_set_clockgating(adev, state); in gmc_v9_0_set_clockgating_state()
1498 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_get_clockgating_state() local
1500 if (adev->asic_type == CHIP_ARCTURUS) in gmc_v9_0_get_clockgating_state()
1501 mmhub_v9_4_get_clockgating(adev, flags); in gmc_v9_0_get_clockgating_state()
1503 mmhub_v1_0_get_clockgating(adev, flags); in gmc_v9_0_get_clockgating_state()
1505 athub_v1_0_get_clockgating(adev, flags); in gmc_v9_0_get_clockgating_state()