Search results for "xgmi" in /Linux-v5.15/drivers/gpu/drm/amd/amdgpu/
amdgpu_xgmi.c
    250  return sysfs_emit(buf, "%llu\n", adev->gmc.xgmi.node_id);  in amdgpu_xgmi_show_device_id()
    360  if (!adev->gmc.xgmi.hive_id)  in amdgpu_get_xgmi_hive()
    371  if (hive->hive_id == adev->gmc.xgmi.hive_id)  in amdgpu_get_xgmi_hive()
    394  hive->hive_id = adev->gmc.xgmi.hive_id;  in amdgpu_get_xgmi_hive()
    465  request_adev->gmc.xgmi.node_id,  in amdgpu_xgmi_set_pstate()
    466  request_adev->gmc.xgmi.hive_id, ret);  in amdgpu_xgmi_set_pstate()
    494  adev->gmc.xgmi.node_id,  in amdgpu_xgmi_update_topology()
    495  adev->gmc.xgmi.hive_id, ret);  in amdgpu_xgmi_update_topology()
    515  if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)  in amdgpu_xgmi_get_hops_count()
    527  if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)  in amdgpu_xgmi_get_num_links()
    [all …]
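The amdgpu_get_xgmi_hive() matches outline a lookup-or-create pattern: bail out when the device never latched a hive ID, reuse an existing hive with the same ID, otherwise allocate one and stamp it with the device's hive_id. A minimal sketch of that shape; the real driver's locking, kobject lifetime, and error handling are omitted, and the hive struct is reduced to the fields the matches touch:

struct amdgpu_hive_info {
	u64 hive_id;
	struct list_head device_list;	/* devices linked via gmc.xgmi.head */
	struct list_head node;		/* link into the global hive list */
};

static LIST_HEAD(xgmi_hive_list);	/* stand-in for the driver's hive list */

static struct amdgpu_hive_info *get_xgmi_hive_sketch(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive;

	if (!adev->gmc.xgmi.hive_id)		/* not part of any hive */
		return NULL;

	list_for_each_entry(hive, &xgmi_hive_list, node)
		if (hive->hive_id == adev->gmc.xgmi.hive_id)
			return hive;		/* hive already known */

	/* first device seen for this fabric: create and register the hive */
	hive = kzalloc(sizeof(*hive), GFP_KERNEL);
	if (!hive)
		return NULL;

	hive->hive_id = adev->gmc.xgmi.hive_id;
	INIT_LIST_HEAD(&hive->device_list);
	list_add_tail(&hive->node, &xgmi_hive_list);
	return hive;
}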
|
gfxhub_v1_1.c
    86   if (max_region || adev->gmc.xgmi.connected_to_cpu) {  in gfxhub_v1_1_get_xgmi_info()
    87   adev->gmc.xgmi.num_physical_nodes = max_region + 1;  in gfxhub_v1_1_get_xgmi_info()
    89   if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)  in gfxhub_v1_1_get_xgmi_info()
    92   adev->gmc.xgmi.physical_node_id =  in gfxhub_v1_1_get_xgmi_info()
    96   if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)  in gfxhub_v1_1_get_xgmi_info()
    99   adev->gmc.xgmi.node_segment_size = seg_size;  in gfxhub_v1_1_get_xgmi_info()
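Both gfxhub variants (see also gfxhub_v2_1.c below) derive the XGMI topology the same way: the node count is the highest configured LFB region plus one, and both the count and the node ID are bounds-checked before the segment size is latched. A sketch of that flow with the register reads replaced by plain parameters; the limit constants here are illustrative, not the per-ASIC hardware values:

static int get_xgmi_info_sketch(struct amdgpu_device *adev,
				u32 max_region, u32 node_id_field, u64 seg_size)
{
	const u32 max_num_physical_nodes = 4;	/* placeholder limit */
	const u32 max_physical_node_id = 3;	/* placeholder limit */

	adev->gmc.xgmi.num_physical_nodes = max_region + 1;
	if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
		return -EINVAL;

	adev->gmc.xgmi.physical_node_id = node_id_field;
	if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
		return -EINVAL;

	adev->gmc.xgmi.node_segment_size = seg_size;
	return 0;
}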
|
amdgpu_gmc.c
    48   u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;  in amdgpu_gmc_pdb0_alloc()
    214  if (mc->xgmi.num_physical_nodes == 0) {  in amdgpu_gmc_vram_location()
    242  u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;  in amdgpu_gmc_sysvm_location()
    243  mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;  in amdgpu_gmc_sysvm_location()
    244  mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;  in amdgpu_gmc_sysvm_location()
    457  if (!adev->gmc.xgmi.connected_to_cpu)  in amdgpu_gmc_ras_late_init()
    458  adev->gmc.xgmi.ras_funcs = &xgmi_ras_funcs;  in amdgpu_gmc_ras_late_init()
    460  if (adev->gmc.xgmi.ras_funcs &&  in amdgpu_gmc_ras_late_init()
    461  adev->gmc.xgmi.ras_funcs->ras_late_init) {  in amdgpu_gmc_ras_late_init()
    462  r = adev->gmc.xgmi.ras_funcs->ras_late_init(adev);  in amdgpu_gmc_ras_late_init()
    [all …]
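The three amdgpu_gmc_sysvm_location() matches are the whole per-node VRAM layout: the hive presents one linear aperture of num_physical_nodes segments, and each node's window starts at its node ID times the segment size. Restated as a self-contained arithmetic example (the 32 GiB segment size and the node numbers are illustrative only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t node_segment_size = 32ULL << 30;	/* 32 GiB, illustrative */
	uint64_t num_physical_nodes = 4;
	uint64_t physical_node_id = 2;			/* this node's position */

	uint64_t hive_vram_end = node_segment_size * num_physical_nodes - 1;
	uint64_t vram_start = node_segment_size * physical_node_id;
	uint64_t vram_end = vram_start + node_segment_size - 1;

	printf("hive aperture ends at 0x%llx; this node owns 0x%llx-0x%llx\n",
	       (unsigned long long)hive_vram_end,
	       (unsigned long long)vram_start,
	       (unsigned long long)vram_end);
	return 0;
}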
|
aldebaran.c
    51   if (adev->gmc.xgmi.connected_to_cpu) {  in aldebaran_get_reset_handler()
    150  gmc.xgmi.head) {  in aldebaran_mode2_perform_reset()
    159  gmc.xgmi.head) {  in aldebaran_mode2_perform_reset()
    161  if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {  in aldebaran_mode2_perform_reset()
    179  gmc.xgmi.head) {  in aldebaran_mode2_perform_reset()
    180  if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {  in aldebaran_mode2_perform_reset()
    190  gmc.xgmi.head) {  in aldebaran_mode2_perform_reset()
    329  gmc.xgmi.head) {  in aldebaran_mode2_restore_hwcontext()
    347  tmp_adev->gmc.xgmi.num_physical_nodes > 1)  in aldebaran_mode2_restore_hwcontext()
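The repeated "gmc.xgmi.head) {" fragments are truncated list_for_each_entry() invocations: mode-2 reset walks every device in the hive through the gmc.xgmi.head link, gating multi-node-only steps on num_physical_nodes > 1. A sketch of one such walk, reusing the hive struct sketched above; do_reset_step() is a hypothetical stand-in for the per-device work:

static void hive_reset_walk_sketch(struct amdgpu_hive_info *hive)
{
	struct amdgpu_device *tmp_adev;

	/* each device is linked into the hive through gmc.xgmi.head */
	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		if (tmp_adev->gmc.xgmi.num_physical_nodes > 1)
			do_reset_step(tmp_adev);	/* hypothetical step */
	}
}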
|
gmc_v9_0.c
    662   !adev->gmc.xgmi.connected_to_cpu) {  in gmc_v9_0_set_irq_funcs()
    747   if (adev->gmc.xgmi.num_physical_nodes &&  in gmc_v9_0_flush_gpu_tlb()
    876   bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&  in gmc_v9_0_flush_gpu_tlb_pasid()
    1179  if (!adev->gmc.xgmi.connected_to_cpu)  in gmc_v9_0_set_umc_funcs()
    1238  if (!adev->gmc.xgmi.connected_to_cpu)  in gmc_v9_0_set_mca_funcs()
    1252  adev->gmc.xgmi.supported = true;  in gmc_v9_0_early_init()
    1255  adev->gmc.xgmi.supported = true;  in gmc_v9_0_early_init()
    1256  adev->gmc.xgmi.connected_to_cpu =  in gmc_v9_0_early_init()
    1322  base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;  in gmc_v9_0_vram_gtt_location()
    1323  if (adev->gmc.xgmi.connected_to_cpu) {  in gmc_v9_0_vram_gtt_location()
    [all …]
|
amdgpu_xgmi.h
    70   adev->gmc.xgmi.hive_id &&  in amdgpu_xgmi_same_hive()
    71   adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);  in amdgpu_xgmi_same_hive()
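These two lines are the core of the amdgpu_xgmi_same_hive() predicate: two devices share a hive only if the first has latched a nonzero hive_id and the second reports the same one. A minimal reconstruction from the matched lines; the real inline may carry additional guards (e.g. a module-parameter or self-comparison check) that the snippet truncates:

static inline bool same_hive_sketch(struct amdgpu_device *adev,
				    struct amdgpu_device *bo_adev)
{
	return adev->gmc.xgmi.hive_id &&
	       adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id;
}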
|
psp_v11_0.c
    154  adev->psp.xgmi.feature_version = le32_to_cpu(ta_hdr->xgmi.fw_version);  in psp_v11_0_init_microcode()
    155  adev->psp.xgmi.size_bytes = le32_to_cpu(ta_hdr->xgmi.size_bytes);  in psp_v11_0_init_microcode()
    156  adev->psp.xgmi.start_addr = (uint8_t *)ta_hdr +  in psp_v11_0_init_microcode()
    161  adev->psp.ras.start_addr = (uint8_t *)adev->psp.xgmi.start_addr +  in psp_v11_0_init_microcode()
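The matches show the TA microcode parse: per-TA fields in the firmware header are little-endian, the XGMI TA payload is located by offset from the header, and the RAS TA payload is packed immediately after it. A sketch of that flow; the header and descriptor layouts here are simplified stand-ins for the real firmware-header structs:

struct ta_desc_sketch {
	__le32 fw_version;
	__le32 size_bytes;
	__le32 offset_bytes;
};

struct ta_hdr_sketch {
	struct ta_desc_sketch xgmi;	/* real header carries more TAs */
};

struct psp_bin_desc_sketch {
	u32 feature_version;
	u32 size_bytes;
	u8 *start_addr;
};

static void parse_xgmi_ta_sketch(struct psp_bin_desc_sketch *xgmi,
				 struct psp_bin_desc_sketch *ras,
				 struct ta_hdr_sketch *ta_hdr)
{
	xgmi->feature_version = le32_to_cpu(ta_hdr->xgmi.fw_version);
	xgmi->size_bytes = le32_to_cpu(ta_hdr->xgmi.size_bytes);
	xgmi->start_addr = (u8 *)ta_hdr +
			   le32_to_cpu(ta_hdr->xgmi.offset_bytes);
	/* the RAS TA payload follows the XGMI TA in the same blob */
	ras->start_addr = xgmi->start_addr + xgmi->size_bytes;
}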
|
gfxhub_v2_1.c
    520  adev->gmc.xgmi.num_physical_nodes = max_region + 1;  in gfxhub_v2_1_get_xgmi_info()
    521  if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)  in gfxhub_v2_1_get_xgmi_info()
    524  adev->gmc.xgmi.physical_node_id =  in gfxhub_v2_1_get_xgmi_info()
    526  if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)  in gfxhub_v2_1_get_xgmi_info()
    529  adev->gmc.xgmi.node_segment_size = REG_GET_FIELD(  in gfxhub_v2_1_get_xgmi_info()
|
amdgpu_device.c
    1290  if (adev->gmc.xgmi.pending_reset)  in amdgpu_device_need_post()
    2428  if (adev->gmc.xgmi.num_physical_nodes > 1)  in amdgpu_device_ip_init()
    2432  if (!adev->gmc.xgmi.pending_reset)  in amdgpu_device_ip_init()
    2658  adev->gmc.xgmi.num_physical_nodes > 1)  in amdgpu_device_ip_late_init()
    2661  if (adev->gmc.xgmi.num_physical_nodes > 1) {  in amdgpu_device_ip_late_init()
    2677  if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {  in amdgpu_device_ip_late_init()
    2771  if (adev->gmc.xgmi.num_physical_nodes > 1)  in amdgpu_device_ip_fini()
    2916  if (adev->gmc.xgmi.pending_reset &&  in amdgpu_device_ip_suspend_phase2()
    3592  if (adev->gmc.xgmi.num_physical_nodes) {  in amdgpu_device_init()
    3594  adev->gmc.xgmi.pending_reset = true;  in amdgpu_device_init()
    [all …]
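The late-init match at 2677 is a "last one in" check: each dGPU's late init increments a probed-device count, and only the device that makes that count equal to the hive's node count triggers the hive-wide step. A sketch of that shape; schedule_hive_work() is a hypothetical stand-in for whatever hive-wide work the driver queues there:

static void maybe_run_hive_wide_step_sketch(struct amdgpu_device *adev)
{
	/* only meaningful on multi-node hives */
	if (adev->gmc.xgmi.num_physical_nodes <= 1)
		return;

	/* the last dGPU to finish late init kicks off the hive-wide work */
	if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes)
		schedule_hive_work();	/* hypothetical hive-wide step */
}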
|
amdgpu_amdkfd.c
    542  return adev->gmc.xgmi.hive_id;  in amdgpu_amdkfd_get_hive_id()
    560  adev->gmc.xgmi.physical_node_id,  in amdgpu_amdkfd_get_xgmi_hops_count()
    561  peer_adev->gmc.xgmi.physical_node_id, ret);  in amdgpu_amdkfd_get_xgmi_hops_count()
    582  adev->gmc.xgmi.physical_node_id,  in amdgpu_amdkfd_get_xgmi_bandwidth_mbytes()
    583  peer_adev->gmc.xgmi.physical_node_id, num_links);  in amdgpu_amdkfd_get_xgmi_bandwidth_mbytes()
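The KFD bridge never probes hardware itself; it hands KFD the topology the GMC code already latched. The hive-ID accessor matched at 542 is, in essence, a one-line pass-through (the exported signature may take KFD's own device handle rather than an amdgpu_device pointer):

static u64 kfd_get_hive_id_sketch(struct amdgpu_device *adev)
{
	return adev->gmc.xgmi.hive_id;	/* matched line 542 */
}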
|
amdgpu_ras.c
    866   if (adev->gmc.xgmi.ras_funcs &&  in amdgpu_ras_query_error_status()
    867   adev->gmc.xgmi.ras_funcs->query_ras_error_count)  in amdgpu_ras_query_error_status()
    868   adev->gmc.xgmi.ras_funcs->query_ras_error_count(adev, &err_data);  in amdgpu_ras_query_error_status()
    1012  if (adev->gmc.xgmi.num_physical_nodes > 1) {  in amdgpu_ras_error_inject()
    1738  if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {  in amdgpu_ras_do_recovery()
    1742  list_add_tail(&adev->gmc.xgmi.head, &device_list);  in amdgpu_ras_do_recovery()
    1747  device_list_handle, gmc.xgmi.head) {  in amdgpu_ras_do_recovery()
    1998  if (adev->gmc.xgmi.pending_reset)  in amdgpu_ras_recovery_init()
    2117  if (!adev->gmc.xgmi.connected_to_cpu) {  in amdgpu_ras_check_supported()
    2226  if (!adev->gmc.xgmi.connected_to_cpu)  in amdgpu_ras_init()
    [all …]
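The amdgpu_ras_do_recovery() matches (1738, 1742, 1747) show how the recovery list is built: a device in a multi-node hive recovers the whole hive's device list, while a standalone device becomes a single-entry list through its own gmc.xgmi.head link. A sketch of that selection, with locking and error paths omitted:

static struct list_head *
build_recovery_list_sketch(struct amdgpu_device *adev,
			   struct amdgpu_hive_info *hive,
			   struct list_head *device_list)
{
	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
		return &hive->device_list;	/* recover the whole hive */

	/* standalone device: single-entry list built from its head link */
	INIT_LIST_HEAD(device_list);
	list_add_tail(&adev->gmc.xgmi.head, device_list);
	return device_list;
}

Recovery then iterates the returned handle with list_for_each_entry(..., gmc.xgmi.head), as the truncated match at 1747 shows.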
|
amdgpu_psp.c
    970   psp_copy_fw(psp, psp->xgmi.start_addr, psp->xgmi.size_bytes);  in psp_xgmi_load()
    974   psp->xgmi.size_bytes,  in psp_xgmi_load()
    999   (adev->asic_type == CHIP_ALDEBARAN && adev->gmc.xgmi.connected_to_cpu))  in psp_xgmi_unload()
    1048  !psp->xgmi.size_bytes ||  in psp_xgmi_initialize()
    1049  !psp->xgmi.start_addr  in psp_xgmi_initialize()
    1121  psp->xgmi.feature_version >= 0x2000000b;  in psp_xgmi_peer_link_info_supported()
    1136  uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;  in psp_xgmi_reflect_topology_info()
    1142  list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {  in psp_xgmi_reflect_topology_info()
    1146  if (mirror_adev->gmc.xgmi.node_id != dst_node_id)  in psp_xgmi_reflect_topology_info()
    1305  if (psp->adev->gmc.xgmi.connected_to_cpu)  in psp_ras_load()
    [all …]
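The match at 1121 gates the per-link topology query on the XGMI TA firmware capability: only feature versions from 0x2000000b up can report peer link info. A minimal restatement of that check; the snippet shows only the version half, so the full predicate may carry an ASIC gate as well:

static bool peer_link_info_supported_sketch(struct psp_context *psp)
{
	return psp->xgmi.feature_version >= 0x2000000b;
}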
|
gmc_v10_0.c
    750  base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;  in gmc_v10_0_vram_gtt_location()
    761  adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;  in gmc_v10_0_vram_gtt_location()
    931  if (adev->gmc.xgmi.supported) {  in gmc_v10_0_sw_init()
|
amdgpu_gmc.h
    254  struct amdgpu_xgmi xgmi;  (member)
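Every gmc.xgmi.* access in this directory resolves through this member of struct amdgpu_gmc. A condensed view of struct amdgpu_xgmi, limited to the fields the matches above exercise (the real definition, also in amdgpu_gmc.h, carries more):

struct amdgpu_xgmi {
	u64 node_id;			/* fabric-unique device ID */
	u64 hive_id;			/* shared by all devices in one hive */
	u64 node_segment_size;		/* per-node VRAM slice in the hive map */
	u32 physical_node_id;		/* this node's position in the hive */
	u32 num_physical_nodes;		/* node count in the hive */
	struct list_head head;		/* link into the hive's device_list */
	bool supported;
	bool connected_to_cpu;		/* coherent CPU link (e.g. Aldebaran) */
	bool pending_reset;
	const struct amdgpu_xgmi_ras_funcs *ras_funcs;
};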
|
amdgpu_ib.c
    384  } else if (adev->gmc.xgmi.hive_id) {  in amdgpu_ib_ring_tests()
|
amdgpu_ucode.h
    139  struct psp_fw_legacy_bin_desc xgmi;  (member)
|
amdgpu_psp.h
    329  struct psp_bin_desc xgmi;  (member)
|
gfxhub_v1_0.c
    207  if (adev->gmc.xgmi.connected_to_cpu) {  in gfxhub_v1_0_init_cache_regs()
|
amdgpu_vram_mgr.c
    466  if (adev->gmc.xgmi.connected_to_cpu)  in amdgpu_vram_mgr_new()
|
soc15.c
    551  if (adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu)  in soc15_asic_reset_method()
|
amdgpu_virt.c
    537  POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI, adev->psp.xgmi.feature_version);  in amdgpu_virt_populate_vf2pf_ucode_info()
|
amdgpu_ucode.c
    530  FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi.feature_version);
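FW_VERSION_ATTR expands to a read-only sysfs attribute whose show callback prints the named field. A hand-expanded sketch of the matched instantiation; the macro's exact internals are simplified here (DEVICE_ATTR_RO yields the same 0444 mode the macro use requests):

static ssize_t ta_xgmi_fw_version_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "0x%08x\n", adev->psp.xgmi.feature_version);
}
static DEVICE_ATTR_RO(ta_xgmi_fw_version);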
|
amdgpu_amdkfd_gpuvm.c
    432  if (adev->gmc.xgmi.connected_to_cpu ||  in get_pte_flags()
    440  if (adev->gmc.xgmi.connected_to_cpu)  in get_pte_flags()
|
amdgpu_object.c
    1090  if (!adev->gmc.xgmi.connected_to_cpu) {  in amdgpu_bo_init()
|
nv.c
    706  adev->gmc.xgmi.supported = true;  in nv_set_ip_blocks()
|