Lines matching refs:xgmi (all hits below are in the amdgpu driver's XGMI support code, drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c)
244 return sysfs_emit(buf, "%llu\n", adev->gmc.xgmi.node_id); in amdgpu_xgmi_show_device_id()
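The match above is a sysfs read handler: the XGMI node_id is written into the attribute buffer with sysfs_emit(). A minimal sketch of the same pattern, with struct my_state and the device_id attribute as illustrative stand-ins for the driver's internals:

```c
/* Minimal sketch of the sysfs read path matched above; struct
 * my_state and the attribute name are illustrative, not the
 * driver's actual definitions.
 */
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/types.h>

struct my_state {
	u64 my_node_id;
};

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct my_state *st = dev_get_drvdata(dev);

	/* sysfs_emit() bounds output to PAGE_SIZE and returns the
	 * byte count, as a show() callback must. */
	return sysfs_emit(buf, "%llu\n", st->my_node_id);
}
static DEVICE_ATTR_RO(device_id);
```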
359 if (!adev->gmc.xgmi.hive_id) in amdgpu_get_xgmi_hive()
370 if (hive->hive_id == adev->gmc.xgmi.hive_id) in amdgpu_get_xgmi_hive()
423 hive->hive_id = adev->gmc.xgmi.hive_id; in amdgpu_get_xgmi_hive()
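The three amdgpu_get_xgmi_hive() matches outline a lookup-or-create: walk the known hives, return the one whose hive_id matches the device's (line 370), otherwise allocate a fresh hive and stamp it with the device's hive_id (line 423). A hedged sketch of that shape; hive_list, struct hive, and find_or_create_hive() are assumptions rather than the driver's actual symbols, and locking is omitted for brevity:

```c
#include <linux/list.h>
#include <linux/slab.h>

/* Illustrative lookup-or-create mirroring the matched lines. */
struct hive {
	u64 hive_id;
	struct list_head node;
	struct list_head device_list;
};

static LIST_HEAD(hive_list);

static struct hive *find_or_create_hive(u64 hive_id)
{
	struct hive *hive;

	list_for_each_entry(hive, &hive_list, node)
		if (hive->hive_id == hive_id)	/* cf. line 370 */
			return hive;

	hive = kzalloc(sizeof(*hive), GFP_KERNEL);
	if (!hive)
		return NULL;

	hive->hive_id = hive_id;		/* cf. line 423 */
	INIT_LIST_HEAD(&hive->device_list);
	list_add_tail(&hive->node, &hive_list);
	return hive;
}
```

Keying members by hive_id means a device discovered later can find its siblings without any central registry; the first device to report a given hive_id creates the hive.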
494 request_adev->gmc.xgmi.node_id, in amdgpu_xgmi_set_pstate()
495 request_adev->gmc.xgmi.hive_id, ret); in amdgpu_xgmi_set_pstate()
526 adev->gmc.xgmi.node_id, in amdgpu_xgmi_update_topology()
527 adev->gmc.xgmi.hive_id, ret); in amdgpu_xgmi_update_topology()
547 if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id) in amdgpu_xgmi_get_hops_count()
559 if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id) in amdgpu_xgmi_get_num_links()
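amdgpu_xgmi_get_hops_count() and amdgpu_xgmi_get_num_links() share one idiom: scan the topology table for the entry whose node_id equals the peer device's, then read the per-node field (num_hops or num_links). A simplified sketch; struct topo_info here is a stand-in for the PSP topology structure the driver actually scans:

```c
#include <linux/errno.h>
#include <linux/types.h>

/* Simplified stand-in for the PSP-provided topology table. */
struct topo_node {
	u64 node_id;
	u8  num_hops;
	u8  num_links;
};

struct topo_info {
	int num_nodes;
	struct topo_node nodes[64];	/* array size is illustrative */
};

static int get_hops_to_peer(const struct topo_info *top, u64 peer_node_id)
{
	int i;

	/* Same scan as lines 547/559: match on node_id, read the field. */
	for (i = 0; i < top->num_nodes; i++)
		if (top->nodes[i].node_id == peer_node_id)
			return top->nodes[i].num_hops;

	return -EINVAL;	/* peer is not part of this hive's topology */
}
```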
576 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_initialize_hive_get_data_partition()
599 if (!adev->gmc.xgmi.supported) in amdgpu_xgmi_add_device()
602 if (!adev->gmc.xgmi.pending_reset && in amdgpu_xgmi_add_device()
611 ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id); in amdgpu_xgmi_add_device()
618 ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id); in amdgpu_xgmi_add_device()
625 adev->gmc.xgmi.hive_id = 16; in amdgpu_xgmi_add_device()
626 adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16; in amdgpu_xgmi_add_device()
634 adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id); in amdgpu_xgmi_add_device()
641 list_add_tail(&adev->gmc.xgmi.head, &hive->device_list); in amdgpu_xgmi_add_device()
649 if (!adev->gmc.xgmi.pending_reset && in amdgpu_xgmi_add_device()
651 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_add_device()
656 adev->gmc.xgmi.node_id; in amdgpu_xgmi_add_device()
665 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_add_device()
671 tmp_adev->gmc.xgmi.node_id, in amdgpu_xgmi_add_device()
672 tmp_adev->gmc.xgmi.hive_id, ret); in amdgpu_xgmi_add_device()
687 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_add_device()
693 tmp_adev->gmc.xgmi.node_id, in amdgpu_xgmi_add_device()
694 tmp_adev->gmc.xgmi.hive_id, ret); in amdgpu_xgmi_add_device()
707 if (!ret && !adev->gmc.xgmi.pending_reset) in amdgpu_xgmi_add_device()
716 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id); in amdgpu_xgmi_add_device()
720 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id, in amdgpu_xgmi_add_device()
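Read in order, the amdgpu_xgmi_add_device() matches trace the join sequence: bail out unless XGMI is supported (line 599), fetch hive_id and node_id from the PSP (lines 611 and 618) or fall back to fixed IDs when that path is unavailable (lines 625-626), link the device into the hive's list (line 641), then have every member refresh its view of the grown topology (the loops at lines 651, 665 and 687). A condensed sketch under those assumptions; every type and helper below is an illustrative stand-in, struct hive and find_or_create_hive() come from the earlier sketch, and error unwinding is trimmed:

```c
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/types.h>

struct hive;	/* from the lookup-or-create sketch above */

struct dev_state {
	bool xgmi_supported;
	u64  hive_id;
	u64  node_id;
	u64  physical_node_id;
	struct list_head head;
};

/* Hypothetical helpers standing in for the PSP and topology calls
 * named in the matched lines. */
static bool psp_available(struct dev_state *adev);
static int  psp_get_hive_id(struct dev_state *adev, u64 *id);
static int  psp_get_node_id(struct dev_state *adev, u64 *id);
static struct hive *find_or_create_hive(u64 hive_id);
static void update_topology(struct hive *hive, struct dev_state *member);
static void hive_add_member(struct hive *hive, struct dev_state *adev);
static void hive_for_each_member(struct hive *hive,
				 void (*fn)(struct hive *,
					    struct dev_state *));

static int xgmi_join_hive(struct dev_state *adev)
{
	struct hive *hive;
	int ret;

	if (!adev->xgmi_supported)	/* cf. line 599 */
		return 0;

	if (psp_available(adev)) {
		ret = psp_get_hive_id(adev, &adev->hive_id);
		if (ret)
			return ret;
		ret = psp_get_node_id(adev, &adev->node_id);
		if (ret)
			return ret;
	} else {
		/* No PSP path: fixed fallback IDs, per lines 625-626. */
		adev->hive_id = 16;
		adev->node_id = adev->physical_node_id + 16;
	}

	hive = find_or_create_hive(adev->hive_id);
	if (!hive)
		return -ENOMEM;

	hive_add_member(hive, adev);	/* cf. line 641 */

	/* Each member refreshes its topology now that the hive grew
	 * (cf. the loops at lines 665 and 687). */
	hive_for_each_member(hive, update_topology);

	return 0;
}
```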
731 if (!adev->gmc.xgmi.supported) in amdgpu_xgmi_remove_device()
742 list_del(&adev->gmc.xgmi.head); in amdgpu_xgmi_remove_device()
762 if (!adev->gmc.xgmi.supported || in amdgpu_xgmi_ras_late_init()
763 adev->gmc.xgmi.num_physical_nodes == 0) in amdgpu_xgmi_ras_late_init()
766 adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev); in amdgpu_xgmi_ras_late_init()
774 struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi; in amdgpu_xgmi_get_relative_phy_addr() local
775 return (addr + xgmi->physical_node_id * xgmi->node_segment_size); in amdgpu_xgmi_get_relative_phy_addr()
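amdgpu_xgmi_get_relative_phy_addr() is pure arithmetic: each node's window is offset by physical_node_id * node_segment_size. A self-contained worked example of that formula; the 4 GiB segment size and the concrete values are invented for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as line 775; all values below are illustrative. */
int main(void)
{
	uint64_t node_segment_size = 1ULL << 32;  /* assumed 4 GiB segment */
	uint64_t physical_node_id  = 2;
	uint64_t addr              = 0x1000;

	/* relative = addr + physical_node_id * node_segment_size */
	uint64_t rel = addr + physical_node_id * node_segment_size;

	printf("0x%llx\n", (unsigned long long)rel);  /* prints 0x200001000 */
	return 0;
}
```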
920 adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev); in amdgpu_xgmi_query_ras_error_count()
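Both RAS matches (lines 766 and 920) reach the hardware through the same indirection: the block's hw_ops table exposes a reset_ras_error_count hook, invoked after late init and after an error-count query. A minimal sketch of that vtable pattern, with simplified stand-ins for the driver's RAS block types:

```c
/* Illustrative hw_ops indirection; struct names are simplified
 * stand-ins for the driver's RAS block structures.
 */
struct dev_state;

struct ras_hw_ops {
	void (*reset_ras_error_count)(struct dev_state *adev);
};

struct ras_block {
	const struct ras_hw_ops *hw_ops;
};

static void ras_reset_counters(struct dev_state *adev,
			       struct ras_block *blk)
{
	/* The hook is optional, so guard before clearing counters. */
	if (blk->hw_ops && blk->hw_ops->reset_ras_error_count)
		blk->hw_ops->reset_ras_error_count(adev);
}
```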