Lines matching refs:gmc (all hits fall in drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c; the leading number on each hit is the source line in that file, and the trailing "in ...()" names the enclosing function)

220 	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);  in amdgpu_xgmi_show_device_id()
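Line 220 is the body of the sysfs show callback that exposes the fabric node ID. A minimal sketch of the surrounding wiring, assuming the v5.x-era drvdata layout and the attribute name xgmi_device_id:

static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
                                          struct device_attribute *attr,
                                          char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;

        return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);
}

/* read-only attribute, registered per device when it joins a hive */
static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);

Userspace then reads the 64-bit node ID as a decimal string from the device's sysfs directory.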
330 if (!adev->gmc.xgmi.hive_id) in amdgpu_get_xgmi_hive()
342 if (hive->hive_id == adev->gmc.xgmi.hive_id) in amdgpu_get_xgmi_hive()
366 hive->hive_id = adev->gmc.xgmi.hive_id; in amdgpu_get_xgmi_hive()
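Lines 330-366 outline the lookup-or-create logic for hives. A condensed sketch, assuming the file-local bookkeeping (xgmi_hives[], hive_count, xgmi_mutex, AMDGPU_MAX_XGMI_HIVE) used by the driver in this era; sysfs setup for a new hive is elided:

struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock)
{
        int i;
        struct amdgpu_hive_info *tmp;

        if (!adev->gmc.xgmi.hive_id)
                return NULL;

        mutex_lock(&xgmi_mutex);

        /* return an existing hive whose ID matches this device's hive_id */
        for (i = 0; i < hive_count; ++i) {
                tmp = &xgmi_hives[i];
                if (tmp->hive_id == adev->gmc.xgmi.hive_id) {
                        if (lock)
                                mutex_lock(&tmp->hive_lock);
                        mutex_unlock(&xgmi_mutex);
                        return tmp;
                }
        }
        if (i >= AMDGPU_MAX_XGMI_HIVE) {
                mutex_unlock(&xgmi_mutex);
                return NULL;
        }

        /* otherwise claim a new slot and record this device's hive ID */
        tmp = &xgmi_hives[hive_count++];
        tmp->hive_id = adev->gmc.xgmi.hive_id;
        INIT_LIST_HEAD(&tmp->device_list);
        mutex_init(&tmp->hive_lock);
        if (lock)
                mutex_lock(&tmp->hive_lock);
        mutex_unlock(&xgmi_mutex);
        return tmp;
}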
432 request_adev->gmc.xgmi.node_id, in amdgpu_xgmi_set_pstate()
433 request_adev->gmc.xgmi.hive_id, ret); in amdgpu_xgmi_set_pstate()
461 adev->gmc.xgmi.node_id, in amdgpu_xgmi_update_topology()
462 adev->gmc.xgmi.hive_id, ret); in amdgpu_xgmi_update_topology()
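Lines 461-462 are the failure path of the topology push. A sketch of the whole function, assuming the hive stores its shared topology in hive->topology_info:

int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive,
                                struct amdgpu_device *adev)
{
        int ret;

        /* each device's PSP must be told the latest hive topology */
        ret = psp_xgmi_set_topology_info(&adev->psp,
                                         hive->number_devices,
                                         &hive->topology_info);
        if (ret)
                dev_err(adev->dev,
                        "XGMI: Set topology failure on device %llx, hive %llx, ret %d",
                        adev->gmc.xgmi.node_id,
                        adev->gmc.xgmi.hive_id, ret);

        return ret;
}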
475 if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id) in amdgpu_xgmi_get_hops_count()
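Line 475 is the match inside the hop-count lookup; the function is short enough to sketch in full (top_info and num_hops per the PSP topology structs of this era):

int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
                               struct amdgpu_device *peer_adev)
{
        struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
        int i;

        /* the PSP-reported topology has one entry per hive member */
        for (i = 0; i < top->num_nodes; i++)
                if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
                        return top->nodes[i].num_hops;

        return -EINVAL;
}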
489 if (!adev->gmc.xgmi.supported) in amdgpu_xgmi_add_device()
500 ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id); in amdgpu_xgmi_add_device()
507 ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id); in amdgpu_xgmi_add_device()
514 adev->gmc.xgmi.hive_id = 16; in amdgpu_xgmi_add_device()
515 adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16; in amdgpu_xgmi_add_device()
523 adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id); in amdgpu_xgmi_add_device()
530 list_add_tail(&adev->gmc.xgmi.head, &hive->device_list); in amdgpu_xgmi_add_device()
539 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_add_device()
544 adev->gmc.xgmi.node_id; in amdgpu_xgmi_add_device()
553 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_add_device()
559 tmp_adev->gmc.xgmi.node_id, in amdgpu_xgmi_add_device()
560 tmp_adev->gmc.xgmi.hive_id, ret); in amdgpu_xgmi_add_device()
576 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id); in amdgpu_xgmi_add_device()
580 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id, in amdgpu_xgmi_add_device()
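Lines 489-580 span most of amdgpu_xgmi_add_device(). A condensed sketch of the flow, with error messages and the PSP XGMI-TA initialization elided; the guard that chooses PSP-queried IDs over the fixed fallbacks is an assumption that varies by kernel version:

int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
        struct psp_xgmi_topology_info *top_info;
        struct amdgpu_hive_info *hive;
        struct amdgpu_xgmi *entry;
        struct amdgpu_device *tmp_adev = NULL;
        int count = 0, ret = 0;

        if (!adev->gmc.xgmi.supported)
                return 0;

        /* IDs come from the PSP when present, else fixed fallbacks */
        if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
                ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
                if (ret)
                        goto exit;
                ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
                if (ret)
                        goto exit;
        } else {
                adev->gmc.xgmi.hive_id = 16;
                adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
        }

        hive = amdgpu_get_xgmi_hive(adev, 1);
        if (!hive) {
                ret = -EINVAL;
                goto exit;
        }

        /* join the hive and rebuild this device's node list */
        top_info = &adev->psp.xgmi_context.top_info;
        list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
        list_for_each_entry(entry, &hive->device_list, head)
                top_info->nodes[count++].node_id = entry->node_id;
        top_info->num_nodes = count;
        hive->number_devices = count;

        /* push the updated topology to every member's PSP */
        list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
                if (tmp_adev != adev) {
                        top_info = &tmp_adev->psp.xgmi_context.top_info;
                        top_info->nodes[count - 1].node_id = adev->gmc.xgmi.node_id;
                        top_info->num_nodes = count;
                }
                ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
                if (ret)
                        break;
        }

        /* (the driver then re-queries each member's view via
         *  psp_xgmi_get_topology_info(), lines 553-560; omitted here) */

        mutex_unlock(&hive->hive_lock);
exit:
        if (!ret)
                dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
                         adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
        else
                dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
                        adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
                        ret);
        return ret;
}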
591 if (!adev->gmc.xgmi.supported) in amdgpu_xgmi_remove_device()
602 list_del(&adev->gmc.xgmi.head); in amdgpu_xgmi_remove_device()
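Lines 591-602 show the removal path. A simplified sketch; the real teardown (sysfs cleanup, destroying an empty hive's state) varies by version and is reduced here to the list unlink and member-count bookkeeping:

void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
        struct amdgpu_hive_info *hive;

        if (!adev->gmc.xgmi.supported)
                return;

        hive = amdgpu_get_xgmi_hive(adev, 1);
        if (!hive)
                return;

        /* unlink this device from the hive's membership list */
        list_del(&adev->gmc.xgmi.head);
        hive->number_devices--;
        mutex_unlock(&hive->hive_lock);

        /* last one out tears the hive down (details elided) */
        if (!hive->number_devices)
                mutex_destroy(&hive->hive_lock);
}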
630 if (!adev->gmc.xgmi.supported || in amdgpu_xgmi_ras_late_init()
631 adev->gmc.xgmi.num_physical_nodes == 0) in amdgpu_xgmi_ras_late_init()
636 if (!adev->gmc.xgmi.ras_if) { in amdgpu_xgmi_ras_late_init()
637 adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); in amdgpu_xgmi_ras_late_init()
638 if (!adev->gmc.xgmi.ras_if) in amdgpu_xgmi_ras_late_init()
640 adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL; in amdgpu_xgmi_ras_late_init()
641 adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; in amdgpu_xgmi_ras_late_init()
642 adev->gmc.xgmi.ras_if->sub_block_index = 0; in amdgpu_xgmi_ras_late_init()
643 strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl"); in amdgpu_xgmi_ras_late_init()
645 ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if; in amdgpu_xgmi_ras_late_init()
646 r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if, in amdgpu_xgmi_ras_late_init()
648 if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) { in amdgpu_xgmi_ras_late_init()
649 kfree(adev->gmc.xgmi.ras_if); in amdgpu_xgmi_ras_late_init()
650 adev->gmc.xgmi.ras_if = NULL; in amdgpu_xgmi_ras_late_init()
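Lines 630-650 quote nearly the whole RAS init routine, reconstructed below; the fs_info sysfs/debugfs names are filled in from the usual xgmi_wafl pattern and are an assumption:

int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
{
        int r;
        struct ras_ih_if ih_info = {
                .cb = NULL,
        };
        struct ras_fs_if fs_info = {
                .sysfs_name = "xgmi_wafl_err_count",
                .debugfs_name = "xgmi_wafl_err_inject",
        };

        /* RAS on the XGMI/WAFL link only matters on multi-node parts */
        if (!adev->gmc.xgmi.supported ||
            adev->gmc.xgmi.num_physical_nodes == 0)
                return 0;

        if (!adev->gmc.xgmi.ras_if) {
                adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
                if (!adev->gmc.xgmi.ras_if)
                        return -ENOMEM;
                adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
                adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                adev->gmc.xgmi.ras_if->sub_block_index = 0;
                strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl");
        }
        ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
        r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
                                 &fs_info, &ih_info);
        /* drop the block descriptor again if RAS is unusable here */
        if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) {
                kfree(adev->gmc.xgmi.ras_if);
                adev->gmc.xgmi.ras_if = NULL;
        }

        return r;
}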
659 adev->gmc.xgmi.ras_if) { in amdgpu_xgmi_ras_fini()
660 struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if; in amdgpu_xgmi_ras_fini()
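Lines 659-660 are the guard of the RAS teardown. A sketch of the full function, assuming the amdgpu_ras_late_fini() counterpart to the init call above:

void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
{
        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
            adev->gmc.xgmi.ras_if) {
                struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if;
                struct ras_ih_if ih_info = {
                        .cb = NULL,
                };

                /* unhook the block from RAS, then free the descriptor */
                amdgpu_ras_late_fini(adev, ras_if, &ih_info);
                kfree(ras_if);
        }
}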
673 struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi; in amdgpu_xgmi_get_relative_phy_addr()
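Line 673 begins the address translation helper. In this era the whole function is a one-line offset calculation, sketched here assuming node_segment_size holds each node's DRAM segment span:

uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
                                           uint64_t addr)
{
        struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;

        /* subtract this node's segment base to get a node-relative address */
        return addr - xgmi->physical_node_id * xgmi->node_segment_size;
}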