Lines matching full:gpu in drivers/gpu/drm/amd/amdkfd/kfd_topology.c
107 return top_dev->gpu; in kfd_device_by_id()
118 if (top_dev->gpu && top_dev->gpu->pdev == pdev) { in kfd_device_by_pci_dev()
119 device = top_dev->gpu; in kfd_device_by_pci_dev()
136 if (top_dev->gpu && top_dev->gpu->adev == adev) { in kfd_device_by_adev()
137 device = top_dev->gpu; in kfd_device_by_adev()
296 if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu)) in iolink_show()
336 if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu)) in mem_show()
369 if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu)) in kfd_cache_show()
450 if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) in node_show()
459 if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) in node_show()
466 if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) in node_show()
471 dev->gpu ? dev->node_props.simd_count : 0); in node_show()
527 if (dev->gpu) { in node_show()
529 __ilog2_u32(dev->gpu->device_info.num_of_watch_points); in node_show()
541 if (dev->gpu->adev->asic_type == CHIP_TONGA) in node_show()
551 dev->gpu->mec_fw_version); in node_show()
555 dev->gpu->sdma_fw_version); in node_show()
557 dev->gpu->adev->unique_id); in node_show()
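The node_show() hits above feed the per-node sysfs attributes; on a system with amdkfd loaded those values surface in the node's properties file, normally /sys/class/kfd/kfd/topology/nodes/<N>/properties. A small user-space sketch that dumps node 0 (path assumed from the usual kfd sysfs layout, not a stability guarantee):

/*
 * Sketch: dump the properties file that node_show() populates for node 0.
 * Adjust the node index for multi-GPU systems.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/kfd/kfd/topology/nodes/0/properties";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "simd_count 256", "hive_id 0", ... */
	fclose(f);
	return 0;
}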
1022 if (!kdev->gpu) { in kfd_add_non_crat_information()
1026 /* TODO: For GPU node, rearrange code from kfd_topology_add_device */ in kfd_add_non_crat_information()
1031 * and GPU cores are present.
1135 /* For nodes with GPU, this information gets added in kfd_topology_init()
1136 * when GPU is detected (kfd_topology_add_device). in kfd_topology_init()
1160 static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) in kfd_generate_gpu_id() argument
1167 if (!gpu) in kfd_generate_gpu_id()
1170 local_mem_size = gpu->local_mem_info.local_mem_size_private + in kfd_generate_gpu_id()
1171 gpu->local_mem_info.local_mem_size_public; in kfd_generate_gpu_id()
1173 buf[0] = gpu->pdev->devfn; in kfd_generate_gpu_id()
1174 buf[1] = gpu->pdev->subsystem_vendor | in kfd_generate_gpu_id()
1175 (gpu->pdev->subsystem_device << 16); in kfd_generate_gpu_id()
1176 buf[2] = pci_domain_nr(gpu->pdev->bus); in kfd_generate_gpu_id()
1177 buf[3] = gpu->pdev->device; in kfd_generate_gpu_id()
1178 buf[4] = gpu->pdev->bus->number; in kfd_generate_gpu_id()
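kfd_generate_gpu_id() packs PCI identity (devfn, subsystem IDs, domain, device ID, bus number) and the total local memory size into a small buffer; the lines this search does not show fold that buffer into a 32-bit ID (mainline XORs hash_32() of each word over KFD_GPU_ID_HASH_WIDTH bits). A standalone approximation of the derivation, with made-up field values and a simple multiplicative fold standing in for hash_32():

#include <stdint.h>
#include <stdio.h>

static uint32_t fold32(uint32_t v, unsigned int bits)
{
	/* same golden-ratio constant the kernel's hash_32() uses */
	return (uint32_t)(v * 0x61C88647u) >> (32 - bits);
}

int main(void)
{
	uint64_t local_mem_size = 8ull << 30;	/* hypothetical 8 GiB of VRAM */
	uint32_t buf[7], hash = 0;

	buf[0] = 0x08;				/* pdev->devfn */
	buf[1] = 0x1002u | (0x0b0cu << 16);	/* subsystem vendor | (device << 16) */
	buf[2] = 0;				/* pci_domain_nr() */
	buf[3] = 0x73bf;			/* pdev->device */
	buf[4] = 0x03;				/* bus number */
	buf[5] = (uint32_t)local_mem_size;	/* low 32 bits of VRAM size */
	buf[6] = (uint32_t)(local_mem_size >> 32);

	for (int i = 0; i < 7; i++)
		hash ^= fold32(buf[i], 16);	/* 16 as in KFD_GPU_ID_HASH_WIDTH */

	printf("gpu_id: 0x%x\n", hash);
	return 0;
}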
1187 /* kfd_assign_gpu - Attach @gpu to the correct kfd topology device. If
1188 * the GPU device is not already present in the topology device
1190 * be created for this GPU.
1192 static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) in kfd_assign_gpu() argument
1206 if (!gpu->use_iommu_v2 && in kfd_assign_gpu()
1210 if (!dev->gpu && (dev->node_props.simd_count > 0)) { in kfd_assign_gpu()
1211 dev->gpu = gpu; in kfd_assign_gpu()
1215 mem->gpu = dev->gpu; in kfd_assign_gpu()
1217 cache->gpu = dev->gpu; in kfd_assign_gpu()
1219 iolink->gpu = dev->gpu; in kfd_assign_gpu()
1221 p2plink->gpu = dev->gpu; in kfd_assign_gpu()
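The kfd_assign_gpu() hits show the selection rule (a topology node that already has SIMD units but no GPU attached, plus an IOMMUv2 special case only partially visible here) and the copying of the same gpu pointer into the node's mem, cache, iolink and p2plink entries so the *_show() handlers can reach the device. A toy model of that attach-and-propagate step, with an invented struct layout:

#include <stddef.h>
#include <stdio.h>

struct kfd_dev { int id; };

struct topo_node {
	struct kfd_dev *gpu;
	unsigned int simd_count;
	struct kfd_dev *mem_gpu;     /* stands in for each mem->gpu in mem_props */
	struct kfd_dev *cache_gpu;   /* stands in for each cache->gpu in cache_props */
	struct kfd_dev *iolink_gpu;  /* stands in for each iolink->gpu in io_link_props */
	struct kfd_dev *p2plink_gpu; /* stands in for each p2plink->gpu in p2p_link_props */
};

static struct topo_node *assign_gpu(struct topo_node *nodes, size_t n,
				    struct kfd_dev *gpu)
{
	for (size_t i = 0; i < n; i++) {
		struct topo_node *dev = &nodes[i];

		/* rule from the hits: a compute node with no GPU attached yet */
		if (!dev->gpu && dev->simd_count > 0) {
			dev->gpu = gpu;
			dev->mem_gpu = dev->gpu;
			dev->cache_gpu = dev->gpu;
			dev->iolink_gpu = dev->gpu;
			dev->p2plink_gpu = dev->gpu;
			return dev;
		}
	}
	return NULL;	/* caller falls back to creating a virtual CRAT node */
}

int main(void)
{
	struct topo_node nodes[2] = { { .simd_count = 0 }, { .simd_count = 64 } };
	struct kfd_dev gpu = { .id = 1 };
	struct topo_node *dev = assign_gpu(nodes, 2, &gpu);

	printf("attached to node %td\n", dev ? dev - nodes : -1);
	return 0;
}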
1233 * of the GPU in kfd_notify_gpu_change()
1254 amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info); in kfd_fill_mem_clk_max_info()
1272 pcie_capability_read_dword(target_gpu_dev->gpu->pdev, in kfd_set_iolink_no_atomics()
1279 /* set gpu (dev) flags. */ in kfd_set_iolink_no_atomics()
1281 if (!dev->gpu->pci_atomic_requested || in kfd_set_iolink_no_atomics()
1282 dev->gpu->adev->asic_type == CHIP_HAWAII) in kfd_set_iolink_no_atomics()
1292 /* CPU -> GPU with PCIe */ in kfd_set_iolink_non_coherent()
1293 if (!to_dev->gpu && in kfd_set_iolink_non_coherent()
1297 if (to_dev->gpu) { in kfd_set_iolink_non_coherent()
1298 /* GPU <-> GPU with PCIe and in kfd_set_iolink_non_coherent()
1303 KFD_GC_VERSION(to_dev->gpu) == IP_VERSION(9, 4, 0))) { in kfd_set_iolink_non_coherent()
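kfd_set_iolink_non_coherent() amounts to a small rule in these hits: a PCIe link into a CPU-only node is marked non-coherent, and links into a GPU node are marked non-coherent (both directions in mainline) when they are PCIe, or XGMI on a GC 9.4.0 part. A compilable restatement with stand-in constants; the kernel uses kfd_iolink_properties and the CRAT_IOLINK_TYPE_*/CRAT_IOLINK_FLAGS_* values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum link_type { LINK_PCIE, LINK_XGMI };
#define LINK_FLAG_NON_COHERENT 0x1u

struct link { enum link_type type; uint32_t flags; };

static void set_non_coherent(bool to_is_gpu, bool to_is_gc_9_4_0,
			     struct link *outbound, struct link *inbound)
{
	/* CPU destination reached over PCIe */
	if (!to_is_gpu && inbound->type == LINK_PCIE)
		inbound->flags |= LINK_FLAG_NON_COHERENT;

	/* GPU destination over PCIe, or over XGMI on GC 9.4.0 */
	if (to_is_gpu &&
	    (inbound->type == LINK_PCIE ||
	     (inbound->type == LINK_XGMI && to_is_gc_9_4_0))) {
		outbound->flags |= LINK_FLAG_NON_COHERENT;
		inbound->flags |= LINK_FLAG_NON_COHERENT;
	}
}

int main(void)
{
	struct link out = { LINK_PCIE, 0 }, in = { LINK_PCIE, 0 };

	set_non_coherent(true, false, &out, &in);
	printf("outbound flags 0x%x, inbound flags 0x%x\n", out.flags, in.flags);
	return 0;
}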
1315 if (!dev || !dev->gpu) in kfd_fill_iolink_non_crat_info()
1318 /* GPU only creates direct links so apply flags setting to all */ in kfd_fill_iolink_non_crat_info()
1328 /* Include the CPU peer in GPU hive if connected over xGMI. */ in kfd_fill_iolink_non_crat_info()
1329 if (!peer_dev->gpu && !peer_dev->node_props.hive_id && in kfd_fill_iolink_non_crat_info()
1331 dev->gpu->adev->gmc.xgmi.connected_to_cpu) in kfd_fill_iolink_non_crat_info()
1403 if (cpu_dev->gpu) in kfd_create_indirect_link_prop()
1414 /* CPU <--> GPU */ in kfd_create_indirect_link_prop()
1434 /* CPU <--> CPU <--> GPU, GPU node*/ in kfd_create_indirect_link_prop()
1454 /* for small Bar, no CPU --> GPU in-direct links */ in kfd_create_indirect_link_prop()
1455 if (kfd_dev_is_large_bar(kdev->gpu)) { in kfd_create_indirect_link_prop()
1456 /* CPU <--> CPU <--> GPU, CPU node*/ in kfd_create_indirect_link_prop()
1485 kdev->gpu->adev, in kfd_add_peer_prop()
1486 peer->gpu->adev)) in kfd_add_peer_prop()
1558 if (WARN_ON(!new_dev->gpu)) in kfd_dev_create_p2p_links()
1574 if (!dev->gpu || !dev->gpu->adev || in kfd_dev_create_p2p_links()
1575 (dev->gpu->hive_id && in kfd_dev_create_p2p_links()
1576 dev->gpu->hive_id == new_dev->gpu->hive_id)) in kfd_dev_create_p2p_links()
1596 int kfd_topology_add_device(struct kfd_dev *gpu) in kfd_topology_add_device() argument
1607 const char *asic_name = amdgpu_asic_name[gpu->adev->asic_type]; in kfd_topology_add_device()
1611 gpu_id = kfd_generate_gpu_id(gpu); in kfd_topology_add_device()
1612 pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id); in kfd_topology_add_device()
1614 /* Check to see if this gpu device exists in the topology_device_list. in kfd_topology_add_device()
1615 * If so, assign the gpu to that device, in kfd_topology_add_device()
1616 * else create a Virtual CRAT for this gpu device and then parse that in kfd_topology_add_device()
1617 * CRAT to create a new topology device. Once created assign the gpu to in kfd_topology_add_device()
1620 dev = kfd_assign_gpu(gpu); in kfd_topology_add_device()
1626 COMPUTE_UNIT_GPU, gpu, in kfd_topology_add_device()
1629 pr_err("Error creating VCRAT for GPU (ID: 0x%x)\n", in kfd_topology_add_device()
1638 pr_err("Error parsing VCRAT for GPU (ID: 0x%x)\n", in kfd_topology_add_device()
1656 pr_err("Failed to update GPU (ID: 0x%x) to sysfs topology. res=%d\n", in kfd_topology_add_device()
1658 dev = kfd_assign_gpu(gpu); in kfd_topology_add_device()
1666 gpu->id = gpu_id; in kfd_topology_add_device()
1678 amdgpu_amdkfd_get_cu_info(dev->gpu->adev, &cu_info); in kfd_topology_add_device()
1690 dev->node_props.gfx_target_version = gpu->device_info.gfx_target_version; in kfd_topology_add_device()
1691 dev->node_props.vendor_id = gpu->pdev->vendor; in kfd_topology_add_device()
1692 dev->node_props.device_id = gpu->pdev->device; in kfd_topology_add_device()
1694 ((dev->gpu->adev->rev_id << HSA_CAP_ASIC_REVISION_SHIFT) & in kfd_topology_add_device()
1696 dev->node_props.location_id = pci_dev_id(gpu->pdev); in kfd_topology_add_device()
1697 dev->node_props.domain = pci_domain_nr(gpu->pdev->bus); in kfd_topology_add_device()
1699 amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->adev); in kfd_topology_add_device()
1703 gpu->shared_resources.drm_render_minor; in kfd_topology_add_device()
1705 dev->node_props.hive_id = gpu->hive_id; in kfd_topology_add_device()
1706 dev->node_props.num_sdma_engines = kfd_get_num_sdma_engines(gpu); in kfd_topology_add_device()
1708 kfd_get_num_xgmi_sdma_engines(gpu); in kfd_topology_add_device()
1710 gpu->device_info.num_sdma_queues_per_engine - in kfd_topology_add_device()
1711 gpu->device_info.num_reserved_sdma_queues_per_engine; in kfd_topology_add_device()
1712 dev->node_props.num_gws = (dev->gpu->gws && in kfd_topology_add_device()
1713 dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ? in kfd_topology_add_device()
1714 dev->gpu->adev->gds.gws_size : 0; in kfd_topology_add_device()
1715 dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm); in kfd_topology_add_device()
1720 switch (dev->gpu->adev->asic_type) { in kfd_topology_add_device()
1740 if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 0, 1)) in kfd_topology_add_device()
1746 dev->gpu->adev->asic_type); in kfd_topology_add_device()
1753 if (dev->gpu->use_iommu_v2) in kfd_topology_add_device()
1763 if (dev->gpu->adev->asic_type == CHIP_CARRIZO) { in kfd_topology_add_device()
1771 ((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ? in kfd_topology_add_device()
1774 ((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ? in kfd_topology_add_device()
1777 if (KFD_GC_VERSION(dev->gpu) != IP_VERSION(9, 0, 1)) in kfd_topology_add_device()
1778 dev->node_props.capability |= (dev->gpu->adev->ras_enabled != 0) ? in kfd_topology_add_device()
1781 if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev->kfd.dev)) in kfd_topology_add_device()
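Read in order, the kfd_topology_add_device() hits trace the add path: compute gpu_id, try to attach to an existing topology node, otherwise build and parse a virtual CRAT (attaching again after the sysfs update), then fill node_props from the PCI device, the amdgpu device and the queue manager. A condensed, stubbed outline of just that control flow; every helper here is a placeholder, not the kernel function of the same purpose:

/* Control-flow outline only; all helpers are placeholders for the real ones. */
#include <stdint.h>
#include <stdio.h>

struct gpu { uint32_t id; };
struct node { struct gpu *gpu; };

static struct node node_pool[1];

static uint32_t generate_gpu_id(struct gpu *g) { (void)g; return 0xbeef; }
static int build_and_parse_vcrat(struct gpu *g) { (void)g; return 0; }
static void fill_node_props(struct node *n) { (void)n; } /* ids, clocks, queue counts, caps */

static struct node *assign_gpu(struct gpu *g)
{
	if (!node_pool[0].gpu) {
		node_pool[0].gpu = g;
		return &node_pool[0];
	}
	return NULL;
}

static int topology_add_device(struct gpu *g)
{
	uint32_t gpu_id = generate_gpu_id(g);
	struct node *dev = assign_gpu(g);

	if (!dev) {
		/* no free node: build a virtual CRAT, parse it into a new
		 * topology node, refresh sysfs, then attach again */
		if (build_and_parse_vcrat(g))
			return -1;
		dev = assign_gpu(g);
		if (!dev)
			return -1;
	}

	g->id = gpu_id;
	fill_node_props(dev);
	return 0;
}

int main(void)
{
	struct gpu g = { 0 };

	printf("add returned %d, gpu_id 0x%x\n", topology_add_device(&g), g.id);
	return 0;
}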
1855 int kfd_topology_remove_device(struct kfd_dev *gpu) in kfd_topology_remove_device() argument
1865 if (dev->gpu == gpu) { in kfd_topology_remove_device()
1890 * topology. If GPU device is found @idx, then valid kfd_dev pointer is
1892 * Return - 0: On success (@kdev will be NULL for non GPU nodes)
1906 *kdev = top_dev->gpu; in kfd_topology_enum_kfd_devices()
1949 void kfd_double_confirm_iommu_support(struct kfd_dev *gpu) in kfd_double_confirm_iommu_support() argument
1953 gpu->use_iommu_v2 = false; in kfd_double_confirm_iommu_support()
1955 if (!gpu->device_info.needs_iommu_device) in kfd_double_confirm_iommu_support()
1960 /* Only use IOMMUv2 if there is an APU topology node with no GPU in kfd_double_confirm_iommu_support()
1961 * assigned yet. This GPU will be assigned to it. in kfd_double_confirm_iommu_support()
1966 !dev->gpu) in kfd_double_confirm_iommu_support()
1967 gpu->use_iommu_v2 = true; in kfd_double_confirm_iommu_support()
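kfd_double_confirm_iommu_support() only keeps IOMMUv2 on when the device reports needs_iommu_device and the topology still contains an APU node with no GPU assigned (the unlisted lines check cpu_cores_count and simd_count); that node is the one this GPU will then claim. A simplified restatement of the check, without the topology list and lock:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node { unsigned int cpu_cores, simds; bool has_gpu; };

static bool confirm_iommu_v2(bool needs_iommu_device,
			     const struct node *nodes, size_t n)
{
	if (!needs_iommu_device)
		return false;

	/* keep IOMMUv2 only if an APU node is still waiting for its GPU */
	for (size_t i = 0; i < n; i++)
		if (nodes[i].cpu_cores && nodes[i].simds && !nodes[i].has_gpu)
			return true;

	return false;
}

int main(void)
{
	struct node nodes[] = { { 8, 0, false }, { 8, 64, false } };

	printf("use_iommu_v2: %d\n", confirm_iommu_v2(true, nodes, 2));
	return 0;
}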
1983 if (!dev->gpu) { in kfd_debugfs_hqds_by_device()
1988 seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id); in kfd_debugfs_hqds_by_device()
1989 r = dqm_debugfs_hqds(m, dev->gpu->dqm); in kfd_debugfs_hqds_by_device()
2008 if (!dev->gpu) { in kfd_debugfs_rls_by_device()
2013 seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id); in kfd_debugfs_rls_by_device()
2014 r = pm_debugfs_runlist(m, &dev->gpu->dqm->packet_mgr); in kfd_debugfs_rls_by_device()