Lines Matching refs:gpu (all hits below are in drivers/gpu/drm/amd/amdkfd/kfd_topology.c)
94 return top_dev->gpu; in kfd_device_by_id()
105 if (top_dev->gpu && top_dev->gpu->pdev == pdev) { in kfd_device_by_pci_dev()
106 device = top_dev->gpu; in kfd_device_by_pci_dev()
123 if (top_dev->gpu && top_dev->gpu->kgd == kgd) { in kfd_device_by_kgd()
124 device = top_dev->gpu; in kfd_device_by_kgd()
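The three lookup helpers above (kfd_device_by_id(), kfd_device_by_pci_dev(), kfd_device_by_kgd()) share one pattern: walk the global topology device list under the read side of the topology lock and return the kfd_dev bound to the matching node. A minimal sketch of kfd_device_by_pci_dev(); identifiers not visible in the hits (topology_device_list, topology_lock) are taken from the surrounding kernel source of this era and may differ between versions:

struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
{
        struct kfd_topology_device *top_dev;
        struct kfd_dev *device = NULL;

        down_read(&topology_lock);

        /* CPU-only topology nodes have no kfd_dev attached (gpu == NULL) */
        list_for_each_entry(top_dev, &topology_device_list, list)
                if (top_dev->gpu && top_dev->gpu->pdev == pdev) {
                        device = top_dev->gpu;
                        break;
                }

        up_read(&topology_lock);

        return device;
}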
274 if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu)) in iolink_show()
314 if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu)) in mem_show()
347 if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu)) in kfd_cache_show()
428 if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) in node_show()
437 if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) in node_show()
444 if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) in node_show()
449 dev->gpu ? dev->node_props.simd_count : 0); in node_show()
503 if (dev->gpu) { in node_show()
505 __ilog2_u32(dev->gpu->device_info->num_of_watch_points); in node_show()
517 if (dev->gpu->device_info->asic_family == CHIP_TONGA) in node_show()
527 dev->gpu->mec_fw_version); in node_show()
531 dev->gpu->sdma_fw_version); in node_show()
956 if (!kdev->gpu) { in kfd_add_non_crat_information()
1094 static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) in kfd_generate_gpu_id() argument
1102 if (!gpu) in kfd_generate_gpu_id()
1105 amdgpu_amdkfd_get_local_mem_info(gpu->kgd, &local_mem_info); in kfd_generate_gpu_id()
1110 buf[0] = gpu->pdev->devfn; in kfd_generate_gpu_id()
1111 buf[1] = gpu->pdev->subsystem_vendor | in kfd_generate_gpu_id()
1112 (gpu->pdev->subsystem_device << 16); in kfd_generate_gpu_id()
1113 buf[2] = pci_domain_nr(gpu->pdev->bus); in kfd_generate_gpu_id()
1114 buf[3] = gpu->pdev->device; in kfd_generate_gpu_id()
1115 buf[4] = gpu->pdev->bus->number; in kfd_generate_gpu_id()
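The buf[] words above, plus the local memory size returned by amdgpu_amdkfd_get_local_mem_info() (public and private VRAM added together), are folded into the gpu_id that user space later sees in sysfs. The rest of the function, roughly as it reads in this kernel era; KFD_GPU_ID_HASH_WIDTH (16) comes from kfd_topology.h and hash_32() from linux/hash.h:

        buf[5] = lower_32_bits(local_mem_size);
        buf[6] = upper_32_bits(local_mem_size);

        /* fold all seven words into one KFD_GPU_ID_HASH_WIDTH-bit hash */
        for (i = 0, hashout = 0; i < 7; i++)
                hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);

        return hashout;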
1129 static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) in kfd_assign_gpu() argument
1142 if (!gpu->use_iommu_v2 && in kfd_assign_gpu()
1146 if (!dev->gpu && (dev->node_props.simd_count > 0)) { in kfd_assign_gpu()
1147 dev->gpu = gpu; in kfd_assign_gpu()
1151 mem->gpu = dev->gpu; in kfd_assign_gpu()
1153 cache->gpu = dev->gpu; in kfd_assign_gpu()
1155 iolink->gpu = dev->gpu; in kfd_assign_gpu()
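kfd_assign_gpu() binds a newly probed kfd_dev to the first topology node that has SIMDs but no GPU bound yet. Discrete GPUs (use_iommu_v2 == false) skip nodes that also list CPU cores, so they never get attached to a CPU/APU node, and the gpu pointer is propagated into the node's memory, cache and io_link property lists so the sysfs callbacks above can perform their cgroup checks. A sketch under the same naming assumptions as before:

static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
{
        struct kfd_topology_device *dev, *out_dev = NULL;
        struct kfd_mem_properties *mem;
        struct kfd_cache_properties *cache;
        struct kfd_iolink_properties *iolink;

        down_write(&topology_lock);
        list_for_each_entry(dev, &topology_device_list, list) {
                /* discrete GPUs get their own node; skip CPU/APU nodes */
                if (!gpu->use_iommu_v2 && dev->node_props.cpu_cores_count)
                        continue;

                if (!dev->gpu && (dev->node_props.simd_count > 0)) {
                        dev->gpu = gpu;
                        out_dev = dev;

                        list_for_each_entry(mem, &dev->mem_props, list)
                                mem->gpu = dev->gpu;
                        list_for_each_entry(cache, &dev->cache_props, list)
                                cache->gpu = dev->gpu;
                        list_for_each_entry(iolink, &dev->io_link_props, list)
                                iolink->gpu = dev->gpu;
                        break;
                }
        }
        up_write(&topology_lock);
        return out_dev;
}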
1188 amdgpu_amdkfd_get_local_mem_info(dev->gpu->kgd, &local_mem_info); in kfd_fill_mem_clk_max_info()
1202 if (!dev || !dev->gpu) in kfd_fill_iolink_non_crat_info()
1205 pcie_capability_read_dword(dev->gpu->pdev, in kfd_fill_iolink_non_crat_info()
1213 if (!dev->gpu->pci_atomic_requested || in kfd_fill_iolink_non_crat_info()
1214 dev->gpu->device_info->asic_family == CHIP_HAWAII) in kfd_fill_iolink_non_crat_info()
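The pcie_capability_read_dword() and pci_atomic_requested hits above are the PCIe-atomics check for the GPU's io_links: when the device does not advertise 32- and 64-bit atomic completion, or the driver never enabled atomic requests (and Hawaii never does), the link is marked with the CRAT "no atomics" flags. A simplified sketch using a single flag variable; how the flags are applied to each io_link direction is left to kfd_topology.c:

        uint32_t cap, flag = CRAT_IOLINK_FLAGS_ENABLED;

        /* does the device support PCIe atomic completion at all? */
        pcie_capability_read_dword(dev->gpu->pdev, PCI_EXP_DEVCAP2, &cap);
        if (!(cap & (PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
                     PCI_EXP_DEVCAP2_ATOMIC_COMP64)))
                flag |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
                        CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;

        /* atomics were never requested, or the ASIC cannot use them */
        if (!dev->gpu->pci_atomic_requested ||
            dev->gpu->device_info->asic_family == CHIP_HAWAII)
                flag |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
                        CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;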
1232 int kfd_topology_add_device(struct kfd_dev *gpu) in kfd_topology_add_device() argument
1246 gpu_id = kfd_generate_gpu_id(gpu); in kfd_topology_add_device()
1258 dev = kfd_assign_gpu(gpu); in kfd_topology_add_device()
1261 COMPUTE_UNIT_GPU, gpu, in kfd_topology_add_device()
1292 dev = kfd_assign_gpu(gpu); in kfd_topology_add_device()
1300 gpu->id = gpu_id; in kfd_topology_add_device()
1310 amdgpu_amdkfd_get_cu_info(dev->gpu->kgd, &cu_info); in kfd_topology_add_device()
1312 strncpy(dev->node_props.name, gpu->device_info->asic_name, in kfd_topology_add_device()
1318 dev->node_props.vendor_id = gpu->pdev->vendor; in kfd_topology_add_device()
1319 dev->node_props.device_id = gpu->pdev->device; in kfd_topology_add_device()
1321 ((amdgpu_amdkfd_get_asic_rev_id(dev->gpu->kgd) << in kfd_topology_add_device()
1324 dev->node_props.location_id = pci_dev_id(gpu->pdev); in kfd_topology_add_device()
1325 dev->node_props.domain = pci_domain_nr(gpu->pdev->bus); in kfd_topology_add_device()
1327 amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd); in kfd_topology_add_device()
1331 gpu->shared_resources.drm_render_minor; in kfd_topology_add_device()
1333 dev->node_props.hive_id = gpu->hive_id; in kfd_topology_add_device()
1334 dev->node_props.num_sdma_engines = gpu->device_info->num_sdma_engines; in kfd_topology_add_device()
1336 gpu->device_info->num_xgmi_sdma_engines; in kfd_topology_add_device()
1338 gpu->device_info->num_sdma_queues_per_engine; in kfd_topology_add_device()
1339 dev->node_props.num_gws = (dev->gpu->gws && in kfd_topology_add_device()
1340 dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ? in kfd_topology_add_device()
1341 amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0; in kfd_topology_add_device()
1342 dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm); in kfd_topology_add_device()
1343 dev->node_props.unique_id = gpu->unique_id; in kfd_topology_add_device()
1348 switch (dev->gpu->device_info->asic_family) { in kfd_topology_add_device()
1384 dev->gpu->device_info->asic_family); in kfd_topology_add_device()
1391 if (dev->gpu->use_iommu_v2) in kfd_topology_add_device()
1401 if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) { in kfd_topology_add_device()
1407 adev = (struct amdgpu_device *)(dev->gpu->kgd); in kfd_topology_add_device()
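kfd_topology_add_device() ties the pieces above together: generate the gpu_id hash, try to bind the device to a CRAT-discovered node via kfd_assign_gpu(), and if none exists synthesize a virtual CRAT image for the dGPU, parse it into the topology and bind again; the node properties are then overwritten with live values (PCI IDs, CU info, engine clock, SDMA/GWS/CP queue counts and the per-ASIC-family capability bits). A heavily condensed sketch with locking, error handling and the sysfs update left out; proximity_domain is shown as a plain local here, while the real code tracks it in a global counter:

int kfd_topology_add_device(struct kfd_dev *gpu)
{
        uint32_t gpu_id = kfd_generate_gpu_id(gpu);
        uint32_t proximity_domain = 0;  /* really a global, increasing counter */
        struct list_head temp_topology_device_list;
        struct kfd_topology_device *dev;
        void *crat_image = NULL;
        size_t image_size = 0;
        int res = 0;

        INIT_LIST_HEAD(&temp_topology_device_list);

        /* first try to reuse a node discovered from the ACPI CRAT (APUs) */
        dev = kfd_assign_gpu(gpu);
        if (!dev) {
                /* dGPU or no CRAT: build a virtual CRAT node and parse it */
                res = kfd_create_crat_image_virtual(&crat_image, &image_size,
                                                    COMPUTE_UNIT_GPU, gpu,
                                                    proximity_domain);
                if (res)
                        return res;
                res = kfd_parse_crat_table(crat_image,
                                           &temp_topology_device_list,
                                           proximity_domain);
                /* ... splice the parsed nodes into topology_device_list,
                 * refresh sysfs, then bind the GPU ... */
                dev = kfd_assign_gpu(gpu);
        }

        gpu->id = gpu_id;

        /* overwrite CRAT data with live values from the probed device */
        dev->node_props.vendor_id = gpu->pdev->vendor;
        dev->node_props.device_id = gpu->pdev->device;
        dev->node_props.location_id = pci_dev_id(gpu->pdev);
        dev->node_props.domain = pci_domain_nr(gpu->pdev->bus);
        dev->node_props.hive_id = gpu->hive_id;
        /* ... simd_count from CU info, engine clock, SDMA/GWS/CP queue
         * counts and capability flags per ASIC family follow here ... */

        kfd_destroy_crat_image(crat_image);
        return res;
}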
1428 int kfd_topology_remove_device(struct kfd_dev *gpu) in kfd_topology_remove_device() argument
1437 if (dev->gpu == gpu) { in kfd_topology_remove_device()
1473 *kdev = top_dev->gpu; in kfd_topology_enum_kfd_devices()
1516 void kfd_double_confirm_iommu_support(struct kfd_dev *gpu) in kfd_double_confirm_iommu_support() argument
1520 gpu->use_iommu_v2 = false; in kfd_double_confirm_iommu_support()
1522 if (!gpu->device_info->needs_iommu_device) in kfd_double_confirm_iommu_support()
1533 !dev->gpu) in kfd_double_confirm_iommu_support()
1534 gpu->use_iommu_v2 = true; in kfd_double_confirm_iommu_support()
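kfd_double_confirm_iommu_support() only leaves IOMMUv2 enabled when the device claims to need it and the topology already contains an APU node, i.e. a node with both CPU cores and SIMDs that has no GPU bound yet (the node this device is about to be assigned to); otherwise the GPU is treated like a dGPU. A sketch that mirrors the hits above:

void kfd_double_confirm_iommu_support(struct kfd_dev *gpu)
{
        struct kfd_topology_device *dev;

        gpu->use_iommu_v2 = false;

        if (!gpu->device_info->needs_iommu_device)
                return;

        down_read(&topology_lock);

        /* only use IOMMUv2 if there is an APU topology node, i.e. a node
         * with both CPU cores and SIMDs and no GPU assigned yet
         */
        list_for_each_entry(dev, &topology_device_list, list)
                if (dev->node_props.cpu_cores_count &&
                    dev->node_props.simd_count &&
                    !dev->gpu)
                        gpu->use_iommu_v2 = true;

        up_read(&topology_lock);
}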
1550 if (!dev->gpu) { in kfd_debugfs_hqds_by_device()
1555 seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id); in kfd_debugfs_hqds_by_device()
1556 r = dqm_debugfs_hqds(m, dev->gpu->dqm); in kfd_debugfs_hqds_by_device()
1575 if (!dev->gpu) { in kfd_debugfs_rls_by_device()
1580 seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id); in kfd_debugfs_rls_by_device()
1581 r = pm_debugfs_runlist(m, &dev->gpu->dqm->packets); in kfd_debugfs_rls_by_device()
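Both debugfs dumpers iterate the topology list, skip nodes without a GPU while keeping the node numbering, print a per-node header and then hand off to the device queue manager. A sketch of kfd_debugfs_hqds_by_device(); kfd_debugfs_rls_by_device() is the same walk but calls pm_debugfs_runlist() on &dev->gpu->dqm->packets:

int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data)
{
        struct kfd_topology_device *dev;
        unsigned int i = 0;
        int r = 0;

        down_read(&topology_lock);

        list_for_each_entry(dev, &topology_device_list, list) {
                if (!dev->gpu) {
                        i++;    /* keep the node numbering stable */
                        continue;
                }

                seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
                r = dqm_debugfs_hqds(m, dev->gpu->dqm);
                if (r)
                        break;
        }

        up_read(&topology_lock);

        return r;
}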