Lines Matching full:cu

66 /* TCP L1 Cache per CU */
99 /* TCP L1 Cache per CU */
146 /* TCP L1 Cache per CU */
185 /* TCP L1 Cache per CU */
224 /* TCP L1 Cache per CU */
263 /* TCP L1 Cache per CU */
302 /* TCP L1 Cache per CU */
341 /* TCP L1 Cache per CU */
380 /* TCP L1 Cache per CU */
428 /* TCP L1 Cache per CU */
476 /* TCP L1 Cache per CU */
524 /* TCP L1 Cache per CU */
581 /* TCP L1 Cache per CU */
638 /* TCP L1 Cache per CU */
695 /* TCP L1 Cache per CU */
752 /* TCP L1 Cache per CU */
800 /* TCP L1 Cache per CU */
848 /* TCP L1 Cache per CU */
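The eighteen matches above are all the same comment heading the L1 entry of each per-ASIC table of GPU cache descriptions. As a rough sketch of what one such entry looks like (the struct and field names are an assumption inferred from how the table is consumed further down, and the size value is illustrative only):

static struct kfd_gpu_cache_info example_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,		/* KiB, illustrative value */
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
			  CRAT_CACHE_FLAGS_DATA_CACHE |
			  CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,		/* private to a single CU */
	},
	/* ...L2/L3 entries would follow in a real table... */
};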
895 struct crat_subtype_computeunit *cu) in kfd_populated_cu_info_cpu() argument
897 dev->node_props.cpu_cores_count = cu->num_cpu_cores; in kfd_populated_cu_info_cpu()
898 dev->node_props.cpu_core_id_base = cu->processor_id_low; in kfd_populated_cu_info_cpu()
899 if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT) in kfd_populated_cu_info_cpu()
902 pr_debug("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores, in kfd_populated_cu_info_cpu()
903 cu->processor_id_low); in kfd_populated_cu_info_cpu()
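Reassembled from the fragments at lines 895-903, the CPU populate helper reduces to roughly the sketch below. The dev parameter type and the body of the IOMMU branch (not part of the matches) are assumptions, not confirmed by the listing:

static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
		struct crat_subtype_computeunit *cu)
{
	dev->node_props.cpu_cores_count = cu->num_cpu_cores;
	dev->node_props.cpu_core_id_base = cu->processor_id_low;
	if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
		/* assumed: advertise ATS support in the node capability bits */
		dev->node_props.capability |= HSA_CAP_ATS_PRESENT;

	pr_debug("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
		 cu->processor_id_low);
}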
907 struct crat_subtype_computeunit *cu) in kfd_populated_cu_info_gpu() argument
909 dev->node_props.simd_id_base = cu->processor_id_low; in kfd_populated_cu_info_gpu()
910 dev->node_props.simd_count = cu->num_simd_cores; in kfd_populated_cu_info_gpu()
911 dev->node_props.lds_size_in_kb = cu->lds_size_in_kb; in kfd_populated_cu_info_gpu()
912 dev->node_props.max_waves_per_simd = cu->max_waves_simd; in kfd_populated_cu_info_gpu()
913 dev->node_props.wave_front_size = cu->wave_front_size; in kfd_populated_cu_info_gpu()
914 dev->node_props.array_count = cu->array_count; in kfd_populated_cu_info_gpu()
915 dev->node_props.cu_per_simd_array = cu->num_cu_per_array; in kfd_populated_cu_info_gpu()
916 dev->node_props.simd_per_cu = cu->num_simd_per_cu; in kfd_populated_cu_info_gpu()
917 dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu; in kfd_populated_cu_info_gpu()
918 if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE) in kfd_populated_cu_info_gpu()
920 pr_debug("CU GPU: id_base=%d\n", cu->processor_id_low); in kfd_populated_cu_info_gpu()
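Its GPU counterpart (lines 907-920) copies the SIMD and shader-array geometry from the CRAT CU entry straight into the node properties. Reassembled the same way, with the hot-plug branch body assumed:

static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
		struct crat_subtype_computeunit *cu)
{
	dev->node_props.simd_id_base = cu->processor_id_low;
	dev->node_props.simd_count = cu->num_simd_cores;
	dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
	dev->node_props.max_waves_per_simd = cu->max_waves_simd;
	dev->node_props.wave_front_size = cu->wave_front_size;
	dev->node_props.array_count = cu->array_count;
	dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
	dev->node_props.simd_per_cu = cu->num_simd_per_cu;
	dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
	if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
		/* assumed: flag the node as hot-pluggable */
		dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;

	pr_debug("CU GPU: id_base=%d\n", cu->processor_id_low);
}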
926 static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu, in kfd_parse_subtype_cu() argument
931 pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n", in kfd_parse_subtype_cu()
932 cu->proximity_domain, cu->hsa_capability); in kfd_parse_subtype_cu()
934 if (cu->proximity_domain == dev->proximity_domain) { in kfd_parse_subtype_cu()
935 if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT) in kfd_parse_subtype_cu()
936 kfd_populated_cu_info_cpu(dev, cu); in kfd_parse_subtype_cu()
938 if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT) in kfd_parse_subtype_cu()
939 kfd_populated_cu_info_gpu(dev, cu); in kfd_parse_subtype_cu()
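kfd_parse_subtype_cu() (lines 926-939) then attaches a CRAT CU entry to the topology device whose proximity domain matches. The surrounding loop is not part of the matches; the list walk and the early break below are assumptions:

static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
		struct list_head *device_list)
{
	struct kfd_topology_device *dev;

	pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
		 cu->proximity_domain, cu->hsa_capability);

	list_for_each_entry(dev, device_list, list) {
		if (cu->proximity_domain == dev->proximity_domain) {
			if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
				kfd_populated_cu_info_cpu(dev, cu);
			if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
				kfd_populated_cu_info_gpu(dev, cu);
			break;
		}
	}

	return 0;
}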
1180 struct crat_subtype_computeunit *cu; in kfd_parse_subtype() local
1188 cu = (struct crat_subtype_computeunit *)sub_type_hdr; in kfd_parse_subtype()
1189 ret = kfd_parse_subtype_cu(cu, device_list); in kfd_parse_subtype()
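The caller, kfd_parse_subtype() (lines 1180-1189), only shows the cast and the call; the dispatch around it is presumably a switch on the subtype header, sketched here with the CU arm only (case label and surrounding structure assumed):

	switch (sub_type_hdr->type) {
	case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
		cu = (struct crat_subtype_computeunit *)sub_type_hdr;
		ret = kfd_parse_subtype_cu(cu, device_list);
		break;
	/* ...memory, cache, TLB and IO-link subtypes handled by their
	 * own parse helpers...
	 */
	default:
		break;
	}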
1316 /* CU could be inactive. In case of shared cache find the first active in fill_in_l1_pcache()
1317 * CU, and in case of non-shared cache check if the CU is inactive. If in fill_in_l1_pcache()
1331 * inactive CU in fill_in_l1_pcache()
1368 /* CU could be inactive. In case of shared cache find the first active in fill_in_l2_l3_pcache()
1369 * CU, and in case of non-shared cache check if the CU is inactive. If in fill_in_l2_l3_pcache()
1383 * inactive CU in fill_in_l2_l3_pcache()
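The comment at lines 1316-1317 (repeated at 1368-1369 for the L2/L3 path) describes the same guard in both cache-fill helpers: a private cache entry is skipped when its one CU is harvested, while a shared cache entry survives as long as any CU in the group is active. A freestanding sketch of that rule (helper name and bit layout are hypothetical, not the kernel's own code):

#include <stdbool.h>

/* Hypothetical check: should a cache entry be emitted for the CU group
 * described by cu_bitmask (bit 0 = the CU this entry starts at)?
 */
static bool cache_entry_has_active_cu(unsigned int cu_bitmask,
				      unsigned int num_cu_shared)
{
	if (num_cu_shared == 1)
		return cu_bitmask & 1;	/* private cache: that CU must be active */

	return cu_bitmask != 0;		/* shared cache: any active CU will do */
}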
1420 /* TCP L1 Cache per CU */ in kfd_fill_gpu_cache_info_from_gfx_config()
1644 * will parse through all available CU in kfd_fill_gpu_cache_info()
1646 * then it will consider only one CU from in kfd_fill_gpu_cache_info()
1676 /* Move to next CU block */ in kfd_fill_gpu_cache_info()
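Lines 1644-1676 hint at how the walk advances in kfd_fill_gpu_cache_info(): for a cache shared by N CUs only one entry is emitted for the whole block, and the cursor then skips ahead by N ("Move to next CU block"). A freestanding sketch of that stride, with hypothetical names standing in for the real fill_in_*_pcache() machinery:

static void emit_cache_entry(unsigned int first_cu, unsigned int num_cu_shared)
{
	/* stand-in for building one CRAT cache entry covering
	 * CUs [first_cu, first_cu + num_cu_shared)
	 */
}

static void walk_cu_blocks(unsigned int num_cus, unsigned int num_cu_shared)
{
	unsigned int cu = 0;

	while (cu < num_cus) {
		emit_cache_entry(cu, num_cu_shared);
		/* Move to next CU block */
		cu += num_cu_shared;
	}
}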
1815 /* Fill in CU data */ in kfd_fill_cu_for_cpu()
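kfd_fill_cu_for_cpu() (line 1815) builds the CPU-side CU subtype for the virtual CRAT image. By symmetry with the GPU fill listed below, the "Fill in CU data" step amounts to roughly the following kernel-flavored sketch (condensed into a hypothetical helper; the real function also fills the subtype header and checks the remaining buffer size):

static void fill_cpu_cu_data(struct crat_subtype_computeunit *sub_type_hdr,
		int numa_node_id, uint32_t proximity_domain)
{
	sub_type_hdr->flags |= CRAT_CU_FLAGS_CPU_PRESENT;
	sub_type_hdr->proximity_domain = proximity_domain;
	sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask_of_node(numa_node_id));
}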
2262 struct crat_subtype_computeunit *cu; in kfd_create_vcrat_image_gpu() local
2304 /* Fill CU subtype data */ in kfd_create_vcrat_image_gpu()
2305 cu = (struct crat_subtype_computeunit *)sub_type_hdr; in kfd_create_vcrat_image_gpu()
2306 cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT; in kfd_create_vcrat_image_gpu()
2307 cu->proximity_domain = proximity_domain; in kfd_create_vcrat_image_gpu()
2310 cu->num_simd_per_cu = cu_info.simd_per_cu; in kfd_create_vcrat_image_gpu()
2311 cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number; in kfd_create_vcrat_image_gpu()
2312 cu->max_waves_simd = cu_info.max_waves_per_simd; in kfd_create_vcrat_image_gpu()
2314 cu->wave_front_size = cu_info.wave_front_size; in kfd_create_vcrat_image_gpu()
2315 cu->array_count = cu_info.num_shader_arrays_per_engine * in kfd_create_vcrat_image_gpu()
2317 total_num_of_cu = (cu->array_count * cu_info.num_cu_per_sh); in kfd_create_vcrat_image_gpu()
2318 cu->processor_id_low = get_and_inc_gpu_processor_id(total_num_of_cu); in kfd_create_vcrat_image_gpu()
2319 cu->num_cu_per_array = cu_info.num_cu_per_sh; in kfd_create_vcrat_image_gpu()
2320 cu->max_slots_scatch_cu = cu_info.max_scratch_slots_per_cu; in kfd_create_vcrat_image_gpu()
2321 cu->num_banks = cu_info.num_shader_engines; in kfd_create_vcrat_image_gpu()
2322 cu->lds_size_in_kb = cu_info.lds_size; in kfd_create_vcrat_image_gpu()
2324 cu->hsa_capability = 0; in kfd_create_vcrat_image_gpu()
2330 cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT; in kfd_create_vcrat_image_gpu()
2373 ret = kfd_fill_gpu_cache_info(kdev, cu->processor_id_low, in kfd_create_vcrat_image_gpu()
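Taken together, the GPU vCRAT fill at lines 2305-2330 touches every CU-related member of the subtype, and the processor ID it allocates is reused as the base ID when the cache info is filled at line 2373. The subset of struct crat_subtype_computeunit visible in these matches is collected below; the field types, ordering, and the omitted header/reserved members are assumptions, since the CRAT layout itself is defined elsewhere:

struct crat_subtype_computeunit {
	/* subtype header (type, length, ...) omitted */
	uint32_t flags;			/* CRAT_CU_FLAGS_CPU_PRESENT / _GPU_PRESENT */
	uint32_t proximity_domain;
	uint32_t processor_id_low;	/* cpu_core_id_base / simd_id_base on the topology side */
	uint16_t num_cpu_cores;		/* CPU CUs only */
	uint16_t num_simd_cores;	/* GPU CUs only */
	uint16_t max_waves_simd;
	uint16_t wave_front_size;
	uint16_t lds_size_in_kb;
	uint8_t  num_banks;
	uint8_t  array_count;
	uint8_t  num_cu_per_array;
	uint8_t  num_simd_per_cu;
	uint8_t  max_slots_scatch_cu;	/* spelling as in the accesses above */
	uint32_t hsa_capability;
	/* reserved / padding omitted */
};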