1 /*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #include <linux/firmware.h>
25
26 #include "amdgpu.h"
27 #include "amdgpu_discovery.h"
28 #include "soc15_hw_ip.h"
29 #include "discovery.h"
30
31 #include "soc15.h"
32 #include "gfx_v9_0.h"
33 #include "gfx_v9_4_3.h"
34 #include "gmc_v9_0.h"
35 #include "df_v1_7.h"
36 #include "df_v3_6.h"
37 #include "df_v4_3.h"
38 #include "nbio_v6_1.h"
39 #include "nbio_v7_0.h"
40 #include "nbio_v7_4.h"
41 #include "nbio_v7_9.h"
42 #include "hdp_v4_0.h"
43 #include "vega10_ih.h"
44 #include "vega20_ih.h"
45 #include "sdma_v4_0.h"
46 #include "sdma_v4_4_2.h"
47 #include "uvd_v7_0.h"
48 #include "vce_v4_0.h"
49 #include "vcn_v1_0.h"
50 #include "vcn_v2_5.h"
51 #include "jpeg_v2_5.h"
52 #include "smuio_v9_0.h"
53 #include "gmc_v10_0.h"
54 #include "gmc_v11_0.h"
55 #include "gfxhub_v2_0.h"
56 #include "mmhub_v2_0.h"
57 #include "nbio_v2_3.h"
58 #include "nbio_v4_3.h"
59 #include "nbio_v7_2.h"
60 #include "nbio_v7_7.h"
61 #include "hdp_v5_0.h"
62 #include "hdp_v5_2.h"
63 #include "hdp_v6_0.h"
64 #include "nv.h"
65 #include "soc21.h"
66 #include "navi10_ih.h"
67 #include "ih_v6_0.h"
68 #include "ih_v6_1.h"
69 #include "gfx_v10_0.h"
70 #include "gfx_v11_0.h"
71 #include "sdma_v5_0.h"
72 #include "sdma_v5_2.h"
73 #include "sdma_v6_0.h"
74 #include "lsdma_v6_0.h"
75 #include "vcn_v2_0.h"
76 #include "jpeg_v2_0.h"
77 #include "vcn_v3_0.h"
78 #include "jpeg_v3_0.h"
79 #include "vcn_v4_0.h"
80 #include "jpeg_v4_0.h"
81 #include "vcn_v4_0_3.h"
82 #include "jpeg_v4_0_3.h"
83 #include "amdgpu_vkms.h"
84 #include "mes_v10_1.h"
85 #include "mes_v11_0.h"
86 #include "smuio_v11_0.h"
87 #include "smuio_v11_0_6.h"
88 #include "smuio_v13_0.h"
89 #include "smuio_v13_0_3.h"
90 #include "smuio_v13_0_6.h"
91
92 #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
93 MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
94
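/* Register offsets (in dwords) used before the discovery table is parsed:
 * RCC_CONFIG_MEMSIZE reports the VRAM size in MB, and MM_INDEX/MM_INDEX_HI/
 * MM_DATA are the standard indirect MMIO access registers.
 */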
95 #define mmRCC_CONFIG_MEMSIZE 0xde3
96 #define mmMM_INDEX 0x0
97 #define mmMM_INDEX_HI 0x6
98 #define mmMM_DATA 0x1
99
100 static const char *hw_id_names[HW_ID_MAX] = {
101 [MP1_HWID] = "MP1",
102 [MP2_HWID] = "MP2",
103 [THM_HWID] = "THM",
104 [SMUIO_HWID] = "SMUIO",
105 [FUSE_HWID] = "FUSE",
106 [CLKA_HWID] = "CLKA",
107 [PWR_HWID] = "PWR",
108 [GC_HWID] = "GC",
109 [UVD_HWID] = "UVD",
110 [AUDIO_AZ_HWID] = "AUDIO_AZ",
111 [ACP_HWID] = "ACP",
112 [DCI_HWID] = "DCI",
113 [DMU_HWID] = "DMU",
114 [DCO_HWID] = "DCO",
115 [DIO_HWID] = "DIO",
116 [XDMA_HWID] = "XDMA",
117 [DCEAZ_HWID] = "DCEAZ",
118 [DAZ_HWID] = "DAZ",
119 [SDPMUX_HWID] = "SDPMUX",
120 [NTB_HWID] = "NTB",
121 [IOHC_HWID] = "IOHC",
122 [L2IMU_HWID] = "L2IMU",
123 [VCE_HWID] = "VCE",
124 [MMHUB_HWID] = "MMHUB",
125 [ATHUB_HWID] = "ATHUB",
126 [DBGU_NBIO_HWID] = "DBGU_NBIO",
127 [DFX_HWID] = "DFX",
128 [DBGU0_HWID] = "DBGU0",
129 [DBGU1_HWID] = "DBGU1",
130 [OSSSYS_HWID] = "OSSSYS",
131 [HDP_HWID] = "HDP",
132 [SDMA0_HWID] = "SDMA0",
133 [SDMA1_HWID] = "SDMA1",
134 [SDMA2_HWID] = "SDMA2",
135 [SDMA3_HWID] = "SDMA3",
136 [LSDMA_HWID] = "LSDMA",
137 [ISP_HWID] = "ISP",
138 [DBGU_IO_HWID] = "DBGU_IO",
139 [DF_HWID] = "DF",
140 [CLKB_HWID] = "CLKB",
141 [FCH_HWID] = "FCH",
142 [DFX_DAP_HWID] = "DFX_DAP",
143 [L1IMU_PCIE_HWID] = "L1IMU_PCIE",
144 [L1IMU_NBIF_HWID] = "L1IMU_NBIF",
145 [L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
146 [L1IMU3_HWID] = "L1IMU3",
147 [L1IMU4_HWID] = "L1IMU4",
148 [L1IMU5_HWID] = "L1IMU5",
149 [L1IMU6_HWID] = "L1IMU6",
150 [L1IMU7_HWID] = "L1IMU7",
151 [L1IMU8_HWID] = "L1IMU8",
152 [L1IMU9_HWID] = "L1IMU9",
153 [L1IMU10_HWID] = "L1IMU10",
154 [L1IMU11_HWID] = "L1IMU11",
155 [L1IMU12_HWID] = "L1IMU12",
156 [L1IMU13_HWID] = "L1IMU13",
157 [L1IMU14_HWID] = "L1IMU14",
158 [L1IMU15_HWID] = "L1IMU15",
159 [WAFLC_HWID] = "WAFLC",
160 [FCH_USB_PD_HWID] = "FCH_USB_PD",
161 [PCIE_HWID] = "PCIE",
162 [PCS_HWID] = "PCS",
163 [DDCL_HWID] = "DDCL",
164 [SST_HWID] = "SST",
165 [IOAGR_HWID] = "IOAGR",
166 [NBIF_HWID] = "NBIF",
167 [IOAPIC_HWID] = "IOAPIC",
168 [SYSTEMHUB_HWID] = "SYSTEMHUB",
169 [NTBCCP_HWID] = "NTBCCP",
170 [UMC_HWID] = "UMC",
171 [SATA_HWID] = "SATA",
172 [USB_HWID] = "USB",
173 [CCXSEC_HWID] = "CCXSEC",
174 [XGMI_HWID] = "XGMI",
175 [XGBE_HWID] = "XGBE",
176 [MP0_HWID] = "MP0",
177 };
178
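/* Map the driver-internal HWIP enums (used to index adev->reg_offset and
 * adev->ip_versions) to the HWID values reported in the IP discovery table.
 */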
179 static int hw_id_map[MAX_HWIP] = {
180 [GC_HWIP] = GC_HWID,
181 [HDP_HWIP] = HDP_HWID,
182 [SDMA0_HWIP] = SDMA0_HWID,
183 [SDMA1_HWIP] = SDMA1_HWID,
184 [SDMA2_HWIP] = SDMA2_HWID,
185 [SDMA3_HWIP] = SDMA3_HWID,
186 [LSDMA_HWIP] = LSDMA_HWID,
187 [MMHUB_HWIP] = MMHUB_HWID,
188 [ATHUB_HWIP] = ATHUB_HWID,
189 [NBIO_HWIP] = NBIF_HWID,
190 [MP0_HWIP] = MP0_HWID,
191 [MP1_HWIP] = MP1_HWID,
192 [UVD_HWIP] = UVD_HWID,
193 [VCE_HWIP] = VCE_HWID,
194 [DF_HWIP] = DF_HWID,
195 [DCE_HWIP] = DMU_HWID,
196 [OSSSYS_HWIP] = OSSSYS_HWID,
197 [SMUIO_HWIP] = SMUIO_HWID,
198 [PWR_HWIP] = PWR_HWID,
199 [NBIF_HWIP] = NBIF_HWID,
200 [THM_HWIP] = THM_HWID,
201 [CLK_HWIP] = CLKA_HWID,
202 [UMC_HWIP] = UMC_HWID,
203 [XGMI_HWIP] = XGMI_HWID,
204 [DCI_HWIP] = DCI_HWID,
205 [PCIE_HWIP] = PCIE_HWID,
206 };
207
208 static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
209 {
210 u64 tmr_offset, tmr_size, pos;
211 void *discv_regn;
212 int ret;
213
214 ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
215 if (ret)
216 return ret;
217
218 pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
219
220 /* This region is read-only and reserved from system use */
221 discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
222 if (discv_regn) {
223 memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
224 memunmap(discv_regn);
225 return 0;
226 }
227
228 return -ENOENT;
229 }
230
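/* The discovery binary normally sits DISCOVERY_TMR_OFFSET bytes below the top
 * of VRAM. If RCC_CONFIG_MEMSIZE reads back as zero, fall back to the
 * ACPI-described system-memory TMR instead.
 */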
231 static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
232 uint8_t *binary)
233 {
234 uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
235 int ret = 0;
236
237 if (vram_size) {
238 uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
239 amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
240 adev->mman.discovery_tmr_size, false);
241 } else {
242 ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
243 }
244
245 return ret;
246 }
247
248 static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
249 {
250 const struct firmware *fw;
251 const char *fw_name;
252 int r;
253
254 switch (amdgpu_discovery) {
255 case 2:
256 fw_name = FIRMWARE_IP_DISCOVERY;
257 break;
258 default:
259 dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
260 return -EINVAL;
261 }
262
263 r = request_firmware(&fw, fw_name, adev->dev);
264 if (r) {
265 dev_err(adev->dev, "can't load firmware \"%s\"\n",
266 fw_name);
267 return r;
268 }
269
270 memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
271 release_firmware(fw);
272
273 return 0;
274 }
275
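/* The discovery tables are protected by a simple 16-bit checksum: the
 * byte-wise sum of the covered region, compared against the value stored in
 * the corresponding header.
 */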
276 static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
277 {
278 uint16_t checksum = 0;
279 int i;
280
281 for (i = 0; i < size; i++)
282 checksum += data[i];
283
284 return checksum;
285 }
286
287 static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
288 uint16_t expected)
289 {
290 return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
291 }
292
293 static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
294 {
295 struct binary_header *bhdr;
296 bhdr = (struct binary_header *)binary;
297
298 return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
299 }
300
301 static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
302 {
303 /*
304 * So far, apply this quirk only on those Navy Flounder boards which
305 * have a bad VCN config in their harvest table.
306 */
307 if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
308 (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) {
309 switch (adev->pdev->revision) {
310 case 0xC1:
311 case 0xC2:
312 case 0xC3:
313 case 0xC5:
314 case 0xC7:
315 case 0xCF:
316 case 0xDF:
317 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
318 adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
319 break;
320 default:
321 break;
322 }
323 }
324 }
325
326 static int amdgpu_discovery_init(struct amdgpu_device *adev)
327 {
328 struct table_info *info;
329 struct binary_header *bhdr;
330 uint16_t offset;
331 uint16_t size;
332 uint16_t checksum;
333 int r;
334
335 adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
336 adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
337 if (!adev->mman.discovery_bin)
338 return -ENOMEM;
339
340 /* Read from file if it is the preferred option */
341 if (amdgpu_discovery == 2) {
342 dev_info(adev->dev, "use ip discovery information from file");
343 r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
344
345 if (r) {
346 dev_err(adev->dev, "failed to read ip discovery binary from file\n");
347 r = -EINVAL;
348 goto out;
349 }
350
351 } else {
352 r = amdgpu_discovery_read_binary_from_mem(
353 adev, adev->mman.discovery_bin);
354 if (r)
355 goto out;
356 }
357
358 /* check the ip discovery binary signature */
359 if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
360 dev_err(adev->dev,
361 "get invalid ip discovery binary signature\n");
362 r = -EINVAL;
363 goto out;
364 }
365
366 bhdr = (struct binary_header *)adev->mman.discovery_bin;
367
368 offset = offsetof(struct binary_header, binary_checksum) +
369 sizeof(bhdr->binary_checksum);
370 size = le16_to_cpu(bhdr->binary_size) - offset;
371 checksum = le16_to_cpu(bhdr->binary_checksum);
372
373 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
374 size, checksum)) {
375 dev_err(adev->dev, "invalid ip discovery binary checksum\n");
376 r = -EINVAL;
377 goto out;
378 }
379
380 info = &bhdr->table_list[IP_DISCOVERY];
381 offset = le16_to_cpu(info->offset);
382 checksum = le16_to_cpu(info->checksum);
383
384 if (offset) {
385 struct ip_discovery_header *ihdr =
386 (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
387 if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
388 dev_err(adev->dev, "invalid ip discovery data table signature\n");
389 r = -EINVAL;
390 goto out;
391 }
392
393 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
394 le16_to_cpu(ihdr->size), checksum)) {
395 dev_err(adev->dev, "invalid ip discovery data table checksum\n");
396 r = -EINVAL;
397 goto out;
398 }
399 }
400
401 info = &bhdr->table_list[GC];
402 offset = le16_to_cpu(info->offset);
403 checksum = le16_to_cpu(info->checksum);
404
405 if (offset) {
406 struct gpu_info_header *ghdr =
407 (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
408
409 if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
410 dev_err(adev->dev, "invalid ip discovery gc table id\n");
411 r = -EINVAL;
412 goto out;
413 }
414
415 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
416 le32_to_cpu(ghdr->size), checksum)) {
417 dev_err(adev->dev, "invalid gc data table checksum\n");
418 r = -EINVAL;
419 goto out;
420 }
421 }
422
423 info = &bhdr->table_list[HARVEST_INFO];
424 offset = le16_to_cpu(info->offset);
425 checksum = le16_to_cpu(info->checksum);
426
427 if (offset) {
428 struct harvest_info_header *hhdr =
429 (struct harvest_info_header *)(adev->mman.discovery_bin + offset);
430
431 if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
432 dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
433 r = -EINVAL;
434 goto out;
435 }
436
437 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
438 sizeof(struct harvest_table), checksum)) {
439 dev_err(adev->dev, "invalid harvest data table checksum\n");
440 r = -EINVAL;
441 goto out;
442 }
443 }
444
445 info = &bhdr->table_list[VCN_INFO];
446 offset = le16_to_cpu(info->offset);
447 checksum = le16_to_cpu(info->checksum);
448
449 if (offset) {
450 struct vcn_info_header *vhdr =
451 (struct vcn_info_header *)(adev->mman.discovery_bin + offset);
452
453 if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
454 dev_err(adev->dev, "invalid ip discovery vcn table id\n");
455 r = -EINVAL;
456 goto out;
457 }
458
459 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
460 le32_to_cpu(vhdr->size_bytes), checksum)) {
461 dev_err(adev->dev, "invalid vcn data table checksum\n");
462 r = -EINVAL;
463 goto out;
464 }
465 }
466
467 info = &bhdr->table_list[MALL_INFO];
468 offset = le16_to_cpu(info->offset);
469 checksum = le16_to_cpu(info->checksum);
470
471 if (0 && offset) {
472 struct mall_info_header *mhdr =
473 (struct mall_info_header *)(adev->mman.discovery_bin + offset);
474
475 if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
476 dev_err(adev->dev, "invalid ip discovery mall table id\n");
477 r = -EINVAL;
478 goto out;
479 }
480
481 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
482 le32_to_cpu(mhdr->size_bytes), checksum)) {
483 dev_err(adev->dev, "invalid mall data table checksum\n");
484 r = -EINVAL;
485 goto out;
486 }
487 }
488
489 return 0;
490
491 out:
492 kfree(adev->mman.discovery_bin);
493 adev->mman.discovery_bin = NULL;
494
495 return r;
496 }
497
498 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
499
500 void amdgpu_discovery_fini(struct amdgpu_device *adev)
501 {
502 amdgpu_discovery_sysfs_fini(adev);
503 kfree(adev->mman.discovery_bin);
504 adev->mman.discovery_bin = NULL;
505 }
506
507 static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
508 {
509 if (ip->instance_number >= HWIP_MAX_INSTANCE) {
510 DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
511 ip->instance_number);
512 return -EINVAL;
513 }
514 if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
515 DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
516 le16_to_cpu(ip->hw_id));
517 return -EINVAL;
518 }
519
520 return 0;
521 }
522
523 static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
524 uint32_t *vcn_harvest_count)
525 {
526 struct binary_header *bhdr;
527 struct ip_discovery_header *ihdr;
528 struct die_header *dhdr;
529 struct ip_v4 *ip;
530 uint16_t die_offset, ip_offset, num_dies, num_ips;
531 int i, j;
532
533 bhdr = (struct binary_header *)adev->mman.discovery_bin;
534 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
535 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
536 num_dies = le16_to_cpu(ihdr->num_dies);
537
538 /* scan harvest bit of all IP data structures */
539 for (i = 0; i < num_dies; i++) {
540 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
541 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
542 num_ips = le16_to_cpu(dhdr->num_ips);
543 ip_offset = die_offset + sizeof(*dhdr);
544
545 for (j = 0; j < num_ips; j++) {
546 ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
547
548 if (amdgpu_discovery_validate_ip(ip))
549 goto next_ip;
550
551 if (le16_to_cpu(ip->variant) == 1) {
552 switch (le16_to_cpu(ip->hw_id)) {
553 case VCN_HWID:
554 (*vcn_harvest_count)++;
555 if (ip->instance_number == 0) {
556 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
557 adev->vcn.inst_mask &=
558 ~AMDGPU_VCN_HARVEST_VCN0;
559 adev->jpeg.inst_mask &=
560 ~AMDGPU_VCN_HARVEST_VCN0;
561 } else {
562 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
563 adev->vcn.inst_mask &=
564 ~AMDGPU_VCN_HARVEST_VCN1;
565 adev->jpeg.inst_mask &=
566 ~AMDGPU_VCN_HARVEST_VCN1;
567 }
568 break;
569 case DMU_HWID:
570 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
571 break;
572 default:
573 break;
574 }
575 }
576 next_ip:
577 if (ihdr->base_addr_64_bit)
578 ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
579 else
580 ip_offset += struct_size(ip, base_address, ip->num_base_address);
581 }
582 }
583 }
584
585 static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
586 uint32_t *vcn_harvest_count,
587 uint32_t *umc_harvest_count)
588 {
589 struct binary_header *bhdr;
590 struct harvest_table *harvest_info;
591 u16 offset;
592 int i;
593 uint32_t umc_harvest_config = 0;
594
595 bhdr = (struct binary_header *)adev->mman.discovery_bin;
596 offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
597
598 if (!offset) {
599 dev_err(adev->dev, "invalid harvest table offset\n");
600 return;
601 }
602
603 harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
604
605 for (i = 0; i < 32; i++) {
606 if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
607 break;
608
609 switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
610 case VCN_HWID:
611 (*vcn_harvest_count)++;
612 adev->vcn.harvest_config |=
613 (1 << harvest_info->list[i].number_instance);
614 adev->jpeg.harvest_config |=
615 (1 << harvest_info->list[i].number_instance);
616
617 adev->vcn.inst_mask &=
618 ~(1U << harvest_info->list[i].number_instance);
619 adev->jpeg.inst_mask &=
620 ~(1U << harvest_info->list[i].number_instance);
621 break;
622 case DMU_HWID:
623 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
624 break;
625 case UMC_HWID:
626 umc_harvest_config |=
627 1 << (le16_to_cpu(harvest_info->list[i].number_instance));
628 (*umc_harvest_count)++;
629 break;
630 case GC_HWID:
631 adev->gfx.xcc_mask &=
632 ~(1U << harvest_info->list[i].number_instance);
633 break;
634 case SDMA0_HWID:
635 adev->sdma.sdma_mask &=
636 ~(1U << harvest_info->list[i].number_instance);
637 break;
638 default:
639 break;
640 }
641 }
642
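	/* Active UMC nodes are all enumerated node instances minus the harvested ones. */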
643 adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
644 ~umc_harvest_config;
645 }
646
647 /* ================================================== */
648
649 struct ip_hw_instance {
650 struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
651
652 int hw_id;
653 u8 num_instance;
654 u8 major, minor, revision;
655 u8 harvest;
656
657 int num_base_addresses;
658 u32 base_addr[];
659 };
660
661 struct ip_hw_id {
662 struct kset hw_id_kset; /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
663 int hw_id;
664 };
665
666 struct ip_die_entry {
667 struct kset ip_kset; /* ip_discovery/die/#die/, contains ip_hw_id */
668 u16 num_ips;
669 };
670
671 /* -------------------------------------------------- */
672
673 struct ip_hw_instance_attr {
674 struct attribute attr;
675 ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
676 };
677
678 static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
679 {
680 return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
681 }
682
683 static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
684 {
685 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
686 }
687
688 static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
689 {
690 return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
691 }
692
693 static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
694 {
695 return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
696 }
697
698 static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
699 {
700 return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
701 }
702
703 static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
704 {
705 return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
706 }
707
708 static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
709 {
710 return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
711 }
712
713 static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
714 {
715 ssize_t res, at;
716 int ii;
717
718 for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
719 /* Here we satisfy the condition that at + size <= PAGE_SIZE.
720 */
721 if (at + 12 > PAGE_SIZE)
722 break;
723 res = sysfs_emit_at(buf, at, "0x%08X\n",
724 ip_hw_instance->base_addr[ii]);
725 if (res <= 0)
726 break;
727 at += res;
728 }
729
730 return res < 0 ? res : at;
731 }
732
733 static struct ip_hw_instance_attr ip_hw_attr[] = {
734 __ATTR_RO(hw_id),
735 __ATTR_RO(num_instance),
736 __ATTR_RO(major),
737 __ATTR_RO(minor),
738 __ATTR_RO(revision),
739 __ATTR_RO(harvest),
740 __ATTR_RO(num_base_addresses),
741 __ATTR_RO(base_addr),
742 };
743
744 static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
745 ATTRIBUTE_GROUPS(ip_hw_instance);
746
747 #define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
748 #define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
749
750 static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
751 struct attribute *attr,
752 char *buf)
753 {
754 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
755 struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
756
757 if (!ip_hw_attr->show)
758 return -EIO;
759
760 return ip_hw_attr->show(ip_hw_instance, buf);
761 }
762
763 static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
764 .show = ip_hw_instance_attr_show,
765 };
766
767 static void ip_hw_instance_release(struct kobject *kobj)
768 {
769 struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
770
771 kfree(ip_hw_instance);
772 }
773
774 static const struct kobj_type ip_hw_instance_ktype = {
775 .release = ip_hw_instance_release,
776 .sysfs_ops = &ip_hw_instance_sysfs_ops,
777 .default_groups = ip_hw_instance_groups,
778 };
779
780 /* -------------------------------------------------- */
781
782 #define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
783
784 static void ip_hw_id_release(struct kobject *kobj)
785 {
786 struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
787
788 if (!list_empty(&ip_hw_id->hw_id_kset.list))
789 DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
790 kfree(ip_hw_id);
791 }
792
793 static const struct kobj_type ip_hw_id_ktype = {
794 .release = ip_hw_id_release,
795 .sysfs_ops = &kobj_sysfs_ops,
796 };
797
798 /* -------------------------------------------------- */
799
800 static void die_kobj_release(struct kobject *kobj);
801 static void ip_disc_release(struct kobject *kobj);
802
803 struct ip_die_entry_attribute {
804 struct attribute attr;
805 ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
806 };
807
808 #define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr)
809
810 static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
811 {
812 return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
813 }
814
815 /* If there are more ip_die_entry attrs, other than the number of IPs,
816 * we can make this into an array of attrs, and then initialize
817 * ip_die_entry_attrs in a loop.
818 */
819 static struct ip_die_entry_attribute num_ips_attr =
820 __ATTR_RO(num_ips);
821
822 static struct attribute *ip_die_entry_attrs[] = {
823 &num_ips_attr.attr,
824 NULL,
825 };
826 ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
827
828 #define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
829
830 static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
831 struct attribute *attr,
832 char *buf)
833 {
834 struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
835 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
836
837 if (!ip_die_entry_attr->show)
838 return -EIO;
839
840 return ip_die_entry_attr->show(ip_die_entry, buf);
841 }
842
843 static void ip_die_entry_release(struct kobject *kobj)
844 {
845 struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
846
847 if (!list_empty(&ip_die_entry->ip_kset.list))
848 DRM_ERROR("ip_die_entry->ip_kset is not empty");
849 kfree(ip_die_entry);
850 }
851
852 static const struct sysfs_ops ip_die_entry_sysfs_ops = {
853 .show = ip_die_entry_attr_show,
854 };
855
856 static const struct kobj_type ip_die_entry_ktype = {
857 .release = ip_die_entry_release,
858 .sysfs_ops = &ip_die_entry_sysfs_ops,
859 .default_groups = ip_die_entry_groups,
860 };
861
862 static const struct kobj_type die_kobj_ktype = {
863 .release = die_kobj_release,
864 .sysfs_ops = &kobj_sysfs_ops,
865 };
866
867 static const struct kobj_type ip_discovery_ktype = {
868 .release = ip_disc_release,
869 .sysfs_ops = &kobj_sysfs_ops,
870 };
871
872 struct ip_discovery_top {
873 struct kobject kobj; /* ip_discovery/ */
874 struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */
875 struct amdgpu_device *adev;
876 };
877
878 static void die_kobj_release(struct kobject *kobj)
879 {
880 struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
881 struct ip_discovery_top,
882 die_kset);
883 if (!list_empty(&ip_top->die_kset.list))
884 DRM_ERROR("ip_top->die_kset is not empty");
885 }
886
887 static void ip_disc_release(struct kobject *kobj)
888 {
889 struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
890 kobj);
891 struct amdgpu_device *adev = ip_top->adev;
892
893 adev->ip_top = NULL;
894 kfree(ip_top);
895 }
896
897 static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
898 uint16_t hw_id, uint8_t inst)
899 {
900 uint8_t harvest = 0;
901
902 /* Until a uniform way is figured out, get the mask based on hwid */
903 switch (hw_id) {
904 case VCN_HWID:
905 harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
906 break;
907 case DMU_HWID:
908 if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
909 harvest = 0x1;
910 break;
911 case UMC_HWID:
912 /* TODO: It needs another parsing; for now, ignore. */
913 break;
914 case GC_HWID:
915 harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
916 break;
917 case SDMA0_HWID:
918 harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
919 break;
920 default:
921 break;
922 }
923
924 return harvest;
925 }
926
927 static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
928 struct ip_die_entry *ip_die_entry,
929 const size_t _ip_offset, const int num_ips,
930 bool reg_base_64)
931 {
932 int ii, jj, kk, res;
933
934 DRM_DEBUG("num_ips:%d", num_ips);
935
936 /* Find all IPs of a given HW ID, and add their instance to
937 * #die/#hw_id/#instance/<attributes>
938 */
939 for (ii = 0; ii < HW_ID_MAX; ii++) {
940 struct ip_hw_id *ip_hw_id = NULL;
941 size_t ip_offset = _ip_offset;
942
943 for (jj = 0; jj < num_ips; jj++) {
944 struct ip_v4 *ip;
945 struct ip_hw_instance *ip_hw_instance;
946
947 ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
948 if (amdgpu_discovery_validate_ip(ip) ||
949 le16_to_cpu(ip->hw_id) != ii)
950 goto next_ip;
951
952 DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
953
954 /* We have a hw_id match; register the hw
955 * block if not yet registered.
956 */
957 if (!ip_hw_id) {
958 ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
959 if (!ip_hw_id)
960 return -ENOMEM;
961 ip_hw_id->hw_id = ii;
962
963 kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
964 ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
965 ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
966 res = kset_register(&ip_hw_id->hw_id_kset);
967 if (res) {
968 DRM_ERROR("Couldn't register ip_hw_id kset");
969 kfree(ip_hw_id);
970 return res;
971 }
972 if (hw_id_names[ii]) {
973 res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
974 &ip_hw_id->hw_id_kset.kobj,
975 hw_id_names[ii]);
976 if (res) {
977 DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
978 hw_id_names[ii],
979 kobject_name(&ip_die_entry->ip_kset.kobj));
980 }
981 }
982 }
983
984 /* Now register its instance.
985 */
986 ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
987 base_addr,
988 ip->num_base_address),
989 GFP_KERNEL);
990 if (!ip_hw_instance) {
991 DRM_ERROR("no memory for ip_hw_instance");
992 return -ENOMEM;
993 }
994 ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
995 ip_hw_instance->num_instance = ip->instance_number;
996 ip_hw_instance->major = ip->major;
997 ip_hw_instance->minor = ip->minor;
998 ip_hw_instance->revision = ip->revision;
999 ip_hw_instance->harvest =
1000 amdgpu_discovery_get_harvest_info(
1001 adev, ip_hw_instance->hw_id,
1002 ip_hw_instance->num_instance);
1003 ip_hw_instance->num_base_addresses = ip->num_base_address;
1004
1005 for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
1006 if (reg_base_64)
1007 ip_hw_instance->base_addr[kk] =
1008 lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
1009 else
1010 ip_hw_instance->base_addr[kk] = ip->base_address[kk];
1011 }
1012
1013 kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
1014 ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
1015 res = kobject_add(&ip_hw_instance->kobj, NULL,
1016 "%d", ip_hw_instance->num_instance);
1017 next_ip:
1018 if (reg_base_64)
1019 ip_offset += struct_size(ip, base_address_64,
1020 ip->num_base_address);
1021 else
1022 ip_offset += struct_size(ip, base_address,
1023 ip->num_base_address);
1024 }
1025 }
1026
1027 return 0;
1028 }
1029
1030 static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
1031 {
1032 struct binary_header *bhdr;
1033 struct ip_discovery_header *ihdr;
1034 struct die_header *dhdr;
1035 struct kset *die_kset = &adev->ip_top->die_kset;
1036 u16 num_dies, die_offset, num_ips;
1037 size_t ip_offset;
1038 int ii, res;
1039
1040 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1041 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1042 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1043 num_dies = le16_to_cpu(ihdr->num_dies);
1044
1045 DRM_DEBUG("number of dies: %d\n", num_dies);
1046
1047 for (ii = 0; ii < num_dies; ii++) {
1048 struct ip_die_entry *ip_die_entry;
1049
1050 die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
1051 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1052 num_ips = le16_to_cpu(dhdr->num_ips);
1053 ip_offset = die_offset + sizeof(*dhdr);
1054
1055 /* Add the die to the kset.
1056 *
1057 * dhdr->die_id == ii, which was checked in
1058 * amdgpu_discovery_reg_base_init().
1059 */
1060
1061 ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
1062 if (!ip_die_entry)
1063 return -ENOMEM;
1064
1065 ip_die_entry->num_ips = num_ips;
1066
1067 kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
1068 ip_die_entry->ip_kset.kobj.kset = die_kset;
1069 ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
1070 res = kset_register(&ip_die_entry->ip_kset);
1071 if (res) {
1072 DRM_ERROR("Couldn't register ip_die_entry kset");
1073 kfree(ip_die_entry);
1074 return res;
1075 }
1076
1077 amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
1078 }
1079
1080 return 0;
1081 }
1082
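/* Build the sysfs view of the discovery table under the device directory:
 * ip_discovery/die/<die_id>/<hw_id>/<instance>/{hw_id, major, minor, ...}.
 */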
1083 static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
1084 {
1085 struct kset *die_kset;
1086 int res, ii;
1087
1088 if (!adev->mman.discovery_bin)
1089 return -EINVAL;
1090
1091 adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
1092 if (!adev->ip_top)
1093 return -ENOMEM;
1094
1095 adev->ip_top->adev = adev;
1096
1097 res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
1098 &adev->dev->kobj, "ip_discovery");
1099 if (res) {
1100 DRM_ERROR("Couldn't init and add ip_discovery/");
1101 goto Err;
1102 }
1103
1104 die_kset = &adev->ip_top->die_kset;
1105 kobject_set_name(&die_kset->kobj, "%s", "die");
1106 die_kset->kobj.parent = &adev->ip_top->kobj;
1107 die_kset->kobj.ktype = &die_kobj_ktype;
1108 res = kset_register(&adev->ip_top->die_kset);
1109 if (res) {
1110 DRM_ERROR("Couldn't register die_kset");
1111 goto Err;
1112 }
1113
1114 for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
1115 ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
1116 ip_hw_instance_attrs[ii] = NULL;
1117
1118 res = amdgpu_discovery_sysfs_recurse(adev);
1119
1120 return res;
1121 Err:
1122 kobject_put(&adev->ip_top->kobj);
1123 return res;
1124 }
1125
1126 /* -------------------------------------------------- */
1127
1128 #define list_to_kobj(el) container_of(el, struct kobject, entry)
1129
1130 static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
1131 {
1132 struct list_head *el, *tmp;
1133 struct kset *hw_id_kset;
1134
1135 hw_id_kset = &ip_hw_id->hw_id_kset;
1136 spin_lock(&hw_id_kset->list_lock);
1137 list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
1138 list_del_init(el);
1139 spin_unlock(&hw_id_kset->list_lock);
1140 /* kobject is embedded in ip_hw_instance */
1141 kobject_put(list_to_kobj(el));
1142 spin_lock(&hw_id_kset->list_lock);
1143 }
1144 spin_unlock(&hw_id_kset->list_lock);
1145 kobject_put(&ip_hw_id->hw_id_kset.kobj);
1146 }
1147
1148 static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
1149 {
1150 struct list_head *el, *tmp;
1151 struct kset *ip_kset;
1152
1153 ip_kset = &ip_die_entry->ip_kset;
1154 spin_lock(&ip_kset->list_lock);
1155 list_for_each_prev_safe(el, tmp, &ip_kset->list) {
1156 list_del_init(el);
1157 spin_unlock(&ip_kset->list_lock);
1158 amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
1159 spin_lock(&ip_kset->list_lock);
1160 }
1161 spin_unlock(&ip_kset->list_lock);
1162 kobject_put(&ip_die_entry->ip_kset.kobj);
1163 }
1164
1165 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
1166 {
1167 struct list_head *el, *tmp;
1168 struct kset *die_kset;
1169
1170 die_kset = &adev->ip_top->die_kset;
1171 spin_lock(&die_kset->list_lock);
1172 list_for_each_prev_safe(el, tmp, &die_kset->list) {
1173 list_del_init(el);
1174 spin_unlock(&die_kset->list_lock);
1175 amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
1176 spin_lock(&die_kset->list_lock);
1177 }
1178 spin_unlock(&die_kset->list_lock);
1179 kobject_put(&adev->ip_top->die_kset.kobj);
1180 kobject_put(&adev->ip_top->kobj);
1181 }
1182
1183 /* ================================================== */
1184
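/* Walk every die and IP instance in the discovery binary, accumulate the
 * VCN/SDMA/UMC/GC instance counts and masks, and record each IP's base
 * addresses and version in adev->reg_offset[] and adev->ip_versions[].
 */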
1185 static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
1186 {
1187 struct binary_header *bhdr;
1188 struct ip_discovery_header *ihdr;
1189 struct die_header *dhdr;
1190 struct ip_v4 *ip;
1191 uint16_t die_offset;
1192 uint16_t ip_offset;
1193 uint16_t num_dies;
1194 uint16_t num_ips;
1195 uint8_t num_base_address;
1196 int hw_ip;
1197 int i, j, k;
1198 int r;
1199
1200 r = amdgpu_discovery_init(adev);
1201 if (r) {
1202 DRM_ERROR("amdgpu_discovery_init failed\n");
1203 return r;
1204 }
1205
1206 adev->gfx.xcc_mask = 0;
1207 adev->sdma.sdma_mask = 0;
1208 adev->vcn.inst_mask = 0;
1209 adev->jpeg.inst_mask = 0;
1210 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1211 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1212 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1213 num_dies = le16_to_cpu(ihdr->num_dies);
1214
1215 DRM_DEBUG("number of dies: %d\n", num_dies);
1216
1217 for (i = 0; i < num_dies; i++) {
1218 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1219 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1220 num_ips = le16_to_cpu(dhdr->num_ips);
1221 ip_offset = die_offset + sizeof(*dhdr);
1222
1223 if (le16_to_cpu(dhdr->die_id) != i) {
1224 DRM_ERROR("invalid die id %d, expected %d\n",
1225 le16_to_cpu(dhdr->die_id), i);
1226 return -EINVAL;
1227 }
1228
1229 DRM_DEBUG("number of hardware IPs on die%d: %d\n",
1230 le16_to_cpu(dhdr->die_id), num_ips);
1231
1232 for (j = 0; j < num_ips; j++) {
1233 ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1234
1235 if (amdgpu_discovery_validate_ip(ip))
1236 goto next_ip;
1237
1238 num_base_address = ip->num_base_address;
1239
1240 DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
1241 hw_id_names[le16_to_cpu(ip->hw_id)],
1242 le16_to_cpu(ip->hw_id),
1243 ip->instance_number,
1244 ip->major, ip->minor,
1245 ip->revision);
1246
1247 if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
1248 /* Bit [5:0]: original revision value
1249 * Bit [7:6]: en/decode capability:
1250 * 0b00 : VCN function normally
1251 * 0b10 : encode is disabled
1252 * 0b01 : decode is disabled
1253 */
1254 adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
1255 ip->revision & 0xc0;
1256 ip->revision &= ~0xc0;
1257 if (adev->vcn.num_vcn_inst <
1258 AMDGPU_MAX_VCN_INSTANCES) {
1259 adev->vcn.num_vcn_inst++;
1260 adev->vcn.inst_mask |=
1261 (1U << ip->instance_number);
1262 adev->jpeg.inst_mask |=
1263 (1U << ip->instance_number);
1264 } else {
1265 dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
1266 adev->vcn.num_vcn_inst + 1,
1267 AMDGPU_MAX_VCN_INSTANCES);
1268 }
1269 }
1270 if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
1271 le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
1272 le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
1273 le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
1274 if (adev->sdma.num_instances <
1275 AMDGPU_MAX_SDMA_INSTANCES) {
1276 adev->sdma.num_instances++;
1277 adev->sdma.sdma_mask |=
1278 (1U << ip->instance_number);
1279 } else {
1280 dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
1281 adev->sdma.num_instances + 1,
1282 AMDGPU_MAX_SDMA_INSTANCES);
1283 }
1284 }
1285
1286 if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
1287 adev->gmc.num_umc++;
1288 adev->umc.node_inst_num++;
1289 }
1290
1291 if (le16_to_cpu(ip->hw_id) == GC_HWID)
1292 adev->gfx.xcc_mask |=
1293 (1U << ip->instance_number);
1294
1295 for (k = 0; k < num_base_address; k++) {
1296 /*
1297 * convert the endianness of base addresses in place,
1298 * so that we don't need to convert them when accessing adev->reg_offset.
1299 */
1300 if (ihdr->base_addr_64_bit)
1301 /* Truncate the 64bit base address from ip discovery
1302 * and only store lower 32bit ip base in reg_offset[].
1303 * Bits > 32 follows ASIC specific format, thus just
1304 * discard them and handle it within specific ASIC.
1305 * By this way reg_offset[] and related helpers can
1306 * stay unchanged.
1307 * The base address is in dwords, thus clear the
1308 * highest 2 bits to store.
1309 */
1310 ip->base_address[k] =
1311 lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
1312 else
1313 ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
1314 DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
1315 }
1316
1317 for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
1318 if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
1319 hw_id_map[hw_ip] != 0) {
1320 DRM_DEBUG("set register base offset for %s\n",
1321 hw_id_names[le16_to_cpu(ip->hw_id)]);
1322 adev->reg_offset[hw_ip][ip->instance_number] =
1323 ip->base_address;
1324 /* Instance support is somewhat inconsistent.
1325 * SDMA is a good example. Sienna cichlid has 4 total
1326 * SDMA instances, each enumerated separately (HWIDs
1327 * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
1328 * but they are enumerated as multiple instances of the
1329 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
1330 * example. On most chips there are multiple instances
1331 * with the same HWID.
1332 */
1333 adev->ip_versions[hw_ip][ip->instance_number] =
1334 IP_VERSION(ip->major, ip->minor, ip->revision);
1335 }
1336 }
1337
1338 next_ip:
1339 if (ihdr->base_addr_64_bit)
1340 ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
1341 else
1342 ip_offset += struct_size(ip, base_address, ip->num_base_address);
1343 }
1344 }
1345
1346 return 0;
1347 }
1348
1349 static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
1350 {
1351 int vcn_harvest_count = 0;
1352 int umc_harvest_count = 0;
1353
1354 /*
1355 * Harvest table does not fit Navi1x and legacy GPUs,
1356 * so read harvest bit per IP data structure to set
1357 * harvest configuration.
1358 */
1359 if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0) &&
1360 adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3)) {
1361 if ((adev->pdev->device == 0x731E &&
1362 (adev->pdev->revision == 0xC6 ||
1363 adev->pdev->revision == 0xC7)) ||
1364 (adev->pdev->device == 0x7340 &&
1365 adev->pdev->revision == 0xC9) ||
1366 (adev->pdev->device == 0x7360 &&
1367 adev->pdev->revision == 0xC7))
1368 amdgpu_discovery_read_harvest_bit_per_ip(adev,
1369 &vcn_harvest_count);
1370 } else {
1371 amdgpu_discovery_read_from_harvest_table(adev,
1372 &vcn_harvest_count,
1373 &umc_harvest_count);
1374 }
1375
1376 amdgpu_discovery_harvest_config_quirk(adev);
1377
1378 if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
1379 adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
1380 adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
1381 }
1382
1383 if (umc_harvest_count < adev->gmc.num_umc) {
1384 adev->gmc.num_umc -= umc_harvest_count;
1385 }
1386 }
1387
1388 union gc_info {
1389 struct gc_info_v1_0 v1;
1390 struct gc_info_v1_1 v1_1;
1391 struct gc_info_v1_2 v1_2;
1392 struct gc_info_v2_0 v2;
1393 struct gc_info_v2_1 v2_1;
1394 };
1395
1396 static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
1397 {
1398 struct binary_header *bhdr;
1399 union gc_info *gc_info;
1400 u16 offset;
1401
1402 if (!adev->mman.discovery_bin) {
1403 DRM_ERROR("ip discovery uninitialized\n");
1404 return -EINVAL;
1405 }
1406
1407 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1408 offset = le16_to_cpu(bhdr->table_list[GC].offset);
1409
1410 if (!offset)
1411 return 0;
1412
1413 gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
1414
1415 switch (le16_to_cpu(gc_info->v1.header.version_major)) {
1416 case 1:
1417 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
1418 adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
1419 le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
1420 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1421 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
1422 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
1423 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
1424 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
1425 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
1426 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
1427 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
1428 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
1429 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
1430 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
1431 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
1432 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
1433 le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1434 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
1435 if (gc_info->v1.header.version_minor >= 1) {
1436 adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
1437 adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
1438 adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
1439 }
1440 if (gc_info->v1.header.version_minor >= 2) {
1441 adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
1442 adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
1443 adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
1444 adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
1445 adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
1446 adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
1447 adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
1448 adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
1449 }
1450 break;
1451 case 2:
1452 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
1453 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
1454 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1455 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
1456 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
1457 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
1458 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
1459 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
1460 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
1461 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
1462 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
1463 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
1464 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
1465 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
1466 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
1467 le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1468 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
1469 if (gc_info->v2.header.version_minor == 1) {
1470 adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
1471 adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
1472 adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
1473 adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
1474 adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
1475 adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
1476 adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
1477 }
1478 break;
1479 default:
1480 dev_err(adev->dev,
1481 "Unhandled GC info table %d.%d\n",
1482 le16_to_cpu(gc_info->v1.header.version_major),
1483 le16_to_cpu(gc_info->v1.header.version_minor));
1484 return -EINVAL;
1485 }
1486 return 0;
1487 }
1488
1489 union mall_info {
1490 struct mall_info_v1_0 v1;
1491 struct mall_info_v2_0 v2;
1492 };
1493
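/* MALL ("memory access at last level", i.e. the Infinity Cache) size is summed
 * per UMC instance for v1 tables; v2 tables report a uniform per-UMC size.
 */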
1494 static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
1495 {
1496 struct binary_header *bhdr;
1497 union mall_info *mall_info;
1498 u32 u, mall_size_per_umc, m_s_present, half_use;
1499 u64 mall_size;
1500 u16 offset;
1501
1502 if (!adev->mman.discovery_bin) {
1503 DRM_ERROR("ip discovery uninitialized\n");
1504 return -EINVAL;
1505 }
1506
1507 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1508 offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
1509
1510 if (!offset)
1511 return 0;
1512
1513 mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
1514
1515 switch (le16_to_cpu(mall_info->v1.header.version_major)) {
1516 case 1:
1517 mall_size = 0;
1518 mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
1519 m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
1520 half_use = le32_to_cpu(mall_info->v1.m_half_use);
1521 for (u = 0; u < adev->gmc.num_umc; u++) {
1522 if (m_s_present & (1 << u))
1523 mall_size += mall_size_per_umc * 2;
1524 else if (half_use & (1 << u))
1525 mall_size += mall_size_per_umc / 2;
1526 else
1527 mall_size += mall_size_per_umc;
1528 }
1529 adev->gmc.mall_size = mall_size;
1530 adev->gmc.m_half_use = half_use;
1531 break;
1532 case 2:
1533 mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
1534 adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
1535 break;
1536 default:
1537 dev_err(adev->dev,
1538 "Unhandled MALL info table %d.%d\n",
1539 le16_to_cpu(mall_info->v1.header.version_major),
1540 le16_to_cpu(mall_info->v1.header.version_minor));
1541 return -EINVAL;
1542 }
1543 return 0;
1544 }
1545
1546 union vcn_info {
1547 struct vcn_info_v1_0 v1;
1548 };
1549
1550 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
1551 {
1552 struct binary_header *bhdr;
1553 union vcn_info *vcn_info;
1554 u16 offset;
1555 int v;
1556
1557 if (!adev->mman.discovery_bin) {
1558 DRM_ERROR("ip discovery uninitialized\n");
1559 return -EINVAL;
1560 }
1561
1562 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1563 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
1564 * but that may change in the future with new GPUs so keep this
1565 * check for defensive purposes.
1566 */
1567 if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
1568 dev_err(adev->dev, "invalid vcn instances\n");
1569 return -EINVAL;
1570 }
1571
1572 bhdr = (struct binary_header *)adev->mman.discovery_bin;
1573 offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
1574
1575 if (!offset)
1576 return 0;
1577
1578 vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
1579
1580 switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
1581 case 1:
1582 /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1583 * so this won't overflow.
1584 */
1585 for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
1586 adev->vcn.vcn_codec_disable_mask[v] =
1587 le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
1588 }
1589 break;
1590 default:
1591 dev_err(adev->dev,
1592 "Unhandled VCN info table %d.%d\n",
1593 le16_to_cpu(vcn_info->v1.header.version_major),
1594 le16_to_cpu(vcn_info->v1.header.version_minor));
1595 return -EINVAL;
1596 }
1597 return 0;
1598 }
1599
1600 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1601 {
1602 /* what IP to use for this? */
1603 switch (adev->ip_versions[GC_HWIP][0]) {
1604 case IP_VERSION(9, 0, 1):
1605 case IP_VERSION(9, 1, 0):
1606 case IP_VERSION(9, 2, 1):
1607 case IP_VERSION(9, 2, 2):
1608 case IP_VERSION(9, 3, 0):
1609 case IP_VERSION(9, 4, 0):
1610 case IP_VERSION(9, 4, 1):
1611 case IP_VERSION(9, 4, 2):
1612 case IP_VERSION(9, 4, 3):
1613 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1614 break;
1615 case IP_VERSION(10, 1, 10):
1616 case IP_VERSION(10, 1, 1):
1617 case IP_VERSION(10, 1, 2):
1618 case IP_VERSION(10, 1, 3):
1619 case IP_VERSION(10, 1, 4):
1620 case IP_VERSION(10, 3, 0):
1621 case IP_VERSION(10, 3, 1):
1622 case IP_VERSION(10, 3, 2):
1623 case IP_VERSION(10, 3, 3):
1624 case IP_VERSION(10, 3, 4):
1625 case IP_VERSION(10, 3, 5):
1626 case IP_VERSION(10, 3, 6):
1627 case IP_VERSION(10, 3, 7):
1628 amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1629 break;
1630 case IP_VERSION(11, 0, 0):
1631 case IP_VERSION(11, 0, 1):
1632 case IP_VERSION(11, 0, 2):
1633 case IP_VERSION(11, 0, 3):
1634 case IP_VERSION(11, 0, 4):
1635 amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1636 break;
1637 default:
1638 dev_err(adev->dev,
1639 "Failed to add common ip block(GC_HWIP:0x%x)\n",
1640 adev->ip_versions[GC_HWIP][0]);
1641 return -EINVAL;
1642 }
1643 return 0;
1644 }
1645
1646 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1647 {
1648 /* use GC or MMHUB IP version */
1649 switch (adev->ip_versions[GC_HWIP][0]) {
1650 case IP_VERSION(9, 0, 1):
1651 case IP_VERSION(9, 1, 0):
1652 case IP_VERSION(9, 2, 1):
1653 case IP_VERSION(9, 2, 2):
1654 case IP_VERSION(9, 3, 0):
1655 case IP_VERSION(9, 4, 0):
1656 case IP_VERSION(9, 4, 1):
1657 case IP_VERSION(9, 4, 2):
1658 case IP_VERSION(9, 4, 3):
1659 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1660 break;
1661 case IP_VERSION(10, 1, 10):
1662 case IP_VERSION(10, 1, 1):
1663 case IP_VERSION(10, 1, 2):
1664 case IP_VERSION(10, 1, 3):
1665 case IP_VERSION(10, 1, 4):
1666 case IP_VERSION(10, 3, 0):
1667 case IP_VERSION(10, 3, 1):
1668 case IP_VERSION(10, 3, 2):
1669 case IP_VERSION(10, 3, 3):
1670 case IP_VERSION(10, 3, 4):
1671 case IP_VERSION(10, 3, 5):
1672 case IP_VERSION(10, 3, 6):
1673 case IP_VERSION(10, 3, 7):
1674 amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1675 break;
1676 case IP_VERSION(11, 0, 0):
1677 case IP_VERSION(11, 0, 1):
1678 case IP_VERSION(11, 0, 2):
1679 case IP_VERSION(11, 0, 3):
1680 case IP_VERSION(11, 0, 4):
1681 amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
1682 break;
1683 default:
1684 dev_err(adev->dev,
1685 "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1686 adev->ip_versions[GC_HWIP][0]);
1687 return -EINVAL;
1688 }
1689 return 0;
1690 }
1691
1692 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
1693 {
1694 switch (adev->ip_versions[OSSSYS_HWIP][0]) {
1695 case IP_VERSION(4, 0, 0):
1696 case IP_VERSION(4, 0, 1):
1697 case IP_VERSION(4, 1, 0):
1698 case IP_VERSION(4, 1, 1):
1699 case IP_VERSION(4, 3, 0):
1700 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
1701 break;
1702 case IP_VERSION(4, 2, 0):
1703 case IP_VERSION(4, 2, 1):
1704 case IP_VERSION(4, 4, 0):
1705 case IP_VERSION(4, 4, 2):
1706 amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
1707 break;
1708 case IP_VERSION(5, 0, 0):
1709 case IP_VERSION(5, 0, 1):
1710 case IP_VERSION(5, 0, 2):
1711 case IP_VERSION(5, 0, 3):
1712 case IP_VERSION(5, 2, 0):
1713 case IP_VERSION(5, 2, 1):
1714 amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
1715 break;
1716 case IP_VERSION(6, 0, 0):
1717 case IP_VERSION(6, 0, 1):
1718 case IP_VERSION(6, 0, 2):
1719 amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
1720 break;
1721 case IP_VERSION(6, 1, 0):
1722 amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
1723 break;
1724 default:
1725 dev_err(adev->dev,
1726 "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
1727 adev->ip_versions[OSSSYS_HWIP][0]);
1728 return -EINVAL;
1729 }
1730 return 0;
1731 }
1732
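/* Pick the PSP (platform security processor) block based on the MP0 IP version. */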
static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MP0_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		break;
	case IP_VERSION(11, 0, 8):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(14, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
		break;
	case IP_VERSION(13, 0, 4):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
			adev->ip_versions[MP0_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

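/* Pick the SMU/powerplay block based on the MP1 IP version. */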
static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(11, 0, 2):
		if (adev->asic_type == CHIP_ARCTURUS)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
			adev->ip_versions[MP1_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

#if defined(CONFIG_DRM_AMD_DC)
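/* Under SR-IOV, display is exposed through the virtual KMS (VKMS) block. */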
static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
{
	amdgpu_device_set_sriov_virtual_display(adev);
	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
}
#endif

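/* Add a display block: virtual display, DC (dm), or nothing if DC is not supported. */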
static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->enable_virtual_display) {
		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
		return 0;
	}

	if (!amdgpu_device_has_dc_support(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC)
	if (adev->ip_versions[DCE_HWIP][0]) {
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
				adev->ip_versions[DCE_HWIP][0]);
			return -EINVAL;
		}
	} else if (adev->ip_versions[DCI_HWIP][0]) {
		switch (adev->ip_versions[DCI_HWIP][0]) {
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
		case IP_VERSION(12, 1, 0):
			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
				adev->ip_versions[DCI_HWIP][0]);
			return -EINVAL;
		}
	}
#endif
	return 0;
}

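/* Pick the GFX block based on the GC IP version. */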
static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		break;
	case IP_VERSION(9, 4, 3):
		if (!amdgpu_exp_hw_support)
			return -EINVAL;
		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add gfx ip block(GC_HWIP:0x%x)\n",
			adev->ip_versions[GC_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

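/* Pick the SDMA block based on the SDMA0 IP version. */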
static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[SDMA0_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 2):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		break;
	case IP_VERSION(4, 4, 2):
		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 5):
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		break;
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 2):
	case IP_VERSION(5, 2, 4):
	case IP_VERSION(5, 2, 5):
	case IP_VERSION(5, 2, 6):
	case IP_VERSION(5, 2, 3):
	case IP_VERSION(5, 2, 1):
	case IP_VERSION(5, 2, 7):
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
	case IP_VERSION(6, 1, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
			adev->ip_versions[SDMA0_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

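/* Add the multimedia blocks: UVD/VCE on older ASICs, VCN/JPEG otherwise. */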
static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->ip_versions[VCE_HWIP][0]) {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(7, 0, 0):
		case IP_VERSION(7, 2, 0):
			/* UVD is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
				adev->ip_versions[UVD_HWIP][0]);
			return -EINVAL;
		}
		switch (adev->ip_versions[VCE_HWIP][0]) {
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 1, 0):
			/* VCE is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
				adev->ip_versions[VCE_HWIP][0]);
			return -EINVAL;
		}
	} else {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 2, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
			break;
		case IP_VERSION(2, 0, 3):
			break;
		case IP_VERSION(2, 5, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
			break;
		case IP_VERSION(2, 6, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
			break;
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 16):
		case IP_VERSION(3, 1, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 0, 2):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
			break;
		case IP_VERSION(3, 0, 33):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			break;
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 0, 2):
		case IP_VERSION(4, 0, 4):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
			break;
		case IP_VERSION(4, 0, 3):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
				adev->ip_versions[UVD_HWIP][0]);
			return -EINVAL;
		}
	}
	return 0;
}

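/* Enable the MES (MicroEngine Scheduler) block where the GC IP version supports it. */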
static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
		if (amdgpu_mes) {
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
			adev->enable_mes = true;
			if (amdgpu_mes_kiq)
				adev->enable_mes_kiq = true;
		}
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		break;
	default:
		break;
	}
	return 0;
}

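/* SOC-specific configuration, applied before any IP blocks are added. */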
static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 4, 3):
		aqua_vanjaram_init_soc_config(adev);
		break;
	default:
		break;
	}
}

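/*
 * Populate adev->ip_versions (from hardcoded tables for pre-discovery ASICs,
 * or from the IP discovery data otherwise), select the per-IP callbacks, and
 * register all IP blocks for this device.
 */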
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
		break;
	case CHIP_VEGA12:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
		break;
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 1;
		adev->vcn.num_vcn_inst = 1;
		adev->gmc.num_umc = 2;
		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
		} else {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
		}
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		adev->sdma.num_instances = 8;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
		break;
	case CHIP_ALDEBARAN:
		aldebaran_reg_base_init(adev);
		adev->sdma.num_instances = 5;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
		break;
	default:
		r = amdgpu_discovery_reg_base_init(adev);
		if (r)
			return -EINVAL;

		amdgpu_discovery_harvest_ip(adev);
		amdgpu_discovery_get_gfx_info(adev);
		amdgpu_discovery_get_mall_info(adev);
		amdgpu_discovery_get_vcn_info(adev);
		break;
	}

	amdgpu_discovery_init_soc_config(adev);
	amdgpu_discovery_sysfs_init(adev);

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
		adev->family = AMDGPU_FAMILY_AI;
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
		adev->family = AMDGPU_FAMILY_RV;
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
		adev->family = AMDGPU_FAMILY_NV;
		break;
	case IP_VERSION(10, 3, 1):
		adev->family = AMDGPU_FAMILY_VGH;
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case IP_VERSION(10, 3, 3):
		adev->family = AMDGPU_FAMILY_YC;
		break;
	case IP_VERSION(10, 3, 6):
		adev->family = AMDGPU_FAMILY_GC_10_3_6;
		break;
	case IP_VERSION(10, 3, 7):
		adev->family = AMDGPU_FAMILY_GC_10_3_7;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->family = AMDGPU_FAMILY_GC_11_0_0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->family = AMDGPU_FAMILY_GC_11_0_1;
		break;
	default:
		return -EINVAL;
	}

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->flags |= AMD_IS_APU;
		break;
	default:
		break;
	}

	if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))
		adev->gmc.xgmi.supported = true;

	/* set NBIO version */
	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 2, 0):
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
	case IP_VERSION(2, 5, 0):
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
		break;
	case IP_VERSION(7, 4, 0):
	case IP_VERSION(7, 4, 1):
	case IP_VERSION(7, 4, 4):
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
		break;
	case IP_VERSION(7, 9, 0):
		adev->nbio.funcs = &nbio_v7_9_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
		break;
	case IP_VERSION(7, 2, 0):
	case IP_VERSION(7, 2, 1):
	case IP_VERSION(7, 3, 0):
	case IP_VERSION(7, 5, 0):
	case IP_VERSION(7, 5, 1):
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
		break;
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 3, 1):
	case IP_VERSION(2, 3, 2):
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
	case IP_VERSION(3, 3, 3):
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
		break;
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
		if (amdgpu_sriov_vf(adev))
			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
		else
			adev->nbio.funcs = &nbio_v4_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
		break;
	case IP_VERSION(7, 7, 0):
	case IP_VERSION(7, 7, 1):
		adev->nbio.funcs = &nbio_v7_7_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
		break;
	default:
		break;
	}

	switch (adev->ip_versions[HDP_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
	case IP_VERSION(4, 4, 2):
		adev->hdp.funcs = &hdp_v4_0_funcs;
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 0, 4):
	case IP_VERSION(5, 2, 0):
		adev->hdp.funcs = &hdp_v5_0_funcs;
		break;
	case IP_VERSION(5, 2, 1):
		adev->hdp.funcs = &hdp_v5_2_funcs;
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 1, 0):
		adev->hdp.funcs = &hdp_v6_0_funcs;
		break;
	default:
		break;
	}

	switch (adev->ip_versions[DF_HWIP][0]) {
	case IP_VERSION(3, 6, 0):
	case IP_VERSION(3, 6, 1):
	case IP_VERSION(3, 6, 2):
		adev->df.funcs = &df_v3_6_funcs;
		break;
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(3, 5, 2):
		adev->df.funcs = &df_v1_7_funcs;
		break;
	case IP_VERSION(4, 3, 0):
		adev->df.funcs = &df_v4_3_funcs;
		break;
	default:
		break;
	}

	switch (adev->ip_versions[SMUIO_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(10, 0, 2):
		adev->smuio.funcs = &smuio_v9_0_funcs;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
		adev->smuio.funcs = &smuio_v11_0_funcs;
		break;
	case IP_VERSION(11, 0, 6):
	case IP_VERSION(11, 0, 10):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 9):
	case IP_VERSION(13, 0, 10):
		adev->smuio.funcs = &smuio_v11_0_6_funcs;
		break;
	case IP_VERSION(13, 0, 2):
		adev->smuio.funcs = &smuio_v13_0_funcs;
		break;
	case IP_VERSION(13, 0, 3):
		adev->smuio.funcs = &smuio_v13_0_3_funcs;
		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
			adev->flags |= AMD_IS_APU;
		}
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(14, 0, 0):
		adev->smuio.funcs = &smuio_v13_0_6_funcs;
		break;
	default:
		break;
	}

	switch (adev->ip_versions[LSDMA_HWIP][0]) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		adev->lsdma.funcs = &lsdma_v6_0_funcs;
		break;
	default:
		break;
	}

	r = amdgpu_discovery_set_common_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
	if (r)
		return r;

	/* For SR-IOV, PSP needs to be initialized before IH */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_discovery_set_psp_ip_blocks(adev);
		if (r)
			return r;
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;
	} else {
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;

		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
			r = amdgpu_discovery_set_psp_ip_blocks(adev);
			if (r)
				return r;
		}
	}

	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_display_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gc_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
	if (r)
		return r;

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
	     !amdgpu_sriov_vf(adev)) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_mm_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_mes_ip_blocks(adev);
	if (r)
		return r;

	return 0;
}