1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_probe_helper.h>
36 #include <drm/amdgpu_drm.h>
37 #include <linux/vgaarb.h>
38 #include <linux/vga_switcheroo.h>
39 #include <linux/efi.h>
40 #include "amdgpu.h"
41 #include "amdgpu_trace.h"
42 #include "amdgpu_i2c.h"
43 #include "atom.h"
44 #include "amdgpu_atombios.h"
45 #include "amdgpu_atomfirmware.h"
46 #include "amd_pcie.h"
47 #ifdef CONFIG_DRM_AMDGPU_SI
48 #include "si.h"
49 #endif
50 #ifdef CONFIG_DRM_AMDGPU_CIK
51 #include "cik.h"
52 #endif
53 #include "vi.h"
54 #include "soc15.h"
55 #include "nv.h"
56 #include "bif/bif_4_1_d.h"
57 #include <linux/pci.h>
58 #include <linux/firmware.h>
59 #include "amdgpu_vf_error.h"
60 
61 #include "amdgpu_amdkfd.h"
62 #include "amdgpu_pm.h"
63 
64 #include "amdgpu_xgmi.h"
65 #include "amdgpu_ras.h"
66 #include "amdgpu_pmu.h"
67 #include "amdgpu_fru_eeprom.h"
68 #include "amdgpu_reset.h"
69 
70 #include <linux/suspend.h>
71 #include <drm/task_barrier.h>
72 #include <linux/pm_runtime.h>
73 
74 #include <drm/drm_drv.h>
75 
76 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
77 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
78 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
79 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
80 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
87 MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");
88 
89 #define AMDGPU_RESUME_MS		2000
90 
91 const char *amdgpu_asic_name[] = {
92 	"TAHITI",
93 	"PITCAIRN",
94 	"VERDE",
95 	"OLAND",
96 	"HAINAN",
97 	"BONAIRE",
98 	"KAVERI",
99 	"KABINI",
100 	"HAWAII",
101 	"MULLINS",
102 	"TOPAZ",
103 	"TONGA",
104 	"FIJI",
105 	"CARRIZO",
106 	"STONEY",
107 	"POLARIS10",
108 	"POLARIS11",
109 	"POLARIS12",
110 	"VEGAM",
111 	"VEGA10",
112 	"VEGA12",
113 	"VEGA20",
114 	"RAVEN",
115 	"ARCTURUS",
116 	"RENOIR",
117 	"ALDEBARAN",
118 	"NAVI10",
119 	"CYAN_SKILLFISH",
120 	"NAVI14",
121 	"NAVI12",
122 	"SIENNA_CICHLID",
123 	"NAVY_FLOUNDER",
124 	"VANGOGH",
125 	"DIMGREY_CAVEFISH",
126 	"BEIGE_GOBY",
127 	"YELLOW_CARP",
128 	"LAST",
129 };
130 
131 /**
132  * DOC: pcie_replay_count
133  *
134  * The amdgpu driver provides a sysfs API for reporting the total number
135  * of PCIe replays (NAKs).
136  * The file pcie_replay_count is used for this and returns the total
137  * number of replays as a sum of the NAKs generated and NAKs received.
138  */
139 
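/*
 * Example read of the attribute from user space (the path is illustrative
 * and assumes the device is exposed as card0):
 *
 *   $ cat /sys/class/drm/card0/device/pcie_replay_count
 *   0
 */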
140 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
141 		struct device_attribute *attr, char *buf)
142 {
143 	struct drm_device *ddev = dev_get_drvdata(dev);
144 	struct amdgpu_device *adev = drm_to_adev(ddev);
145 	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
146 
147 	return sysfs_emit(buf, "%llu\n", cnt);
148 }
149 
150 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
151 		amdgpu_device_get_pcie_replay_count, NULL);
152 
153 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
154 
155 /**
156  * DOC: product_name
157  *
158  * The amdgpu driver provides a sysfs API for reporting the product name
159  * for the device.
160  * The file product_name is used for this and returns the product name
161  * as returned from the FRU.
162  * NOTE: This is only available for certain server cards
163  */
164 
165 static ssize_t amdgpu_device_get_product_name(struct device *dev,
166 		struct device_attribute *attr, char *buf)
167 {
168 	struct drm_device *ddev = dev_get_drvdata(dev);
169 	struct amdgpu_device *adev = drm_to_adev(ddev);
170 
171 	return sysfs_emit(buf, "%s\n", adev->product_name);
172 }
173 
174 static DEVICE_ATTR(product_name, S_IRUGO,
175 		amdgpu_device_get_product_name, NULL);
176 
177 /**
178  * DOC: product_number
179  *
180  * The amdgpu driver provides a sysfs API for reporting the part number
181  * for the device.
182  * The file product_number is used for this and returns the part number
183  * as returned from the FRU.
184  * NOTE: This is only available for certain server cards
185  */
186 
187 static ssize_t amdgpu_device_get_product_number(struct device *dev,
188 		struct device_attribute *attr, char *buf)
189 {
190 	struct drm_device *ddev = dev_get_drvdata(dev);
191 	struct amdgpu_device *adev = drm_to_adev(ddev);
192 
193 	return sysfs_emit(buf, "%s\n", adev->product_number);
194 }
195 
196 static DEVICE_ATTR(product_number, S_IRUGO,
197 		amdgpu_device_get_product_number, NULL);
198 
199 /**
200  * DOC: serial_number
201  *
202  * The amdgpu driver provides a sysfs API for reporting the serial number
203  * for the device.
204  * The file serial_number is used for this and returns the serial number
205  * as returned from the FRU.
206  * NOTE: This is only available for certain server cards
207  */
208 
209 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
210 		struct device_attribute *attr, char *buf)
211 {
212 	struct drm_device *ddev = dev_get_drvdata(dev);
213 	struct amdgpu_device *adev = drm_to_adev(ddev);
214 
215 	return sysfs_emit(buf, "%s\n", adev->serial);
216 }
217 
218 static DEVICE_ATTR(serial_number, S_IRUGO,
219 		amdgpu_device_get_serial_number, NULL);
220 
221 /**
222  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
223  *
224  * @dev: drm_device pointer
225  *
226  * Returns true if the device is a dGPU with ATPX power control,
227  * otherwise return false.
228  */
229 bool amdgpu_device_supports_px(struct drm_device *dev)
230 {
231 	struct amdgpu_device *adev = drm_to_adev(dev);
232 
233 	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
234 		return true;
235 	return false;
236 }
237 
238 /**
239  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
240  *
241  * @dev: drm_device pointer
242  *
243  * Returns true if the device is a dGPU with ACPI power control,
244  * otherwise return false.
245  */
246 bool amdgpu_device_supports_boco(struct drm_device *dev)
247 {
248 	struct amdgpu_device *adev = drm_to_adev(dev);
249 
250 	if (adev->has_pr3 ||
251 	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
252 		return true;
253 	return false;
254 }
255 
256 /**
257  * amdgpu_device_supports_baco - Does the device support BACO
258  *
259  * @dev: drm_device pointer
260  *
261  * Returns true if the device supports BACO,
262  * otherwise return false.
263  */
264 bool amdgpu_device_supports_baco(struct drm_device *dev)
265 {
266 	struct amdgpu_device *adev = drm_to_adev(dev);
267 
268 	return amdgpu_asic_supports_baco(adev);
269 }
270 
271 /**
272  * amdgpu_device_supports_smart_shift - Is the device dGPU with
273  * smart shift support
274  *
275  * @dev: drm_device pointer
276  *
277  * Returns true if the device is a dGPU with Smart Shift support,
278  * otherwise returns false.
279  */
280 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
281 {
282 	return (amdgpu_device_supports_boco(dev) &&
283 		amdgpu_acpi_is_power_shift_control_supported());
284 }
285 
286 /*
287  * VRAM access helper functions
288  */
289 
290 /**
291  * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
292  *
293  * @adev: amdgpu_device pointer
294  * @pos: offset of the buffer in vram
295  * @buf: virtual address of the buffer in system memory
296  * @size: read/write size; the buffer at @buf must be at least @size bytes
297  * @write: true - write to vram, otherwise - read from vram
298  */
299 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
300 			     void *buf, size_t size, bool write)
301 {
302 	unsigned long flags;
303 	uint32_t hi = ~0, tmp = 0;
304 	uint32_t *data = buf;
305 	uint64_t last;
306 	int idx;
307 
308 	if (!drm_dev_enter(&adev->ddev, &idx))
309 		return;
310 
311 	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
312 
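	/*
	 * MM_INDEX takes the low 31 bits of the VRAM offset with bit 31 set to
	 * select the MM aperture; the upper bits go through MM_INDEX_HI and
	 * are only rewritten below when they change.
	 */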
313 	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
314 	for (last = pos + size; pos < last; pos += 4) {
315 		tmp = pos >> 31;
316 
317 		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
318 		if (tmp != hi) {
319 			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
320 			hi = tmp;
321 		}
322 		if (write)
323 			WREG32_NO_KIQ(mmMM_DATA, *data++);
324 		else
325 			*data++ = RREG32_NO_KIQ(mmMM_DATA);
326 	}
327 
328 	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
329 	drm_dev_exit(idx);
330 }
331 
332 /**
333  * amdgpu_device_aper_access - access vram through the vram aperture
334  *
335  * @adev: amdgpu_device pointer
336  * @pos: offset of the buffer in vram
337  * @buf: virtual address of the buffer in system memory
338  * @size: read/write size; the buffer at @buf must be at least @size bytes
339  * @write: true - write to vram, otherwise - read from vram
340  *
341  * Returns the number of bytes transferred.
342  */
343 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
344 				 void *buf, size_t size, bool write)
345 {
346 #ifdef CONFIG_64BIT
347 	void __iomem *addr;
348 	size_t count = 0;
349 	uint64_t last;
350 
351 	if (!adev->mman.aper_base_kaddr)
352 		return 0;
353 
354 	last = min(pos + size, adev->gmc.visible_vram_size);
355 	if (last > pos) {
356 		addr = adev->mman.aper_base_kaddr + pos;
357 		count = last - pos;
358 
359 		if (write) {
360 			memcpy_toio(addr, buf, count);
361 			mb();
362 			amdgpu_device_flush_hdp(adev, NULL);
363 		} else {
364 			amdgpu_device_invalidate_hdp(adev, NULL);
365 			mb();
366 			memcpy_fromio(buf, addr, count);
367 		}
368 
369 	}
370 
371 	return count;
372 #else
373 	return 0;
374 #endif
375 }
376 
377 /**
378  * amdgpu_device_vram_access - read/write a buffer in vram
379  *
380  * @adev: amdgpu_device pointer
381  * @pos: offset of the buffer in vram
382  * @buf: virtual address of the buffer in system memory
383  * @size: read/write size; the buffer at @buf must be at least @size bytes
384  * @write: true - write to vram, otherwise - read from vram
385  */
386 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
387 			       void *buf, size_t size, bool write)
388 {
389 	size_t count;
390 
391 	/* try using the vram aperture to access vram first */
392 	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
393 	size -= count;
394 	if (size) {
395 		/* use MM to access the rest of vram */
396 		pos += count;
397 		buf += count;
398 		amdgpu_device_mm_access(adev, pos, buf, size, write);
399 	}
400 }
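
/*
 * Minimal usage sketch (the offset and buffer are illustrative, not taken
 * from real callers):
 *
 *   uint32_t val;
 *
 *   amdgpu_device_vram_access(adev, 0x1000, &val, sizeof(val), false);
 *
 * The visible-VRAM aperture is tried first; whatever it cannot reach is
 * completed through the MM_INDEX/MM_DATA path above.
 */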
401 
402 /*
403  * register access helper functions.
404  */
405 
406 /* Check if hw access should be skipped because of hotplug or device error */
407 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
408 {
409 	if (adev->no_hw_access)
410 		return true;
411 
412 #ifdef CONFIG_LOCKDEP
413 	/*
414 	 * This is a bit complicated to understand, so worth a comment. What we assert
415 	 * here is that the GPU reset is not running on another thread in parallel.
416 	 *
417 	 * For this we trylock the read side of the reset semaphore; if that succeeds
418 	 * we know that the reset is not running in parallel.
419 	 *
420 	 * If the trylock fails we assert that we are either already holding the read
421 	 * side of the lock or are the reset thread itself and hold the write side of
422 	 * the lock.
423 	 */
424 	if (in_task()) {
425 		if (down_read_trylock(&adev->reset_sem))
426 			up_read(&adev->reset_sem);
427 		else
428 			lockdep_assert_held(&adev->reset_sem);
429 	}
430 #endif
431 	return false;
432 }
433 
434 /**
435  * amdgpu_device_rreg - read a memory mapped IO or indirect register
436  *
437  * @adev: amdgpu_device pointer
438  * @reg: dword aligned register offset
439  * @acc_flags: access flags which require special behavior
440  *
441  * Returns the 32 bit value from the offset specified.
442  */
443 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
444 			    uint32_t reg, uint32_t acc_flags)
445 {
446 	uint32_t ret;
447 
448 	if (amdgpu_device_skip_hw_access(adev))
449 		return 0;
450 
451 	if ((reg * 4) < adev->rmmio_size) {
452 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
453 		    amdgpu_sriov_runtime(adev) &&
454 		    down_read_trylock(&adev->reset_sem)) {
455 			ret = amdgpu_kiq_rreg(adev, reg);
456 			up_read(&adev->reset_sem);
457 		} else {
458 			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
459 		}
460 	} else {
461 		ret = adev->pcie_rreg(adev, reg * 4);
462 	}
463 
464 	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
465 
466 	return ret;
467 }
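
/*
 * Illustrative direct call; most driver code is assumed to go through the
 * RREG32()/RREG32_NO_KIQ() style wrappers rather than calling this helper
 * directly:
 *
 *   uint32_t val = amdgpu_device_rreg(adev, mmMM_INDEX, AMDGPU_REGS_NO_KIQ);
 */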
468 
469 /*
470  * MMIO register byte read helper function
471  * @offset: byte offset from MMIO start
472  *
473 */
474 
475 /**
476  * amdgpu_mm_rreg8 - read a memory mapped IO register
477  *
478  * @adev: amdgpu_device pointer
479  * @offset: byte aligned register offset
480  *
481  * Returns the 8 bit value from the offset specified.
482  */
483 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
484 {
485 	if (amdgpu_device_skip_hw_access(adev))
486 		return 0;
487 
488 	if (offset < adev->rmmio_size)
489 		return (readb(adev->rmmio + offset));
490 	BUG();
491 }
492 
493 /*
494  * MMIO register byte write helper function
495  * @offset: byte offset from MMIO start
496  * @value: the value to be written to the register
497  *
498 */
499 /**
500  * amdgpu_mm_wreg8 - write a memory mapped IO register
501  *
502  * @adev: amdgpu_device pointer
503  * @offset: byte aligned register offset
504  * @value: 8 bit value to write
505  *
506  * Writes the value specified to the offset specified.
507  */
508 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
509 {
510 	if (amdgpu_device_skip_hw_access(adev))
511 		return;
512 
513 	if (offset < adev->rmmio_size)
514 		writeb(value, adev->rmmio + offset);
515 	else
516 		BUG();
517 }
518 
519 /**
520  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
521  *
522  * @adev: amdgpu_device pointer
523  * @reg: dword aligned register offset
524  * @v: 32 bit value to write to the register
525  * @acc_flags: access flags which require special behavior
526  *
527  * Writes the value specified to the offset specified.
528  */
529 void amdgpu_device_wreg(struct amdgpu_device *adev,
530 			uint32_t reg, uint32_t v,
531 			uint32_t acc_flags)
532 {
533 	if (amdgpu_device_skip_hw_access(adev))
534 		return;
535 
536 	if ((reg * 4) < adev->rmmio_size) {
537 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
538 		    amdgpu_sriov_runtime(adev) &&
539 		    down_read_trylock(&adev->reset_sem)) {
540 			amdgpu_kiq_wreg(adev, reg, v);
541 			up_read(&adev->reset_sem);
542 		} else {
543 			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
544 		}
545 	} else {
546 		adev->pcie_wreg(adev, reg * 4, v);
547 	}
548 
549 	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
550 }
551 
552 /*
553  * amdgpu_mm_wreg_mmio_rlc - write a register either via MMIO or via the RLC path if it is in range
554  *
555  * This function is only invoked for debugfs register access.
556  */
557 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
558 			     uint32_t reg, uint32_t v)
559 {
560 	if (amdgpu_device_skip_hw_access(adev))
561 		return;
562 
563 	if (amdgpu_sriov_fullaccess(adev) &&
564 	    adev->gfx.rlc.funcs &&
565 	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
566 		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
567 			return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
568 	} else {
569 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
570 	}
571 }
572 
573 /**
574  * amdgpu_mm_rdoorbell - read a doorbell dword
575  *
576  * @adev: amdgpu_device pointer
577  * @index: doorbell index
578  *
579  * Returns the value in the doorbell aperture at the
580  * requested doorbell index (CIK).
581  */
582 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
583 {
584 	if (amdgpu_device_skip_hw_access(adev))
585 		return 0;
586 
587 	if (index < adev->doorbell.num_doorbells) {
588 		return readl(adev->doorbell.ptr + index);
589 	} else {
590 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
591 		return 0;
592 	}
593 }
594 
595 /**
596  * amdgpu_mm_wdoorbell - write a doorbell dword
597  *
598  * @adev: amdgpu_device pointer
599  * @index: doorbell index
600  * @v: value to write
601  *
602  * Writes @v to the doorbell aperture at the
603  * requested doorbell index (CIK).
604  */
605 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
606 {
607 	if (amdgpu_device_skip_hw_access(adev))
608 		return;
609 
610 	if (index < adev->doorbell.num_doorbells) {
611 		writel(v, adev->doorbell.ptr + index);
612 	} else {
613 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
614 	}
615 }
616 
617 /**
618  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
619  *
620  * @adev: amdgpu_device pointer
621  * @index: doorbell index
622  *
623  * Returns the value in the doorbell aperture at the
624  * requested doorbell index (VEGA10+).
625  */
626 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
627 {
628 	if (amdgpu_device_skip_hw_access(adev))
629 		return 0;
630 
631 	if (index < adev->doorbell.num_doorbells) {
632 		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
633 	} else {
634 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
635 		return 0;
636 	}
637 }
638 
639 /**
640  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
641  *
642  * @adev: amdgpu_device pointer
643  * @index: doorbell index
644  * @v: value to write
645  *
646  * Writes @v to the doorbell aperture at the
647  * requested doorbell index (VEGA10+).
648  */
649 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
650 {
651 	if (amdgpu_device_skip_hw_access(adev))
652 		return;
653 
654 	if (index < adev->doorbell.num_doorbells) {
655 		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
656 	} else {
657 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
658 	}
659 }
660 
661 /**
662  * amdgpu_device_indirect_rreg - read an indirect register
663  *
664  * @adev: amdgpu_device pointer
665  * @pcie_index: mmio register offset
666  * @pcie_data: mmio register offset
667  * @reg_addr: indirect register address to read from
668  *
669  * Returns the value of indirect register @reg_addr
670  */
671 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
672 				u32 pcie_index, u32 pcie_data,
673 				u32 reg_addr)
674 {
675 	unsigned long flags;
676 	u32 r;
677 	void __iomem *pcie_index_offset;
678 	void __iomem *pcie_data_offset;
679 
680 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
681 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
682 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
683 
684 	writel(reg_addr, pcie_index_offset);
685 	readl(pcie_index_offset);
686 	r = readl(pcie_data_offset);
687 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
688 
689 	return r;
690 }
691 
692 /**
693  * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
694  *
695  * @adev: amdgpu_device pointer
696  * @pcie_index: mmio register offset
697  * @pcie_data: mmio register offset
698  * @reg_addr: indirect register address to read from
699  *
700  * Returns the value of indirect register @reg_addr
701  */
702 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
703 				  u32 pcie_index, u32 pcie_data,
704 				  u32 reg_addr)
705 {
706 	unsigned long flags;
707 	u64 r;
708 	void __iomem *pcie_index_offset;
709 	void __iomem *pcie_data_offset;
710 
711 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
712 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
713 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
714 
715 	/* read low 32 bits */
716 	writel(reg_addr, pcie_index_offset);
717 	readl(pcie_index_offset);
718 	r = readl(pcie_data_offset);
719 	/* read high 32 bits */
720 	writel(reg_addr + 4, pcie_index_offset);
721 	readl(pcie_index_offset);
722 	r |= ((u64)readl(pcie_data_offset) << 32);
723 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
724 
725 	return r;
726 }
727 
728 /**
729  * amdgpu_device_indirect_wreg - write an indirect register address
730  *
731  * @adev: amdgpu_device pointer
732  * @pcie_index: mmio register offset
733  * @pcie_data: mmio register offset
734  * @reg_addr: indirect register offset
735  * @reg_data: indirect register data
736  *
737  */
738 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
739 				 u32 pcie_index, u32 pcie_data,
740 				 u32 reg_addr, u32 reg_data)
741 {
742 	unsigned long flags;
743 	void __iomem *pcie_index_offset;
744 	void __iomem *pcie_data_offset;
745 
746 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
747 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
748 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
749 
750 	writel(reg_addr, pcie_index_offset);
751 	readl(pcie_index_offset);
752 	writel(reg_data, pcie_data_offset);
753 	readl(pcie_data_offset);
754 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
755 }
756 
757 /**
758  * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register address
759  *
760  * @adev: amdgpu_device pointer
761  * @pcie_index: mmio register offset
762  * @pcie_data: mmio register offset
763  * @reg_addr: indirect register offset
764  * @reg_data: indirect register data
765  *
766  */
767 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
768 				   u32 pcie_index, u32 pcie_data,
769 				   u32 reg_addr, u64 reg_data)
770 {
771 	unsigned long flags;
772 	void __iomem *pcie_index_offset;
773 	void __iomem *pcie_data_offset;
774 
775 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
776 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
777 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
778 
779 	/* write low 32 bits */
780 	writel(reg_addr, pcie_index_offset);
781 	readl(pcie_index_offset);
782 	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
783 	readl(pcie_data_offset);
784 	/* write high 32 bits */
785 	writel(reg_addr + 4, pcie_index_offset);
786 	readl(pcie_index_offset);
787 	writel((u32)(reg_data >> 32), pcie_data_offset);
788 	readl(pcie_data_offset);
789 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
790 }
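
/*
 * These indirect helpers are typically not called with raw offsets from IP
 * code; ASIC files are expected to wrap them behind adev->pcie_rreg()/
 * adev->pcie_wreg() style callbacks that supply the NBIO index/data register
 * pair. Sketch (the register names are placeholders):
 *
 *   u32 val = amdgpu_device_indirect_rreg(adev, pcie_index_reg,
 *                                         pcie_data_reg, reg_addr);
 */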
791 
792 /**
793  * amdgpu_invalid_rreg - dummy reg read function
794  *
795  * @adev: amdgpu_device pointer
796  * @reg: offset of register
797  *
798  * Dummy register read function.  Used for register blocks
799  * that certain asics don't have (all asics).
800  * Returns the value in the register.
801  */
802 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
803 {
804 	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
805 	BUG();
806 	return 0;
807 }
808 
809 /**
810  * amdgpu_invalid_wreg - dummy reg write function
811  *
812  * @adev: amdgpu_device pointer
813  * @reg: offset of register
814  * @v: value to write to the register
815  *
816  * Dummy register write function.  Used for register blocks
817  * that certain asics don't have (all asics).
818  */
819 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
820 {
821 	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
822 		  reg, v);
823 	BUG();
824 }
825 
826 /**
827  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
828  *
829  * @adev: amdgpu_device pointer
830  * @reg: offset of register
831  *
832  * Dummy register read function.  Used for register blocks
833  * that certain asics don't have (all asics).
834  * Returns the value in the register.
835  */
836 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
837 {
838 	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
839 	BUG();
840 	return 0;
841 }
842 
843 /**
844  * amdgpu_invalid_wreg64 - dummy reg write function
845  *
846  * @adev: amdgpu_device pointer
847  * @reg: offset of register
848  * @v: value to write to the register
849  *
850  * Dummy register write function.  Used for register blocks
851  * that certain asics don't have (all asics).
852  */
853 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
854 {
855 	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
856 		  reg, v);
857 	BUG();
858 }
859 
860 /**
861  * amdgpu_block_invalid_rreg - dummy reg read function
862  *
863  * @adev: amdgpu_device pointer
864  * @block: offset of instance
865  * @reg: offset of register
866  *
867  * Dummy register read function.  Used for register blocks
868  * that certain asics don't have (all asics).
869  * Returns the value in the register.
870  */
871 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
872 					  uint32_t block, uint32_t reg)
873 {
874 	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
875 		  reg, block);
876 	BUG();
877 	return 0;
878 }
879 
880 /**
881  * amdgpu_block_invalid_wreg - dummy reg write function
882  *
883  * @adev: amdgpu_device pointer
884  * @block: offset of instance
885  * @reg: offset of register
886  * @v: value to write to the register
887  *
888  * Dummy register write function.  Used for register blocks
889  * that certain asics don't have (all asics).
890  */
891 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
892 				      uint32_t block,
893 				      uint32_t reg, uint32_t v)
894 {
895 	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
896 		  reg, block, v);
897 	BUG();
898 }
899 
900 /**
901  * amdgpu_device_asic_init - Wrapper for atom asic_init
902  *
903  * @adev: amdgpu_device pointer
904  *
905  * Does any asic specific work and then calls atom asic init.
906  */
907 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
908 {
909 	amdgpu_asic_pre_asic_init(adev);
910 
911 	return amdgpu_atom_asic_init(adev->mode_info.atom_context);
912 }
913 
914 /**
915  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
916  *
917  * @adev: amdgpu_device pointer
918  *
919  * Allocates a scratch page of VRAM for use by various things in the
920  * driver.
921  */
922 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
923 {
924 	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
925 				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
926 				       &adev->vram_scratch.robj,
927 				       &adev->vram_scratch.gpu_addr,
928 				       (void **)&adev->vram_scratch.ptr);
929 }
930 
931 /**
932  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
933  *
934  * @adev: amdgpu_device pointer
935  *
936  * Frees the VRAM scratch page.
937  */
938 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
939 {
940 	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
941 }
942 
943 /**
944  * amdgpu_device_program_register_sequence - program an array of registers.
945  *
946  * @adev: amdgpu_device pointer
947  * @registers: pointer to the register array
948  * @array_size: size of the register array
949  *
950  * Programs an array of registers with AND and OR masks.
951  * This is a helper for setting golden registers.
952  */
953 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
954 					     const u32 *registers,
955 					     const u32 array_size)
956 {
957 	u32 tmp, reg, and_mask, or_mask;
958 	int i;
959 
960 	if (array_size % 3)
961 		return;
962 
963 	for (i = 0; i < array_size; i += 3) {
964 		reg = registers[i + 0];
965 		and_mask = registers[i + 1];
966 		or_mask = registers[i + 2];
967 
968 		if (and_mask == 0xffffffff) {
969 			tmp = or_mask;
970 		} else {
971 			tmp = RREG32(reg);
972 			tmp &= ~and_mask;
973 			if (adev->family >= AMDGPU_FAMILY_AI)
974 				tmp |= (or_mask & and_mask);
975 			else
976 				tmp |= or_mask;
977 		}
978 		WREG32(reg, tmp);
979 	}
980 }
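
/*
 * The register array is consumed as {offset, AND mask, OR mask} triplets.
 * Hypothetical example (the register and values are placeholders):
 *
 *   static const u32 golden_settings_example[] = {
 *           mmMM_INDEX, 0xffffffff, 0x00000000,
 *   };
 *
 *   amdgpu_device_program_register_sequence(adev, golden_settings_example,
 *                                           ARRAY_SIZE(golden_settings_example));
 */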
981 
982 /**
983  * amdgpu_device_pci_config_reset - reset the GPU
984  *
985  * @adev: amdgpu_device pointer
986  *
987  * Resets the GPU using the pci config reset sequence.
988  * Only applicable to asics prior to vega10.
989  */
990 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
991 {
992 	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
993 }
994 
995 /**
996  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
997  *
998  * @adev: amdgpu_device pointer
999  *
1000  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1001  */
1002 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1003 {
1004 	return pci_reset_function(adev->pdev);
1005 }
1006 
1007 /*
1008  * GPU doorbell aperture helpers function.
1009  */
1010 /**
1011  * amdgpu_device_doorbell_init - Init doorbell driver information.
1012  *
1013  * @adev: amdgpu_device pointer
1014  *
1015  * Init doorbell driver information (CIK)
1016  * Returns 0 on success, error on failure.
1017  */
1018 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1019 {
1020 
1021 	/* No doorbell on SI hardware generation */
1022 	if (adev->asic_type < CHIP_BONAIRE) {
1023 		adev->doorbell.base = 0;
1024 		adev->doorbell.size = 0;
1025 		adev->doorbell.num_doorbells = 0;
1026 		adev->doorbell.ptr = NULL;
1027 		return 0;
1028 	}
1029 
1030 	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1031 		return -EINVAL;
1032 
1033 	amdgpu_asic_init_doorbell_index(adev);
1034 
1035 	/* doorbell bar mapping */
1036 	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1037 	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1038 
1039 	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
1040 					     adev->doorbell_index.max_assignment+1);
1041 	if (adev->doorbell.num_doorbells == 0)
1042 		return -EINVAL;
1043 
1044 	/* For Vega, reserve and map two pages on the doorbell BAR since the SDMA
1045 	 * paging queue doorbell uses the second page. The
1046 	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1047 	 * doorbells are in the first page. So with the paging queue enabled,
1048 	 * the max num_doorbells is extended by one page (0x400 dwords).
1049 	 */
1050 	if (adev->asic_type >= CHIP_VEGA10)
1051 		adev->doorbell.num_doorbells += 0x400;
1052 
1053 	adev->doorbell.ptr = ioremap(adev->doorbell.base,
1054 				     adev->doorbell.num_doorbells *
1055 				     sizeof(u32));
1056 	if (adev->doorbell.ptr == NULL)
1057 		return -ENOMEM;
1058 
1059 	return 0;
1060 }
1061 
1062 /**
1063  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1064  *
1065  * @adev: amdgpu_device pointer
1066  *
1067  * Tear down doorbell driver information (CIK)
1068  */
1069 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1070 {
1071 	iounmap(adev->doorbell.ptr);
1072 	adev->doorbell.ptr = NULL;
1073 }
1074 
1075 
1076 
1077 /*
1078  * amdgpu_device_wb_*()
1079  * Writeback is the method by which the GPU updates special pages in memory
1080  * with the status of certain GPU events (fences, ring pointers,etc.).
1081  */
1082 
1083 /**
1084  * amdgpu_device_wb_fini - Disable Writeback and free memory
1085  *
1086  * @adev: amdgpu_device pointer
1087  *
1088  * Disables Writeback and frees the Writeback memory (all asics).
1089  * Used at driver shutdown.
1090  */
1091 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1092 {
1093 	if (adev->wb.wb_obj) {
1094 		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1095 				      &adev->wb.gpu_addr,
1096 				      (void **)&adev->wb.wb);
1097 		adev->wb.wb_obj = NULL;
1098 	}
1099 }
1100 
1101 /**
1102  * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
1103  *
1104  * @adev: amdgpu_device pointer
1105  *
1106  * Initializes writeback and allocates writeback memory (all asics).
1107  * Used at driver startup.
1108  * Returns 0 on success or a negative error code on failure.
1109  */
1110 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1111 {
1112 	int r;
1113 
1114 	if (adev->wb.wb_obj == NULL) {
1115 		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1116 		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1117 					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1118 					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
1119 					    (void **)&adev->wb.wb);
1120 		if (r) {
1121 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1122 			return r;
1123 		}
1124 
1125 		adev->wb.num_wb = AMDGPU_MAX_WB;
1126 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1127 
1128 		/* clear wb memory */
1129 		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1130 	}
1131 
1132 	return 0;
1133 }
1134 
1135 /**
1136  * amdgpu_device_wb_get - Allocate a wb entry
1137  *
1138  * @adev: amdgpu_device pointer
1139  * @wb: wb index
1140  *
1141  * Allocate a wb slot for use by the driver (all asics).
1142  * Returns 0 on success or -EINVAL on failure.
1143  */
1144 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1145 {
1146 	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1147 
1148 	if (offset < adev->wb.num_wb) {
1149 		__set_bit(offset, adev->wb.used);
1150 		*wb = offset << 3; /* convert to dw offset */
1151 		return 0;
1152 	} else {
1153 		return -EINVAL;
1154 	}
1155 }
1156 
1157 /**
1158  * amdgpu_device_wb_free - Free a wb entry
1159  *
1160  * @adev: amdgpu_device pointer
1161  * @wb: wb index
1162  *
1163  * Free a wb slot allocated for use by the driver (all asics)
1164  */
1165 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1166 {
1167 	wb >>= 3;
1168 	if (wb < adev->wb.num_wb)
1169 		__clear_bit(wb, adev->wb.used);
1170 }
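
/*
 * Typical allocation pattern (sketch; the variable names are illustrative):
 *
 *   u32 wb_idx;
 *
 *   if (!amdgpu_device_wb_get(adev, &wb_idx)) {
 *           volatile u32 *cpu_ptr = &adev->wb.wb[wb_idx];
 *           u64 gpu_addr = adev->wb.gpu_addr + wb_idx * 4;
 *
 *           // ... let the GPU write status to gpu_addr, poll *cpu_ptr ...
 *
 *           amdgpu_device_wb_free(adev, wb_idx);
 *   }
 *
 * The returned index is already a dword offset, which is why
 * amdgpu_device_wb_free() shifts it back down by 3 before clearing the
 * allocation bit.
 */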
1171 
1172 /**
1173  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1174  *
1175  * @adev: amdgpu_device pointer
1176  *
1177  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1178  * to fail, but if any of the BARs is not accessible after the resize we abort
1179  * driver loading by returning -ENODEV.
1180  */
1181 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1182 {
1183 	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1184 	struct pci_bus *root;
1185 	struct resource *res;
1186 	unsigned i;
1187 	u16 cmd;
1188 	int r;
1189 
1190 	/* Bypass for VF */
1191 	if (amdgpu_sriov_vf(adev))
1192 		return 0;
1193 
1194 	/* skip if the bios has already enabled large BAR */
1195 	if (adev->gmc.real_vram_size &&
1196 	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1197 		return 0;
1198 
1199 	/* Check if the root BUS has 64bit memory resources */
1200 	root = adev->pdev->bus;
1201 	while (root->parent)
1202 		root = root->parent;
1203 
1204 	pci_bus_for_each_resource(root, res, i) {
1205 		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1206 		    res->start > 0x100000000ull)
1207 			break;
1208 	}
1209 
1210 	/* Trying to resize is pointless without a root hub window above 4GB */
1211 	if (!res)
1212 		return 0;
1213 
1214 	/* Limit the BAR size to what is available */
1215 	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1216 			rbar_size);
1217 
1218 	/* Disable memory decoding while we change the BAR addresses and size */
1219 	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1220 	pci_write_config_word(adev->pdev, PCI_COMMAND,
1221 			      cmd & ~PCI_COMMAND_MEMORY);
1222 
1223 	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
1224 	amdgpu_device_doorbell_fini(adev);
1225 	if (adev->asic_type >= CHIP_BONAIRE)
1226 		pci_release_resource(adev->pdev, 2);
1227 
1228 	pci_release_resource(adev->pdev, 0);
1229 
1230 	r = pci_resize_resource(adev->pdev, 0, rbar_size);
1231 	if (r == -ENOSPC)
1232 		DRM_INFO("Not enough PCI address space for a large BAR.");
1233 	else if (r && r != -ENOTSUPP)
1234 		DRM_ERROR("Problem resizing BAR0 (%d).", r);
1235 
1236 	pci_assign_unassigned_bus_resources(adev->pdev->bus);
1237 
1238 	/* When the doorbell or fb BAR isn't available we have no chance of
1239 	 * using the device.
1240 	 */
1241 	r = amdgpu_device_doorbell_init(adev);
1242 	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1243 		return -ENODEV;
1244 
1245 	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1246 
1247 	return 0;
1248 }
1249 
1250 /*
1251  * GPU helpers function.
1252  */
1253 /**
1254  * amdgpu_device_need_post - check if the hw need post or not
1255  *
1256  * @adev: amdgpu_device pointer
1257  *
1258  * Check if the asic has been initialized (all asics) at driver startup,
1259  * or whether a post is needed after a hw reset.
1260  * Returns true if need or false if not.
1261  */
1262 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1263 {
1264 	uint32_t reg;
1265 
1266 	if (amdgpu_sriov_vf(adev))
1267 		return false;
1268 
1269 	if (amdgpu_passthrough(adev)) {
1270 		/* for FIJI: In the whole-GPU pass-through virtualization case, after a VM
1271 		 * reboot some old SMC firmware still needs the driver to do a vPost,
1272 		 * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
1273 		 * this flaw, so force vPost for SMC versions below 22.15.
1274 		 */
1275 		if (adev->asic_type == CHIP_FIJI) {
1276 			int err;
1277 			uint32_t fw_ver;
1278 			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1279 			/* force vPost if an error occurred */
1280 			if (err)
1281 				return true;
1282 
1283 			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1284 			if (fw_ver < 0x00160e00)
1285 				return true;
1286 		}
1287 	}
1288 
1289 	/* Don't post if we need to reset whole hive on init */
1290 	if (adev->gmc.xgmi.pending_reset)
1291 		return false;
1292 
1293 	if (adev->has_hw_reset) {
1294 		adev->has_hw_reset = false;
1295 		return true;
1296 	}
1297 
1298 	/* bios scratch used on CIK+ */
1299 	if (adev->asic_type >= CHIP_BONAIRE)
1300 		return amdgpu_atombios_scratch_need_asic_init(adev);
1301 
1302 	/* check MEM_SIZE for older asics */
1303 	reg = amdgpu_asic_get_config_memsize(adev);
1304 
1305 	if ((reg != 0) && (reg != 0xffffffff))
1306 		return false;
1307 
1308 	return true;
1309 }
1310 
1311 /* if we get transitioned to only one device, take VGA back */
1312 /**
1313  * amdgpu_device_vga_set_decode - enable/disable vga decode
1314  *
1315  * @pdev: PCI device pointer
1316  * @state: enable/disable vga decode
1317  *
1318  * Enable/disable vga decode (all asics).
1319  * Returns VGA resource flags.
1320  */
1321 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1322 		bool state)
1323 {
1324 	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1325 	amdgpu_asic_set_vga_state(adev, state);
1326 	if (state)
1327 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1328 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1329 	else
1330 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1331 }
1332 
1333 /**
1334  * amdgpu_device_check_block_size - validate the vm block size
1335  *
1336  * @adev: amdgpu_device pointer
1337  *
1338  * Validates the vm block size specified via module parameter.
1339  * The vm block size defines number of bits in page table versus page directory,
1340  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1341  * page table and the remaining bits are in the page directory.
1342  */
1343 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1344 {
1345 	/* defines number of bits in page table versus page directory,
1346 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1347 	 * page table and the remaining bits are in the page directory */
1348 	if (amdgpu_vm_block_size == -1)
1349 		return;
1350 
1351 	if (amdgpu_vm_block_size < 9) {
1352 		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1353 			 amdgpu_vm_block_size);
1354 		amdgpu_vm_block_size = -1;
1355 	}
1356 }
1357 
1358 /**
1359  * amdgpu_device_check_vm_size - validate the vm size
1360  *
1361  * @adev: amdgpu_device pointer
1362  *
1363  * Validates the vm size in GB specified via module parameter.
1364  * The VM size is the size of the GPU virtual memory space in GB.
1365  */
1366 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1367 {
1368 	/* no need to check the default value */
1369 	if (amdgpu_vm_size == -1)
1370 		return;
1371 
1372 	if (amdgpu_vm_size < 1) {
1373 		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1374 			 amdgpu_vm_size);
1375 		amdgpu_vm_size = -1;
1376 	}
1377 }
1378 
1379 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1380 {
1381 	struct sysinfo si;
1382 	bool is_os_64 = (sizeof(void *) == 8);
1383 	uint64_t total_memory;
1384 	uint64_t dram_size_seven_GB = 0x1B8000000;
1385 	uint64_t dram_size_three_GB = 0xB8000000;
1386 
1387 	if (amdgpu_smu_memory_pool_size == 0)
1388 		return;
1389 
1390 	if (!is_os_64) {
1391 		DRM_WARN("Not 64-bit OS, feature not supported\n");
1392 		goto def_value;
1393 	}
1394 	si_meminfo(&si);
1395 	total_memory = (uint64_t)si.totalram * si.mem_unit;
1396 
1397 	if ((amdgpu_smu_memory_pool_size == 1) ||
1398 		(amdgpu_smu_memory_pool_size == 2)) {
1399 		if (total_memory < dram_size_three_GB)
1400 			goto def_value1;
1401 	} else if ((amdgpu_smu_memory_pool_size == 4) ||
1402 		(amdgpu_smu_memory_pool_size == 8)) {
1403 		if (total_memory < dram_size_seven_GB)
1404 			goto def_value1;
1405 	} else {
1406 		DRM_WARN("Smu memory pool size not supported\n");
1407 		goto def_value;
1408 	}
1409 	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1410 
1411 	return;
1412 
1413 def_value1:
1414 	DRM_WARN("Not enough system memory\n");
1415 def_value:
1416 	adev->pm.smu_prv_buffer_size = 0;
1417 }
1418 
1419 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1420 {
1421 	if (!(adev->flags & AMD_IS_APU) ||
1422 	    adev->asic_type < CHIP_RAVEN)
1423 		return 0;
1424 
1425 	switch (adev->asic_type) {
1426 	case CHIP_RAVEN:
1427 		if (adev->pdev->device == 0x15dd)
1428 			adev->apu_flags |= AMD_APU_IS_RAVEN;
1429 		if (adev->pdev->device == 0x15d8)
1430 			adev->apu_flags |= AMD_APU_IS_PICASSO;
1431 		break;
1432 	case CHIP_RENOIR:
1433 		if ((adev->pdev->device == 0x1636) ||
1434 		    (adev->pdev->device == 0x164c))
1435 			adev->apu_flags |= AMD_APU_IS_RENOIR;
1436 		else
1437 			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1438 		break;
1439 	case CHIP_VANGOGH:
1440 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
1441 		break;
1442 	case CHIP_YELLOW_CARP:
1443 		break;
1444 	case CHIP_CYAN_SKILLFISH:
1445 		if (adev->pdev->device == 0x13FE)
1446 			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1447 		break;
1448 	default:
1449 		return -EINVAL;
1450 	}
1451 
1452 	return 0;
1453 }
1454 
1455 /**
1456  * amdgpu_device_check_arguments - validate module params
1457  *
1458  * @adev: amdgpu_device pointer
1459  *
1460  * Validates certain module parameters and updates
1461  * the associated values used by the driver (all asics).
1462  */
1463 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1464 {
1465 	if (amdgpu_sched_jobs < 4) {
1466 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1467 			 amdgpu_sched_jobs);
1468 		amdgpu_sched_jobs = 4;
1469 	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
1470 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1471 			 amdgpu_sched_jobs);
1472 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1473 	}
1474 
1475 	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1476 		/* gart size must be greater than or equal to 32M */
1477 		dev_warn(adev->dev, "gart size (%d) too small\n",
1478 			 amdgpu_gart_size);
1479 		amdgpu_gart_size = -1;
1480 	}
1481 
1482 	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1483 		/* gtt size must be greater than or equal to 32M */
1484 		dev_warn(adev->dev, "gtt size (%d) too small\n",
1485 				 amdgpu_gtt_size);
1486 		amdgpu_gtt_size = -1;
1487 	}
1488 
1489 	/* valid range is between 4 and 9 inclusive */
1490 	if (amdgpu_vm_fragment_size != -1 &&
1491 	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1492 		dev_warn(adev->dev, "valid range is between 4 and 9\n");
1493 		amdgpu_vm_fragment_size = -1;
1494 	}
1495 
1496 	if (amdgpu_sched_hw_submission < 2) {
1497 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1498 			 amdgpu_sched_hw_submission);
1499 		amdgpu_sched_hw_submission = 2;
1500 	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1501 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1502 			 amdgpu_sched_hw_submission);
1503 		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1504 	}
1505 
1506 	amdgpu_device_check_smu_prv_buffer_size(adev);
1507 
1508 	amdgpu_device_check_vm_size(adev);
1509 
1510 	amdgpu_device_check_block_size(adev);
1511 
1512 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1513 
1514 	amdgpu_gmc_tmz_set(adev);
1515 
1516 	amdgpu_gmc_noretry_set(adev);
1517 
1518 	return 0;
1519 }
1520 
1521 /**
1522  * amdgpu_switcheroo_set_state - set switcheroo state
1523  *
1524  * @pdev: pci dev pointer
1525  * @state: vga_switcheroo state
1526  *
1527  * Callback for the switcheroo driver.  Suspends or resumes
1528  * the asic before or after it is powered up using ACPI methods.
1529  */
1530 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1531 					enum vga_switcheroo_state state)
1532 {
1533 	struct drm_device *dev = pci_get_drvdata(pdev);
1534 	int r;
1535 
1536 	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1537 		return;
1538 
1539 	if (state == VGA_SWITCHEROO_ON) {
1540 		pr_info("switched on\n");
1541 		/* don't suspend or resume card normally */
1542 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1543 
1544 		pci_set_power_state(pdev, PCI_D0);
1545 		amdgpu_device_load_pci_state(pdev);
1546 		r = pci_enable_device(pdev);
1547 		if (r)
1548 			DRM_WARN("pci_enable_device failed (%d)\n", r);
1549 		amdgpu_device_resume(dev, true);
1550 
1551 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1552 	} else {
1553 		pr_info("switched off\n");
1554 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1555 		amdgpu_device_suspend(dev, true);
1556 		amdgpu_device_cache_pci_state(pdev);
1557 		/* Shut down the device */
1558 		pci_disable_device(pdev);
1559 		pci_set_power_state(pdev, PCI_D3cold);
1560 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1561 	}
1562 }
1563 
1564 /**
1565  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1566  *
1567  * @pdev: pci dev pointer
1568  *
1569  * Callback for the switcheroo driver.  Check if the switcheroo
1570  * state can be changed.
1571  * Returns true if the state can be changed, false if not.
1572  */
1573 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1574 {
1575 	struct drm_device *dev = pci_get_drvdata(pdev);
1576 
1577 	/*
1578 	* FIXME: open_count is protected by drm_global_mutex but that would lead to
1579 	* locking inversion with the driver load path. And the access here is
1580 	* completely racy anyway. So don't bother with locking for now.
1581 	*/
1582 	return atomic_read(&dev->open_count) == 0;
1583 }
1584 
1585 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1586 	.set_gpu_state = amdgpu_switcheroo_set_state,
1587 	.reprobe = NULL,
1588 	.can_switch = amdgpu_switcheroo_can_switch,
1589 };
1590 
1591 /**
1592  * amdgpu_device_ip_set_clockgating_state - set the CG state
1593  *
1594  * @dev: amdgpu_device pointer
1595  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1596  * @state: clockgating state (gate or ungate)
1597  *
1598  * Sets the requested clockgating state for all instances of
1599  * the hardware IP specified.
1600  * Returns the error code from the last instance.
1601  */
1602 int amdgpu_device_ip_set_clockgating_state(void *dev,
1603 					   enum amd_ip_block_type block_type,
1604 					   enum amd_clockgating_state state)
1605 {
1606 	struct amdgpu_device *adev = dev;
1607 	int i, r = 0;
1608 
1609 	for (i = 0; i < adev->num_ip_blocks; i++) {
1610 		if (!adev->ip_blocks[i].status.valid)
1611 			continue;
1612 		if (adev->ip_blocks[i].version->type != block_type)
1613 			continue;
1614 		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1615 			continue;
1616 		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1617 			(void *)adev, state);
1618 		if (r)
1619 			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1620 				  adev->ip_blocks[i].version->funcs->name, r);
1621 	}
1622 	return r;
1623 }
1624 
1625 /**
1626  * amdgpu_device_ip_set_powergating_state - set the PG state
1627  *
1628  * @dev: amdgpu_device pointer
1629  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1630  * @state: powergating state (gate or ungate)
1631  *
1632  * Sets the requested powergating state for all instances of
1633  * the hardware IP specified.
1634  * Returns the error code from the last instance.
1635  */
1636 int amdgpu_device_ip_set_powergating_state(void *dev,
1637 					   enum amd_ip_block_type block_type,
1638 					   enum amd_powergating_state state)
1639 {
1640 	struct amdgpu_device *adev = dev;
1641 	int i, r = 0;
1642 
1643 	for (i = 0; i < adev->num_ip_blocks; i++) {
1644 		if (!adev->ip_blocks[i].status.valid)
1645 			continue;
1646 		if (adev->ip_blocks[i].version->type != block_type)
1647 			continue;
1648 		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1649 			continue;
1650 		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1651 			(void *)adev, state);
1652 		if (r)
1653 			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1654 				  adev->ip_blocks[i].version->funcs->name, r);
1655 	}
1656 	return r;
1657 }
1658 
1659 /**
1660  * amdgpu_device_ip_get_clockgating_state - get the CG state
1661  *
1662  * @adev: amdgpu_device pointer
1663  * @flags: clockgating feature flags
1664  *
1665  * Walks the list of IPs on the device and updates the clockgating
1666  * flags for each IP.
1667  * Updates @flags with the feature flags for each hardware IP where
1668  * clockgating is enabled.
1669  */
1670 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1671 					    u32 *flags)
1672 {
1673 	int i;
1674 
1675 	for (i = 0; i < adev->num_ip_blocks; i++) {
1676 		if (!adev->ip_blocks[i].status.valid)
1677 			continue;
1678 		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1679 			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1680 	}
1681 }
1682 
1683 /**
1684  * amdgpu_device_ip_wait_for_idle - wait for idle
1685  *
1686  * @adev: amdgpu_device pointer
1687  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1688  *
1689  * Waits for the requested hardware IP to be idle.
1690  * Returns 0 for success or a negative error code on failure.
1691  */
1692 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1693 				   enum amd_ip_block_type block_type)
1694 {
1695 	int i, r;
1696 
1697 	for (i = 0; i < adev->num_ip_blocks; i++) {
1698 		if (!adev->ip_blocks[i].status.valid)
1699 			continue;
1700 		if (adev->ip_blocks[i].version->type == block_type) {
1701 			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1702 			if (r)
1703 				return r;
1704 			break;
1705 		}
1706 	}
1707 	return 0;
1708 
1709 }
1710 
1711 /**
1712  * amdgpu_device_ip_is_idle - is the hardware IP idle
1713  *
1714  * @adev: amdgpu_device pointer
1715  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1716  *
1717  * Check if the hardware IP is idle or not.
1718  * Returns true if the IP is idle, false if not.
1719  */
1720 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1721 			      enum amd_ip_block_type block_type)
1722 {
1723 	int i;
1724 
1725 	for (i = 0; i < adev->num_ip_blocks; i++) {
1726 		if (!adev->ip_blocks[i].status.valid)
1727 			continue;
1728 		if (adev->ip_blocks[i].version->type == block_type)
1729 			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1730 	}
1731 	return true;
1732 
1733 }
1734 
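/*
 * Illustrative sketch (not part of the upstream driver): a hypothetical helper
 * combining the two idle queries above, returning early when GFX already
 * reports idle and otherwise waiting for it.
 */
static inline int amdgpu_device_example_quiesce_gfx(struct amdgpu_device *adev)
{
	if (amdgpu_device_ip_is_idle(adev, AMD_IP_BLOCK_TYPE_GFX))
		return 0;

	return amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GFX);
}
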
1735 /**
1736  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1737  *
1738  * @adev: amdgpu_device pointer
1739  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1740  *
1741  * Returns a pointer to the hardware IP block structure
1742  * if it exists for the asic, otherwise NULL.
1743  */
1744 struct amdgpu_ip_block *
1745 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1746 			      enum amd_ip_block_type type)
1747 {
1748 	int i;
1749 
1750 	for (i = 0; i < adev->num_ip_blocks; i++)
1751 		if (adev->ip_blocks[i].version->type == type)
1752 			return &adev->ip_blocks[i];
1753 
1754 	return NULL;
1755 }
1756 
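/*
 * Illustrative sketch (not part of the upstream driver): looking up the GMC
 * block and printing its IP version.  The helper name is hypothetical.
 */
static inline void amdgpu_device_example_print_gmc_version(struct amdgpu_device *adev)
{
	struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);

	if (ip_block)
		DRM_INFO("GMC IP v%u.%u.%u\n", ip_block->version->major,
			 ip_block->version->minor, ip_block->version->rev);
}
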
1757 /**
1758  * amdgpu_device_ip_block_version_cmp
1759  *
1760  * @adev: amdgpu_device pointer
1761  * @type: enum amd_ip_block_type
1762  * @major: major version
1763  * @minor: minor version
1764  *
1765  * return 0 if equal or greater
1766  * return 1 if smaller or the ip_block doesn't exist
1767  */
1768 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1769 				       enum amd_ip_block_type type,
1770 				       u32 major, u32 minor)
1771 {
1772 	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1773 
1774 	if (ip_block && ((ip_block->version->major > major) ||
1775 			((ip_block->version->major == major) &&
1776 			(ip_block->version->minor >= minor))))
1777 		return 0;
1778 
1779 	return 1;
1780 }
1781 
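/*
 * Illustrative sketch (not part of the upstream driver): using the comparison
 * helper above to check for a GFX block at version 9.0 or newer.  The helper
 * name is hypothetical.
 */
static inline bool amdgpu_device_example_has_gfx_v9(struct amdgpu_device *adev)
{
	return amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
						  9, 0) == 0;
}
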
1782 /**
1783  * amdgpu_device_ip_block_add
1784  *
1785  * @adev: amdgpu_device pointer
1786  * @ip_block_version: pointer to the IP to add
1787  *
1788  * Adds the IP block driver information to the collection of IPs
1789  * on the asic.
1790  */
1791 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1792 			       const struct amdgpu_ip_block_version *ip_block_version)
1793 {
1794 	if (!ip_block_version)
1795 		return -EINVAL;
1796 
1797 	switch (ip_block_version->type) {
1798 	case AMD_IP_BLOCK_TYPE_VCN:
1799 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1800 			return 0;
1801 		break;
1802 	case AMD_IP_BLOCK_TYPE_JPEG:
1803 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1804 			return 0;
1805 		break;
1806 	default:
1807 		break;
1808 	}
1809 
1810 	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1811 		  ip_block_version->funcs->name);
1812 
1813 	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1814 
1815 	return 0;
1816 }
1817 
1818 /**
1819  * amdgpu_device_enable_virtual_display - enable virtual display feature
1820  *
1821  * @adev: amdgpu_device pointer
1822  *
1823  * Enables the virtual display feature if the user has enabled it via
1824  * the module parameter virtual_display.  This feature provides a virtual
1825  * display hardware on headless boards or in virtualized environments.
1826  * This function parses and validates the configuration string specified by
1827  * the user and configures the virtual display configuration (number of
1828  * virtual connectors, crtcs, etc.) specified.
1829  */
1830 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1831 {
1832 	adev->enable_virtual_display = false;
1833 
1834 	if (amdgpu_virtual_display) {
1835 		const char *pci_address_name = pci_name(adev->pdev);
1836 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1837 
1838 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1839 		pciaddstr_tmp = pciaddstr;
1840 		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1841 			pciaddname = strsep(&pciaddname_tmp, ",");
1842 			if (!strcmp("all", pciaddname)
1843 			    || !strcmp(pci_address_name, pciaddname)) {
1844 				long num_crtc;
1845 				int res = -1;
1846 
1847 				adev->enable_virtual_display = true;
1848 
1849 				if (pciaddname_tmp)
1850 					res = kstrtol(pciaddname_tmp, 10,
1851 						      &num_crtc);
1852 
1853 				if (!res) {
1854 					if (num_crtc < 1)
1855 						num_crtc = 1;
1856 					if (num_crtc > 6)
1857 						num_crtc = 6;
1858 					adev->mode_info.num_crtc = num_crtc;
1859 				} else {
1860 					adev->mode_info.num_crtc = 1;
1861 				}
1862 				break;
1863 			}
1864 		}
1865 
1866 		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1867 			 amdgpu_virtual_display, pci_address_name,
1868 			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1869 
1870 		kfree(pciaddstr);
1871 	}
1872 }
1873 
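/*
 * Example (illustrative, derived from the parsing above): booting with
 *   amdgpu.virtual_display=0000:04:00.0,2
 * enables virtual display with two CRTCs on the device at that PCI address,
 * while "all,1" enables a single virtual CRTC on every amdgpu device.  The
 * PCI address shown is hypothetical.
 */
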
1874 /**
1875  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1876  *
1877  * @adev: amdgpu_device pointer
1878  *
1879  * Parses the asic configuration parameters specified in the gpu info
1880  * firmware and makes them available to the driver for use in configuring
1881  * the asic.
1882  * Returns 0 on success, -EINVAL on failure.
1883  */
1884 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1885 {
1886 	const char *chip_name;
1887 	char fw_name[40];
1888 	int err;
1889 	const struct gpu_info_firmware_header_v1_0 *hdr;
1890 
1891 	adev->firmware.gpu_info_fw = NULL;
1892 
1893 	if (adev->mman.discovery_bin) {
1894 		amdgpu_discovery_get_gfx_info(adev);
1895 
1896 		/*
1897 		 * FIXME: The bounding box is still needed by Navi12, so
1898 		 * temporarily read it from gpu_info firmware. Should be dropped
1899 		 * when DAL no longer needs it.
1900 		 */
1901 		if (adev->asic_type != CHIP_NAVI12)
1902 			return 0;
1903 	}
1904 
1905 	switch (adev->asic_type) {
1906 #ifdef CONFIG_DRM_AMDGPU_SI
1907 	case CHIP_VERDE:
1908 	case CHIP_TAHITI:
1909 	case CHIP_PITCAIRN:
1910 	case CHIP_OLAND:
1911 	case CHIP_HAINAN:
1912 #endif
1913 #ifdef CONFIG_DRM_AMDGPU_CIK
1914 	case CHIP_BONAIRE:
1915 	case CHIP_HAWAII:
1916 	case CHIP_KAVERI:
1917 	case CHIP_KABINI:
1918 	case CHIP_MULLINS:
1919 #endif
1920 	case CHIP_TOPAZ:
1921 	case CHIP_TONGA:
1922 	case CHIP_FIJI:
1923 	case CHIP_POLARIS10:
1924 	case CHIP_POLARIS11:
1925 	case CHIP_POLARIS12:
1926 	case CHIP_VEGAM:
1927 	case CHIP_CARRIZO:
1928 	case CHIP_STONEY:
1929 	case CHIP_VEGA20:
1930 	case CHIP_ALDEBARAN:
1931 	case CHIP_SIENNA_CICHLID:
1932 	case CHIP_NAVY_FLOUNDER:
1933 	case CHIP_DIMGREY_CAVEFISH:
1934 	case CHIP_BEIGE_GOBY:
1935 	default:
1936 		return 0;
1937 	case CHIP_VEGA10:
1938 		chip_name = "vega10";
1939 		break;
1940 	case CHIP_VEGA12:
1941 		chip_name = "vega12";
1942 		break;
1943 	case CHIP_RAVEN:
1944 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1945 			chip_name = "raven2";
1946 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1947 			chip_name = "picasso";
1948 		else
1949 			chip_name = "raven";
1950 		break;
1951 	case CHIP_ARCTURUS:
1952 		chip_name = "arcturus";
1953 		break;
1954 	case CHIP_RENOIR:
1955 		if (adev->apu_flags & AMD_APU_IS_RENOIR)
1956 			chip_name = "renoir";
1957 		else
1958 			chip_name = "green_sardine";
1959 		break;
1960 	case CHIP_NAVI10:
1961 		chip_name = "navi10";
1962 		break;
1963 	case CHIP_NAVI14:
1964 		chip_name = "navi14";
1965 		break;
1966 	case CHIP_NAVI12:
1967 		chip_name = "navi12";
1968 		break;
1969 	case CHIP_VANGOGH:
1970 		chip_name = "vangogh";
1971 		break;
1972 	case CHIP_YELLOW_CARP:
1973 		chip_name = "yellow_carp";
1974 		break;
1975 	}
1976 
1977 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1978 	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1979 	if (err) {
1980 		dev_err(adev->dev,
1981 			"Failed to load gpu_info firmware \"%s\"\n",
1982 			fw_name);
1983 		goto out;
1984 	}
1985 	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1986 	if (err) {
1987 		dev_err(adev->dev,
1988 			"Failed to validate gpu_info firmware \"%s\"\n",
1989 			fw_name);
1990 		goto out;
1991 	}
1992 
1993 	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1994 	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1995 
1996 	switch (hdr->version_major) {
1997 	case 1:
1998 	{
1999 		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2000 			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2001 								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2002 
2003 		/*
2004 		 * Should be dropped when DAL no longer needs it.
2005 		 */
2006 		if (adev->asic_type == CHIP_NAVI12)
2007 			goto parse_soc_bounding_box;
2008 
2009 		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2010 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2011 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2012 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2013 		adev->gfx.config.max_texture_channel_caches =
2014 			le32_to_cpu(gpu_info_fw->gc_num_tccs);
2015 		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2016 		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2017 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2018 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2019 		adev->gfx.config.double_offchip_lds_buf =
2020 			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2021 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2022 		adev->gfx.cu_info.max_waves_per_simd =
2023 			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2024 		adev->gfx.cu_info.max_scratch_slots_per_cu =
2025 			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2026 		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2027 		if (hdr->version_minor >= 1) {
2028 			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2029 				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2030 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2031 			adev->gfx.config.num_sc_per_sh =
2032 				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2033 			adev->gfx.config.num_packer_per_sc =
2034 				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2035 		}
2036 
2037 parse_soc_bounding_box:
2038 		/*
2039 		 * soc bounding box info is not integrated in the discovery table,
2040 		 * so we always need to parse it from gpu info firmware when needed.
2041 		 */
2042 		if (hdr->version_minor == 2) {
2043 			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2044 				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2045 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2046 			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2047 		}
2048 		break;
2049 	}
2050 	default:
2051 		dev_err(adev->dev,
2052 			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2053 		err = -EINVAL;
2054 		goto out;
2055 	}
2056 out:
2057 	return err;
2058 }
2059 
2060 /**
2061  * amdgpu_device_ip_early_init - run early init for hardware IPs
2062  *
2063  * @adev: amdgpu_device pointer
2064  *
2065  * Early initialization pass for hardware IPs.  The hardware IPs that make
2066  * up each asic are discovered and each IP's early_init callback is run.  This
2067  * is the first stage in initializing the asic.
2068  * Returns 0 on success, negative error code on failure.
2069  */
2070 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2071 {
2072 	int i, r;
2073 
2074 	amdgpu_device_enable_virtual_display(adev);
2075 
2076 	if (amdgpu_sriov_vf(adev)) {
2077 		r = amdgpu_virt_request_full_gpu(adev, true);
2078 		if (r)
2079 			return r;
2080 	}
2081 
2082 	switch (adev->asic_type) {
2083 #ifdef CONFIG_DRM_AMDGPU_SI
2084 	case CHIP_VERDE:
2085 	case CHIP_TAHITI:
2086 	case CHIP_PITCAIRN:
2087 	case CHIP_OLAND:
2088 	case CHIP_HAINAN:
2089 		adev->family = AMDGPU_FAMILY_SI;
2090 		r = si_set_ip_blocks(adev);
2091 		if (r)
2092 			return r;
2093 		break;
2094 #endif
2095 #ifdef CONFIG_DRM_AMDGPU_CIK
2096 	case CHIP_BONAIRE:
2097 	case CHIP_HAWAII:
2098 	case CHIP_KAVERI:
2099 	case CHIP_KABINI:
2100 	case CHIP_MULLINS:
2101 		if (adev->flags & AMD_IS_APU)
2102 			adev->family = AMDGPU_FAMILY_KV;
2103 		else
2104 			adev->family = AMDGPU_FAMILY_CI;
2105 
2106 		r = cik_set_ip_blocks(adev);
2107 		if (r)
2108 			return r;
2109 		break;
2110 #endif
2111 	case CHIP_TOPAZ:
2112 	case CHIP_TONGA:
2113 	case CHIP_FIJI:
2114 	case CHIP_POLARIS10:
2115 	case CHIP_POLARIS11:
2116 	case CHIP_POLARIS12:
2117 	case CHIP_VEGAM:
2118 	case CHIP_CARRIZO:
2119 	case CHIP_STONEY:
2120 		if (adev->flags & AMD_IS_APU)
2121 			adev->family = AMDGPU_FAMILY_CZ;
2122 		else
2123 			adev->family = AMDGPU_FAMILY_VI;
2124 
2125 		r = vi_set_ip_blocks(adev);
2126 		if (r)
2127 			return r;
2128 		break;
2129 	case CHIP_VEGA10:
2130 	case CHIP_VEGA12:
2131 	case CHIP_VEGA20:
2132 	case CHIP_RAVEN:
2133 	case CHIP_ARCTURUS:
2134 	case CHIP_RENOIR:
2135 	case CHIP_ALDEBARAN:
2136 		if (adev->flags & AMD_IS_APU)
2137 			adev->family = AMDGPU_FAMILY_RV;
2138 		else
2139 			adev->family = AMDGPU_FAMILY_AI;
2140 
2141 		r = soc15_set_ip_blocks(adev);
2142 		if (r)
2143 			return r;
2144 		break;
2145 	case  CHIP_NAVI10:
2146 	case  CHIP_NAVI14:
2147 	case  CHIP_NAVI12:
2148 	case  CHIP_SIENNA_CICHLID:
2149 	case  CHIP_NAVY_FLOUNDER:
2150 	case  CHIP_DIMGREY_CAVEFISH:
2151 	case  CHIP_BEIGE_GOBY:
2152 	case CHIP_VANGOGH:
2153 	case CHIP_YELLOW_CARP:
2154 	case CHIP_CYAN_SKILLFISH:
2155 		if (adev->asic_type == CHIP_VANGOGH)
2156 			adev->family = AMDGPU_FAMILY_VGH;
2157 		else if (adev->asic_type == CHIP_YELLOW_CARP)
2158 			adev->family = AMDGPU_FAMILY_YC;
2159 		else
2160 			adev->family = AMDGPU_FAMILY_NV;
2161 
2162 		r = nv_set_ip_blocks(adev);
2163 		if (r)
2164 			return r;
2165 		break;
2166 	default:
2167 		/* FIXME: not supported yet */
2168 		return -EINVAL;
2169 	}
2170 
2171 	amdgpu_amdkfd_device_probe(adev);
2172 
2173 	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2174 	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2175 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2176 	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2177 		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2178 
2179 	for (i = 0; i < adev->num_ip_blocks; i++) {
2180 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2181 			DRM_ERROR("disabled ip block: %d <%s>\n",
2182 				  i, adev->ip_blocks[i].version->funcs->name);
2183 			adev->ip_blocks[i].status.valid = false;
2184 		} else {
2185 			if (adev->ip_blocks[i].version->funcs->early_init) {
2186 				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2187 				if (r == -ENOENT) {
2188 					adev->ip_blocks[i].status.valid = false;
2189 				} else if (r) {
2190 					DRM_ERROR("early_init of IP block <%s> failed %d\n",
2191 						  adev->ip_blocks[i].version->funcs->name, r);
2192 					return r;
2193 				} else {
2194 					adev->ip_blocks[i].status.valid = true;
2195 				}
2196 			} else {
2197 				adev->ip_blocks[i].status.valid = true;
2198 			}
2199 		}
2200 		/* get the vbios after the asic_funcs are set up */
2201 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2202 			r = amdgpu_device_parse_gpu_info_fw(adev);
2203 			if (r)
2204 				return r;
2205 
2206 			/* Read BIOS */
2207 			if (!amdgpu_get_bios(adev))
2208 				return -EINVAL;
2209 
2210 			r = amdgpu_atombios_init(adev);
2211 			if (r) {
2212 				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2213 				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2214 				return r;
2215 			}
2216 
2217 			/* get pf2vf msg info at its earliest time */
2218 			if (amdgpu_sriov_vf(adev))
2219 				amdgpu_virt_init_data_exchange(adev);
2220 
2221 		}
2222 	}
2223 
2224 	adev->cg_flags &= amdgpu_cg_mask;
2225 	adev->pg_flags &= amdgpu_pg_mask;
2226 
2227 	return 0;
2228 }
2229 
2230 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2231 {
2232 	int i, r;
2233 
2234 	for (i = 0; i < adev->num_ip_blocks; i++) {
2235 		if (!adev->ip_blocks[i].status.sw)
2236 			continue;
2237 		if (adev->ip_blocks[i].status.hw)
2238 			continue;
2239 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2240 		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2241 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2242 			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2243 			if (r) {
2244 				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2245 					  adev->ip_blocks[i].version->funcs->name, r);
2246 				return r;
2247 			}
2248 			adev->ip_blocks[i].status.hw = true;
2249 		}
2250 	}
2251 
2252 	return 0;
2253 }
2254 
2255 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2256 {
2257 	int i, r;
2258 
2259 	for (i = 0; i < adev->num_ip_blocks; i++) {
2260 		if (!adev->ip_blocks[i].status.sw)
2261 			continue;
2262 		if (adev->ip_blocks[i].status.hw)
2263 			continue;
2264 		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2265 		if (r) {
2266 			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2267 				  adev->ip_blocks[i].version->funcs->name, r);
2268 			return r;
2269 		}
2270 		adev->ip_blocks[i].status.hw = true;
2271 	}
2272 
2273 	return 0;
2274 }
2275 
2276 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2277 {
2278 	int r = 0;
2279 	int i;
2280 	uint32_t smu_version;
2281 
2282 	if (adev->asic_type >= CHIP_VEGA10) {
2283 		for (i = 0; i < adev->num_ip_blocks; i++) {
2284 			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2285 				continue;
2286 
2287 			if (!adev->ip_blocks[i].status.sw)
2288 				continue;
2289 
2290 			/* no need to do the fw loading again if already done */
2291 			if (adev->ip_blocks[i].status.hw)
2292 				break;
2293 
2294 			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2295 				r = adev->ip_blocks[i].version->funcs->resume(adev);
2296 				if (r) {
2297 					DRM_ERROR("resume of IP block <%s> failed %d\n",
2298 							  adev->ip_blocks[i].version->funcs->name, r);
2299 					return r;
2300 				}
2301 			} else {
2302 				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2303 				if (r) {
2304 					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2305 							  adev->ip_blocks[i].version->funcs->name, r);
2306 					return r;
2307 				}
2308 			}
2309 
2310 			adev->ip_blocks[i].status.hw = true;
2311 			break;
2312 		}
2313 	}
2314 
2315 	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2316 		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2317 
2318 	return r;
2319 }
2320 
2321 /**
2322  * amdgpu_device_ip_init - run init for hardware IPs
2323  *
2324  * @adev: amdgpu_device pointer
2325  *
2326  * Main initialization pass for hardware IPs.  The list of all the hardware
2327  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2328  * are run.  sw_init initializes the software state associated with each IP
2329  * and hw_init initializes the hardware associated with each IP.
2330  * Returns 0 on success, negative error code on failure.
2331  */
2332 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2333 {
2334 	int i, r;
2335 
2336 	r = amdgpu_ras_init(adev);
2337 	if (r)
2338 		return r;
2339 
2340 	for (i = 0; i < adev->num_ip_blocks; i++) {
2341 		if (!adev->ip_blocks[i].status.valid)
2342 			continue;
2343 		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2344 		if (r) {
2345 			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2346 				  adev->ip_blocks[i].version->funcs->name, r);
2347 			goto init_failed;
2348 		}
2349 		adev->ip_blocks[i].status.sw = true;
2350 
2351 		/* need to do gmc hw init early so we can allocate gpu mem */
2352 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2353 			r = amdgpu_device_vram_scratch_init(adev);
2354 			if (r) {
2355 				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2356 				goto init_failed;
2357 			}
2358 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2359 			if (r) {
2360 				DRM_ERROR("hw_init %d failed %d\n", i, r);
2361 				goto init_failed;
2362 			}
2363 			r = amdgpu_device_wb_init(adev);
2364 			if (r) {
2365 				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2366 				goto init_failed;
2367 			}
2368 			adev->ip_blocks[i].status.hw = true;
2369 
2370 			/* right after GMC hw init, we create CSA */
2371 			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2372 				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2373 								AMDGPU_GEM_DOMAIN_VRAM,
2374 								AMDGPU_CSA_SIZE);
2375 				if (r) {
2376 					DRM_ERROR("allocate CSA failed %d\n", r);
2377 					goto init_failed;
2378 				}
2379 			}
2380 		}
2381 	}
2382 
2383 	if (amdgpu_sriov_vf(adev))
2384 		amdgpu_virt_init_data_exchange(adev);
2385 
2386 	r = amdgpu_ib_pool_init(adev);
2387 	if (r) {
2388 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2389 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2390 		goto init_failed;
2391 	}
2392 
2393 	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init completes */
2394 	if (r)
2395 		goto init_failed;
2396 
2397 	r = amdgpu_device_ip_hw_init_phase1(adev);
2398 	if (r)
2399 		goto init_failed;
2400 
2401 	r = amdgpu_device_fw_loading(adev);
2402 	if (r)
2403 		goto init_failed;
2404 
2405 	r = amdgpu_device_ip_hw_init_phase2(adev);
2406 	if (r)
2407 		goto init_failed;
2408 
2409 	/*
2410 	 * Retired pages will be loaded from eeprom and reserved here;
2411 	 * it should be called after amdgpu_device_ip_hw_init_phase2 since
2412 	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2413 	 * functional for I2C communication, which is only true at this point.
2414 	 *
2415 	 * amdgpu_ras_recovery_init may fail, but the upper layers only care
2416 	 * about failures caused by a bad GPU situation and stop the amdgpu
2417 	 * init process accordingly.  For other failures, it still releases all
2418 	 * the resources and prints an error message rather than returning a
2419 	 * negative value to the upper level.
2420 	 *
2421 	 * Note: theoretically, this should be called before all vram allocations
2422 	 * to protect retired pages from being abused.
2423 	 */
2424 	r = amdgpu_ras_recovery_init(adev);
2425 	if (r)
2426 		goto init_failed;
2427 
2428 	if (adev->gmc.xgmi.num_physical_nodes > 1)
2429 		amdgpu_xgmi_add_device(adev);
2430 
2431 	/* Don't init kfd if whole hive need to be reset during init */
2432 	if (!adev->gmc.xgmi.pending_reset)
2433 		amdgpu_amdkfd_device_init(adev);
2434 
2435 	r = amdgpu_amdkfd_resume_iommu(adev);
2436 	if (r)
2437 		goto init_failed;
2438 
2439 	amdgpu_fru_get_product_info(adev);
2440 
2441 init_failed:
2442 	if (amdgpu_sriov_vf(adev))
2443 		amdgpu_virt_release_full_gpu(adev, true);
2444 
2445 	return r;
2446 }
2447 
2448 /**
2449  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2450  *
2451  * @adev: amdgpu_device pointer
2452  *
2453  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2454  * this function before a GPU reset.  If the value is retained after a
2455  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2456  */
2457 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2458 {
2459 	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2460 }
2461 
2462 /**
2463  * amdgpu_device_check_vram_lost - check if vram is valid
2464  *
2465  * @adev: amdgpu_device pointer
2466  *
2467  * Checks the reset magic value written to the gart pointer in VRAM.
2468  * The driver calls this after a GPU reset to see if the contents of
2469  * VRAM is lost or not.
2470  * returns true if vram is lost, false if not.
2471  */
2472 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2473 {
2474 	if (memcmp(adev->gart.ptr, adev->reset_magic,
2475 			AMDGPU_RESET_MAGIC_NUM))
2476 		return true;
2477 
2478 	if (!amdgpu_in_reset(adev))
2479 		return false;
2480 
2481 	/*
2482 	 * For all ASICs with baco/mode1 reset, the VRAM is
2483 	 * always assumed to be lost.
2484 	 */
2485 	switch (amdgpu_asic_reset_method(adev)) {
2486 	case AMD_RESET_METHOD_BACO:
2487 	case AMD_RESET_METHOD_MODE1:
2488 		return true;
2489 	default:
2490 		return false;
2491 	}
2492 }
2493 
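/*
 * Illustrative sketch (not part of the upstream driver): how the two helpers
 * above pair around an ASIC reset.  The wrapper below is hypothetical and only
 * demonstrates the intended calling order.
 */
static inline bool amdgpu_device_example_reset_and_check_vram(struct amdgpu_device *adev)
{
	/* snapshot the magic before the reset */
	amdgpu_device_fill_reset_magic(adev);

	/* assume the worst if the reset itself fails */
	if (amdgpu_asic_reset(adev))
		return true;

	/* compare the magic after the reset */
	return amdgpu_device_check_vram_lost(adev);
}
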
2494 /**
2495  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2496  *
2497  * @adev: amdgpu_device pointer
2498  * @state: clockgating state (gate or ungate)
2499  *
2500  * The list of all the hardware IPs that make up the asic is walked and the
2501  * set_clockgating_state callbacks are run.
2502  * The late initialization pass enables clockgating for hardware IPs, while
2503  * the fini or suspend passes disable clockgating for hardware IPs.
2504  * Returns 0 on success, negative error code on failure.
2505  */
2506 
2507 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2508 			       enum amd_clockgating_state state)
2509 {
2510 	int i, j, r;
2511 
2512 	if (amdgpu_emu_mode == 1)
2513 		return 0;
2514 
2515 	for (j = 0; j < adev->num_ip_blocks; j++) {
2516 		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2517 		if (!adev->ip_blocks[i].status.late_initialized)
2518 			continue;
2519 		/* skip CG for GFX on S0ix */
2520 		if (adev->in_s0ix &&
2521 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2522 			continue;
2523 		/* skip CG for VCE/UVD, it's handled specially */
2524 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2525 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2526 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2527 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2528 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2529 			/* enable clockgating to save power */
2530 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2531 										     state);
2532 			if (r) {
2533 				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2534 					  adev->ip_blocks[i].version->funcs->name, r);
2535 				return r;
2536 			}
2537 		}
2538 	}
2539 
2540 	return 0;
2541 }
2542 
2543 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2544 			       enum amd_powergating_state state)
2545 {
2546 	int i, j, r;
2547 
2548 	if (amdgpu_emu_mode == 1)
2549 		return 0;
2550 
2551 	for (j = 0; j < adev->num_ip_blocks; j++) {
2552 		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2553 		if (!adev->ip_blocks[i].status.late_initialized)
2554 			continue;
2555 		/* skip PG for GFX on S0ix */
2556 		if (adev->in_s0ix &&
2557 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2558 			continue;
2559 		/* skip PG for VCE/UVD, it's handled specially */
2560 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2561 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2562 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2563 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2564 		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2565 			/* enable powergating to save power */
2566 			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2567 											state);
2568 			if (r) {
2569 				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2570 					  adev->ip_blocks[i].version->funcs->name, r);
2571 				return r;
2572 			}
2573 		}
2574 	}
2575 	return 0;
2576 }
2577 
2578 static int amdgpu_device_enable_mgpu_fan_boost(void)
2579 {
2580 	struct amdgpu_gpu_instance *gpu_ins;
2581 	struct amdgpu_device *adev;
2582 	int i, ret = 0;
2583 
2584 	mutex_lock(&mgpu_info.mutex);
2585 
2586 	/*
2587 	 * MGPU fan boost feature should be enabled
2588 	 * only when there are two or more dGPUs in
2589 	 * the system
2590 	 */
2591 	if (mgpu_info.num_dgpu < 2)
2592 		goto out;
2593 
2594 	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2595 		gpu_ins = &(mgpu_info.gpu_ins[i]);
2596 		adev = gpu_ins->adev;
2597 		if (!(adev->flags & AMD_IS_APU) &&
2598 		    !gpu_ins->mgpu_fan_enabled) {
2599 			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2600 			if (ret)
2601 				break;
2602 
2603 			gpu_ins->mgpu_fan_enabled = 1;
2604 		}
2605 	}
2606 
2607 out:
2608 	mutex_unlock(&mgpu_info.mutex);
2609 
2610 	return ret;
2611 }
2612 
2613 /**
2614  * amdgpu_device_ip_late_init - run late init for hardware IPs
2615  *
2616  * @adev: amdgpu_device pointer
2617  *
2618  * Late initialization pass for hardware IPs.  The list of all the hardware
2619  * IPs that make up the asic is walked and the late_init callbacks are run.
2620  * late_init covers any special initialization that an IP requires
2621  * after all of them have been initialized or something that needs to happen
2622  * late in the init process.
2623  * Returns 0 on success, negative error code on failure.
2624  */
2625 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2626 {
2627 	struct amdgpu_gpu_instance *gpu_instance;
2628 	int i = 0, r;
2629 
2630 	for (i = 0; i < adev->num_ip_blocks; i++) {
2631 		if (!adev->ip_blocks[i].status.hw)
2632 			continue;
2633 		if (adev->ip_blocks[i].version->funcs->late_init) {
2634 			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2635 			if (r) {
2636 				DRM_ERROR("late_init of IP block <%s> failed %d\n",
2637 					  adev->ip_blocks[i].version->funcs->name, r);
2638 				return r;
2639 			}
2640 		}
2641 		adev->ip_blocks[i].status.late_initialized = true;
2642 	}
2643 
2644 	amdgpu_ras_set_error_query_ready(adev, true);
2645 
2646 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2647 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2648 
2649 	amdgpu_device_fill_reset_magic(adev);
2650 
2651 	r = amdgpu_device_enable_mgpu_fan_boost();
2652 	if (r)
2653 		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2654 
2655 	/* For XGMI + passthrough configuration on arcturus, enable light SBR */
2656 	if (adev->asic_type == CHIP_ARCTURUS &&
2657 	    amdgpu_passthrough(adev) &&
2658 	    adev->gmc.xgmi.num_physical_nodes > 1)
2659 		smu_set_light_sbr(&adev->smu, true);
2660 
2661 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2662 		mutex_lock(&mgpu_info.mutex);
2663 
2664 		/*
2665 		 * Reset device p-state to low as it was booted with a high p-state.
2666 		 *
2667 		 * This should be performed only after all devices from the same
2668 		 * hive get initialized.
2669 		 *
2670 		 * However, the number of devices in the hive is not known in advance,
2671 		 * as it is counted one by one during device initialization.
2672 		 *
2673 		 * So, we wait for all XGMI interlinked devices to be initialized.
2674 		 * This may bring some delays as those devices may come from
2675 		 * different hives. But that should be OK.
2676 		 */
2677 		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2678 			for (i = 0; i < mgpu_info.num_gpu; i++) {
2679 				gpu_instance = &(mgpu_info.gpu_ins[i]);
2680 				if (gpu_instance->adev->flags & AMD_IS_APU)
2681 					continue;
2682 
2683 				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2684 						AMDGPU_XGMI_PSTATE_MIN);
2685 				if (r) {
2686 					DRM_ERROR("pstate setting failed (%d).\n", r);
2687 					break;
2688 				}
2689 			}
2690 		}
2691 
2692 		mutex_unlock(&mgpu_info.mutex);
2693 	}
2694 
2695 	return 0;
2696 }
2697 
2698 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2699 {
2700 	int i, r;
2701 
2702 	for (i = 0; i < adev->num_ip_blocks; i++) {
2703 		if (!adev->ip_blocks[i].version->funcs->early_fini)
2704 			continue;
2705 
2706 		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2707 		if (r) {
2708 			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2709 				  adev->ip_blocks[i].version->funcs->name, r);
2710 		}
2711 	}
2712 
2713 	amdgpu_amdkfd_suspend(adev, false);
2714 
2715 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2716 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2717 
2718 	/* need to disable SMC first */
2719 	for (i = 0; i < adev->num_ip_blocks; i++) {
2720 		if (!adev->ip_blocks[i].status.hw)
2721 			continue;
2722 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2723 			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2724 			/* XXX handle errors */
2725 			if (r) {
2726 				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2727 					  adev->ip_blocks[i].version->funcs->name, r);
2728 			}
2729 			adev->ip_blocks[i].status.hw = false;
2730 			break;
2731 		}
2732 	}
2733 
2734 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2735 		if (!adev->ip_blocks[i].status.hw)
2736 			continue;
2737 
2738 		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2739 		/* XXX handle errors */
2740 		if (r) {
2741 			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2742 				  adev->ip_blocks[i].version->funcs->name, r);
2743 		}
2744 
2745 		adev->ip_blocks[i].status.hw = false;
2746 	}
2747 
2748 	return 0;
2749 }
2750 
2751 /**
2752  * amdgpu_device_ip_fini - run fini for hardware IPs
2753  *
2754  * @adev: amdgpu_device pointer
2755  *
2756  * Main teardown pass for hardware IPs.  The list of all the hardware
2757  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2758  * are run.  hw_fini tears down the hardware associated with each IP
2759  * and sw_fini tears down any software state associated with each IP.
2760  * Returns 0 on success, negative error code on failure.
2761  */
2762 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2763 {
2764 	int i, r;
2765 
2766 	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2767 		amdgpu_virt_release_ras_err_handler_data(adev);
2768 
2769 	amdgpu_ras_pre_fini(adev);
2770 
2771 	if (adev->gmc.xgmi.num_physical_nodes > 1)
2772 		amdgpu_xgmi_remove_device(adev);
2773 
2774 	amdgpu_amdkfd_device_fini_sw(adev);
2775 
2776 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2777 		if (!adev->ip_blocks[i].status.sw)
2778 			continue;
2779 
2780 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2781 			amdgpu_ucode_free_bo(adev);
2782 			amdgpu_free_static_csa(&adev->virt.csa_obj);
2783 			amdgpu_device_wb_fini(adev);
2784 			amdgpu_device_vram_scratch_fini(adev);
2785 			amdgpu_ib_pool_fini(adev);
2786 		}
2787 
2788 		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2789 		/* XXX handle errors */
2790 		if (r) {
2791 			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2792 				  adev->ip_blocks[i].version->funcs->name, r);
2793 		}
2794 		adev->ip_blocks[i].status.sw = false;
2795 		adev->ip_blocks[i].status.valid = false;
2796 	}
2797 
2798 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2799 		if (!adev->ip_blocks[i].status.late_initialized)
2800 			continue;
2801 		if (adev->ip_blocks[i].version->funcs->late_fini)
2802 			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2803 		adev->ip_blocks[i].status.late_initialized = false;
2804 	}
2805 
2806 	amdgpu_ras_fini(adev);
2807 
2808 	if (amdgpu_sriov_vf(adev))
2809 		if (amdgpu_virt_release_full_gpu(adev, false))
2810 			DRM_ERROR("failed to release exclusive mode on fini\n");
2811 
2812 	return 0;
2813 }
2814 
2815 /**
2816  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2817  *
2818  * @work: work_struct.
2819  */
2820 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2821 {
2822 	struct amdgpu_device *adev =
2823 		container_of(work, struct amdgpu_device, delayed_init_work.work);
2824 	int r;
2825 
2826 	r = amdgpu_ib_ring_tests(adev);
2827 	if (r)
2828 		DRM_ERROR("ib ring test failed (%d).\n", r);
2829 }
2830 
2831 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2832 {
2833 	struct amdgpu_device *adev =
2834 		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2835 
2836 	WARN_ON_ONCE(adev->gfx.gfx_off_state);
2837 	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2838 
2839 	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2840 		adev->gfx.gfx_off_state = true;
2841 }
2842 
2843 /**
2844  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2845  *
2846  * @adev: amdgpu_device pointer
2847  *
2848  * Main suspend function for hardware IPs.  The list of all the hardware
2849  * IPs that make up the asic is walked, clockgating is disabled and the
2850  * suspend callbacks are run.  suspend puts the hardware and software state
2851  * in each IP into a state suitable for suspend.
2852  * Returns 0 on success, negative error code on failure.
2853  */
2854 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2855 {
2856 	int i, r;
2857 
2858 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2859 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2860 
2861 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2862 		if (!adev->ip_blocks[i].status.valid)
2863 			continue;
2864 
2865 		/* displays are handled separately */
2866 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2867 			continue;
2868 
2869 		/* XXX handle errors */
2870 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2871 		/* XXX handle errors */
2872 		if (r) {
2873 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2874 				  adev->ip_blocks[i].version->funcs->name, r);
2875 			return r;
2876 		}
2877 
2878 		adev->ip_blocks[i].status.hw = false;
2879 	}
2880 
2881 	return 0;
2882 }
2883 
2884 /**
2885  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2886  *
2887  * @adev: amdgpu_device pointer
2888  *
2889  * Main suspend function for hardware IPs.  The list of all the hardware
2890  * IPs that make up the asic is walked, clockgating is disabled and the
2891  * suspend callbacks are run.  suspend puts the hardware and software state
2892  * in each IP into a state suitable for suspend.
2893  * Returns 0 on success, negative error code on failure.
2894  */
2895 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2896 {
2897 	int i, r;
2898 
2899 	if (adev->in_s0ix)
2900 		amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
2901 
2902 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2903 		if (!adev->ip_blocks[i].status.valid)
2904 			continue;
2905 		/* displays are handled in phase1 */
2906 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2907 			continue;
2908 		/* PSP lost connection when err_event_athub occurs */
2909 		if (amdgpu_ras_intr_triggered() &&
2910 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2911 			adev->ip_blocks[i].status.hw = false;
2912 			continue;
2913 		}
2914 
2915 		/* skip unnecessary suspend if we have not initialized them yet */
2916 		if (adev->gmc.xgmi.pending_reset &&
2917 		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2918 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2919 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2920 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2921 			adev->ip_blocks[i].status.hw = false;
2922 			continue;
2923 		}
2924 
2925 		/* skip suspend of gfx and psp for S0ix
2926 		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
2927 		 * like at runtime. PSP is also part of the always on hardware
2928 		 * so no need to suspend it.
2929 		 */
2930 		if (adev->in_s0ix &&
2931 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2932 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
2933 			continue;
2934 
2935 		/* XXX handle errors */
2936 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2937 		/* XXX handle errors */
2938 		if (r) {
2939 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2940 				  adev->ip_blocks[i].version->funcs->name, r);
2941 		}
2942 		adev->ip_blocks[i].status.hw = false;
2943 		/* handle putting the SMC in the appropriate state */
2944 		if (!amdgpu_sriov_vf(adev)) {
2945 			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2946 				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2947 				if (r) {
2948 					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2949 							adev->mp1_state, r);
2950 					return r;
2951 				}
2952 			}
2953 		}
2954 	}
2955 
2956 	return 0;
2957 }
2958 
2959 /**
2960  * amdgpu_device_ip_suspend - run suspend for hardware IPs
2961  *
2962  * @adev: amdgpu_device pointer
2963  *
2964  * Main suspend function for hardware IPs.  The list of all the hardware
2965  * IPs that make up the asic is walked, clockgating is disabled and the
2966  * suspend callbacks are run.  suspend puts the hardware and software state
2967  * in each IP into a state suitable for suspend.
2968  * Returns 0 on success, negative error code on failure.
2969  */
2970 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2971 {
2972 	int r;
2973 
2974 	if (amdgpu_sriov_vf(adev)) {
2975 		amdgpu_virt_fini_data_exchange(adev);
2976 		amdgpu_virt_request_full_gpu(adev, false);
2977 	}
2978 
2979 	r = amdgpu_device_ip_suspend_phase1(adev);
2980 	if (r)
2981 		return r;
2982 	r = amdgpu_device_ip_suspend_phase2(adev);
2983 
2984 	if (amdgpu_sriov_vf(adev))
2985 		amdgpu_virt_release_full_gpu(adev, false);
2986 
2987 	return r;
2988 }
2989 
2990 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2991 {
2992 	int i, r;
2993 
2994 	static enum amd_ip_block_type ip_order[] = {
2995 		AMD_IP_BLOCK_TYPE_GMC,
2996 		AMD_IP_BLOCK_TYPE_COMMON,
2997 		AMD_IP_BLOCK_TYPE_PSP,
2998 		AMD_IP_BLOCK_TYPE_IH,
2999 	};
3000 
3001 	for (i = 0; i < adev->num_ip_blocks; i++) {
3002 		int j;
3003 		struct amdgpu_ip_block *block;
3004 
3005 		block = &adev->ip_blocks[i];
3006 		block->status.hw = false;
3007 
3008 		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3009 
3010 			if (block->version->type != ip_order[j] ||
3011 				!block->status.valid)
3012 				continue;
3013 
3014 			r = block->version->funcs->hw_init(adev);
3015 			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3016 			if (r)
3017 				return r;
3018 			block->status.hw = true;
3019 		}
3020 	}
3021 
3022 	return 0;
3023 }
3024 
3025 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3026 {
3027 	int i, r;
3028 
3029 	static enum amd_ip_block_type ip_order[] = {
3030 		AMD_IP_BLOCK_TYPE_SMC,
3031 		AMD_IP_BLOCK_TYPE_DCE,
3032 		AMD_IP_BLOCK_TYPE_GFX,
3033 		AMD_IP_BLOCK_TYPE_SDMA,
3034 		AMD_IP_BLOCK_TYPE_UVD,
3035 		AMD_IP_BLOCK_TYPE_VCE,
3036 		AMD_IP_BLOCK_TYPE_VCN
3037 	};
3038 
3039 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3040 		int j;
3041 		struct amdgpu_ip_block *block;
3042 
3043 		for (j = 0; j < adev->num_ip_blocks; j++) {
3044 			block = &adev->ip_blocks[j];
3045 
3046 			if (block->version->type != ip_order[i] ||
3047 				!block->status.valid ||
3048 				block->status.hw)
3049 				continue;
3050 
3051 			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3052 				r = block->version->funcs->resume(adev);
3053 			else
3054 				r = block->version->funcs->hw_init(adev);
3055 
3056 			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3057 			if (r)
3058 				return r;
3059 			block->status.hw = true;
3060 		}
3061 	}
3062 
3063 	return 0;
3064 }
3065 
3066 /**
3067  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3068  *
3069  * @adev: amdgpu_device pointer
3070  *
3071  * First resume function for hardware IPs.  The list of all the hardware
3072  * IPs that make up the asic is walked and the resume callbacks are run for
3073  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3074  * after a suspend and updates the software state as necessary.  This
3075  * function is also used for restoring the GPU after a GPU reset.
3076  * Returns 0 on success, negative error code on failure.
3077  */
3078 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3079 {
3080 	int i, r;
3081 
3082 	for (i = 0; i < adev->num_ip_blocks; i++) {
3083 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3084 			continue;
3085 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3086 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3087 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
3088 
3089 			r = adev->ip_blocks[i].version->funcs->resume(adev);
3090 			if (r) {
3091 				DRM_ERROR("resume of IP block <%s> failed %d\n",
3092 					  adev->ip_blocks[i].version->funcs->name, r);
3093 				return r;
3094 			}
3095 			adev->ip_blocks[i].status.hw = true;
3096 		}
3097 	}
3098 
3099 	return 0;
3100 }
3101 
3102 /**
3103  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3104  *
3105  * @adev: amdgpu_device pointer
3106  *
3107  * Second resume function for hardware IPs.  The list of all the hardware
3108  * IPs that make up the asic is walked and the resume callbacks are run for
3109  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3110  * functional state after a suspend and updates the software state as
3111  * necessary.  This function is also used for restoring the GPU after a GPU
3112  * reset.
3113  * Returns 0 on success, negative error code on failure.
3114  */
3115 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3116 {
3117 	int i, r;
3118 
3119 	for (i = 0; i < adev->num_ip_blocks; i++) {
3120 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3121 			continue;
3122 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3123 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3124 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3125 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3126 			continue;
3127 		r = adev->ip_blocks[i].version->funcs->resume(adev);
3128 		if (r) {
3129 			DRM_ERROR("resume of IP block <%s> failed %d\n",
3130 				  adev->ip_blocks[i].version->funcs->name, r);
3131 			return r;
3132 		}
3133 		adev->ip_blocks[i].status.hw = true;
3134 	}
3135 
3136 	return 0;
3137 }
3138 
3139 /**
3140  * amdgpu_device_ip_resume - run resume for hardware IPs
3141  *
3142  * @adev: amdgpu_device pointer
3143  *
3144  * Main resume function for hardware IPs.  The hardware IPs
3145  * are split into two resume functions because they are
3146  * also used in recovering from a GPU reset and some additional
3147  * steps need to be taken between them.  In this case (S3/S4) they are
3148  * run sequentially.
3149  * Returns 0 on success, negative error code on failure.
3150  */
3151 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3152 {
3153 	int r;
3154 
3155 	r = amdgpu_amdkfd_resume_iommu(adev);
3156 	if (r)
3157 		return r;
3158 
3159 	r = amdgpu_device_ip_resume_phase1(adev);
3160 	if (r)
3161 		return r;
3162 
3163 	r = amdgpu_device_fw_loading(adev);
3164 	if (r)
3165 		return r;
3166 
3167 	r = amdgpu_device_ip_resume_phase2(adev);
3168 
3169 	return r;
3170 }
3171 
3172 /**
3173  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3174  *
3175  * @adev: amdgpu_device pointer
3176  *
3177  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3178  */
3179 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3180 {
3181 	if (amdgpu_sriov_vf(adev)) {
3182 		if (adev->is_atom_fw) {
3183 			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3184 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3185 		} else {
3186 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3187 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3188 		}
3189 
3190 		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3191 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3192 	}
3193 }
3194 
3195 /**
3196  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3197  *
3198  * @asic_type: AMD asic type
3199  *
3200  * Check if there is DC (new modesetting infrastructure) support for an asic.
3201  * returns true if DC has support, false if not.
3202  */
3203 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3204 {
3205 	switch (asic_type) {
3206 #if defined(CONFIG_DRM_AMD_DC)
3207 #if defined(CONFIG_DRM_AMD_DC_SI)
3208 	case CHIP_TAHITI:
3209 	case CHIP_PITCAIRN:
3210 	case CHIP_VERDE:
3211 	case CHIP_OLAND:
3212 #endif
3213 	case CHIP_BONAIRE:
3214 	case CHIP_KAVERI:
3215 	case CHIP_KABINI:
3216 	case CHIP_MULLINS:
3217 		/*
3218 		 * We have systems in the wild with these ASICs that require
3219 		 * LVDS and VGA support which is not supported with DC.
3220 		 *
3221 		 * Fallback to the non-DC driver here by default so as not to
3222 		 * cause regressions.
3223 		 */
3224 		return amdgpu_dc > 0;
3225 	case CHIP_HAWAII:
3226 	case CHIP_CARRIZO:
3227 	case CHIP_STONEY:
3228 	case CHIP_POLARIS10:
3229 	case CHIP_POLARIS11:
3230 	case CHIP_POLARIS12:
3231 	case CHIP_VEGAM:
3232 	case CHIP_TONGA:
3233 	case CHIP_FIJI:
3234 	case CHIP_VEGA10:
3235 	case CHIP_VEGA12:
3236 	case CHIP_VEGA20:
3237 #if defined(CONFIG_DRM_AMD_DC_DCN)
3238 	case CHIP_RAVEN:
3239 	case CHIP_NAVI10:
3240 	case CHIP_NAVI14:
3241 	case CHIP_NAVI12:
3242 	case CHIP_RENOIR:
3243 	case CHIP_SIENNA_CICHLID:
3244 	case CHIP_NAVY_FLOUNDER:
3245 	case CHIP_DIMGREY_CAVEFISH:
3246 	case CHIP_BEIGE_GOBY:
3247 	case CHIP_VANGOGH:
3248 	case CHIP_YELLOW_CARP:
3249 #endif
3250 		return amdgpu_dc != 0;
3251 #endif
3252 	default:
3253 		if (amdgpu_dc > 0)
3254 			DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3255 					 "but isn't supported by ASIC, ignoring\n");
3256 		return false;
3257 	}
3258 }
3259 
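/*
 * Example (illustrative): passing amdgpu.dc=1 on the kernel command line forces
 * the DC display stack on for ASICs where the table above would otherwise fall
 * back to the legacy path (e.g. CHIP_KAVERI), while amdgpu.dc=0 disables DC on
 * every ASIC.
 */
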
3260 /**
3261  * amdgpu_device_has_dc_support - check if dc is supported
3262  *
3263  * @adev: amdgpu_device pointer
3264  *
3265  * Returns true for supported, false for not supported
3266  */
3267 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3268 {
3269 	if (amdgpu_sriov_vf(adev) ||
3270 	    adev->enable_virtual_display ||
3271 	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3272 		return false;
3273 
3274 	return amdgpu_device_asic_has_dc_support(adev->asic_type);
3275 }
3276 
3277 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3278 {
3279 	struct amdgpu_device *adev =
3280 		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3281 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3282 
3283 	/* It's a bug to not have a hive within this function */
3284 	if (WARN_ON(!hive))
3285 		return;
3286 
3287 	/*
3288 	 * Use task barrier to synchronize all xgmi reset works across the
3289 	 * hive. task_barrier_enter and task_barrier_exit will block
3290 	 * until all the threads running the xgmi reset works reach
3291 	 * those points. task_barrier_full will do both blocks.
3292 	 */
3293 	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3294 
3295 		task_barrier_enter(&hive->tb);
3296 		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3297 
3298 		if (adev->asic_reset_res)
3299 			goto fail;
3300 
3301 		task_barrier_exit(&hive->tb);
3302 		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3303 
3304 		if (adev->asic_reset_res)
3305 			goto fail;
3306 
3307 		if (adev->mmhub.ras_funcs &&
3308 		    adev->mmhub.ras_funcs->reset_ras_error_count)
3309 			adev->mmhub.ras_funcs->reset_ras_error_count(adev);
3310 	} else {
3311 
3312 		task_barrier_full(&hive->tb);
3313 		adev->asic_reset_res =  amdgpu_asic_reset(adev);
3314 	}
3315 
3316 fail:
3317 	if (adev->asic_reset_res)
3318 		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3319 			 adev->asic_reset_res, adev_to_drm(adev)->unique);
3320 	amdgpu_put_xgmi_hive(hive);
3321 }
3322 
3323 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3324 {
3325 	char *input = amdgpu_lockup_timeout;
3326 	char *timeout_setting = NULL;
3327 	int index = 0;
3328 	long timeout;
3329 	int ret = 0;
3330 
3331 	/*
3332 	 * By default the timeout for non-compute jobs is 10000
3333 	 * and 60000 for compute jobs.
3334 	 * In SR-IOV or passthrough mode, the timeout for compute
3335 	 * jobs is 60000 by default.
3336 	 */
3337 	adev->gfx_timeout = msecs_to_jiffies(10000);
3338 	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3339 	if (amdgpu_sriov_vf(adev))
3340 		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3341 					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3342 	else
3343 		adev->compute_timeout = msecs_to_jiffies(60000);
3344 
3345 	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3346 		while ((timeout_setting = strsep(&input, ",")) &&
3347 				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3348 			ret = kstrtol(timeout_setting, 0, &timeout);
3349 			if (ret)
3350 				return ret;
3351 
3352 			if (timeout == 0) {
3353 				index++;
3354 				continue;
3355 			} else if (timeout < 0) {
3356 				timeout = MAX_SCHEDULE_TIMEOUT;
3357 			} else {
3358 				timeout = msecs_to_jiffies(timeout);
3359 			}
3360 
3361 			switch (index++) {
3362 			case 0:
3363 				adev->gfx_timeout = timeout;
3364 				break;
3365 			case 1:
3366 				adev->compute_timeout = timeout;
3367 				break;
3368 			case 2:
3369 				adev->sdma_timeout = timeout;
3370 				break;
3371 			case 3:
3372 				adev->video_timeout = timeout;
3373 				break;
3374 			default:
3375 				break;
3376 			}
3377 		}
3378 		/*
3379 		 * There is only one value specified and
3380 		 * it should apply to all non-compute jobs.
3381 		 */
3382 		if (index == 1) {
3383 			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3384 			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3385 				adev->compute_timeout = adev->gfx_timeout;
3386 		}
3387 	}
3388 
3389 	return ret;
3390 }
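
/*
 * Illustrative sketch (not part of the driver): the parser above consumes the
 * amdgpu.lockup_timeout string positionally as gfx,compute,sdma,video.  A
 * hypothetical boot parameter of
 *
 *	amdgpu.lockup_timeout=10000,20000,0,-1
 *
 * would set gfx_timeout to 10000 ms and compute_timeout to 20000 ms, leave
 * sdma_timeout at its default (a "0" entry is skipped) and disable the video
 * queue timeout (a negative value maps to MAX_SCHEDULE_TIMEOUT).  A single
 * value such as "10000" is applied to every non-compute queue, as handled by
 * the index == 1 case above.
 */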
3391 
3392 static const struct attribute *amdgpu_dev_attributes[] = {
3393 	&dev_attr_product_name.attr,
3394 	&dev_attr_product_number.attr,
3395 	&dev_attr_serial_number.attr,
3396 	&dev_attr_pcie_replay_count.attr,
3397 	NULL
3398 };
3399 
3400 /**
3401  * amdgpu_device_init - initialize the driver
3402  *
3403  * @adev: amdgpu_device pointer
3404  * @flags: driver flags
3405  *
3406  * Initializes the driver info and hw (all asics).
3407  * Returns 0 for success or an error on failure.
3408  * Called at driver startup.
3409  */
3410 int amdgpu_device_init(struct amdgpu_device *adev,
3411 		       uint32_t flags)
3412 {
3413 	struct drm_device *ddev = adev_to_drm(adev);
3414 	struct pci_dev *pdev = adev->pdev;
3415 	int r, i;
3416 	bool px = false;
3417 	u32 max_MBps;
3418 
3419 	adev->shutdown = false;
3420 	adev->flags = flags;
3421 
3422 	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3423 		adev->asic_type = amdgpu_force_asic_type;
3424 	else
3425 		adev->asic_type = flags & AMD_ASIC_MASK;
3426 
3427 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3428 	if (amdgpu_emu_mode == 1)
3429 		adev->usec_timeout *= 10;
3430 	adev->gmc.gart_size = 512 * 1024 * 1024;
3431 	adev->accel_working = false;
3432 	adev->num_rings = 0;
3433 	adev->mman.buffer_funcs = NULL;
3434 	adev->mman.buffer_funcs_ring = NULL;
3435 	adev->vm_manager.vm_pte_funcs = NULL;
3436 	adev->vm_manager.vm_pte_num_scheds = 0;
3437 	adev->gmc.gmc_funcs = NULL;
3438 	adev->harvest_ip_mask = 0x0;
3439 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3440 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3441 
3442 	adev->smc_rreg = &amdgpu_invalid_rreg;
3443 	adev->smc_wreg = &amdgpu_invalid_wreg;
3444 	adev->pcie_rreg = &amdgpu_invalid_rreg;
3445 	adev->pcie_wreg = &amdgpu_invalid_wreg;
3446 	adev->pciep_rreg = &amdgpu_invalid_rreg;
3447 	adev->pciep_wreg = &amdgpu_invalid_wreg;
3448 	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3449 	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3450 	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3451 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3452 	adev->didt_rreg = &amdgpu_invalid_rreg;
3453 	adev->didt_wreg = &amdgpu_invalid_wreg;
3454 	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3455 	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3456 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3457 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3458 
3459 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3460 		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3461 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3462 
3463 	/* mutex initializations are all done here so we
3464 	 * can recall the function without locking issues */
3465 	mutex_init(&adev->firmware.mutex);
3466 	mutex_init(&adev->pm.mutex);
3467 	mutex_init(&adev->gfx.gpu_clock_mutex);
3468 	mutex_init(&adev->srbm_mutex);
3469 	mutex_init(&adev->gfx.pipe_reserve_mutex);
3470 	mutex_init(&adev->gfx.gfx_off_mutex);
3471 	mutex_init(&adev->grbm_idx_mutex);
3472 	mutex_init(&adev->mn_lock);
3473 	mutex_init(&adev->virt.vf_errors.lock);
3474 	hash_init(adev->mn_hash);
3475 	atomic_set(&adev->in_gpu_reset, 0);
3476 	init_rwsem(&adev->reset_sem);
3477 	mutex_init(&adev->psp.mutex);
3478 	mutex_init(&adev->notifier_lock);
3479 
3480 	r = amdgpu_device_init_apu_flags(adev);
3481 	if (r)
3482 		return r;
3483 
3484 	r = amdgpu_device_check_arguments(adev);
3485 	if (r)
3486 		return r;
3487 
3488 	spin_lock_init(&adev->mmio_idx_lock);
3489 	spin_lock_init(&adev->smc_idx_lock);
3490 	spin_lock_init(&adev->pcie_idx_lock);
3491 	spin_lock_init(&adev->uvd_ctx_idx_lock);
3492 	spin_lock_init(&adev->didt_idx_lock);
3493 	spin_lock_init(&adev->gc_cac_idx_lock);
3494 	spin_lock_init(&adev->se_cac_idx_lock);
3495 	spin_lock_init(&adev->audio_endpt_idx_lock);
3496 	spin_lock_init(&adev->mm_stats.lock);
3497 
3498 	INIT_LIST_HEAD(&adev->shadow_list);
3499 	mutex_init(&adev->shadow_list_lock);
3500 
3501 	INIT_LIST_HEAD(&adev->reset_list);
3502 
3503 	INIT_DELAYED_WORK(&adev->delayed_init_work,
3504 			  amdgpu_device_delayed_init_work_handler);
3505 	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3506 			  amdgpu_device_delay_enable_gfx_off);
3507 
3508 	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3509 
3510 	adev->gfx.gfx_off_req_count = 1;
3511 	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3512 
3513 	atomic_set(&adev->throttling_logging_enabled, 1);
3514 	/*
3515 	 * If throttling continues, logging will be performed every minute
3516 	 * to avoid log flooding. "-1" is subtracted since the thermal
3517 	 * throttling interrupt comes every second. Thus, the total logging
3518 	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3519 	 * for the throttling interrupt) = 60 seconds.
3520 	 */
3521 	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3522 	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3523 
3524 	/* Registers mapping */
3525 	/* TODO: block userspace mapping of io register */
3526 	if (adev->asic_type >= CHIP_BONAIRE) {
3527 		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3528 		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3529 	} else {
3530 		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3531 		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3532 	}
3533 
3534 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3535 	if (adev->rmmio == NULL) {
3536 		return -ENOMEM;
3537 	}
3538 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3539 	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3540 
3541 	/* enable PCIE atomic ops */
3542 	r = pci_enable_atomic_ops_to_root(adev->pdev,
3543 					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3544 					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3545 	if (r) {
3546 		adev->have_atomics_support = false;
3547 		DRM_INFO("PCIE atomic ops is not supported\n");
3548 	} else {
3549 		adev->have_atomics_support = true;
3550 	}
3551 
3552 	amdgpu_device_get_pcie_info(adev);
3553 
3554 	if (amdgpu_mcbp)
3555 		DRM_INFO("MCBP is enabled\n");
3556 
3557 	if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3558 		adev->enable_mes = true;
3559 
3560 	/* detect hw virtualization here */
3561 	amdgpu_detect_virtualization(adev);
3562 
3563 	r = amdgpu_device_get_job_timeout_settings(adev);
3564 	if (r) {
3565 		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3566 		return r;
3567 	}
3568 
3569 	/* early init functions */
3570 	r = amdgpu_device_ip_early_init(adev);
3571 	if (r)
3572 		return r;
3573 
3574 	/* doorbell bar mapping and doorbell index init*/
3575 	amdgpu_device_doorbell_init(adev);
3576 
3577 	if (amdgpu_emu_mode == 1) {
3578 		/* post the asic on emulation mode */
3579 		emu_soc_asic_init(adev);
3580 		goto fence_driver_init;
3581 	}
3582 
3583 	amdgpu_reset_init(adev);
3584 
3585 	/* detect if we are with an SRIOV vbios */
3586 	amdgpu_device_detect_sriov_bios(adev);
3587 
3588 	/* check if we need to reset the asic
3589 	 *  E.g., driver was not cleanly unloaded previously, etc.
3590 	 */
3591 	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3592 		if (adev->gmc.xgmi.num_physical_nodes) {
3593 			dev_info(adev->dev, "Pending hive reset.\n");
3594 			adev->gmc.xgmi.pending_reset = true;
3595 			/* Only need to init necessary block for SMU to handle the reset */
3596 			for (i = 0; i < adev->num_ip_blocks; i++) {
3597 				if (!adev->ip_blocks[i].status.valid)
3598 					continue;
3599 				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3600 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3601 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3602 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3603 					DRM_DEBUG("IP %s disabled for hw_init.\n",
3604 						adev->ip_blocks[i].version->funcs->name);
3605 					adev->ip_blocks[i].status.hw = true;
3606 				}
3607 			}
3608 		} else {
3609 			r = amdgpu_asic_reset(adev);
3610 			if (r) {
3611 				dev_err(adev->dev, "asic reset on init failed\n");
3612 				goto failed;
3613 			}
3614 		}
3615 	}
3616 
3617 	pci_enable_pcie_error_reporting(adev->pdev);
3618 
3619 	/* Post card if necessary */
3620 	if (amdgpu_device_need_post(adev)) {
3621 		if (!adev->bios) {
3622 			dev_err(adev->dev, "no vBIOS found\n");
3623 			r = -EINVAL;
3624 			goto failed;
3625 		}
3626 		DRM_INFO("GPU posting now...\n");
3627 		r = amdgpu_device_asic_init(adev);
3628 		if (r) {
3629 			dev_err(adev->dev, "gpu post error!\n");
3630 			goto failed;
3631 		}
3632 	}
3633 
3634 	if (adev->is_atom_fw) {
3635 		/* Initialize clocks */
3636 		r = amdgpu_atomfirmware_get_clock_info(adev);
3637 		if (r) {
3638 			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3639 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3640 			goto failed;
3641 		}
3642 	} else {
3643 		/* Initialize clocks */
3644 		r = amdgpu_atombios_get_clock_info(adev);
3645 		if (r) {
3646 			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3647 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3648 			goto failed;
3649 		}
3650 		/* init i2c buses */
3651 		if (!amdgpu_device_has_dc_support(adev))
3652 			amdgpu_atombios_i2c_init(adev);
3653 	}
3654 
3655 fence_driver_init:
3656 	/* Fence driver */
3657 	r = amdgpu_fence_driver_sw_init(adev);
3658 	if (r) {
3659 		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3660 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3661 		goto failed;
3662 	}
3663 
3664 	/* init the mode config */
3665 	drm_mode_config_init(adev_to_drm(adev));
3666 
3667 	r = amdgpu_device_ip_init(adev);
3668 	if (r) {
3669 		/* failed in exclusive mode due to timeout */
3670 		if (amdgpu_sriov_vf(adev) &&
3671 		    !amdgpu_sriov_runtime(adev) &&
3672 		    amdgpu_virt_mmio_blocked(adev) &&
3673 		    !amdgpu_virt_wait_reset(adev)) {
3674 			dev_err(adev->dev, "VF exclusive mode timeout\n");
3675 			/* Don't send request since VF is inactive. */
3676 			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3677 			adev->virt.ops = NULL;
3678 			r = -EAGAIN;
3679 			goto release_ras_con;
3680 		}
3681 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3682 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3683 		goto release_ras_con;
3684 	}
3685 
3686 	amdgpu_fence_driver_hw_init(adev);
3687 
3688 	dev_info(adev->dev,
3689 		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3690 			adev->gfx.config.max_shader_engines,
3691 			adev->gfx.config.max_sh_per_se,
3692 			adev->gfx.config.max_cu_per_sh,
3693 			adev->gfx.cu_info.number);
3694 
3695 	adev->accel_working = true;
3696 
3697 	amdgpu_vm_check_compute_bug(adev);
3698 
3699 	/* Initialize the buffer migration limit. */
3700 	if (amdgpu_moverate >= 0)
3701 		max_MBps = amdgpu_moverate;
3702 	else
3703 		max_MBps = 8; /* Allow 8 MB/s. */
3704 	/* Get a log2 for easy divisions. */
3705 	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3706 
3707 	amdgpu_fbdev_init(adev);
3708 
3709 	r = amdgpu_pm_sysfs_init(adev);
3710 	if (r) {
3711 		adev->pm_sysfs_en = false;
3712 		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3713 	} else
3714 		adev->pm_sysfs_en = true;
3715 
3716 	r = amdgpu_ucode_sysfs_init(adev);
3717 	if (r) {
3718 		adev->ucode_sysfs_en = false;
3719 		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3720 	} else
3721 		adev->ucode_sysfs_en = true;
3722 
3723 	if ((amdgpu_testing & 1)) {
3724 		if (adev->accel_working)
3725 			amdgpu_test_moves(adev);
3726 		else
3727 			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3728 	}
3729 	if (amdgpu_benchmarking) {
3730 		if (adev->accel_working)
3731 			amdgpu_benchmark(adev, amdgpu_benchmarking);
3732 		else
3733 			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3734 	}
3735 
3736 	/*
3737 	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3738 	 * Otherwise the mgpu fan boost feature will be skipped because
3739 	 * the gpu instance count would be too low.
3740 	 */
3741 	amdgpu_register_gpu_instance(adev);
3742 
3743 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
3744 	 * explicit gating rather than handling it automatically.
3745 	 */
3746 	if (!adev->gmc.xgmi.pending_reset) {
3747 		r = amdgpu_device_ip_late_init(adev);
3748 		if (r) {
3749 			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3750 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3751 			goto release_ras_con;
3752 		}
3753 		/* must succeed. */
3754 		amdgpu_ras_resume(adev);
3755 		queue_delayed_work(system_wq, &adev->delayed_init_work,
3756 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3757 	}
3758 
3759 	if (amdgpu_sriov_vf(adev))
3760 		flush_delayed_work(&adev->delayed_init_work);
3761 
3762 	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3763 	if (r)
3764 		dev_err(adev->dev, "Could not create amdgpu device attr\n");
3765 
3766 	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3767 		r = amdgpu_pmu_init(adev);
3768 		if (r)
3769 			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
	}
3770 
3771 	/* Keep the stored PCI config space at hand to restore after a sudden PCI error */
3772 	if (amdgpu_device_cache_pci_state(adev->pdev))
3773 		pci_restore_state(pdev);
3774 
3775 	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3776 	/* this will fail for cards that aren't VGA class devices, just
3777 	 * ignore it */
3778 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3779 		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3780 
3781 	if (amdgpu_device_supports_px(ddev)) {
3782 		px = true;
3783 		vga_switcheroo_register_client(adev->pdev,
3784 					       &amdgpu_switcheroo_ops, px);
3785 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3786 	}
3787 
3788 	if (adev->gmc.xgmi.pending_reset)
3789 		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3790 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3791 
3792 	return 0;
3793 
3794 release_ras_con:
3795 	amdgpu_release_ras_context(adev);
3796 
3797 failed:
3798 	amdgpu_vf_error_trans_all(adev);
3799 
3800 	return r;
3801 }
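
/*
 * Usage sketch (assumption, for orientation only): amdgpu_device_init() is
 * meant to be called once from the driver load path, roughly as
 *
 *	adev->pdev = pdev;
 *	r = amdgpu_device_init(adev, flags);
 *	if (r)
 *		goto err_out;
 *
 * with @flags carrying the ASIC type from the PCI ID table.  On failure the
 * caller tears the device down again; the actual call site (e.g.
 * amdgpu_driver_load_kms()) lives outside this file.
 */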
3802 
3803 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3804 {
3805 	/* Clear all CPU mappings pointing to this device */
3806 	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3807 
3808 	/* Unmap all mapped bars - Doorbell, registers and VRAM */
3809 	amdgpu_device_doorbell_fini(adev);
3810 
3811 	iounmap(adev->rmmio);
3812 	adev->rmmio = NULL;
3813 	if (adev->mman.aper_base_kaddr)
3814 		iounmap(adev->mman.aper_base_kaddr);
3815 	adev->mman.aper_base_kaddr = NULL;
3816 
3817 	/* Memory manager related */
3818 	if (!adev->gmc.xgmi.connected_to_cpu) {
3819 		arch_phys_wc_del(adev->gmc.vram_mtrr);
3820 		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3821 	}
3822 }
3823 
3824 /**
3825  * amdgpu_device_fini_hw - tear down the driver
3826  *
3827  * @adev: amdgpu_device pointer
3828  *
3829  * Tear down the driver info (all asics).
3830  * Called at driver shutdown.
3831  */
3832 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3833 {
3834 	dev_info(adev->dev, "amdgpu: finishing device.\n");
3835 	flush_delayed_work(&adev->delayed_init_work);
3836 	if (adev->mman.initialized) {
3837 		flush_delayed_work(&adev->mman.bdev.wq);
3838 		ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3839 	}
3840 	adev->shutdown = true;
3841 
3842 	/* make sure the IB tests have finished before entering exclusive mode
3843 	 * to avoid preemption on the IB tests
3844 	 */
3845 	if (amdgpu_sriov_vf(adev)) {
3846 		amdgpu_virt_request_full_gpu(adev, false);
3847 		amdgpu_virt_fini_data_exchange(adev);
3848 	}
3849 
3850 	/* disable all interrupts */
3851 	amdgpu_irq_disable_all(adev);
3852 	if (adev->mode_info.mode_config_initialized){
3853 		if (!amdgpu_device_has_dc_support(adev))
3854 			drm_helper_force_disable_all(adev_to_drm(adev));
3855 		else
3856 			drm_atomic_helper_shutdown(adev_to_drm(adev));
3857 	}
3858 	amdgpu_fence_driver_hw_fini(adev);
3859 
3860 	if (adev->pm_sysfs_en)
3861 		amdgpu_pm_sysfs_fini(adev);
3862 	if (adev->ucode_sysfs_en)
3863 		amdgpu_ucode_sysfs_fini(adev);
3864 	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3865 
3866 	amdgpu_fbdev_fini(adev);
3867 
3868 	amdgpu_irq_fini_hw(adev);
3869 
3870 	amdgpu_device_ip_fini_early(adev);
3871 
3872 	amdgpu_gart_dummy_page_fini(adev);
3873 
3874 	amdgpu_device_unmap_mmio(adev);
3875 }
3876 
3877 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3878 {
3879 	amdgpu_device_ip_fini(adev);
3880 	amdgpu_fence_driver_sw_fini(adev);
3881 	release_firmware(adev->firmware.gpu_info_fw);
3882 	adev->firmware.gpu_info_fw = NULL;
3883 	adev->accel_working = false;
3884 
3885 	amdgpu_reset_fini(adev);
3886 
3887 	/* free i2c buses */
3888 	if (!amdgpu_device_has_dc_support(adev))
3889 		amdgpu_i2c_fini(adev);
3890 
3891 	if (amdgpu_emu_mode != 1)
3892 		amdgpu_atombios_fini(adev);
3893 
3894 	kfree(adev->bios);
3895 	adev->bios = NULL;
3896 	if (amdgpu_device_supports_px(adev_to_drm(adev))) {
3897 		vga_switcheroo_unregister_client(adev->pdev);
3898 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
3899 	}
3900 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3901 		vga_client_unregister(adev->pdev);
3902 
3903 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
3904 		amdgpu_pmu_fini(adev);
3905 	if (adev->mman.discovery_bin)
3906 		amdgpu_discovery_fini(adev);
3907 
3908 	kfree(adev->pci_state);
3909 
3910 }
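
/*
 * Teardown sketch (assumption, not a literal call sequence): teardown is
 * split into the two stages above so that hardware access is cut off early
 * while software state stays alive until the last reference is dropped:
 *
 *	amdgpu_device_fini_hw(adev);	- on driver unbind: stop HW, unmap MMIO
 *	...
 *	amdgpu_device_fini_sw(adev);	- on final release: free remaining SW state
 *
 * The real call sites are the driver unload and drm release paths outside
 * this file.
 */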
3911 
3912 
3913 /*
3914  * Suspend & resume.
3915  */
3916 /**
3917  * amdgpu_device_suspend - initiate device suspend
3918  *
3919  * @dev: drm dev pointer
3920  * @fbcon: notify the fbdev of suspend
3921  *
3922  * Puts the hw in the suspend state (all asics).
3923  * Returns 0 for success or an error on failure.
3924  * Called at driver suspend.
3925  */
3926 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3927 {
3928 	struct amdgpu_device *adev = drm_to_adev(dev);
3929 
3930 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3931 		return 0;
3932 
3933 	adev->in_suspend = true;
3934 
3935 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
3936 		DRM_WARN("smart shift update failed\n");
3937 
3938 	drm_kms_helper_poll_disable(dev);
3939 
3940 	if (fbcon)
3941 		amdgpu_fbdev_set_suspend(adev, 1);
3942 
3943 	cancel_delayed_work_sync(&adev->delayed_init_work);
3944 
3945 	amdgpu_ras_suspend(adev);
3946 
3947 	amdgpu_device_ip_suspend_phase1(adev);
3948 
3949 	if (!adev->in_s0ix)
3950 		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
3951 
3952 	/* evict vram memory */
3953 	amdgpu_bo_evict_vram(adev);
3954 
3955 	amdgpu_fence_driver_hw_fini(adev);
3956 
3957 	amdgpu_device_ip_suspend_phase2(adev);
3958 	/* evict remaining vram memory
3959 	 * This second call to evict vram is to evict the gart page table
3960 	 * using the CPU.
3961 	 */
3962 	amdgpu_bo_evict_vram(adev);
3963 
3964 	return 0;
3965 }
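
/*
 * Pairing sketch (assumption): system and runtime PM callbacks in the drm
 * driver are expected to wrap these two entry points symmetrically, roughly
 *
 *	suspend:  r = amdgpu_device_suspend(drm_dev, true);
 *	resume:   r = amdgpu_device_resume(drm_dev, true);
 *
 * with @fbcon false on the runtime PM path, where there is no fbdev console
 * to notify.  The concrete callbacks live in the driver's pm ops, outside
 * this file.
 */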
3966 
3967 /**
3968  * amdgpu_device_resume - initiate device resume
3969  *
3970  * @dev: drm dev pointer
3971  * @fbcon: notify the fbdev of resume
3972  *
3973  * Bring the hw back to operating state (all asics).
3974  * Returns 0 for success or an error on failure.
3975  * Called at driver resume.
3976  */
3977 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3978 {
3979 	struct amdgpu_device *adev = drm_to_adev(dev);
3980 	int r = 0;
3981 
3982 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3983 		return 0;
3984 
3985 	if (adev->in_s0ix)
3986 		amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
3987 
3988 	/* post card */
3989 	if (amdgpu_device_need_post(adev)) {
3990 		r = amdgpu_device_asic_init(adev);
3991 		if (r)
3992 			dev_err(adev->dev, "amdgpu asic init failed\n");
3993 	}
3994 
3995 	r = amdgpu_device_ip_resume(adev);
3996 	if (r) {
3997 		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
3998 		return r;
3999 	}
4000 	amdgpu_fence_driver_hw_init(adev);
4001 
4002 	r = amdgpu_device_ip_late_init(adev);
4003 	if (r)
4004 		return r;
4005 
4006 	queue_delayed_work(system_wq, &adev->delayed_init_work,
4007 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
4008 
4009 	if (!adev->in_s0ix) {
4010 		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4011 		if (r)
4012 			return r;
4013 	}
4014 
4015 	/* Make sure IB tests flushed */
4016 	flush_delayed_work(&adev->delayed_init_work);
4017 
4018 	if (fbcon)
4019 		amdgpu_fbdev_set_suspend(adev, 0);
4020 
4021 	drm_kms_helper_poll_enable(dev);
4022 
4023 	amdgpu_ras_resume(adev);
4024 
4025 	/*
4026 	 * Most of the connector probing functions try to acquire runtime pm
4027 	 * refs to ensure that the GPU is powered on when connector polling is
4028 	 * performed. Since we're calling this from a runtime PM callback,
4029 	 * trying to acquire rpm refs will cause us to deadlock.
4030 	 *
4031 	 * Since we're guaranteed to be holding the rpm lock, it's safe to
4032 	 * temporarily disable the rpm helpers so this doesn't deadlock us.
4033 	 */
4034 #ifdef CONFIG_PM
4035 	dev->dev->power.disable_depth++;
4036 #endif
4037 	if (!amdgpu_device_has_dc_support(adev))
4038 		drm_helper_hpd_irq_event(dev);
4039 	else
4040 		drm_kms_helper_hotplug_event(dev);
4041 #ifdef CONFIG_PM
4042 	dev->dev->power.disable_depth--;
4043 #endif
4044 	adev->in_suspend = false;
4045 
4046 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4047 		DRM_WARN("smart shift update failed\n");
4048 
4049 	return 0;
4050 }
4051 
4052 /**
4053  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4054  *
4055  * @adev: amdgpu_device pointer
4056  *
4057  * The list of all the hardware IPs that make up the asic is walked and
4058  * the check_soft_reset callbacks are run.  check_soft_reset determines
4059  * if the asic is still hung or not.
4060  * Returns true if any of the IPs are still in a hung state, false if not.
4061  */
4062 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4063 {
4064 	int i;
4065 	bool asic_hang = false;
4066 
4067 	if (amdgpu_sriov_vf(adev))
4068 		return true;
4069 
4070 	if (amdgpu_asic_need_full_reset(adev))
4071 		return true;
4072 
4073 	for (i = 0; i < adev->num_ip_blocks; i++) {
4074 		if (!adev->ip_blocks[i].status.valid)
4075 			continue;
4076 		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4077 			adev->ip_blocks[i].status.hang =
4078 				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4079 		if (adev->ip_blocks[i].status.hang) {
4080 			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4081 			asic_hang = true;
4082 		}
4083 	}
4084 	return asic_hang;
4085 }
4086 
4087 /**
4088  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4089  *
4090  * @adev: amdgpu_device pointer
4091  *
4092  * The list of all the hardware IPs that make up the asic is walked and the
4093  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4094  * handles any IP specific hardware or software state changes that are
4095  * necessary for a soft reset to succeed.
4096  * Returns 0 on success, negative error code on failure.
4097  */
4098 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4099 {
4100 	int i, r = 0;
4101 
4102 	for (i = 0; i < adev->num_ip_blocks; i++) {
4103 		if (!adev->ip_blocks[i].status.valid)
4104 			continue;
4105 		if (adev->ip_blocks[i].status.hang &&
4106 		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4107 			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4108 			if (r)
4109 				return r;
4110 		}
4111 	}
4112 
4113 	return 0;
4114 }
4115 
4116 /**
4117  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4118  *
4119  * @adev: amdgpu_device pointer
4120  *
4121  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4122  * reset is necessary to recover.
4123  * Returns true if a full asic reset is required, false if not.
4124  */
4125 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4126 {
4127 	int i;
4128 
4129 	if (amdgpu_asic_need_full_reset(adev))
4130 		return true;
4131 
4132 	for (i = 0; i < adev->num_ip_blocks; i++) {
4133 		if (!adev->ip_blocks[i].status.valid)
4134 			continue;
4135 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4136 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4137 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4138 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4139 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4140 			if (adev->ip_blocks[i].status.hang) {
4141 				dev_info(adev->dev, "Some block need full reset!\n");
4142 				return true;
4143 			}
4144 		}
4145 	}
4146 	return false;
4147 }
4148 
4149 /**
4150  * amdgpu_device_ip_soft_reset - do a soft reset
4151  *
4152  * @adev: amdgpu_device pointer
4153  *
4154  * The list of all the hardware IPs that make up the asic is walked and the
4155  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4156  * IP specific hardware or software state changes that are necessary to soft
4157  * reset the IP.
4158  * Returns 0 on success, negative error code on failure.
4159  */
4160 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4161 {
4162 	int i, r = 0;
4163 
4164 	for (i = 0; i < adev->num_ip_blocks; i++) {
4165 		if (!adev->ip_blocks[i].status.valid)
4166 			continue;
4167 		if (adev->ip_blocks[i].status.hang &&
4168 		    adev->ip_blocks[i].version->funcs->soft_reset) {
4169 			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4170 			if (r)
4171 				return r;
4172 		}
4173 	}
4174 
4175 	return 0;
4176 }
4177 
4178 /**
4179  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4180  *
4181  * @adev: amdgpu_device pointer
4182  *
4183  * The list of all the hardware IPs that make up the asic is walked and the
4184  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4185  * handles any IP specific hardware or software state changes that are
4186  * necessary after the IP has been soft reset.
4187  * Returns 0 on success, negative error code on failure.
4188  */
4189 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4190 {
4191 	int i, r = 0;
4192 
4193 	for (i = 0; i < adev->num_ip_blocks; i++) {
4194 		if (!adev->ip_blocks[i].status.valid)
4195 			continue;
4196 		if (adev->ip_blocks[i].status.hang &&
4197 		    adev->ip_blocks[i].version->funcs->post_soft_reset)
4198 			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4199 		if (r)
4200 			return r;
4201 	}
4202 
4203 	return 0;
4204 }
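
/*
 * Flow sketch (illustrative; mirrors how these helpers are used further down
 * in amdgpu_device_pre_asic_reset()): a soft reset attempt is bracketed as
 *
 *	if (!amdgpu_device_ip_need_full_reset(adev)) {
 *		amdgpu_device_ip_pre_soft_reset(adev);
 *		r = amdgpu_device_ip_soft_reset(adev);
 *		amdgpu_device_ip_post_soft_reset(adev);
 *		if (r || amdgpu_device_ip_check_soft_reset(adev))
 *			need_full_reset = true;
 *	}
 *
 * with a fallback to a full ASIC reset when the soft reset did not clear the
 * hang.
 */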
4205 
4206 /**
4207  * amdgpu_device_recover_vram - Recover some VRAM contents
4208  *
4209  * @adev: amdgpu_device pointer
4210  *
4211  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4212  * restore things like GPUVM page tables after a GPU reset where
4213  * the contents of VRAM might be lost.
4214  *
4215  * Returns:
4216  * 0 on success, negative error code on failure.
4217  */
4218 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4219 {
4220 	struct dma_fence *fence = NULL, *next = NULL;
4221 	struct amdgpu_bo *shadow;
4222 	struct amdgpu_bo_vm *vmbo;
4223 	long r = 1, tmo;
4224 
4225 	if (amdgpu_sriov_runtime(adev))
4226 		tmo = msecs_to_jiffies(8000);
4227 	else
4228 		tmo = msecs_to_jiffies(100);
4229 
4230 	dev_info(adev->dev, "recover vram bo from shadow start\n");
4231 	mutex_lock(&adev->shadow_list_lock);
4232 	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4233 		shadow = &vmbo->bo;
4234 		/* No need to recover an evicted BO */
4235 		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4236 		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4237 		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4238 			continue;
4239 
4240 		r = amdgpu_bo_restore_shadow(shadow, &next);
4241 		if (r)
4242 			break;
4243 
4244 		if (fence) {
4245 			tmo = dma_fence_wait_timeout(fence, false, tmo);
4246 			dma_fence_put(fence);
4247 			fence = next;
4248 			if (tmo == 0) {
4249 				r = -ETIMEDOUT;
4250 				break;
4251 			} else if (tmo < 0) {
4252 				r = tmo;
4253 				break;
4254 			}
4255 		} else {
4256 			fence = next;
4257 		}
4258 	}
4259 	mutex_unlock(&adev->shadow_list_lock);
4260 
4261 	if (fence)
4262 		tmo = dma_fence_wait_timeout(fence, false, tmo);
4263 	dma_fence_put(fence);
4264 
4265 	if (r < 0 || tmo <= 0) {
4266 		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4267 		return -EIO;
4268 	}
4269 
4270 	dev_info(adev->dev, "recover vram bo from shadow done\n");
4271 	return 0;
4272 }
4273 
4274 
4275 /**
4276  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4277  *
4278  * @adev: amdgpu_device pointer
4279  * @from_hypervisor: request from hypervisor
4280  *
4281  * Do a VF FLR and reinitialize the ASIC.
4282  * Returns 0 on success, otherwise an error.
4283  */
4284 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4285 				     bool from_hypervisor)
4286 {
4287 	int r;
4288 
4289 	if (from_hypervisor)
4290 		r = amdgpu_virt_request_full_gpu(adev, true);
4291 	else
4292 		r = amdgpu_virt_reset_gpu(adev);
4293 	if (r)
4294 		return r;
4295 
4296 	amdgpu_amdkfd_pre_reset(adev);
4297 
4298 	/* Resume IP prior to SMC */
4299 	r = amdgpu_device_ip_reinit_early_sriov(adev);
4300 	if (r)
4301 		goto error;
4302 
4303 	amdgpu_virt_init_data_exchange(adev);
4304 	/* we need to recover the GART prior to running SMC/CP/SDMA resume */
4305 	amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
4306 
4307 	r = amdgpu_device_fw_loading(adev);
4308 	if (r)
4309 		return r;
4310 
4311 	/* now we are okay to resume SMC/CP/SDMA */
4312 	r = amdgpu_device_ip_reinit_late_sriov(adev);
4313 	if (r)
4314 		goto error;
4315 
4316 	amdgpu_irq_gpu_reset_resume_helper(adev);
4317 	r = amdgpu_ib_ring_tests(adev);
4318 	amdgpu_amdkfd_post_reset(adev);
4319 
4320 error:
4321 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4322 		amdgpu_inc_vram_lost(adev);
4323 		r = amdgpu_device_recover_vram(adev);
4324 	}
4325 	amdgpu_virt_release_full_gpu(adev, true);
4326 
4327 	return r;
4328 }
4329 
4330 /**
4331  * amdgpu_device_has_job_running - check if there is any job in mirror list
4332  *
4333  * @adev: amdgpu_device pointer
4334  *
4335  * check if there is any job in mirror list
4336  */
4337 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4338 {
4339 	int i;
4340 	struct drm_sched_job *job;
4341 
4342 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4343 		struct amdgpu_ring *ring = adev->rings[i];
4344 
4345 		if (!ring || !ring->sched.thread)
4346 			continue;
4347 
4348 		spin_lock(&ring->sched.job_list_lock);
4349 		job = list_first_entry_or_null(&ring->sched.pending_list,
4350 					       struct drm_sched_job, list);
4351 		spin_unlock(&ring->sched.job_list_lock);
4352 		if (job)
4353 			return true;
4354 	}
4355 	return false;
4356 }
4357 
4358 /**
4359  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4360  *
4361  * @adev: amdgpu_device pointer
4362  *
4363  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4364  * a hung GPU.
4365  */
4366 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4367 {
4368 	if (!amdgpu_device_ip_check_soft_reset(adev)) {
4369 		dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4370 		return false;
4371 	}
4372 
4373 	if (amdgpu_gpu_recovery == 0)
4374 		goto disabled;
4375 
4376 	if (amdgpu_sriov_vf(adev))
4377 		return true;
4378 
4379 	if (amdgpu_gpu_recovery == -1) {
4380 		switch (adev->asic_type) {
4381 		case CHIP_BONAIRE:
4382 		case CHIP_HAWAII:
4383 		case CHIP_TOPAZ:
4384 		case CHIP_TONGA:
4385 		case CHIP_FIJI:
4386 		case CHIP_POLARIS10:
4387 		case CHIP_POLARIS11:
4388 		case CHIP_POLARIS12:
4389 		case CHIP_VEGAM:
4390 		case CHIP_VEGA20:
4391 		case CHIP_VEGA10:
4392 		case CHIP_VEGA12:
4393 		case CHIP_RAVEN:
4394 		case CHIP_ARCTURUS:
4395 		case CHIP_RENOIR:
4396 		case CHIP_NAVI10:
4397 		case CHIP_NAVI14:
4398 		case CHIP_NAVI12:
4399 		case CHIP_SIENNA_CICHLID:
4400 		case CHIP_NAVY_FLOUNDER:
4401 		case CHIP_DIMGREY_CAVEFISH:
4402 		case CHIP_BEIGE_GOBY:
4403 		case CHIP_VANGOGH:
4404 		case CHIP_ALDEBARAN:
4405 			break;
4406 		default:
4407 			goto disabled;
4408 		}
4409 	}
4410 
4411 	return true;
4412 
4413 disabled:
4414 		dev_info(adev->dev, "GPU recovery disabled.\n");
4415 		return false;
4416 }
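
/*
 * Usage sketch (assumption): the job timeout handler is the typical caller,
 * gating the heavyweight recovery path on this check, roughly
 *
 *	if (amdgpu_device_should_recover_gpu(ring->adev))
 *		amdgpu_device_gpu_recover(ring->adev, job);
 *	else
 *		drm_sched_suspend_timeout(&ring->sched);
 *
 * The actual handler (amdgpu_job_timedout()) lives in amdgpu_job.c.
 */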
4417 
4418 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4419 {
4420 	u32 i;
4421 	int ret = 0;
4422 
4423 	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4424 
4425 	dev_info(adev->dev, "GPU mode1 reset\n");
4426 
4427 	/* disable BM */
4428 	pci_clear_master(adev->pdev);
4429 
4430 	amdgpu_device_cache_pci_state(adev->pdev);
4431 
4432 	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4433 		dev_info(adev->dev, "GPU smu mode1 reset\n");
4434 		ret = amdgpu_dpm_mode1_reset(adev);
4435 	} else {
4436 		dev_info(adev->dev, "GPU psp mode1 reset\n");
4437 		ret = psp_gpu_reset(adev);
4438 	}
4439 
4440 	if (ret)
4441 		dev_err(adev->dev, "GPU mode1 reset failed\n");
4442 
4443 	amdgpu_device_load_pci_state(adev->pdev);
4444 
4445 	/* wait for asic to come out of reset */
4446 	for (i = 0; i < adev->usec_timeout; i++) {
4447 		u32 memsize = adev->nbio.funcs->get_memsize(adev);
4448 
4449 		if (memsize != 0xffffffff)
4450 			break;
4451 		udelay(1);
4452 	}
4453 
4454 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4455 	return ret;
4456 }
4457 
4458 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4459 				 struct amdgpu_reset_context *reset_context)
4460 {
4461 	int i, j, r = 0;
4462 	struct amdgpu_job *job = NULL;
4463 	bool need_full_reset =
4464 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4465 
4466 	if (reset_context->reset_req_dev == adev)
4467 		job = reset_context->job;
4468 
4469 	/* no need to dump if device is not in good state during probe period */
4470 	if (!adev->gmc.xgmi.pending_reset)
4471 		amdgpu_debugfs_wait_dump(adev);
4472 
4473 	if (amdgpu_sriov_vf(adev)) {
4474 		/* stop the data exchange thread */
4475 		amdgpu_virt_fini_data_exchange(adev);
4476 	}
4477 
4478 	/* block all schedulers and reset given job's ring */
4479 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4480 		struct amdgpu_ring *ring = adev->rings[i];
4481 
4482 		if (!ring || !ring->sched.thread)
4483 			continue;
4484 
4485 		/* clear job fences from the fence drv to avoid force_completion;
4486 		 * leave NULL and the vm flush fences in the fence drv */
4487 		for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
4488 			struct dma_fence *old, **ptr;
4489 
4490 			ptr = &ring->fence_drv.fences[j];
4491 			old = rcu_dereference_protected(*ptr, 1);
4492 			if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
4493 				RCU_INIT_POINTER(*ptr, NULL);
4494 			}
4495 		}
4496 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4497 		amdgpu_fence_driver_force_completion(ring);
4498 	}
4499 
4500 	if (job && job->vm)
4501 		drm_sched_increase_karma(&job->base);
4502 
4503 	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4504 	/* If reset handler not implemented, continue; otherwise return */
4505 	if (r == -ENOSYS)
4506 		r = 0;
4507 	else
4508 		return r;
4509 
4510 	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4511 	if (!amdgpu_sriov_vf(adev)) {
4512 
4513 		if (!need_full_reset)
4514 			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4515 
4516 		if (!need_full_reset) {
4517 			amdgpu_device_ip_pre_soft_reset(adev);
4518 			r = amdgpu_device_ip_soft_reset(adev);
4519 			amdgpu_device_ip_post_soft_reset(adev);
4520 			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4521 				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4522 				need_full_reset = true;
4523 			}
4524 		}
4525 
4526 		if (need_full_reset)
4527 			r = amdgpu_device_ip_suspend(adev);
4528 		if (need_full_reset)
4529 			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4530 		else
4531 			clear_bit(AMDGPU_NEED_FULL_RESET,
4532 				  &reset_context->flags);
4533 	}
4534 
4535 	return r;
4536 }
4537 
4538 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4539 			 struct amdgpu_reset_context *reset_context)
4540 {
4541 	struct amdgpu_device *tmp_adev = NULL;
4542 	bool need_full_reset, skip_hw_reset, vram_lost = false;
4543 	int r = 0;
4544 
4545 	/* Try reset handler method first */
4546 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4547 				    reset_list);
4548 	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4549 	/* If reset handler not implemented, continue; otherwise return */
4550 	if (r == -ENOSYS)
4551 		r = 0;
4552 	else
4553 		return r;
4554 
4555 	/* Reset handler not implemented, use the default method */
4556 	need_full_reset =
4557 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4558 	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4559 
4560 	/*
4561 	 * ASIC reset has to be done on all XGMI hive nodes ASAP
4562 	 * to allow proper links negotiation in FW (within 1 sec)
4563 	 */
4564 	if (!skip_hw_reset && need_full_reset) {
4565 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4566 			/* For XGMI run all resets in parallel to speed up the process */
4567 			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4568 				tmp_adev->gmc.xgmi.pending_reset = false;
4569 				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4570 					r = -EALREADY;
4571 			} else
4572 				r = amdgpu_asic_reset(tmp_adev);
4573 
4574 			if (r) {
4575 				dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4576 					 r, adev_to_drm(tmp_adev)->unique);
4577 				break;
4578 			}
4579 		}
4580 
4581 		/* For XGMI wait for all resets to complete before proceed */
4582 		if (!r) {
4583 			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4584 				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4585 					flush_work(&tmp_adev->xgmi_reset_work);
4586 					r = tmp_adev->asic_reset_res;
4587 					if (r)
4588 						break;
4589 				}
4590 			}
4591 		}
4592 	}
4593 
4594 	if (!r && amdgpu_ras_intr_triggered()) {
4595 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4596 			if (tmp_adev->mmhub.ras_funcs &&
4597 			    tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
4598 				tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
4599 		}
4600 
4601 		amdgpu_ras_intr_cleared();
4602 	}
4603 
4604 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4605 		if (need_full_reset) {
4606 			/* post card */
4607 			r = amdgpu_device_asic_init(tmp_adev);
4608 			if (r) {
4609 				dev_warn(tmp_adev->dev, "asic atom init failed!");
4610 			} else {
4611 				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4612 				r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4613 				if (r)
4614 					goto out;
4615 
4616 				r = amdgpu_device_ip_resume_phase1(tmp_adev);
4617 				if (r)
4618 					goto out;
4619 
4620 				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4621 				if (vram_lost) {
4622 					DRM_INFO("VRAM is lost due to GPU reset!\n");
4623 					amdgpu_inc_vram_lost(tmp_adev);
4624 				}
4625 
4626 				r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
4627 				if (r)
4628 					goto out;
4629 
4630 				r = amdgpu_device_fw_loading(tmp_adev);
4631 				if (r)
4632 					return r;
4633 
4634 				r = amdgpu_device_ip_resume_phase2(tmp_adev);
4635 				if (r)
4636 					goto out;
4637 
4638 				if (vram_lost)
4639 					amdgpu_device_fill_reset_magic(tmp_adev);
4640 
4641 				/*
4642 				 * Add this ASIC as tracked since the reset already
4643 				 * completed successfully.
4644 				 */
4645 				amdgpu_register_gpu_instance(tmp_adev);
4646 
4647 				if (!reset_context->hive &&
4648 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4649 					amdgpu_xgmi_add_device(tmp_adev);
4650 
4651 				r = amdgpu_device_ip_late_init(tmp_adev);
4652 				if (r)
4653 					goto out;
4654 
4655 				amdgpu_fbdev_set_suspend(tmp_adev, 0);
4656 
4657 				/*
4658 				 * The GPU enters a bad state once the number of faulty
4659 				 * pages flagged by ECC reaches the threshold, and ras
4660 				 * recovery is scheduled next. So add one check
4661 				 * here to break recovery if it indeed exceeds the
4662 				 * bad page threshold, and remind the user to
4663 				 * retire this GPU or set a bigger
4664 				 * bad_page_threshold value to fix this when
4665 				 * probing the driver again.
4666 				 */
4667 				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4668 					/* must succeed. */
4669 					amdgpu_ras_resume(tmp_adev);
4670 				} else {
4671 					r = -EINVAL;
4672 					goto out;
4673 				}
4674 
4675 				/* Update PSP FW topology after reset */
4676 				if (reset_context->hive &&
4677 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4678 					r = amdgpu_xgmi_update_topology(
4679 						reset_context->hive, tmp_adev);
4680 			}
4681 		}
4682 
4683 out:
4684 		if (!r) {
4685 			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4686 			r = amdgpu_ib_ring_tests(tmp_adev);
4687 			if (r) {
4688 				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4689 				need_full_reset = true;
4690 				r = -EAGAIN;
4691 				goto end;
4692 			}
4693 		}
4694 
4695 		if (!r)
4696 			r = amdgpu_device_recover_vram(tmp_adev);
4697 		else
4698 			tmp_adev->asic_reset_res = r;
4699 	}
4700 
4701 end:
4702 	if (need_full_reset)
4703 		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4704 	else
4705 		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4706 	return r;
4707 }
4708 
4709 static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
4710 				struct amdgpu_hive_info *hive)
4711 {
4712 	if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
4713 		return false;
4714 
4715 	if (hive) {
4716 		down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
4717 	} else {
4718 		down_write(&adev->reset_sem);
4719 	}
4720 
4721 	switch (amdgpu_asic_reset_method(adev)) {
4722 	case AMD_RESET_METHOD_MODE1:
4723 		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4724 		break;
4725 	case AMD_RESET_METHOD_MODE2:
4726 		adev->mp1_state = PP_MP1_STATE_RESET;
4727 		break;
4728 	default:
4729 		adev->mp1_state = PP_MP1_STATE_NONE;
4730 		break;
4731 	}
4732 
4733 	return true;
4734 }
4735 
4736 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4737 {
4738 	amdgpu_vf_error_trans_all(adev);
4739 	adev->mp1_state = PP_MP1_STATE_NONE;
4740 	atomic_set(&adev->in_gpu_reset, 0);
4741 	up_write(&adev->reset_sem);
4742 }
4743 
4744 /*
4745  * Lock a list of amdgpu devices in a hive safely. If it is not a hive
4746  * with multiple nodes, this behaves like amdgpu_device_lock_adev.
4747  *
4748  * Unlocking won't require a roll back.
4749  */
4750 static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
4751 {
4752 	struct amdgpu_device *tmp_adev = NULL;
4753 
4754 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
4755 		if (!hive) {
4756 			dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
4757 			return -ENODEV;
4758 		}
4759 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4760 			if (!amdgpu_device_lock_adev(tmp_adev, hive))
4761 				goto roll_back;
4762 		}
4763 	} else if (!amdgpu_device_lock_adev(adev, hive))
4764 		return -EAGAIN;
4765 
4766 	return 0;
4767 roll_back:
4768 	if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
4769 		/*
4770 		 * If the lock iteration broke off in the middle of a hive,
4771 		 * it may mean there is a race issue,
4772 		 * or that a hive device locked up independently.
4773 		 * We may or may not be in trouble, so try to roll back
4774 		 * the locks and give out a warning.
4775 		 */
4776 		dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
4777 		list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4778 			amdgpu_device_unlock_adev(tmp_adev);
4779 		}
4780 	}
4781 	return -EAGAIN;
4782 }
4783 
4784 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4785 {
4786 	struct pci_dev *p = NULL;
4787 
4788 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4789 			adev->pdev->bus->number, 1);
4790 	if (p) {
4791 		pm_runtime_enable(&(p->dev));
4792 		pm_runtime_resume(&(p->dev));
4793 	}
4794 }
4795 
4796 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4797 {
4798 	enum amd_reset_method reset_method;
4799 	struct pci_dev *p = NULL;
4800 	u64 expires;
4801 
4802 	/*
4803 	 * For now, only BACO and mode1 reset are confirmed
4804 	 * to suffer the audio issue if not properly suspended.
4805 	 */
4806 	reset_method = amdgpu_asic_reset_method(adev);
4807 	if ((reset_method != AMD_RESET_METHOD_BACO) &&
4808 	     (reset_method != AMD_RESET_METHOD_MODE1))
4809 		return -EINVAL;
4810 
4811 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4812 			adev->pdev->bus->number, 1);
4813 	if (!p)
4814 		return -ENODEV;
4815 
4816 	expires = pm_runtime_autosuspend_expiration(&(p->dev));
4817 	if (!expires)
4818 		/*
4819 		 * If we cannot get the audio device autosuspend delay,
4820 		 * a fixed 4s interval will be used. Since 3s is the audio
4821 		 * controller's default autosuspend delay setting, the 4s
4822 		 * used here is guaranteed to cover it.
4823 		 */
4824 		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4825 
4826 	while (!pm_runtime_status_suspended(&(p->dev))) {
4827 		if (!pm_runtime_suspend(&(p->dev)))
4828 			break;
4829 
4830 		if (expires < ktime_get_mono_fast_ns()) {
4831 			dev_warn(adev->dev, "failed to suspend display audio\n");
4832 			/* TODO: abort the succeeding gpu reset? */
4833 			return -ETIMEDOUT;
4834 		}
4835 	}
4836 
4837 	pm_runtime_disable(&(p->dev));
4838 
4839 	return 0;
4840 }
4841 
4842 static void amdgpu_device_recheck_guilty_jobs(
4843 	struct amdgpu_device *adev, struct list_head *device_list_handle,
4844 	struct amdgpu_reset_context *reset_context)
4845 {
4846 	int i, r = 0;
4847 
4848 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4849 		struct amdgpu_ring *ring = adev->rings[i];
4850 		int ret = 0;
4851 		struct drm_sched_job *s_job;
4852 
4853 		if (!ring || !ring->sched.thread)
4854 			continue;
4855 
4856 		s_job = list_first_entry_or_null(&ring->sched.pending_list,
4857 				struct drm_sched_job, list);
4858 		if (s_job == NULL)
4859 			continue;
4860 
4861 		/* clear the job's guilty flag and depend on the following step to decide the real one */
4862 		drm_sched_reset_karma(s_job);
4863 		drm_sched_resubmit_jobs_ext(&ring->sched, 1);
4864 
4865 		ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
4866 		if (ret == 0) { /* timeout */
4867 			DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
4868 						ring->sched.name, s_job->id);
4869 
4870 			/* set guilty */
4871 			drm_sched_increase_karma(s_job);
4872 retry:
4873 			/* do hw reset */
4874 			if (amdgpu_sriov_vf(adev)) {
4875 				amdgpu_virt_fini_data_exchange(adev);
4876 				r = amdgpu_device_reset_sriov(adev, false);
4877 				if (r)
4878 					adev->asic_reset_res = r;
4879 			} else {
4880 				clear_bit(AMDGPU_SKIP_HW_RESET,
4881 					  &reset_context->flags);
4882 				r = amdgpu_do_asic_reset(device_list_handle,
4883 							 reset_context);
4884 				if (r && r == -EAGAIN)
4885 					goto retry;
4886 			}
4887 
4888 			/*
4889 			 * bump the reset counter so that the following
4890 			 * resubmitted job can flush the vmid
4891 			 */
4892 			atomic_inc(&adev->gpu_reset_counter);
4893 			continue;
4894 		}
4895 
4896 		/* got the hw fence, signal finished fence */
4897 		atomic_dec(ring->sched.score);
4898 		dma_fence_get(&s_job->s_fence->finished);
4899 		dma_fence_signal(&s_job->s_fence->finished);
4900 		dma_fence_put(&s_job->s_fence->finished);
4901 
4902 		/* remove node from list and free the job */
4903 		spin_lock(&ring->sched.job_list_lock);
4904 		list_del_init(&s_job->list);
4905 		spin_unlock(&ring->sched.job_list_lock);
4906 		ring->sched.ops->free_job(s_job);
4907 	}
4908 }
4909 
4910 /**
4911  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4912  *
4913  * @adev: amdgpu_device pointer
4914  * @job: the job which triggered the hang
4915  *
4916  * Attempt to reset the GPU if it has hung (all asics).
4917  * Attempt to do a soft reset or full reset and reinitialize the ASIC.
4918  * Returns 0 for success or an error on failure.
4919  */
4920 
4921 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4922 			      struct amdgpu_job *job)
4923 {
4924 	struct list_head device_list, *device_list_handle =  NULL;
4925 	bool job_signaled = false;
4926 	struct amdgpu_hive_info *hive = NULL;
4927 	struct amdgpu_device *tmp_adev = NULL;
4928 	int i, r = 0;
4929 	bool need_emergency_restart = false;
4930 	bool audio_suspended = false;
4931 	int tmp_vram_lost_counter;
4932 	struct amdgpu_reset_context reset_context;
4933 
4934 	memset(&reset_context, 0, sizeof(reset_context));
4935 
4936 	/*
4937 	 * Special case: RAS triggered and full reset isn't supported
4938 	 */
4939 	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
4940 
4941 	/*
4942 	 * Flush RAM to disk so that after reboot
4943 	 * the user can read log and see why the system rebooted.
4944 	 */
4945 	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
4946 		DRM_WARN("Emergency reboot.");
4947 
4948 		ksys_sync_helper();
4949 		emergency_restart();
4950 	}
4951 
4952 	dev_info(adev->dev, "GPU %s begin!\n",
4953 		need_emergency_restart ? "jobs stop":"reset");
4954 
4955 	/*
4956 	 * Here we trylock to avoid a chain of resets executing from
4957 	 * either a trigger by jobs on different adevs in an XGMI hive or jobs on
4958 	 * different schedulers for the same device while this TO handler is running.
4959 	 * We always reset all schedulers for a device and all devices in an XGMI
4960 	 * hive, so that should take care of them too.
4961 	 */
4962 	hive = amdgpu_get_xgmi_hive(adev);
4963 	if (hive) {
4964 		if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
4965 			DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4966 				job ? job->base.id : -1, hive->hive_id);
4967 			amdgpu_put_xgmi_hive(hive);
4968 			if (job && job->vm)
4969 				drm_sched_increase_karma(&job->base);
4970 			return 0;
4971 		}
4972 		mutex_lock(&hive->hive_lock);
4973 	}
4974 
4975 	reset_context.method = AMD_RESET_METHOD_NONE;
4976 	reset_context.reset_req_dev = adev;
4977 	reset_context.job = job;
4978 	reset_context.hive = hive;
4979 	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
4980 
4981 	/*
4982 	 * Lock the device before we try to operate on the linked list.
4983 	 * If we didn't get the device lock, don't touch the linked list since
4984 	 * others may be iterating over it.
4985 	 */
4986 	r = amdgpu_device_lock_hive_adev(adev, hive);
4987 	if (r) {
4988 		dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
4989 					job ? job->base.id : -1);
4990 
4991 		/* even we skipped this reset, still need to set the job to guilty */
4992 		if (job && job->vm)
4993 			drm_sched_increase_karma(&job->base);
4994 		goto skip_recovery;
4995 	}
4996 
4997 	/*
4998 	 * Build list of devices to reset.
4999 	 * In case we are in XGMI hive mode, resort the device list
5000 	 * to put adev in the 1st position.
5001 	 */
5002 	INIT_LIST_HEAD(&device_list);
5003 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
5004 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
5005 			list_add_tail(&tmp_adev->reset_list, &device_list);
5006 		if (!list_is_first(&adev->reset_list, &device_list))
5007 			list_rotate_to_front(&adev->reset_list, &device_list);
5008 		device_list_handle = &device_list;
5009 	} else {
5010 		list_add_tail(&adev->reset_list, &device_list);
5011 		device_list_handle = &device_list;
5012 	}
5013 
5014 	/* block all schedulers and reset given job's ring */
5015 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5016 		/*
5017 		 * Try to put the audio codec into suspend state
5018 		 * before the gpu reset starts.
5019 		 *
5020 		 * The power domain of the graphics device is
5021 		 * shared with the AZ power domain. Without this,
5022 		 * we may change the audio hardware from behind
5023 		 * the audio driver's back. That would trigger
5024 		 * some audio codec errors.
5025 		 */
5026 		if (!amdgpu_device_suspend_display_audio(tmp_adev))
5027 			audio_suspended = true;
5028 
5029 		amdgpu_ras_set_error_query_ready(tmp_adev, false);
5030 
5031 		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5032 
5033 		if (!amdgpu_sriov_vf(tmp_adev))
5034 			amdgpu_amdkfd_pre_reset(tmp_adev);
5035 
5036 		/*
5037 		 * Mark these ASICs to be reset as untracked first,
5038 		 * and add them back after the reset has completed.
5039 		 */
5040 		amdgpu_unregister_gpu_instance(tmp_adev);
5041 
5042 		amdgpu_fbdev_set_suspend(tmp_adev, 1);
5043 
5044 		/* disable ras on ALL IPs */
5045 		if (!need_emergency_restart &&
5046 		      amdgpu_device_ip_need_full_reset(tmp_adev))
5047 			amdgpu_ras_suspend(tmp_adev);
5048 
5049 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5050 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5051 
5052 			if (!ring || !ring->sched.thread)
5053 				continue;
5054 
5055 			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5056 
5057 			if (need_emergency_restart)
5058 				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5059 		}
5060 		atomic_inc(&tmp_adev->gpu_reset_counter);
5061 	}
5062 
5063 	if (need_emergency_restart)
5064 		goto skip_sched_resume;
5065 
5066 	/*
5067 	 * Must check guilty signal here since after this point all old
5068 	 * HW fences are force signaled.
5069 	 *
5070 	 * job->base holds a reference to parent fence
5071 	 */
5072 	if (job && job->base.s_fence->parent &&
5073 	    dma_fence_is_signaled(job->base.s_fence->parent)) {
5074 		job_signaled = true;
5075 		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5076 		goto skip_hw_reset;
5077 	}
5078 
5079 retry:	/* Rest of adevs pre asic reset from XGMI hive. */
5080 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5081 		r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
5082 		/* TODO: should we stop here? */
5083 		if (r) {
5084 			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err %d for drm dev %s",
5085 				r, adev_to_drm(tmp_adev)->unique);
5086 			tmp_adev->asic_reset_res = r;
5087 		}
5088 	}
5089 
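	/*
	 * Snapshot the VRAM-lost counter before the actual ASIC reset so that
	 * we can tell below whether this reset caused VRAM contents to be lost.
	 */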
5090 	tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
5091 	/* Actual ASIC resets if needed. */
5092 	/* TODO Implement XGMI hive reset logic for SRIOV */
5093 	if (amdgpu_sriov_vf(adev)) {
5094 		r = amdgpu_device_reset_sriov(adev, job ? false : true);
5095 		if (r)
5096 			adev->asic_reset_res = r;
5097 	} else {
5098 		r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
5099 		if (r == -EAGAIN)
5100 			goto retry;
5101 	}
5102 
5103 skip_hw_reset:
5104 
5105 	/* Post ASIC reset for all devs. */
5106 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5107 
5108 		/*
5109 		 * Sometimes a later bad compute job can block a good gfx job, as the
5110 		 * gfx and compute rings share internal GC hardware. Add an additional
5111 		 * guilty-job recheck step to find the real guilty job: it synchronously
5112 		 * resubmits the first job and waits for it to be signaled. If the wait
5113 		 * times out, that job is identified as the real guilty one.
5114 		 */
5115 		if (amdgpu_gpu_recovery == 2 &&
5116 			!(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5117 			amdgpu_device_recheck_guilty_jobs(
5118 				tmp_adev, device_list_handle, &reset_context);
5119 
5120 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5121 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5122 
5123 			if (!ring || !ring->sched.thread)
5124 				continue;
5125 
5126 			/* No point in resubmitting jobs if we didn't do a HW reset */
5127 			if (!tmp_adev->asic_reset_res && !job_signaled)
5128 				drm_sched_resubmit_jobs(&ring->sched);
5129 
5130 			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5131 		}
5132 
5133 		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
5134 			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5135 		}
5136 
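		/* The per-device reset status was consumed above, clear it for the next reset. */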
5137 		tmp_adev->asic_reset_res = 0;
5138 
5139 		if (r) {
5140 			/* Bad news: how do we tell userspace about this? */
5141 			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5142 			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5143 		} else {
5144 			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5145 			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5146 				DRM_WARN("smart shift update failed\n");
5147 		}
5148 	}
5149 
5150 skip_sched_resume:
5151 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5152 		/* unlock kfd: SRIOV would do it separately */
5153 		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5154 			amdgpu_amdkfd_post_reset(tmp_adev);
5155 
5156 		/* kfd_post_reset will do nothing if the kfd device is not initialized,
5157 		 * so bring up kfd here if it was not initialized before.
5158 		 */
5159 		if (!adev->kfd.init_complete)
5160 			amdgpu_amdkfd_device_init(adev);
5161 
5162 		if (audio_suspended)
5163 			amdgpu_device_resume_display_audio(tmp_adev);
5164 		amdgpu_device_unlock_adev(tmp_adev);
5165 	}
5166 
5167 skip_recovery:
5168 	if (hive) {
5169 		atomic_set(&hive->in_reset, 0);
5170 		mutex_unlock(&hive->hive_lock);
5171 		amdgpu_put_xgmi_hive(hive);
5172 	}
5173 
5174 	if (r && r != -EAGAIN)
5175 		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5176 	return r;
5177 }
5178 
5179 /**
5180  * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5181  *
5182  * @adev: amdgpu_device pointer
5183  *
5184  * Fetches and stores in the driver the PCIE capabilities (gen speed
5185  * and lanes) of the slot the device is in. Handles APUs and
5186  * virtualized environments where PCIE config space may not be available.
5187  */
5188 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5189 {
5190 	struct pci_dev *pdev;
5191 	enum pci_bus_speed speed_cap, platform_speed_cap;
5192 	enum pcie_link_width platform_link_width;
5193 
5194 	if (amdgpu_pcie_gen_cap)
5195 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5196 
5197 	if (amdgpu_pcie_lane_cap)
5198 		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5199 
5200 	/* covers APUs as well */
5201 	if (pci_is_root_bus(adev->pdev->bus)) {
5202 		if (adev->pm.pcie_gen_mask == 0)
5203 			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5204 		if (adev->pm.pcie_mlw_mask == 0)
5205 			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5206 		return;
5207 	}
5208 
5209 	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5210 		return;
5211 
5212 	pcie_bandwidth_available(adev->pdev, NULL,
5213 				 &platform_speed_cap, &platform_link_width);
5214 
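	/*
	 * Build cumulative link speed masks: a device or platform that supports
	 * a given PCIe generation is assumed to also support all lower ones.
	 */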
5215 	if (adev->pm.pcie_gen_mask == 0) {
5216 		/* asic caps */
5217 		pdev = adev->pdev;
5218 		speed_cap = pcie_get_speed_cap(pdev);
5219 		if (speed_cap == PCI_SPEED_UNKNOWN) {
5220 			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5221 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5222 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5223 		} else {
5224 			if (speed_cap == PCIE_SPEED_32_0GT)
5225 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5226 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5227 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5228 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5229 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5230 			else if (speed_cap == PCIE_SPEED_16_0GT)
5231 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5232 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5233 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5234 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5235 			else if (speed_cap == PCIE_SPEED_8_0GT)
5236 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5237 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5238 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5239 			else if (speed_cap == PCIE_SPEED_5_0GT)
5240 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5241 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5242 			else
5243 				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5244 		}
5245 		/* platform caps */
5246 		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5247 			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5248 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5249 		} else {
5250 			if (platform_speed_cap == PCIE_SPEED_32_0GT)
5251 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5252 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5253 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5254 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5255 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5256 			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5257 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5258 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5259 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5260 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5261 			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5262 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5263 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5264 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5265 			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5266 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5267 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5268 			else
5269 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5270 
5271 		}
5272 	}
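	/* Same idea for the link width mask: each width implies all narrower ones. */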
5273 	if (adev->pm.pcie_mlw_mask == 0) {
5274 		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5275 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5276 		} else {
5277 			switch (platform_link_width) {
5278 			case PCIE_LNK_X32:
5279 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5280 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5281 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5282 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5283 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5284 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5285 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5286 				break;
5287 			case PCIE_LNK_X16:
5288 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5289 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5290 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5291 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5292 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5293 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5294 				break;
5295 			case PCIE_LNK_X12:
5296 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5297 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5298 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5299 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5300 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5301 				break;
5302 			case PCIE_LNK_X8:
5303 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5304 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5305 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5306 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5307 				break;
5308 			case PCIE_LNK_X4:
5309 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5310 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5311 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5312 				break;
5313 			case PCIE_LNK_X2:
5314 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5315 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5316 				break;
5317 			case PCIE_LNK_X1:
5318 				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5319 				break;
5320 			default:
5321 				break;
5322 			}
5323 		}
5324 	}
5325 }
5326 
5327 int amdgpu_device_baco_enter(struct drm_device *dev)
5328 {
5329 	struct amdgpu_device *adev = drm_to_adev(dev);
5330 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5331 
5332 	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5333 		return -ENOTSUPP;
5334 
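	/*
	 * With RAS enabled, keep doorbell interrupts disabled while the device
	 * is in BACO; they are re-enabled again in amdgpu_device_baco_exit().
	 */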
5335 	if (ras && adev->ras_enabled &&
5336 	    adev->nbio.funcs->enable_doorbell_interrupt)
5337 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5338 
5339 	return amdgpu_dpm_baco_enter(adev);
5340 }
5341 
5342 int amdgpu_device_baco_exit(struct drm_device *dev)
5343 {
5344 	struct amdgpu_device *adev = drm_to_adev(dev);
5345 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5346 	int ret = 0;
5347 
5348 	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5349 		return -ENOTSUPP;
5350 
5351 	ret = amdgpu_dpm_baco_exit(adev);
5352 	if (ret)
5353 		return ret;
5354 
5355 	if (ras && adev->ras_enabled &&
5356 	    adev->nbio.funcs->enable_doorbell_interrupt)
5357 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5358 
5359 	if (amdgpu_passthrough(adev) &&
5360 	    adev->nbio.funcs->clear_doorbell_interrupt)
5361 		adev->nbio.funcs->clear_doorbell_interrupt(adev);
5362 
5363 	return 0;
5364 }
5365 
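/**
 * amdgpu_cancel_all_tdr - cancel all pending scheduler timeout work
 * @adev: amdgpu_device pointer
 *
 * Cancels and waits for the timeout (TDR) work of every ring's scheduler.
 */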
5366 static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
5367 {
5368 	int i;
5369 
5370 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5371 		struct amdgpu_ring *ring = adev->rings[i];
5372 
5373 		if (!ring || !ring->sched.thread)
5374 			continue;
5375 
5376 		cancel_delayed_work_sync(&ring->sched.work_tdr);
5377 	}
5378 }
5379 
5380 /**
5381  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5382  * @pdev: PCI device struct
5383  * @state: PCI channel state
5384  *
5385  * Description: Called when a PCI error is detected.
5386  *
5387  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5388  */
5389 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5390 {
5391 	struct drm_device *dev = pci_get_drvdata(pdev);
5392 	struct amdgpu_device *adev = drm_to_adev(dev);
5393 	int i;
5394 
5395 	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5396 
5397 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
5398 		DRM_WARN("No support for XGMI hive yet...");
5399 		return PCI_ERS_RESULT_DISCONNECT;
5400 	}
5401 
5402 	adev->pci_channel_state = state;
5403 
5404 	switch (state) {
5405 	case pci_channel_io_normal:
5406 		return PCI_ERS_RESULT_CAN_RECOVER;
5407 	/* Fatal error, prepare for slot reset */
5408 	case pci_channel_io_frozen:
5409 		/*
5410 		 * Cancel and wait for all TDRs in progress if we fail to
5411 		 * set adev->in_gpu_reset in amdgpu_device_lock_adev.
5412 		 *
5413 		 * Locking adev->reset_sem prevents any external access
5414 		 * to the GPU during PCI error recovery.
5415 		 */
5416 		while (!amdgpu_device_lock_adev(adev, NULL))
5417 			amdgpu_cancel_all_tdr(adev);
5418 
5419 		/*
5420 		 * Block any work scheduling as we do for regular GPU reset
5421 		 * for the duration of the recovery
5422 		 */
5423 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5424 			struct amdgpu_ring *ring = adev->rings[i];
5425 
5426 			if (!ring || !ring->sched.thread)
5427 				continue;
5428 
5429 			drm_sched_stop(&ring->sched, NULL);
5430 		}
5431 		atomic_inc(&adev->gpu_reset_counter);
5432 		return PCI_ERS_RESULT_NEED_RESET;
5433 	case pci_channel_io_perm_failure:
5434 		/* Permanent error, prepare for device removal */
5435 		return PCI_ERS_RESULT_DISCONNECT;
5436 	}
5437 
5438 	return PCI_ERS_RESULT_NEED_RESET;
5439 }
5440 
5441 /**
5442  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5443  * @pdev: pointer to PCI device
5444  */
5445 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5446 {
5447 
5448 	DRM_INFO("PCI error: mmio enabled callback!!\n");
5449 
5450 	/* TODO - dump whatever for debugging purposes */
5451 
5452 	/* This is called only if amdgpu_pci_error_detected returns
5453 	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5454 	 * works, so there is no need to reset the slot.
5455 	 */
5456 
5457 	return PCI_ERS_RESULT_RECOVERED;
5458 }
5459 
5460 /**
5461  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5462  * @pdev: PCI device struct
5463  *
5464  * Description: This routine is called by the pci error recovery
5465  * code after the PCI slot has been reset, just before we
5466  * should resume normal operations.
5467  */
5468 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5469 {
5470 	struct drm_device *dev = pci_get_drvdata(pdev);
5471 	struct amdgpu_device *adev = drm_to_adev(dev);
5472 	int r, i;
5473 	struct amdgpu_reset_context reset_context;
5474 	u32 memsize;
5475 	struct list_head device_list;
5476 
5477 	DRM_INFO("PCI error: slot reset callback!!\n");
5478 
5479 	memset(&reset_context, 0, sizeof(reset_context));
5480 
5481 	INIT_LIST_HEAD(&device_list);
5482 	list_add_tail(&adev->reset_list, &device_list);
5483 
5484 	/* wait for asic to come out of reset */
5485 	msleep(500);
5486 
5487 	/* Restore the PCI config space */
5488 	amdgpu_device_load_pci_state(pdev);
5489 
5490 	/* confirm ASIC came out of reset */
5491 	for (i = 0; i < adev->usec_timeout; i++) {
5492 		memsize = amdgpu_asic_get_config_memsize(adev);
5493 
5494 		if (memsize != 0xffffffff)
5495 			break;
5496 		udelay(1);
5497 	}
5498 	if (memsize == 0xffffffff) {
5499 		r = -ETIME;
5500 		goto out;
5501 	}
5502 
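	/*
	 * The PCI slot reset has already reset the ASIC, so request a full
	 * re-initialization of the IP blocks but skip another HW reset.
	 */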
5503 	reset_context.method = AMD_RESET_METHOD_NONE;
5504 	reset_context.reset_req_dev = adev;
5505 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5506 	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5507 
5508 	adev->no_hw_access = true;
5509 	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5510 	adev->no_hw_access = false;
5511 	if (r)
5512 		goto out;
5513 
5514 	r = amdgpu_do_asic_reset(&device_list, &reset_context);
5515 
5516 out:
5517 	if (!r) {
5518 		if (amdgpu_device_cache_pci_state(adev->pdev))
5519 			pci_restore_state(adev->pdev);
5520 
5521 		DRM_INFO("PCIe error recovery succeeded\n");
5522 	} else {
5523 		DRM_ERROR("PCIe error recovery failed, err:%d", r);
5524 		amdgpu_device_unlock_adev(adev);
5525 	}
5526 
5527 	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5528 }
5529 
5530 /**
5531  * amdgpu_pci_resume() - resume normal ops after PCI reset
5532  * @pdev: pointer to PCI device
5533  *
5534  * Called when the error recovery driver tells us that its
5535  * OK to resume normal operation.
5536  */
5537 void amdgpu_pci_resume(struct pci_dev *pdev)
5538 {
5539 	struct drm_device *dev = pci_get_drvdata(pdev);
5540 	struct amdgpu_device *adev = drm_to_adev(dev);
5541 	int i;
5542 
5544 	DRM_INFO("PCI error: resume callback!!\n");
5545 
5546 	/* Only continue execution for the case of pci_channel_io_frozen */
5547 	if (adev->pci_channel_state != pci_channel_io_frozen)
5548 		return;
5549 
5550 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5551 		struct amdgpu_ring *ring = adev->rings[i];
5552 
5553 		if (!ring || !ring->sched.thread)
5554 			continue;
5555 
5557 		drm_sched_resubmit_jobs(&ring->sched);
5558 		drm_sched_start(&ring->sched, true);
5559 	}
5560 
5561 	amdgpu_device_unlock_adev(adev);
5562 }
5563 
5564 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5565 {
5566 	struct drm_device *dev = pci_get_drvdata(pdev);
5567 	struct amdgpu_device *adev = drm_to_adev(dev);
5568 	int r;
5569 
5570 	r = pci_save_state(pdev);
5571 	if (!r) {
5572 		kfree(adev->pci_state);
5573 
5574 		adev->pci_state = pci_store_saved_state(pdev);
5575 
5576 		if (!adev->pci_state) {
5577 			DRM_ERROR("Failed to store PCI saved state");
5578 			return false;
5579 		}
5580 	} else {
5581 		DRM_WARN("Failed to save PCI state, err:%d\n", r);
5582 		return false;
5583 	}
5584 
5585 	return true;
5586 }
5587 
5588 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5589 {
5590 	struct drm_device *dev = pci_get_drvdata(pdev);
5591 	struct amdgpu_device *adev = drm_to_adev(dev);
5592 	int r;
5593 
5594 	if (!adev->pci_state)
5595 		return false;
5596 
5597 	r = pci_load_saved_state(pdev, adev->pci_state);
5598 
5599 	if (!r) {
5600 		pci_restore_state(pdev);
5601 	} else {
5602 		DRM_WARN("Failed to load PCI state, err:%d\n", r);
5603 		return false;
5604 	}
5605 
5606 	return true;
5607 }
5608 
5609 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5610 		struct amdgpu_ring *ring)
5611 {
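	/*
	 * An HDP flush is not issued for APUs or for devices whose memory
	 * controller is connected directly to the CPU.
	 */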
5612 #ifdef CONFIG_X86_64
5613 	if (adev->flags & AMD_IS_APU)
5614 		return;
5615 #endif
5616 	if (adev->gmc.xgmi.connected_to_cpu)
5617 		return;
5618 
5619 	if (ring && ring->funcs->emit_hdp_flush)
5620 		amdgpu_ring_emit_hdp_flush(ring);
5621 	else
5622 		amdgpu_asic_flush_hdp(adev, ring);
5623 }
5624 
5625 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5626 		struct amdgpu_ring *ring)
5627 {
5628 #ifdef CONFIG_X86_64
5629 	if (adev->flags & AMD_IS_APU)
5630 		return;
5631 #endif
5632 	if (adev->gmc.xgmi.connected_to_cpu)
5633 		return;
5634 
5635 	amdgpu_asic_invalidate_hdp(adev, ring);
5636 }
5637