1 /*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
35 #include <linux/devcoredump.h>
36 #include <generated/utsrelease.h>
37 #include <linux/pci-p2pdma.h>
38
39 #include <drm/drm_atomic_helper.h>
40 #include <drm/drm_probe_helper.h>
41 #include <drm/amdgpu_drm.h>
42 #include <linux/vgaarb.h>
43 #include <linux/vga_switcheroo.h>
44 #include <linux/efi.h>
45 #include "amdgpu.h"
46 #include "amdgpu_trace.h"
47 #include "amdgpu_i2c.h"
48 #include "atom.h"
49 #include "amdgpu_atombios.h"
50 #include "amdgpu_atomfirmware.h"
51 #include "amd_pcie.h"
52 #ifdef CONFIG_DRM_AMDGPU_SI
53 #include "si.h"
54 #endif
55 #ifdef CONFIG_DRM_AMDGPU_CIK
56 #include "cik.h"
57 #endif
58 #include "vi.h"
59 #include "soc15.h"
60 #include "nv.h"
61 #include "bif/bif_4_1_d.h"
62 #include <linux/firmware.h>
63 #include "amdgpu_vf_error.h"
64
65 #include "amdgpu_amdkfd.h"
66 #include "amdgpu_pm.h"
67
68 #include "amdgpu_xgmi.h"
69 #include "amdgpu_ras.h"
70 #include "amdgpu_pmu.h"
71 #include "amdgpu_fru_eeprom.h"
72 #include "amdgpu_reset.h"
73
74 #include <linux/suspend.h>
75 #include <drm/task_barrier.h>
76 #include <linux/pm_runtime.h>
77
78 #include <drm/drm_drv.h>
79
80 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
87
88 #define AMDGPU_RESUME_MS 2000
89 #define AMDGPU_MAX_RETRY_LIMIT 2
90 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
91
92 const char *amdgpu_asic_name[] = {
93 "TAHITI",
94 "PITCAIRN",
95 "VERDE",
96 "OLAND",
97 "HAINAN",
98 "BONAIRE",
99 "KAVERI",
100 "KABINI",
101 "HAWAII",
102 "MULLINS",
103 "TOPAZ",
104 "TONGA",
105 "FIJI",
106 "CARRIZO",
107 "STONEY",
108 "POLARIS10",
109 "POLARIS11",
110 "POLARIS12",
111 "VEGAM",
112 "VEGA10",
113 "VEGA12",
114 "VEGA20",
115 "RAVEN",
116 "ARCTURUS",
117 "RENOIR",
118 "ALDEBARAN",
119 "NAVI10",
120 "CYAN_SKILLFISH",
121 "NAVI14",
122 "NAVI12",
123 "SIENNA_CICHLID",
124 "NAVY_FLOUNDER",
125 "VANGOGH",
126 "DIMGREY_CAVEFISH",
127 "BEIGE_GOBY",
128 "YELLOW_CARP",
129 "IP DISCOVERY",
130 "LAST",
131 };
132
133 /**
134 * DOC: pcie_replay_count
135 *
136 * The amdgpu driver provides a sysfs API for reporting the total number
137 * of PCIe replays (NAKs).
138 * The file pcie_replay_count is used for this and returns the total
139 * number of replays as a sum of the NAKs generated and NAKs received.
140 */
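/*
 * Illustrative userspace read of this attribute (a usage sketch only; the
 * exact sysfs path depends on how the card is enumerated):
 *
 *	$ cat /sys/class/drm/card0/device/pcie_replay_count
 *	0
 */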
141
142 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
143 struct device_attribute *attr, char *buf)
144 {
145 struct drm_device *ddev = dev_get_drvdata(dev);
146 struct amdgpu_device *adev = drm_to_adev(ddev);
147 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
148
149 return sysfs_emit(buf, "%llu\n", cnt);
150 }
151
152 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
153 amdgpu_device_get_pcie_replay_count, NULL);
154
155 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
156
157 /**
158 * DOC: product_name
159 *
160 * The amdgpu driver provides a sysfs API for reporting the product name
161 * for the device.
162 * The file product_name is used for this and returns the product name
163 * as returned from the FRU.
164 * NOTE: This is only available for certain server cards
165 */
166
167 static ssize_t amdgpu_device_get_product_name(struct device *dev,
168 struct device_attribute *attr, char *buf)
169 {
170 struct drm_device *ddev = dev_get_drvdata(dev);
171 struct amdgpu_device *adev = drm_to_adev(ddev);
172
173 return sysfs_emit(buf, "%s\n", adev->product_name);
174 }
175
176 static DEVICE_ATTR(product_name, S_IRUGO,
177 amdgpu_device_get_product_name, NULL);
178
179 /**
180 * DOC: product_number
181 *
182 * The amdgpu driver provides a sysfs API for reporting the part number
183 * for the device.
184 * The file product_number is used for this and returns the part number
185 * as returned from the FRU.
186 * NOTE: This is only available for certain server cards
187 */
188
189 static ssize_t amdgpu_device_get_product_number(struct device *dev,
190 struct device_attribute *attr, char *buf)
191 {
192 struct drm_device *ddev = dev_get_drvdata(dev);
193 struct amdgpu_device *adev = drm_to_adev(ddev);
194
195 return sysfs_emit(buf, "%s\n", adev->product_number);
196 }
197
198 static DEVICE_ATTR(product_number, S_IRUGO,
199 amdgpu_device_get_product_number, NULL);
200
201 /**
202 * DOC: serial_number
203 *
204 * The amdgpu driver provides a sysfs API for reporting the serial number
205 * for the device.
206 * The file serial_number is used for this and returns the serial number
207 * as returned from the FRU.
208 * NOTE: This is only available for certain server cards
209 */
210
211 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
212 struct device_attribute *attr, char *buf)
213 {
214 struct drm_device *ddev = dev_get_drvdata(dev);
215 struct amdgpu_device *adev = drm_to_adev(ddev);
216
217 return sysfs_emit(buf, "%s\n", adev->serial);
218 }
219
220 static DEVICE_ATTR(serial_number, S_IRUGO,
221 amdgpu_device_get_serial_number, NULL);
222
223 /**
224 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
225 *
226 * @dev: drm_device pointer
227 *
228 * Returns true if the device is a dGPU with ATPX power control,
229 * otherwise return false.
230 */
231 bool amdgpu_device_supports_px(struct drm_device *dev)
232 {
233 struct amdgpu_device *adev = drm_to_adev(dev);
234
235 if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
236 return true;
237 return false;
238 }
239
240 /**
241 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
242 *
243 * @dev: drm_device pointer
244 *
245 * Returns true if the device is a dGPU with ACPI power control,
246 * otherwise return false.
247 */
248 bool amdgpu_device_supports_boco(struct drm_device *dev)
249 {
250 struct amdgpu_device *adev = drm_to_adev(dev);
251
252 if (adev->has_pr3 ||
253 ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
254 return true;
255 return false;
256 }
257
258 /**
259 * amdgpu_device_supports_baco - Does the device support BACO
260 *
261 * @dev: drm_device pointer
262 *
263 * Returns true if the device supports BACO,
264 * otherwise return false.
265 */
266 bool amdgpu_device_supports_baco(struct drm_device *dev)
267 {
268 struct amdgpu_device *adev = drm_to_adev(dev);
269
270 return amdgpu_asic_supports_baco(adev);
271 }
272
273 /**
274 * amdgpu_device_supports_smart_shift - Is the device dGPU with
275 * smart shift support
276 *
277 * @dev: drm_device pointer
278 *
279 * Returns true if the device is a dGPU with Smart Shift support,
280 * otherwise returns false.
281 */
282 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
283 {
284 return (amdgpu_device_supports_boco(dev) &&
285 amdgpu_acpi_is_power_shift_control_supported());
286 }
287
288 /*
289 * VRAM access helper functions
290 */
291
292 /**
293 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
294 *
295 * @adev: amdgpu_device pointer
296 * @pos: offset of the buffer in vram
297 * @buf: virtual address of the buffer in system memory
298 * @size: read/write size, @buf must be at least @size bytes
299 * @write: true - write to vram, otherwise - read from vram
300 */
301 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
302 void *buf, size_t size, bool write)
303 {
304 unsigned long flags;
305 uint32_t hi = ~0, tmp = 0;
306 uint32_t *data = buf;
307 uint64_t last;
308 int idx;
309
310 if (!drm_dev_enter(adev_to_drm(adev), &idx))
311 return;
312
313 BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
314
315 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
316 for (last = pos + size; pos < last; pos += 4) {
317 tmp = pos >> 31;
318
319 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
320 if (tmp != hi) {
321 WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
322 hi = tmp;
323 }
324 if (write)
325 WREG32_NO_KIQ(mmMM_DATA, *data++);
326 else
327 *data++ = RREG32_NO_KIQ(mmMM_DATA);
328 }
329
330 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
331 drm_dev_exit(idx);
332 }
333
334 /**
335 * amdgpu_device_aper_access - access vram by the vram aperture
336 *
337 * @adev: amdgpu_device pointer
338 * @pos: offset of the buffer in vram
339 * @buf: virtual address of the buffer in system memory
340 * @size: read/write size, @buf must be at least @size bytes
341 * @write: true - write to vram, otherwise - read from vram
342 *
343 * The return value is the number of bytes transferred.
344 */
345 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
346 void *buf, size_t size, bool write)
347 {
348 #ifdef CONFIG_64BIT
349 void __iomem *addr;
350 size_t count = 0;
351 uint64_t last;
352
353 if (!adev->mman.aper_base_kaddr)
354 return 0;
355
356 last = min(pos + size, adev->gmc.visible_vram_size);
357 if (last > pos) {
358 addr = adev->mman.aper_base_kaddr + pos;
359 count = last - pos;
360
361 if (write) {
362 memcpy_toio(addr, buf, count);
363 mb();
364 amdgpu_device_flush_hdp(adev, NULL);
365 } else {
366 amdgpu_device_invalidate_hdp(adev, NULL);
367 mb();
368 memcpy_fromio(buf, addr, count);
369 }
370
371 }
372
373 return count;
374 #else
375 return 0;
376 #endif
377 }
378
379 /**
380 * amdgpu_device_vram_access - read/write a buffer in vram
381 *
382 * @adev: amdgpu_device pointer
383 * @pos: offset of the buffer in vram
384 * @buf: virtual address of the buffer in system memory
385 * @size: read/write size, @buf must be at least @size bytes
386 * @write: true - write to vram, otherwise - read from vram
387 */
388 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
389 void *buf, size_t size, bool write)
390 {
391 size_t count;
392
393 /* try using the vram aperture to access vram first */
394 count = amdgpu_device_aper_access(adev, pos, buf, size, write);
395 size -= count;
396 if (size) {
397 /* use MM to access the rest of vram */
398 pos += count;
399 buf += count;
400 amdgpu_device_mm_access(adev, pos, buf, size, write);
401 }
402 }
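/*
 * Usage sketch for the helper above (illustrative only; 'adev' and the VRAM
 * offset are assumed to be valid in the caller's context):
 *
 *	u32 val;
 *
 *	amdgpu_device_vram_access(adev, 0x1000, &val, sizeof(val), false);
 */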
403
404 /*
405 * register access helper functions.
406 */
407
408 /* Check if hw access should be skipped because of hotplug or device error */
409 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
410 {
411 if (adev->no_hw_access)
412 return true;
413
414 #ifdef CONFIG_LOCKDEP
415 /*
416 * This is a bit complicated to understand, so worth a comment. What we assert
417 * here is that the GPU reset is not running on another thread in parallel.
418 *
419 * For this we trylock the read side of the reset semaphore, if that succeeds
420 * we know that the reset is not running in parallel.
421 *
422 * If the trylock fails we assert that we are either already holding the read
423 * side of the lock or are the reset thread itself and hold the write side of
424 * the lock.
425 */
426 if (in_task()) {
427 if (down_read_trylock(&adev->reset_domain->sem))
428 up_read(&adev->reset_domain->sem);
429 else
430 lockdep_assert_held(&adev->reset_domain->sem);
431 }
432 #endif
433 return false;
434 }
435
436 /**
437 * amdgpu_device_rreg - read a memory mapped IO or indirect register
438 *
439 * @adev: amdgpu_device pointer
440 * @reg: dword aligned register offset
441 * @acc_flags: access flags which require special behavior
442 *
443 * Returns the 32 bit value from the offset specified.
444 */
445 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
446 uint32_t reg, uint32_t acc_flags)
447 {
448 uint32_t ret;
449
450 if (amdgpu_device_skip_hw_access(adev))
451 return 0;
452
453 if ((reg * 4) < adev->rmmio_size) {
454 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
455 amdgpu_sriov_runtime(adev) &&
456 down_read_trylock(&adev->reset_domain->sem)) {
457 ret = amdgpu_kiq_rreg(adev, reg);
458 up_read(&adev->reset_domain->sem);
459 } else {
460 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
461 }
462 } else {
463 ret = adev->pcie_rreg(adev, reg * 4);
464 }
465
466 trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
467
468 return ret;
469 }
470
471 /*
472 * MMIO register read with bytes helper functions
473 * @offset: byte offset from MMIO start
474 *
475 */
476
477 /**
478 * amdgpu_mm_rreg8 - read a memory mapped IO register
479 *
480 * @adev: amdgpu_device pointer
481 * @offset: byte aligned register offset
482 *
483 * Returns the 8 bit value from the offset specified.
484 */
485 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
486 {
487 if (amdgpu_device_skip_hw_access(adev))
488 return 0;
489
490 if (offset < adev->rmmio_size)
491 return (readb(adev->rmmio + offset));
492 BUG();
493 }
494
495 /*
496 * MMIO register write with bytes helper functions
497 * @offset: byte offset from MMIO start
498 * @value: the value to be written to the register
499 *
500 */
501 /**
502 * amdgpu_mm_wreg8 - write a memory mapped IO register
503 *
504 * @adev: amdgpu_device pointer
505 * @offset: byte aligned register offset
506 * @value: 8 bit value to write
507 *
508 * Writes the value specified to the offset specified.
509 */
510 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
511 {
512 if (amdgpu_device_skip_hw_access(adev))
513 return;
514
515 if (offset < adev->rmmio_size)
516 writeb(value, adev->rmmio + offset);
517 else
518 BUG();
519 }
520
521 /**
522 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
523 *
524 * @adev: amdgpu_device pointer
525 * @reg: dword aligned register offset
526 * @v: 32 bit value to write to the register
527 * @acc_flags: access flags which require special behavior
528 *
529 * Writes the value specified to the offset specified.
530 */
531 void amdgpu_device_wreg(struct amdgpu_device *adev,
532 uint32_t reg, uint32_t v,
533 uint32_t acc_flags)
534 {
535 if (amdgpu_device_skip_hw_access(adev))
536 return;
537
538 if ((reg * 4) < adev->rmmio_size) {
539 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
540 amdgpu_sriov_runtime(adev) &&
541 down_read_trylock(&adev->reset_domain->sem)) {
542 amdgpu_kiq_wreg(adev, reg, v);
543 up_read(&adev->reset_domain->sem);
544 } else {
545 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
546 }
547 } else {
548 adev->pcie_wreg(adev, reg * 4, v);
549 }
550
551 trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
552 }
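/*
 * Note: most callers do not use amdgpu_device_rreg()/amdgpu_device_wreg()
 * directly but go through the RREG32()/WREG32() macros, roughly like the
 * sketch below (SOME_ENABLE_BIT is a placeholder, not a real define):
 *
 *	tmp = RREG32(reg);
 *	tmp |= SOME_ENABLE_BIT;
 *	WREG32(reg, tmp);
 */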
553
554 /**
555 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
556 *
557 * @adev: amdgpu_device pointer
558 * @reg: mmio/rlc register
559 * @v: value to write
560 *
561 * this function is invoked only for the debugfs register access
562 */
563 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
564 uint32_t reg, uint32_t v)
565 {
566 if (amdgpu_device_skip_hw_access(adev))
567 return;
568
569 if (amdgpu_sriov_fullaccess(adev) &&
570 adev->gfx.rlc.funcs &&
571 adev->gfx.rlc.funcs->is_rlcg_access_range) {
572 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
573 return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
574 } else if ((reg * 4) >= adev->rmmio_size) {
575 adev->pcie_wreg(adev, reg * 4, v);
576 } else {
577 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
578 }
579 }
580
581 /**
582 * amdgpu_mm_rdoorbell - read a doorbell dword
583 *
584 * @adev: amdgpu_device pointer
585 * @index: doorbell index
586 *
587 * Returns the value in the doorbell aperture at the
588 * requested doorbell index (CIK).
589 */
590 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
591 {
592 if (amdgpu_device_skip_hw_access(adev))
593 return 0;
594
595 if (index < adev->doorbell.num_doorbells) {
596 return readl(adev->doorbell.ptr + index);
597 } else {
598 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
599 return 0;
600 }
601 }
602
603 /**
604 * amdgpu_mm_wdoorbell - write a doorbell dword
605 *
606 * @adev: amdgpu_device pointer
607 * @index: doorbell index
608 * @v: value to write
609 *
610 * Writes @v to the doorbell aperture at the
611 * requested doorbell index (CIK).
612 */
613 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
614 {
615 if (amdgpu_device_skip_hw_access(adev))
616 return;
617
618 if (index < adev->doorbell.num_doorbells) {
619 writel(v, adev->doorbell.ptr + index);
620 } else {
621 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
622 }
623 }
624
625 /**
626 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
627 *
628 * @adev: amdgpu_device pointer
629 * @index: doorbell index
630 *
631 * Returns the value in the doorbell aperture at the
632 * requested doorbell index (VEGA10+).
633 */
634 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
635 {
636 if (amdgpu_device_skip_hw_access(adev))
637 return 0;
638
639 if (index < adev->doorbell.num_doorbells) {
640 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
641 } else {
642 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
643 return 0;
644 }
645 }
646
647 /**
648 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
649 *
650 * @adev: amdgpu_device pointer
651 * @index: doorbell index
652 * @v: value to write
653 *
654 * Writes @v to the doorbell aperture at the
655 * requested doorbell index (VEGA10+).
656 */
657 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
658 {
659 if (amdgpu_device_skip_hw_access(adev))
660 return;
661
662 if (index < adev->doorbell.num_doorbells) {
663 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
664 } else {
665 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
666 }
667 }
668
669 /**
670 * amdgpu_device_indirect_rreg - read an indirect register
671 *
672 * @adev: amdgpu_device pointer
673 * @pcie_index: mmio register offset
674 * @pcie_data: mmio register offset
675 * @reg_addr: indirect register address to read from
676 *
677 * Returns the value of indirect register @reg_addr
678 */
679 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
680 u32 pcie_index, u32 pcie_data,
681 u32 reg_addr)
682 {
683 unsigned long flags;
684 u32 r;
685 void __iomem *pcie_index_offset;
686 void __iomem *pcie_data_offset;
687
688 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
689 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
690 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
691
692 writel(reg_addr, pcie_index_offset);
693 readl(pcie_index_offset);
694 r = readl(pcie_data_offset);
695 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
696
697 return r;
698 }
699
700 /**
701 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
702 *
703 * @adev: amdgpu_device pointer
704 * @pcie_index: mmio register offset
705 * @pcie_data: mmio register offset
706 * @reg_addr: indirect register address to read from
707 *
708 * Returns the value of indirect register @reg_addr
709 */
710 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
711 u32 pcie_index, u32 pcie_data,
712 u32 reg_addr)
713 {
714 unsigned long flags;
715 u64 r;
716 void __iomem *pcie_index_offset;
717 void __iomem *pcie_data_offset;
718
719 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
720 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
721 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
722
723 /* read low 32 bits */
724 writel(reg_addr, pcie_index_offset);
725 readl(pcie_index_offset);
726 r = readl(pcie_data_offset);
727 /* read high 32 bits */
728 writel(reg_addr + 4, pcie_index_offset);
729 readl(pcie_index_offset);
730 r |= ((u64)readl(pcie_data_offset) << 32);
731 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
732
733 return r;
734 }
735
736 /**
737 * amdgpu_device_indirect_wreg - write an indirect register address
738 *
739 * @adev: amdgpu_device pointer
740 * @pcie_index: mmio register offset
741 * @pcie_data: mmio register offset
742 * @reg_addr: indirect register offset
743 * @reg_data: indirect register data
744 *
745 */
746 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
747 u32 pcie_index, u32 pcie_data,
748 u32 reg_addr, u32 reg_data)
749 {
750 unsigned long flags;
751 void __iomem *pcie_index_offset;
752 void __iomem *pcie_data_offset;
753
754 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
755 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
756 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
757
758 writel(reg_addr, pcie_index_offset);
759 readl(pcie_index_offset);
760 writel(reg_data, pcie_data_offset);
761 readl(pcie_data_offset);
762 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
763 }
764
765 /**
766 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
767 *
768 * @adev: amdgpu_device pointer
769 * @pcie_index: mmio register offset
770 * @pcie_data: mmio register offset
771 * @reg_addr: indirect register offset
772 * @reg_data: indirect register data
773 *
774 */
775 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
776 u32 pcie_index, u32 pcie_data,
777 u32 reg_addr, u64 reg_data)
778 {
779 unsigned long flags;
780 void __iomem *pcie_index_offset;
781 void __iomem *pcie_data_offset;
782
783 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
784 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
785 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
786
787 /* write low 32 bits */
788 writel(reg_addr, pcie_index_offset);
789 readl(pcie_index_offset);
790 writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
791 readl(pcie_data_offset);
792 /* write high 32 bits */
793 writel(reg_addr + 4, pcie_index_offset);
794 readl(pcie_index_offset);
795 writel((u32)(reg_data >> 32), pcie_data_offset);
796 readl(pcie_data_offset);
797 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
798 }
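/*
 * ASIC specific code typically wraps the indirect helpers above with its own
 * PCIE index/data register offsets for the pcie_rreg/pcie_wreg callbacks.
 * A rough sketch (the register names here are only an example):
 *
 *	static u32 example_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *	{
 *		return amdgpu_device_indirect_rreg(adev, mmPCIE_INDEX2,
 *						   mmPCIE_DATA2, reg);
 *	}
 */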
799
800 /**
801 * amdgpu_invalid_rreg - dummy reg read function
802 *
803 * @adev: amdgpu_device pointer
804 * @reg: offset of register
805 *
806 * Dummy register read function. Used for register blocks
807 * that certain asics don't have (all asics).
808 * Returns the value in the register.
809 */
810 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
811 {
812 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
813 BUG();
814 return 0;
815 }
816
817 /**
818 * amdgpu_invalid_wreg - dummy reg write function
819 *
820 * @adev: amdgpu_device pointer
821 * @reg: offset of register
822 * @v: value to write to the register
823 *
824 * Dummy register write function. Used for register blocks
825 * that certain asics don't have (all asics).
826 */
827 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
828 {
829 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
830 reg, v);
831 BUG();
832 }
833
834 /**
835 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
836 *
837 * @adev: amdgpu_device pointer
838 * @reg: offset of register
839 *
840 * Dummy register read function. Used for register blocks
841 * that certain asics don't have (all asics).
842 * Returns the value in the register.
843 */
844 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
845 {
846 DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
847 BUG();
848 return 0;
849 }
850
851 /**
852 * amdgpu_invalid_wreg64 - dummy reg write function
853 *
854 * @adev: amdgpu_device pointer
855 * @reg: offset of register
856 * @v: value to write to the register
857 *
858 * Dummy register write function. Used for register blocks
859 * that certain asics don't have (all asics).
860 */
861 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
862 {
863 DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
864 reg, v);
865 BUG();
866 }
867
868 /**
869 * amdgpu_block_invalid_rreg - dummy reg read function
870 *
871 * @adev: amdgpu_device pointer
872 * @block: offset of instance
873 * @reg: offset of register
874 *
875 * Dummy register read function. Used for register blocks
876 * that certain asics don't have (all asics).
877 * Returns the value in the register.
878 */
879 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
880 uint32_t block, uint32_t reg)
881 {
882 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
883 reg, block);
884 BUG();
885 return 0;
886 }
887
888 /**
889 * amdgpu_block_invalid_wreg - dummy reg write function
890 *
891 * @adev: amdgpu_device pointer
892 * @block: offset of instance
893 * @reg: offset of register
894 * @v: value to write to the register
895 *
896 * Dummy register write function. Used for register blocks
897 * that certain asics don't have (all asics).
898 */
899 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
900 uint32_t block,
901 uint32_t reg, uint32_t v)
902 {
903 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
904 reg, block, v);
905 BUG();
906 }
907
908 /**
909 * amdgpu_device_asic_init - Wrapper for atom asic_init
910 *
911 * @adev: amdgpu_device pointer
912 *
913 * Does any asic specific work and then calls atom asic init.
914 */
915 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
916 {
917 amdgpu_asic_pre_asic_init(adev);
918
919 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
920 return amdgpu_atomfirmware_asic_init(adev, true);
921 else
922 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
923 }
924
925 /**
926 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
927 *
928 * @adev: amdgpu_device pointer
929 *
930 * Allocates a scratch page of VRAM for use by various things in the
931 * driver.
932 */
933 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
934 {
935 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
936 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
937 &adev->vram_scratch.robj,
938 &adev->vram_scratch.gpu_addr,
939 (void **)&adev->vram_scratch.ptr);
940 }
941
942 /**
943 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
944 *
945 * @adev: amdgpu_device pointer
946 *
947 * Frees the VRAM scratch page.
948 */
949 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
950 {
951 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
952 }
953
954 /**
955 * amdgpu_device_program_register_sequence - program an array of registers.
956 *
957 * @adev: amdgpu_device pointer
958 * @registers: pointer to the register array
959 * @array_size: size of the register array
960 *
961 * Programs an array of registers with AND and OR masks.
962 * This is a helper for setting golden registers.
963 */
964 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
965 const u32 *registers,
966 const u32 array_size)
967 {
968 u32 tmp, reg, and_mask, or_mask;
969 int i;
970
971 if (array_size % 3)
972 return;
973
974 for (i = 0; i < array_size; i += 3) {
975 reg = registers[i + 0];
976 and_mask = registers[i + 1];
977 or_mask = registers[i + 2];
978
979 if (and_mask == 0xffffffff) {
980 tmp = or_mask;
981 } else {
982 tmp = RREG32(reg);
983 tmp &= ~and_mask;
984 if (adev->family >= AMDGPU_FAMILY_AI)
985 tmp |= (or_mask & and_mask);
986 else
987 tmp |= or_mask;
988 }
989 WREG32(reg, tmp);
990 }
991 }
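/*
 * Golden register arrays are flat triplets of {offset, and_mask, or_mask}.
 * A minimal sketch of how this helper is typically fed (the register offset
 * and values below are placeholders, not real golden settings):
 *
 *	static const u32 example_golden_regs[] = {
 *		mmEXAMPLE_REG, 0xffffffff, 0x00000001,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, example_golden_regs,
 *						ARRAY_SIZE(example_golden_regs));
 */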
992
993 /**
994 * amdgpu_device_pci_config_reset - reset the GPU
995 *
996 * @adev: amdgpu_device pointer
997 *
998 * Resets the GPU using the pci config reset sequence.
999 * Only applicable to asics prior to vega10.
1000 */
1001 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1002 {
1003 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1004 }
1005
1006 /**
1007 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1008 *
1009 * @adev: amdgpu_device pointer
1010 *
1011 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1012 */
1013 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1014 {
1015 return pci_reset_function(adev->pdev);
1016 }
1017
1018 /*
1019 * GPU doorbell aperture helpers function.
1020 */
1021 /**
1022 * amdgpu_device_doorbell_init - Init doorbell driver information.
1023 *
1024 * @adev: amdgpu_device pointer
1025 *
1026 * Init doorbell driver information (CIK)
1027 * Returns 0 on success, error on failure.
1028 */
1029 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1030 {
1031
1032 /* No doorbell on SI hardware generation */
1033 if (adev->asic_type < CHIP_BONAIRE) {
1034 adev->doorbell.base = 0;
1035 adev->doorbell.size = 0;
1036 adev->doorbell.num_doorbells = 0;
1037 adev->doorbell.ptr = NULL;
1038 return 0;
1039 }
1040
1041 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1042 return -EINVAL;
1043
1044 amdgpu_asic_init_doorbell_index(adev);
1045
1046 /* doorbell bar mapping */
1047 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1048 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1049
1050 if (adev->enable_mes) {
1051 adev->doorbell.num_doorbells =
1052 adev->doorbell.size / sizeof(u32);
1053 } else {
1054 adev->doorbell.num_doorbells =
1055 min_t(u32, adev->doorbell.size / sizeof(u32),
1056 adev->doorbell_index.max_assignment+1);
1057 if (adev->doorbell.num_doorbells == 0)
1058 return -EINVAL;
1059
1060 /* For Vega, reserve and map two pages on doorbell BAR since SDMA
1061 * paging queue doorbells use the second page. The
1062 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1063 * doorbells are in the first page. So with paging queue enabled,
1064 * the max num_doorbells needs one extra page (0x400 dwords).
1065 */
1066 if (adev->asic_type >= CHIP_VEGA10)
1067 adev->doorbell.num_doorbells += 0x400;
1068 }
1069
1070 adev->doorbell.ptr = ioremap(adev->doorbell.base,
1071 adev->doorbell.num_doorbells *
1072 sizeof(u32));
1073 if (adev->doorbell.ptr == NULL)
1074 return -ENOMEM;
1075
1076 return 0;
1077 }
1078
1079 /**
1080 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1081 *
1082 * @adev: amdgpu_device pointer
1083 *
1084 * Tear down doorbell driver information (CIK)
1085 */
1086 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1087 {
1088 iounmap(adev->doorbell.ptr);
1089 adev->doorbell.ptr = NULL;
1090 }
1091
1092
1093
1094 /*
1095 * amdgpu_device_wb_*()
1096 * Writeback is the method by which the GPU updates special pages in memory
1097 * with the status of certain GPU events (fences, ring pointers,etc.).
1098 */
1099
1100 /**
1101 * amdgpu_device_wb_fini - Disable Writeback and free memory
1102 *
1103 * @adev: amdgpu_device pointer
1104 *
1105 * Disables Writeback and frees the Writeback memory (all asics).
1106 * Used at driver shutdown.
1107 */
1108 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1109 {
1110 if (adev->wb.wb_obj) {
1111 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1112 &adev->wb.gpu_addr,
1113 (void **)&adev->wb.wb);
1114 adev->wb.wb_obj = NULL;
1115 }
1116 }
1117
1118 /**
1119 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1120 *
1121 * @adev: amdgpu_device pointer
1122 *
1123 * Initializes writeback and allocates writeback memory (all asics).
1124 * Used at driver startup.
1125 * Returns 0 on success or an -error on failure.
1126 */
1127 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1128 {
1129 int r;
1130
1131 if (adev->wb.wb_obj == NULL) {
1132 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1133 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1134 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1135 &adev->wb.wb_obj, &adev->wb.gpu_addr,
1136 (void **)&adev->wb.wb);
1137 if (r) {
1138 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1139 return r;
1140 }
1141
1142 adev->wb.num_wb = AMDGPU_MAX_WB;
1143 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1144
1145 /* clear wb memory */
1146 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1147 }
1148
1149 return 0;
1150 }
1151
1152 /**
1153 * amdgpu_device_wb_get - Allocate a wb entry
1154 *
1155 * @adev: amdgpu_device pointer
1156 * @wb: wb index
1157 *
1158 * Allocate a wb slot for use by the driver (all asics).
1159 * Returns 0 on success or -EINVAL on failure.
1160 */
1161 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1162 {
1163 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1164
1165 if (offset < adev->wb.num_wb) {
1166 __set_bit(offset, adev->wb.used);
1167 *wb = offset << 3; /* convert to dw offset */
1168 return 0;
1169 } else {
1170 return -EINVAL;
1171 }
1172 }
1173
1174 /**
1175 * amdgpu_device_wb_free - Free a wb entry
1176 *
1177 * @adev: amdgpu_device pointer
1178 * @wb: wb index
1179 *
1180 * Free a wb slot allocated for use by the driver (all asics)
1181 */
1182 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1183 {
1184 wb >>= 3;
1185 if (wb < adev->wb.num_wb)
1186 __clear_bit(wb, adev->wb.used);
1187 }
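/*
 * Typical pairing of the wb helpers above (a sketch; error handling and the
 * actual use of the slot are elided):
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		... use the dword at adev->wb.wb[wb] ...
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */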
1188
1189 /**
1190 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1191 *
1192 * @adev: amdgpu_device pointer
1193 *
1194 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1195 * to fail, but if any of the BARs is not accessible after the resize we abort
1196 * driver loading by returning -ENODEV.
1197 */
1198 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1199 {
1200 int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1201 struct pci_bus *root;
1202 struct resource *res;
1203 unsigned i;
1204 u16 cmd;
1205 int r;
1206
1207 /* Bypass for VF */
1208 if (amdgpu_sriov_vf(adev))
1209 return 0;
1210
1211 /* skip if the bios has already enabled large BAR */
1212 if (adev->gmc.real_vram_size &&
1213 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1214 return 0;
1215
1216 /* Check if the root BUS has 64bit memory resources */
1217 root = adev->pdev->bus;
1218 while (root->parent)
1219 root = root->parent;
1220
1221 pci_bus_for_each_resource(root, res, i) {
1222 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1223 res->start > 0x100000000ull)
1224 break;
1225 }
1226
1227 /* Trying to resize is pointless without a root hub window above 4GB */
1228 if (!res)
1229 return 0;
1230
1231 /* Limit the BAR size to what is available */
1232 rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1233 rbar_size);
1234
1235 /* Disable memory decoding while we change the BAR addresses and size */
1236 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1237 pci_write_config_word(adev->pdev, PCI_COMMAND,
1238 cmd & ~PCI_COMMAND_MEMORY);
1239
1240 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1241 amdgpu_device_doorbell_fini(adev);
1242 if (adev->asic_type >= CHIP_BONAIRE)
1243 pci_release_resource(adev->pdev, 2);
1244
1245 pci_release_resource(adev->pdev, 0);
1246
1247 r = pci_resize_resource(adev->pdev, 0, rbar_size);
1248 if (r == -ENOSPC)
1249 DRM_INFO("Not enough PCI address space for a large BAR.");
1250 else if (r && r != -ENOTSUPP)
1251 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1252
1253 pci_assign_unassigned_bus_resources(adev->pdev->bus);
1254
1255 /* When the doorbell or fb BAR isn't available we have no chance of
1256 * using the device.
1257 */
1258 r = amdgpu_device_doorbell_init(adev);
1259 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1260 return -ENODEV;
1261
1262 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1263
1264 return 0;
1265 }
1266
1267 /*
1268 * GPU helpers function.
1269 */
1270 /**
1271 * amdgpu_device_need_post - check if the hw need post or not
1272 *
1273 * @adev: amdgpu_device pointer
1274 *
1275 * Check if the asic has been initialized (all asics) at driver startup
1276 * or post is needed if hw reset is performed.
1277 * Returns true if need or false if not.
1278 */
1279 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1280 {
1281 uint32_t reg;
1282
1283 if (amdgpu_sriov_vf(adev))
1284 return false;
1285
1286 if (amdgpu_passthrough(adev)) {
1287 /* for FIJI: In the whole-GPU pass-through virtualization case, after a VM
1288 * reboot some old SMC firmware still needs the driver to do vPost, otherwise
1289 * the GPU hangs. SMC firmware versions above 22.15 don't have this flaw,
1290 * so force vPost for SMC versions below 22.15.
1291 */
1292 if (adev->asic_type == CHIP_FIJI) {
1293 int err;
1294 uint32_t fw_ver;
1295 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1296 /* force vPost if an error occurred */
1297 if (err)
1298 return true;
1299
1300 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1301 if (fw_ver < 0x00160e00)
1302 return true;
1303 }
1304 }
1305
1306 /* Don't post if we need to reset whole hive on init */
1307 if (adev->gmc.xgmi.pending_reset)
1308 return false;
1309
1310 if (adev->has_hw_reset) {
1311 adev->has_hw_reset = false;
1312 return true;
1313 }
1314
1315 /* bios scratch used on CIK+ */
1316 if (adev->asic_type >= CHIP_BONAIRE)
1317 return amdgpu_atombios_scratch_need_asic_init(adev);
1318
1319 /* check MEM_SIZE for older asics */
1320 reg = amdgpu_asic_get_config_memsize(adev);
1321
1322 if ((reg != 0) && (reg != 0xffffffff))
1323 return false;
1324
1325 return true;
1326 }
1327
1328 /**
1329 * amdgpu_device_should_use_aspm - check if the device should program ASPM
1330 *
1331 * @adev: amdgpu_device pointer
1332 *
1333 * Confirm whether the module parameter and pcie bridge agree that ASPM should
1334 * be set for this device.
1335 *
1336 * Returns true if it should be used or false if not.
1337 */
1338 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1339 {
1340 switch (amdgpu_aspm) {
1341 case -1:
1342 break;
1343 case 0:
1344 return false;
1345 case 1:
1346 return true;
1347 default:
1348 return false;
1349 }
1350 return pcie_aspm_enabled(adev->pdev);
1351 }
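/*
 * The amdgpu.aspm module parameter feeds the check above: booting with
 * "amdgpu.aspm=0" forces ASPM off, "amdgpu.aspm=1" forces it on, and the
 * default (-1) follows whatever the PCIe bridge has already enabled.
 */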
1352
1353 /* if we get transitioned to only one device, take VGA back */
1354 /**
1355 * amdgpu_device_vga_set_decode - enable/disable vga decode
1356 *
1357 * @pdev: PCI device pointer
1358 * @state: enable/disable vga decode
1359 *
1360 * Enable/disable vga decode (all asics).
1361 * Returns VGA resource flags.
1362 */
1363 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1364 bool state)
1365 {
1366 struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1367 amdgpu_asic_set_vga_state(adev, state);
1368 if (state)
1369 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1370 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1371 else
1372 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1373 }
1374
1375 /**
1376 * amdgpu_device_check_block_size - validate the vm block size
1377 *
1378 * @adev: amdgpu_device pointer
1379 *
1380 * Validates the vm block size specified via module parameter.
1381 * The vm block size defines number of bits in page table versus page directory,
1382 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1383 * page table and the remaining bits are in the page directory.
1384 */
1385 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1386 {
1387 /* defines number of bits in page table versus page directory,
1388 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1389 * page table and the remaining bits are in the page directory */
1390 if (amdgpu_vm_block_size == -1)
1391 return;
1392
1393 if (amdgpu_vm_block_size < 9) {
1394 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1395 amdgpu_vm_block_size);
1396 amdgpu_vm_block_size = -1;
1397 }
1398 }
1399
1400 /**
1401 * amdgpu_device_check_vm_size - validate the vm size
1402 *
1403 * @adev: amdgpu_device pointer
1404 *
1405 * Validates the vm size in GB specified via module parameter.
1406 * The VM size is the size of the GPU virtual memory space in GB.
1407 */
1408 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1409 {
1410 /* no need to check the default value */
1411 if (amdgpu_vm_size == -1)
1412 return;
1413
1414 if (amdgpu_vm_size < 1) {
1415 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1416 amdgpu_vm_size);
1417 amdgpu_vm_size = -1;
1418 }
1419 }
1420
1421 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1422 {
1423 struct sysinfo si;
1424 bool is_os_64 = (sizeof(void *) == 8);
1425 uint64_t total_memory;
1426 uint64_t dram_size_seven_GB = 0x1B8000000;
1427 uint64_t dram_size_three_GB = 0xB8000000;
1428
1429 if (amdgpu_smu_memory_pool_size == 0)
1430 return;
1431
1432 if (!is_os_64) {
1433 DRM_WARN("Not 64-bit OS, feature not supported\n");
1434 goto def_value;
1435 }
1436 si_meminfo(&si);
1437 total_memory = (uint64_t)si.totalram * si.mem_unit;
1438
1439 if ((amdgpu_smu_memory_pool_size == 1) ||
1440 (amdgpu_smu_memory_pool_size == 2)) {
1441 if (total_memory < dram_size_three_GB)
1442 goto def_value1;
1443 } else if ((amdgpu_smu_memory_pool_size == 4) ||
1444 (amdgpu_smu_memory_pool_size == 8)) {
1445 if (total_memory < dram_size_seven_GB)
1446 goto def_value1;
1447 } else {
1448 DRM_WARN("Smu memory pool size not supported\n");
1449 goto def_value;
1450 }
1451 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1452
1453 return;
1454
1455 def_value1:
1456 DRM_WARN("Not enough system memory\n");
1457 def_value:
1458 adev->pm.smu_prv_buffer_size = 0;
1459 }
1460
1461 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1462 {
1463 if (!(adev->flags & AMD_IS_APU) ||
1464 adev->asic_type < CHIP_RAVEN)
1465 return 0;
1466
1467 switch (adev->asic_type) {
1468 case CHIP_RAVEN:
1469 if (adev->pdev->device == 0x15dd)
1470 adev->apu_flags |= AMD_APU_IS_RAVEN;
1471 if (adev->pdev->device == 0x15d8)
1472 adev->apu_flags |= AMD_APU_IS_PICASSO;
1473 break;
1474 case CHIP_RENOIR:
1475 if ((adev->pdev->device == 0x1636) ||
1476 (adev->pdev->device == 0x164c))
1477 adev->apu_flags |= AMD_APU_IS_RENOIR;
1478 else
1479 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1480 break;
1481 case CHIP_VANGOGH:
1482 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1483 break;
1484 case CHIP_YELLOW_CARP:
1485 break;
1486 case CHIP_CYAN_SKILLFISH:
1487 if ((adev->pdev->device == 0x13FE) ||
1488 (adev->pdev->device == 0x143F))
1489 adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1490 break;
1491 default:
1492 break;
1493 }
1494
1495 return 0;
1496 }
1497
1498 /**
1499 * amdgpu_device_check_arguments - validate module params
1500 *
1501 * @adev: amdgpu_device pointer
1502 *
1503 * Validates certain module parameters and updates
1504 * the associated values used by the driver (all asics).
1505 */
1506 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1507 {
1508 if (amdgpu_sched_jobs < 4) {
1509 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1510 amdgpu_sched_jobs);
1511 amdgpu_sched_jobs = 4;
1512 } else if (!is_power_of_2(amdgpu_sched_jobs)){
1513 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1514 amdgpu_sched_jobs);
1515 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1516 }
1517
1518 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1519 /* gart size must be greater than or equal to 32M */
1520 dev_warn(adev->dev, "gart size (%d) too small\n",
1521 amdgpu_gart_size);
1522 amdgpu_gart_size = -1;
1523 }
1524
1525 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1526 /* gtt size must be greater than or equal to 32M */
1527 dev_warn(adev->dev, "gtt size (%d) too small\n",
1528 amdgpu_gtt_size);
1529 amdgpu_gtt_size = -1;
1530 }
1531
1532 /* valid range is between 4 and 9 inclusive */
1533 if (amdgpu_vm_fragment_size != -1 &&
1534 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1535 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1536 amdgpu_vm_fragment_size = -1;
1537 }
1538
1539 if (amdgpu_sched_hw_submission < 2) {
1540 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1541 amdgpu_sched_hw_submission);
1542 amdgpu_sched_hw_submission = 2;
1543 } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1544 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1545 amdgpu_sched_hw_submission);
1546 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1547 }
1548
1549 if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1550 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1551 amdgpu_reset_method = -1;
1552 }
1553
1554 amdgpu_device_check_smu_prv_buffer_size(adev);
1555
1556 amdgpu_device_check_vm_size(adev);
1557
1558 amdgpu_device_check_block_size(adev);
1559
1560 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1561
1562 return 0;
1563 }
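/*
 * Example of the clamping above: loading the driver with
 * "modprobe amdgpu sched_jobs=5" results in amdgpu_sched_jobs being rounded
 * up to 8, since non-power-of-two values go through roundup_pow_of_two().
 */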
1564
1565 /**
1566 * amdgpu_switcheroo_set_state - set switcheroo state
1567 *
1568 * @pdev: pci dev pointer
1569 * @state: vga_switcheroo state
1570 *
1571 * Callback for the switcheroo driver. Suspends or resumes
1572 * the asics before or after it is powered up using ACPI methods.
1573 */
1574 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1575 enum vga_switcheroo_state state)
1576 {
1577 struct drm_device *dev = pci_get_drvdata(pdev);
1578 int r;
1579
1580 if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1581 return;
1582
1583 if (state == VGA_SWITCHEROO_ON) {
1584 pr_info("switched on\n");
1585 /* don't suspend or resume card normally */
1586 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1587
1588 pci_set_power_state(pdev, PCI_D0);
1589 amdgpu_device_load_pci_state(pdev);
1590 r = pci_enable_device(pdev);
1591 if (r)
1592 DRM_WARN("pci_enable_device failed (%d)\n", r);
1593 amdgpu_device_resume(dev, true);
1594
1595 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1596 } else {
1597 pr_info("switched off\n");
1598 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1599 amdgpu_device_suspend(dev, true);
1600 amdgpu_device_cache_pci_state(pdev);
1601 /* Shut down the device */
1602 pci_disable_device(pdev);
1603 pci_set_power_state(pdev, PCI_D3cold);
1604 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1605 }
1606 }
1607
1608 /**
1609 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1610 *
1611 * @pdev: pci dev pointer
1612 *
1613 * Callback for the switcheroo driver. Checks if the switcheroo
1614 * state can be changed.
1615 * Returns true if the state can be changed, false if not.
1616 */
1617 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1618 {
1619 struct drm_device *dev = pci_get_drvdata(pdev);
1620
1621 /*
1622 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1623 * locking inversion with the driver load path. And the access here is
1624 * completely racy anyway. So don't bother with locking for now.
1625 */
1626 return atomic_read(&dev->open_count) == 0;
1627 }
1628
1629 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1630 .set_gpu_state = amdgpu_switcheroo_set_state,
1631 .reprobe = NULL,
1632 .can_switch = amdgpu_switcheroo_can_switch,
1633 };
1634
1635 /**
1636 * amdgpu_device_ip_set_clockgating_state - set the CG state
1637 *
1638 * @dev: amdgpu_device pointer
1639 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1640 * @state: clockgating state (gate or ungate)
1641 *
1642 * Sets the requested clockgating state for all instances of
1643 * the hardware IP specified.
1644 * Returns the error code from the last instance.
1645 */
1646 int amdgpu_device_ip_set_clockgating_state(void *dev,
1647 enum amd_ip_block_type block_type,
1648 enum amd_clockgating_state state)
1649 {
1650 struct amdgpu_device *adev = dev;
1651 int i, r = 0;
1652
1653 for (i = 0; i < adev->num_ip_blocks; i++) {
1654 if (!adev->ip_blocks[i].status.valid)
1655 continue;
1656 if (adev->ip_blocks[i].version->type != block_type)
1657 continue;
1658 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1659 continue;
1660 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1661 (void *)adev, state);
1662 if (r)
1663 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1664 adev->ip_blocks[i].version->funcs->name, r);
1665 }
1666 return r;
1667 }
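/*
 * Callers gate or ungate all instances of an IP type in one call, e.g.
 * (illustrative only):
 *
 *	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       AMD_CG_STATE_GATE);
 */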
1668
1669 /**
1670 * amdgpu_device_ip_set_powergating_state - set the PG state
1671 *
1672 * @dev: amdgpu_device pointer
1673 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1674 * @state: powergating state (gate or ungate)
1675 *
1676 * Sets the requested powergating state for all instances of
1677 * the hardware IP specified.
1678 * Returns the error code from the last instance.
1679 */
1680 int amdgpu_device_ip_set_powergating_state(void *dev,
1681 enum amd_ip_block_type block_type,
1682 enum amd_powergating_state state)
1683 {
1684 struct amdgpu_device *adev = dev;
1685 int i, r = 0;
1686
1687 for (i = 0; i < adev->num_ip_blocks; i++) {
1688 if (!adev->ip_blocks[i].status.valid)
1689 continue;
1690 if (adev->ip_blocks[i].version->type != block_type)
1691 continue;
1692 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1693 continue;
1694 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1695 (void *)adev, state);
1696 if (r)
1697 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1698 adev->ip_blocks[i].version->funcs->name, r);
1699 }
1700 return r;
1701 }
1702
1703 /**
1704 * amdgpu_device_ip_get_clockgating_state - get the CG state
1705 *
1706 * @adev: amdgpu_device pointer
1707 * @flags: clockgating feature flags
1708 *
1709 * Walks the list of IPs on the device and updates the clockgating
1710 * flags for each IP.
1711 * Updates @flags with the feature flags for each hardware IP where
1712 * clockgating is enabled.
1713 */
1714 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1715 u64 *flags)
1716 {
1717 int i;
1718
1719 for (i = 0; i < adev->num_ip_blocks; i++) {
1720 if (!adev->ip_blocks[i].status.valid)
1721 continue;
1722 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1723 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1724 }
1725 }
1726
1727 /**
1728 * amdgpu_device_ip_wait_for_idle - wait for idle
1729 *
1730 * @adev: amdgpu_device pointer
1731 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1732 *
1733 * Waits for the requested hardware IP to be idle.
1734 * Returns 0 for success or a negative error code on failure.
1735 */
1736 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1737 enum amd_ip_block_type block_type)
1738 {
1739 int i, r;
1740
1741 for (i = 0; i < adev->num_ip_blocks; i++) {
1742 if (!adev->ip_blocks[i].status.valid)
1743 continue;
1744 if (adev->ip_blocks[i].version->type == block_type) {
1745 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1746 if (r)
1747 return r;
1748 break;
1749 }
1750 }
1751 return 0;
1752
1753 }
1754
1755 /**
1756 * amdgpu_device_ip_is_idle - is the hardware IP idle
1757 *
1758 * @adev: amdgpu_device pointer
1759 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1760 *
1761 * Check if the hardware IP is idle or not.
1762 * Returns true if the IP is idle, false if not.
1763 */
1764 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1765 enum amd_ip_block_type block_type)
1766 {
1767 int i;
1768
1769 for (i = 0; i < adev->num_ip_blocks; i++) {
1770 if (!adev->ip_blocks[i].status.valid)
1771 continue;
1772 if (adev->ip_blocks[i].version->type == block_type)
1773 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1774 }
1775 return true;
1776
1777 }
1778
1779 /**
1780 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1781 *
1782 * @adev: amdgpu_device pointer
1783 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1784 *
1785 * Returns a pointer to the hardware IP block structure
1786 * if it exists for the asic, otherwise NULL.
1787 */
1788 struct amdgpu_ip_block *
1789 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1790 enum amd_ip_block_type type)
1791 {
1792 int i;
1793
1794 for (i = 0; i < adev->num_ip_blocks; i++)
1795 if (adev->ip_blocks[i].version->type == type)
1796 return &adev->ip_blocks[i];
1797
1798 return NULL;
1799 }
1800
1801 /**
1802 * amdgpu_device_ip_block_version_cmp
1803 *
1804 * @adev: amdgpu_device pointer
1805 * @type: enum amd_ip_block_type
1806 * @major: major version
1807 * @minor: minor version
1808 *
1809 * return 0 if equal or greater
1810 * return 1 if smaller or the ip_block doesn't exist
1811 */
1812 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1813 enum amd_ip_block_type type,
1814 u32 major, u32 minor)
1815 {
1816 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1817
1818 if (ip_block && ((ip_block->version->major > major) ||
1819 ((ip_block->version->major == major) &&
1820 (ip_block->version->minor >= minor))))
1821 return 0;
1822
1823 return 1;
1824 }
1825
1826 /**
1827 * amdgpu_device_ip_block_add
1828 *
1829 * @adev: amdgpu_device pointer
1830 * @ip_block_version: pointer to the IP to add
1831 *
1832 * Adds the IP block driver information to the collection of IPs
1833 * on the asic.
1834 */
1835 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1836 const struct amdgpu_ip_block_version *ip_block_version)
1837 {
1838 if (!ip_block_version)
1839 return -EINVAL;
1840
1841 switch (ip_block_version->type) {
1842 case AMD_IP_BLOCK_TYPE_VCN:
1843 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1844 return 0;
1845 break;
1846 case AMD_IP_BLOCK_TYPE_JPEG:
1847 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1848 return 0;
1849 break;
1850 default:
1851 break;
1852 }
1853
1854 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1855 ip_block_version->funcs->name);
1856
1857 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1858
1859 return 0;
1860 }
1861
1862 /**
1863 * amdgpu_device_enable_virtual_display - enable virtual display feature
1864 *
1865 * @adev: amdgpu_device pointer
1866 *
1867 * Enables the virtual display feature if the user has enabled it via
1868 * the module parameter virtual_display. This feature provides a virtual
1869 * display hardware on headless boards or in virtualized environments.
1870 * This function parses and validates the configuration string specified by
1871 * the user and configures the virtual display configuration (number of
1872 * virtual connectors, crtcs, etc.) specified.
1873 */
1874 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1875 {
1876 adev->enable_virtual_display = false;
1877
1878 if (amdgpu_virtual_display) {
1879 const char *pci_address_name = pci_name(adev->pdev);
1880 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1881
1882 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1883 pciaddstr_tmp = pciaddstr;
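/* virtual_display is a semicolon-separated list of "<pci address>,<num crtcs>" entries; "all" matches every device and num_crtc is clamped to 1-6 */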
1884 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1885 pciaddname = strsep(&pciaddname_tmp, ",");
1886 if (!strcmp("all", pciaddname)
1887 || !strcmp(pci_address_name, pciaddname)) {
1888 long num_crtc;
1889 int res = -1;
1890
1891 adev->enable_virtual_display = true;
1892
1893 if (pciaddname_tmp)
1894 res = kstrtol(pciaddname_tmp, 10,
1895 &num_crtc);
1896
1897 if (!res) {
1898 if (num_crtc < 1)
1899 num_crtc = 1;
1900 if (num_crtc > 6)
1901 num_crtc = 6;
1902 adev->mode_info.num_crtc = num_crtc;
1903 } else {
1904 adev->mode_info.num_crtc = 1;
1905 }
1906 break;
1907 }
1908 }
1909
1910 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1911 amdgpu_virtual_display, pci_address_name,
1912 adev->enable_virtual_display, adev->mode_info.num_crtc);
1913
1914 kfree(pciaddstr);
1915 }
1916 }
1917
1918 /**
1919 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1920 *
1921 * @adev: amdgpu_device pointer
1922 *
1923 * Parses the asic configuration parameters specified in the gpu info
1924 * firmware and makes them available to the driver for use in configuring
1925 * the asic.
1926 * Returns 0 on success, -EINVAL on failure.
1927 */
1928 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1929 {
1930 const char *chip_name;
1931 char fw_name[40];
1932 int err;
1933 const struct gpu_info_firmware_header_v1_0 *hdr;
1934
1935 adev->firmware.gpu_info_fw = NULL;
1936
1937 if (adev->mman.discovery_bin) {
1938 /*
1939 * FIXME: The bounding box is still needed by Navi12, so
1940 * temporarily read it from gpu_info firmware. Should be dropped
1941 * when DAL no longer needs it.
1942 */
1943 if (adev->asic_type != CHIP_NAVI12)
1944 return 0;
1945 }
1946
1947 switch (adev->asic_type) {
1948 default:
1949 return 0;
1950 case CHIP_VEGA10:
1951 chip_name = "vega10";
1952 break;
1953 case CHIP_VEGA12:
1954 chip_name = "vega12";
1955 break;
1956 case CHIP_RAVEN:
1957 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1958 chip_name = "raven2";
1959 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1960 chip_name = "picasso";
1961 else
1962 chip_name = "raven";
1963 break;
1964 case CHIP_ARCTURUS:
1965 chip_name = "arcturus";
1966 break;
1967 case CHIP_NAVI12:
1968 chip_name = "navi12";
1969 break;
1970 }
1971
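/* fetch and validate the per-chip gpu_info firmware; a failure here aborts early init */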
1972 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1973 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1974 if (err) {
1975 dev_err(adev->dev,
1976 "Failed to load gpu_info firmware \"%s\"\n",
1977 fw_name);
1978 goto out;
1979 }
1980 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1981 if (err) {
1982 dev_err(adev->dev,
1983 "Failed to validate gpu_info firmware \"%s\"\n",
1984 fw_name);
1985 goto out;
1986 }
1987
1988 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1989 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1990
1991 switch (hdr->version_major) {
1992 case 1:
1993 {
1994 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1995 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1996 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1997
1998 /*
1999 * Should be dropped when DAL no longer needs it.
2000 */
2001 if (adev->asic_type == CHIP_NAVI12)
2002 goto parse_soc_bounding_box;
2003
2004 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2005 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2006 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2007 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2008 adev->gfx.config.max_texture_channel_caches =
2009 le32_to_cpu(gpu_info_fw->gc_num_tccs);
2010 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2011 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2012 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2013 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2014 adev->gfx.config.double_offchip_lds_buf =
2015 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2016 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2017 adev->gfx.cu_info.max_waves_per_simd =
2018 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2019 adev->gfx.cu_info.max_scratch_slots_per_cu =
2020 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2021 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2022 if (hdr->version_minor >= 1) {
2023 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2024 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2025 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2026 adev->gfx.config.num_sc_per_sh =
2027 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2028 adev->gfx.config.num_packer_per_sc =
2029 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2030 }
2031
2032 parse_soc_bounding_box:
2033 /*
2034 * The SOC bounding box info is not integrated in the discovery table,
2035 * so it always has to be parsed from the gpu_info firmware when needed.
2036 */
2037 if (hdr->version_minor == 2) {
2038 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2039 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2040 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2041 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2042 }
2043 break;
2044 }
2045 default:
2046 dev_err(adev->dev,
2047 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2048 err = -EINVAL;
2049 goto out;
2050 }
2051 out:
2052 return err;
2053 }
2054
2055 /**
2056 * amdgpu_device_ip_early_init - run early init for hardware IPs
2057 *
2058 * @adev: amdgpu_device pointer
2059 *
2060 * Early initialization pass for hardware IPs. The hardware IPs that make
2061 * up each asic are discovered and each IP's early_init callback is run. This
2062 * is the first stage in initializing the asic.
2063 * Returns 0 on success, negative error code on failure.
2064 */
2065 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2066 {
2067 struct drm_device *dev = adev_to_drm(adev);
2068 struct pci_dev *parent;
2069 int i, r;
2070
2071 amdgpu_device_enable_virtual_display(adev);
2072
2073 if (amdgpu_sriov_vf(adev)) {
2074 r = amdgpu_virt_request_full_gpu(adev, true);
2075 if (r)
2076 return r;
2077 }
2078
2079 switch (adev->asic_type) {
2080 #ifdef CONFIG_DRM_AMDGPU_SI
2081 case CHIP_VERDE:
2082 case CHIP_TAHITI:
2083 case CHIP_PITCAIRN:
2084 case CHIP_OLAND:
2085 case CHIP_HAINAN:
2086 adev->family = AMDGPU_FAMILY_SI;
2087 r = si_set_ip_blocks(adev);
2088 if (r)
2089 return r;
2090 break;
2091 #endif
2092 #ifdef CONFIG_DRM_AMDGPU_CIK
2093 case CHIP_BONAIRE:
2094 case CHIP_HAWAII:
2095 case CHIP_KAVERI:
2096 case CHIP_KABINI:
2097 case CHIP_MULLINS:
2098 if (adev->flags & AMD_IS_APU)
2099 adev->family = AMDGPU_FAMILY_KV;
2100 else
2101 adev->family = AMDGPU_FAMILY_CI;
2102
2103 r = cik_set_ip_blocks(adev);
2104 if (r)
2105 return r;
2106 break;
2107 #endif
2108 case CHIP_TOPAZ:
2109 case CHIP_TONGA:
2110 case CHIP_FIJI:
2111 case CHIP_POLARIS10:
2112 case CHIP_POLARIS11:
2113 case CHIP_POLARIS12:
2114 case CHIP_VEGAM:
2115 case CHIP_CARRIZO:
2116 case CHIP_STONEY:
2117 if (adev->flags & AMD_IS_APU)
2118 adev->family = AMDGPU_FAMILY_CZ;
2119 else
2120 adev->family = AMDGPU_FAMILY_VI;
2121
2122 r = vi_set_ip_blocks(adev);
2123 if (r)
2124 return r;
2125 break;
2126 default:
2127 r = amdgpu_discovery_set_ip_blocks(adev);
2128 if (r)
2129 return r;
2130 break;
2131 }
2132
2133 if (amdgpu_has_atpx() &&
2134 (amdgpu_is_atpx_hybrid() ||
2135 amdgpu_has_atpx_dgpu_power_cntl()) &&
2136 ((adev->flags & AMD_IS_APU) == 0) &&
2137 !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2138 adev->flags |= AMD_IS_PX;
2139
2140 if (!(adev->flags & AMD_IS_APU)) {
2141 parent = pci_upstream_bridge(adev->pdev);
2142 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2143 }
2144
2145 amdgpu_amdkfd_device_probe(adev);
2146
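/* mask off power features that are unsupported in this configuration: no GFXOFF under SR-IOV or without HWS, no overdrive for Sienna Cichlid VFs */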
2147 adev->pm.pp_feature = amdgpu_pp_feature_mask;
2148 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2149 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2150 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2151 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2152
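/* a cleared bit in the ip_block_mask module parameter disables the corresponding IP block; otherwise run each block's early_init */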
2153 for (i = 0; i < adev->num_ip_blocks; i++) {
2154 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2155 DRM_ERROR("disabled ip block: %d <%s>\n",
2156 i, adev->ip_blocks[i].version->funcs->name);
2157 adev->ip_blocks[i].status.valid = false;
2158 } else {
2159 if (adev->ip_blocks[i].version->funcs->early_init) {
2160 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2161 if (r == -ENOENT) {
2162 adev->ip_blocks[i].status.valid = false;
2163 } else if (r) {
2164 DRM_ERROR("early_init of IP block <%s> failed %d\n",
2165 adev->ip_blocks[i].version->funcs->name, r);
2166 return r;
2167 } else {
2168 adev->ip_blocks[i].status.valid = true;
2169 }
2170 } else {
2171 adev->ip_blocks[i].status.valid = true;
2172 }
2173 }
2174 /* get the vbios after the asic_funcs are set up */
2175 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2176 r = amdgpu_device_parse_gpu_info_fw(adev);
2177 if (r)
2178 return r;
2179
2180 /* Read BIOS */
2181 if (!amdgpu_get_bios(adev))
2182 return -EINVAL;
2183
2184 r = amdgpu_atombios_init(adev);
2185 if (r) {
2186 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2187 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2188 return r;
2189 }
2190
2191 /* get pf2vf msg info at its earliest time */
2192 if (amdgpu_sriov_vf(adev))
2193 amdgpu_virt_init_data_exchange(adev);
2194
2195 }
2196 }
2197
2198 adev->cg_flags &= amdgpu_cg_mask;
2199 adev->pg_flags &= amdgpu_pg_mask;
2200
2201 return 0;
2202 }
2203
2204 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2205 {
2206 int i, r;
2207
2208 for (i = 0; i < adev->num_ip_blocks; i++) {
2209 if (!adev->ip_blocks[i].status.sw)
2210 continue;
2211 if (adev->ip_blocks[i].status.hw)
2212 continue;
2213 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2214 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2215 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2216 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2217 if (r) {
2218 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2219 adev->ip_blocks[i].version->funcs->name, r);
2220 return r;
2221 }
2222 adev->ip_blocks[i].status.hw = true;
2223 }
2224 }
2225
2226 return 0;
2227 }
2228
2229 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2230 {
2231 int i, r;
2232
2233 for (i = 0; i < adev->num_ip_blocks; i++) {
2234 if (!adev->ip_blocks[i].status.sw)
2235 continue;
2236 if (adev->ip_blocks[i].status.hw)
2237 continue;
2238 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2239 if (r) {
2240 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2241 adev->ip_blocks[i].version->funcs->name, r);
2242 return r;
2243 }
2244 adev->ip_blocks[i].status.hw = true;
2245 }
2246
2247 return 0;
2248 }
2249
2250 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2251 {
2252 int r = 0;
2253 int i;
2254 uint32_t smu_version;
2255
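/* on VEGA10 and newer, firmware loading goes through the PSP block: bring it up here, or resume it when coming back from reset/suspend */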
2256 if (adev->asic_type >= CHIP_VEGA10) {
2257 for (i = 0; i < adev->num_ip_blocks; i++) {
2258 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2259 continue;
2260
2261 if (!adev->ip_blocks[i].status.sw)
2262 continue;
2263
2264 /* no need to do the fw loading again if already done */
2265 if (adev->ip_blocks[i].status.hw == true)
2266 break;
2267
2268 if (amdgpu_in_reset(adev) || adev->in_suspend) {
2269 r = adev->ip_blocks[i].version->funcs->resume(adev);
2270 if (r) {
2271 DRM_ERROR("resume of IP block <%s> failed %d\n",
2272 adev->ip_blocks[i].version->funcs->name, r);
2273 return r;
2274 }
2275 } else {
2276 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2277 if (r) {
2278 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2279 adev->ip_blocks[i].version->funcs->name, r);
2280 return r;
2281 }
2282 }
2283
2284 adev->ip_blocks[i].status.hw = true;
2285 break;
2286 }
2287 }
2288
2289 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2290 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2291
2292 return r;
2293 }
2294
2295 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2296 {
2297 long timeout;
2298 int r, i;
2299
2300 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2301 struct amdgpu_ring *ring = adev->rings[i];
2302
2303 /* No need to setup the GPU scheduler for rings that don't need it */
2304 if (!ring || ring->no_scheduler)
2305 continue;
2306
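/* each engine type has its own job timeout; anything that is not gfx/compute/sdma uses the video timeout */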
2307 switch (ring->funcs->type) {
2308 case AMDGPU_RING_TYPE_GFX:
2309 timeout = adev->gfx_timeout;
2310 break;
2311 case AMDGPU_RING_TYPE_COMPUTE:
2312 timeout = adev->compute_timeout;
2313 break;
2314 case AMDGPU_RING_TYPE_SDMA:
2315 timeout = adev->sdma_timeout;
2316 break;
2317 default:
2318 timeout = adev->video_timeout;
2319 break;
2320 }
2321
2322 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2323 ring->num_hw_submission, amdgpu_job_hang_limit,
2324 timeout, adev->reset_domain->wq,
2325 ring->sched_score, ring->name,
2326 adev->dev);
2327 if (r) {
2328 DRM_ERROR("Failed to create scheduler on ring %s.\n",
2329 ring->name);
2330 return r;
2331 }
2332 }
2333
2334 return 0;
2335 }
2336
2337
2338 /**
2339 * amdgpu_device_ip_init - run init for hardware IPs
2340 *
2341 * @adev: amdgpu_device pointer
2342 *
2343 * Main initialization pass for hardware IPs. The list of all the hardware
2344 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2345 * are run. sw_init initializes the software state associated with each IP
2346 * and hw_init initializes the hardware associated with each IP.
2347 * Returns 0 on success, negative error code on failure.
2348 */
2349 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2350 {
2351 int i, r;
2352
2353 r = amdgpu_ras_init(adev);
2354 if (r)
2355 return r;
2356
2357 for (i = 0; i < adev->num_ip_blocks; i++) {
2358 if (!adev->ip_blocks[i].status.valid)
2359 continue;
2360 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2361 if (r) {
2362 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2363 adev->ip_blocks[i].version->funcs->name, r);
2364 goto init_failed;
2365 }
2366 adev->ip_blocks[i].status.sw = true;
2367
2368 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2369 /* need to do common hw init early so everything is set up for gmc */
2370 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2371 if (r) {
2372 DRM_ERROR("hw_init %d failed %d\n", i, r);
2373 goto init_failed;
2374 }
2375 adev->ip_blocks[i].status.hw = true;
2376 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2377 /* need to do gmc hw init early so we can allocate gpu mem */
2378 /* Try to reserve bad pages early */
2379 if (amdgpu_sriov_vf(adev))
2380 amdgpu_virt_exchange_data(adev);
2381
2382 r = amdgpu_device_vram_scratch_init(adev);
2383 if (r) {
2384 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2385 goto init_failed;
2386 }
2387 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2388 if (r) {
2389 DRM_ERROR("hw_init %d failed %d\n", i, r);
2390 goto init_failed;
2391 }
2392 r = amdgpu_device_wb_init(adev);
2393 if (r) {
2394 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2395 goto init_failed;
2396 }
2397 adev->ip_blocks[i].status.hw = true;
2398
2399 /* right after GMC hw init, we create CSA */
2400 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2401 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2402 AMDGPU_GEM_DOMAIN_VRAM,
2403 AMDGPU_CSA_SIZE);
2404 if (r) {
2405 DRM_ERROR("allocate CSA failed %d\n", r);
2406 goto init_failed;
2407 }
2408 }
2409 }
2410 }
2411
2412 if (amdgpu_sriov_vf(adev))
2413 amdgpu_virt_init_data_exchange(adev);
2414
2415 r = amdgpu_ib_pool_init(adev);
2416 if (r) {
2417 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2418 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2419 goto init_failed;
2420 }
2421
2422 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2423 if (r)
2424 goto init_failed;
2425
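/* bring up the remaining hardware in stages: COMMON/IH (and PSP for SR-IOV) first, then firmware loading, then the rest of the IP blocks */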
2426 r = amdgpu_device_ip_hw_init_phase1(adev);
2427 if (r)
2428 goto init_failed;
2429
2430 r = amdgpu_device_fw_loading(adev);
2431 if (r)
2432 goto init_failed;
2433
2434 r = amdgpu_device_ip_hw_init_phase2(adev);
2435 if (r)
2436 goto init_failed;
2437
2438 /*
2439 * Retired pages will be loaded from the EEPROM and reserved here.
2440 * This should be called after amdgpu_device_ip_hw_init_phase2, since
2441 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2442 * functional for I2C communication, which is only true at this point.
2443 *
2444 * amdgpu_ras_recovery_init may fail, but the upper layer only cares
2445 * about failures caused by a bad GPU state and stops the amdgpu init
2446 * process accordingly. For other failures it still releases all the
2447 * resources and prints an error message, rather than returning a
2448 * negative value to the upper level.
2449 *
2450 * Note: theoretically, this should be called before all VRAM allocations
2451 * to keep retired pages from being allocated again.
2452 */
2453 r = amdgpu_ras_recovery_init(adev);
2454 if (r)
2455 goto init_failed;
2456
2457 /*
2458 * In case of XGMI, grab an extra reference on the reset domain for this device.
2459 */
2460 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2461 if (amdgpu_xgmi_add_device(adev) == 0) {
2462 if (!amdgpu_sriov_vf(adev)) {
2463 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2464
2465 if (!hive->reset_domain ||
2466 !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2467 r = -ENOENT;
2468 amdgpu_put_xgmi_hive(hive);
2469 goto init_failed;
2470 }
2471
2472 /* Drop the early temporary reset domain we created for device */
2473 amdgpu_reset_put_reset_domain(adev->reset_domain);
2474 adev->reset_domain = hive->reset_domain;
2475 amdgpu_put_xgmi_hive(hive);
2476 }
2477 }
2478 }
2479
2480 r = amdgpu_device_init_schedulers(adev);
2481 if (r)
2482 goto init_failed;
2483
2484 /* Don't init kfd if whole hive need to be reset during init */
2485 if (!adev->gmc.xgmi.pending_reset)
2486 amdgpu_amdkfd_device_init(adev);
2487
2488 amdgpu_fru_get_product_info(adev);
2489
2490 init_failed:
2491 if (amdgpu_sriov_vf(adev))
2492 amdgpu_virt_release_full_gpu(adev, true);
2493
2494 return r;
2495 }
2496
2497 /**
2498 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2499 *
2500 * @adev: amdgpu_device pointer
2501 *
2502 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2503 * this function before a GPU reset. If the value is retained after a
2504 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2505 */
2506 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2507 {
2508 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2509 }
2510
2511 /**
2512 * amdgpu_device_check_vram_lost - check if vram is valid
2513 *
2514 * @adev: amdgpu_device pointer
2515 *
2516 * Checks the reset magic value written to the gart pointer in VRAM.
2517 * The driver calls this after a GPU reset to see if the contents of
2518 * VRAM was lost or not.
2519 * Returns true if vram is lost, false if not.
2520 */
2521 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2522 {
2523 if (memcmp(adev->gart.ptr, adev->reset_magic,
2524 AMDGPU_RESET_MAGIC_NUM))
2525 return true;
2526
2527 if (!amdgpu_in_reset(adev))
2528 return false;
2529
2530 /*
2531 * For all ASICs with baco/mode1 reset, the VRAM is
2532 * always assumed to be lost.
2533 */
2534 switch (amdgpu_asic_reset_method(adev)) {
2535 case AMD_RESET_METHOD_BACO:
2536 case AMD_RESET_METHOD_MODE1:
2537 return true;
2538 default:
2539 return false;
2540 }
2541 }
2542
2543 /**
2544 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2545 *
2546 * @adev: amdgpu_device pointer
2547 * @state: clockgating state (gate or ungate)
2548 *
2549 * The list of all the hardware IPs that make up the asic is walked and the
2550 * set_clockgating_state callbacks are run.
2551 * During the late init pass it enables clockgating for hardware IPs;
2552 * during fini or suspend it disables clockgating for hardware IPs.
2553 * Returns 0 on success, negative error code on failure.
2554 */
2555
2556 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2557 enum amd_clockgating_state state)
2558 {
2559 int i, j, r;
2560
2561 if (amdgpu_emu_mode == 1)
2562 return 0;
2563
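/* gate in normal IP order, ungate in reverse order */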
2564 for (j = 0; j < adev->num_ip_blocks; j++) {
2565 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2566 if (!adev->ip_blocks[i].status.late_initialized)
2567 continue;
2568 /* skip CG for GFX on S0ix */
2569 if (adev->in_s0ix &&
2570 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2571 continue;
2572 /* skip CG for VCE/UVD, it's handled specially */
2573 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2574 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2575 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2576 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2577 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2578 /* enable clockgating to save power */
2579 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2580 state);
2581 if (r) {
2582 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2583 adev->ip_blocks[i].version->funcs->name, r);
2584 return r;
2585 }
2586 }
2587 }
2588
2589 return 0;
2590 }
2591
2592 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2593 enum amd_powergating_state state)
2594 {
2595 int i, j, r;
2596
2597 if (amdgpu_emu_mode == 1)
2598 return 0;
2599
2600 for (j = 0; j < adev->num_ip_blocks; j++) {
2601 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2602 if (!adev->ip_blocks[i].status.late_initialized)
2603 continue;
2604 /* skip PG for GFX on S0ix */
2605 if (adev->in_s0ix &&
2606 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2607 continue;
2608 /* skip PG for VCE/UVD, it's handled specially */
2609 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2610 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2611 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2612 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2613 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2614 /* enable powergating to save power */
2615 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2616 state);
2617 if (r) {
2618 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2619 adev->ip_blocks[i].version->funcs->name, r);
2620 return r;
2621 }
2622 }
2623 }
2624 return 0;
2625 }
2626
2627 static int amdgpu_device_enable_mgpu_fan_boost(void)
2628 {
2629 struct amdgpu_gpu_instance *gpu_ins;
2630 struct amdgpu_device *adev;
2631 int i, ret = 0;
2632
2633 mutex_lock(&mgpu_info.mutex);
2634
2635 /*
2636 * MGPU fan boost feature should be enabled
2637 * only when there are two or more dGPUs in
2638 * the system
2639 */
2640 if (mgpu_info.num_dgpu < 2)
2641 goto out;
2642
2643 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2644 gpu_ins = &(mgpu_info.gpu_ins[i]);
2645 adev = gpu_ins->adev;
2646 if (!(adev->flags & AMD_IS_APU) &&
2647 !gpu_ins->mgpu_fan_enabled) {
2648 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2649 if (ret)
2650 break;
2651
2652 gpu_ins->mgpu_fan_enabled = 1;
2653 }
2654 }
2655
2656 out:
2657 mutex_unlock(&mgpu_info.mutex);
2658
2659 return ret;
2660 }
2661
2662 /**
2663 * amdgpu_device_ip_late_init - run late init for hardware IPs
2664 *
2665 * @adev: amdgpu_device pointer
2666 *
2667 * Late initialization pass for hardware IPs. The list of all the hardware
2668 * IPs that make up the asic is walked and the late_init callbacks are run.
2669 * late_init covers any special initialization that an IP requires
2670 * after all of them have been initialized or something that needs to happen
2671 * late in the init process.
2672 * Returns 0 on success, negative error code on failure.
2673 */
2674 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2675 {
2676 struct amdgpu_gpu_instance *gpu_instance;
2677 int i = 0, r;
2678
2679 for (i = 0; i < adev->num_ip_blocks; i++) {
2680 if (!adev->ip_blocks[i].status.hw)
2681 continue;
2682 if (adev->ip_blocks[i].version->funcs->late_init) {
2683 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2684 if (r) {
2685 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2686 adev->ip_blocks[i].version->funcs->name, r);
2687 return r;
2688 }
2689 }
2690 adev->ip_blocks[i].status.late_initialized = true;
2691 }
2692
2693 r = amdgpu_ras_late_init(adev);
2694 if (r) {
2695 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2696 return r;
2697 }
2698
2699 amdgpu_ras_set_error_query_ready(adev, true);
2700
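/* with all IPs late-initialized, enable clockgating and powergating */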
2701 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2702 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2703
2704 amdgpu_device_fill_reset_magic(adev);
2705
2706 r = amdgpu_device_enable_mgpu_fan_boost();
2707 if (r)
2708 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2709
2710 /* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
2711 if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2712 adev->asic_type == CHIP_ALDEBARAN))
2713 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2714
2715 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2716 mutex_lock(&mgpu_info.mutex);
2717
2718 /*
2719 * Reset device p-state to low as this was booted with high.
2720 *
2721 * This should be performed only after all devices from the same
2722 * hive get initialized.
2723 *
2724 * However, the number of devices in a hive is not known in advance;
2725 * it is counted one by one as the devices are initialized.
2726 *
2727 * So, we wait for all XGMI interlinked devices initialized.
2728 * This may bring some delays as those devices may come from
2729 * different hives. But that should be OK.
2730 */
2731 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2732 for (i = 0; i < mgpu_info.num_gpu; i++) {
2733 gpu_instance = &(mgpu_info.gpu_ins[i]);
2734 if (gpu_instance->adev->flags & AMD_IS_APU)
2735 continue;
2736
2737 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2738 AMDGPU_XGMI_PSTATE_MIN);
2739 if (r) {
2740 DRM_ERROR("pstate setting failed (%d).\n", r);
2741 break;
2742 }
2743 }
2744 }
2745
2746 mutex_unlock(&mgpu_info.mutex);
2747 }
2748
2749 return 0;
2750 }
2751
2752 /**
2753 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2754 *
2755 * @adev: amdgpu_device pointer
2756 *
2757 * For ASICs that need to disable the SMC first
2758 */
2759 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2760 {
2761 int i, r;
2762
2763 if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2764 return;
2765
2766 for (i = 0; i < adev->num_ip_blocks; i++) {
2767 if (!adev->ip_blocks[i].status.hw)
2768 continue;
2769 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2770 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2771 /* XXX handle errors */
2772 if (r) {
2773 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2774 adev->ip_blocks[i].version->funcs->name, r);
2775 }
2776 adev->ip_blocks[i].status.hw = false;
2777 break;
2778 }
2779 }
2780 }
2781
2782 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2783 {
2784 int i, r;
2785
2786 for (i = 0; i < adev->num_ip_blocks; i++) {
2787 if (!adev->ip_blocks[i].version->funcs->early_fini)
2788 continue;
2789
2790 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2791 if (r) {
2792 DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2793 adev->ip_blocks[i].version->funcs->name, r);
2794 }
2795 }
2796
2797 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2798 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2799
2800 amdgpu_amdkfd_suspend(adev, false);
2801
2802 /* Workaround for ASICs that need to disable the SMC first */
2803 amdgpu_device_smu_fini_early(adev);
2804
2805 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2806 if (!adev->ip_blocks[i].status.hw)
2807 continue;
2808
2809 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2810 /* XXX handle errors */
2811 if (r) {
2812 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2813 adev->ip_blocks[i].version->funcs->name, r);
2814 }
2815
2816 adev->ip_blocks[i].status.hw = false;
2817 }
2818
2819 if (amdgpu_sriov_vf(adev)) {
2820 if (amdgpu_virt_release_full_gpu(adev, false))
2821 DRM_ERROR("failed to release exclusive mode on fini\n");
2822 }
2823
2824 return 0;
2825 }
2826
2827 /**
2828 * amdgpu_device_ip_fini - run fini for hardware IPs
2829 *
2830 * @adev: amdgpu_device pointer
2831 *
2832 * Main teardown pass for hardware IPs. The list of all the hardware
2833 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2834 * are run. hw_fini tears down the hardware associated with each IP
2835 * and sw_fini tears down any software state associated with each IP.
2836 * Returns 0 on success, negative error code on failure.
2837 */
2838 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2839 {
2840 int i, r;
2841
2842 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2843 amdgpu_virt_release_ras_err_handler_data(adev);
2844
2845 if (adev->gmc.xgmi.num_physical_nodes > 1)
2846 amdgpu_xgmi_remove_device(adev);
2847
2848 amdgpu_amdkfd_device_fini_sw(adev);
2849
2850 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2851 if (!adev->ip_blocks[i].status.sw)
2852 continue;
2853
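/* free the GPU memory users (ucode BO, CSA, writeback, VRAM scratch, IB pool) just before the GMC block's sw_fini */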
2854 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2855 amdgpu_ucode_free_bo(adev);
2856 amdgpu_free_static_csa(&adev->virt.csa_obj);
2857 amdgpu_device_wb_fini(adev);
2858 amdgpu_device_vram_scratch_fini(adev);
2859 amdgpu_ib_pool_fini(adev);
2860 }
2861
2862 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2863 /* XXX handle errors */
2864 if (r) {
2865 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2866 adev->ip_blocks[i].version->funcs->name, r);
2867 }
2868 adev->ip_blocks[i].status.sw = false;
2869 adev->ip_blocks[i].status.valid = false;
2870 }
2871
2872 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2873 if (!adev->ip_blocks[i].status.late_initialized)
2874 continue;
2875 if (adev->ip_blocks[i].version->funcs->late_fini)
2876 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2877 adev->ip_blocks[i].status.late_initialized = false;
2878 }
2879
2880 amdgpu_ras_fini(adev);
2881
2882 return 0;
2883 }
2884
2885 /**
2886 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2887 *
2888 * @work: work_struct.
2889 */
2890 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2891 {
2892 struct amdgpu_device *adev =
2893 container_of(work, struct amdgpu_device, delayed_init_work.work);
2894 int r;
2895
2896 r = amdgpu_ib_ring_tests(adev);
2897 if (r)
2898 DRM_ERROR("ib ring test failed (%d).\n", r);
2899 }
2900
2901 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2902 {
2903 struct amdgpu_device *adev =
2904 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2905
2906 WARN_ON_ONCE(adev->gfx.gfx_off_state);
2907 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2908
2909 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2910 adev->gfx.gfx_off_state = true;
2911 }
2912
2913 /**
2914 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2915 *
2916 * @adev: amdgpu_device pointer
2917 *
2918 * Main suspend function for hardware IPs. The list of all the hardware
2919 * IPs that make up the asic is walked, clockgating is disabled and the
2920 * suspend callbacks are run. suspend puts the hardware and software state
2921 * in each IP into a state suitable for suspend.
2922 * Returns 0 on success, negative error code on failure.
2923 */
2924 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2925 {
2926 int i, r;
2927
2928 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2929 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2930
2931 /*
2932 * Per the PMFW team's suggestion, the driver needs to handle gfxoff
2933 * and df cstate feature disablement for the gpu reset (e.g. Mode1Reset)
2934 * scenario. Add the missing df cstate disablement here.
2935 */
2936 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2937 dev_warn(adev->dev, "Failed to disallow df cstate");
2938
2939 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2940 if (!adev->ip_blocks[i].status.valid)
2941 continue;
2942
2943 /* displays are handled separately */
2944 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2945 continue;
2946
2947 /* XXX handle errors */
2948 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2949 /* XXX handle errors */
2950 if (r) {
2951 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2952 adev->ip_blocks[i].version->funcs->name, r);
2953 return r;
2954 }
2955
2956 adev->ip_blocks[i].status.hw = false;
2957 }
2958
2959 return 0;
2960 }
2961
2962 /**
2963 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2964 *
2965 * @adev: amdgpu_device pointer
2966 *
2967 * Main suspend function for hardware IPs. The list of all the hardware
2968 * IPs that make up the asic is walked, clockgating is disabled and the
2969 * suspend callbacks are run. suspend puts the hardware and software state
2970 * in each IP into a state suitable for suspend.
2971 * Returns 0 on success, negative error code on failure.
2972 */
2973 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2974 {
2975 int i, r;
2976
2977 if (adev->in_s0ix)
2978 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2979
2980 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2981 if (!adev->ip_blocks[i].status.valid)
2982 continue;
2983 /* displays are handled in phase1 */
2984 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2985 continue;
2986 /* PSP lost connection when err_event_athub occurs */
2987 if (amdgpu_ras_intr_triggered() &&
2988 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2989 adev->ip_blocks[i].status.hw = false;
2990 continue;
2991 }
2992
2993 /* skip unnecessary suspend if we have not initialized them yet */
2994 if (adev->gmc.xgmi.pending_reset &&
2995 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2996 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2997 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2998 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2999 adev->ip_blocks[i].status.hw = false;
3000 continue;
3001 }
3002
3003 /* skip suspend of gfx and psp for S0ix
3004 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3005 * like at runtime. PSP is also part of the always on hardware
3006 * so no need to suspend it.
3007 */
3008 if (adev->in_s0ix &&
3009 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3010 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
3011 continue;
3012
3013 /* XXX handle errors */
3014 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3015 /* XXX handle errors */
3016 if (r) {
3017 DRM_ERROR("suspend of IP block <%s> failed %d\n",
3018 adev->ip_blocks[i].version->funcs->name, r);
3019 }
3020 adev->ip_blocks[i].status.hw = false;
3021 /* handle putting the SMC in the appropriate state */
3022 if (!amdgpu_sriov_vf(adev)) {
3023 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3024 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3025 if (r) {
3026 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3027 adev->mp1_state, r);
3028 return r;
3029 }
3030 }
3031 }
3032 }
3033
3034 return 0;
3035 }
3036
3037 /**
3038 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3039 *
3040 * @adev: amdgpu_device pointer
3041 *
3042 * Main suspend function for hardware IPs. The list of all the hardware
3043 * IPs that make up the asic is walked, clockgating is disabled and the
3044 * suspend callbacks are run. suspend puts the hardware and software state
3045 * in each IP into a state suitable for suspend.
3046 * Returns 0 on success, negative error code on failure.
3047 */
3048 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3049 {
3050 int r;
3051
3052 if (amdgpu_sriov_vf(adev)) {
3053 amdgpu_virt_fini_data_exchange(adev);
3054 amdgpu_virt_request_full_gpu(adev, false);
3055 }
3056
3057 r = amdgpu_device_ip_suspend_phase1(adev);
3058 if (r)
3059 return r;
3060 r = amdgpu_device_ip_suspend_phase2(adev);
3061
3062 if (amdgpu_sriov_vf(adev))
3063 amdgpu_virt_release_full_gpu(adev, false);
3064
3065 return r;
3066 }
3067
3068 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3069 {
3070 int i, r;
3071
3072 static enum amd_ip_block_type ip_order[] = {
3073 AMD_IP_BLOCK_TYPE_COMMON,
3074 AMD_IP_BLOCK_TYPE_GMC,
3075 AMD_IP_BLOCK_TYPE_PSP,
3076 AMD_IP_BLOCK_TYPE_IH,
3077 };
3078
3079 for (i = 0; i < adev->num_ip_blocks; i++) {
3080 int j;
3081 struct amdgpu_ip_block *block;
3082
3083 block = &adev->ip_blocks[i];
3084 block->status.hw = false;
3085
3086 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3087
3088 if (block->version->type != ip_order[j] ||
3089 !block->status.valid)
3090 continue;
3091
3092 r = block->version->funcs->hw_init(adev);
3093 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3094 if (r)
3095 return r;
3096 block->status.hw = true;
3097 }
3098 }
3099
3100 return 0;
3101 }
3102
3103 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3104 {
3105 int i, r;
3106
3107 static enum amd_ip_block_type ip_order[] = {
3108 AMD_IP_BLOCK_TYPE_SMC,
3109 AMD_IP_BLOCK_TYPE_DCE,
3110 AMD_IP_BLOCK_TYPE_GFX,
3111 AMD_IP_BLOCK_TYPE_SDMA,
3112 AMD_IP_BLOCK_TYPE_UVD,
3113 AMD_IP_BLOCK_TYPE_VCE,
3114 AMD_IP_BLOCK_TYPE_VCN
3115 };
3116
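/* walk the ordering table above: SMC is resumed, every other block is re-initialized via hw_init */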
3117 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3118 int j;
3119 struct amdgpu_ip_block *block;
3120
3121 for (j = 0; j < adev->num_ip_blocks; j++) {
3122 block = &adev->ip_blocks[j];
3123
3124 if (block->version->type != ip_order[i] ||
3125 !block->status.valid ||
3126 block->status.hw)
3127 continue;
3128
3129 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3130 r = block->version->funcs->resume(adev);
3131 else
3132 r = block->version->funcs->hw_init(adev);
3133
3134 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3135 if (r)
3136 return r;
3137 block->status.hw = true;
3138 }
3139 }
3140
3141 return 0;
3142 }
3143
3144 /**
3145 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3146 *
3147 * @adev: amdgpu_device pointer
3148 *
3149 * First resume function for hardware IPs. The list of all the hardware
3150 * IPs that make up the asic is walked and the resume callbacks are run for
3151 * COMMON, GMC, and IH. resume puts the hardware into a functional state
3152 * after a suspend and updates the software state as necessary. This
3153 * function is also used for restoring the GPU after a GPU reset.
3154 * Returns 0 on success, negative error code on failure.
3155 */
3156 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3157 {
3158 int i, r;
3159
3160 for (i = 0; i < adev->num_ip_blocks; i++) {
3161 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3162 continue;
3163 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3164 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3165 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3166 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3167
3168 r = adev->ip_blocks[i].version->funcs->resume(adev);
3169 if (r) {
3170 DRM_ERROR("resume of IP block <%s> failed %d\n",
3171 adev->ip_blocks[i].version->funcs->name, r);
3172 return r;
3173 }
3174 adev->ip_blocks[i].status.hw = true;
3175 }
3176 }
3177
3178 return 0;
3179 }
3180
3181 /**
3182 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3183 *
3184 * @adev: amdgpu_device pointer
3185 *
3186 * Second resume function for hardware IPs. The list of all the hardware
3187 * IPs that make up the asic is walked and the resume callbacks are run for
3188 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
3189 * functional state after a suspend and updates the software state as
3190 * necessary. This function is also used for restoring the GPU after a GPU
3191 * reset.
3192 * Returns 0 on success, negative error code on failure.
3193 */
3194 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3195 {
3196 int i, r;
3197
3198 for (i = 0; i < adev->num_ip_blocks; i++) {
3199 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3200 continue;
3201 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3202 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3203 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3204 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3205 continue;
3206 r = adev->ip_blocks[i].version->funcs->resume(adev);
3207 if (r) {
3208 DRM_ERROR("resume of IP block <%s> failed %d\n",
3209 adev->ip_blocks[i].version->funcs->name, r);
3210 return r;
3211 }
3212 adev->ip_blocks[i].status.hw = true;
3213
3214 if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3215 /* disable gfxoff for IP resume. The gfxoff will be re-enabled in
3216 * amdgpu_device_resume() after IP resume.
3217 */
3218 amdgpu_gfx_off_ctrl(adev, false);
3219 DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
3220 }
3221
3222 }
3223
3224 return 0;
3225 }
3226
3227 /**
3228 * amdgpu_device_ip_resume - run resume for hardware IPs
3229 *
3230 * @adev: amdgpu_device pointer
3231 *
3232 * Main resume function for hardware IPs. The hardware IPs
3233 * are split into two resume functions because they are
3234 * also used in recovering from a GPU reset and some additional
3235 * steps need to be taken between them. In this case (S3/S4) they are
3236 * run sequentially.
3237 * Returns 0 on success, negative error code on failure.
3238 */
3239 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3240 {
3241 int r;
3242
3243 r = amdgpu_amdkfd_resume_iommu(adev);
3244 if (r)
3245 return r;
3246
3247 r = amdgpu_device_ip_resume_phase1(adev);
3248 if (r)
3249 return r;
3250
3251 r = amdgpu_device_fw_loading(adev);
3252 if (r)
3253 return r;
3254
3255 r = amdgpu_device_ip_resume_phase2(adev);
3256
3257 return r;
3258 }
3259
3260 /**
3261 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3262 *
3263 * @adev: amdgpu_device pointer
3264 *
3265 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3266 */
3267 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3268 {
3269 if (amdgpu_sriov_vf(adev)) {
3270 if (adev->is_atom_fw) {
3271 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3272 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3273 } else {
3274 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3275 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3276 }
3277
3278 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3279 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3280 }
3281 }
3282
3283 /**
3284 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3285 *
3286 * @asic_type: AMD asic type
3287 *
3288 * Check if there is DC (new modesetting infrastructure) support for an asic.
3289 * returns true if DC has support, false if not.
3290 */
3291 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3292 {
3293 switch (asic_type) {
3294 #ifdef CONFIG_DRM_AMDGPU_SI
3295 case CHIP_HAINAN:
3296 #endif
3297 case CHIP_TOPAZ:
3298 /* chips with no display hardware */
3299 return false;
3300 #if defined(CONFIG_DRM_AMD_DC)
3301 case CHIP_TAHITI:
3302 case CHIP_PITCAIRN:
3303 case CHIP_VERDE:
3304 case CHIP_OLAND:
3305 /*
3306 * We have systems in the wild with these ASICs that require
3307 * LVDS and VGA support which is not supported with DC.
3308 *
3309 * Fallback to the non-DC driver here by default so as not to
3310 * cause regressions.
3311 */
3312 #if defined(CONFIG_DRM_AMD_DC_SI)
3313 return amdgpu_dc > 0;
3314 #else
3315 return false;
3316 #endif
3317 case CHIP_BONAIRE:
3318 case CHIP_KAVERI:
3319 case CHIP_KABINI:
3320 case CHIP_MULLINS:
3321 /*
3322 * We have systems in the wild with these ASICs that require
3323 * VGA support which is not supported with DC.
3324 *
3325 * Fallback to the non-DC driver here by default so as not to
3326 * cause regressions.
3327 */
3328 return amdgpu_dc > 0;
3329 default:
3330 return amdgpu_dc != 0;
3331 #else
3332 default:
3333 if (amdgpu_dc > 0)
3334 DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3335 "but isn't supported by ASIC, ignoring\n");
3336 return false;
3337 #endif
3338 }
3339 }
3340
3341 /**
3342 * amdgpu_device_has_dc_support - check if dc is supported
3343 *
3344 * @adev: amdgpu_device pointer
3345 *
3346 * Returns true for supported, false for not supported
3347 */
3348 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3349 {
3350 if (amdgpu_sriov_vf(adev) ||
3351 adev->enable_virtual_display ||
3352 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3353 return false;
3354
3355 return amdgpu_device_asic_has_dc_support(adev->asic_type);
3356 }
3357
3358 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3359 {
3360 struct amdgpu_device *adev =
3361 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3362 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3363
3364 /* It's a bug to not have a hive within this function */
3365 if (WARN_ON(!hive))
3366 return;
3367
3368 /*
3369 * Use task barrier to synchronize all xgmi reset works across the
3370 * hive. task_barrier_enter and task_barrier_exit will block
3371 * until all the threads running the xgmi reset works reach
3372 * those points. task_barrier_full will do both blocks.
3373 */
3374 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3375
3376 task_barrier_enter(&hive->tb);
3377 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3378
3379 if (adev->asic_reset_res)
3380 goto fail;
3381
3382 task_barrier_exit(&hive->tb);
3383 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3384
3385 if (adev->asic_reset_res)
3386 goto fail;
3387
3388 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3389 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3390 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3391 } else {
3392
3393 task_barrier_full(&hive->tb);
3394 adev->asic_reset_res = amdgpu_asic_reset(adev);
3395 }
3396
3397 fail:
3398 if (adev->asic_reset_res)
3399 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3400 adev->asic_reset_res, adev_to_drm(adev)->unique);
3401 amdgpu_put_xgmi_hive(hive);
3402 }
3403
3404 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3405 {
3406 char *input = amdgpu_lockup_timeout;
3407 char *timeout_setting = NULL;
3408 int index = 0;
3409 long timeout;
3410 int ret = 0;
3411
3412 /*
3413 * By default the timeout for non-compute jobs is 10000 ms
3414 * and 60000 ms for compute jobs.
3415 * In SR-IOV or passthrough mode, the timeout for compute
3416 * jobs is 60000 ms by default.
3417 */
3418 adev->gfx_timeout = msecs_to_jiffies(10000);
3419 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3420 if (amdgpu_sriov_vf(adev))
3421 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3422 msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3423 else
3424 adev->compute_timeout = msecs_to_jiffies(60000);
3425
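/* lockup_timeout is a comma-separated list applied in the order gfx,compute,sdma,video; 0 keeps the default and a negative value disables the timeout */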
3426 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3427 while ((timeout_setting = strsep(&input, ",")) &&
3428 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3429 ret = kstrtol(timeout_setting, 0, &timeout);
3430 if (ret)
3431 return ret;
3432
3433 if (timeout == 0) {
3434 index++;
3435 continue;
3436 } else if (timeout < 0) {
3437 timeout = MAX_SCHEDULE_TIMEOUT;
3438 dev_warn(adev->dev, "lockup timeout disabled");
3439 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3440 } else {
3441 timeout = msecs_to_jiffies(timeout);
3442 }
3443
3444 switch (index++) {
3445 case 0:
3446 adev->gfx_timeout = timeout;
3447 break;
3448 case 1:
3449 adev->compute_timeout = timeout;
3450 break;
3451 case 2:
3452 adev->sdma_timeout = timeout;
3453 break;
3454 case 3:
3455 adev->video_timeout = timeout;
3456 break;
3457 default:
3458 break;
3459 }
3460 }
3461 /*
3462 * There is only one value specified and
3463 * it should apply to all non-compute jobs.
3464 */
3465 if (index == 1) {
3466 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3467 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3468 adev->compute_timeout = adev->gfx_timeout;
3469 }
3470 }
3471
3472 return ret;
3473 }
3474
3475 /**
3476 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3477 *
3478 * @adev: amdgpu_device pointer
3479 *
3480 * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3481 */
3482 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3483 {
3484 struct iommu_domain *domain;
3485
3486 domain = iommu_get_domain_for_dev(adev->dev);
3487 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3488 adev->ram_is_direct_mapped = true;
3489 }
3490
3491 static const struct attribute *amdgpu_dev_attributes[] = {
3492 &dev_attr_product_name.attr,
3493 &dev_attr_product_number.attr,
3494 &dev_attr_serial_number.attr,
3495 &dev_attr_pcie_replay_count.attr,
3496 NULL
3497 };
3498
3499 /**
3500 * amdgpu_device_init - initialize the driver
3501 *
3502 * @adev: amdgpu_device pointer
3503 * @flags: driver flags
3504 *
3505 * Initializes the driver info and hw (all asics).
3506 * Returns 0 for success or an error on failure.
3507 * Called at driver startup.
3508 */
3509 int amdgpu_device_init(struct amdgpu_device *adev,
3510 uint32_t flags)
3511 {
3512 struct drm_device *ddev = adev_to_drm(adev);
3513 struct pci_dev *pdev = adev->pdev;
3514 int r, i;
3515 bool px = false;
3516 u32 max_MBps;
3517
3518 adev->shutdown = false;
3519 adev->flags = flags;
3520
3521 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3522 adev->asic_type = amdgpu_force_asic_type;
3523 else
3524 adev->asic_type = flags & AMD_ASIC_MASK;
3525
3526 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3527 if (amdgpu_emu_mode == 1)
3528 adev->usec_timeout *= 10;
3529 adev->gmc.gart_size = 512 * 1024 * 1024;
3530 adev->accel_working = false;
3531 adev->num_rings = 0;
3532 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3533 adev->mman.buffer_funcs = NULL;
3534 adev->mman.buffer_funcs_ring = NULL;
3535 adev->vm_manager.vm_pte_funcs = NULL;
3536 adev->vm_manager.vm_pte_num_scheds = 0;
3537 adev->gmc.gmc_funcs = NULL;
3538 adev->harvest_ip_mask = 0x0;
3539 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3540 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3541
3542 adev->smc_rreg = &amdgpu_invalid_rreg;
3543 adev->smc_wreg = &amdgpu_invalid_wreg;
3544 adev->pcie_rreg = &amdgpu_invalid_rreg;
3545 adev->pcie_wreg = &amdgpu_invalid_wreg;
3546 adev->pciep_rreg = &amdgpu_invalid_rreg;
3547 adev->pciep_wreg = &amdgpu_invalid_wreg;
3548 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3549 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3550 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3551 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3552 adev->didt_rreg = &amdgpu_invalid_rreg;
3553 adev->didt_wreg = &amdgpu_invalid_wreg;
3554 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3555 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3556 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3557 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3558
3559 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3560 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3561 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3562
3563 /* mutex initialization is all done here so we
3564 * can call these functions again without locking issues */
3565 mutex_init(&adev->firmware.mutex);
3566 mutex_init(&adev->pm.mutex);
3567 mutex_init(&adev->gfx.gpu_clock_mutex);
3568 mutex_init(&adev->srbm_mutex);
3569 mutex_init(&adev->gfx.pipe_reserve_mutex);
3570 mutex_init(&adev->gfx.gfx_off_mutex);
3571 mutex_init(&adev->grbm_idx_mutex);
3572 mutex_init(&adev->mn_lock);
3573 mutex_init(&adev->virt.vf_errors.lock);
3574 hash_init(adev->mn_hash);
3575 mutex_init(&adev->psp.mutex);
3576 mutex_init(&adev->notifier_lock);
3577 mutex_init(&adev->pm.stable_pstate_ctx_lock);
3578 mutex_init(&adev->benchmark_mutex);
3579
3580 amdgpu_device_init_apu_flags(adev);
3581
3582 r = amdgpu_device_check_arguments(adev);
3583 if (r)
3584 return r;
3585
3586 spin_lock_init(&adev->mmio_idx_lock);
3587 spin_lock_init(&adev->smc_idx_lock);
3588 spin_lock_init(&adev->pcie_idx_lock);
3589 spin_lock_init(&adev->uvd_ctx_idx_lock);
3590 spin_lock_init(&adev->didt_idx_lock);
3591 spin_lock_init(&adev->gc_cac_idx_lock);
3592 spin_lock_init(&adev->se_cac_idx_lock);
3593 spin_lock_init(&adev->audio_endpt_idx_lock);
3594 spin_lock_init(&adev->mm_stats.lock);
3595
3596 INIT_LIST_HEAD(&adev->shadow_list);
3597 mutex_init(&adev->shadow_list_lock);
3598
3599 INIT_LIST_HEAD(&adev->reset_list);
3600
3601 INIT_LIST_HEAD(&adev->ras_list);
3602
3603 INIT_DELAYED_WORK(&adev->delayed_init_work,
3604 amdgpu_device_delayed_init_work_handler);
3605 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3606 amdgpu_device_delay_enable_gfx_off);
3607
3608 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3609
3610 adev->gfx.gfx_off_req_count = 1;
3611 adev->gfx.gfx_off_residency = 0;
3612 adev->gfx.gfx_off_entrycount = 0;
3613 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3614
3615 atomic_set(&adev->throttling_logging_enabled, 1);
3616 /*
3617 * If throttling continues, logging will be performed every minute
3618 * to avoid log flooding. "-1" is subtracted since the thermal
3619 * throttling interrupt comes every second. Thus, the total logging
3620 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3621 * for the throttling interrupt) = 60 seconds.
3622 */
3623 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3624 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3625
3626 /* Registers mapping */
3627 /* TODO: block userspace mapping of io register */
3628 if (adev->asic_type >= CHIP_BONAIRE) {
3629 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3630 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3631 } else {
3632 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3633 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3634 }
3635
3636 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3637 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3638
3639 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3640 if (adev->rmmio == NULL) {
3641 return -ENOMEM;
3642 }
3643 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3644 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3645
3646 amdgpu_device_get_pcie_info(adev);
3647
3648 if (amdgpu_mcbp)
3649 DRM_INFO("MCBP is enabled\n");
3650
3651 /*
3652 * The reset domain needs to be present early, before any XGMI hive is
3653 * discovered and initialized, so the reset semaphore and in_gpu_reset
3654 * flag can be used early during init and before any call to RREG32.
3655 */
3656 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3657 if (!adev->reset_domain)
3658 return -ENOMEM;
3659
3660 /* detect hw virtualization here */
3661 amdgpu_detect_virtualization(adev);
3662
3663 r = amdgpu_device_get_job_timeout_settings(adev);
3664 if (r) {
3665 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3666 return r;
3667 }
3668
3669 /* early init functions */
3670 r = amdgpu_device_ip_early_init(adev);
3671 if (r)
3672 return r;
3673
3674 /* Enable TMZ based on IP_VERSION */
3675 amdgpu_gmc_tmz_set(adev);
3676
3677 amdgpu_gmc_noretry_set(adev);
3678 /* Need to get xgmi info early to decide the reset behavior */
3679 if (adev->gmc.xgmi.supported) {
3680 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3681 if (r)
3682 return r;
3683 }
3684
3685 /* enable PCIE atomic ops */
3686 if (amdgpu_sriov_vf(adev))
3687 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3688 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3689 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3690 else
3691 adev->have_atomics_support =
3692 !pci_enable_atomic_ops_to_root(adev->pdev,
3693 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3694 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3695 if (!adev->have_atomics_support)
3696 dev_info(adev->dev, "PCIE atomic ops is not supported\n");
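/*
 * Clarifying note: on bare metal pci_enable_atomic_ops_to_root() only
 * succeeds when every bridge up to the root port routes AtomicOps and
 * the root complex can complete 32- and 64-bit atomics, so
 * have_atomics_support reflects whole-path support rather than just the
 * GPU's own capability.  On an SR-IOV VF the equivalent information is
 * reported by the PF through the pf2vf structure instead.
 */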
3697
3698 /* doorbell bar mapping and doorbell index init */
3699 amdgpu_device_doorbell_init(adev);
3700
3701 if (amdgpu_emu_mode == 1) {
3702 /* post the asic on emulation mode */
3703 emu_soc_asic_init(adev);
3704 goto fence_driver_init;
3705 }
3706
3707 amdgpu_reset_init(adev);
3708
3709 /* detect if we are with an SRIOV vbios */
3710 amdgpu_device_detect_sriov_bios(adev);
3711
3712 /* check if we need to reset the asic
3713 * E.g., driver was not cleanly unloaded previously, etc.
3714 */
3715 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3716 if (adev->gmc.xgmi.num_physical_nodes) {
3717 dev_info(adev->dev, "Pending hive reset.\n");
3718 adev->gmc.xgmi.pending_reset = true;
3719 /* Only need to init necessary block for SMU to handle the reset */
3720 for (i = 0; i < adev->num_ip_blocks; i++) {
3721 if (!adev->ip_blocks[i].status.valid)
3722 continue;
3723 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3724 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3725 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3726 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3727 DRM_DEBUG("IP %s disabled for hw_init.\n",
3728 adev->ip_blocks[i].version->funcs->name);
3729 adev->ip_blocks[i].status.hw = true;
3730 }
3731 }
3732 } else {
3733 r = amdgpu_asic_reset(adev);
3734 if (r) {
3735 dev_err(adev->dev, "asic reset on init failed\n");
3736 goto failed;
3737 }
3738 }
3739 }
3740
3741 pci_enable_pcie_error_reporting(adev->pdev);
3742
3743 /* Post card if necessary */
3744 if (amdgpu_device_need_post(adev)) {
3745 if (!adev->bios) {
3746 dev_err(adev->dev, "no vBIOS found\n");
3747 r = -EINVAL;
3748 goto failed;
3749 }
3750 DRM_INFO("GPU posting now...\n");
3751 r = amdgpu_device_asic_init(adev);
3752 if (r) {
3753 dev_err(adev->dev, "gpu post error!\n");
3754 goto failed;
3755 }
3756 }
3757
3758 if (adev->is_atom_fw) {
3759 /* Initialize clocks */
3760 r = amdgpu_atomfirmware_get_clock_info(adev);
3761 if (r) {
3762 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3763 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3764 goto failed;
3765 }
3766 } else {
3767 /* Initialize clocks */
3768 r = amdgpu_atombios_get_clock_info(adev);
3769 if (r) {
3770 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3771 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3772 goto failed;
3773 }
3774 /* init i2c buses */
3775 if (!amdgpu_device_has_dc_support(adev))
3776 amdgpu_atombios_i2c_init(adev);
3777 }
3778
3779 fence_driver_init:
3780 /* Fence driver */
3781 r = amdgpu_fence_driver_sw_init(adev);
3782 if (r) {
3783 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3784 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3785 goto failed;
3786 }
3787
3788 /* init the mode config */
3789 drm_mode_config_init(adev_to_drm(adev));
3790
3791 r = amdgpu_device_ip_init(adev);
3792 if (r) {
3793 /* failed in exclusive mode due to timeout */
3794 if (amdgpu_sriov_vf(adev) &&
3795 !amdgpu_sriov_runtime(adev) &&
3796 amdgpu_virt_mmio_blocked(adev) &&
3797 !amdgpu_virt_wait_reset(adev)) {
3798 dev_err(adev->dev, "VF exclusive mode timeout\n");
3799 /* Don't send request since VF is inactive. */
3800 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3801 adev->virt.ops = NULL;
3802 r = -EAGAIN;
3803 goto release_ras_con;
3804 }
3805 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3806 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3807 goto release_ras_con;
3808 }
3809
3810 amdgpu_fence_driver_hw_init(adev);
3811
3812 dev_info(adev->dev,
3813 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3814 adev->gfx.config.max_shader_engines,
3815 adev->gfx.config.max_sh_per_se,
3816 adev->gfx.config.max_cu_per_sh,
3817 adev->gfx.cu_info.number);
3818
3819 adev->accel_working = true;
3820
3821 amdgpu_vm_check_compute_bug(adev);
3822
3823 /* Initialize the buffer migration limit. */
3824 if (amdgpu_moverate >= 0)
3825 max_MBps = amdgpu_moverate;
3826 else
3827 max_MBps = 8; /* Allow 8 MB/s. */
3828 /* Get a log2 for easy divisions. */
3829 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
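/*
 * Worked example (illustrative): with the default max_MBps of 8,
 * ilog2(max(1u, 8)) == 3, so later buffer-migration accounting can use
 * a shift by log2_max_MBps instead of dividing by the MB/s budget.
 */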
3830
3831 r = amdgpu_pm_sysfs_init(adev);
3832 if (r) {
3833 adev->pm_sysfs_en = false;
3834 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3835 } else
3836 adev->pm_sysfs_en = true;
3837
3838 r = amdgpu_ucode_sysfs_init(adev);
3839 if (r) {
3840 adev->ucode_sysfs_en = false;
3841 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3842 } else
3843 adev->ucode_sysfs_en = true;
3844
3845 r = amdgpu_psp_sysfs_init(adev);
3846 if (r) {
3847 adev->psp_sysfs_en = false;
3848 if (!amdgpu_sriov_vf(adev))
3849 DRM_ERROR("Creating psp sysfs failed\n");
3850 } else
3851 adev->psp_sysfs_en = true;
3852
3853 /*
3854 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3855 * Otherwise the mgpu fan boost feature will be skipped because the
3856 * gpu instance count would be too low.
3857 */
3858 amdgpu_register_gpu_instance(adev);
3859
3860 /* enable clockgating, etc. after ib tests, etc. since some blocks require
3861 * explicit gating rather than handling it automatically.
3862 */
3863 if (!adev->gmc.xgmi.pending_reset) {
3864 r = amdgpu_device_ip_late_init(adev);
3865 if (r) {
3866 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3867 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3868 goto release_ras_con;
3869 }
3870 /* must succeed. */
3871 amdgpu_ras_resume(adev);
3872 queue_delayed_work(system_wq, &adev->delayed_init_work,
3873 msecs_to_jiffies(AMDGPU_RESUME_MS));
3874 }
3875
3876 if (amdgpu_sriov_vf(adev))
3877 flush_delayed_work(&adev->delayed_init_work);
3878
3879 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3880 if (r)
3881 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3882
3883 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3884 r = amdgpu_pmu_init(adev);
3885 if (r)
3886 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3887
3888 /* Keep the stored PCI config space at hand for restore after a sudden PCI error */
3889 if (amdgpu_device_cache_pci_state(adev->pdev))
3890 pci_restore_state(pdev);
3891
3892 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3893 /* this will fail for cards that aren't VGA class devices, just
3894 * ignore it */
3895 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3896 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3897
3898 if (amdgpu_device_supports_px(ddev)) {
3899 px = true;
3900 vga_switcheroo_register_client(adev->pdev,
3901 &amdgpu_switcheroo_ops, px);
3902 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3903 }
3904
3905 if (adev->gmc.xgmi.pending_reset)
3906 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3907 msecs_to_jiffies(AMDGPU_RESUME_MS));
3908
3909 amdgpu_device_check_iommu_direct_map(adev);
3910
3911 return 0;
3912
3913 release_ras_con:
3914 amdgpu_release_ras_context(adev);
3915
3916 failed:
3917 amdgpu_vf_error_trans_all(adev);
3918
3919 return r;
3920 }
3921
3922 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3923 {
3924
3925 /* Clear all CPU mappings pointing to this device */
3926 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3927
3928 /* Unmap all mapped bars - Doorbell, registers and VRAM */
3929 amdgpu_device_doorbell_fini(adev);
3930
3931 iounmap(adev->rmmio);
3932 adev->rmmio = NULL;
3933 if (adev->mman.aper_base_kaddr)
3934 iounmap(adev->mman.aper_base_kaddr);
3935 adev->mman.aper_base_kaddr = NULL;
3936
3937 /* Memory manager related */
3938 if (!adev->gmc.xgmi.connected_to_cpu) {
3939 arch_phys_wc_del(adev->gmc.vram_mtrr);
3940 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3941 }
3942 }
3943
3944 /**
3945 * amdgpu_device_fini_hw - tear down the driver
3946 *
3947 * @adev: amdgpu_device pointer
3948 *
3949 * Tear down the driver info (all asics).
3950 * Called at driver shutdown.
3951 */
3952 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3953 {
3954 dev_info(adev->dev, "amdgpu: finishing device.\n");
3955 flush_delayed_work(&adev->delayed_init_work);
3956 adev->shutdown = true;
3957
3958 /* make sure the IB test has finished before entering exclusive mode
3959 * to avoid preemption during the IB test
3960 */
3961 if (amdgpu_sriov_vf(adev)) {
3962 amdgpu_virt_request_full_gpu(adev, false);
3963 amdgpu_virt_fini_data_exchange(adev);
3964 }
3965
3966 /* disable all interrupts */
3967 amdgpu_irq_disable_all(adev);
3968 if (adev->mode_info.mode_config_initialized) {
3969 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3970 drm_helper_force_disable_all(adev_to_drm(adev));
3971 else
3972 drm_atomic_helper_shutdown(adev_to_drm(adev));
3973 }
3974 amdgpu_fence_driver_hw_fini(adev);
3975
3976 if (adev->mman.initialized) {
3977 flush_delayed_work(&adev->mman.bdev.wq);
3978 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3979 }
3980
3981 if (adev->pm_sysfs_en)
3982 amdgpu_pm_sysfs_fini(adev);
3983 if (adev->ucode_sysfs_en)
3984 amdgpu_ucode_sysfs_fini(adev);
3985 if (adev->psp_sysfs_en)
3986 amdgpu_psp_sysfs_fini(adev);
3987 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3988
3989 /* RAS features must be disabled before hw fini */
3990 amdgpu_ras_pre_fini(adev);
3991
3992 amdgpu_device_ip_fini_early(adev);
3993
3994 amdgpu_irq_fini_hw(adev);
3995
3996 if (adev->mman.initialized)
3997 ttm_device_clear_dma_mappings(&adev->mman.bdev);
3998
3999 amdgpu_gart_dummy_page_fini(adev);
4000
4001 amdgpu_device_unmap_mmio(adev);
4002
4003 }
4004
4005 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4006 {
4007 int idx;
4008
4009 amdgpu_fence_driver_sw_fini(adev);
4010 amdgpu_device_ip_fini(adev);
4011 release_firmware(adev->firmware.gpu_info_fw);
4012 adev->firmware.gpu_info_fw = NULL;
4013 adev->accel_working = false;
4014 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4015
4016 amdgpu_reset_fini(adev);
4017
4018 /* free i2c buses */
4019 if (!amdgpu_device_has_dc_support(adev))
4020 amdgpu_i2c_fini(adev);
4021
4022 if (amdgpu_emu_mode != 1)
4023 amdgpu_atombios_fini(adev);
4024
4025 kfree(adev->bios);
4026 adev->bios = NULL;
4027 if (amdgpu_device_supports_px(adev_to_drm(adev))) {
4028 vga_switcheroo_unregister_client(adev->pdev);
4029 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4030 }
4031 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4032 vga_client_unregister(adev->pdev);
4033
4034 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4035
4036 iounmap(adev->rmmio);
4037 adev->rmmio = NULL;
4038 amdgpu_device_doorbell_fini(adev);
4039 drm_dev_exit(idx);
4040 }
4041
4042 if (IS_ENABLED(CONFIG_PERF_EVENTS))
4043 amdgpu_pmu_fini(adev);
4044 if (adev->mman.discovery_bin)
4045 amdgpu_discovery_fini(adev);
4046
4047 amdgpu_reset_put_reset_domain(adev->reset_domain);
4048 adev->reset_domain = NULL;
4049
4050 kfree(adev->pci_state);
4051
4052 }
4053
4054 /**
4055 * amdgpu_device_evict_resources - evict device resources
4056 * @adev: amdgpu device object
4057 *
4058 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4059 * of the vram memory type. Mainly used for evicting device resources
4060 * at suspend time.
4061 *
4062 */
4063 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4064 {
4065 int ret;
4066
4067 /* No need to evict vram on APUs for suspend to ram or s2idle */
4068 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4069 return 0;
4070
4071 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4072 if (ret)
4073 DRM_WARN("evicting device resources failed\n");
4074 return ret;
4075 }
4076
4077 /*
4078 * Suspend & resume.
4079 */
4080 /**
4081 * amdgpu_device_suspend - initiate device suspend
4082 *
4083 * @dev: drm dev pointer
4084 * @fbcon: notify the fbdev of suspend
4085 *
4086 * Puts the hw in the suspend state (all asics).
4087 * Returns 0 for success or an error on failure.
4088 * Called at driver suspend.
4089 */
4090 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4091 {
4092 struct amdgpu_device *adev = drm_to_adev(dev);
4093 int r = 0;
4094
4095 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4096 return 0;
4097
4098 adev->in_suspend = true;
4099
4100 if (amdgpu_sriov_vf(adev)) {
4101 amdgpu_virt_fini_data_exchange(adev);
4102 r = amdgpu_virt_request_full_gpu(adev, false);
4103 if (r)
4104 return r;
4105 }
4106
4107 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4108 DRM_WARN("smart shift update failed\n");
4109
4110 drm_kms_helper_poll_disable(dev);
4111
4112 if (fbcon)
4113 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4114
4115 cancel_delayed_work_sync(&adev->delayed_init_work);
4116
4117 amdgpu_ras_suspend(adev);
4118
4119 amdgpu_device_ip_suspend_phase1(adev);
4120
4121 if (!adev->in_s0ix)
4122 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4123
4124 r = amdgpu_device_evict_resources(adev);
4125 if (r)
4126 return r;
4127
4128 amdgpu_fence_driver_hw_fini(adev);
4129
4130 amdgpu_device_ip_suspend_phase2(adev);
4131
4132 if (amdgpu_sriov_vf(adev))
4133 amdgpu_virt_release_full_gpu(adev, false);
4134
4135 return 0;
4136 }
4137
4138 /**
4139 * amdgpu_device_resume - initiate device resume
4140 *
4141 * @dev: drm dev pointer
4142 * @fbcon: notify the fbdev of resume
4143 *
4144 * Bring the hw back to operating state (all asics).
4145 * Returns 0 for success or an error on failure.
4146 * Called at driver resume.
4147 */
4148 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4149 {
4150 struct amdgpu_device *adev = drm_to_adev(dev);
4151 int r = 0;
4152
4153 if (amdgpu_sriov_vf(adev)) {
4154 r = amdgpu_virt_request_full_gpu(adev, true);
4155 if (r)
4156 return r;
4157 }
4158
4159 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4160 return 0;
4161
4162 if (adev->in_s0ix)
4163 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4164
4165 /* post card */
4166 if (amdgpu_device_need_post(adev)) {
4167 r = amdgpu_device_asic_init(adev);
4168 if (r)
4169 dev_err(adev->dev, "amdgpu asic init failed\n");
4170 }
4171
4172 r = amdgpu_device_ip_resume(adev);
4173
4174 /* no matter what r is, always need to properly release full GPU */
4175 if (amdgpu_sriov_vf(adev)) {
4176 amdgpu_virt_init_data_exchange(adev);
4177 amdgpu_virt_release_full_gpu(adev, true);
4178 }
4179
4180 if (r) {
4181 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4182 return r;
4183 }
4184 amdgpu_fence_driver_hw_init(adev);
4185
4186 r = amdgpu_device_ip_late_init(adev);
4187 if (r)
4188 return r;
4189
4190 queue_delayed_work(system_wq, &adev->delayed_init_work,
4191 msecs_to_jiffies(AMDGPU_RESUME_MS));
4192
4193 if (!adev->in_s0ix) {
4194 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4195 if (r)
4196 return r;
4197 }
4198
4199 /* Make sure IB tests flushed */
4200 flush_delayed_work(&adev->delayed_init_work);
4201
4202 if (adev->in_s0ix) {
4203 /* re-enable gfxoff here since it was disabled for IP resume
4204 * in amdgpu_device_ip_resume_phase2().
4205 */
4206 amdgpu_gfx_off_ctrl(adev, true);
4207 DRM_DEBUG("will enable gfxoff for the mission mode\n");
4208 }
4209 if (fbcon)
4210 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4211
4212 drm_kms_helper_poll_enable(dev);
4213
4214 amdgpu_ras_resume(adev);
4215
4216 /*
4217 * Most of the connector probing functions try to acquire runtime pm
4218 * refs to ensure that the GPU is powered on when connector polling is
4219 * performed. Since we're calling this from a runtime PM callback,
4220 * trying to acquire rpm refs will cause us to deadlock.
4221 *
4222 * Since we're guaranteed to be holding the rpm lock, it's safe to
4223 * temporarily disable the rpm helpers so this doesn't deadlock us.
4224 */
4225 #ifdef CONFIG_PM
4226 dev->dev->power.disable_depth++;
4227 #endif
4228 if (!amdgpu_device_has_dc_support(adev))
4229 drm_helper_hpd_irq_event(dev);
4230 else
4231 drm_kms_helper_hotplug_event(dev);
4232 #ifdef CONFIG_PM
4233 dev->dev->power.disable_depth--;
4234 #endif
4235 adev->in_suspend = false;
4236
4237 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4238 DRM_WARN("smart shift update failed\n");
4239
4240 return 0;
4241 }
4242
4243 /**
4244 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4245 *
4246 * @adev: amdgpu_device pointer
4247 *
4248 * The list of all the hardware IPs that make up the asic is walked and
4249 * the check_soft_reset callbacks are run. check_soft_reset determines
4250 * if the asic is still hung or not.
4251 * Returns true if any of the IPs are still in a hung state, false if not.
4252 */
4253 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4254 {
4255 int i;
4256 bool asic_hang = false;
4257
4258 if (amdgpu_sriov_vf(adev))
4259 return true;
4260
4261 if (amdgpu_asic_need_full_reset(adev))
4262 return true;
4263
4264 for (i = 0; i < adev->num_ip_blocks; i++) {
4265 if (!adev->ip_blocks[i].status.valid)
4266 continue;
4267 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4268 adev->ip_blocks[i].status.hang =
4269 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4270 if (adev->ip_blocks[i].status.hang) {
4271 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4272 asic_hang = true;
4273 }
4274 }
4275 return asic_hang;
4276 }
4277
4278 /**
4279 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4280 *
4281 * @adev: amdgpu_device pointer
4282 *
4283 * The list of all the hardware IPs that make up the asic is walked and the
4284 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
4285 * handles any IP specific hardware or software state changes that are
4286 * necessary for a soft reset to succeed.
4287 * Returns 0 on success, negative error code on failure.
4288 */
4289 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4290 {
4291 int i, r = 0;
4292
4293 for (i = 0; i < adev->num_ip_blocks; i++) {
4294 if (!adev->ip_blocks[i].status.valid)
4295 continue;
4296 if (adev->ip_blocks[i].status.hang &&
4297 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4298 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4299 if (r)
4300 return r;
4301 }
4302 }
4303
4304 return 0;
4305 }
4306
4307 /**
4308 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4309 *
4310 * @adev: amdgpu_device pointer
4311 *
4312 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
4313 * reset is necessary to recover.
4314 * Returns true if a full asic reset is required, false if not.
4315 */
4316 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4317 {
4318 int i;
4319
4320 if (amdgpu_asic_need_full_reset(adev))
4321 return true;
4322
4323 for (i = 0; i < adev->num_ip_blocks; i++) {
4324 if (!adev->ip_blocks[i].status.valid)
4325 continue;
4326 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4327 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4328 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4329 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4330 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4331 if (adev->ip_blocks[i].status.hang) {
4332 dev_info(adev->dev, "Some block need full reset!\n");
4333 return true;
4334 }
4335 }
4336 }
4337 return false;
4338 }
4339
4340 /**
4341 * amdgpu_device_ip_soft_reset - do a soft reset
4342 *
4343 * @adev: amdgpu_device pointer
4344 *
4345 * The list of all the hardware IPs that make up the asic is walked and the
4346 * soft_reset callbacks are run if the block is hung. soft_reset handles any
4347 * IP specific hardware or software state changes that are necessary to soft
4348 * reset the IP.
4349 * Returns 0 on success, negative error code on failure.
4350 */
4351 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4352 {
4353 int i, r = 0;
4354
4355 for (i = 0; i < adev->num_ip_blocks; i++) {
4356 if (!adev->ip_blocks[i].status.valid)
4357 continue;
4358 if (adev->ip_blocks[i].status.hang &&
4359 adev->ip_blocks[i].version->funcs->soft_reset) {
4360 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4361 if (r)
4362 return r;
4363 }
4364 }
4365
4366 return 0;
4367 }
4368
4369 /**
4370 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4371 *
4372 * @adev: amdgpu_device pointer
4373 *
4374 * The list of all the hardware IPs that make up the asic is walked and the
4375 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
4376 * handles any IP specific hardware or software state changes that are
4377 * necessary after the IP has been soft reset.
4378 * Returns 0 on success, negative error code on failure.
4379 */
4380 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4381 {
4382 int i, r = 0;
4383
4384 for (i = 0; i < adev->num_ip_blocks; i++) {
4385 if (!adev->ip_blocks[i].status.valid)
4386 continue;
4387 if (adev->ip_blocks[i].status.hang &&
4388 adev->ip_blocks[i].version->funcs->post_soft_reset)
4389 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4390 if (r)
4391 return r;
4392 }
4393
4394 return 0;
4395 }
4396
4397 /**
4398 * amdgpu_device_recover_vram - Recover some VRAM contents
4399 *
4400 * @adev: amdgpu_device pointer
4401 *
4402 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
4403 * restore things like GPUVM page tables after a GPU reset where
4404 * the contents of VRAM might be lost.
4405 *
4406 * Returns:
4407 * 0 on success, negative error code on failure.
4408 */
4409 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4410 {
4411 struct dma_fence *fence = NULL, *next = NULL;
4412 struct amdgpu_bo *shadow;
4413 struct amdgpu_bo_vm *vmbo;
4414 long r = 1, tmo;
4415
4416 if (amdgpu_sriov_runtime(adev))
4417 tmo = msecs_to_jiffies(8000);
4418 else
4419 tmo = msecs_to_jiffies(100);
4420
4421 dev_info(adev->dev, "recover vram bo from shadow start\n");
4422 mutex_lock(&adev->shadow_list_lock);
4423 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4424 shadow = &vmbo->bo;
4425 /* No need to recover an evicted BO */
4426 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4427 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4428 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4429 continue;
4430
4431 r = amdgpu_bo_restore_shadow(shadow, &next);
4432 if (r)
4433 break;
4434
4435 if (fence) {
4436 tmo = dma_fence_wait_timeout(fence, false, tmo);
4437 dma_fence_put(fence);
4438 fence = next;
4439 if (tmo == 0) {
4440 r = -ETIMEDOUT;
4441 break;
4442 } else if (tmo < 0) {
4443 r = tmo;
4444 break;
4445 }
4446 } else {
4447 fence = next;
4448 }
4449 }
4450 mutex_unlock(&adev->shadow_list_lock);
4451
4452 if (fence)
4453 tmo = dma_fence_wait_timeout(fence, false, tmo);
4454 dma_fence_put(fence);
4455
4456 if (r < 0 || tmo <= 0) {
4457 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4458 return -EIO;
4459 }
4460
4461 dev_info(adev->dev, "recover vram bo from shadow done\n");
4462 return 0;
4463 }
4464
4465
4466 /**
4467 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4468 *
4469 * @adev: amdgpu_device pointer
4470 * @from_hypervisor: request from hypervisor
4471 *
4472 * Do a VF FLR and reinitialize the ASIC.
4473 * Returns 0 on success, otherwise an error code.
4474 */
4475 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4476 bool from_hypervisor)
4477 {
4478 int r;
4479 struct amdgpu_hive_info *hive = NULL;
4480 int retry_limit = 0;
4481
4482 retry:
4483 amdgpu_amdkfd_pre_reset(adev);
4484
4485 if (from_hypervisor)
4486 r = amdgpu_virt_request_full_gpu(adev, true);
4487 else
4488 r = amdgpu_virt_reset_gpu(adev);
4489 if (r)
4490 return r;
4491
4492 /* Resume IP prior to SMC */
4493 r = amdgpu_device_ip_reinit_early_sriov(adev);
4494 if (r)
4495 goto error;
4496
4497 amdgpu_virt_init_data_exchange(adev);
4498
4499 r = amdgpu_device_fw_loading(adev);
4500 if (r)
4501 return r;
4502
4503 /* now we are okay to resume SMC/CP/SDMA */
4504 r = amdgpu_device_ip_reinit_late_sriov(adev);
4505 if (r)
4506 goto error;
4507
4508 hive = amdgpu_get_xgmi_hive(adev);
4509 /* Update PSP FW topology after reset */
4510 if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4511 r = amdgpu_xgmi_update_topology(hive, adev);
4512
4513 if (hive)
4514 amdgpu_put_xgmi_hive(hive);
4515
4516 if (!r) {
4517 amdgpu_irq_gpu_reset_resume_helper(adev);
4518 r = amdgpu_ib_ring_tests(adev);
4519
4520 amdgpu_amdkfd_post_reset(adev);
4521 }
4522
4523 error:
4524 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4525 amdgpu_inc_vram_lost(adev);
4526 r = amdgpu_device_recover_vram(adev);
4527 }
4528 amdgpu_virt_release_full_gpu(adev, true);
4529
4530 if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4531 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4532 retry_limit++;
4533 goto retry;
4534 } else
4535 DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4536 }
4537
4538 return r;
4539 }
4540
4541 /**
4542 * amdgpu_device_has_job_running - check if there is any job in mirror list
4543 *
4544 * @adev: amdgpu_device pointer
4545 *
4546 * check if there is any job in mirror list
4547 */
4548 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4549 {
4550 int i;
4551 struct drm_sched_job *job;
4552
4553 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4554 struct amdgpu_ring *ring = adev->rings[i];
4555
4556 if (!ring || !ring->sched.thread)
4557 continue;
4558
4559 spin_lock(&ring->sched.job_list_lock);
4560 job = list_first_entry_or_null(&ring->sched.pending_list,
4561 struct drm_sched_job, list);
4562 spin_unlock(&ring->sched.job_list_lock);
4563 if (job)
4564 return true;
4565 }
4566 return false;
4567 }
4568
4569 /**
4570 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4571 *
4572 * @adev: amdgpu_device pointer
4573 *
4574 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4575 * a hung GPU.
4576 */
4577 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4578 {
4579
4580 if (amdgpu_gpu_recovery == 0)
4581 goto disabled;
4582
4583 if (!amdgpu_device_ip_check_soft_reset(adev)) {
4584 dev_info(adev->dev,"Timeout, but no hardware hang detected.\n");
4585 return false;
4586 }
4587
4588 if (amdgpu_sriov_vf(adev))
4589 return true;
4590
4591 if (amdgpu_gpu_recovery == -1) {
4592 switch (adev->asic_type) {
4593 #ifdef CONFIG_DRM_AMDGPU_SI
4594 case CHIP_VERDE:
4595 case CHIP_TAHITI:
4596 case CHIP_PITCAIRN:
4597 case CHIP_OLAND:
4598 case CHIP_HAINAN:
4599 #endif
4600 #ifdef CONFIG_DRM_AMDGPU_CIK
4601 case CHIP_KAVERI:
4602 case CHIP_KABINI:
4603 case CHIP_MULLINS:
4604 #endif
4605 case CHIP_CARRIZO:
4606 case CHIP_STONEY:
4607 case CHIP_CYAN_SKILLFISH:
4608 goto disabled;
4609 default:
4610 break;
4611 }
4612 }
4613
4614 return true;
4615
4616 disabled:
4617 dev_info(adev->dev, "GPU recovery disabled.\n");
4618 return false;
4619 }
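/*
 * Illustrative summary of the amdgpu_gpu_recovery values handled in this
 * file: 0 disables recovery outright (first check above), -1 lets the
 * driver decide per ASIC (the switch above), any other positive value
 * enables it, and 2 additionally enables the guilty-job recheck path in
 * amdgpu_device_gpu_recover() further below.
 */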
4620
4621 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4622 {
4623 u32 i;
4624 int ret = 0;
4625
4626 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4627
4628 dev_info(adev->dev, "GPU mode1 reset\n");
4629
4630 /* disable BM */
4631 pci_clear_master(adev->pdev);
4632
4633 amdgpu_device_cache_pci_state(adev->pdev);
4634
4635 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4636 dev_info(adev->dev, "GPU smu mode1 reset\n");
4637 ret = amdgpu_dpm_mode1_reset(adev);
4638 } else {
4639 dev_info(adev->dev, "GPU psp mode1 reset\n");
4640 ret = psp_gpu_reset(adev);
4641 }
4642
4643 if (ret)
4644 dev_err(adev->dev, "GPU mode1 reset failed\n");
4645
4646 amdgpu_device_load_pci_state(adev->pdev);
4647
4648 /* wait for asic to come out of reset */
4649 for (i = 0; i < adev->usec_timeout; i++) {
4650 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4651
4652 if (memsize != 0xffffffff)
4653 break;
4654 udelay(1);
4655 }
4656
4657 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4658 return ret;
4659 }
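/*
 * Clarifying note: while the ASIC is still held in reset, MMIO reads
 * such as get_memsize() above typically return all 0xFFs, so a memsize
 * value other than 0xffffffff is used as the signal that register
 * access has come back after the mode1 reset.
 */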
4660
4661 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4662 struct amdgpu_reset_context *reset_context)
4663 {
4664 int i, r = 0;
4665 struct amdgpu_job *job = NULL;
4666 bool need_full_reset =
4667 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4668
4669 if (reset_context->reset_req_dev == adev)
4670 job = reset_context->job;
4671
4672 if (amdgpu_sriov_vf(adev)) {
4673 /* stop the data exchange thread */
4674 amdgpu_virt_fini_data_exchange(adev);
4675 }
4676
4677 amdgpu_fence_driver_isr_toggle(adev, true);
4678
4679 /* block all schedulers and reset given job's ring */
4680 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4681 struct amdgpu_ring *ring = adev->rings[i];
4682
4683 if (!ring || !ring->sched.thread)
4684 continue;
4685
4686 /* Clear the job fences from the fence driver to avoid force_completion;
4687 * leave the NULL and vm flush fences in the fence driver. */
4688 amdgpu_fence_driver_clear_job_fences(ring);
4689
4690 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4691 amdgpu_fence_driver_force_completion(ring);
4692 }
4693
4694 amdgpu_fence_driver_isr_toggle(adev, false);
4695
4696 if (job && job->vm)
4697 drm_sched_increase_karma(&job->base);
4698
4699 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4700 /* If reset handler not implemented, continue; otherwise return */
4701 if (r == -ENOSYS)
4702 r = 0;
4703 else
4704 return r;
4705
4706 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4707 if (!amdgpu_sriov_vf(adev)) {
4708
4709 if (!need_full_reset)
4710 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4711
4712 if (!need_full_reset && amdgpu_gpu_recovery) {
4713 amdgpu_device_ip_pre_soft_reset(adev);
4714 r = amdgpu_device_ip_soft_reset(adev);
4715 amdgpu_device_ip_post_soft_reset(adev);
4716 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4717 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4718 need_full_reset = true;
4719 }
4720 }
4721
4722 if (need_full_reset)
4723 r = amdgpu_device_ip_suspend(adev);
4724 if (need_full_reset)
4725 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4726 else
4727 clear_bit(AMDGPU_NEED_FULL_RESET,
4728 &reset_context->flags);
4729 }
4730
4731 return r;
4732 }
4733
4734 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4735 {
4736 int i;
4737
4738 lockdep_assert_held(&adev->reset_domain->sem);
4739
4740 for (i = 0; i < adev->num_regs; i++) {
4741 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4742 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4743 adev->reset_dump_reg_value[i]);
4744 }
4745
4746 return 0;
4747 }
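/*
 * Note (workflow assumption, not defined in this file):
 * adev->reset_dump_reg_list is expected to be populated from userspace
 * beforehand, typically through a debugfs interface, so that the values
 * captured here can be included in the devcoredump emitted below.
 */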
4748
4749 #ifdef CONFIG_DEV_COREDUMP
4750 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4751 size_t count, void *data, size_t datalen)
4752 {
4753 struct drm_printer p;
4754 struct amdgpu_device *adev = data;
4755 struct drm_print_iterator iter;
4756 int i;
4757
4758 iter.data = buffer;
4759 iter.offset = 0;
4760 iter.start = offset;
4761 iter.remain = count;
4762
4763 p = drm_coredump_printer(&iter);
4764
4765 drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4766 drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4767 drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4768 drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4769 if (adev->reset_task_info.pid)
4770 drm_printf(&p, "process_name: %s PID: %d\n",
4771 adev->reset_task_info.process_name,
4772 adev->reset_task_info.pid);
4773
4774 if (adev->reset_vram_lost)
4775 drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4776 if (adev->num_regs) {
4777 drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n");
4778
4779 for (i = 0; i < adev->num_regs; i++)
4780 drm_printf(&p, "0x%08x: 0x%08x\n",
4781 adev->reset_dump_reg_list[i],
4782 adev->reset_dump_reg_value[i]);
4783 }
4784
4785 return count - iter.remain;
4786 }
4787
4788 static void amdgpu_devcoredump_free(void *data)
4789 {
4790 }
4791
4792 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4793 {
4794 struct drm_device *dev = adev_to_drm(adev);
4795
4796 ktime_get_ts64(&adev->reset_time);
4797 dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
4798 amdgpu_devcoredump_read, amdgpu_devcoredump_free);
4799 }
4800 #endif
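/*
 * Illustrative note (assumed devcoredump sysfs layout, not defined in
 * this file): dev_coredumpm() above hands the dump to the devcoredump
 * framework, so after a reset it can typically be read from userspace
 * with something like
 *
 *     cat /sys/class/devcoredump/devcdN/data
 *
 * until the framework expires the dump or it is explicitly freed.
 */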
4801
4802 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4803 struct amdgpu_reset_context *reset_context)
4804 {
4805 struct amdgpu_device *tmp_adev = NULL;
4806 bool need_full_reset, skip_hw_reset, vram_lost = false;
4807 int r = 0;
4808 bool gpu_reset_for_dev_remove = 0;
4809
4810 /* Try reset handler method first */
4811 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4812 reset_list);
4813 amdgpu_reset_reg_dumps(tmp_adev);
4814
4815 reset_context->reset_device_list = device_list_handle;
4816 r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4817 /* If reset handler not implemented, continue; otherwise return */
4818 if (r == -ENOSYS)
4819 r = 0;
4820 else
4821 return r;
4822
4823 /* Reset handler not implemented, use the default method */
4824 need_full_reset =
4825 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4826 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4827
4828 gpu_reset_for_dev_remove =
4829 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
4830 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4831
4832 /*
4833 * ASIC reset has to be done on all XGMI hive nodes ASAP
4834 * to allow proper link negotiation in FW (within 1 sec)
4835 */
4836 if (!skip_hw_reset && need_full_reset) {
4837 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4838 /* For XGMI run all resets in parallel to speed up the process */
4839 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4840 tmp_adev->gmc.xgmi.pending_reset = false;
4841 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4842 r = -EALREADY;
4843 } else
4844 r = amdgpu_asic_reset(tmp_adev);
4845
4846 if (r) {
4847 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4848 r, adev_to_drm(tmp_adev)->unique);
4849 break;
4850 }
4851 }
4852
4853 /* For XGMI wait for all resets to complete before proceed */
4854 if (!r) {
4855 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4856 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4857 flush_work(&tmp_adev->xgmi_reset_work);
4858 r = tmp_adev->asic_reset_res;
4859 if (r)
4860 break;
4861 }
4862 }
4863 }
4864 }
4865
4866 if (!r && amdgpu_ras_intr_triggered()) {
4867 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4868 if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4869 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4870 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4871 }
4872
4873 amdgpu_ras_intr_cleared();
4874 }
4875
4876 /* Since the mode1 reset affects base ip blocks, the
4877 * phase1 ip blocks need to be resumed. Otherwise there
4878 * will be a BIOS signature error and the psp bootloader
4879 * can't load kdb on the next amdgpu install.
4880 */
4881 if (gpu_reset_for_dev_remove) {
4882 list_for_each_entry(tmp_adev, device_list_handle, reset_list)
4883 amdgpu_device_ip_resume_phase1(tmp_adev);
4884
4885 goto end;
4886 }
4887
4888 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4889 if (need_full_reset) {
4890 /* post card */
4891 r = amdgpu_device_asic_init(tmp_adev);
4892 if (r) {
4893 dev_warn(tmp_adev->dev, "asic atom init failed!");
4894 } else {
4895 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4896 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4897 if (r)
4898 goto out;
4899
4900 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4901 if (r)
4902 goto out;
4903
4904 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4905 #ifdef CONFIG_DEV_COREDUMP
4906 tmp_adev->reset_vram_lost = vram_lost;
4907 memset(&tmp_adev->reset_task_info, 0,
4908 sizeof(tmp_adev->reset_task_info));
4909 if (reset_context->job && reset_context->job->vm)
4910 tmp_adev->reset_task_info =
4911 reset_context->job->vm->task_info;
4912 amdgpu_reset_capture_coredumpm(tmp_adev);
4913 #endif
4914 if (vram_lost) {
4915 DRM_INFO("VRAM is lost due to GPU reset!\n");
4916 amdgpu_inc_vram_lost(tmp_adev);
4917 }
4918
4919 r = amdgpu_device_fw_loading(tmp_adev);
4920 if (r)
4921 return r;
4922
4923 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4924 if (r)
4925 goto out;
4926
4927 if (vram_lost)
4928 amdgpu_device_fill_reset_magic(tmp_adev);
4929
4930 /*
4931 * Add this ASIC back as tracked since the reset has
4932 * already completed successfully.
4933 */
4934 amdgpu_register_gpu_instance(tmp_adev);
4935
4936 if (!reset_context->hive &&
4937 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4938 amdgpu_xgmi_add_device(tmp_adev);
4939
4940 r = amdgpu_device_ip_late_init(tmp_adev);
4941 if (r)
4942 goto out;
4943
4944 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4945
4946 /*
4947 * The GPU enters a bad state once the number of faulty
4948 * pages detected by ECC reaches the threshold, and RAS
4949 * recovery is scheduled next. So add a check here to
4950 * break recovery if the bad page threshold has indeed
4951 * been exceeded, and remind the user to either retire
4952 * this GPU or set a bigger bad_page_threshold before the
4953 * driver is probed again.
4955 */
4956 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4957 /* must succeed. */
4958 amdgpu_ras_resume(tmp_adev);
4959 } else {
4960 r = -EINVAL;
4961 goto out;
4962 }
4963
4964 /* Update PSP FW topology after reset */
4965 if (reset_context->hive &&
4966 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4967 r = amdgpu_xgmi_update_topology(
4968 reset_context->hive, tmp_adev);
4969 }
4970 }
4971
4972 out:
4973 if (!r) {
4974 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4975 r = amdgpu_ib_ring_tests(tmp_adev);
4976 if (r) {
4977 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4978 need_full_reset = true;
4979 r = -EAGAIN;
4980 goto end;
4981 }
4982 }
4983
4984 if (!r)
4985 r = amdgpu_device_recover_vram(tmp_adev);
4986 else
4987 tmp_adev->asic_reset_res = r;
4988 }
4989
4990 end:
4991 if (need_full_reset)
4992 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4993 else
4994 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4995 return r;
4996 }
4997
4998 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
4999 {
5000
5001 switch (amdgpu_asic_reset_method(adev)) {
5002 case AMD_RESET_METHOD_MODE1:
5003 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5004 break;
5005 case AMD_RESET_METHOD_MODE2:
5006 adev->mp1_state = PP_MP1_STATE_RESET;
5007 break;
5008 default:
5009 adev->mp1_state = PP_MP1_STATE_NONE;
5010 break;
5011 }
5012 }
5013
5014 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5015 {
5016 amdgpu_vf_error_trans_all(adev);
5017 adev->mp1_state = PP_MP1_STATE_NONE;
5018 }
5019
5020 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5021 {
5022 struct pci_dev *p = NULL;
5023
5024 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5025 adev->pdev->bus->number, 1);
5026 if (p) {
5027 pm_runtime_enable(&(p->dev));
5028 pm_runtime_resume(&(p->dev));
5029 }
5030 }
5031
5032 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5033 {
5034 enum amd_reset_method reset_method;
5035 struct pci_dev *p = NULL;
5036 u64 expires;
5037
5038 /*
5039 * For now, only BACO and mode1 reset are confirmed to
5040 * suffer the audio issue if the audio device is not properly suspended.
5041 */
5042 reset_method = amdgpu_asic_reset_method(adev);
5043 if ((reset_method != AMD_RESET_METHOD_BACO) &&
5044 (reset_method != AMD_RESET_METHOD_MODE1))
5045 return -EINVAL;
5046
5047 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5048 adev->pdev->bus->number, 1);
5049 if (!p)
5050 return -ENODEV;
5051
5052 expires = pm_runtime_autosuspend_expiration(&(p->dev));
5053 if (!expires)
5054 /*
5055 * If we cannot get the audio device autosuspend delay,
5056 * a fixed 4s interval is used. Since 3s is the audio
5057 * controller's default autosuspend delay setting, the 4s
5058 * used here is guaranteed to cover it.
5059 */
5060 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5061
5062 while (!pm_runtime_status_suspended(&(p->dev))) {
5063 if (!pm_runtime_suspend(&(p->dev)))
5064 break;
5065
5066 if (expires < ktime_get_mono_fast_ns()) {
5067 dev_warn(adev->dev, "failed to suspend display audio\n");
5068 /* TODO: abort the succeeding gpu reset? */
5069 return -ETIMEDOUT;
5070 }
5071 }
5072
5073 pm_runtime_disable(&(p->dev));
5074
5075 return 0;
5076 }
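/*
 * Clarifying note: the device looked up above is PCI function 1 of the
 * GPU, which on these parts is the HDMI/DP audio controller sharing the
 * GPU's power domain, hence the need to park it before a BACO or mode1
 * reset.
 */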
5077
5078 static void amdgpu_device_recheck_guilty_jobs(
5079 struct amdgpu_device *adev, struct list_head *device_list_handle,
5080 struct amdgpu_reset_context *reset_context)
5081 {
5082 int i, r = 0;
5083
5084 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5085 struct amdgpu_ring *ring = adev->rings[i];
5086 int ret = 0;
5087 struct drm_sched_job *s_job;
5088
5089 if (!ring || !ring->sched.thread)
5090 continue;
5091
5092 s_job = list_first_entry_or_null(&ring->sched.pending_list,
5093 struct drm_sched_job, list);
5094 if (s_job == NULL)
5095 continue;
5096
5097 /* clear the job's guilty status and rely on the following step to decide the real one */
5098 drm_sched_reset_karma(s_job);
5099 drm_sched_resubmit_jobs_ext(&ring->sched, 1);
5100
5101 if (!s_job->s_fence->parent) {
5102 DRM_WARN("Failed to get a HW fence for job!");
5103 continue;
5104 }
5105
5106 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
5107 if (ret == 0) { /* timeout */
5108 DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
5109 ring->sched.name, s_job->id);
5110
5111
5112 amdgpu_fence_driver_isr_toggle(adev, true);
5113
5114 /* Clear this failed job from fence array */
5115 amdgpu_fence_driver_clear_job_fences(ring);
5116
5117 amdgpu_fence_driver_isr_toggle(adev, false);
5118
5119 /* Since the job won't signal and we are going to
5120 * resubmit it again, drop this parent pointer
5121 */
5122 dma_fence_put(s_job->s_fence->parent);
5123 s_job->s_fence->parent = NULL;
5124
5125 /* set guilty */
5126 drm_sched_increase_karma(s_job);
5127 amdgpu_reset_prepare_hwcontext(adev, reset_context);
5128 retry:
5129 /* do hw reset */
5130 if (amdgpu_sriov_vf(adev)) {
5131 amdgpu_virt_fini_data_exchange(adev);
5132 r = amdgpu_device_reset_sriov(adev, false);
5133 if (r)
5134 adev->asic_reset_res = r;
5135 } else {
5136 clear_bit(AMDGPU_SKIP_HW_RESET,
5137 &reset_context->flags);
5138 r = amdgpu_do_asic_reset(device_list_handle,
5139 reset_context);
5140 if (r && r == -EAGAIN)
5141 goto retry;
5142 }
5143
5144 /*
5145 * bump the reset counter so that the following
5146 * resubmitted job can flush its vmid
5147 */
5148 atomic_inc(&adev->gpu_reset_counter);
5149 continue;
5150 }
5151
5152 /* got the hw fence, signal finished fence */
5153 atomic_dec(ring->sched.score);
5154 dma_fence_get(&s_job->s_fence->finished);
5155 dma_fence_signal(&s_job->s_fence->finished);
5156 dma_fence_put(&s_job->s_fence->finished);
5157
5158 /* remove node from list and free the job */
5159 spin_lock(&ring->sched.job_list_lock);
5160 list_del_init(&s_job->list);
5161 spin_unlock(&ring->sched.job_list_lock);
5162 ring->sched.ops->free_job(s_job);
5163 }
5164 }
5165
5166 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5167 {
5168 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5169
5170 #if defined(CONFIG_DEBUG_FS)
5171 if (!amdgpu_sriov_vf(adev))
5172 cancel_work(&adev->reset_work);
5173 #endif
5174
5175 if (adev->kfd.dev)
5176 cancel_work(&adev->kfd.reset_work);
5177
5178 if (amdgpu_sriov_vf(adev))
5179 cancel_work(&adev->virt.flr_work);
5180
5181 if (con && adev->ras_enabled)
5182 cancel_work(&con->recovery_work);
5183
5184 }
5185
5186
5187 /**
5188 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5189 *
5190 * @adev: amdgpu_device pointer
5191 * @job: which job trigger hang
5192 *
5193 * Attempt to reset the GPU if it has hung (all asics).
5194 * Attempt to do soft-reset or full-reset and reinitialize Asic
5195 * Returns 0 for success or an error on failure.
5196 */
5197
5198 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5199 struct amdgpu_job *job,
5200 struct amdgpu_reset_context *reset_context)
5201 {
5202 struct list_head device_list, *device_list_handle = NULL;
5203 bool job_signaled = false;
5204 struct amdgpu_hive_info *hive = NULL;
5205 struct amdgpu_device *tmp_adev = NULL;
5206 int i, r = 0;
5207 bool need_emergency_restart = false;
5208 bool audio_suspended = false;
5209 int tmp_vram_lost_counter;
5210 bool gpu_reset_for_dev_remove = false;
5211
5212 gpu_reset_for_dev_remove =
5213 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5214 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5215
5216 /*
5217 * Special case: RAS triggered and full reset isn't supported
5218 */
5219 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5220
5221 /*
5222 * Flush RAM to disk so that after reboot
5223 * the user can read log and see why the system rebooted.
5224 */
5225 if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5226 DRM_WARN("Emergency reboot.");
5227
5228 ksys_sync_helper();
5229 emergency_restart();
5230 }
5231
5232 dev_info(adev->dev, "GPU %s begin!\n",
5233 need_emergency_restart ? "jobs stop":"reset");
5234
5235 if (!amdgpu_sriov_vf(adev))
5236 hive = amdgpu_get_xgmi_hive(adev);
5237 if (hive)
5238 mutex_lock(&hive->hive_lock);
5239
5240 reset_context->job = job;
5241 reset_context->hive = hive;
5242 /*
5243 * Build list of devices to reset.
5244 * In case we are in XGMI hive mode, resort the device list
5245 * to put adev in the 1st position.
5246 */
5247 INIT_LIST_HEAD(&device_list);
5248 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5249 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5250 list_add_tail(&tmp_adev->reset_list, &device_list);
5251 if (gpu_reset_for_dev_remove && adev->shutdown)
5252 tmp_adev->shutdown = true;
5253 }
5254 if (!list_is_first(&adev->reset_list, &device_list))
5255 list_rotate_to_front(&adev->reset_list, &device_list);
5256 device_list_handle = &device_list;
5257 } else {
5258 list_add_tail(&adev->reset_list, &device_list);
5259 device_list_handle = &device_list;
5260 }
5261
5262 /* We need to lock reset domain only once both for XGMI and single device */
5263 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5264 reset_list);
5265 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5266
5267 /* block all schedulers and reset given job's ring */
5268 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5269
5270 amdgpu_device_set_mp1_state(tmp_adev);
5271
5272 /*
5273 * Try to put the audio codec into the suspend state
5274 * before the gpu reset starts.
5275 *
5276 * The power domain of the graphics device is shared
5277 * with the AZ power domain, so without this we may
5278 * change the audio hardware behind the audio driver's
5279 * back, which will trigger audio codec errors.
5281 */
5282 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5283 audio_suspended = true;
5284
5285 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5286
5287 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5288
5289 if (!amdgpu_sriov_vf(tmp_adev))
5290 amdgpu_amdkfd_pre_reset(tmp_adev);
5291
5292 /*
5293 * Mark these ASICs to be reset as untracked first,
5294 * and add them back after the reset has completed.
5295 */
5296 amdgpu_unregister_gpu_instance(tmp_adev);
5297
5298 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5299
5300 /* disable ras on ALL IPs */
5301 if (!need_emergency_restart &&
5302 amdgpu_device_ip_need_full_reset(tmp_adev))
5303 amdgpu_ras_suspend(tmp_adev);
5304
5305 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5306 struct amdgpu_ring *ring = tmp_adev->rings[i];
5307
5308 if (!ring || !ring->sched.thread)
5309 continue;
5310
5311 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5312
5313 if (need_emergency_restart)
5314 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5315 }
5316 atomic_inc(&tmp_adev->gpu_reset_counter);
5317 }
5318
5319 if (need_emergency_restart)
5320 goto skip_sched_resume;
5321
5322 /*
5323 * Must check guilty signal here since after this point all old
5324 * HW fences are force signaled.
5325 *
5326 * job->base holds a reference to parent fence
5327 */
5328 if (job && dma_fence_is_signaled(&job->hw_fence)) {
5329 job_signaled = true;
5330 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5331 goto skip_hw_reset;
5332 }
5333
5334 retry: /* Pre-ASIC reset for the rest of the adevs in the XGMI hive. */
5335 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5336 if (gpu_reset_for_dev_remove) {
5337 /* Workaround for ASICs that need to disable SMC first */
5338 amdgpu_device_smu_fini_early(tmp_adev);
5339 }
5340 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5341 /* TODO: Should we stop here? */
5342 if (r) {
5343 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5344 r, adev_to_drm(tmp_adev)->unique);
5345 tmp_adev->asic_reset_res = r;
5346 }
5347
5348 /*
5349 * Drop all pending non-scheduler resets. Scheduler resets
5350 * were already dropped during drm_sched_stop.
5351 */
5352 amdgpu_device_stop_pending_resets(tmp_adev);
5353 }
5354
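/* Cache the VRAM-lost counter before the reset so that we can tell
 * below whether VRAM contents were lost during the actual ASIC reset.
 */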
5355 tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
5356 /* Actual ASIC resets if needed. */
5357 /* Host driver will handle XGMI hive reset for SRIOV */
5358 if (amdgpu_sriov_vf(adev)) {
5359 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5360 if (r)
5361 adev->asic_reset_res = r;
5362
5363 /* Aldebaran supports RAS in SRIOV, so RAS needs to be resumed during reset */
5364 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
5365 amdgpu_ras_resume(adev);
5366 } else {
5367 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5368 if (r && r == -EAGAIN)
5369 goto retry;
5370
5371 if (!r && gpu_reset_for_dev_remove)
5372 goto recover_end;
5373 }
5374
5375 skip_hw_reset:
5376
5377 /* Post ASIC reset for all devs. */
5378 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5379
5380 /*
5381 * Sometimes a later bad compute job can block a good gfx job because the
5382 * gfx and compute rings share internal GC hardware. We add an additional
5383 * guilty-job recheck step to find the real guilty job; it synchronously
5384 * submits and waits for the first job to be signaled. If the wait times out,
5385 * we identify it as the real guilty job.
5386 */
5387 if (amdgpu_gpu_recovery == 2 &&
5388 !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5389 amdgpu_device_recheck_guilty_jobs(
5390 tmp_adev, device_list_handle, reset_context);
5391
5392 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5393 struct amdgpu_ring *ring = tmp_adev->rings[i];
5394
5395 if (!ring || !ring->sched.thread)
5396 continue;
5397
5398 /* No point in resubmitting jobs if we didn't do a HW reset */
5399 if (!tmp_adev->asic_reset_res && !job_signaled)
5400 drm_sched_resubmit_jobs(&ring->sched);
5401
5402 drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5403 }
5404
5405 if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
5406 amdgpu_mes_self_test(tmp_adev);
5407
5408 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5409 drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5410 }
5411
5412 if (tmp_adev->asic_reset_res)
5413 r = tmp_adev->asic_reset_res;
5414
5415 tmp_adev->asic_reset_res = 0;
5416
5417 if (r) {
5418 /* bad news, how do we tell it to userspace? */
5419 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5420 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5421 } else {
5422 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5423 if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5424 DRM_WARN("smart shift update failed\n");
5425 }
5426 }
5427
5428 skip_sched_resume:
5429 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5430 /* unlock kfd: SRIOV would do it separately */
5431 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5432 amdgpu_amdkfd_post_reset(tmp_adev);
5433
5434 /* kfd_post_reset will do nothing if the kfd device is not initialized;
5435 * bring up kfd here if it was not initialized before.
5436 */
5437 if (!tmp_adev->kfd.init_complete)
5438 	amdgpu_amdkfd_device_init(tmp_adev);
5439
5440 if (audio_suspended)
5441 amdgpu_device_resume_display_audio(tmp_adev);
5442
5443 amdgpu_device_unset_mp1_state(tmp_adev);
5444 }
5445
5446 recover_end:
5447 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5448 reset_list);
5449 amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5450
5451 if (hive) {
5452 mutex_unlock(&hive->hive_lock);
5453 amdgpu_put_xgmi_hive(hive);
5454 }
5455
5456 if (r)
5457 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5458
5459 atomic_set(&adev->reset_domain->reset_res, r);
5460 return r;
5461 }
5462
5463 /**
5464 * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
5465 *
5466 * @adev: amdgpu_device pointer
5467 *
5468 * Fetches and stores in the driver the PCIe capabilities (gen speed
5469 * and lanes) of the slot the device is in. Handles APUs and
5470 * virtualized environments where PCIe config space may not be available.
5471 */
5472 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5473 {
5474 struct pci_dev *pdev;
5475 enum pci_bus_speed speed_cap, platform_speed_cap;
5476 enum pcie_link_width platform_link_width;
5477
5478 if (amdgpu_pcie_gen_cap)
5479 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5480
5481 if (amdgpu_pcie_lane_cap)
5482 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5483
5484 /* covers APUs as well */
5485 if (pci_is_root_bus(adev->pdev->bus)) {
5486 if (adev->pm.pcie_gen_mask == 0)
5487 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5488 if (adev->pm.pcie_mlw_mask == 0)
5489 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5490 return;
5491 }
5492
5493 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5494 return;
5495
5496 pcie_bandwidth_available(adev->pdev, NULL,
5497 &platform_speed_cap, &platform_link_width);
5498
5499 if (adev->pm.pcie_gen_mask == 0) {
5500 /* asic caps */
5501 pdev = adev->pdev;
5502 speed_cap = pcie_get_speed_cap(pdev);
5503 if (speed_cap == PCI_SPEED_UNKNOWN) {
5504 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5505 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5506 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5507 } else {
5508 if (speed_cap == PCIE_SPEED_32_0GT)
5509 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5510 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5511 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5512 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5513 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5514 else if (speed_cap == PCIE_SPEED_16_0GT)
5515 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5516 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5517 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5518 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5519 else if (speed_cap == PCIE_SPEED_8_0GT)
5520 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5521 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5522 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5523 else if (speed_cap == PCIE_SPEED_5_0GT)
5524 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5525 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5526 else
5527 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5528 }
5529 /* platform caps */
5530 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5531 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5532 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5533 } else {
5534 if (platform_speed_cap == PCIE_SPEED_32_0GT)
5535 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5536 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5537 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5538 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5539 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5540 else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5541 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5542 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5543 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5544 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5545 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5546 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5547 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5548 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5549 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5550 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5551 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5552 else
5553 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5554
5555 }
5556 }
5557 if (adev->pm.pcie_mlw_mask == 0) {
5558 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5559 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5560 } else {
5561 switch (platform_link_width) {
5562 case PCIE_LNK_X32:
5563 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5564 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5565 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5566 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5567 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5568 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5569 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5570 break;
5571 case PCIE_LNK_X16:
5572 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5573 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5574 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5575 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5576 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5577 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5578 break;
5579 case PCIE_LNK_X12:
5580 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5581 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5582 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5583 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5584 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5585 break;
5586 case PCIE_LNK_X8:
5587 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5588 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5589 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5590 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5591 break;
5592 case PCIE_LNK_X4:
5593 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5594 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5595 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5596 break;
5597 case PCIE_LNK_X2:
5598 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5599 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5600 break;
5601 case PCIE_LNK_X1:
5602 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5603 break;
5604 default:
5605 break;
5606 }
5607 }
5608 }
5609 }
5610
5611 /**
5612 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5613 *
5614 * @adev: amdgpu_device pointer
5615 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5616 *
5617 * Return true if @peer_adev can access (DMA) @adev through the PCIe
5618 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5619 * @peer_adev.
5620 */
5621 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5622 struct amdgpu_device *peer_adev)
5623 {
5624 #ifdef CONFIG_HSA_AMD_P2P
5625 uint64_t address_mask = peer_adev->dev->dma_mask ?
5626 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5627 resource_size_t aper_limit =
5628 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5629 bool p2p_access =
5630 !adev->gmc.xgmi.connected_to_cpu &&
5631 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5632
5633 return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5634 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5635 !(adev->gmc.aper_base & address_mask ||
5636 aper_limit & address_mask));
5637 #else
5638 return false;
5639 #endif
5640 }
5641
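/**
 * amdgpu_device_baco_enter - enter BACO (Bus Active, Chip Off) state
 *
 * @dev: drm_device pointer
 *
 * Disable the RAS doorbell interrupt if needed and ask the DPM code to
 * put the device into BACO.
 *
 * Returns 0 on success, -ENOTSUPP if the device does not support BACO,
 * or a negative error code from the DPM call.
 */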
5642 int amdgpu_device_baco_enter(struct drm_device *dev)
5643 {
5644 struct amdgpu_device *adev = drm_to_adev(dev);
5645 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5646
5647 if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5648 return -ENOTSUPP;
5649
5650 if (ras && adev->ras_enabled &&
5651 adev->nbio.funcs->enable_doorbell_interrupt)
5652 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5653
5654 return amdgpu_dpm_baco_enter(adev);
5655 }
5656
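/**
 * amdgpu_device_baco_exit - exit BACO state
 *
 * @dev: drm_device pointer
 *
 * Ask the DPM code to bring the device out of BACO, then re-enable the
 * RAS doorbell interrupt and, for passthrough configurations, clear the
 * doorbell interrupt if needed.
 *
 * Returns 0 on success or a negative error code.
 */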
5657 int amdgpu_device_baco_exit(struct drm_device *dev)
5658 {
5659 struct amdgpu_device *adev = drm_to_adev(dev);
5660 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5661 int ret = 0;
5662
5663 if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5664 return -ENOTSUPP;
5665
5666 ret = amdgpu_dpm_baco_exit(adev);
5667 if (ret)
5668 return ret;
5669
5670 if (ras && adev->ras_enabled &&
5671 adev->nbio.funcs->enable_doorbell_interrupt)
5672 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5673
5674 if (amdgpu_passthrough(adev) &&
5675 adev->nbio.funcs->clear_doorbell_interrupt)
5676 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5677
5678 return 0;
5679 }
5680
5681 /**
5682 * amdgpu_pci_error_detected - Called when a PCI error is detected.
5683 * @pdev: PCI device struct
5684 * @state: PCI channel state
5685 *
5686 * Description: Called when a PCI error is detected.
5687 *
5688 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5689 */
5690 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5691 {
5692 struct drm_device *dev = pci_get_drvdata(pdev);
5693 struct amdgpu_device *adev = drm_to_adev(dev);
5694 int i;
5695
5696 DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5697
5698 if (adev->gmc.xgmi.num_physical_nodes > 1) {
5699 DRM_WARN("No support for XGMI hive yet...");
5700 return PCI_ERS_RESULT_DISCONNECT;
5701 }
5702
5703 adev->pci_channel_state = state;
5704
5705 switch (state) {
5706 case pci_channel_io_normal:
5707 return PCI_ERS_RESULT_CAN_RECOVER;
5708 /* Fatal error, prepare for slot reset */
5709 case pci_channel_io_frozen:
5710 /*
5711 * Locking adev->reset_domain->sem will prevent any external access
5712 * to GPU during PCI error recovery
5713 */
5714 amdgpu_device_lock_reset_domain(adev->reset_domain);
5715 amdgpu_device_set_mp1_state(adev);
5716
5717 /*
5718 * Block any work scheduling as we do for regular GPU reset
5719 * for the duration of the recovery
5720 */
5721 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5722 struct amdgpu_ring *ring = adev->rings[i];
5723
5724 if (!ring || !ring->sched.thread)
5725 continue;
5726
5727 drm_sched_stop(&ring->sched, NULL);
5728 }
5729 atomic_inc(&adev->gpu_reset_counter);
5730 return PCI_ERS_RESULT_NEED_RESET;
5731 case pci_channel_io_perm_failure:
5732 /* Permanent error, prepare for device removal */
5733 return PCI_ERS_RESULT_DISCONNECT;
5734 }
5735
5736 return PCI_ERS_RESULT_NEED_RESET;
5737 }
5738
5739 /**
5740 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5741 * @pdev: pointer to PCI device
5742 */
5743 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5744 {
5745
5746 DRM_INFO("PCI error: mmio enabled callback!!\n");
5747
5748 /* TODO - dump whatever for debugging purposes */
5749
5750 /* This is called only if amdgpu_pci_error_detected returns
5751 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5752 * works, so there is no need to reset the slot.
5753 */
5754
5755 return PCI_ERS_RESULT_RECOVERED;
5756 }
5757
5758 /**
5759 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5760 * @pdev: PCI device struct
5761 *
5762 * Description: This routine is called by the pci error recovery
5763 * code after the PCI slot has been reset, just before we
5764 * should resume normal operations.
5765 */
5766 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5767 {
5768 struct drm_device *dev = pci_get_drvdata(pdev);
5769 struct amdgpu_device *adev = drm_to_adev(dev);
5770 int r, i;
5771 struct amdgpu_reset_context reset_context;
5772 u32 memsize;
5773 struct list_head device_list;
5774
5775 DRM_INFO("PCI error: slot reset callback!!\n");
5776
5777 memset(&reset_context, 0, sizeof(reset_context));
5778
5779 INIT_LIST_HEAD(&device_list);
5780 list_add_tail(&adev->reset_list, &device_list);
5781
5782 /* wait for asic to come out of reset */
5783 msleep(500);
5784
5785 /* Restore PCI config space */
5786 amdgpu_device_load_pci_state(pdev);
5787
5788 /* confirm ASIC came out of reset */
5789 for (i = 0; i < adev->usec_timeout; i++) {
5790 memsize = amdgpu_asic_get_config_memsize(adev);
5791
5792 if (memsize != 0xffffffff)
5793 break;
5794 udelay(1);
5795 }
5796 if (memsize == 0xffffffff) {
5797 r = -ETIME;
5798 goto out;
5799 }
5800
5801 reset_context.method = AMD_RESET_METHOD_NONE;
5802 reset_context.reset_req_dev = adev;
5803 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5804 set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5805
5806 adev->no_hw_access = true;
5807 r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5808 adev->no_hw_access = false;
5809 if (r)
5810 goto out;
5811
5812 r = amdgpu_do_asic_reset(&device_list, &reset_context);
5813
5814 out:
5815 if (!r) {
5816 if (amdgpu_device_cache_pci_state(adev->pdev))
5817 pci_restore_state(adev->pdev);
5818
5819 DRM_INFO("PCIe error recovery succeeded\n");
5820 } else {
5821 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5822 amdgpu_device_unset_mp1_state(adev);
5823 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5824 }
5825
5826 return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5827 }
5828
5829 /**
5830 * amdgpu_pci_resume() - resume normal ops after PCI reset
5831 * @pdev: pointer to PCI device
5832 *
5833 * Called when the error recovery driver tells us that it's
5834 * OK to resume normal operation.
5835 */
5836 void amdgpu_pci_resume(struct pci_dev *pdev)
5837 {
5838 struct drm_device *dev = pci_get_drvdata(pdev);
5839 struct amdgpu_device *adev = drm_to_adev(dev);
5840 int i;
5841
5842
5843 DRM_INFO("PCI error: resume callback!!\n");
5844
5845 /* Only continue execution for the case of pci_channel_io_frozen */
5846 if (adev->pci_channel_state != pci_channel_io_frozen)
5847 return;
5848
5849 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5850 struct amdgpu_ring *ring = adev->rings[i];
5851
5852 if (!ring || !ring->sched.thread)
5853 continue;
5854
5855
5856 drm_sched_resubmit_jobs(&ring->sched);
5857 drm_sched_start(&ring->sched, true);
5858 }
5859
5860 amdgpu_device_unset_mp1_state(adev);
5861 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5862 }
5863
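/**
 * amdgpu_device_cache_pci_state - cache the PCI config space of the device
 *
 * @pdev: PCI device struct
 *
 * Save the PCI config space and keep a copy in adev->pci_state so that it
 * can be restored after a GPU reset or PCI error recovery.
 *
 * Returns true on success, false otherwise.
 */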
5864 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5865 {
5866 struct drm_device *dev = pci_get_drvdata(pdev);
5867 struct amdgpu_device *adev = drm_to_adev(dev);
5868 int r;
5869
5870 r = pci_save_state(pdev);
5871 if (!r) {
5872 kfree(adev->pci_state);
5873
5874 adev->pci_state = pci_store_saved_state(pdev);
5875
5876 if (!adev->pci_state) {
5877 DRM_ERROR("Failed to store PCI saved state");
5878 return false;
5879 }
5880 } else {
5881 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5882 return false;
5883 }
5884
5885 return true;
5886 }
5887
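/**
 * amdgpu_device_load_pci_state - restore the cached PCI config space
 *
 * @pdev: PCI device struct
 *
 * Load the PCI state previously cached by amdgpu_device_cache_pci_state()
 * and restore it to the device.
 *
 * Returns true on success, false otherwise.
 */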
5888 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5889 {
5890 struct drm_device *dev = pci_get_drvdata(pdev);
5891 struct amdgpu_device *adev = drm_to_adev(dev);
5892 int r;
5893
5894 if (!adev->pci_state)
5895 return false;
5896
5897 r = pci_load_saved_state(pdev, adev->pci_state);
5898
5899 if (!r) {
5900 pci_restore_state(pdev);
5901 } else {
5902 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5903 return false;
5904 }
5905
5906 return true;
5907 }
5908
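/**
 * amdgpu_device_flush_hdp - flush the HDP cache
 *
 * @adev: amdgpu_device pointer
 * @ring: ring to emit the flush on, or NULL to use the ASIC callback
 *
 * Skipped on x86-64 APUs that are not in passthrough mode and on devices
 * whose XGMI is connected to the CPU; otherwise flush the HDP cache,
 * preferably by emitting a flush packet on the given ring.
 */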
5909 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5910 struct amdgpu_ring *ring)
5911 {
5912 #ifdef CONFIG_X86_64
5913 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5914 return;
5915 #endif
5916 if (adev->gmc.xgmi.connected_to_cpu)
5917 return;
5918
5919 if (ring && ring->funcs->emit_hdp_flush)
5920 amdgpu_ring_emit_hdp_flush(ring);
5921 else
5922 amdgpu_asic_flush_hdp(adev, ring);
5923 }
5924
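/**
 * amdgpu_device_invalidate_hdp - invalidate the HDP cache
 *
 * @adev: amdgpu_device pointer
 * @ring: ring the invalidation is associated with
 *
 * Skipped under the same conditions as amdgpu_device_flush_hdp();
 * otherwise invalidate the HDP cache through the ASIC callback.
 */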
5925 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5926 struct amdgpu_ring *ring)
5927 {
5928 #ifdef CONFIG_X86_64
5929 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5930 return;
5931 #endif
5932 if (adev->gmc.xgmi.connected_to_cpu)
5933 return;
5934
5935 amdgpu_asic_invalidate_hdp(adev, ring);
5936 }
5937
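/**
 * amdgpu_in_reset - check whether the GPU is currently in reset
 *
 * @adev: amdgpu_device pointer
 *
 * Returns non-zero if the reset domain of the device is currently
 * performing a GPU reset.
 */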
5938 int amdgpu_in_reset(struct amdgpu_device *adev)
5939 {
5940 return atomic_read(&adev->reset_domain->in_gpu_reset);
5941 }
5942
5943 /**
5944 * amdgpu_device_halt() - bring hardware to some kind of halt state
5945 *
5946 * @adev: amdgpu_device pointer
5947 *
5948 * Bring hardware to some kind of halt state so that no one can touch it
5949 * any more. It helps to maintain the error context when an error occurs.
5950 * Compared to a simple hang, the system will stay stable at least for SSH
5951 * access. Then it should be trivial to inspect the hardware state and
5952 * see what's going on. Implemented as follows:
5953 *
5954 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc.),
5955 * clears all CPU mappings to the device, disallows remappings through page faults
5956 * 2. amdgpu_irq_disable_all() disables all interrupts
5957 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5958 * 4. set adev->no_hw_access to avoid potential crashes after step 5
5959 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5960 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5961 * flush any in-flight DMA operations
5962 */
5963 void amdgpu_device_halt(struct amdgpu_device *adev)
5964 {
5965 struct pci_dev *pdev = adev->pdev;
5966 struct drm_device *ddev = adev_to_drm(adev);
5967
5968 drm_dev_unplug(ddev);
5969
5970 amdgpu_irq_disable_all(adev);
5971
5972 amdgpu_fence_driver_hw_fini(adev);
5973
5974 adev->no_hw_access = true;
5975
5976 amdgpu_device_unmap_mmio(adev);
5977
5978 pci_disable_device(pdev);
5979 pci_wait_for_pending_transaction(pdev);
5980 }
5981
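/**
 * amdgpu_device_pcie_port_rreg - read a PCIe port register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword offset of the register to read
 *
 * Read a register in the PCIe port register space through the NBIO
 * index/data pair, serialized by pcie_idx_lock.
 */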
5982 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5983 u32 reg)
5984 {
5985 unsigned long flags, address, data;
5986 u32 r;
5987
5988 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5989 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5990
5991 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5992 WREG32(address, reg * 4);
5993 (void)RREG32(address);
5994 r = RREG32(data);
5995 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5996 return r;
5997 }
5998
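/**
 * amdgpu_device_pcie_port_wreg - write a PCIe port register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword offset of the register to write
 * @v: value to write
 *
 * Write a register in the PCIe port register space through the NBIO
 * index/data pair, serialized by pcie_idx_lock.
 */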
5999 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
6000 u32 reg, u32 v)
6001 {
6002 unsigned long flags, address, data;
6003
6004 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6005 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6006
6007 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6008 WREG32(address, reg * 4);
6009 (void)RREG32(address);
6010 WREG32(data, v);
6011 (void)RREG32(data);
6012 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6013 }
6014
6015 /**
6016 * amdgpu_device_switch_gang - switch to a new gang
6017 * @adev: amdgpu_device pointer
6018 * @gang: the gang to switch to
6019 *
6020 * Try to switch to a new gang.
6021 * Returns: NULL if we switched to the new gang or a reference to the current
6022 * gang leader.
6023 */
6024 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6025 struct dma_fence *gang)
6026 {
6027 struct dma_fence *old = NULL;
6028
6029 do {
6030 dma_fence_put(old);
6031 rcu_read_lock();
6032 old = dma_fence_get_rcu_safe(&adev->gang_submit);
6033 rcu_read_unlock();
6034
6035 if (old == gang)
6036 break;
6037
6038 if (!dma_fence_is_signaled(old))
6039 return old;
6040
6041 } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6042 old, gang) != old);
6043
6044 dma_fence_put(old);
6045 return NULL;
6046 }
6047
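/**
 * amdgpu_device_has_display_hardware - check whether the ASIC has display hardware
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true if the ASIC has (non-harvested) display hardware, false
 * for display-less chips such as HAINAN and TOPAZ.
 */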
6048 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6049 {
6050 switch (adev->asic_type) {
6051 #ifdef CONFIG_DRM_AMDGPU_SI
6052 case CHIP_HAINAN:
6053 #endif
6054 case CHIP_TOPAZ:
6055 /* chips with no display hardware */
6056 return false;
6057 #ifdef CONFIG_DRM_AMDGPU_SI
6058 case CHIP_TAHITI:
6059 case CHIP_PITCAIRN:
6060 case CHIP_VERDE:
6061 case CHIP_OLAND:
6062 #endif
6063 #ifdef CONFIG_DRM_AMDGPU_CIK
6064 case CHIP_BONAIRE:
6065 case CHIP_HAWAII:
6066 case CHIP_KAVERI:
6067 case CHIP_KABINI:
6068 case CHIP_MULLINS:
6069 #endif
6070 case CHIP_TONGA:
6071 case CHIP_FIJI:
6072 case CHIP_POLARIS10:
6073 case CHIP_POLARIS11:
6074 case CHIP_POLARIS12:
6075 case CHIP_VEGAM:
6076 case CHIP_CARRIZO:
6077 case CHIP_STONEY:
6078 /* chips with display hardware */
6079 return true;
6080 default:
6081 /* IP discovery */
6082 if (!adev->ip_versions[DCE_HWIP][0] ||
6083 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6084 return false;
6085 return true;
6086 }
6087 }
6088