drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
35
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_probe_helper.h>
38 #include <drm/amdgpu_drm.h>
39 #include <linux/vgaarb.h>
40 #include <linux/vga_switcheroo.h>
41 #include <linux/efi.h>
42 #include "amdgpu.h"
43 #include "amdgpu_trace.h"
44 #include "amdgpu_i2c.h"
45 #include "atom.h"
46 #include "amdgpu_atombios.h"
47 #include "amdgpu_atomfirmware.h"
48 #include "amd_pcie.h"
49 #ifdef CONFIG_DRM_AMDGPU_SI
50 #include "si.h"
51 #endif
52 #ifdef CONFIG_DRM_AMDGPU_CIK
53 #include "cik.h"
54 #endif
55 #include "vi.h"
56 #include "soc15.h"
57 #include "nv.h"
58 #include "bif/bif_4_1_d.h"
59 #include <linux/firmware.h>
60 #include "amdgpu_vf_error.h"
61
62 #include "amdgpu_amdkfd.h"
63 #include "amdgpu_pm.h"
64
65 #include "amdgpu_xgmi.h"
66 #include "amdgpu_ras.h"
67 #include "amdgpu_pmu.h"
68 #include "amdgpu_fru_eeprom.h"
69 #include "amdgpu_reset.h"
70
71 #include <linux/suspend.h>
72 #include <drm/task_barrier.h>
73 #include <linux/pm_runtime.h>
74
75 #include <drm/drm_drv.h>
76
77 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
78 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
79 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
80 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
87 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
88 MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");
89
90 #define AMDGPU_RESUME_MS                2000
91 #define AMDGPU_MAX_RETRY_LIMIT          2
92 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
93
94 const char *amdgpu_asic_name[] = {
95         "TAHITI",
96         "PITCAIRN",
97         "VERDE",
98         "OLAND",
99         "HAINAN",
100         "BONAIRE",
101         "KAVERI",
102         "KABINI",
103         "HAWAII",
104         "MULLINS",
105         "TOPAZ",
106         "TONGA",
107         "FIJI",
108         "CARRIZO",
109         "STONEY",
110         "POLARIS10",
111         "POLARIS11",
112         "POLARIS12",
113         "VEGAM",
114         "VEGA10",
115         "VEGA12",
116         "VEGA20",
117         "RAVEN",
118         "ARCTURUS",
119         "RENOIR",
120         "ALDEBARAN",
121         "NAVI10",
122         "CYAN_SKILLFISH",
123         "NAVI14",
124         "NAVI12",
125         "SIENNA_CICHLID",
126         "NAVY_FLOUNDER",
127         "VANGOGH",
128         "DIMGREY_CAVEFISH",
129         "BEIGE_GOBY",
130         "YELLOW_CARP",
131         "IP DISCOVERY",
132         "LAST",
133 };
134
135 /**
136  * DOC: pcie_replay_count
137  *
138  * The amdgpu driver provides a sysfs API for reporting the total number
139  * of PCIe replays (NAKs).
140  * The file pcie_replay_count is used for this and returns the total
141  * number of replays as a sum of the NAKs generated and NAKs received.
142  */
143
144 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
145                 struct device_attribute *attr, char *buf)
146 {
147         struct drm_device *ddev = dev_get_drvdata(dev);
148         struct amdgpu_device *adev = drm_to_adev(ddev);
149         uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
150
151         return sysfs_emit(buf, "%llu\n", cnt);
152 }
153
154 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
155                 amdgpu_device_get_pcie_replay_count, NULL);
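
/*
 * Example (illustrative only, not part of the driver): the counter can be
 * read from userspace through sysfs. The card0 path below is an assumption;
 * the actual card index depends on the system.
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");
 *           unsigned long long replays = 0;
 *
 *           if (!f)
 *                   return 1;
 *           if (fscanf(f, "%llu", &replays) == 1)
 *                   printf("PCIe replays: %llu\n", replays);
 *           fclose(f);
 *           return 0;
 *   }
 */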
156
157 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
158
159 /**
160  * DOC: product_name
161  *
162  * The amdgpu driver provides a sysfs API for reporting the product name
163  * for the device.
164  * The file product_name is used for this and returns the product name
165  * as returned from the FRU.
166  * NOTE: This is only available for certain server cards
167  */
168
169 static ssize_t amdgpu_device_get_product_name(struct device *dev,
170                 struct device_attribute *attr, char *buf)
171 {
172         struct drm_device *ddev = dev_get_drvdata(dev);
173         struct amdgpu_device *adev = drm_to_adev(ddev);
174
175         return sysfs_emit(buf, "%s\n", adev->product_name);
176 }
177
178 static DEVICE_ATTR(product_name, S_IRUGO,
179                 amdgpu_device_get_product_name, NULL);
180
181 /**
182  * DOC: product_number
183  *
184  * The amdgpu driver provides a sysfs API for reporting the part number
185  * for the device.
186  * The file product_number is used for this and returns the part number
187  * as returned from the FRU.
188  * NOTE: This is only available for certain server cards
189  */
190
191 static ssize_t amdgpu_device_get_product_number(struct device *dev,
192                 struct device_attribute *attr, char *buf)
193 {
194         struct drm_device *ddev = dev_get_drvdata(dev);
195         struct amdgpu_device *adev = drm_to_adev(ddev);
196
197         return sysfs_emit(buf, "%s\n", adev->product_number);
198 }
199
200 static DEVICE_ATTR(product_number, S_IRUGO,
201                 amdgpu_device_get_product_number, NULL);
202
203 /**
204  * DOC: serial_number
205  *
206  * The amdgpu driver provides a sysfs API for reporting the serial number
207  * for the device
208  * The file serial_number is used for this and returns the serial number
209  * as returned from the FRU.
210  * NOTE: This is only available for certain server cards
211  */
212
213 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
214                 struct device_attribute *attr, char *buf)
215 {
216         struct drm_device *ddev = dev_get_drvdata(dev);
217         struct amdgpu_device *adev = drm_to_adev(ddev);
218
219         return sysfs_emit(buf, "%s\n", adev->serial);
220 }
221
222 static DEVICE_ATTR(serial_number, S_IRUGO,
223                 amdgpu_device_get_serial_number, NULL);
224
225 /**
226  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
227  *
228  * @dev: drm_device pointer
229  *
230  * Returns true if the device is a dGPU with ATPX power control,
231  * otherwise return false.
232  */
233 bool amdgpu_device_supports_px(struct drm_device *dev)
234 {
235         struct amdgpu_device *adev = drm_to_adev(dev);
236
237         if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
238                 return true;
239         return false;
240 }
241
242 /**
243  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
244  *
245  * @dev: drm_device pointer
246  *
247  * Returns true if the device is a dGPU with ACPI power control,
248  * otherwise return false.
249  */
250 bool amdgpu_device_supports_boco(struct drm_device *dev)
251 {
252         struct amdgpu_device *adev = drm_to_adev(dev);
253
254         if (adev->has_pr3 ||
255             ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
256                 return true;
257         return false;
258 }
259
260 /**
261  * amdgpu_device_supports_baco - Does the device support BACO
262  *
263  * @dev: drm_device pointer
264  *
265  * Returns true if the device supports BACO,
266  * otherwise return false.
267  */
268 bool amdgpu_device_supports_baco(struct drm_device *dev)
269 {
270         struct amdgpu_device *adev = drm_to_adev(dev);
271
272         return amdgpu_asic_supports_baco(adev);
273 }
274
275 /**
276  * amdgpu_device_supports_smart_shift - Is the device dGPU with
277  * smart shift support
278  *
279  * @dev: drm_device pointer
280  *
281  * Returns true if the device is a dGPU with Smart Shift support,
282  * otherwise returns false.
283  */
284 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
285 {
286         return (amdgpu_device_supports_boco(dev) &&
287                 amdgpu_acpi_is_power_shift_control_supported());
288 }
289
290 /*
291  * VRAM access helper functions
292  */
293
294 /**
295  * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
296  *
297  * @adev: amdgpu_device pointer
298  * @pos: offset of the buffer in vram
299  * @buf: virtual address of the buffer in system memory
300  * @size: read/write size, @buf must point to a buffer of at least @size bytes
301  * @write: true - write to vram, otherwise - read from vram
302  */
303 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
304                              void *buf, size_t size, bool write)
305 {
306         unsigned long flags;
307         uint32_t hi = ~0, tmp = 0;
308         uint32_t *data = buf;
309         uint64_t last;
310         int idx;
311
312         if (!drm_dev_enter(adev_to_drm(adev), &idx))
313                 return;
314
315         BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
316
317         spin_lock_irqsave(&adev->mmio_idx_lock, flags);
318         for (last = pos + size; pos < last; pos += 4) {
319                 tmp = pos >> 31;
320
321                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
322                 if (tmp != hi) {
323                         WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
324                         hi = tmp;
325                 }
326                 if (write)
327                         WREG32_NO_KIQ(mmMM_DATA, *data++);
328                 else
329                         *data++ = RREG32_NO_KIQ(mmMM_DATA);
330         }
331
332         spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
333         drm_dev_exit(idx);
334 }
335
336 /**
337  * amdgpu_device_aper_access - access vram by vram aperture
338  *
339  * @adev: amdgpu_device pointer
340  * @pos: offset of the buffer in vram
341  * @buf: virtual address of the buffer in system memory
342  * @size: read/write size, @buf must point to a buffer of at least @size bytes
343  * @write: true - write to vram, otherwise - read from vram
344  *
345  * Returns the number of bytes that have been transferred.
346  */
347 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
348                                  void *buf, size_t size, bool write)
349 {
350 #ifdef CONFIG_64BIT
351         void __iomem *addr;
352         size_t count = 0;
353         uint64_t last;
354
355         if (!adev->mman.aper_base_kaddr)
356                 return 0;
357
358         last = min(pos + size, adev->gmc.visible_vram_size);
359         if (last > pos) {
360                 addr = adev->mman.aper_base_kaddr + pos;
361                 count = last - pos;
362
363                 if (write) {
364                         memcpy_toio(addr, buf, count);
365                         mb();
366                         amdgpu_device_flush_hdp(adev, NULL);
367                 } else {
368                         amdgpu_device_invalidate_hdp(adev, NULL);
369                         mb();
370                         memcpy_fromio(buf, addr, count);
371                 }
372
373         }
374
375         return count;
376 #else
377         return 0;
378 #endif
379 }
380
381 /**
382  * amdgpu_device_vram_access - read/write a buffer in vram
383  *
384  * @adev: amdgpu_device pointer
385  * @pos: offset of the buffer in vram
386  * @buf: virtual address of the buffer in system memory
387  * @size: read/write size, @buf must point to a buffer of at least @size bytes
388  * @write: true - write to vram, otherwise - read from vram
389  */
390 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
391                                void *buf, size_t size, bool write)
392 {
393         size_t count;
394
395         /* try to use the VRAM aperture to access VRAM first */
396         count = amdgpu_device_aper_access(adev, pos, buf, size, write);
397         size -= count;
398         if (size) {
399                 /* use MM access for the rest of VRAM */
400                 pos += count;
401                 buf += count;
402                 amdgpu_device_mm_access(adev, pos, buf, size, write);
403         }
404 }
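
/*
 * Example (illustrative sketch only): copying a small buffer out of VRAM
 * with the helper above. The VRAM offset and buffer are hypothetical; @adev
 * is assumed to be a fully initialized device, and offset/size are kept
 * dword aligned as required by the MM access path.
 *
 *   u32 data[4];
 *
 *   amdgpu_device_vram_access(adev, 0x1000, data, sizeof(data), false);
 */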
405
406 /*
407  * register access helper functions.
408  */
409
410 /* Check if hw access should be skipped because of hotplug or device error */
411 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
412 {
413         if (adev->no_hw_access)
414                 return true;
415
416 #ifdef CONFIG_LOCKDEP
417         /*
418          * This is a bit complicated to understand, so worth a comment. What we assert
419          * here is that the GPU reset is not running on another thread in parallel.
420          *
421          * For this we trylock the read side of the reset semaphore, if that succeeds
422          * we know that the reset is not running in parallel.
423          *
424          * If the trylock fails we assert that we are either already holding the read
425          * side of the lock or are the reset thread itself and hold the write side of
426          * the lock.
427          */
428         if (in_task()) {
429                 if (down_read_trylock(&adev->reset_domain->sem))
430                         up_read(&adev->reset_domain->sem);
431                 else
432                         lockdep_assert_held(&adev->reset_domain->sem);
433         }
434 #endif
435         return false;
436 }
437
438 /**
439  * amdgpu_device_rreg - read a memory mapped IO or indirect register
440  *
441  * @adev: amdgpu_device pointer
442  * @reg: dword aligned register offset
443  * @acc_flags: access flags which require special behavior
444  *
445  * Returns the 32 bit value from the offset specified.
446  */
447 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
448                             uint32_t reg, uint32_t acc_flags)
449 {
450         uint32_t ret;
451
452         if (amdgpu_device_skip_hw_access(adev))
453                 return 0;
454
455         if ((reg * 4) < adev->rmmio_size) {
456                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
457                     amdgpu_sriov_runtime(adev) &&
458                     down_read_trylock(&adev->reset_domain->sem)) {
459                         ret = amdgpu_kiq_rreg(adev, reg);
460                         up_read(&adev->reset_domain->sem);
461                 } else {
462                         ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
463                 }
464         } else {
465                 ret = adev->pcie_rreg(adev, reg * 4);
466         }
467
468         trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
469
470         return ret;
471 }
472
473 /*
474  * MMIO register read with bytes helper functions
475  * @offset: byte offset from MMIO start
476  *
477  */
478
479 /**
480  * amdgpu_mm_rreg8 - read a memory mapped IO register
481  *
482  * @adev: amdgpu_device pointer
483  * @offset: byte aligned register offset
484  *
485  * Returns the 8 bit value from the offset specified.
486  */
487 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
488 {
489         if (amdgpu_device_skip_hw_access(adev))
490                 return 0;
491
492         if (offset < adev->rmmio_size)
493                 return (readb(adev->rmmio + offset));
494         BUG();
495 }
496
497 /*
498  * MMIO register write with bytes helper functions
499  * @offset: byte offset from MMIO start
500  * @value: the value to be written to the register
501  *
502  */
503 /**
504  * amdgpu_mm_wreg8 - write a memory mapped IO register
505  *
506  * @adev: amdgpu_device pointer
507  * @offset: byte aligned register offset
508  * @value: 8 bit value to write
509  *
510  * Writes the value specified to the offset specified.
511  */
512 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
513 {
514         if (amdgpu_device_skip_hw_access(adev))
515                 return;
516
517         if (offset < adev->rmmio_size)
518                 writeb(value, adev->rmmio + offset);
519         else
520                 BUG();
521 }
522
523 /**
524  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
525  *
526  * @adev: amdgpu_device pointer
527  * @reg: dword aligned register offset
528  * @v: 32 bit value to write to the register
529  * @acc_flags: access flags which require special behavior
530  *
531  * Writes the value specified to the offset specified.
532  */
533 void amdgpu_device_wreg(struct amdgpu_device *adev,
534                         uint32_t reg, uint32_t v,
535                         uint32_t acc_flags)
536 {
537         if (amdgpu_device_skip_hw_access(adev))
538                 return;
539
540         if ((reg * 4) < adev->rmmio_size) {
541                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
542                     amdgpu_sriov_runtime(adev) &&
543                     down_read_trylock(&adev->reset_domain->sem)) {
544                         amdgpu_kiq_wreg(adev, reg, v);
545                         up_read(&adev->reset_domain->sem);
546                 } else {
547                         writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
548                 }
549         } else {
550                 adev->pcie_wreg(adev, reg * 4, v);
551         }
552
553         trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
554 }
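
/*
 * Example (illustrative sketch only): a read-modify-write of a register
 * through these helpers. The register offset and bit mask are hypothetical;
 * driver code normally goes through the RREG32/WREG32 style macros that
 * wrap these functions.
 *
 *   u32 tmp;
 *
 *   tmp = amdgpu_device_rreg(adev, 0x1234, 0);
 *   tmp |= 0x1;
 *   amdgpu_device_wreg(adev, 0x1234, tmp, 0);
 */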
555
556 /**
557  * amdgpu_mm_wreg_mmio_rlc -  write register either with direct/indirect mmio or with RLC path if in range
558  *
559  * @adev: amdgpu_device pointer
560  * @reg: mmio/rlc register
561  * @v: value to write
562  *
563  * This function is invoked only for debugfs register access.
564  */
565 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
566                              uint32_t reg, uint32_t v)
567 {
568         if (amdgpu_device_skip_hw_access(adev))
569                 return;
570
571         if (amdgpu_sriov_fullaccess(adev) &&
572             adev->gfx.rlc.funcs &&
573             adev->gfx.rlc.funcs->is_rlcg_access_range) {
574                 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
575                         return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
576         } else if ((reg * 4) >= adev->rmmio_size) {
577                 adev->pcie_wreg(adev, reg * 4, v);
578         } else {
579                 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
580         }
581 }
582
583 /**
584  * amdgpu_mm_rdoorbell - read a doorbell dword
585  *
586  * @adev: amdgpu_device pointer
587  * @index: doorbell index
588  *
589  * Returns the value in the doorbell aperture at the
590  * requested doorbell index (CIK).
591  */
592 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
593 {
594         if (amdgpu_device_skip_hw_access(adev))
595                 return 0;
596
597         if (index < adev->doorbell.num_doorbells) {
598                 return readl(adev->doorbell.ptr + index);
599         } else {
600                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
601                 return 0;
602         }
603 }
604
605 /**
606  * amdgpu_mm_wdoorbell - write a doorbell dword
607  *
608  * @adev: amdgpu_device pointer
609  * @index: doorbell index
610  * @v: value to write
611  *
612  * Writes @v to the doorbell aperture at the
613  * requested doorbell index (CIK).
614  */
615 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
616 {
617         if (amdgpu_device_skip_hw_access(adev))
618                 return;
619
620         if (index < adev->doorbell.num_doorbells) {
621                 writel(v, adev->doorbell.ptr + index);
622         } else {
623                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
624         }
625 }
626
627 /**
628  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
629  *
630  * @adev: amdgpu_device pointer
631  * @index: doorbell index
632  *
633  * Returns the value in the doorbell aperture at the
634  * requested doorbell index (VEGA10+).
635  */
636 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
637 {
638         if (amdgpu_device_skip_hw_access(adev))
639                 return 0;
640
641         if (index < adev->doorbell.num_doorbells) {
642                 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
643         } else {
644                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
645                 return 0;
646         }
647 }
648
649 /**
650  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
651  *
652  * @adev: amdgpu_device pointer
653  * @index: doorbell index
654  * @v: value to write
655  *
656  * Writes @v to the doorbell aperture at the
657  * requested doorbell index (VEGA10+).
658  */
659 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
660 {
661         if (amdgpu_device_skip_hw_access(adev))
662                 return;
663
664         if (index < adev->doorbell.num_doorbells) {
665                 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
666         } else {
667                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
668         }
669 }
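
/*
 * Example (illustrative sketch only): a ring would typically publish its
 * write pointer through its assigned doorbell slot, e.g.:
 *
 *   amdgpu_mm_wdoorbell64(adev, ring->doorbell_index, ring->wptr);
 *
 * where @ring is an amdgpu_ring that has been assigned a doorbell index.
 */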
670
671 /**
672  * amdgpu_device_indirect_rreg - read an indirect register
673  *
674  * @adev: amdgpu_device pointer
675  * @pcie_index: mmio register offset
676  * @pcie_data: mmio register offset
677  * @reg_addr: indirect register address to read from
678  *
679  * Returns the value of indirect register @reg_addr
680  */
681 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
682                                 u32 pcie_index, u32 pcie_data,
683                                 u32 reg_addr)
684 {
685         unsigned long flags;
686         u32 r;
687         void __iomem *pcie_index_offset;
688         void __iomem *pcie_data_offset;
689
690         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
691         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
692         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
693
694         writel(reg_addr, pcie_index_offset);
695         readl(pcie_index_offset);
696         r = readl(pcie_data_offset);
697         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
698
699         return r;
700 }
701
702 /**
703  * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
704  *
705  * @adev: amdgpu_device pointer
706  * @pcie_index: mmio register offset
707  * @pcie_data: mmio register offset
708  * @reg_addr: indirect register address to read from
709  *
710  * Returns the value of indirect register @reg_addr
711  */
712 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
713                                   u32 pcie_index, u32 pcie_data,
714                                   u32 reg_addr)
715 {
716         unsigned long flags;
717         u64 r;
718         void __iomem *pcie_index_offset;
719         void __iomem *pcie_data_offset;
720
721         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
722         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
723         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
724
725         /* read low 32 bits */
726         writel(reg_addr, pcie_index_offset);
727         readl(pcie_index_offset);
728         r = readl(pcie_data_offset);
729         /* read high 32 bits */
730         writel(reg_addr + 4, pcie_index_offset);
731         readl(pcie_index_offset);
732         r |= ((u64)readl(pcie_data_offset) << 32);
733         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
734
735         return r;
736 }
737
738 /**
739  * amdgpu_device_indirect_wreg - write an indirect register address
740  *
741  * @adev: amdgpu_device pointer
742  * @pcie_index: mmio register offset
743  * @pcie_data: mmio register offset
744  * @reg_addr: indirect register offset
745  * @reg_data: indirect register data
746  *
747  */
748 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
749                                  u32 pcie_index, u32 pcie_data,
750                                  u32 reg_addr, u32 reg_data)
751 {
752         unsigned long flags;
753         void __iomem *pcie_index_offset;
754         void __iomem *pcie_data_offset;
755
756         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
757         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
758         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
759
760         writel(reg_addr, pcie_index_offset);
761         readl(pcie_index_offset);
762         writel(reg_data, pcie_data_offset);
763         readl(pcie_data_offset);
764         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
765 }
766
767 /**
768  * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
769  *
770  * @adev: amdgpu_device pointer
771  * @pcie_index: mmio register offset
772  * @pcie_data: mmio register offset
773  * @reg_addr: indirect register offset
774  * @reg_data: indirect register data
775  *
776  */
777 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
778                                    u32 pcie_index, u32 pcie_data,
779                                    u32 reg_addr, u64 reg_data)
780 {
781         unsigned long flags;
782         void __iomem *pcie_index_offset;
783         void __iomem *pcie_data_offset;
784
785         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
786         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
787         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
788
789         /* write low 32 bits */
790         writel(reg_addr, pcie_index_offset);
791         readl(pcie_index_offset);
792         writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
793         readl(pcie_data_offset);
794         /* write high 32 bits */
795         writel(reg_addr + 4, pcie_index_offset);
796         readl(pcie_index_offset);
797         writel((u32)(reg_data >> 32), pcie_data_offset);
798         readl(pcie_data_offset);
799         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
800 }
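
/*
 * Example (simplified sketch, assuming an ASIC whose NBIO callbacks provide
 * the PCIE index/data register offsets): the per-ASIC pcie_rreg/pcie_wreg
 * callbacks are typically thin wrappers around the indirect helpers above.
 *
 *   static u32 example_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *   {
 *           u32 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 *           u32 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 *
 *           return amdgpu_device_indirect_rreg(adev, pcie_index, pcie_data, reg);
 *   }
 */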
801
802 /**
803  * amdgpu_invalid_rreg - dummy reg read function
804  *
805  * @adev: amdgpu_device pointer
806  * @reg: offset of register
807  *
808  * Dummy register read function.  Used for register blocks
809  * that certain asics don't have (all asics).
810  * Returns the value in the register.
811  */
812 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
813 {
814         DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
815         BUG();
816         return 0;
817 }
818
819 /**
820  * amdgpu_invalid_wreg - dummy reg write function
821  *
822  * @adev: amdgpu_device pointer
823  * @reg: offset of register
824  * @v: value to write to the register
825  *
826  * Dummy register write function.  Used for register blocks
827  * that certain asics don't have (all asics).
828  */
829 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
830 {
831         DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
832                   reg, v);
833         BUG();
834 }
835
836 /**
837  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
838  *
839  * @adev: amdgpu_device pointer
840  * @reg: offset of register
841  *
842  * Dummy register read function.  Used for register blocks
843  * that certain asics don't have (all asics).
844  * Returns the value in the register.
845  */
846 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
847 {
848         DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
849         BUG();
850         return 0;
851 }
852
853 /**
854  * amdgpu_invalid_wreg64 - dummy reg write function
855  *
856  * @adev: amdgpu_device pointer
857  * @reg: offset of register
858  * @v: value to write to the register
859  *
860  * Dummy register write function.  Used for register blocks
861  * that certain asics don't have (all asics).
862  */
863 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
864 {
865         DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
866                   reg, v);
867         BUG();
868 }
869
870 /**
871  * amdgpu_block_invalid_rreg - dummy reg read function
872  *
873  * @adev: amdgpu_device pointer
874  * @block: offset of instance
875  * @reg: offset of register
876  *
877  * Dummy register read function.  Used for register blocks
878  * that certain asics don't have (all asics).
879  * Returns the value in the register.
880  */
881 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
882                                           uint32_t block, uint32_t reg)
883 {
884         DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
885                   reg, block);
886         BUG();
887         return 0;
888 }
889
890 /**
891  * amdgpu_block_invalid_wreg - dummy reg write function
892  *
893  * @adev: amdgpu_device pointer
894  * @block: offset of instance
895  * @reg: offset of register
896  * @v: value to write to the register
897  *
898  * Dummy register write function.  Used for register blocks
899  * that certain asics don't have (all asics).
900  */
901 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
902                                       uint32_t block,
903                                       uint32_t reg, uint32_t v)
904 {
905         DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
906                   reg, block, v);
907         BUG();
908 }
909
910 /**
911  * amdgpu_device_asic_init - Wrapper for atom asic_init
912  *
913  * @adev: amdgpu_device pointer
914  *
915  * Does any asic specific work and then calls atom asic init.
916  */
917 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
918 {
919         amdgpu_asic_pre_asic_init(adev);
920
921         return amdgpu_atom_asic_init(adev->mode_info.atom_context);
922 }
923
924 /**
925  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
926  *
927  * @adev: amdgpu_device pointer
928  *
929  * Allocates a scratch page of VRAM for use by various things in the
930  * driver.
931  */
932 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
933 {
934         return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
935                                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
936                                        &adev->vram_scratch.robj,
937                                        &adev->vram_scratch.gpu_addr,
938                                        (void **)&adev->vram_scratch.ptr);
939 }
940
941 /**
942  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
943  *
944  * @adev: amdgpu_device pointer
945  *
946  * Frees the VRAM scratch page.
947  */
948 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
949 {
950         amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
951 }
952
953 /**
954  * amdgpu_device_program_register_sequence - program an array of registers.
955  *
956  * @adev: amdgpu_device pointer
957  * @registers: pointer to the register array
958  * @array_size: size of the register array
959  *
960  * Programs an array of registers with AND and OR masks.
961  * This is a helper for setting golden registers.
962  */
963 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
964                                              const u32 *registers,
965                                              const u32 array_size)
966 {
967         u32 tmp, reg, and_mask, or_mask;
968         int i;
969
970         if (array_size % 3)
971                 return;
972
973         for (i = 0; i < array_size; i += 3) {
974                 reg = registers[i + 0];
975                 and_mask = registers[i + 1];
976                 or_mask = registers[i + 2];
977
978                 if (and_mask == 0xffffffff) {
979                         tmp = or_mask;
980                 } else {
981                         tmp = RREG32(reg);
982                         tmp &= ~and_mask;
983                         if (adev->family >= AMDGPU_FAMILY_AI)
984                                 tmp |= (or_mask & and_mask);
985                         else
986                                 tmp |= or_mask;
987                 }
988                 WREG32(reg, tmp);
989         }
990 }
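
/*
 * Example (hypothetical register offsets and masks): golden settings are
 * encoded as {register, AND mask, OR mask} triples and applied in one call:
 *
 *   static const u32 golden_settings_example[] = {
 *           0x1234, 0xffffffff, 0x00000001,
 *           0x5678, 0x0000ff00, 0x00002000,
 *   };
 *
 *   amdgpu_device_program_register_sequence(adev, golden_settings_example,
 *                                           ARRAY_SIZE(golden_settings_example));
 */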
991
992 /**
993  * amdgpu_device_pci_config_reset - reset the GPU
994  *
995  * @adev: amdgpu_device pointer
996  *
997  * Resets the GPU using the pci config reset sequence.
998  * Only applicable to asics prior to vega10.
999  */
1000 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1001 {
1002         pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1003 }
1004
1005 /**
1006  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1007  *
1008  * @adev: amdgpu_device pointer
1009  *
1010  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1011  */
1012 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1013 {
1014         return pci_reset_function(adev->pdev);
1015 }
1016
1017 /*
1018  * GPU doorbell aperture helpers function.
1019  */
1020 /**
1021  * amdgpu_device_doorbell_init - Init doorbell driver information.
1022  *
1023  * @adev: amdgpu_device pointer
1024  *
1025  * Init doorbell driver information (CIK)
1026  * Returns 0 on success, error on failure.
1027  */
1028 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1029 {
1030
1031         /* No doorbell on SI hardware generation */
1032         if (adev->asic_type < CHIP_BONAIRE) {
1033                 adev->doorbell.base = 0;
1034                 adev->doorbell.size = 0;
1035                 adev->doorbell.num_doorbells = 0;
1036                 adev->doorbell.ptr = NULL;
1037                 return 0;
1038         }
1039
1040         if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1041                 return -EINVAL;
1042
1043         amdgpu_asic_init_doorbell_index(adev);
1044
1045         /* doorbell bar mapping */
1046         adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1047         adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1048
1049         adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
1050                                              adev->doorbell_index.max_assignment+1);
1051         if (adev->doorbell.num_doorbells == 0)
1052                 return -EINVAL;
1053
1054         /* For Vega, reserve and map two pages on doorbell BAR since SDMA
1055          * paging queue doorbell uses the second page. The
1056          * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1057          * doorbells are in the first page. So with the paging queue enabled,
1058          * the max num_doorbells should be incremented by one page (0x400 in dwords).
1059          */
1060         if (adev->asic_type >= CHIP_VEGA10)
1061                 adev->doorbell.num_doorbells += 0x400;
1062
1063         adev->doorbell.ptr = ioremap(adev->doorbell.base,
1064                                      adev->doorbell.num_doorbells *
1065                                      sizeof(u32));
1066         if (adev->doorbell.ptr == NULL)
1067                 return -ENOMEM;
1068
1069         return 0;
1070 }
1071
1072 /**
1073  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1074  *
1075  * @adev: amdgpu_device pointer
1076  *
1077  * Tear down doorbell driver information (CIK)
1078  */
1079 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1080 {
1081         iounmap(adev->doorbell.ptr);
1082         adev->doorbell.ptr = NULL;
1083 }
1084
1085
1086
1087 /*
1088  * amdgpu_device_wb_*()
1089  * Writeback is the method by which the GPU updates special pages in memory
1090  * with the status of certain GPU events (fences, ring pointers,etc.).
1091  */
1092
1093 /**
1094  * amdgpu_device_wb_fini - Disable Writeback and free memory
1095  *
1096  * @adev: amdgpu_device pointer
1097  *
1098  * Disables Writeback and frees the Writeback memory (all asics).
1099  * Used at driver shutdown.
1100  */
1101 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1102 {
1103         if (adev->wb.wb_obj) {
1104                 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1105                                       &adev->wb.gpu_addr,
1106                                       (void **)&adev->wb.wb);
1107                 adev->wb.wb_obj = NULL;
1108         }
1109 }
1110
1111 /**
1112  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1113  *
1114  * @adev: amdgpu_device pointer
1115  *
1116  * Initializes writeback and allocates writeback memory (all asics).
1117  * Used at driver startup.
1118  * Returns 0 on success or a negative error code on failure.
1119  */
1120 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1121 {
1122         int r;
1123
1124         if (adev->wb.wb_obj == NULL) {
1125                 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1126                 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1127                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1128                                             &adev->wb.wb_obj, &adev->wb.gpu_addr,
1129                                             (void **)&adev->wb.wb);
1130                 if (r) {
1131                         dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1132                         return r;
1133                 }
1134
1135                 adev->wb.num_wb = AMDGPU_MAX_WB;
1136                 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1137
1138                 /* clear wb memory */
1139                 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1140         }
1141
1142         return 0;
1143 }
1144
1145 /**
1146  * amdgpu_device_wb_get - Allocate a wb entry
1147  *
1148  * @adev: amdgpu_device pointer
1149  * @wb: wb index
1150  *
1151  * Allocate a wb slot for use by the driver (all asics).
1152  * Returns 0 on success or -EINVAL on failure.
1153  */
1154 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1155 {
1156         unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1157
1158         if (offset < adev->wb.num_wb) {
1159                 __set_bit(offset, adev->wb.used);
1160                 *wb = offset << 3; /* convert to dw offset */
1161                 return 0;
1162         } else {
1163                 return -EINVAL;
1164         }
1165 }
1166
1167 /**
1168  * amdgpu_device_wb_free - Free a wb entry
1169  *
1170  * @adev: amdgpu_device pointer
1171  * @wb: wb index
1172  *
1173  * Free a wb slot allocated for use by the driver (all asics)
1174  */
1175 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1176 {
1177         wb >>= 3;
1178         if (wb < adev->wb.num_wb)
1179                 __clear_bit(wb, adev->wb.used);
1180 }
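
/*
 * Example (illustrative sketch only): a ring or IP block allocates a
 * writeback slot, derives the GPU and CPU addresses for it from the
 * offsets kept in adev->wb, and releases it again when done.
 *
 *   u32 wb_index;
 *   u64 wb_gpu_addr;
 *   volatile u32 *wb_cpu_addr;
 *
 *   if (amdgpu_device_wb_get(adev, &wb_index))
 *           return -EINVAL;
 *   wb_gpu_addr = adev->wb.gpu_addr + (wb_index * 4);
 *   wb_cpu_addr = &adev->wb.wb[wb_index];
 *   ...
 *   amdgpu_device_wb_free(adev, wb_index);
 */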
1181
1182 /**
1183  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1184  *
1185  * @adev: amdgpu_device pointer
1186  *
1187  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1188  * to fail, but if any of the BARs is not accessible after the resize we abort
1189  * driver loading by returning -ENODEV.
1190  */
1191 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1192 {
1193         int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1194         struct pci_bus *root;
1195         struct resource *res;
1196         unsigned i;
1197         u16 cmd;
1198         int r;
1199
1200         /* Bypass for VF */
1201         if (amdgpu_sriov_vf(adev))
1202                 return 0;
1203
1204         /* skip if the bios has already enabled large BAR */
1205         if (adev->gmc.real_vram_size &&
1206             (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1207                 return 0;
1208
1209         /* Check if the root BUS has 64bit memory resources */
1210         root = adev->pdev->bus;
1211         while (root->parent)
1212                 root = root->parent;
1213
1214         pci_bus_for_each_resource(root, res, i) {
1215                 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1216                     res->start > 0x100000000ull)
1217                         break;
1218         }
1219
1220         /* Trying to resize is pointless without a root hub window above 4GB */
1221         if (!res)
1222                 return 0;
1223
1224         /* Limit the BAR size to what is available */
1225         rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1226                         rbar_size);
1227
1228         /* Disable memory decoding while we change the BAR addresses and size */
1229         pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1230         pci_write_config_word(adev->pdev, PCI_COMMAND,
1231                               cmd & ~PCI_COMMAND_MEMORY);
1232
1233         /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1234         amdgpu_device_doorbell_fini(adev);
1235         if (adev->asic_type >= CHIP_BONAIRE)
1236                 pci_release_resource(adev->pdev, 2);
1237
1238         pci_release_resource(adev->pdev, 0);
1239
1240         r = pci_resize_resource(adev->pdev, 0, rbar_size);
1241         if (r == -ENOSPC)
1242                 DRM_INFO("Not enough PCI address space for a large BAR.");
1243         else if (r && r != -ENOTSUPP)
1244                 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1245
1246         pci_assign_unassigned_bus_resources(adev->pdev->bus);
1247
1248         /* When the doorbell or fb BAR isn't available we have no chance of
1249          * using the device.
1250          */
1251         r = amdgpu_device_doorbell_init(adev);
1252         if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1253                 return -ENODEV;
1254
1255         pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1256
1257         return 0;
1258 }
1259
1260 /*
1261  * GPU helpers function.
1262  */
1263 /**
1264  * amdgpu_device_need_post - check if the hw need post or not
1265  *
1266  * @adev: amdgpu_device pointer
1267  *
1268  * Check if the asic has been initialized (all asics) at driver startup,
1269  * or if post is needed because a hw reset was performed.
1270  * Returns true if post is needed, false if not.
1271  */
1272 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1273 {
1274         uint32_t reg;
1275
1276         if (amdgpu_sriov_vf(adev))
1277                 return false;
1278
1279         if (amdgpu_passthrough(adev)) {
1280                 /* for FIJI: in the whole-GPU pass-through virtualization case, after a VM
1281                  * reboot some old SMC firmware still needs the driver to do a vPost or the
1282                  * GPU hangs, while SMC firmware versions above 22.15 don't have this flaw,
1283                  * so force vPost for SMC versions below 22.15
1284                  */
1285                 if (adev->asic_type == CHIP_FIJI) {
1286                         int err;
1287                         uint32_t fw_ver;
1288                         err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1289                         /* force vPost if an error occurred */
1290                         if (err)
1291                                 return true;
1292
1293                         fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1294                         if (fw_ver < 0x00160e00)
1295                                 return true;
1296                 }
1297         }
1298
1299         /* Don't post if we need to reset whole hive on init */
1300         if (adev->gmc.xgmi.pending_reset)
1301                 return false;
1302
1303         if (adev->has_hw_reset) {
1304                 adev->has_hw_reset = false;
1305                 return true;
1306         }
1307
1308         /* bios scratch used on CIK+ */
1309         if (adev->asic_type >= CHIP_BONAIRE)
1310                 return amdgpu_atombios_scratch_need_asic_init(adev);
1311
1312         /* check MEM_SIZE for older asics */
1313         reg = amdgpu_asic_get_config_memsize(adev);
1314
1315         if ((reg != 0) && (reg != 0xffffffff))
1316                 return false;
1317
1318         return true;
1319 }
1320
1321 /**
1322  * amdgpu_device_should_use_aspm - check if the device should program ASPM
1323  *
1324  * @adev: amdgpu_device pointer
1325  *
1326  * Confirm whether the module parameter and pcie bridge agree that ASPM should
1327  * be set for this device.
1328  *
1329  * Returns true if it should be used or false if not.
1330  */
1331 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1332 {
1333         switch (amdgpu_aspm) {
1334         case -1:
1335                 break;
1336         case 0:
1337                 return false;
1338         case 1:
1339                 return true;
1340         default:
1341                 return false;
1342         }
1343         return pcie_aspm_enabled(adev->pdev);
1344 }
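
/*
 * Example: ASPM behavior can be forced from the kernel command line via the
 * module parameter, e.g. amdgpu.aspm=0 to disable it, amdgpu.aspm=1 to
 * enable it, or amdgpu.aspm=-1 (the default) to follow the PCIe bridge
 * configuration as queried above.
 */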
1345
1346 /* if we get transitioned to only one device, take VGA back */
1347 /**
1348  * amdgpu_device_vga_set_decode - enable/disable vga decode
1349  *
1350  * @pdev: PCI device pointer
1351  * @state: enable/disable vga decode
1352  *
1353  * Enable/disable vga decode (all asics).
1354  * Returns VGA resource flags.
1355  */
1356 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1357                 bool state)
1358 {
1359         struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1360         amdgpu_asic_set_vga_state(adev, state);
1361         if (state)
1362                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1363                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1364         else
1365                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1366 }
1367
1368 /**
1369  * amdgpu_device_check_block_size - validate the vm block size
1370  *
1371  * @adev: amdgpu_device pointer
1372  *
1373  * Validates the vm block size specified via module parameter.
1374  * The vm block size defines number of bits in page table versus page directory,
1375  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1376  * page table and the remaining bits are in the page directory.
1377  */
1378 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1379 {
1380         /* defines number of bits in page table versus page directory,
1381          * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1382          * page table and the remaining bits are in the page directory */
1383         if (amdgpu_vm_block_size == -1)
1384                 return;
1385
1386         if (amdgpu_vm_block_size < 9) {
1387                 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1388                          amdgpu_vm_block_size);
1389                 amdgpu_vm_block_size = -1;
1390         }
1391 }
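
/*
 * Example: with the minimum block size of 9 bits, one page table covers
 * 2^9 entries * 4KB = 2MB of address space; larger values move more of the
 * translation into the page table and leave fewer bits for the directory.
 */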
1392
1393 /**
1394  * amdgpu_device_check_vm_size - validate the vm size
1395  *
1396  * @adev: amdgpu_device pointer
1397  *
1398  * Validates the vm size in GB specified via module parameter.
1399  * The VM size is the size of the GPU virtual memory space in GB.
1400  */
1401 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1402 {
1403         /* no need to check the default value */
1404         if (amdgpu_vm_size == -1)
1405                 return;
1406
1407         if (amdgpu_vm_size < 1) {
1408                 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1409                          amdgpu_vm_size);
1410                 amdgpu_vm_size = -1;
1411         }
1412 }
1413
1414 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1415 {
1416         struct sysinfo si;
1417         bool is_os_64 = (sizeof(void *) == 8);
1418         uint64_t total_memory;
1419         uint64_t dram_size_seven_GB = 0x1B8000000;
1420         uint64_t dram_size_three_GB = 0xB8000000;
1421
1422         if (amdgpu_smu_memory_pool_size == 0)
1423                 return;
1424
1425         if (!is_os_64) {
1426                 DRM_WARN("Not 64-bit OS, feature not supported\n");
1427                 goto def_value;
1428         }
1429         si_meminfo(&si);
1430         total_memory = (uint64_t)si.totalram * si.mem_unit;
1431
1432         if ((amdgpu_smu_memory_pool_size == 1) ||
1433                 (amdgpu_smu_memory_pool_size == 2)) {
1434                 if (total_memory < dram_size_three_GB)
1435                         goto def_value1;
1436         } else if ((amdgpu_smu_memory_pool_size == 4) ||
1437                 (amdgpu_smu_memory_pool_size == 8)) {
1438                 if (total_memory < dram_size_seven_GB)
1439                         goto def_value1;
1440         } else {
1441                 DRM_WARN("Smu memory pool size not supported\n");
1442                 goto def_value;
1443         }
1444         adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1445
1446         return;
1447
1448 def_value1:
1449         DRM_WARN("Not enough system memory\n");
1450 def_value:
1451         adev->pm.smu_prv_buffer_size = 0;
1452 }
1453
1454 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1455 {
1456         if (!(adev->flags & AMD_IS_APU) ||
1457             adev->asic_type < CHIP_RAVEN)
1458                 return 0;
1459
1460         switch (adev->asic_type) {
1461         case CHIP_RAVEN:
1462                 if (adev->pdev->device == 0x15dd)
1463                         adev->apu_flags |= AMD_APU_IS_RAVEN;
1464                 if (adev->pdev->device == 0x15d8)
1465                         adev->apu_flags |= AMD_APU_IS_PICASSO;
1466                 break;
1467         case CHIP_RENOIR:
1468                 if ((adev->pdev->device == 0x1636) ||
1469                     (adev->pdev->device == 0x164c))
1470                         adev->apu_flags |= AMD_APU_IS_RENOIR;
1471                 else
1472                         adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1473                 break;
1474         case CHIP_VANGOGH:
1475                 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1476                 break;
1477         case CHIP_YELLOW_CARP:
1478                 break;
1479         case CHIP_CYAN_SKILLFISH:
1480                 if ((adev->pdev->device == 0x13FE) ||
1481                     (adev->pdev->device == 0x143F))
1482                         adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1483                 break;
1484         default:
1485                 break;
1486         }
1487
1488         return 0;
1489 }
1490
1491 /**
1492  * amdgpu_device_check_arguments - validate module params
1493  *
1494  * @adev: amdgpu_device pointer
1495  *
1496  * Validates certain module parameters and updates
1497  * the associated values used by the driver (all asics).
1498  */
1499 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1500 {
1501         if (amdgpu_sched_jobs < 4) {
1502                 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1503                          amdgpu_sched_jobs);
1504                 amdgpu_sched_jobs = 4;
1505         } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1506                 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1507                          amdgpu_sched_jobs);
1508                 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1509         }
1510
1511         if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1512                 /* gart size must be greater or equal to 32M */
1513                 dev_warn(adev->dev, "gart size (%d) too small\n",
1514                          amdgpu_gart_size);
1515                 amdgpu_gart_size = -1;
1516         }
1517
1518         if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1519                 /* gtt size must be greater or equal to 32M */
1520                 dev_warn(adev->dev, "gtt size (%d) too small\n",
1521                                  amdgpu_gtt_size);
1522                 amdgpu_gtt_size = -1;
1523         }
1524
1525         /* valid range is between 4 and 9 inclusive */
1526         if (amdgpu_vm_fragment_size != -1 &&
1527             (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1528                 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1529                 amdgpu_vm_fragment_size = -1;
1530         }
1531
1532         if (amdgpu_sched_hw_submission < 2) {
1533                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1534                          amdgpu_sched_hw_submission);
1535                 amdgpu_sched_hw_submission = 2;
1536         } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1537                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1538                          amdgpu_sched_hw_submission);
1539                 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1540         }
1541
1542         amdgpu_device_check_smu_prv_buffer_size(adev);
1543
1544         amdgpu_device_check_vm_size(adev);
1545
1546         amdgpu_device_check_block_size(adev);
1547
1548         adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1549
1550         amdgpu_gmc_tmz_set(adev);
1551
1552         amdgpu_gmc_noretry_set(adev);
1553
1554         return 0;
1555 }
1556
1557 /**
1558  * amdgpu_switcheroo_set_state - set switcheroo state
1559  *
1560  * @pdev: pci dev pointer
1561  * @state: vga_switcheroo state
1562  *
1563  * Callback for the switcheroo driver.  Suspends or resumes the
1564  * asics before or after they are powered up using ACPI methods.
1565  */
1566 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1567                                         enum vga_switcheroo_state state)
1568 {
1569         struct drm_device *dev = pci_get_drvdata(pdev);
1570         int r;
1571
1572         if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1573                 return;
1574
1575         if (state == VGA_SWITCHEROO_ON) {
1576                 pr_info("switched on\n");
1577                 /* don't suspend or resume card normally */
1578                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1579
1580                 pci_set_power_state(pdev, PCI_D0);
1581                 amdgpu_device_load_pci_state(pdev);
1582                 r = pci_enable_device(pdev);
1583                 if (r)
1584                         DRM_WARN("pci_enable_device failed (%d)\n", r);
1585                 amdgpu_device_resume(dev, true);
1586
1587                 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1588         } else {
1589                 pr_info("switched off\n");
1590                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1591                 amdgpu_device_suspend(dev, true);
1592                 amdgpu_device_cache_pci_state(pdev);
1593                 /* Shut down the device */
1594                 pci_disable_device(pdev);
1595                 pci_set_power_state(pdev, PCI_D3cold);
1596                 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1597         }
1598 }
1599
1600 /**
1601  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1602  *
1603  * @pdev: pci dev pointer
1604  *
1605  * Callback for the switcheroo driver.  Checks whether the switcheroo
1606  * state can be changed.
1607  * Returns true if the state can be changed, false if not.
1608  */
1609 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1610 {
1611         struct drm_device *dev = pci_get_drvdata(pdev);
1612
1613         /*
1614          * FIXME: open_count is protected by drm_global_mutex but that would lead to
1615          * locking inversion with the driver load path. And the access here is
1616          * completely racy anyway. So don't bother with locking for now.
1617         */
1618         return atomic_read(&dev->open_count) == 0;
1619 }
1620
1621 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1622         .set_gpu_state = amdgpu_switcheroo_set_state,
1623         .reprobe = NULL,
1624         .can_switch = amdgpu_switcheroo_can_switch,
1625 };
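
/*
 * Illustrative sketch, not part of this file: the ops table above is what
 * device bring-up hands to the vga_switcheroo core, roughly along these
 * lines (the exact call site lives elsewhere in the driver):
 *
 *	bool px = amdgpu_device_supports_px(adev_to_drm(adev));
 *
 *	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, px);
 */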
1626
1627 /**
1628  * amdgpu_device_ip_set_clockgating_state - set the CG state
1629  *
1630  * @dev: amdgpu_device pointer
1631  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1632  * @state: clockgating state (gate or ungate)
1633  *
1634  * Sets the requested clockgating state for all instances of
1635  * the hardware IP specified.
1636  * Returns the error code from the last instance.
1637  */
1638 int amdgpu_device_ip_set_clockgating_state(void *dev,
1639                                            enum amd_ip_block_type block_type,
1640                                            enum amd_clockgating_state state)
1641 {
1642         struct amdgpu_device *adev = dev;
1643         int i, r = 0;
1644
1645         for (i = 0; i < adev->num_ip_blocks; i++) {
1646                 if (!adev->ip_blocks[i].status.valid)
1647                         continue;
1648                 if (adev->ip_blocks[i].version->type != block_type)
1649                         continue;
1650                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1651                         continue;
1652                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1653                         (void *)adev, state);
1654                 if (r)
1655                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1656                                   adev->ip_blocks[i].version->funcs->name, r);
1657         }
1658         return r;
1659 }
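
/*
 * Illustrative usage sketch, not part of this file: a caller that wants to
 * gate the clocks of every GFX instance could do something like:
 *
 *	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *						   AMD_CG_STATE_GATE);
 *	if (r)
 *		dev_warn(adev->dev, "GFX clockgating request failed (%d)\n", r);
 */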
1660
1661 /**
1662  * amdgpu_device_ip_set_powergating_state - set the PG state
1663  *
1664  * @dev: amdgpu_device pointer
1665  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1666  * @state: powergating state (gate or ungate)
1667  *
1668  * Sets the requested powergating state for all instances of
1669  * the hardware IP specified.
1670  * Returns the error code from the last instance.
1671  */
1672 int amdgpu_device_ip_set_powergating_state(void *dev,
1673                                            enum amd_ip_block_type block_type,
1674                                            enum amd_powergating_state state)
1675 {
1676         struct amdgpu_device *adev = dev;
1677         int i, r = 0;
1678
1679         for (i = 0; i < adev->num_ip_blocks; i++) {
1680                 if (!adev->ip_blocks[i].status.valid)
1681                         continue;
1682                 if (adev->ip_blocks[i].version->type != block_type)
1683                         continue;
1684                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1685                         continue;
1686                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1687                         (void *)adev, state);
1688                 if (r)
1689                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1690                                   adev->ip_blocks[i].version->funcs->name, r);
1691         }
1692         return r;
1693 }
1694
1695 /**
1696  * amdgpu_device_ip_get_clockgating_state - get the CG state
1697  *
1698  * @adev: amdgpu_device pointer
1699  * @flags: clockgating feature flags
1700  *
1701  * Walks the list of IPs on the device and updates the clockgating
1702  * flags for each IP.
1703  * Updates @flags with the feature flags for each hardware IP where
1704  * clockgating is enabled.
1705  */
1706 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1707                                             u32 *flags)
1708 {
1709         int i;
1710
1711         for (i = 0; i < adev->num_ip_blocks; i++) {
1712                 if (!adev->ip_blocks[i].status.valid)
1713                         continue;
1714                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1715                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1716         }
1717 }
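
/*
 * Illustrative usage sketch, not part of this file: debugfs-style code reads
 * the aggregated flags and then tests individual AMD_CG_SUPPORT_* bits, e.g.:
 *
 *	u32 flags = 0;
 *
 *	amdgpu_device_ip_get_clockgating_state(adev, &flags);
 *	if (flags & AMD_CG_SUPPORT_GFX_MGCG)
 *		pr_info("GFX medium grain clock gating enabled\n");
 */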
1718
1719 /**
1720  * amdgpu_device_ip_wait_for_idle - wait for idle
1721  *
1722  * @adev: amdgpu_device pointer
1723  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1724  *
1725  * Waits for the requested hardware IP to be idle.
1726  * Returns 0 for success or a negative error code on failure.
1727  */
1728 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1729                                    enum amd_ip_block_type block_type)
1730 {
1731         int i, r;
1732
1733         for (i = 0; i < adev->num_ip_blocks; i++) {
1734                 if (!adev->ip_blocks[i].status.valid)
1735                         continue;
1736                 if (adev->ip_blocks[i].version->type == block_type) {
1737                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1738                         if (r)
1739                                 return r;
1740                         break;
1741                 }
1742         }
1743         return 0;
1744
1745 }
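
/*
 * Illustrative usage sketch, not part of this file: reset and suspend paths
 * typically drain an engine before touching it, for example:
 *
 *	r = amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
 *	if (r)
 *		dev_err(adev->dev, "GMC did not become idle (%d)\n", r);
 */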
1746
1747 /**
1748  * amdgpu_device_ip_is_idle - is the hardware IP idle
1749  *
1750  * @adev: amdgpu_device pointer
1751  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1752  *
1753  * Check if the hardware IP is idle or not.
1754  * Returns true if the IP is idle, false if not.
1755  */
1756 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1757                               enum amd_ip_block_type block_type)
1758 {
1759         int i;
1760
1761         for (i = 0; i < adev->num_ip_blocks; i++) {
1762                 if (!adev->ip_blocks[i].status.valid)
1763                         continue;
1764                 if (adev->ip_blocks[i].version->type == block_type)
1765                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1766         }
1767         return true;
1768
1769 }
1770
1771 /**
1772  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1773  *
1774  * @adev: amdgpu_device pointer
1775  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1776  *
1777  * Returns a pointer to the hardware IP block structure
1778  * if it exists for the asic, otherwise NULL.
1779  */
1780 struct amdgpu_ip_block *
1781 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1782                               enum amd_ip_block_type type)
1783 {
1784         int i;
1785
1786         for (i = 0; i < adev->num_ip_blocks; i++)
1787                 if (adev->ip_blocks[i].version->type == type)
1788                         return &adev->ip_blocks[i];
1789
1790         return NULL;
1791 }
1792
1793 /**
1794  * amdgpu_device_ip_block_version_cmp
1795  *
1796  * @adev: amdgpu_device pointer
1797  * @type: enum amd_ip_block_type
1798  * @major: major version
1799  * @minor: minor version
1800  *
1801  * Returns 0 if the installed IP block version is equal to or greater than
1802  * the one requested, 1 if it is smaller or the ip_block doesn't exist.
1803  */
1804 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1805                                        enum amd_ip_block_type type,
1806                                        u32 major, u32 minor)
1807 {
1808         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1809
1810         if (ip_block && ((ip_block->version->major > major) ||
1811                         ((ip_block->version->major == major) &&
1812                         (ip_block->version->minor >= minor))))
1813                 return 0;
1814
1815         return 1;
1816 }
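
/*
 * Illustrative usage sketch, not part of this file: note the inverted return
 * convention, 0 means "at least this version".  use_gfx9_feature() below is a
 * hypothetical helper, only there to show the shape of a caller:
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       9, 0) == 0)
 *		use_gfx9_feature(adev);
 */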
1817
1818 /**
1819  * amdgpu_device_ip_block_add
1820  *
1821  * @adev: amdgpu_device pointer
1822  * @ip_block_version: pointer to the IP to add
1823  *
1824  * Adds the IP block driver information to the collection of IPs
1825  * on the asic.
1826  */
1827 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1828                                const struct amdgpu_ip_block_version *ip_block_version)
1829 {
1830         if (!ip_block_version)
1831                 return -EINVAL;
1832
1833         switch (ip_block_version->type) {
1834         case AMD_IP_BLOCK_TYPE_VCN:
1835                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1836                         return 0;
1837                 break;
1838         case AMD_IP_BLOCK_TYPE_JPEG:
1839                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1840                         return 0;
1841                 break;
1842         default:
1843                 break;
1844         }
1845
1846         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1847                   ip_block_version->funcs->name);
1848
1849         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1850
1851         return 0;
1852 }
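
/*
 * Illustrative usage sketch, not part of this file: the per-ASIC
 * *_set_ip_blocks() helpers call this once per IP block, in the order the
 * blocks should be initialized, e.g. (vi.c style, names assumed):
 *
 *	r = amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 *	if (r)
 *		return r;
 */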
1853
1854 /**
1855  * amdgpu_device_enable_virtual_display - enable virtual display feature
1856  *
1857  * @adev: amdgpu_device pointer
1858  *
1859  * Enables the virtual display feature if the user has enabled it via
1860  * the module parameter virtual_display.  This feature provides a virtual
1861  * display hardware on headless boards or in virtualized environments.
1862  * This function parses and validates the configuration string specified by
1863  * the user and configures the virtual display configuration (number of
1864  * virtual connectors, crtcs, etc.) specified.
1865  */
1866 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1867 {
1868         adev->enable_virtual_display = false;
1869
1870         if (amdgpu_virtual_display) {
1871                 const char *pci_address_name = pci_name(adev->pdev);
1872                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1873
1874                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1875                 pciaddstr_tmp = pciaddstr;
1876                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1877                         pciaddname = strsep(&pciaddname_tmp, ",");
1878                         if (!strcmp("all", pciaddname)
1879                             || !strcmp(pci_address_name, pciaddname)) {
1880                                 long num_crtc;
1881                                 int res = -1;
1882
1883                                 adev->enable_virtual_display = true;
1884
1885                                 if (pciaddname_tmp)
1886                                         res = kstrtol(pciaddname_tmp, 10,
1887                                                       &num_crtc);
1888
1889                                 if (!res) {
1890                                         if (num_crtc < 1)
1891                                                 num_crtc = 1;
1892                                         if (num_crtc > 6)
1893                                                 num_crtc = 6;
1894                                         adev->mode_info.num_crtc = num_crtc;
1895                                 } else {
1896                                         adev->mode_info.num_crtc = 1;
1897                                 }
1898                                 break;
1899                         }
1900                 }
1901
1902                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1903                          amdgpu_virtual_display, pci_address_name,
1904                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1905
1906                 kfree(pciaddstr);
1907         }
1908 }
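
/*
 * Illustrative configuration sketch, not part of this file: the string parsed
 * above comes from the virtual_display module parameter, e.g. on the kernel
 * command line:
 *
 *	amdgpu.virtual_display=0000:26:00.0,2
 *
 * which enables two virtual crtcs on the device at PCI address 0000:26:00.0;
 * "all,1" would enable one virtual crtc on every amdgpu device.
 */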
1909
1910 /**
1911  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1912  *
1913  * @adev: amdgpu_device pointer
1914  *
1915  * Parses the asic configuration parameters specified in the gpu info
1916  * firmware and makes them available to the driver for use in configuring
1917  * the asic.
1918  * Returns 0 on success, -EINVAL on failure.
1919  */
1920 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1921 {
1922         const char *chip_name;
1923         char fw_name[40];
1924         int err;
1925         const struct gpu_info_firmware_header_v1_0 *hdr;
1926
1927         adev->firmware.gpu_info_fw = NULL;
1928
1929         if (adev->mman.discovery_bin) {
1930                 amdgpu_discovery_get_gfx_info(adev);
1931
1932                 /*
1933                  * FIXME: The bounding box is still needed by Navi12, so
1934                  * temporarily read it from gpu_info firmware. Should be dropped
1935                  * when DAL no longer needs it.
1936                  */
1937                 if (adev->asic_type != CHIP_NAVI12)
1938                         return 0;
1939         }
1940
1941         switch (adev->asic_type) {
1942 #ifdef CONFIG_DRM_AMDGPU_SI
1943         case CHIP_VERDE:
1944         case CHIP_TAHITI:
1945         case CHIP_PITCAIRN:
1946         case CHIP_OLAND:
1947         case CHIP_HAINAN:
1948 #endif
1949 #ifdef CONFIG_DRM_AMDGPU_CIK
1950         case CHIP_BONAIRE:
1951         case CHIP_HAWAII:
1952         case CHIP_KAVERI:
1953         case CHIP_KABINI:
1954         case CHIP_MULLINS:
1955 #endif
1956         case CHIP_TOPAZ:
1957         case CHIP_TONGA:
1958         case CHIP_FIJI:
1959         case CHIP_POLARIS10:
1960         case CHIP_POLARIS11:
1961         case CHIP_POLARIS12:
1962         case CHIP_VEGAM:
1963         case CHIP_CARRIZO:
1964         case CHIP_STONEY:
1965         case CHIP_VEGA20:
1966         case CHIP_ALDEBARAN:
1967         case CHIP_SIENNA_CICHLID:
1968         case CHIP_NAVY_FLOUNDER:
1969         case CHIP_DIMGREY_CAVEFISH:
1970         case CHIP_BEIGE_GOBY:
1971         default:
1972                 return 0;
1973         case CHIP_VEGA10:
1974                 chip_name = "vega10";
1975                 break;
1976         case CHIP_VEGA12:
1977                 chip_name = "vega12";
1978                 break;
1979         case CHIP_RAVEN:
1980                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1981                         chip_name = "raven2";
1982                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1983                         chip_name = "picasso";
1984                 else
1985                         chip_name = "raven";
1986                 break;
1987         case CHIP_ARCTURUS:
1988                 chip_name = "arcturus";
1989                 break;
1990         case CHIP_RENOIR:
1991                 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1992                         chip_name = "renoir";
1993                 else
1994                         chip_name = "green_sardine";
1995                 break;
1996         case CHIP_NAVI10:
1997                 chip_name = "navi10";
1998                 break;
1999         case CHIP_NAVI14:
2000                 chip_name = "navi14";
2001                 break;
2002         case CHIP_NAVI12:
2003                 chip_name = "navi12";
2004                 break;
2005         case CHIP_VANGOGH:
2006                 chip_name = "vangogh";
2007                 break;
2008         case CHIP_YELLOW_CARP:
2009                 chip_name = "yellow_carp";
2010                 break;
2011         }
2012
2013         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
2014         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
2015         if (err) {
2016                 dev_err(adev->dev,
2017                         "Failed to load gpu_info firmware \"%s\"\n",
2018                         fw_name);
2019                 goto out;
2020         }
2021         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
2022         if (err) {
2023                 dev_err(adev->dev,
2024                         "Failed to validate gpu_info firmware \"%s\"\n",
2025                         fw_name);
2026                 goto out;
2027         }
2028
2029         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2030         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2031
2032         switch (hdr->version_major) {
2033         case 1:
2034         {
2035                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2036                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2037                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2038
2039                 /*
2040                  * Should be dropped when DAL no longer needs it.
2041                  */
2042                 if (adev->asic_type == CHIP_NAVI12)
2043                         goto parse_soc_bounding_box;
2044
2045                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2046                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2047                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2048                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2049                 adev->gfx.config.max_texture_channel_caches =
2050                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
2051                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2052                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2053                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2054                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2055                 adev->gfx.config.double_offchip_lds_buf =
2056                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2057                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2058                 adev->gfx.cu_info.max_waves_per_simd =
2059                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2060                 adev->gfx.cu_info.max_scratch_slots_per_cu =
2061                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2062                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2063                 if (hdr->version_minor >= 1) {
2064                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2065                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2066                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2067                         adev->gfx.config.num_sc_per_sh =
2068                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2069                         adev->gfx.config.num_packer_per_sc =
2070                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2071                 }
2072
2073 parse_soc_bounding_box:
2074                 /*
2075                  * soc bounding box info is not integrated in the discovery table,
2076                  * so we still need to parse it from the gpu info firmware when needed.
2077                  */
2078                 if (hdr->version_minor == 2) {
2079                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2080                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2081                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2082                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2083                 }
2084                 break;
2085         }
2086         default:
2087                 dev_err(adev->dev,
2088                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2089                 err = -EINVAL;
2090                 goto out;
2091         }
2092 out:
2093         return err;
2094 }
2095
2096 /**
2097  * amdgpu_device_ip_early_init - run early init for hardware IPs
2098  *
2099  * @adev: amdgpu_device pointer
2100  *
2101  * Early initialization pass for hardware IPs.  The hardware IPs that make
2102  * up each asic are discovered and each IP's early_init callback is run.  This
2103  * is the first stage in initializing the asic.
2104  * Returns 0 on success, negative error code on failure.
2105  */
2106 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2107 {
2108         struct drm_device *dev = adev_to_drm(adev);
2109         struct pci_dev *parent;
2110         int i, r;
2111
2112         amdgpu_device_enable_virtual_display(adev);
2113
2114         if (amdgpu_sriov_vf(adev)) {
2115                 r = amdgpu_virt_request_full_gpu(adev, true);
2116                 if (r)
2117                         return r;
2118         }
2119
2120         switch (adev->asic_type) {
2121 #ifdef CONFIG_DRM_AMDGPU_SI
2122         case CHIP_VERDE:
2123         case CHIP_TAHITI:
2124         case CHIP_PITCAIRN:
2125         case CHIP_OLAND:
2126         case CHIP_HAINAN:
2127                 adev->family = AMDGPU_FAMILY_SI;
2128                 r = si_set_ip_blocks(adev);
2129                 if (r)
2130                         return r;
2131                 break;
2132 #endif
2133 #ifdef CONFIG_DRM_AMDGPU_CIK
2134         case CHIP_BONAIRE:
2135         case CHIP_HAWAII:
2136         case CHIP_KAVERI:
2137         case CHIP_KABINI:
2138         case CHIP_MULLINS:
2139                 if (adev->flags & AMD_IS_APU)
2140                         adev->family = AMDGPU_FAMILY_KV;
2141                 else
2142                         adev->family = AMDGPU_FAMILY_CI;
2143
2144                 r = cik_set_ip_blocks(adev);
2145                 if (r)
2146                         return r;
2147                 break;
2148 #endif
2149         case CHIP_TOPAZ:
2150         case CHIP_TONGA:
2151         case CHIP_FIJI:
2152         case CHIP_POLARIS10:
2153         case CHIP_POLARIS11:
2154         case CHIP_POLARIS12:
2155         case CHIP_VEGAM:
2156         case CHIP_CARRIZO:
2157         case CHIP_STONEY:
2158                 if (adev->flags & AMD_IS_APU)
2159                         adev->family = AMDGPU_FAMILY_CZ;
2160                 else
2161                         adev->family = AMDGPU_FAMILY_VI;
2162
2163                 r = vi_set_ip_blocks(adev);
2164                 if (r)
2165                         return r;
2166                 break;
2167         default:
2168                 r = amdgpu_discovery_set_ip_blocks(adev);
2169                 if (r)
2170                         return r;
2171                 break;
2172         }
2173
2174         if (amdgpu_has_atpx() &&
2175             (amdgpu_is_atpx_hybrid() ||
2176              amdgpu_has_atpx_dgpu_power_cntl()) &&
2177             ((adev->flags & AMD_IS_APU) == 0) &&
2178             !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2179                 adev->flags |= AMD_IS_PX;
2180
2181         parent = pci_upstream_bridge(adev->pdev);
2182         adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2183
2184         amdgpu_amdkfd_device_probe(adev);
2185
2186         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2187         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2188                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2189         if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2190                 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2191
2192         for (i = 0; i < adev->num_ip_blocks; i++) {
2193                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2194                         DRM_ERROR("disabled ip block: %d <%s>\n",
2195                                   i, adev->ip_blocks[i].version->funcs->name);
2196                         adev->ip_blocks[i].status.valid = false;
2197                 } else {
2198                         if (adev->ip_blocks[i].version->funcs->early_init) {
2199                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2200                                 if (r == -ENOENT) {
2201                                         adev->ip_blocks[i].status.valid = false;
2202                                 } else if (r) {
2203                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2204                                                   adev->ip_blocks[i].version->funcs->name, r);
2205                                         return r;
2206                                 } else {
2207                                         adev->ip_blocks[i].status.valid = true;
2208                                 }
2209                         } else {
2210                                 adev->ip_blocks[i].status.valid = true;
2211                         }
2212                 }
2213                 /* get the vbios after the asic_funcs are set up */
2214                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2215                         r = amdgpu_device_parse_gpu_info_fw(adev);
2216                         if (r)
2217                                 return r;
2218
2219                         /* Read BIOS */
2220                         if (!amdgpu_get_bios(adev))
2221                                 return -EINVAL;
2222
2223                         r = amdgpu_atombios_init(adev);
2224                         if (r) {
2225                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2226                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2227                                 return r;
2228                         }
2229
2230                         /* get pf2vf msg info at its earliest time */
2231                         if (amdgpu_sriov_vf(adev))
2232                                 amdgpu_virt_init_data_exchange(adev);
2233
2234                 }
2235         }
2236
2237         adev->cg_flags &= amdgpu_cg_mask;
2238         adev->pg_flags &= amdgpu_pg_mask;
2239
2240         return 0;
2241 }
2242
2243 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2244 {
2245         int i, r;
2246
2247         for (i = 0; i < adev->num_ip_blocks; i++) {
2248                 if (!adev->ip_blocks[i].status.sw)
2249                         continue;
2250                 if (adev->ip_blocks[i].status.hw)
2251                         continue;
2252                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2253                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2254                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2255                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2256                         if (r) {
2257                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2258                                           adev->ip_blocks[i].version->funcs->name, r);
2259                                 return r;
2260                         }
2261                         adev->ip_blocks[i].status.hw = true;
2262                 }
2263         }
2264
2265         return 0;
2266 }
2267
2268 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2269 {
2270         int i, r;
2271
2272         for (i = 0; i < adev->num_ip_blocks; i++) {
2273                 if (!adev->ip_blocks[i].status.sw)
2274                         continue;
2275                 if (adev->ip_blocks[i].status.hw)
2276                         continue;
2277                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2278                 if (r) {
2279                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2280                                   adev->ip_blocks[i].version->funcs->name, r);
2281                         return r;
2282                 }
2283                 adev->ip_blocks[i].status.hw = true;
2284         }
2285
2286         return 0;
2287 }
2288
2289 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2290 {
2291         int r = 0;
2292         int i;
2293         uint32_t smu_version;
2294
2295         if (adev->asic_type >= CHIP_VEGA10) {
2296                 for (i = 0; i < adev->num_ip_blocks; i++) {
2297                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2298                                 continue;
2299
2300                         if (!adev->ip_blocks[i].status.sw)
2301                                 continue;
2302
2303                         /* no need to do the fw loading again if already done */
2304                         if (adev->ip_blocks[i].status.hw)
2305                                 break;
2306
2307                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2308                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2309                                 if (r) {
2310                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2311                                                           adev->ip_blocks[i].version->funcs->name, r);
2312                                         return r;
2313                                 }
2314                         } else {
2315                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2316                                 if (r) {
2317                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2318                                                           adev->ip_blocks[i].version->funcs->name, r);
2319                                         return r;
2320                                 }
2321                         }
2322
2323                         adev->ip_blocks[i].status.hw = true;
2324                         break;
2325                 }
2326         }
2327
2328         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2329                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2330
2331         return r;
2332 }
2333
2334 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2335 {
2336         long timeout;
2337         int r, i;
2338
2339         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2340                 struct amdgpu_ring *ring = adev->rings[i];
2341
2342                 /* No need to set up the GPU scheduler for rings that don't need it */
2343                 if (!ring || ring->no_scheduler)
2344                         continue;
2345
2346                 switch (ring->funcs->type) {
2347                 case AMDGPU_RING_TYPE_GFX:
2348                         timeout = adev->gfx_timeout;
2349                         break;
2350                 case AMDGPU_RING_TYPE_COMPUTE:
2351                         timeout = adev->compute_timeout;
2352                         break;
2353                 case AMDGPU_RING_TYPE_SDMA:
2354                         timeout = adev->sdma_timeout;
2355                         break;
2356                 default:
2357                         timeout = adev->video_timeout;
2358                         break;
2359                 }
2360
2361                 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2362                                    ring->num_hw_submission, amdgpu_job_hang_limit,
2363                                    timeout, adev->reset_domain->wq,
2364                                    ring->sched_score, ring->name,
2365                                    adev->dev);
2366                 if (r) {
2367                         DRM_ERROR("Failed to create scheduler on ring %s.\n",
2368                                   ring->name);
2369                         return r;
2370                 }
2371         }
2372
2373         return 0;
2374 }
2375
2376
2377 /**
2378  * amdgpu_device_ip_init - run init for hardware IPs
2379  *
2380  * @adev: amdgpu_device pointer
2381  *
2382  * Main initialization pass for hardware IPs.  The list of all the hardware
2383  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2384  * are run.  sw_init initializes the software state associated with each IP
2385  * and hw_init initializes the hardware associated with each IP.
2386  * Returns 0 on success, negative error code on failure.
2387  */
2388 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2389 {
2390         int i, r;
2391
2392         r = amdgpu_ras_init(adev);
2393         if (r)
2394                 return r;
2395
2396         for (i = 0; i < adev->num_ip_blocks; i++) {
2397                 if (!adev->ip_blocks[i].status.valid)
2398                         continue;
2399                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2400                 if (r) {
2401                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2402                                   adev->ip_blocks[i].version->funcs->name, r);
2403                         goto init_failed;
2404                 }
2405                 adev->ip_blocks[i].status.sw = true;
2406
2407                 /* need to do gmc hw init early so we can allocate gpu mem */
2408                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2409                         /* Try to reserve bad pages early */
2410                         if (amdgpu_sriov_vf(adev))
2411                                 amdgpu_virt_exchange_data(adev);
2412
2413                         r = amdgpu_device_vram_scratch_init(adev);
2414                         if (r) {
2415                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2416                                 goto init_failed;
2417                         }
2418                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2419                         if (r) {
2420                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2421                                 goto init_failed;
2422                         }
2423                         r = amdgpu_device_wb_init(adev);
2424                         if (r) {
2425                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2426                                 goto init_failed;
2427                         }
2428                         adev->ip_blocks[i].status.hw = true;
2429
2430                         /* right after GMC hw init, we create CSA */
2431                         if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2432                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2433                                                                 AMDGPU_GEM_DOMAIN_VRAM,
2434                                                                 AMDGPU_CSA_SIZE);
2435                                 if (r) {
2436                                         DRM_ERROR("allocate CSA failed %d\n", r);
2437                                         goto init_failed;
2438                                 }
2439                         }
2440                 }
2441         }
2442
2443         if (amdgpu_sriov_vf(adev))
2444                 amdgpu_virt_init_data_exchange(adev);
2445
2446         r = amdgpu_ib_pool_init(adev);
2447         if (r) {
2448                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2449                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2450                 goto init_failed;
2451         }
2452
2453         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init is complete */
2454         if (r)
2455                 goto init_failed;
2456
2457         r = amdgpu_device_ip_hw_init_phase1(adev);
2458         if (r)
2459                 goto init_failed;
2460
2461         r = amdgpu_device_fw_loading(adev);
2462         if (r)
2463                 goto init_failed;
2464
2465         r = amdgpu_device_ip_hw_init_phase2(adev);
2466         if (r)
2467                 goto init_failed;
2468
2469         /*
2470          * Retired pages will be loaded from eeprom and reserved here.
2471          * This must be called after amdgpu_device_ip_hw_init_phase2, since
2472          * for some ASICs the RAS EEPROM code relies on the SMU being fully
2473          * functional for I2C communication, which is only true at this point.
2474          *
2475          * amdgpu_ras_recovery_init may fail, but the caller only acts on a
2476          * failure caused by a bad gpu state and stops the amdgpu init process
2477          * accordingly. For other failures it still releases all the resources
2478          * and prints an error message, rather than returning a negative value
2479          * to the upper level.
2480          *
2481          * Note: theoretically, this should be called before all vram allocations
2482          * to protect retired pages from being reused.
2483          */
2484         r = amdgpu_ras_recovery_init(adev);
2485         if (r)
2486                 goto init_failed;
2487
2488         /*
2489          * In case of XGMI, grab an extra reference on the reset domain for this device
2490          */
2491         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2492                 if (amdgpu_xgmi_add_device(adev) == 0) {
2493                         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2494
2495                         if (!hive->reset_domain ||
2496                             !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2497                                 r = -ENOENT;
2498                                 goto init_failed;
2499                         }
2500
2501                         /* Drop the early temporary reset domain we created for device */
2502                         amdgpu_reset_put_reset_domain(adev->reset_domain);
2503                         adev->reset_domain = hive->reset_domain;
2504                 }
2505         }
2506
2507         r = amdgpu_device_init_schedulers(adev);
2508         if (r)
2509                 goto init_failed;
2510
2511         /* Don't init kfd if whole hive need to be reset during init */
2512         if (!adev->gmc.xgmi.pending_reset)
2513                 amdgpu_amdkfd_device_init(adev);
2514
2515         amdgpu_fru_get_product_info(adev);
2516
2517 init_failed:
2518         if (amdgpu_sriov_vf(adev))
2519                 amdgpu_virt_release_full_gpu(adev, true);
2520
2521         return r;
2522 }
2523
2524 /**
2525  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2526  *
2527  * @adev: amdgpu_device pointer
2528  *
2529  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2530  * this function before a GPU reset.  If the value is retained after a
2531  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2532  */
2533 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2534 {
2535         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2536 }
2537
2538 /**
2539  * amdgpu_device_check_vram_lost - check if vram is valid
2540  *
2541  * @adev: amdgpu_device pointer
2542  *
2543  * Checks the reset magic value written to the gart pointer in VRAM.
2544  * The driver calls this after a GPU reset to see if the contents of
2545  * VRAM have been lost or not.
2546  * Returns true if vram is lost, false if not.
2547  */
2548 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2549 {
2550         if (memcmp(adev->gart.ptr, adev->reset_magic,
2551                         AMDGPU_RESET_MAGIC_NUM))
2552                 return true;
2553
2554         if (!amdgpu_in_reset(adev))
2555                 return false;
2556
2557         /*
2558          * For all ASICs with baco/mode1 reset, the VRAM is
2559          * always assumed to be lost.
2560          */
2561         switch (amdgpu_asic_reset_method(adev)) {
2562         case AMD_RESET_METHOD_BACO:
2563         case AMD_RESET_METHOD_MODE1:
2564                 return true;
2565         default:
2566                 return false;
2567         }
2568 }
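
/*
 * Illustrative usage sketch, not part of this file: the reset paths pair the
 * two helpers above roughly like this:
 *
 *	amdgpu_device_fill_reset_magic(adev);             before the reset
 *	... perform the ASIC reset ...
 *	vram_lost = amdgpu_device_check_vram_lost(adev);  after the reset
 *	if (vram_lost)
 *		... restore VRAM contents (GART table, ucode bo, etc.) ...
 */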
2569
2570 /**
2571  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2572  *
2573  * @adev: amdgpu_device pointer
2574  * @state: clockgating state (gate or ungate)
2575  *
2576  * The list of all the hardware IPs that make up the asic is walked and the
2577  * set_clockgating_state callbacks are run.
2578  * During the late init pass this is used to enable clockgating for the hardware IPs;
2579  * during fini or suspend it is used to disable clockgating.
2580  * Returns 0 on success, negative error code on failure.
2581  */
2583 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2584                                enum amd_clockgating_state state)
2585 {
2586         int i, j, r;
2587
2588         if (amdgpu_emu_mode == 1)
2589                 return 0;
2590
2591         for (j = 0; j < adev->num_ip_blocks; j++) {
2592                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2593                 if (!adev->ip_blocks[i].status.late_initialized)
2594                         continue;
2595                 /* skip CG for GFX on S0ix */
2596                 if (adev->in_s0ix &&
2597                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2598                         continue;
2599                 /* skip CG for VCE/UVD, it's handled specially */
2600                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2601                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2602                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2603                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2604                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2605                         /* enable clockgating to save power */
2606                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2607                                                                                      state);
2608                         if (r) {
2609                                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2610                                           adev->ip_blocks[i].version->funcs->name, r);
2611                                 return r;
2612                         }
2613                 }
2614         }
2615
2616         return 0;
2617 }
2618
2619 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2620                                enum amd_powergating_state state)
2621 {
2622         int i, j, r;
2623
2624         if (amdgpu_emu_mode == 1)
2625                 return 0;
2626
2627         for (j = 0; j < adev->num_ip_blocks; j++) {
2628                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2629                 if (!adev->ip_blocks[i].status.late_initialized)
2630                         continue;
2631                 /* skip PG for GFX on S0ix */
2632                 if (adev->in_s0ix &&
2633                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2634                         continue;
2635                 /* skip PG for VCE/UVD, it's handled specially */
2636                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2637                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2638                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2639                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2640                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2641                         /* enable powergating to save power */
2642                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2643                                                                                         state);
2644                         if (r) {
2645                                 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2646                                           adev->ip_blocks[i].version->funcs->name, r);
2647                                 return r;
2648                         }
2649                 }
2650         }
2651         return 0;
2652 }
2653
2654 static int amdgpu_device_enable_mgpu_fan_boost(void)
2655 {
2656         struct amdgpu_gpu_instance *gpu_ins;
2657         struct amdgpu_device *adev;
2658         int i, ret = 0;
2659
2660         mutex_lock(&mgpu_info.mutex);
2661
2662         /*
2663          * MGPU fan boost feature should be enabled
2664          * only when there are two or more dGPUs in
2665          * the system
2666          */
2667         if (mgpu_info.num_dgpu < 2)
2668                 goto out;
2669
2670         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2671                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2672                 adev = gpu_ins->adev;
2673                 if (!(adev->flags & AMD_IS_APU) &&
2674                     !gpu_ins->mgpu_fan_enabled) {
2675                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2676                         if (ret)
2677                                 break;
2678
2679                         gpu_ins->mgpu_fan_enabled = 1;
2680                 }
2681         }
2682
2683 out:
2684         mutex_unlock(&mgpu_info.mutex);
2685
2686         return ret;
2687 }
2688
2689 /**
2690  * amdgpu_device_ip_late_init - run late init for hardware IPs
2691  *
2692  * @adev: amdgpu_device pointer
2693  *
2694  * Late initialization pass for hardware IPs.  The list of all the hardware
2695  * IPs that make up the asic is walked and the late_init callbacks are run.
2696  * late_init covers any special initialization that an IP requires
2697  * after all of the IPs have been initialized or something that needs to happen
2698  * late in the init process.
2699  * Returns 0 on success, negative error code on failure.
2700  */
2701 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2702 {
2703         struct amdgpu_gpu_instance *gpu_instance;
2704         int i = 0, r;
2705
2706         for (i = 0; i < adev->num_ip_blocks; i++) {
2707                 if (!adev->ip_blocks[i].status.hw)
2708                         continue;
2709                 if (adev->ip_blocks[i].version->funcs->late_init) {
2710                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2711                         if (r) {
2712                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2713                                           adev->ip_blocks[i].version->funcs->name, r);
2714                                 return r;
2715                         }
2716                 }
2717                 adev->ip_blocks[i].status.late_initialized = true;
2718         }
2719
2720         r = amdgpu_ras_late_init(adev);
2721         if (r) {
2722                 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2723                 return r;
2724         }
2725
2726         amdgpu_ras_set_error_query_ready(adev, true);
2727
2728         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2729         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2730
2731         amdgpu_device_fill_reset_magic(adev);
2732
2733         r = amdgpu_device_enable_mgpu_fan_boost();
2734         if (r)
2735                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2736
2737         /* For passthrough configuration on arcturus and aldebaran, enable special handling for SBR */
2738         if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2739                                adev->asic_type == CHIP_ALDEBARAN))
2740                 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2741
2742         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2743                 mutex_lock(&mgpu_info.mutex);
2744
2745                 /*
2746                  * Reset device p-state to low as this was booted with high.
2747                  *
2748                  * This should be performed only after all devices from the same
2749                  * hive get initialized.
2750                  *
2751          * However, the number of devices in a hive is not known in advance,
2752          * as it is counted one by one during device initialization.
2753                  *
2754                  * So, we wait for all XGMI interlinked devices initialized.
2755                  * This may bring some delays as those devices may come from
2756                  * different hives. But that should be OK.
2757                  */
2758                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2759                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2760                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2761                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2762                                         continue;
2763
2764                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2765                                                 AMDGPU_XGMI_PSTATE_MIN);
2766                                 if (r) {
2767                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2768                                         break;
2769                                 }
2770                         }
2771                 }
2772
2773                 mutex_unlock(&mgpu_info.mutex);
2774         }
2775
2776         return 0;
2777 }
2778
2779 /**
2780  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2781  *
2782  * @adev: amdgpu_device pointer
2783  *
2784  * For ASICs that need to disable the SMC first
2785  */
2786 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2787 {
2788         int i, r;
2789
2790         if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2791                 return;
2792
2793         for (i = 0; i < adev->num_ip_blocks; i++) {
2794                 if (!adev->ip_blocks[i].status.hw)
2795                         continue;
2796                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2797                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2798                         /* XXX handle errors */
2799                         if (r) {
2800                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2801                                           adev->ip_blocks[i].version->funcs->name, r);
2802                         }
2803                         adev->ip_blocks[i].status.hw = false;
2804                         break;
2805                 }
2806         }
2807 }
2808
2809 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2810 {
2811         int i, r;
2812
2813         for (i = 0; i < adev->num_ip_blocks; i++) {
2814                 if (!adev->ip_blocks[i].version->funcs->early_fini)
2815                         continue;
2816
2817                 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2818                 if (r) {
2819                         DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2820                                   adev->ip_blocks[i].version->funcs->name, r);
2821                 }
2822         }
2823
2824         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2825         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2826
2827         amdgpu_amdkfd_suspend(adev, false);
2828
2829         /* Workaround for ASICs that need to disable the SMC first */
2830         amdgpu_device_smu_fini_early(adev);
2831
2832         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2833                 if (!adev->ip_blocks[i].status.hw)
2834                         continue;
2835
2836                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2837                 /* XXX handle errors */
2838                 if (r) {
2839                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2840                                   adev->ip_blocks[i].version->funcs->name, r);
2841                 }
2842
2843                 adev->ip_blocks[i].status.hw = false;
2844         }
2845
2846         if (amdgpu_sriov_vf(adev)) {
2847                 if (amdgpu_virt_release_full_gpu(adev, false))
2848                         DRM_ERROR("failed to release exclusive mode on fini\n");
2849         }
2850
2851         return 0;
2852 }
2853
2854 /**
2855  * amdgpu_device_ip_fini - run fini for hardware IPs
2856  *
2857  * @adev: amdgpu_device pointer
2858  *
2859  * Main teardown pass for hardware IPs.  The list of all the hardware
2860  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2861  * are run.  hw_fini tears down the hardware associated with each IP
2862  * and sw_fini tears down any software state associated with each IP.
2863  * Returns 0 on success, negative error code on failure.
2864  */
2865 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2866 {
2867         int i, r;
2868
2869         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2870                 amdgpu_virt_release_ras_err_handler_data(adev);
2871
2872         if (adev->gmc.xgmi.num_physical_nodes > 1)
2873                 amdgpu_xgmi_remove_device(adev);
2874
2875         amdgpu_amdkfd_device_fini_sw(adev);
2876
2877         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2878                 if (!adev->ip_blocks[i].status.sw)
2879                         continue;
2880
2881                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2882                         amdgpu_ucode_free_bo(adev);
2883                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2884                         amdgpu_device_wb_fini(adev);
2885                         amdgpu_device_vram_scratch_fini(adev);
2886                         amdgpu_ib_pool_fini(adev);
2887                 }
2888
2889                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2890                 /* XXX handle errors */
2891                 if (r) {
2892                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2893                                   adev->ip_blocks[i].version->funcs->name, r);
2894                 }
2895                 adev->ip_blocks[i].status.sw = false;
2896                 adev->ip_blocks[i].status.valid = false;
2897         }
2898
2899         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2900                 if (!adev->ip_blocks[i].status.late_initialized)
2901                         continue;
2902                 if (adev->ip_blocks[i].version->funcs->late_fini)
2903                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2904                 adev->ip_blocks[i].status.late_initialized = false;
2905         }
2906
2907         amdgpu_ras_fini(adev);
2908
2909         return 0;
2910 }
2911
2912 /**
2913  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2914  *
2915  * @work: work_struct.
2916  */
2917 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2918 {
2919         struct amdgpu_device *adev =
2920                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2921         int r;
2922
2923         r = amdgpu_ib_ring_tests(adev);
2924         if (r)
2925                 DRM_ERROR("ib ring test failed (%d).\n", r);
2926 }
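
/*
 * Illustrative usage sketch, not part of this file: init and resume paths
 * queue this handler so the IB ring tests run shortly after the hardware is
 * fully back up:
 *
 *	schedule_delayed_work(&adev->delayed_init_work,
 *			      msecs_to_jiffies(AMDGPU_RESUME_MS));
 */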
2927
2928 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2929 {
2930         struct amdgpu_device *adev =
2931                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2932
2933         WARN_ON_ONCE(adev->gfx.gfx_off_state);
2934         WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2935
2936         if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2937                 adev->gfx.gfx_off_state = true;
2938 }
2939
2940 /**
2941  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2942  *
2943  * @adev: amdgpu_device pointer
2944  *
2945  * First suspend pass for hardware IPs.  The list of all the hardware
2946  * IPs that make up the asic is walked, clockgating and powergating are
2947  * ungated and the suspend callbacks are run for the display (DCE)
2948  * blocks only; the remaining blocks are suspended in phase 2.
2949  * Returns 0 on success, negative error code on failure.
2950  */
2951 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2952 {
2953         int i, r;
2954
2955         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2956         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2957
2958         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2959                 if (!adev->ip_blocks[i].status.valid)
2960                         continue;
2961
2962                 /* displays are handled separately */
2963                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2964                         continue;
2965
2967                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2968                 /* XXX handle errors */
2969                 if (r) {
2970                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2971                                   adev->ip_blocks[i].version->funcs->name, r);
2972                         return r;
2973                 }
2974
2975                 adev->ip_blocks[i].status.hw = false;
2976         }
2977
2978         return 0;
2979 }
2980
2981 /**
2982  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2983  *
2984  * @adev: amdgpu_device pointer
2985  *
2986  * Second suspend pass for hardware IPs.  The list of all the hardware
2987  * IPs that make up the asic is walked and the suspend callbacks are run
2988  * for every block except the display (DCE) blocks, which were already
2989  * handled in phase 1.  suspend puts each IP into a state suitable for suspend.
2990  * Returns 0 on success, negative error code on failure.
2991  */
2992 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2993 {
2994         int i, r;
2995
2996         if (adev->in_s0ix)
2997                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2998
2999         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3000                 if (!adev->ip_blocks[i].status.valid)
3001                         continue;
3002                 /* displays are handled in phase1 */
3003                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3004                         continue;
3005                 /* PSP lost connection when err_event_athub occurs */
3006                 if (amdgpu_ras_intr_triggered() &&
3007                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3008                         adev->ip_blocks[i].status.hw = false;
3009                         continue;
3010                 }
3011
3012                 /* skip suspend of blocks we have not initialized yet (hive reset pending) */
3013                 if (adev->gmc.xgmi.pending_reset &&
3014                     !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3015                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3016                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3017                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3018                         adev->ip_blocks[i].status.hw = false;
3019                         continue;
3020                 }
3021
3022                 /* skip suspend of gfx and psp for S0ix
3023                  * gfx is in gfxoff state, so on resume it will exit gfxoff just
3024                  * like at runtime. PSP is also part of the always on hardware
3025                  * so no need to suspend it.
3026                  */
3027                 if (adev->in_s0ix &&
3028                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3029                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
3030                         continue;
3031
3033                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3034                 /* XXX handle errors */
3035                 if (r) {
3036                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
3037                                   adev->ip_blocks[i].version->funcs->name, r);
3038                 }
3039                 adev->ip_blocks[i].status.hw = false;
3040                 /* handle putting the SMC in the appropriate state */
3041                 if (!amdgpu_sriov_vf(adev)) {
3042                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3043                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3044                                 if (r) {
3045                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3046                                                         adev->mp1_state, r);
3047                                         return r;
3048                                 }
3049                         }
3050                 }
3051         }
3052
3053         return 0;
3054 }
3055
3056 /**
3057  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3058  *
3059  * @adev: amdgpu_device pointer
3060  *
3061  * Main suspend function for hardware IPs.  The list of all the hardware
3062  * IPs that make up the asic is walked, clockgating is disabled and the
3063  * suspend callbacks are run.  suspend puts the hardware and software state
3064  * in each IP into a state suitable for suspend.
3065  * Returns 0 on success, negative error code on failure.
3066  */
3067 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3068 {
3069         int r;
3070
3071         if (amdgpu_sriov_vf(adev)) {
3072                 amdgpu_virt_fini_data_exchange(adev);
3073                 amdgpu_virt_request_full_gpu(adev, false);
3074         }
3075
3076         r = amdgpu_device_ip_suspend_phase1(adev);
3077         if (r)
3078                 return r;
3079         r = amdgpu_device_ip_suspend_phase2(adev);
3080
3081         if (amdgpu_sriov_vf(adev))
3082                 amdgpu_virt_release_full_gpu(adev, false);
3083
3084         return r;
3085 }
3086
3087 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3088 {
3089         int i, r;
3090
3091         static enum amd_ip_block_type ip_order[] = {
3092                 AMD_IP_BLOCK_TYPE_GMC,
3093                 AMD_IP_BLOCK_TYPE_COMMON,
3094                 AMD_IP_BLOCK_TYPE_PSP,
3095                 AMD_IP_BLOCK_TYPE_IH,
3096         };
3097
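        /*
         * Walk every IP block but only re-init the ones listed in ip_order
         * above (GMC, COMMON, PSP, IH) - presumably the minimal set that has
         * to be back up before the remaining IPs are re-initialized in the
         * late SR-IOV pass below.
         */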
3098         for (i = 0; i < adev->num_ip_blocks; i++) {
3099                 int j;
3100                 struct amdgpu_ip_block *block;
3101
3102                 block = &adev->ip_blocks[i];
3103                 block->status.hw = false;
3104
3105                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3106
3107                         if (block->version->type != ip_order[j] ||
3108                                 !block->status.valid)
3109                                 continue;
3110
3111                         r = block->version->funcs->hw_init(adev);
3112                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3113                         if (r)
3114                                 return r;
3115                         block->status.hw = true;
3116                 }
3117         }
3118
3119         return 0;
3120 }
3121
3122 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3123 {
3124         int i, r;
3125
3126         static enum amd_ip_block_type ip_order[] = {
3127                 AMD_IP_BLOCK_TYPE_SMC,
3128                 AMD_IP_BLOCK_TYPE_DCE,
3129                 AMD_IP_BLOCK_TYPE_GFX,
3130                 AMD_IP_BLOCK_TYPE_SDMA,
3131                 AMD_IP_BLOCK_TYPE_UVD,
3132                 AMD_IP_BLOCK_TYPE_VCE,
3133                 AMD_IP_BLOCK_TYPE_VCN
3134         };
3135
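        /*
         * Re-init the remaining blocks in the fixed order above; note the
         * SMC block uses its resume callback instead of hw_init, presumably
         * to restore its previously saved state rather than start cold.
         */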
3136         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3137                 int j;
3138                 struct amdgpu_ip_block *block;
3139
3140                 for (j = 0; j < adev->num_ip_blocks; j++) {
3141                         block = &adev->ip_blocks[j];
3142
3143                         if (block->version->type != ip_order[i] ||
3144                                 !block->status.valid ||
3145                                 block->status.hw)
3146                                 continue;
3147
3148                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3149                                 r = block->version->funcs->resume(adev);
3150                         else
3151                                 r = block->version->funcs->hw_init(adev);
3152
3153                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3154                         if (r)
3155                                 return r;
3156                         block->status.hw = true;
3157                 }
3158         }
3159
3160         return 0;
3161 }
3162
3163 /**
3164  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3165  *
3166  * @adev: amdgpu_device pointer
3167  *
3168  * First resume function for hardware IPs.  The list of all the hardware
3169  * IPs that make up the asic is walked and the resume callbacks are run for
3170  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3171  * after a suspend and updates the software state as necessary.  This
3172  * function is also used for restoring the GPU after a GPU reset.
3173  * Returns 0 on success, negative error code on failure.
3174  */
3175 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3176 {
3177         int i, r;
3178
3179         for (i = 0; i < adev->num_ip_blocks; i++) {
3180                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3181                         continue;
3182                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3183                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3184                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
3185
3186                         r = adev->ip_blocks[i].version->funcs->resume(adev);
3187                         if (r) {
3188                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
3189                                           adev->ip_blocks[i].version->funcs->name, r);
3190                                 return r;
3191                         }
3192                         adev->ip_blocks[i].status.hw = true;
3193                 }
3194         }
3195
3196         return 0;
3197 }
3198
3199 /**
3200  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3201  *
3202  * @adev: amdgpu_device pointer
3203  *
3204  * Second resume function for hardware IPs.  The list of all the hardware
3205  * IPs that make up the asic is walked and the resume callbacks are run for
3206  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3207  * functional state after a suspend and updates the software state as
3208  * necessary.  This function is also used for restoring the GPU after a GPU
3209  * reset.
3210  * Returns 0 on success, negative error code on failure.
3211  */
3212 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3213 {
3214         int i, r;
3215
3216         for (i = 0; i < adev->num_ip_blocks; i++) {
3217                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3218                         continue;
3219                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3220                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3221                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3222                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3223                         continue;
3224                 r = adev->ip_blocks[i].version->funcs->resume(adev);
3225                 if (r) {
3226                         DRM_ERROR("resume of IP block <%s> failed %d\n",
3227                                   adev->ip_blocks[i].version->funcs->name, r);
3228                         return r;
3229                 }
3230                 adev->ip_blocks[i].status.hw = true;
3231         }
3232
3233         return 0;
3234 }
3235
3236 /**
3237  * amdgpu_device_ip_resume - run resume for hardware IPs
3238  *
3239  * @adev: amdgpu_device pointer
3240  *
3241  * Main resume function for hardware IPs.  The hardware IPs
3242  * are split into two resume functions because they are
3243  * also used in recovering from a GPU reset and some additional
3244  * steps need to be taken between them.  In this case (S3/S4) they are
3245  * run sequentially.
3246  * Returns 0 on success, negative error code on failure.
3247  */
3248 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3249 {
3250         int r;
3251
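        /*
         * Resume order: the IOMMU for KFD first, then the COMMON/GMC/IH
         * blocks in phase 1, followed by firmware loading and finally the
         * remaining blocks in phase 2.
         */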
3252         r = amdgpu_amdkfd_resume_iommu(adev);
3253         if (r)
3254                 return r;
3255
3256         r = amdgpu_device_ip_resume_phase1(adev);
3257         if (r)
3258                 return r;
3259
3260         r = amdgpu_device_fw_loading(adev);
3261         if (r)
3262                 return r;
3263
3264         r = amdgpu_device_ip_resume_phase2(adev);
3265
3266         return r;
3267 }
3268
3269 /**
3270  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3271  *
3272  * @adev: amdgpu_device pointer
3273  *
3274  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3275  */
3276 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3277 {
3278         if (amdgpu_sriov_vf(adev)) {
3279                 if (adev->is_atom_fw) {
3280                         if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3281                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3282                 } else {
3283                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3284                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3285                 }
3286
3287                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3288                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3289         }
3290 }
3291
3292 /**
3293  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3294  *
3295  * @asic_type: AMD asic type
3296  *
3297  * Check if there is DC (new modesetting infrastructure) support for an asic.
3298  * Returns true if DC has support, false if not.
3299  */
3300 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3301 {
3302         switch (asic_type) {
3303 #ifdef CONFIG_DRM_AMDGPU_SI
3304         case CHIP_HAINAN:
3305 #endif
3306         case CHIP_TOPAZ:
3307                 /* chips with no display hardware */
3308                 return false;
3309 #if defined(CONFIG_DRM_AMD_DC)
3310         case CHIP_TAHITI:
3311         case CHIP_PITCAIRN:
3312         case CHIP_VERDE:
3313         case CHIP_OLAND:
3314                 /*
3315                  * We have systems in the wild with these ASICs that require
3316                  * LVDS and VGA support which is not supported with DC.
3317                  *
3318                  * Fallback to the non-DC driver here by default so as not to
3319                  * cause regressions.
3320                  */
3321 #if defined(CONFIG_DRM_AMD_DC_SI)
3322                 return amdgpu_dc > 0;
3323 #else
3324                 return false;
3325 #endif
3326         case CHIP_BONAIRE:
3327         case CHIP_KAVERI:
3328         case CHIP_KABINI:
3329         case CHIP_MULLINS:
3330                 /*
3331                  * We have systems in the wild with these ASICs that require
3332                  * LVDS and VGA support which is not supported with DC.
3333                  *
3334                  * Fallback to the non-DC driver here by default so as not to
3335                  * cause regressions.
3336                  */
3337                 return amdgpu_dc > 0;
3338         case CHIP_HAWAII:
3339         case CHIP_CARRIZO:
3340         case CHIP_STONEY:
3341         case CHIP_POLARIS10:
3342         case CHIP_POLARIS11:
3343         case CHIP_POLARIS12:
3344         case CHIP_VEGAM:
3345         case CHIP_TONGA:
3346         case CHIP_FIJI:
3347         case CHIP_VEGA10:
3348         case CHIP_VEGA12:
3349         case CHIP_VEGA20:
3350 #if defined(CONFIG_DRM_AMD_DC_DCN)
3351         case CHIP_RAVEN:
3352         case CHIP_NAVI10:
3353         case CHIP_NAVI14:
3354         case CHIP_NAVI12:
3355         case CHIP_RENOIR:
3356         case CHIP_CYAN_SKILLFISH:
3357         case CHIP_SIENNA_CICHLID:
3358         case CHIP_NAVY_FLOUNDER:
3359         case CHIP_DIMGREY_CAVEFISH:
3360         case CHIP_BEIGE_GOBY:
3361         case CHIP_VANGOGH:
3362         case CHIP_YELLOW_CARP:
3363 #endif
3364         default:
3365                 return amdgpu_dc != 0;
3366 #else
3367         default:
3368                 if (amdgpu_dc > 0)
3369                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3370                                          "but isn't supported by ASIC, ignoring\n");
3371                 return false;
3372 #endif
3373         }
3374 }
3375
3376 /**
3377  * amdgpu_device_has_dc_support - check if dc is supported
3378  *
3379  * @adev: amdgpu_device pointer
3380  *
3381  * Returns true for supported, false for not supported
3382  */
3383 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3384 {
3385         if (amdgpu_sriov_vf(adev) ||
3386             adev->enable_virtual_display ||
3387             (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3388                 return false;
3389
3390         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3391 }
3392
3393 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3394 {
3395         struct amdgpu_device *adev =
3396                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3397         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3398
3399         /* It's a bug to not have a hive within this function */
3400         if (WARN_ON(!hive))
3401                 return;
3402
3403         /*
3404          * Use task barrier to synchronize all xgmi reset works across the
3405          * hive. task_barrier_enter and task_barrier_exit will block
3406          * until all the threads running the xgmi reset works reach
3407          * those points. task_barrier_full will do both blocks.
3408          */
3409         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3410
3411                 task_barrier_enter(&hive->tb);
3412                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3413
3414                 if (adev->asic_reset_res)
3415                         goto fail;
3416
3417                 task_barrier_exit(&hive->tb);
3418                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3419
3420                 if (adev->asic_reset_res)
3421                         goto fail;
3422
3423                 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3424                     adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3425                         adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3426         } else {
3427
3428                 task_barrier_full(&hive->tb);
3429                 adev->asic_reset_res =  amdgpu_asic_reset(adev);
3430         }
3431
3432 fail:
3433         if (adev->asic_reset_res)
3434                 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3435                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3436         amdgpu_put_xgmi_hive(hive);
3437 }
3438
3439 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3440 {
3441         char *input = amdgpu_lockup_timeout;
3442         char *timeout_setting = NULL;
3443         int index = 0;
3444         long timeout;
3445         int ret = 0;
3446
3447         /*
3448          * By default the timeout is 10000 ms for non-compute jobs
3449          * and 60000 ms for compute jobs.
3450          * In SR-IOV mode the compute timeout only defaults to 60000 ms
3451          * when one VF is assigned per PF, otherwise 10000 ms is used.
3452          */
3453         adev->gfx_timeout = msecs_to_jiffies(10000);
3454         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3455         if (amdgpu_sriov_vf(adev))
3456                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3457                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3458         else
3459                 adev->compute_timeout =  msecs_to_jiffies(60000);
3460
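        /*
         * lockup_timeout is a comma separated list applied in the order
         * <gfx>,<compute>,<sdma>,<video>, e.g. "10000,60000,10000,10000".
         * A value of 0 keeps the default above and a negative value
         * disables the timeout for that queue type.
         */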
3461         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3462                 while ((timeout_setting = strsep(&input, ",")) &&
3463                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3464                         ret = kstrtol(timeout_setting, 0, &timeout);
3465                         if (ret)
3466                                 return ret;
3467
3468                         if (timeout == 0) {
3469                                 index++;
3470                                 continue;
3471                         } else if (timeout < 0) {
3472                                 timeout = MAX_SCHEDULE_TIMEOUT;
3473                                 dev_warn(adev->dev, "lockup timeout disabled");
3474                                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3475                         } else {
3476                                 timeout = msecs_to_jiffies(timeout);
3477                         }
3478
3479                         switch (index++) {
3480                         case 0:
3481                                 adev->gfx_timeout = timeout;
3482                                 break;
3483                         case 1:
3484                                 adev->compute_timeout = timeout;
3485                                 break;
3486                         case 2:
3487                                 adev->sdma_timeout = timeout;
3488                                 break;
3489                         case 3:
3490                                 adev->video_timeout = timeout;
3491                                 break;
3492                         default:
3493                                 break;
3494                         }
3495                 }
3496                 /*
3497                  * There is only one value specified and
3498                  * it should apply to all non-compute jobs.
3499                  */
3500                 if (index == 1) {
3501                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3502                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3503                                 adev->compute_timeout = adev->gfx_timeout;
3504                 }
3505         }
3506
3507         return ret;
3508 }
3509
3510 /**
3511  * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3512  *
3513  * @adev: amdgpu_device pointer
3514  *
3515  * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in pass-through mode
3516  */
3517 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3518 {
3519         struct iommu_domain *domain;
3520
3521         domain = iommu_get_domain_for_dev(adev->dev);
3522         if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3523                 adev->ram_is_direct_mapped = true;
3524 }
3525
3526 static const struct attribute *amdgpu_dev_attributes[] = {
3527         &dev_attr_product_name.attr,
3528         &dev_attr_product_number.attr,
3529         &dev_attr_serial_number.attr,
3530         &dev_attr_pcie_replay_count.attr,
3531         NULL
3532 };
3533
3534 /**
3535  * amdgpu_device_init - initialize the driver
3536  *
3537  * @adev: amdgpu_device pointer
3538  * @flags: driver flags
3539  *
3540  * Initializes the driver info and hw (all asics).
3541  * Returns 0 for success or an error on failure.
3542  * Called at driver startup.
3543  */
3544 int amdgpu_device_init(struct amdgpu_device *adev,
3545                        uint32_t flags)
3546 {
3547         struct drm_device *ddev = adev_to_drm(adev);
3548         struct pci_dev *pdev = adev->pdev;
3549         int r, i;
3550         bool px = false;
3551         u32 max_MBps;
3552
3553         adev->shutdown = false;
3554         adev->flags = flags;
3555
3556         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3557                 adev->asic_type = amdgpu_force_asic_type;
3558         else
3559                 adev->asic_type = flags & AMD_ASIC_MASK;
3560
3561         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3562         if (amdgpu_emu_mode == 1)
3563                 adev->usec_timeout *= 10;
3564         adev->gmc.gart_size = 512 * 1024 * 1024;
3565         adev->accel_working = false;
3566         adev->num_rings = 0;
3567         adev->mman.buffer_funcs = NULL;
3568         adev->mman.buffer_funcs_ring = NULL;
3569         adev->vm_manager.vm_pte_funcs = NULL;
3570         adev->vm_manager.vm_pte_num_scheds = 0;
3571         adev->gmc.gmc_funcs = NULL;
3572         adev->harvest_ip_mask = 0x0;
3573         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3574         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3575
3576         adev->smc_rreg = &amdgpu_invalid_rreg;
3577         adev->smc_wreg = &amdgpu_invalid_wreg;
3578         adev->pcie_rreg = &amdgpu_invalid_rreg;
3579         adev->pcie_wreg = &amdgpu_invalid_wreg;
3580         adev->pciep_rreg = &amdgpu_invalid_rreg;
3581         adev->pciep_wreg = &amdgpu_invalid_wreg;
3582         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3583         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3584         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3585         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3586         adev->didt_rreg = &amdgpu_invalid_rreg;
3587         adev->didt_wreg = &amdgpu_invalid_wreg;
3588         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3589         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3590         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3591         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3592
3593         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3594                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3595                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3596
3597         /* mutex initialization is all done here so we
3598          * can recall the function without having locking issues */
3599         mutex_init(&adev->firmware.mutex);
3600         mutex_init(&adev->pm.mutex);
3601         mutex_init(&adev->gfx.gpu_clock_mutex);
3602         mutex_init(&adev->srbm_mutex);
3603         mutex_init(&adev->gfx.pipe_reserve_mutex);
3604         mutex_init(&adev->gfx.gfx_off_mutex);
3605         mutex_init(&adev->grbm_idx_mutex);
3606         mutex_init(&adev->mn_lock);
3607         mutex_init(&adev->virt.vf_errors.lock);
3608         hash_init(adev->mn_hash);
3609         mutex_init(&adev->psp.mutex);
3610         mutex_init(&adev->notifier_lock);
3611         mutex_init(&adev->pm.stable_pstate_ctx_lock);
3612
3613         amdgpu_device_init_apu_flags(adev);
3614
3615         r = amdgpu_device_check_arguments(adev);
3616         if (r)
3617                 return r;
3618
3619         spin_lock_init(&adev->mmio_idx_lock);
3620         spin_lock_init(&adev->smc_idx_lock);
3621         spin_lock_init(&adev->pcie_idx_lock);
3622         spin_lock_init(&adev->uvd_ctx_idx_lock);
3623         spin_lock_init(&adev->didt_idx_lock);
3624         spin_lock_init(&adev->gc_cac_idx_lock);
3625         spin_lock_init(&adev->se_cac_idx_lock);
3626         spin_lock_init(&adev->audio_endpt_idx_lock);
3627         spin_lock_init(&adev->mm_stats.lock);
3628
3629         INIT_LIST_HEAD(&adev->shadow_list);
3630         mutex_init(&adev->shadow_list_lock);
3631
3632         INIT_LIST_HEAD(&adev->reset_list);
3633
3634         INIT_LIST_HEAD(&adev->ras_list);
3635
3636         INIT_DELAYED_WORK(&adev->delayed_init_work,
3637                           amdgpu_device_delayed_init_work_handler);
3638         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3639                           amdgpu_device_delay_enable_gfx_off);
3640
3641         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3642
3643         adev->gfx.gfx_off_req_count = 1;
3644         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3645
3646         atomic_set(&adev->throttling_logging_enabled, 1);
3647         /*
3648          * If throttling continues, logging will be performed every minute
3649          * to avoid log flooding. "-1" is subtracted since the thermal
3650          * throttling interrupt comes every second. Thus, the total logging
3651          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3652          * for throttling interrupt) = 60 seconds.
3653          */
3654         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3655         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3656
3657         /* Registers mapping */
3658         /* TODO: block userspace mapping of io register */
3659         if (adev->asic_type >= CHIP_BONAIRE) {
3660                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3661                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3662         } else {
3663                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3664                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3665         }
3666
3667         for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3668                 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3669
3670         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3671         if (adev->rmmio == NULL) {
3672                 return -ENOMEM;
3673         }
3674         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3675         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3676
3677         amdgpu_device_get_pcie_info(adev);
3678
3679         if (amdgpu_mcbp)
3680                 DRM_INFO("MCBP is enabled\n");
3681
3682         if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3683                 adev->enable_mes = true;
3684
3685         /* detect hw virtualization here */
3686         amdgpu_detect_virtualization(adev);
3687
3688         r = amdgpu_device_get_job_timeout_settings(adev);
3689         if (r) {
3690                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3691                 return r;
3692         }
3693
3694         /*
3695          * Reset domain needs to be present early, before the XGMI hive is
3696          * discovered (if any) and initialized, to use the reset sem and
3697          * in_gpu_reset flag early on during init.
3698          */
3699         adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3700         if (!adev->reset_domain)
3701                 return -ENOMEM;
3702
3703         /* early init functions */
3704         r = amdgpu_device_ip_early_init(adev);
3705         if (r)
3706                 return r;
3707
3708         /* Need to get xgmi info early to decide the reset behavior*/
3709         if (adev->gmc.xgmi.supported) {
3710                 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3711                 if (r)
3712                         return r;
3713         }
3714
3715         /* enable PCIE atomic ops */
3716         if (amdgpu_sriov_vf(adev))
3717                 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3718                         adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags ==
3719                         (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3720         else
3721                 adev->have_atomics_support =
3722                         !pci_enable_atomic_ops_to_root(adev->pdev,
3723                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3724                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3725         if (!adev->have_atomics_support)
3726                 dev_info(adev->dev, "PCIE atomic ops is not supported\n");
3727
3728         /* doorbell bar mapping and doorbell index init*/
3729         amdgpu_device_doorbell_init(adev);
3730
3731         if (amdgpu_emu_mode == 1) {
3732                 /* post the asic on emulation mode */
3733                 emu_soc_asic_init(adev);
3734                 goto fence_driver_init;
3735         }
3736
3737         amdgpu_reset_init(adev);
3738
3739         /* detect if we are with an SRIOV vbios */
3740         amdgpu_device_detect_sriov_bios(adev);
3741
3742         /* check if we need to reset the asic
3743          *  E.g., driver was not cleanly unloaded previously, etc.
3744          */
3745         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3746                 if (adev->gmc.xgmi.num_physical_nodes) {
3747                         dev_info(adev->dev, "Pending hive reset.\n");
3748                         adev->gmc.xgmi.pending_reset = true;
3749                         /* Only need to init necessary block for SMU to handle the reset */
3750                         for (i = 0; i < adev->num_ip_blocks; i++) {
3751                                 if (!adev->ip_blocks[i].status.valid)
3752                                         continue;
3753                                 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3754                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3755                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3756                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3757                                         DRM_DEBUG("IP %s disabled for hw_init.\n",
3758                                                 adev->ip_blocks[i].version->funcs->name);
3759                                         adev->ip_blocks[i].status.hw = true;
3760                                 }
3761                         }
3762                 } else {
3763                         r = amdgpu_asic_reset(adev);
3764                         if (r) {
3765                                 dev_err(adev->dev, "asic reset on init failed\n");
3766                                 goto failed;
3767                         }
3768                 }
3769         }
3770
3771         pci_enable_pcie_error_reporting(adev->pdev);
3772
3773         /* Post card if necessary */
3774         if (amdgpu_device_need_post(adev)) {
3775                 if (!adev->bios) {
3776                         dev_err(adev->dev, "no vBIOS found\n");
3777                         r = -EINVAL;
3778                         goto failed;
3779                 }
3780                 DRM_INFO("GPU posting now...\n");
3781                 r = amdgpu_device_asic_init(adev);
3782                 if (r) {
3783                         dev_err(adev->dev, "gpu post error!\n");
3784                         goto failed;
3785                 }
3786         }
3787
3788         if (adev->is_atom_fw) {
3789                 /* Initialize clocks */
3790                 r = amdgpu_atomfirmware_get_clock_info(adev);
3791                 if (r) {
3792                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3793                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3794                         goto failed;
3795                 }
3796         } else {
3797                 /* Initialize clocks */
3798                 r = amdgpu_atombios_get_clock_info(adev);
3799                 if (r) {
3800                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3801                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3802                         goto failed;
3803                 }
3804                 /* init i2c buses */
3805                 if (!amdgpu_device_has_dc_support(adev))
3806                         amdgpu_atombios_i2c_init(adev);
3807         }
3808
3809 fence_driver_init:
3810         /* Fence driver */
3811         r = amdgpu_fence_driver_sw_init(adev);
3812         if (r) {
3813                 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3814                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3815                 goto failed;
3816         }
3817
3818         /* init the mode config */
3819         drm_mode_config_init(adev_to_drm(adev));
3820
3821         r = amdgpu_device_ip_init(adev);
3822         if (r) {
3823                 /* failed in exclusive mode due to timeout */
3824                 if (amdgpu_sriov_vf(adev) &&
3825                     !amdgpu_sriov_runtime(adev) &&
3826                     amdgpu_virt_mmio_blocked(adev) &&
3827                     !amdgpu_virt_wait_reset(adev)) {
3828                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3829                         /* Don't send request since VF is inactive. */
3830                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3831                         adev->virt.ops = NULL;
3832                         r = -EAGAIN;
3833                         goto release_ras_con;
3834                 }
3835                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3836                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3837                 goto release_ras_con;
3838         }
3839
3840         amdgpu_fence_driver_hw_init(adev);
3841
3842         dev_info(adev->dev,
3843                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3844                         adev->gfx.config.max_shader_engines,
3845                         adev->gfx.config.max_sh_per_se,
3846                         adev->gfx.config.max_cu_per_sh,
3847                         adev->gfx.cu_info.number);
3848
3849         adev->accel_working = true;
3850
3851         amdgpu_vm_check_compute_bug(adev);
3852
3853         /* Initialize the buffer migration limit. */
3854         if (amdgpu_moverate >= 0)
3855                 max_MBps = amdgpu_moverate;
3856         else
3857                 max_MBps = 8; /* Allow 8 MB/s. */
3858         /* Get a log2 for easy divisions. */
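        /* e.g. the default max_MBps of 8 yields log2_max_MBps = 3 */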
3859         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3860
3861         r = amdgpu_pm_sysfs_init(adev);
3862         if (r) {
3863                 adev->pm_sysfs_en = false;
3864                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3865         } else
3866                 adev->pm_sysfs_en = true;
3867
3868         r = amdgpu_ucode_sysfs_init(adev);
3869         if (r) {
3870                 adev->ucode_sysfs_en = false;
3871                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3872         } else
3873                 adev->ucode_sysfs_en = true;
3874
3875         if ((amdgpu_testing & 1)) {
3876                 if (adev->accel_working)
3877                         amdgpu_test_moves(adev);
3878                 else
3879                         DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3880         }
3881         if (amdgpu_benchmarking) {
3882                 if (adev->accel_working)
3883                         amdgpu_benchmark(adev, amdgpu_benchmarking);
3884                 else
3885                         DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3886         }
3887
3888         /*
3889          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3890          * Otherwise the mgpu fan boost feature will be skipped because the
3891          * gpu instance count would be too low.
3892          */
3893         amdgpu_register_gpu_instance(adev);
3894
3895         /* enable clockgating, etc. after ib tests, etc. since some blocks require
3896          * explicit gating rather than handling it automatically.
3897          */
3898         if (!adev->gmc.xgmi.pending_reset) {
3899                 r = amdgpu_device_ip_late_init(adev);
3900                 if (r) {
3901                         dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3902                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3903                         goto release_ras_con;
3904                 }
3905                 /* must succeed. */
3906                 amdgpu_ras_resume(adev);
3907                 queue_delayed_work(system_wq, &adev->delayed_init_work,
3908                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3909         }
3910
3911         if (amdgpu_sriov_vf(adev))
3912                 flush_delayed_work(&adev->delayed_init_work);
3913
3914         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3915         if (r)
3916                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3917
3918         if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3919                 r = amdgpu_pmu_init(adev);
3920                 if (r)
3921                         dev_err(adev->dev, "amdgpu_pmu_init failed\n");
        }
3922
3923         /* Have the stored PCI config space at hand for restore after a sudden PCI error */
3924         if (amdgpu_device_cache_pci_state(adev->pdev))
3925                 pci_restore_state(pdev);
3926
3927         /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3928         /* this will fail for cards that aren't VGA class devices, just
3929          * ignore it */
3930         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3931                 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3932
3933         if (amdgpu_device_supports_px(ddev)) {
3934                 px = true;
3935                 vga_switcheroo_register_client(adev->pdev,
3936                                                &amdgpu_switcheroo_ops, px);
3937                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3938         }
3939
3940         if (adev->gmc.xgmi.pending_reset)
3941                 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3942                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3943
3944         amdgpu_device_check_iommu_direct_map(adev);
3945
3946         return 0;
3947
3948 release_ras_con:
3949         amdgpu_release_ras_context(adev);
3950
3951 failed:
3952         amdgpu_vf_error_trans_all(adev);
3953
3954         return r;
3955 }
3956
3957 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3958 {
3959
3960         /* Clear all CPU mappings pointing to this device */
3961         unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3962
3963         /* Unmap all mapped bars - Doorbell, registers and VRAM */
3964         amdgpu_device_doorbell_fini(adev);
3965
3966         iounmap(adev->rmmio);
3967         adev->rmmio = NULL;
3968         if (adev->mman.aper_base_kaddr)
3969                 iounmap(adev->mman.aper_base_kaddr);
3970         adev->mman.aper_base_kaddr = NULL;
3971
3972         /* Memory manager related */
3973         if (!adev->gmc.xgmi.connected_to_cpu) {
3974                 arch_phys_wc_del(adev->gmc.vram_mtrr);
3975                 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3976         }
3977 }
3978
3979 /**
3980  * amdgpu_device_fini_hw - tear down the driver
3981  *
3982  * @adev: amdgpu_device pointer
3983  *
3984  * Tear down the driver info (all asics).
3985  * Called at driver shutdown.
3986  */
3987 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3988 {
3989         dev_info(adev->dev, "amdgpu: finishing device.\n");
3990         flush_delayed_work(&adev->delayed_init_work);
3991         if (adev->mman.initialized) {
3992                 flush_delayed_work(&adev->mman.bdev.wq);
3993                 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3994         }
3995         adev->shutdown = true;
3996
3997         /* make sure IB test finished before entering exclusive mode
3998          * to avoid preemption on IB test
3999          */
4000         if (amdgpu_sriov_vf(adev)) {
4001                 amdgpu_virt_request_full_gpu(adev, false);
4002                 amdgpu_virt_fini_data_exchange(adev);
4003         }
4004
4005         /* disable all interrupts */
4006         amdgpu_irq_disable_all(adev);
4007         if (adev->mode_info.mode_config_initialized) {
4008                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4009                         drm_helper_force_disable_all(adev_to_drm(adev));
4010                 else
4011                         drm_atomic_helper_shutdown(adev_to_drm(adev));
4012         }
4013         amdgpu_fence_driver_hw_fini(adev);
4014
4015         if (adev->pm_sysfs_en)
4016                 amdgpu_pm_sysfs_fini(adev);
4017         if (adev->ucode_sysfs_en)
4018                 amdgpu_ucode_sysfs_fini(adev);
4019         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4020
4021         /* disable ras feature must before hw fini */
4022         amdgpu_ras_pre_fini(adev);
4023
4024         amdgpu_device_ip_fini_early(adev);
4025
4026         amdgpu_irq_fini_hw(adev);
4027
4028         if (adev->mman.initialized)
4029                 ttm_device_clear_dma_mappings(&adev->mman.bdev);
4030
4031         amdgpu_gart_dummy_page_fini(adev);
4032
4033         if (drm_dev_is_unplugged(adev_to_drm(adev)))
4034                 amdgpu_device_unmap_mmio(adev);
4035
4036 }
4037
4038 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4039 {
4040         int idx;
4041
4042         amdgpu_fence_driver_sw_fini(adev);
4043         amdgpu_device_ip_fini(adev);
4044         release_firmware(adev->firmware.gpu_info_fw);
4045         adev->firmware.gpu_info_fw = NULL;
4046         adev->accel_working = false;
4047
4048         amdgpu_reset_fini(adev);
4049
4050         /* free i2c buses */
4051         if (!amdgpu_device_has_dc_support(adev))
4052                 amdgpu_i2c_fini(adev);
4053
4054         if (amdgpu_emu_mode != 1)
4055                 amdgpu_atombios_fini(adev);
4056
4057         kfree(adev->bios);
4058         adev->bios = NULL;
4059         if (amdgpu_device_supports_px(adev_to_drm(adev))) {
4060                 vga_switcheroo_unregister_client(adev->pdev);
4061                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4062         }
4063         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4064                 vga_client_unregister(adev->pdev);
4065
4066         if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4067
4068                 iounmap(adev->rmmio);
4069                 adev->rmmio = NULL;
4070                 amdgpu_device_doorbell_fini(adev);
4071                 drm_dev_exit(idx);
4072         }
4073
4074         if (IS_ENABLED(CONFIG_PERF_EVENTS))
4075                 amdgpu_pmu_fini(adev);
4076         if (adev->mman.discovery_bin)
4077                 amdgpu_discovery_fini(adev);
4078
4079         amdgpu_reset_put_reset_domain(adev->reset_domain);
4080         adev->reset_domain = NULL;
4081
4082         kfree(adev->pci_state);
4083
4084 }
4085
4086 /**
4087  * amdgpu_device_evict_resources - evict device resources
4088  * @adev: amdgpu device object
4089  *
4090  * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4091  * of the vram memory type. Mainly used for evicting device resources
4092  * at suspend time.
4093  *
4094  */
4095 static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
4096 {
4097         /* No need to evict vram on APUs for suspend to ram or s2idle */
4098         if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4099                 return;
4100
4101         if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
4102                 DRM_WARN("evicting device resources failed\n");
4103
4104 }
4105
4106 /*
4107  * Suspend & resume.
4108  */
4109 /**
4110  * amdgpu_device_suspend - initiate device suspend
4111  *
4112  * @dev: drm dev pointer
4113  * @fbcon: notify the fbdev of suspend
4114  *
4115  * Puts the hw in the suspend state (all asics).
4116  * Returns 0 for success or an error on failure.
4117  * Called at driver suspend.
4118  */
4119 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4120 {
4121         struct amdgpu_device *adev = drm_to_adev(dev);
4122
4123         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4124                 return 0;
4125
4126         adev->in_suspend = true;
4127
4128         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4129                 DRM_WARN("smart shift update failed\n");
4130
4131         drm_kms_helper_poll_disable(dev);
4132
4133         if (fbcon)
4134                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4135
4136         cancel_delayed_work_sync(&adev->delayed_init_work);
4137
4138         amdgpu_ras_suspend(adev);
4139
4140         amdgpu_device_ip_suspend_phase1(adev);
4141
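        /*
         * Display was suspended in phase 1 above; quiesce KFD, evict
         * buffers and stop the fence driver before the remaining IPs are
         * suspended in phase 2 below.
         */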
4142         if (!adev->in_s0ix)
4143                 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4144
4145         amdgpu_device_evict_resources(adev);
4146
4147         amdgpu_fence_driver_hw_fini(adev);
4148
4149         amdgpu_device_ip_suspend_phase2(adev);
4150
4151         return 0;
4152 }
4153
4154 /**
4155  * amdgpu_device_resume - initiate device resume
4156  *
4157  * @dev: drm dev pointer
4158  * @fbcon: notify the fbdev of resume
4159  *
4160  * Bring the hw back to operating state (all asics).
4161  * Returns 0 for success or an error on failure.
4162  * Called at driver resume.
4163  */
4164 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4165 {
4166         struct amdgpu_device *adev = drm_to_adev(dev);
4167         int r = 0;
4168
4169         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4170                 return 0;
4171
4172         if (adev->in_s0ix)
4173                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4174
4175         /* post card */
4176         if (amdgpu_device_need_post(adev)) {
4177                 r = amdgpu_device_asic_init(adev);
4178                 if (r)
4179                         dev_err(adev->dev, "amdgpu asic init failed\n");
4180         }
4181
4182         r = amdgpu_device_ip_resume(adev);
4183         if (r) {
4184                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4185                 return r;
4186         }
4187         amdgpu_fence_driver_hw_init(adev);
4188
4189         r = amdgpu_device_ip_late_init(adev);
4190         if (r)
4191                 return r;
4192
4193         queue_delayed_work(system_wq, &adev->delayed_init_work,
4194                            msecs_to_jiffies(AMDGPU_RESUME_MS));
4195
4196         if (!adev->in_s0ix) {
4197                 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4198                 if (r)
4199                         return r;
4200         }
4201
4202         /* Make sure IB tests flushed */
4203         flush_delayed_work(&adev->delayed_init_work);
4204
4205         if (fbcon)
4206                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4207
4208         drm_kms_helper_poll_enable(dev);
4209
4210         amdgpu_ras_resume(adev);
4211
4212         /*
4213          * Most of the connector probing functions try to acquire runtime pm
4214          * refs to ensure that the GPU is powered on when connector polling is
4215          * performed. Since we're calling this from a runtime PM callback,
4216          * trying to acquire rpm refs will cause us to deadlock.
4217          *
4218          * Since we're guaranteed to be holding the rpm lock, it's safe to
4219          * temporarily disable the rpm helpers so this doesn't deadlock us.
4220          */
4221 #ifdef CONFIG_PM
4222         dev->dev->power.disable_depth++;
4223 #endif
4224         if (!amdgpu_device_has_dc_support(adev))
4225                 drm_helper_hpd_irq_event(dev);
4226         else
4227                 drm_kms_helper_hotplug_event(dev);
4228 #ifdef CONFIG_PM
4229         dev->dev->power.disable_depth--;
4230 #endif
4231         adev->in_suspend = false;
4232
4233         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4234                 DRM_WARN("smart shift update failed\n");
4235
4236         return 0;
4237 }
4238
4239 /**
4240  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4241  *
4242  * @adev: amdgpu_device pointer
4243  *
4244  * The list of all the hardware IPs that make up the asic is walked and
4245  * the check_soft_reset callbacks are run.  check_soft_reset determines
4246  * if the asic is still hung or not.
4247  * Returns true if any of the IPs are still in a hung state, false if not.
4248  */
4249 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4250 {
4251         int i;
4252         bool asic_hang = false;
4253
4254         if (amdgpu_sriov_vf(adev))
4255                 return true;
4256
4257         if (amdgpu_asic_need_full_reset(adev))
4258                 return true;
4259
4260         for (i = 0; i < adev->num_ip_blocks; i++) {
4261                 if (!adev->ip_blocks[i].status.valid)
4262                         continue;
4263                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4264                         adev->ip_blocks[i].status.hang =
4265                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4266                 if (adev->ip_blocks[i].status.hang) {
4267                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4268                         asic_hang = true;
4269                 }
4270         }
4271         return asic_hang;
4272 }
4273
4274 /**
4275  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4276  *
4277  * @adev: amdgpu_device pointer
4278  *
4279  * The list of all the hardware IPs that make up the asic is walked and the
4280  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4281  * handles any IP specific hardware or software state changes that are
4282  * necessary for a soft reset to succeed.
4283  * Returns 0 on success, negative error code on failure.
4284  */
4285 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4286 {
4287         int i, r = 0;
4288
4289         for (i = 0; i < adev->num_ip_blocks; i++) {
4290                 if (!adev->ip_blocks[i].status.valid)
4291                         continue;
4292                 if (adev->ip_blocks[i].status.hang &&
4293                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4294                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4295                         if (r)
4296                                 return r;
4297                 }
4298         }
4299
4300         return 0;
4301 }
4302
4303 /**
4304  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4305  *
4306  * @adev: amdgpu_device pointer
4307  *
4308  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4309  * reset is necessary to recover.
4310  * Returns true if a full asic reset is required, false if not.
4311  */
4312 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4313 {
4314         int i;
4315
4316         if (amdgpu_asic_need_full_reset(adev))
4317                 return true;
4318
4319         for (i = 0; i < adev->num_ip_blocks; i++) {
4320                 if (!adev->ip_blocks[i].status.valid)
4321                         continue;
4322                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4323                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4324                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4325                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4326                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4327                         if (adev->ip_blocks[i].status.hang) {
4328                                 dev_info(adev->dev, "Some blocks need a full reset!\n");
4329                                 return true;
4330                         }
4331                 }
4332         }
4333         return false;
4334 }
4335
4336 /**
4337  * amdgpu_device_ip_soft_reset - do a soft reset
4338  *
4339  * @adev: amdgpu_device pointer
4340  *
4341  * The list of all the hardware IPs that make up the asic is walked and the
4342  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4343  * IP specific hardware or software state changes that are necessary to soft
4344  * reset the IP.
4345  * Returns 0 on success, negative error code on failure.
4346  */
4347 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4348 {
4349         int i, r = 0;
4350
4351         for (i = 0; i < adev->num_ip_blocks; i++) {
4352                 if (!adev->ip_blocks[i].status.valid)
4353                         continue;
4354                 if (adev->ip_blocks[i].status.hang &&
4355                     adev->ip_blocks[i].version->funcs->soft_reset) {
4356                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4357                         if (r)
4358                                 return r;
4359                 }
4360         }
4361
4362         return 0;
4363 }
4364
4365 /**
4366  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4367  *
4368  * @adev: amdgpu_device pointer
4369  *
4370  * The list of all the hardware IPs that make up the asic is walked and the
4371  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4372  * handles any IP specific hardware or software state changes that are
4373  * necessary after the IP has been soft reset.
4374  * Returns 0 on success, negative error code on failure.
4375  */
4376 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4377 {
4378         int i, r = 0;
4379
4380         for (i = 0; i < adev->num_ip_blocks; i++) {
4381                 if (!adev->ip_blocks[i].status.valid)
4382                         continue;
4383                 if (adev->ip_blocks[i].status.hang &&
4384                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4385                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4386                 if (r)
4387                         return r;
4388         }
4389
4390         return 0;
4391 }
4392
4393 /**
4394  * amdgpu_device_recover_vram - Recover some VRAM contents
4395  *
4396  * @adev: amdgpu_device pointer
4397  *
4398  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4399  * restore things like GPUVM page tables after a GPU reset where
4400  * the contents of VRAM might be lost.
4401  *
4402  * Returns:
4403  * 0 on success, negative error code on failure.
4404  */
4405 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4406 {
4407         struct dma_fence *fence = NULL, *next = NULL;
4408         struct amdgpu_bo *shadow;
4409         struct amdgpu_bo_vm *vmbo;
4410         long r = 1, tmo;
4411
4412         if (amdgpu_sriov_runtime(adev))
4413                 tmo = msecs_to_jiffies(8000);
4414         else
4415                 tmo = msecs_to_jiffies(100);
4416
4417         dev_info(adev->dev, "recover vram bo from shadow start\n");
4418         mutex_lock(&adev->shadow_list_lock);
4419         list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4420                 shadow = &vmbo->bo;
4421                 /* No need to recover an evicted BO */
4422                 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4423                     shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4424                     shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4425                         continue;
4426
4427                 r = amdgpu_bo_restore_shadow(shadow, &next);
4428                 if (r)
4429                         break;
4430
4431                 if (fence) {
4432                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4433                         dma_fence_put(fence);
4434                         fence = next;
4435                         if (tmo == 0) {
4436                                 r = -ETIMEDOUT;
4437                                 break;
4438                         } else if (tmo < 0) {
4439                                 r = tmo;
4440                                 break;
4441                         }
4442                 } else {
4443                         fence = next;
4444                 }
4445         }
4446         mutex_unlock(&adev->shadow_list_lock);
4447
4448         if (fence)
4449                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4450         dma_fence_put(fence);
4451
4452         if (r < 0 || tmo <= 0) {
4453                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4454                 return -EIO;
4455         }
4456
4457         dev_info(adev->dev, "recover vram bo from shadow done\n");
4458         return 0;
4459 }
4460
4461
4462 /**
4463  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4464  *
4465  * @adev: amdgpu_device pointer
4466  * @from_hypervisor: request from hypervisor
4467  *
4468  * Do a VF FLR and reinitialize the ASIC.
4469  * Returns 0 on success, negative error code on failure.
4470  */
4471 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4472                                      bool from_hypervisor)
4473 {
4474         int r;
4475         struct amdgpu_hive_info *hive = NULL;
4476         int retry_limit = 0;
4477
4478 retry:
4479         amdgpu_amdkfd_pre_reset(adev);
4480
4483         if (from_hypervisor)
4484                 r = amdgpu_virt_request_full_gpu(adev, true);
4485         else
4486                 r = amdgpu_virt_reset_gpu(adev);
4487         if (r)
4488                 return r;
4489
4490         /* Resume IP prior to SMC */
4491         r = amdgpu_device_ip_reinit_early_sriov(adev);
4492         if (r)
4493                 goto error;
4494
4495         amdgpu_virt_init_data_exchange(adev);
4496
4497         r = amdgpu_device_fw_loading(adev);
4498         if (r)
4499                 return r;
4500
4501         /* now we are okay to resume SMC/CP/SDMA */
4502         r = amdgpu_device_ip_reinit_late_sriov(adev);
4503         if (r)
4504                 goto error;
4505
4506         hive = amdgpu_get_xgmi_hive(adev);
4507         /* Update PSP FW topology after reset */
4508         if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4509                 r = amdgpu_xgmi_update_topology(hive, adev);
4510
4511         if (hive)
4512                 amdgpu_put_xgmi_hive(hive);
4513
4514         if (!r) {
4515                 amdgpu_irq_gpu_reset_resume_helper(adev);
4516                 r = amdgpu_ib_ring_tests(adev);
4517                 amdgpu_amdkfd_post_reset(adev);
4518         }
4519
4520 error:
4521         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4522                 amdgpu_inc_vram_lost(adev);
4523                 r = amdgpu_device_recover_vram(adev);
4524         }
4525         amdgpu_virt_release_full_gpu(adev, true);
4526
4527         if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4528                 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4529                         retry_limit++;
4530                         goto retry;
4531                 } else
4532                         DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4533         }
4534
4535         return r;
4536 }
4537
4538 /**
4539  * amdgpu_device_has_job_running - check if there is any job in the pending list
4540  *
4541  * @adev: amdgpu_device pointer
4542  *
4543  * Check if there is any job in the pending list of any ring's scheduler.
4544  */
4545 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4546 {
4547         int i;
4548         struct drm_sched_job *job;
4549
4550         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4551                 struct amdgpu_ring *ring = adev->rings[i];
4552
4553                 if (!ring || !ring->sched.thread)
4554                         continue;
4555
4556                 spin_lock(&ring->sched.job_list_lock);
4557                 job = list_first_entry_or_null(&ring->sched.pending_list,
4558                                                struct drm_sched_job, list);
4559                 spin_unlock(&ring->sched.job_list_lock);
4560                 if (job)
4561                         return true;
4562         }
4563         return false;
4564 }
4565
4566 /**
4567  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4568  *
4569  * @adev: amdgpu_device pointer
4570  *
4571  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4572  * a hung GPU.
4573  */
4574 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4575 {
4576         if (!amdgpu_device_ip_check_soft_reset(adev)) {
4577                 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4578                 return false;
4579         }
4580
4581         if (amdgpu_gpu_recovery == 0)
4582                 goto disabled;
4583
4584         if (amdgpu_sriov_vf(adev))
4585                 return true;
4586
4587         if (amdgpu_gpu_recovery == -1) {
4588                 switch (adev->asic_type) {
4589 #ifdef CONFIG_DRM_AMDGPU_SI
4590                 case CHIP_VERDE:
4591                 case CHIP_TAHITI:
4592                 case CHIP_PITCAIRN:
4593                 case CHIP_OLAND:
4594                 case CHIP_HAINAN:
4595 #endif
4596 #ifdef CONFIG_DRM_AMDGPU_CIK
4597                 case CHIP_KAVERI:
4598                 case CHIP_KABINI:
4599                 case CHIP_MULLINS:
4600 #endif
4601                 case CHIP_CARRIZO:
4602                 case CHIP_STONEY:
4603                 case CHIP_CYAN_SKILLFISH:
4604                         goto disabled;
4605                 default:
4606                         break;
4607                 }
4608         }
4609
4610         return true;
4611
4612 disabled:
4613         dev_info(adev->dev, "GPU recovery disabled.\n");
4614         return false;
4615 }
4616
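/**
 * amdgpu_device_mode1_reset - perform a mode1 (whole asic) reset
 *
 * @adev: amdgpu_device pointer
 *
 * Disables PCI bus mastering, caches the PCI config space, then triggers a
 * mode1 reset through the SMU if supported, falling back to the PSP
 * otherwise.  Afterwards the PCI state is restored and the memory size
 * register is polled until the asic has come out of reset.
 * Returns 0 on success, negative error code on failure.
 */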
4617 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4618 {
4619         u32 i;
4620         int ret = 0;
4621
4622         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4623
4624         dev_info(adev->dev, "GPU mode1 reset\n");
4625
4626         /* disable BM */
4627         pci_clear_master(adev->pdev);
4628
4629         amdgpu_device_cache_pci_state(adev->pdev);
4630
4631         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4632                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4633                 ret = amdgpu_dpm_mode1_reset(adev);
4634         } else {
4635                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4636                 ret = psp_gpu_reset(adev);
4637         }
4638
4639         if (ret)
4640                 dev_err(adev->dev, "GPU mode1 reset failed\n");
4641
4642         amdgpu_device_load_pci_state(adev->pdev);
4643
4644         /* wait for asic to come out of reset */
4645         for (i = 0; i < adev->usec_timeout; i++) {
4646                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4647
4648                 if (memsize != 0xffffffff)
4649                         break;
4650                 udelay(1);
4651         }
4652
4653         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4654         return ret;
4655 }
4656
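/**
 * amdgpu_device_pre_asic_reset - prepare a device for asic reset
 *
 * @adev: amdgpu_device pointer
 * @reset_context: common reset context for the reset being performed
 *
 * Stops the data exchange thread on SR-IOV, clears and force-completes the
 * hardware fences of all rings, increases the karma of the guilty job (if it
 * has a VM), and gives the dedicated reset handler a chance to prepare the
 * hardware context.  On bare metal it additionally tries a soft reset of the
 * hung IP blocks and falls back to flagging a full reset when that fails.
 * Returns 0 on success, negative error code on failure.
 */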
4657 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4658                                  struct amdgpu_reset_context *reset_context)
4659 {
4660         int i, r = 0;
4661         struct amdgpu_job *job = NULL;
4662         bool need_full_reset =
4663                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4664
4665         if (reset_context->reset_req_dev == adev)
4666                 job = reset_context->job;
4667
4668         if (amdgpu_sriov_vf(adev)) {
4669                 /* stop the data exchange thread */
4670                 amdgpu_virt_fini_data_exchange(adev);
4671         }
4672
4673         /* block all schedulers and reset given job's ring */
4674         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4675                 struct amdgpu_ring *ring = adev->rings[i];
4676
4677                 if (!ring || !ring->sched.thread)
4678                         continue;
4679
4680                 /* Clear job fence from fence drv to avoid force_completion
4681                  * leaving NULL and vm flush fence in fence drv */
4682                 amdgpu_fence_driver_clear_job_fences(ring);
4683
4684                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4685                 amdgpu_fence_driver_force_completion(ring);
4686         }
4687
4688         if (job && job->vm)
4689                 drm_sched_increase_karma(&job->base);
4690
4691         r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4692         /* If reset handler not implemented, continue; otherwise return */
4693         if (r == -ENOSYS)
4694                 r = 0;
4695         else
4696                 return r;
4697
4698         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4699         if (!amdgpu_sriov_vf(adev)) {
4700
4701                 if (!need_full_reset)
4702                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4703
4704                 if (!need_full_reset) {
4705                         amdgpu_device_ip_pre_soft_reset(adev);
4706                         r = amdgpu_device_ip_soft_reset(adev);
4707                         amdgpu_device_ip_post_soft_reset(adev);
4708                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4709                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4710                                 need_full_reset = true;
4711                         }
4712                 }
4713
4714                 if (need_full_reset)
4715                         r = amdgpu_device_ip_suspend(adev);
4716                 if (need_full_reset)
4717                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4718                 else
4719                         clear_bit(AMDGPU_NEED_FULL_RESET,
4720                                   &reset_context->flags);
4721         }
4722
4723         return r;
4724 }
4725
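/**
 * amdgpu_do_asic_reset - perform the actual asic reset and re-init
 *
 * @device_list_handle: list of devices (XGMI hive or single device) to reset
 * @reset_context: common reset context for the reset being performed
 *
 * Tries the dedicated reset handler first and falls back to the default
 * method: a full asic reset of every device in the list (run in parallel for
 * XGMI hives), followed by re-posting the asic, resuming the IP blocks,
 * reloading firmware, recovering VRAM contents from shadow BOs and running
 * the IB ring tests.
 * Returns 0 on success, -EAGAIN if the IB tests failed and a retry with a
 * full reset is requested, or another negative error code on failure.
 */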
4726 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4727                          struct amdgpu_reset_context *reset_context)
4728 {
4729         struct amdgpu_device *tmp_adev = NULL;
4730         bool need_full_reset, skip_hw_reset, vram_lost = false;
4731         int r = 0;
4732
4733         /* Try reset handler method first */
4734         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4735                                     reset_list);
4736         r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4737         /* If reset handler not implemented, continue; otherwise return */
4738         if (r == -ENOSYS)
4739                 r = 0;
4740         else
4741                 return r;
4742
4743         /* Reset handler not implemented, use the default method */
4744         need_full_reset =
4745                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4746         skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4747
4748         /*
4749          * ASIC reset has to be done on all XGMI hive nodes ASAP
4750          * to allow proper links negotiation in FW (within 1 sec)
4751          */
4752         if (!skip_hw_reset && need_full_reset) {
4753                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4754                         /* For XGMI run all resets in parallel to speed up the process */
4755                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4756                                 tmp_adev->gmc.xgmi.pending_reset = false;
4757                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4758                                         r = -EALREADY;
4759                         } else
4760                                 r = amdgpu_asic_reset(tmp_adev);
4761
4762                         if (r) {
4763                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4764                                          r, adev_to_drm(tmp_adev)->unique);
4765                                 break;
4766                         }
4767                 }
4768
4769                 /* For XGMI wait for all resets to complete before proceed */
4770                 if (!r) {
4771                         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4772                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4773                                         flush_work(&tmp_adev->xgmi_reset_work);
4774                                         r = tmp_adev->asic_reset_res;
4775                                         if (r)
4776                                                 break;
4777                                 }
4778                         }
4779                 }
4780         }
4781
4782         if (!r && amdgpu_ras_intr_triggered()) {
4783                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4784                         if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4785                             tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4786                                 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4787                 }
4788
4789                 amdgpu_ras_intr_cleared();
4790         }
4791
4792         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4793                 if (need_full_reset) {
4794                         /* post card */
4795                         r = amdgpu_device_asic_init(tmp_adev);
4796                         if (r) {
4797                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4798                         } else {
4799                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4800                                 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4801                                 if (r)
4802                                         goto out;
4803
4804                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4805                                 if (r)
4806                                         goto out;
4807
4808                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4809                                 if (vram_lost) {
4810                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4811                                         amdgpu_inc_vram_lost(tmp_adev);
4812                                 }
4813
4814                                 r = amdgpu_device_fw_loading(tmp_adev);
4815                                 if (r)
4816                                         return r;
4817
4818                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4819                                 if (r)
4820                                         goto out;
4821
4822                                 if (vram_lost)
4823                                         amdgpu_device_fill_reset_magic(tmp_adev);
4824
4825                                 /*
4826                                  * Add this ASIC as tracked, since the reset has
4827                                  * already completed successfully.
4828                                  */
4829                                 amdgpu_register_gpu_instance(tmp_adev);
4830
4831                                 if (!reset_context->hive &&
4832                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4833                                         amdgpu_xgmi_add_device(tmp_adev);
4834
4835                                 r = amdgpu_device_ip_late_init(tmp_adev);
4836                                 if (r)
4837                                         goto out;
4838
4839                                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4840
4841                                 /*
4842                                  * The GPU enters a bad state once the number of
4843                                  * faulty pages reported by ECC reaches the
4844                                  * threshold, and RAS recovery is scheduled next.
4845                                  * So add one check here to break recovery if the
4846                                  * bad page threshold has indeed been exceeded, and
4847                                  * remind the user to either retire this GPU or set
4848                                  * a bigger bad_page_threshold value the next time
4849                                  * the driver is probed.
4850                                  */
4851                                 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4852                                         /* must succeed. */
4853                                         amdgpu_ras_resume(tmp_adev);
4854                                 } else {
4855                                         r = -EINVAL;
4856                                         goto out;
4857                                 }
4858
4859                                 /* Update PSP FW topology after reset */
4860                                 if (reset_context->hive &&
4861                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4862                                         r = amdgpu_xgmi_update_topology(
4863                                                 reset_context->hive, tmp_adev);
4864                         }
4865                 }
4866
4867 out:
4868                 if (!r) {
4869                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4870                         r = amdgpu_ib_ring_tests(tmp_adev);
4871                         if (r) {
4872                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4873                                 need_full_reset = true;
4874                                 r = -EAGAIN;
4875                                 goto end;
4876                         }
4877                 }
4878
4879                 if (!r)
4880                         r = amdgpu_device_recover_vram(tmp_adev);
4881                 else
4882                         tmp_adev->asic_reset_res = r;
4883         }
4884
4885 end:
4886         if (need_full_reset)
4887                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4888         else
4889                 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4890         return r;
4891 }
4892
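/**
 * amdgpu_device_set_mp1_state - set the MP1 state for the pending reset
 *
 * @adev: amdgpu_device pointer
 *
 * Programs adev->mp1_state according to the reset method that is about to be
 * used: SHUTDOWN for mode1 reset, RESET for mode2 reset and NONE otherwise.
 */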
4893 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
4894 {
4895
4896         switch (amdgpu_asic_reset_method(adev)) {
4897         case AMD_RESET_METHOD_MODE1:
4898                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4899                 break;
4900         case AMD_RESET_METHOD_MODE2:
4901                 adev->mp1_state = PP_MP1_STATE_RESET;
4902                 break;
4903         default:
4904                 adev->mp1_state = PP_MP1_STATE_NONE;
4905                 break;
4906         }
4907 }
4908
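/**
 * amdgpu_device_unset_mp1_state - clear the MP1 state after reset
 *
 * @adev: amdgpu_device pointer
 *
 * Transfers any collected VF errors to the host and puts MP1 back into the
 * NONE state once the reset has finished.
 */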
4909 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
4910 {
4911         amdgpu_vf_error_trans_all(adev);
4912         adev->mp1_state = PP_MP1_STATE_NONE;
4913 }
4914
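/**
 * amdgpu_device_resume_display_audio - re-enable the display audio device
 *
 * @adev: amdgpu_device pointer
 *
 * Looks up the audio function (function 1) on the same bus as the GPU and
 * re-enables runtime PM for it after it was suspended around a GPU reset.
 */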
4915 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4916 {
4917         struct pci_dev *p = NULL;
4918
4919         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4920                         adev->pdev->bus->number, 1);
4921         if (p) {
4922                 pm_runtime_enable(&(p->dev));
4923                 pm_runtime_resume(&(p->dev));
4924         }
4925 }
4926
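/**
 * amdgpu_device_suspend_display_audio - runtime suspend the display audio device
 *
 * @adev: amdgpu_device pointer
 *
 * For reset methods known to disturb the audio codec (BACO and mode1), waits
 * for the audio function on the same bus to become runtime suspended, using
 * the device's autosuspend delay (or a 4 second fallback) as the deadline,
 * then disables runtime PM for it so it stays down during the reset.
 * Returns 0 on success, negative error code on failure.
 */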
4927 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4928 {
4929         enum amd_reset_method reset_method;
4930         struct pci_dev *p = NULL;
4931         u64 expires;
4932
4933         /*
4934          * For now, only BACO and mode1 reset are confirmed
4935          * to suffer the audio issue if the audio device is not properly suspended.
4936          */
4937         reset_method = amdgpu_asic_reset_method(adev);
4938         if ((reset_method != AMD_RESET_METHOD_BACO) &&
4939              (reset_method != AMD_RESET_METHOD_MODE1))
4940                 return -EINVAL;
4941
4942         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4943                         adev->pdev->bus->number, 1);
4944         if (!p)
4945                 return -ENODEV;
4946
4947         expires = pm_runtime_autosuspend_expiration(&(p->dev));
4948         if (!expires)
4949                 /*
4950                  * If we cannot get the audio device autosuspend delay,
4951                  * a fixed 4S interval will be used. Since 3S is the
4952                  * audio controller's default autosuspend delay setting,
4953                  * the 4S used here is guaranteed to cover it.
4954                  */
4955                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4956
4957         while (!pm_runtime_status_suspended(&(p->dev))) {
4958                 if (!pm_runtime_suspend(&(p->dev)))
4959                         break;
4960
4961                 if (expires < ktime_get_mono_fast_ns()) {
4962                         dev_warn(adev->dev, "failed to suspend display audio\n");
4963                         /* TODO: abort the succeeding gpu reset? */
4964                         return -ETIMEDOUT;
4965                 }
4966         }
4967
4968         pm_runtime_disable(&(p->dev));
4969
4970         return 0;
4971 }
4972
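/**
 * amdgpu_device_recheck_guilty_jobs - find the job that really hung the GPU
 *
 * @adev: amdgpu_device pointer
 * @device_list_handle: list of devices taking part in the reset
 * @reset_context: common reset context for the reset being performed
 *
 * Used with amdgpu_gpu_recovery == 2: for every ring the first pending job is
 * resubmitted with its karma cleared and its hardware fence is waited on.  A
 * job that times out is marked guilty and another hardware reset is
 * performed; a job that signals is completed and removed from the ring so
 * that innocent jobs are not punished for a hang elsewhere.
 */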
4973 static void amdgpu_device_recheck_guilty_jobs(
4974         struct amdgpu_device *adev, struct list_head *device_list_handle,
4975         struct amdgpu_reset_context *reset_context)
4976 {
4977         int i, r = 0;
4978
4979         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4980                 struct amdgpu_ring *ring = adev->rings[i];
4981                 int ret = 0;
4982                 struct drm_sched_job *s_job;
4983
4984                 if (!ring || !ring->sched.thread)
4985                         continue;
4986
4987                 s_job = list_first_entry_or_null(&ring->sched.pending_list,
4988                                 struct drm_sched_job, list);
4989                 if (s_job == NULL)
4990                         continue;
4991
4992                 /* clear the job's guilty flag and depend on the following step to decide the real one */
4993                 drm_sched_reset_karma(s_job);
4994                 /* the real bad job will be resubmitted twice, so add a dma_fence_get
4995                  * to make sure the fence reference count stays balanced */
4996                 dma_fence_get(s_job->s_fence->parent);
4997                 drm_sched_resubmit_jobs_ext(&ring->sched, 1);
4998
4999                 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
5000                 if (ret == 0) { /* timeout */
5001                         DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
5002                                                 ring->sched.name, s_job->id);
5003
5004                         /* set guilty */
5005                         drm_sched_increase_karma(s_job);
5006 retry:
5007                         /* do hw reset */
5008                         if (amdgpu_sriov_vf(adev)) {
5009                                 amdgpu_virt_fini_data_exchange(adev);
5010                                 r = amdgpu_device_reset_sriov(adev, false);
5011                                 if (r)
5012                                         adev->asic_reset_res = r;
5013                         } else {
5014                                 clear_bit(AMDGPU_SKIP_HW_RESET,
5015                                           &reset_context->flags);
5016                                 r = amdgpu_do_asic_reset(device_list_handle,
5017                                                          reset_context);
5018                                 if (r && r == -EAGAIN)
5019                                         goto retry;
5020                         }
5021
5022                         /*
5023                          * add reset counter so that the following
5024                          * resubmitted job could flush vmid
5025                          */
5026                         atomic_inc(&adev->gpu_reset_counter);
5027                         continue;
5028                 }
5029
5030                 /* got the hw fence, signal finished fence */
5031                 atomic_dec(ring->sched.score);
5032                 dma_fence_put(s_job->s_fence->parent);
5033                 dma_fence_get(&s_job->s_fence->finished);
5034                 dma_fence_signal(&s_job->s_fence->finished);
5035                 dma_fence_put(&s_job->s_fence->finished);
5036
5037                 /* remove node from list and free the job */
5038                 spin_lock(&ring->sched.job_list_lock);
5039                 list_del_init(&s_job->list);
5040                 spin_unlock(&ring->sched.job_list_lock);
5041                 ring->sched.ops->free_job(s_job);
5042         }
5043 }
5044
5045 /**
5046  * amdgpu_device_gpu_recover_imp - reset the asic and recover scheduler
5047  *
5048  * @adev: amdgpu_device pointer
5049  * @job: which job triggered the hang
5050  *
5051  * Attempt to reset the GPU if it has hung (all ASICs).
5052  * Attempt a soft reset or a full reset and reinitialize the ASIC.
5053  * Returns 0 for success or an error on failure.
5054  */
5055
5056 int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
5057                               struct amdgpu_job *job)
5058 {
5059         struct list_head device_list, *device_list_handle =  NULL;
5060         bool job_signaled = false;
5061         struct amdgpu_hive_info *hive = NULL;
5062         struct amdgpu_device *tmp_adev = NULL;
5063         int i, r = 0;
5064         bool need_emergency_restart = false;
5065         bool audio_suspended = false;
5066         int tmp_vram_lost_counter;
5067         struct amdgpu_reset_context reset_context;
5068
5069         memset(&reset_context, 0, sizeof(reset_context));
5070
5071         /*
5072          * Special case: RAS triggered and full reset isn't supported
5073          */
5074         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5075
5076         /*
5077          * Flush RAM to disk so that after reboot
5078          * the user can read log and see why the system rebooted.
5079          */
5080         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5081                 DRM_WARN("Emergency reboot.");
5082
5083                 ksys_sync_helper();
5084                 emergency_restart();
5085         }
5086
5087         dev_info(adev->dev, "GPU %s begin!\n",
5088                 need_emergency_restart ? "jobs stop":"reset");
5089
5090         if (!amdgpu_sriov_vf(adev))
5091                 hive = amdgpu_get_xgmi_hive(adev);
5092         if (hive)
5093                 mutex_lock(&hive->hive_lock);
5094
5095         reset_context.method = AMD_RESET_METHOD_NONE;
5096         reset_context.reset_req_dev = adev;
5097         reset_context.job = job;
5098         reset_context.hive = hive;
5099         clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5100
5101         /*
5102          * Build list of devices to reset.
5103          * In case we are in XGMI hive mode, resort the device list
5104          * to put adev in the 1st position.
5105          */
5106         INIT_LIST_HEAD(&device_list);
5107         if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5108                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
5109                         list_add_tail(&tmp_adev->reset_list, &device_list);
5110                 if (!list_is_first(&adev->reset_list, &device_list))
5111                         list_rotate_to_front(&adev->reset_list, &device_list);
5112                 device_list_handle = &device_list;
5113         } else {
5114                 list_add_tail(&adev->reset_list, &device_list);
5115                 device_list_handle = &device_list;
5116         }
5117
5118         /* We need to lock reset domain only once both for XGMI and single device */
5119         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5120                                     reset_list);
5121         amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5122
5123         /* block all schedulers and reset given job's ring */
5124         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5125
5126                 amdgpu_device_set_mp1_state(tmp_adev);
5127
5128                 /*
5129                  * Try to put the audio codec into suspend state
5130                  * before gpu reset started.
5131                  *
5132                  * The power domain of the graphics device is shared
5133                  * with the AZ power domain. Without this, we may
5134                  * change the audio hardware from behind the audio
5135                  * driver's back and trigger some audio codec errors.
5137                  */
5138                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5139                         audio_suspended = true;
5140
5141                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5142
5143                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5144
5145                 if (!amdgpu_sriov_vf(tmp_adev))
5146                         amdgpu_amdkfd_pre_reset(tmp_adev);
5147
5148                 /*
5149                  * Mark these ASICs to be reset as untracked first,
5150                  * and add them back after the reset has completed.
5151                  */
5152                 amdgpu_unregister_gpu_instance(tmp_adev);
5153
5154                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
5155
5156                 /* disable ras on ALL IPs */
5157                 if (!need_emergency_restart &&
5158                       amdgpu_device_ip_need_full_reset(tmp_adev))
5159                         amdgpu_ras_suspend(tmp_adev);
5160
5161                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5162                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5163
5164                         if (!ring || !ring->sched.thread)
5165                                 continue;
5166
5167                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5168
5169                         if (need_emergency_restart)
5170                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5171                 }
5172                 atomic_inc(&tmp_adev->gpu_reset_counter);
5173         }
5174
5175         if (need_emergency_restart)
5176                 goto skip_sched_resume;
5177
5178         /*
5179          * Must check the guilty job's signal here, since after this point all old
5180          * HW fences are force signaled.
5181          *
5182          * job->base holds a reference to parent fence
5183          */
5184         if (job && job->base.s_fence->parent &&
5185             dma_fence_is_signaled(job->base.s_fence->parent)) {
5186                 job_signaled = true;
5187                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5188                 goto skip_hw_reset;
5189         }
5190
5191 retry:  /* Rest of adevs pre asic reset from XGMI hive. */
5192         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5193                 r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
5194                 /* TODO: Should we stop? */
5195                 if (r) {
5196                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5197                                   r, adev_to_drm(tmp_adev)->unique);
5198                         tmp_adev->asic_reset_res = r;
5199                 }
5200         }
5201
5202         tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
5203         /* Actual ASIC resets if needed.*/
5204         /* Host driver will handle XGMI hive reset for SRIOV */
5205         if (amdgpu_sriov_vf(adev)) {
5206                 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5207                 if (r)
5208                         adev->asic_reset_res = r;
5209         } else {
5210                 r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
5211                 if (r && r == -EAGAIN)
5212                         goto retry;
5213         }
5214
5215 skip_hw_reset:
5216
5217         /* Post ASIC reset for all devs .*/
5218         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5219
5220                 /*
5221                  * Sometimes a later bad compute job can block a good gfx job as gfx
5222                  * Sometimes a later bad compute job can block a good gfx job, since the
5223                  * gfx and compute rings share internal GC hardware. We add an additional
5224                  * guilty-job recheck step to find the real guilty job: it synchronously
5225                  * resubmits the first pending job and waits for it to be signaled. If
5226                  * the wait times out, we identify it as the real guilty job.
5227                 if (amdgpu_gpu_recovery == 2 &&
5228                         !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5229                         amdgpu_device_recheck_guilty_jobs(
5230                                 tmp_adev, device_list_handle, &reset_context);
5231
5232                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5233                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5234
5235                         if (!ring || !ring->sched.thread)
5236                                 continue;
5237
5238                         /* No point in resubmitting jobs if we didn't HW reset */
5239                         if (!tmp_adev->asic_reset_res && !job_signaled)
5240                                 drm_sched_resubmit_jobs(&ring->sched);
5241
5242                         drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5243                 }
5244
5245                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5246                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5247                 }
5248
5249                 if (tmp_adev->asic_reset_res)
5250                         r = tmp_adev->asic_reset_res;
5251
5252                 tmp_adev->asic_reset_res = 0;
5253
5254                 if (r) {
5255                         /* bad news, how to tell it to userspace ? */
5256                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5257                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5258                 } else {
5259                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5260                         if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5261                                 DRM_WARN("smart shift update failed\n");
5262                 }
5263         }
5264
5265 skip_sched_resume:
5266         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5267                 /* unlock kfd: SRIOV would do it separately */
5268                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5269                         amdgpu_amdkfd_post_reset(tmp_adev);
5270
5271                 /* kfd_post_reset will do nothing if kfd device is not initialized,
5272                  * need to bring up kfd here if it was not initialized before
5273                  */
5274                 if (!adev->kfd.init_complete)
5275                         amdgpu_amdkfd_device_init(adev);
5276
5277                 if (audio_suspended)
5278                         amdgpu_device_resume_display_audio(tmp_adev);
5279
5280                 amdgpu_device_unset_mp1_state(tmp_adev);
5281         }
5282
5283         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5284                                             reset_list);
5285         amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5286
5287         if (hive) {
5288                 mutex_unlock(&hive->hive_lock);
5289                 amdgpu_put_xgmi_hive(hive);
5290         }
5291
5292         if (r)
5293                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5294         return r;
5295 }
5296
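/*
 * Wrapper used to funnel a GPU recovery request through the reset domain's
 * ordered workqueue; carries the device, the offending job and the result.
 */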
5297 struct amdgpu_recover_work_struct {
5298         struct work_struct base;
5299         struct amdgpu_device *adev;
5300         struct amdgpu_job *job;
5301         int ret;
5302 };
5303
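/* Work callback that actually runs the GPU recovery on the reset domain wq */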
5304 static void amdgpu_device_queue_gpu_recover_work(struct work_struct *work)
5305 {
5306         struct amdgpu_recover_work_struct *recover_work = container_of(work, struct amdgpu_recover_work_struct, base);
5307
5308         recover_work->ret = amdgpu_device_gpu_recover_imp(recover_work->adev, recover_work->job);
5309 }
5310 /*
5311  * Serialize gpu recovery into the reset domain's single threaded wq
5312  */
5313 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5314                                     struct amdgpu_job *job)
5315 {
5316         struct amdgpu_recover_work_struct work = {.adev = adev, .job = job};
5317
5318         INIT_WORK(&work.base, amdgpu_device_queue_gpu_recover_work);
5319
5320         if (!amdgpu_reset_domain_schedule(adev->reset_domain, &work.base))
5321                 return -EAGAIN;
5322
5323         flush_work(&work.base);
5324
5325         return work.ret;
5326 }
5327
5328 /**
5329  * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
5330  *
5331  * @adev: amdgpu_device pointer
5332  *
5333  * Fetches and stores in the driver the PCIe capabilities (gen speed
5334  * and lanes) of the slot the device is in. Handles APUs and
5335  * virtualized environments where PCIE config space may not be available.
5336  */
5337 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5338 {
5339         struct pci_dev *pdev;
5340         enum pci_bus_speed speed_cap, platform_speed_cap;
5341         enum pcie_link_width platform_link_width;
5342
5343         if (amdgpu_pcie_gen_cap)
5344                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5345
5346         if (amdgpu_pcie_lane_cap)
5347                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5348
5349         /* covers APUs as well */
5350         if (pci_is_root_bus(adev->pdev->bus)) {
5351                 if (adev->pm.pcie_gen_mask == 0)
5352                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5353                 if (adev->pm.pcie_mlw_mask == 0)
5354                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5355                 return;
5356         }
5357
5358         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5359                 return;
5360
5361         pcie_bandwidth_available(adev->pdev, NULL,
5362                                  &platform_speed_cap, &platform_link_width);
5363
5364         if (adev->pm.pcie_gen_mask == 0) {
5365                 /* asic caps */
5366                 pdev = adev->pdev;
5367                 speed_cap = pcie_get_speed_cap(pdev);
5368                 if (speed_cap == PCI_SPEED_UNKNOWN) {
5369                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5370                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5371                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5372                 } else {
5373                         if (speed_cap == PCIE_SPEED_32_0GT)
5374                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5375                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5376                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5377                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5378                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5379                         else if (speed_cap == PCIE_SPEED_16_0GT)
5380                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5381                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5382                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5383                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5384                         else if (speed_cap == PCIE_SPEED_8_0GT)
5385                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5386                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5387                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5388                         else if (speed_cap == PCIE_SPEED_5_0GT)
5389                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5390                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5391                         else
5392                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5393                 }
5394                 /* platform caps */
5395                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5396                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5397                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5398                 } else {
5399                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
5400                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5401                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5402                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5403                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5404                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5405                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5406                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5407                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5408                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5409                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5410                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5411                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5412                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5413                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5414                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5415                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5416                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5417                         else
5418                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5419
5420                 }
5421         }
5422         if (adev->pm.pcie_mlw_mask == 0) {
5423                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5424                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5425                 } else {
5426                         switch (platform_link_width) {
5427                         case PCIE_LNK_X32:
5428                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5429                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5430                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5431                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5432                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5433                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5434                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5435                                 break;
5436                         case PCIE_LNK_X16:
5437                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5438                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5439                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5440                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5441                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5442                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5443                                 break;
5444                         case PCIE_LNK_X12:
5445                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5446                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5447                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5448                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5449                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5450                                 break;
5451                         case PCIE_LNK_X8:
5452                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5453                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5454                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5455                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5456                                 break;
5457                         case PCIE_LNK_X4:
5458                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5459                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5460                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5461                                 break;
5462                         case PCIE_LNK_X2:
5463                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5464                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5465                                 break;
5466                         case PCIE_LNK_X1:
5467                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5468                                 break;
5469                         default:
5470                                 break;
5471                         }
5472                 }
5473         }
5474 }
5475
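/**
 * amdgpu_device_baco_enter - enter BACO (Bus Active, Chip Off)
 *
 * @dev: drm_device pointer
 *
 * Disables the doorbell interrupt when RAS is enabled and asks the DPM code
 * to put the asic into the BACO state.
 * Returns 0 on success, negative error code on failure.
 */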
5476 int amdgpu_device_baco_enter(struct drm_device *dev)
5477 {
5478         struct amdgpu_device *adev = drm_to_adev(dev);
5479         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5480
5481         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5482                 return -ENOTSUPP;
5483
5484         if (ras && adev->ras_enabled &&
5485             adev->nbio.funcs->enable_doorbell_interrupt)
5486                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5487
5488         return amdgpu_dpm_baco_enter(adev);
5489 }
5490
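/**
 * amdgpu_device_baco_exit - exit BACO (Bus Active, Chip Off)
 *
 * @dev: drm_device pointer
 *
 * Asks the DPM code to bring the asic out of the BACO state, re-enables the
 * doorbell interrupt when RAS is enabled and clears any pending doorbell
 * interrupt in passthrough configurations.
 * Returns 0 on success, negative error code on failure.
 */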
5491 int amdgpu_device_baco_exit(struct drm_device *dev)
5492 {
5493         struct amdgpu_device *adev = drm_to_adev(dev);
5494         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5495         int ret = 0;
5496
5497         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5498                 return -ENOTSUPP;
5499
5500         ret = amdgpu_dpm_baco_exit(adev);
5501         if (ret)
5502                 return ret;
5503
5504         if (ras && adev->ras_enabled &&
5505             adev->nbio.funcs->enable_doorbell_interrupt)
5506                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5507
5508         if (amdgpu_passthrough(adev) &&
5509             adev->nbio.funcs->clear_doorbell_interrupt)
5510                 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5511
5512         return 0;
5513 }
5514
5515 /**
5516  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5517  * @pdev: PCI device struct
5518  * @state: PCI channel state
5519  *
5520  * Description: Called when a PCI error is detected.
5521  *
5522  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5523  */
5524 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5525 {
5526         struct drm_device *dev = pci_get_drvdata(pdev);
5527         struct amdgpu_device *adev = drm_to_adev(dev);
5528         int i;
5529
5530         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5531
5532         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5533                 DRM_WARN("No support for XGMI hive yet...");
5534                 return PCI_ERS_RESULT_DISCONNECT;
5535         }
5536
5537         adev->pci_channel_state = state;
5538
5539         switch (state) {
5540         case pci_channel_io_normal:
5541                 return PCI_ERS_RESULT_CAN_RECOVER;
5542         /* Fatal error, prepare for slot reset */
5543         case pci_channel_io_frozen:
5544                 /*
5545                  * Locking adev->reset_domain->sem will prevent any external access
5546                  * to GPU during PCI error recovery
5547                  */
5548                 amdgpu_device_lock_reset_domain(adev->reset_domain);
5549                 amdgpu_device_set_mp1_state(adev);
5550
5551                 /*
5552                  * Block any work scheduling as we do for regular GPU reset
5553                  * for the duration of the recovery
5554                  */
5555                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5556                         struct amdgpu_ring *ring = adev->rings[i];
5557
5558                         if (!ring || !ring->sched.thread)
5559                                 continue;
5560
5561                         drm_sched_stop(&ring->sched, NULL);
5562                 }
5563                 atomic_inc(&adev->gpu_reset_counter);
5564                 return PCI_ERS_RESULT_NEED_RESET;
5565         case pci_channel_io_perm_failure:
5566                 /* Permanent error, prepare for device removal */
5567                 return PCI_ERS_RESULT_DISCONNECT;
5568         }
5569
5570         return PCI_ERS_RESULT_NEED_RESET;
5571 }
5572
5573 /**
5574  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5575  * @pdev: pointer to PCI device
5576  */
5577 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5578 {
5579
5580         DRM_INFO("PCI error: mmio enabled callback!!\n");
5581
5582         /* TODO - dump whatever for debugging purposes */
5583
5584         /* This is called only if amdgpu_pci_error_detected returns
5585          * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5586          * works, no need to reset slot.
5587          */
5588
5589         return PCI_ERS_RESULT_RECOVERED;
5590 }
5591
5592 /**
5593  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5594  * @pdev: PCI device struct
5595  *
5596  * Description: This routine is called by the pci error recovery
5597  * code after the PCI slot has been reset, just before we
5598  * should resume normal operations.
5599  */
5600 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5601 {
5602         struct drm_device *dev = pci_get_drvdata(pdev);
5603         struct amdgpu_device *adev = drm_to_adev(dev);
5604         int r, i;
5605         struct amdgpu_reset_context reset_context;
5606         u32 memsize;
5607         struct list_head device_list;
5608
5609         DRM_INFO("PCI error: slot reset callback!!\n");
5610
5611         memset(&reset_context, 0, sizeof(reset_context));
5612
5613         INIT_LIST_HEAD(&device_list);
5614         list_add_tail(&adev->reset_list, &device_list);
5615
5616         /* wait for asic to come out of reset */
5617         msleep(500);
5618
5619         /* Restore PCI config space */
5620         amdgpu_device_load_pci_state(pdev);
5621
5622         /* confirm ASIC came out of reset */
5623         for (i = 0; i < adev->usec_timeout; i++) {
5624                 memsize = amdgpu_asic_get_config_memsize(adev);
5625
5626                 if (memsize != 0xffffffff)
5627                         break;
5628                 udelay(1);
5629         }
5630         if (memsize == 0xffffffff) {
5631                 r = -ETIME;
5632                 goto out;
5633         }
5634
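        /*
         * The slot has already been reset by the PCI error recovery core,
         * so request a full re-init of the IP blocks but skip another
         * HW reset.
         */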
5635         reset_context.method = AMD_RESET_METHOD_NONE;
5636         reset_context.reset_req_dev = adev;
5637         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5638         set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5639
5640         adev->no_hw_access = true;
5641         r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5642         adev->no_hw_access = false;
5643         if (r)
5644                 goto out;
5645
5646         r = amdgpu_do_asic_reset(&device_list, &reset_context);
5647
5648 out:
5649         if (!r) {
5650                 if (amdgpu_device_cache_pci_state(adev->pdev))
5651                         pci_restore_state(adev->pdev);
5652
5653                 DRM_INFO("PCIe error recovery succeeded\n");
5654         } else {
5655                 DRM_ERROR("PCIe error recovery failed, err:%d\n", r);
5656                 amdgpu_device_unset_mp1_state(adev);
5657                 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5658         }
5659
5660         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5661 }
5662
5663 /**
5664  * amdgpu_pci_resume() - resume normal ops after PCI reset
5665  * @pdev: pointer to PCI device
5666  *
5667  * Called when the error recovery driver tells us that it's
5668  * OK to resume normal operation.
5669  */
5670 void amdgpu_pci_resume(struct pci_dev *pdev)
5671 {
5672         struct drm_device *dev = pci_get_drvdata(pdev);
5673         struct amdgpu_device *adev = drm_to_adev(dev);
5674         int i;
5675
5676
5677         DRM_INFO("PCI error: resume callback!!\n");
5678
5679         /* Only continue execution for the case of pci_channel_io_frozen */
5680         if (adev->pci_channel_state != pci_channel_io_frozen)
5681                 return;
5682
5683         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5684                 struct amdgpu_ring *ring = adev->rings[i];
5685
5686                 if (!ring || !ring->sched.thread)
5687                         continue;
5688
5689
5690                 drm_sched_resubmit_jobs(&ring->sched);
5691                 drm_sched_start(&ring->sched, true);
5692         }
5693
5694         amdgpu_device_unset_mp1_state(adev);
5695         amdgpu_device_unlock_reset_domain(adev->reset_domain);
5696 }
5697
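/**
 * amdgpu_device_cache_pci_state - cache the PCI configuration space
 * @pdev: PCI device struct
 *
 * Saves the PCI configuration space of the device and stores it in
 * adev->pci_state so that it can be restored after a bus or slot reset.
 *
 * Return: true on success, false if saving or storing the state failed.
 */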
5698 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5699 {
5700         struct drm_device *dev = pci_get_drvdata(pdev);
5701         struct amdgpu_device *adev = drm_to_adev(dev);
5702         int r;
5703
5704         r = pci_save_state(pdev);
5705         if (!r) {
5706                 kfree(adev->pci_state);
5707
5708                 adev->pci_state = pci_store_saved_state(pdev);
5709
5710                 if (!adev->pci_state) {
5711                         DRM_ERROR("Failed to store PCI saved state\n");
5712                         return false;
5713                 }
5714         } else {
5715                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5716                 return false;
5717         }
5718
5719         return true;
5720 }
5721
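/**
 * amdgpu_device_load_pci_state - restore the cached PCI configuration space
 * @pdev: PCI device struct
 *
 * Loads the PCI configuration space previously cached by
 * amdgpu_device_cache_pci_state() and restores it to the device.
 *
 * Return: true on success, false if no cached state exists or loading it failed.
 */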
5722 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5723 {
5724         struct drm_device *dev = pci_get_drvdata(pdev);
5725         struct amdgpu_device *adev = drm_to_adev(dev);
5726         int r;
5727
5728         if (!adev->pci_state)
5729                 return false;
5730
5731         r = pci_load_saved_state(pdev, adev->pci_state);
5732
5733         if (!r) {
5734                 pci_restore_state(pdev);
5735         } else {
5736                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5737                 return false;
5738         }
5739
5740         return true;
5741 }
5742
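/**
 * amdgpu_device_flush_hdp - flush the HDP (Host Data Path) cache
 * @adev: amdgpu_device pointer
 * @ring: ring on which to emit the flush, or NULL to use the ASIC-level flush
 *
 * The flush is skipped on APUs (when built for X86_64) and on ASICs whose
 * memory controller is directly connected to the CPU.
 */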
5743 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5744                 struct amdgpu_ring *ring)
5745 {
5746 #ifdef CONFIG_X86_64
5747         if (adev->flags & AMD_IS_APU)
5748                 return;
5749 #endif
5750         if (adev->gmc.xgmi.connected_to_cpu)
5751                 return;
5752
5753         if (ring && ring->funcs->emit_hdp_flush)
5754                 amdgpu_ring_emit_hdp_flush(ring);
5755         else
5756                 amdgpu_asic_flush_hdp(adev, ring);
5757 }
5758
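/**
 * amdgpu_device_invalidate_hdp - invalidate the HDP (Host Data Path) cache
 * @adev: amdgpu_device pointer
 * @ring: ring passed to the ASIC invalidate callback, may be NULL
 *
 * The invalidation is skipped on APUs (when built for X86_64) and on ASICs
 * whose memory controller is directly connected to the CPU.
 */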
5759 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5760                 struct amdgpu_ring *ring)
5761 {
5762 #ifdef CONFIG_X86_64
5763         if (adev->flags & AMD_IS_APU)
5764                 return;
5765 #endif
5766         if (adev->gmc.xgmi.connected_to_cpu)
5767                 return;
5768
5769         amdgpu_asic_invalidate_hdp(adev, ring);
5770 }
5771
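/**
 * amdgpu_in_reset - check whether a GPU reset is in progress
 * @adev: amdgpu_device pointer
 *
 * Return: non-zero if the device's reset domain is currently in GPU reset,
 * zero otherwise.
 */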
5772 int amdgpu_in_reset(struct amdgpu_device *adev)
5773 {
5774         return atomic_read(&adev->reset_domain->in_gpu_reset);
5775 }
5776
5777 /**
5778  * amdgpu_device_halt() - bring hardware to some kind of halt state
5779  *
5780  * @adev: amdgpu_device pointer
5781  *
5782  * Bring the hardware to a halt state so that nothing can touch it any
5783  * more. This helps preserve the error context when an error occurs.
5784  * Compared to a simple hang, the system stays stable, at least for SSH
5785  * access, so it should then be trivial to inspect the hardware state
5786  * and see what is going on. Implemented as follows:
5787  *
5788  * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc),
5789  *    clears all CPU mappings to the device and disallows remappings through page faults
5790  * 2. amdgpu_irq_disable_all() disables all interrupts
5791  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5792  * 4. set adev->no_hw_access to avoid potential crashes after step 5
5793  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5794  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5795  *    flush any in-flight DMA operations
5796  */
5797 void amdgpu_device_halt(struct amdgpu_device *adev)
5798 {
5799         struct pci_dev *pdev = adev->pdev;
5800         struct drm_device *ddev = adev_to_drm(adev);
5801
5802         drm_dev_unplug(ddev);
5803
5804         amdgpu_irq_disable_all(adev);
5805
5806         amdgpu_fence_driver_hw_fini(adev);
5807
5808         adev->no_hw_access = true;
5809
5810         amdgpu_device_unmap_mmio(adev);
5811
5812         pci_disable_device(pdev);
5813         pci_wait_for_pending_transaction(pdev);
5814 }
5815
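/**
 * amdgpu_device_pcie_port_rreg - read a PCIE port register
 * @adev: amdgpu_device pointer
 * @reg: register dword offset
 *
 * Indirect read through the NBIO index/data pair: the register offset is
 * written to the index register and the value is then read back from the
 * data register, all under pcie_idx_lock.
 *
 * Return: the register value.
 */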
5816 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5817                                 u32 reg)
5818 {
5819         unsigned long flags, address, data;
5820         u32 r;
5821
5822         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5823         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5824
5825         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5826         WREG32(address, reg * 4);
5827         (void)RREG32(address);
5828         r = RREG32(data);
5829         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5830         return r;
5831 }
5832
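/**
 * amdgpu_device_pcie_port_wreg - write a PCIE port register
 * @adev: amdgpu_device pointer
 * @reg: register dword offset
 * @v: value to write
 *
 * Indirect write through the NBIO index/data pair, with a read-back of the
 * data register to post the write, all under pcie_idx_lock.
 */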
5833 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
5834                                 u32 reg, u32 v)
5835 {
5836         unsigned long flags, address, data;
5837
5838         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5839         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5840
5841         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5842         WREG32(address, reg * 4);
5843         (void)RREG32(address);
5844         WREG32(data, v);
5845         (void)RREG32(data);
5846         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5847 }