d38ceaf9
AD
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
0875dc9e 28#include <linux/kthread.h>
d38ceaf9
AD
29#include <linux/console.h>
30#include <linux/slab.h>
31#include <linux/debugfs.h>
32#include <drm/drmP.h>
33#include <drm/drm_crtc_helper.h>
4562236b 34#include <drm/drm_atomic_helper.h>
d38ceaf9
AD
35#include <drm/amdgpu_drm.h>
36#include <linux/vgaarb.h>
37#include <linux/vga_switcheroo.h>
38#include <linux/efi.h>
39#include "amdgpu.h"
f4b373f4 40#include "amdgpu_trace.h"
d38ceaf9
AD
41#include "amdgpu_i2c.h"
42#include "atom.h"
43#include "amdgpu_atombios.h"
a5bde2f9 44#include "amdgpu_atomfirmware.h"
d0dd7f0c 45#include "amd_pcie.h"
33f34802
KW
46#ifdef CONFIG_DRM_AMDGPU_SI
47#include "si.h"
48#endif
a2e73f56
AD
49#ifdef CONFIG_DRM_AMDGPU_CIK
50#include "cik.h"
51#endif
aaa36a97 52#include "vi.h"
460826e6 53#include "soc15.h"
d38ceaf9 54#include "bif/bif_4_1_d.h"
9accf2fd 55#include <linux/pci.h>
bec86378 56#include <linux/firmware.h>
89041940 57#include "amdgpu_vf_error.h"
d38ceaf9 58
ba997709 59#include "amdgpu_amdkfd.h"
d2f52ac8 60#include "amdgpu_pm.h"
d38ceaf9 61
e2a75f88 62MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
2d2e5e7e 63MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
e2a75f88 64
2dc80b00
S
65#define AMDGPU_RESUME_MS 2000
66
d38ceaf9
AD
67static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
68static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
4f0955fc 69static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
db95e218 70static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);
d38ceaf9
AD
71
72static const char *amdgpu_asic_name[] = {
da69c161
KW
73 "TAHITI",
74 "PITCAIRN",
75 "VERDE",
76 "OLAND",
77 "HAINAN",
d38ceaf9
AD
78 "BONAIRE",
79 "KAVERI",
80 "KABINI",
81 "HAWAII",
82 "MULLINS",
83 "TOPAZ",
84 "TONGA",
48299f95 85 "FIJI",
d38ceaf9 86 "CARRIZO",
139f4917 87 "STONEY",
2cc0c0b5
FC
88 "POLARIS10",
89 "POLARIS11",
c4642a47 90 "POLARIS12",
d4196f01 91 "VEGA10",
2ca8a5d2 92 "RAVEN",
d38ceaf9
AD
93 "LAST",
94};
95
96bool amdgpu_device_is_px(struct drm_device *dev)
97{
98 struct amdgpu_device *adev = dev->dev_private;
99
2f7d10b3 100 if (adev->flags & AMD_IS_PX)
d38ceaf9
AD
101 return true;
102 return false;
103}
104
105/*
106 * MMIO register access helper functions.
107 */
108uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
15d72fd7 109 uint32_t acc_flags)
d38ceaf9 110{
f4b373f4
TSD
111 uint32_t ret;
112
43ca8efa 113 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 114 return amdgpu_virt_kiq_rreg(adev, reg);
bc992ba5 115
15d72fd7 116 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
f4b373f4 117 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
d38ceaf9
AD
118 else {
119 unsigned long flags;
d38ceaf9
AD
120
121 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
122 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
123 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
124 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
d38ceaf9 125 }
f4b373f4
TSD
126 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
127 return ret;
d38ceaf9
AD
128}
129
130void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
15d72fd7 131 uint32_t acc_flags)
d38ceaf9 132{
f4b373f4 133 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
4e99a44e 134
47ed4e1c
KW
135 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
136 adev->last_mm_index = v;
137 }
138
43ca8efa 139 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 140 return amdgpu_virt_kiq_wreg(adev, reg, v);
bc992ba5 141
15d72fd7 142 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
d38ceaf9
AD
143 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
144 else {
145 unsigned long flags;
146
147 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
148 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
149 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
150 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
151 }
47ed4e1c
KW
152
153 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
154 udelay(500);
155 }
d38ceaf9
AD
156}
157
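/*
 * Usage sketch (illustrative, not part of the original file): most callers do
 * not invoke amdgpu_mm_rreg()/amdgpu_mm_wreg() directly but go through the
 * RREG32()/WREG32() macros from amdgpu.h, which pass no special access flags:
 *
 *	u32 tmp;
 *
 *	tmp = RREG32(mmSOME_REG);	expands to amdgpu_mm_rreg(adev, mmSOME_REG, 0)
 *	tmp |= 0x1;
 *	WREG32(mmSOME_REG, tmp);	expands to amdgpu_mm_wreg(adev, mmSOME_REG, tmp, 0)
 *
 * mmSOME_REG is a placeholder offset. Registers past rmmio_size take the
 * indexed mmMM_INDEX/mmMM_DATA path under mmio_idx_lock shown above.
 */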
158u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
159{
160 if ((reg * 4) < adev->rio_mem_size)
161 return ioread32(adev->rio_mem + (reg * 4));
162 else {
163 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
164 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
165 }
166}
167
168void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
169{
47ed4e1c
KW
170 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
171 adev->last_mm_index = v;
172 }
d38ceaf9
AD
173
174 if ((reg * 4) < adev->rio_mem_size)
175 iowrite32(v, adev->rio_mem + (reg * 4));
176 else {
177 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
178 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
179 }
47ed4e1c
KW
180
181 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
182 udelay(500);
183 }
d38ceaf9
AD
184}
185
186/**
187 * amdgpu_mm_rdoorbell - read a doorbell dword
188 *
189 * @adev: amdgpu_device pointer
190 * @index: doorbell index
191 *
192 * Returns the value in the doorbell aperture at the
193 * requested doorbell index (CIK).
194 */
195u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
196{
197 if (index < adev->doorbell.num_doorbells) {
198 return readl(adev->doorbell.ptr + index);
199 } else {
200 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
201 return 0;
202 }
203}
204
205/**
206 * amdgpu_mm_wdoorbell - write a doorbell dword
207 *
208 * @adev: amdgpu_device pointer
209 * @index: doorbell index
210 * @v: value to write
211 *
212 * Writes @v to the doorbell aperture at the
213 * requested doorbell index (CIK).
214 */
215void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
216{
217 if (index < adev->doorbell.num_doorbells) {
218 writel(v, adev->doorbell.ptr + index);
219 } else {
220 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
221 }
222}
223
832be404
KW
224/**
225 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
226 *
227 * @adev: amdgpu_device pointer
228 * @index: doorbell index
229 *
230 * Returns the value in the doorbell aperture at the
231 * requested doorbell index (VEGA10+).
232 */
233u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
234{
235 if (index < adev->doorbell.num_doorbells) {
236 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
237 } else {
238 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
239 return 0;
240 }
241}
242
243/**
244 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
245 *
246 * @adev: amdgpu_device pointer
247 * @index: doorbell index
248 * @v: value to write
249 *
250 * Writes @v to the doorbell aperture at the
251 * requested doorbell index (VEGA10+).
252 */
253void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
254{
255 if (index < adev->doorbell.num_doorbells) {
256 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
257 } else {
258 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
259 }
260}
261
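/*
 * Usage sketch (illustrative, not part of the original file): ring code rings
 * a doorbell through the WDOORBELL32()/WDOORBELL64() wrappers from amdgpu.h
 * rather than calling the helpers above directly, e.g. when committing a new
 * write pointer:
 *
 *	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 *
 * or, on VEGA10+ rings that use 64-bit doorbells:
 *
 *	WDOORBELL64(ring->doorbell_index, ring->wptr);
 */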
d38ceaf9
AD
262/**
263 * amdgpu_invalid_rreg - dummy reg read function
264 *
265 * @adev: amdgpu device pointer
266 * @reg: offset of register
267 *
268 * Dummy register read function. Used for register blocks
269 * that certain asics don't have (all asics).
270 * Returns the value in the register.
271 */
272static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
273{
274 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
275 BUG();
276 return 0;
277}
278
279/**
280 * amdgpu_invalid_wreg - dummy reg write function
281 *
282 * @adev: amdgpu device pointer
283 * @reg: offset of register
284 * @v: value to write to the register
285 *
286 * Dummy register write function. Used for register blocks
287 * that certain asics don't have (all asics).
288 */
289static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
290{
291 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
292 reg, v);
293 BUG();
294}
295
296/**
297 * amdgpu_block_invalid_rreg - dummy reg read function
298 *
299 * @adev: amdgpu device pointer
300 * @block: offset of instance
301 * @reg: offset of register
302 *
303 * Dummy register read function. Used for register blocks
304 * that certain asics don't have (all asics).
305 * Returns the value in the register.
306 */
307static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
308 uint32_t block, uint32_t reg)
309{
310 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
311 reg, block);
312 BUG();
313 return 0;
314}
315
316/**
317 * amdgpu_block_invalid_wreg - dummy reg write function
318 *
319 * @adev: amdgpu device pointer
320 * @block: offset of instance
321 * @reg: offset of register
322 * @v: value to write to the register
323 *
324 * Dummy register write function. Used for register blocks
325 * that certain asics don't have (all asics).
326 */
327static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
328 uint32_t block,
329 uint32_t reg, uint32_t v)
330{
331 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
332 reg, block, v);
333 BUG();
334}
335
336static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
337{
a4a02777
CK
338 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
339 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
340 &adev->vram_scratch.robj,
341 &adev->vram_scratch.gpu_addr,
342 (void **)&adev->vram_scratch.ptr);
d38ceaf9
AD
343}
344
345static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
346{
078af1a3 347 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
d38ceaf9
AD
348}
349
350/**
351 * amdgpu_program_register_sequence - program an array of registers.
352 *
353 * @adev: amdgpu_device pointer
354 * @registers: pointer to the register array
355 * @array_size: size of the register array
356 *
357 * Programs an array of registers with AND and OR masks.
358 * This is a helper for setting golden registers.
359 */
360void amdgpu_program_register_sequence(struct amdgpu_device *adev,
361 const u32 *registers,
362 const u32 array_size)
363{
364 u32 tmp, reg, and_mask, or_mask;
365 int i;
366
367 if (array_size % 3)
368 return;
369
370 for (i = 0; i < array_size; i +=3) {
371 reg = registers[i + 0];
372 and_mask = registers[i + 1];
373 or_mask = registers[i + 2];
374
375 if (and_mask == 0xffffffff) {
376 tmp = or_mask;
377 } else {
378 tmp = RREG32(reg);
379 tmp &= ~and_mask;
380 tmp |= or_mask;
381 }
382 WREG32(reg, tmp);
383 }
384}
385
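/*
 * Example (illustrative, with made-up register names): golden register tables
 * are flat arrays of {offset, AND mask, OR mask} triplets, so a typical caller
 * looks like:
 *
 *	static const u32 golden_settings_example[] = {
 *		mmSOME_REG,  0xffffffff, 0x00000100,	full overwrite (and_mask == 0xffffffff)
 *		mmOTHER_REG, 0x0000000f, 0x00000002,	read-modify-write of the masked bits
 *	};
 *
 *	amdgpu_program_register_sequence(adev, golden_settings_example,
 *					 ARRAY_SIZE(golden_settings_example));
 */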
386void amdgpu_pci_config_reset(struct amdgpu_device *adev)
387{
388 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
389}
390
391/*
392 * GPU doorbell aperture helpers function.
393 */
394/**
395 * amdgpu_doorbell_init - Init doorbell driver information.
396 *
397 * @adev: amdgpu_device pointer
398 *
399 * Init doorbell driver information (CIK)
400 * Returns 0 on success, error on failure.
401 */
402static int amdgpu_doorbell_init(struct amdgpu_device *adev)
403{
705e519e
CK
404 /* No doorbell on SI hardware generation */
405 if (adev->asic_type < CHIP_BONAIRE) {
406 adev->doorbell.base = 0;
407 adev->doorbell.size = 0;
408 adev->doorbell.num_doorbells = 0;
409 adev->doorbell.ptr = NULL;
410 return 0;
411 }
412
d38ceaf9
AD
413 /* doorbell bar mapping */
414 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
415 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
416
edf600da 417 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
d38ceaf9
AD
418 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
419 if (adev->doorbell.num_doorbells == 0)
420 return -EINVAL;
421
8972e5d2
CK
422 adev->doorbell.ptr = ioremap(adev->doorbell.base,
423 adev->doorbell.num_doorbells *
424 sizeof(u32));
425 if (adev->doorbell.ptr == NULL)
d38ceaf9 426 return -ENOMEM;
d38ceaf9
AD
427
428 return 0;
429}
430
431/**
432 * amdgpu_doorbell_fini - Tear down doorbell driver information.
433 *
434 * @adev: amdgpu_device pointer
435 *
436 * Tear down doorbell driver information (CIK)
437 */
438static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
439{
440 iounmap(adev->doorbell.ptr);
441 adev->doorbell.ptr = NULL;
442}
443
444/**
445 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
446 * setup amdkfd
447 *
448 * @adev: amdgpu_device pointer
449 * @aperture_base: output returning doorbell aperture base physical address
450 * @aperture_size: output returning doorbell aperture size in bytes
451 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
452 *
453 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
454 * takes doorbells required for its own rings and reports the setup to amdkfd.
455 * amdgpu reserved doorbells are at the start of the doorbell aperture.
456 */
457void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
458 phys_addr_t *aperture_base,
459 size_t *aperture_size,
460 size_t *start_offset)
461{
462 /*
463 * The first num_doorbells are used by amdgpu.
464 * amdkfd takes whatever's left in the aperture.
465 */
466 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
467 *aperture_base = adev->doorbell.base;
468 *aperture_size = adev->doorbell.size;
469 *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
470 } else {
471 *aperture_base = 0;
472 *aperture_size = 0;
473 *start_offset = 0;
474 }
475}
476
477/*
478 * amdgpu_wb_*()
455a7bc2 479 * Writeback is the method by which the GPU updates special pages in memory
ea81a173 480 * with the status of certain GPU events (fences, ring pointers, etc.).
d38ceaf9
AD
481 */
482
483/**
484 * amdgpu_wb_fini - Disable Writeback and free memory
485 *
486 * @adev: amdgpu_device pointer
487 *
488 * Disables Writeback and frees the Writeback memory (all asics).
489 * Used at driver shutdown.
490 */
491static void amdgpu_wb_fini(struct amdgpu_device *adev)
492{
493 if (adev->wb.wb_obj) {
a76ed485
AD
494 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
495 &adev->wb.gpu_addr,
496 (void **)&adev->wb.wb);
d38ceaf9
AD
497 adev->wb.wb_obj = NULL;
498 }
499}
500
501/**
502 * amdgpu_wb_init- Init Writeback driver info and allocate memory
503 *
504 * @adev: amdgpu_device pointer
505 *
455a7bc2 506 * Initializes writeback and allocates writeback memory (all asics).
d38ceaf9
AD
507 * Used at driver startup.
508 * Returns 0 on success or a negative error code on failure.
509 */
510static int amdgpu_wb_init(struct amdgpu_device *adev)
511{
512 int r;
513
514 if (adev->wb.wb_obj == NULL) {
97407b63
AD
515 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
516 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
a76ed485
AD
517 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
518 &adev->wb.wb_obj, &adev->wb.gpu_addr,
519 (void **)&adev->wb.wb);
d38ceaf9
AD
520 if (r) {
521 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
522 return r;
523 }
d38ceaf9
AD
524
525 adev->wb.num_wb = AMDGPU_MAX_WB;
526 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
527
528 /* clear wb memory */
60a970a6 529 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
d38ceaf9
AD
530 }
531
532 return 0;
533}
534
535/**
536 * amdgpu_wb_get - Allocate a wb entry
537 *
538 * @adev: amdgpu_device pointer
539 * @wb: wb index
540 *
541 * Allocate a wb slot for use by the driver (all asics).
542 * Returns 0 on success or -EINVAL on failure.
543 */
544int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
545{
546 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
d38ceaf9 547
97407b63 548 if (offset < adev->wb.num_wb) {
7014285a 549 __set_bit(offset, adev->wb.used);
63ae07ca 550 *wb = offset << 3; /* convert to dw offset */
0915fdbc
ML
551 return 0;
552 } else {
553 return -EINVAL;
554 }
555}
556
d38ceaf9
AD
557/**
558 * amdgpu_wb_free - Free a wb entry
559 *
560 * @adev: amdgpu_device pointer
561 * @wb: wb index
562 *
563 * Free a wb slot allocated for use by the driver (all asics)
564 */
565void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
566{
567 if (wb < adev->wb.num_wb)
63ae07ca 568 __clear_bit(wb >> 3, adev->wb.used);
d38ceaf9
AD
569}
570
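/*
 * Usage sketch (illustrative, not part of the original file): a ring typically
 * grabs a writeback slot once at init time and frees it on teardown. The index
 * returned by amdgpu_wb_get() is already a dword offset into the WB buffer:
 *
 *	u32 wb;
 *
 *	if (amdgpu_wb_get(adev, &wb) == 0) {
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);	GPU address of the slot
 *		volatile u32 *cpu_ptr = &adev->wb.wb[wb];	CPU-side view of the slot
 *		...
 *		amdgpu_wb_free(adev, wb);
 *	}
 */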
571/**
572 * amdgpu_vram_location - try to find VRAM location
573 * @adev: amdgpu device structure holding all necessary information
574 * @mc: memory controller structure holding memory information
575 * @base: base address at which to put VRAM
576 *
455a7bc2 577 * Function will try to place VRAM at base address provided
d38ceaf9
AD
578 * as parameter (which is so far either PCI aperture address or
579 * for IGP TOM base address).
580 *
581 * If there is not enough space to fit the invisible VRAM in the 32-bit
582 * address space, then we limit the VRAM size to the aperture.
583 *
584 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
585 * this shouldn't be a problem as we are using the PCI aperture as a reference.
586 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
587 * not IGP.
588 *
589 * Note: we use mc_vram_size as on some boards we need to program the mc to
590 * cover the whole aperture even if VRAM size is smaller than the aperture size
591 * (Novell bug 204882, along with lots of Ubuntu ones)
592 *
593 * Note: when limiting vram it's safe to overwrite real_vram_size because
594 * we are not in the case where real_vram_size is smaller than mc_vram_size (i.e.
595 * not affected by the bogus hw of Novell bug 204882, along with lots of Ubuntu
596 * ones)
597 *
598 * Note: IGP TOM addr should be the same as the aperture addr, we don't
455a7bc2 599 * explicitly check for that though.
d38ceaf9
AD
600 *
601 * FIXME: when reducing VRAM size align new size on power of 2.
602 */
603void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
604{
605 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
606
607 mc->vram_start = base;
608 if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
609 dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
610 mc->real_vram_size = mc->aper_size;
611 mc->mc_vram_size = mc->aper_size;
612 }
613 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
614 if (limit && limit < mc->real_vram_size)
615 mc->real_vram_size = limit;
616 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
617 mc->mc_vram_size >> 20, mc->vram_start,
618 mc->vram_end, mc->real_vram_size >> 20);
619}
620
621/**
6f02a696 622 * amdgpu_gart_location - try to find GTT location
d38ceaf9
AD
623 * @adev: amdgpu device structure holding all necessary information
624 * @mc: memory controller structure holding memory information
625 *
626 * Function will try to place GTT before or after VRAM.
627 *
628 * If GTT size is bigger than the space left then we adjust GTT size.
629 * Thus this function will never fail.
630 *
631 * FIXME: when reducing GTT size align new size on power of 2.
632 */
6f02a696 633void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
d38ceaf9
AD
634{
635 u64 size_af, size_bf;
636
ed21c047
CK
637 size_af = adev->mc.mc_mask - mc->vram_end;
638 size_bf = mc->vram_start;
d38ceaf9 639 if (size_bf > size_af) {
6f02a696 640 if (mc->gart_size > size_bf) {
d38ceaf9 641 dev_warn(adev->dev, "limiting GTT\n");
6f02a696 642 mc->gart_size = size_bf;
d38ceaf9 643 }
6f02a696 644 mc->gart_start = 0;
d38ceaf9 645 } else {
6f02a696 646 if (mc->gart_size > size_af) {
d38ceaf9 647 dev_warn(adev->dev, "limiting GTT\n");
6f02a696 648 mc->gart_size = size_af;
d38ceaf9 649 }
6f02a696 650 mc->gart_start = mc->vram_end + 1;
d38ceaf9 651 }
6f02a696 652 mc->gart_end = mc->gart_start + mc->gart_size - 1;
d38ceaf9 653 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
6f02a696 654 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
d38ceaf9
AD
655}
656
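/*
 * Worked example (illustrative numbers): with 4 GB of VRAM placed at base 0
 * and gart_size = 1 GB, amdgpu_vram_location() gives vram_start = 0x0 and
 * vram_end = 0xFFFFFFFF. In amdgpu_gart_location() the space before VRAM
 * (size_bf = 0) is smaller than the space after it, so the GART lands right
 * above VRAM: gart_start = 0x100000000 and gart_end = 0x13FFFFFFF.
 */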
a05502e5
HC
657/*
658 * Firmware Reservation functions
659 */
660/**
661 * amdgpu_fw_reserve_vram_fini - free fw reserved vram
662 *
663 * @adev: amdgpu_device pointer
664 *
665 * free fw reserved vram if it has been reserved.
666 */
667void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
668{
669 amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
670 NULL, &adev->fw_vram_usage.va);
671}
672
673/**
674 * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
675 *
676 * @adev: amdgpu_device pointer
677 *
678 * create bo vram reservation from fw.
679 */
680int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
681{
682 int r = 0;
683 u64 gpu_addr;
684 u64 vram_size = adev->mc.visible_vram_size;
685
686 adev->fw_vram_usage.va = NULL;
687 adev->fw_vram_usage.reserved_bo = NULL;
688
689 if (adev->fw_vram_usage.size > 0 &&
690 adev->fw_vram_usage.size <= vram_size) {
691
692 r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
693 PAGE_SIZE, true, 0,
694 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
695 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
696 &adev->fw_vram_usage.reserved_bo);
697 if (r)
698 goto error_create;
699
700 r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
701 if (r)
702 goto error_reserve;
703 r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
704 AMDGPU_GEM_DOMAIN_VRAM,
705 adev->fw_vram_usage.start_offset,
706 (adev->fw_vram_usage.start_offset +
707 adev->fw_vram_usage.size), &gpu_addr);
708 if (r)
709 goto error_pin;
710 r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
711 &adev->fw_vram_usage.va);
712 if (r)
713 goto error_kmap;
714
715 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
716 }
717 return r;
718
719error_kmap:
720 amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
721error_pin:
722 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
723error_reserve:
724 amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
725error_create:
726 adev->fw_vram_usage.va = NULL;
727 adev->fw_vram_usage.reserved_bo = NULL;
728 return r;
729}
730
731
d38ceaf9
AD
732/*
733 * GPU helpers function.
734 */
735/**
c836fec5 736 * amdgpu_need_post - check if the hw needs post or not
d38ceaf9
AD
737 *
738 * @adev: amdgpu_device pointer
739 *
c836fec5
JQ
740 * Check if the asic has been initialized (all asics) at driver startup,
741 * or if post is needed after a hw reset is performed.
742 * Returns true if post is needed or false if not.
d38ceaf9 743 */
c836fec5 744bool amdgpu_need_post(struct amdgpu_device *adev)
d38ceaf9
AD
745{
746 uint32_t reg;
747
bec86378
ML
748 if (amdgpu_sriov_vf(adev))
749 return false;
750
751 if (amdgpu_passthrough(adev)) {
1da2c326
ML
752 /* for FIJI: in the whole GPU pass-through virtualization case, after VM reboot
753 * some old smc fw still needs the driver to do vPost, otherwise the gpu hangs;
754 * smc fw versions above 22.15 don't have this flaw, so we force
755 * vPost to be executed for smc versions below 22.15
bec86378
ML
756 */
757 if (adev->asic_type == CHIP_FIJI) {
758 int err;
759 uint32_t fw_ver;
760 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
761 /* force vPost if an error occurred */
762 if (err)
763 return true;
764
765 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1da2c326
ML
766 if (fw_ver < 0x00160e00)
767 return true;
bec86378 768 }
bec86378 769 }
91fe77eb 770
771 if (adev->has_hw_reset) {
772 adev->has_hw_reset = false;
773 return true;
774 }
775
776 /* bios scratch used on CIK+ */
777 if (adev->asic_type >= CHIP_BONAIRE)
778 return amdgpu_atombios_scratch_need_asic_init(adev);
779
780 /* check MEM_SIZE for older asics */
781 reg = amdgpu_asic_get_config_memsize(adev);
782
783 if ((reg != 0) && (reg != 0xffffffff))
784 return false;
785
786 return true;
bec86378
ML
787}
788
d38ceaf9
AD
789/**
790 * amdgpu_dummy_page_init - init dummy page used by the driver
791 *
792 * @adev: amdgpu_device pointer
793 *
794 * Allocate the dummy page used by the driver (all asics).
795 * This dummy page is used by the driver as a filler for gart entries
796 * when pages are taken out of the GART.
797 * Returns 0 on success, -ENOMEM on failure.
798 */
799int amdgpu_dummy_page_init(struct amdgpu_device *adev)
800{
801 if (adev->dummy_page.page)
802 return 0;
803 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
804 if (adev->dummy_page.page == NULL)
805 return -ENOMEM;
806 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
807 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
808 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
809 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
810 __free_page(adev->dummy_page.page);
811 adev->dummy_page.page = NULL;
812 return -ENOMEM;
813 }
814 return 0;
815}
816
817/**
818 * amdgpu_dummy_page_fini - free dummy page used by the driver
819 *
820 * @adev: amdgpu_device pointer
821 *
822 * Frees the dummy page used by the driver (all asics).
823 */
824void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
825{
826 if (adev->dummy_page.page == NULL)
827 return;
828 pci_unmap_page(adev->pdev, adev->dummy_page.addr,
829 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
830 __free_page(adev->dummy_page.page);
831 adev->dummy_page.page = NULL;
832}
833
834
835/* ATOM accessor methods */
836/*
837 * ATOM is an interpreted byte code stored in tables in the vbios. The
838 * driver registers callbacks to access registers and the interpreter
839 * in the driver parses the tables and executes them to program specific
840 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
841 * atombios.h, and atom.c
842 */
843
844/**
845 * cail_pll_read - read PLL register
846 *
847 * @info: atom card_info pointer
848 * @reg: PLL register offset
849 *
850 * Provides a PLL register accessor for the atom interpreter (r4xx+).
851 * Returns the value of the PLL register.
852 */
853static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
854{
855 return 0;
856}
857
858/**
859 * cail_pll_write - write PLL register
860 *
861 * @info: atom card_info pointer
862 * @reg: PLL register offset
863 * @val: value to write to the pll register
864 *
865 * Provides a PLL register accessor for the atom interpreter (r4xx+).
866 */
867static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
868{
869
870}
871
872/**
873 * cail_mc_read - read MC (Memory Controller) register
874 *
875 * @info: atom card_info pointer
876 * @reg: MC register offset
877 *
878 * Provides an MC register accessor for the atom interpreter (r4xx+).
879 * Returns the value of the MC register.
880 */
881static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
882{
883 return 0;
884}
885
886/**
887 * cail_mc_write - write MC (Memory Controller) register
888 *
889 * @info: atom card_info pointer
890 * @reg: MC register offset
891 * @val: value to write to the MC register
892 *
893 * Provides an MC register accessor for the atom interpreter (r4xx+).
894 */
895static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
896{
897
898}
899
900/**
901 * cail_reg_write - write MMIO register
902 *
903 * @info: atom card_info pointer
904 * @reg: MMIO register offset
905 * @val: value to write to the MMIO register
906 *
907 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
908 */
909static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
910{
911 struct amdgpu_device *adev = info->dev->dev_private;
912
913 WREG32(reg, val);
914}
915
916/**
917 * cail_reg_read - read MMIO register
918 *
919 * @info: atom card_info pointer
920 * @reg: MMIO register offset
921 *
922 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
923 * Returns the value of the MMIO register.
924 */
925static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
926{
927 struct amdgpu_device *adev = info->dev->dev_private;
928 uint32_t r;
929
930 r = RREG32(reg);
931 return r;
932}
933
934/**
935 * cail_ioreg_write - write IO register
936 *
937 * @info: atom card_info pointer
938 * @reg: IO register offset
939 * @val: value to write to the IO register
940 *
941 * Provides an IO register accessor for the atom interpreter (r4xx+).
942 */
943static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
944{
945 struct amdgpu_device *adev = info->dev->dev_private;
946
947 WREG32_IO(reg, val);
948}
949
950/**
951 * cail_ioreg_read - read IO register
952 *
953 * @info: atom card_info pointer
954 * @reg: IO register offset
955 *
956 * Provides an IO register accessor for the atom interpreter (r4xx+).
957 * Returns the value of the IO register.
958 */
959static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
960{
961 struct amdgpu_device *adev = info->dev->dev_private;
962 uint32_t r;
963
964 r = RREG32_IO(reg);
965 return r;
966}
967
5b41d94c
KR
968static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
969 struct device_attribute *attr,
970 char *buf)
971{
972 struct drm_device *ddev = dev_get_drvdata(dev);
973 struct amdgpu_device *adev = ddev->dev_private;
974 struct atom_context *ctx = adev->mode_info.atom_context;
975
976 return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
977}
978
979static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
980 NULL);
981
d38ceaf9
AD
982/**
983 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
984 *
985 * @adev: amdgpu_device pointer
986 *
987 * Frees the driver info and register access callbacks for the ATOM
988 * interpreter (r4xx+).
989 * Called at driver shutdown.
990 */
991static void amdgpu_atombios_fini(struct amdgpu_device *adev)
992{
89e0ec9f 993 if (adev->mode_info.atom_context) {
d38ceaf9 994 kfree(adev->mode_info.atom_context->scratch);
89e0ec9f
ML
995 kfree(adev->mode_info.atom_context->iio);
996 }
d38ceaf9
AD
997 kfree(adev->mode_info.atom_context);
998 adev->mode_info.atom_context = NULL;
999 kfree(adev->mode_info.atom_card_info);
1000 adev->mode_info.atom_card_info = NULL;
5b41d94c 1001 device_remove_file(adev->dev, &dev_attr_vbios_version);
d38ceaf9
AD
1002}
1003
1004/**
1005 * amdgpu_atombios_init - init the driver info and callbacks for atombios
1006 *
1007 * @adev: amdgpu_device pointer
1008 *
1009 * Initializes the driver info and register access callbacks for the
1010 * ATOM interpreter (r4xx+).
1011 * Returns 0 on success, -ENOMEM on failure.
1012 * Called at driver startup.
1013 */
1014static int amdgpu_atombios_init(struct amdgpu_device *adev)
1015{
1016 struct card_info *atom_card_info =
1017 kzalloc(sizeof(struct card_info), GFP_KERNEL);
5b41d94c 1018 int ret;
d38ceaf9
AD
1019
1020 if (!atom_card_info)
1021 return -ENOMEM;
1022
1023 adev->mode_info.atom_card_info = atom_card_info;
1024 atom_card_info->dev = adev->ddev;
1025 atom_card_info->reg_read = cail_reg_read;
1026 atom_card_info->reg_write = cail_reg_write;
1027 /* needed for iio ops */
1028 if (adev->rio_mem) {
1029 atom_card_info->ioreg_read = cail_ioreg_read;
1030 atom_card_info->ioreg_write = cail_ioreg_write;
1031 } else {
b64a18c5 1032 DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
d38ceaf9
AD
1033 atom_card_info->ioreg_read = cail_reg_read;
1034 atom_card_info->ioreg_write = cail_reg_write;
1035 }
1036 atom_card_info->mc_read = cail_mc_read;
1037 atom_card_info->mc_write = cail_mc_write;
1038 atom_card_info->pll_read = cail_pll_read;
1039 atom_card_info->pll_write = cail_pll_write;
1040
1041 adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
1042 if (!adev->mode_info.atom_context) {
1043 amdgpu_atombios_fini(adev);
1044 return -ENOMEM;
1045 }
1046
1047 mutex_init(&adev->mode_info.atom_context->mutex);
a5bde2f9
AD
1048 if (adev->is_atom_fw) {
1049 amdgpu_atomfirmware_scratch_regs_init(adev);
1050 amdgpu_atomfirmware_allocate_fb_scratch(adev);
1051 } else {
1052 amdgpu_atombios_scratch_regs_init(adev);
1053 amdgpu_atombios_allocate_fb_scratch(adev);
1054 }
5b41d94c
KR
1055
1056 ret = device_create_file(adev->dev, &dev_attr_vbios_version);
1057 if (ret) {
1058 DRM_ERROR("Failed to create device file for VBIOS version\n");
1059 return ret;
1060 }
1061
d38ceaf9
AD
1062 return 0;
1063}
1064
1065/* if we get transitioned to only one device, take VGA back */
1066/**
1067 * amdgpu_vga_set_decode - enable/disable vga decode
1068 *
1069 * @cookie: amdgpu_device pointer
1070 * @state: enable/disable vga decode
1071 *
1072 * Enable/disable vga decode (all asics).
1073 * Returns VGA resource flags.
1074 */
1075static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
1076{
1077 struct amdgpu_device *adev = cookie;
1078 amdgpu_asic_set_vga_state(adev, state);
1079 if (state)
1080 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1081 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1082 else
1083 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1084}
1085
bab4fee7 1086static void amdgpu_check_block_size(struct amdgpu_device *adev)
a1adf8be
CZ
1087{
1088 /* defines number of bits in page table versus page directory,
1089 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1090 * page table and the remaining bits are in the page directory */
bab4fee7
JZ
1091 if (amdgpu_vm_block_size == -1)
1092 return;
a1adf8be 1093
bab4fee7 1094 if (amdgpu_vm_block_size < 9) {
a1adf8be
CZ
1095 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1096 amdgpu_vm_block_size);
bab4fee7 1097 goto def_value;
a1adf8be
CZ
1098 }
1099
1100 if (amdgpu_vm_block_size > 24 ||
1101 (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
1102 dev_warn(adev->dev, "VM page table size (%d) too large\n",
1103 amdgpu_vm_block_size);
bab4fee7 1104 goto def_value;
a1adf8be 1105 }
bab4fee7
JZ
1106
1107 return;
1108
1109def_value:
1110 amdgpu_vm_block_size = -1;
a1adf8be
CZ
1111}
1112
83ca145d
ZJ
1113static void amdgpu_check_vm_size(struct amdgpu_device *adev)
1114{
64dab074
AD
1115 /* no need to check the default value */
1116 if (amdgpu_vm_size == -1)
1117 return;
1118
76117507 1119 if (!is_power_of_2(amdgpu_vm_size)) {
83ca145d
ZJ
1120 dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
1121 amdgpu_vm_size);
1122 goto def_value;
1123 }
1124
1125 if (amdgpu_vm_size < 1) {
1126 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1127 amdgpu_vm_size);
1128 goto def_value;
1129 }
1130
1131 /*
1132 * Max GPUVM size for Cayman, SI, CI and VI is 40 bits.
1133 */
1134 if (amdgpu_vm_size > 1024) {
1135 dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
1136 amdgpu_vm_size);
1137 goto def_value;
1138 }
1139
1140 return;
1141
1142def_value:
bab4fee7 1143 amdgpu_vm_size = -1;
83ca145d
ZJ
1144}
1145
d38ceaf9
AD
1146/**
1147 * amdgpu_check_arguments - validate module params
1148 *
1149 * @adev: amdgpu_device pointer
1150 *
1151 * Validates certain module parameters and updates
1152 * the associated values used by the driver (all asics).
1153 */
1154static void amdgpu_check_arguments(struct amdgpu_device *adev)
1155{
5b011235
CZ
1156 if (amdgpu_sched_jobs < 4) {
1157 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1158 amdgpu_sched_jobs);
1159 amdgpu_sched_jobs = 4;
76117507 1160 } else if (!is_power_of_2(amdgpu_sched_jobs)){
5b011235
CZ
1161 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1162 amdgpu_sched_jobs);
1163 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1164 }
d38ceaf9 1165
83e74db6 1166 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
f9321cc4
CK
1167 /* gart size must be greater or equal to 32M */
1168 dev_warn(adev->dev, "gart size (%d) too small\n",
1169 amdgpu_gart_size);
83e74db6 1170 amdgpu_gart_size = -1;
d38ceaf9
AD
1171 }
1172
36d38372 1173 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
c4e1a13a 1174 /* gtt size must be greater or equal to 32M */
36d38372
CK
1175 dev_warn(adev->dev, "gtt size (%d) too small\n",
1176 amdgpu_gtt_size);
1177 amdgpu_gtt_size = -1;
d38ceaf9
AD
1178 }
1179
d07f14be
RH
1180 /* valid range is between 4 and 9 inclusive */
1181 if (amdgpu_vm_fragment_size != -1 &&
1182 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1183 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1184 amdgpu_vm_fragment_size = -1;
1185 }
1186
83ca145d 1187 amdgpu_check_vm_size(adev);
d38ceaf9 1188
bab4fee7 1189 amdgpu_check_block_size(adev);
6a7f76e7 1190
526bae37 1191 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
76117507 1192 !is_power_of_2(amdgpu_vram_page_split))) {
6a7f76e7
CK
1193 dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
1194 amdgpu_vram_page_split);
1195 amdgpu_vram_page_split = 1024;
1196 }
d38ceaf9
AD
1197}
1198
1199/**
1200 * amdgpu_switcheroo_set_state - set switcheroo state
1201 *
1202 * @pdev: pci dev pointer
1694467b 1203 * @state: vga_switcheroo state
d38ceaf9
AD
1204 *
1205 * Callback for the switcheroo driver. Suspends or resumes the
1206 * asics before or after it is powered up using ACPI methods.
1207 */
1208static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1209{
1210 struct drm_device *dev = pci_get_drvdata(pdev);
1211
1212 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1213 return;
1214
1215 if (state == VGA_SWITCHEROO_ON) {
7ca85295 1216 pr_info("amdgpu: switched on\n");
d38ceaf9
AD
1217 /* don't suspend or resume card normally */
1218 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1219
810ddc3a 1220 amdgpu_device_resume(dev, true, true);
d38ceaf9 1221
d38ceaf9
AD
1222 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1223 drm_kms_helper_poll_enable(dev);
1224 } else {
7ca85295 1225 pr_info("amdgpu: switched off\n");
d38ceaf9
AD
1226 drm_kms_helper_poll_disable(dev);
1227 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
810ddc3a 1228 amdgpu_device_suspend(dev, true, true);
d38ceaf9
AD
1229 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1230 }
1231}
1232
1233/**
1234 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1235 *
1236 * @pdev: pci dev pointer
1237 *
1238 * Callback for the switcheroo driver. Check if the switcheroo
1239 * state can be changed.
1240 * Returns true if the state can be changed, false if not.
1241 */
1242static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1243{
1244 struct drm_device *dev = pci_get_drvdata(pdev);
1245
1246 /*
1247 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1248 * locking inversion with the driver load path. And the access here is
1249 * completely racy anyway. So don't bother with locking for now.
1250 */
1251 return dev->open_count == 0;
1252}
1253
1254static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1255 .set_gpu_state = amdgpu_switcheroo_set_state,
1256 .reprobe = NULL,
1257 .can_switch = amdgpu_switcheroo_can_switch,
1258};
1259
1260int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
5fc3aeeb 1261 enum amd_ip_block_type block_type,
1262 enum amd_clockgating_state state)
d38ceaf9
AD
1263{
1264 int i, r = 0;
1265
1266 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1267 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1268 continue;
c722865a
RZ
1269 if (adev->ip_blocks[i].version->type != block_type)
1270 continue;
1271 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1272 continue;
1273 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1274 (void *)adev, state);
1275 if (r)
1276 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1277 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
1278 }
1279 return r;
1280}
1281
1282int amdgpu_set_powergating_state(struct amdgpu_device *adev,
5fc3aeeb 1283 enum amd_ip_block_type block_type,
1284 enum amd_powergating_state state)
d38ceaf9
AD
1285{
1286 int i, r = 0;
1287
1288 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1289 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1290 continue;
c722865a
RZ
1291 if (adev->ip_blocks[i].version->type != block_type)
1292 continue;
1293 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1294 continue;
1295 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1296 (void *)adev, state);
1297 if (r)
1298 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1299 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
1300 }
1301 return r;
1302}
1303
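/*
 * Usage sketch (illustrative, not part of the original file): IP-specific code
 * toggles gating for a single block through these helpers, e.g.
 *
 *	amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *				     AMD_CG_STATE_GATE);
 *	amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
 *				     AMD_PG_STATE_UNGATE);
 */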
6cb2d4e4
HR
1304void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
1305{
1306 int i;
1307
1308 for (i = 0; i < adev->num_ip_blocks; i++) {
1309 if (!adev->ip_blocks[i].status.valid)
1310 continue;
1311 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1312 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1313 }
1314}
1315
5dbbb60b
AD
1316int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1317 enum amd_ip_block_type block_type)
1318{
1319 int i, r;
1320
1321 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1322 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1323 continue;
a1255107
AD
1324 if (adev->ip_blocks[i].version->type == block_type) {
1325 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
5dbbb60b
AD
1326 if (r)
1327 return r;
1328 break;
1329 }
1330 }
1331 return 0;
1332
1333}
1334
1335bool amdgpu_is_idle(struct amdgpu_device *adev,
1336 enum amd_ip_block_type block_type)
1337{
1338 int i;
1339
1340 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1341 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1342 continue;
a1255107
AD
1343 if (adev->ip_blocks[i].version->type == block_type)
1344 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
5dbbb60b
AD
1345 }
1346 return true;
1347
1348}
1349
a1255107
AD
1350struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
1351 enum amd_ip_block_type type)
d38ceaf9
AD
1352{
1353 int i;
1354
1355 for (i = 0; i < adev->num_ip_blocks; i++)
a1255107 1356 if (adev->ip_blocks[i].version->type == type)
d38ceaf9
AD
1357 return &adev->ip_blocks[i];
1358
1359 return NULL;
1360}
1361
1362/**
1363 * amdgpu_ip_block_version_cmp
1364 *
1365 * @adev: amdgpu_device pointer
5fc3aeeb 1366 * @type: enum amd_ip_block_type
d38ceaf9
AD
1367 * @major: major version
1368 * @minor: minor version
1369 *
1370 * return 0 if equal or greater
1371 * return 1 if smaller or the ip_block doesn't exist
1372 */
1373int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
5fc3aeeb 1374 enum amd_ip_block_type type,
d38ceaf9
AD
1375 u32 major, u32 minor)
1376{
a1255107 1377 struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
d38ceaf9 1378
a1255107
AD
1379 if (ip_block && ((ip_block->version->major > major) ||
1380 ((ip_block->version->major == major) &&
1381 (ip_block->version->minor >= minor))))
d38ceaf9
AD
1382 return 0;
1383
1384 return 1;
1385}
1386
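/*
 * Usage sketch (illustrative): callers gate feature paths on a minimum IP
 * block version, e.g.
 *
 *	if (amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC, 7, 1) == 0)
 *		the SMC block is present and at least version 7.1
 */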
a1255107
AD
1387/**
1388 * amdgpu_ip_block_add
1389 *
1390 * @adev: amdgpu_device pointer
1391 * @ip_block_version: pointer to the IP to add
1392 *
1393 * Adds the IP block driver information to the collection of IPs
1394 * on the asic.
1395 */
1396int amdgpu_ip_block_add(struct amdgpu_device *adev,
1397 const struct amdgpu_ip_block_version *ip_block_version)
1398{
1399 if (!ip_block_version)
1400 return -EINVAL;
1401
a0bae357
HR
1402 DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
1403 ip_block_version->funcs->name);
1404
a1255107
AD
1405 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1406
1407 return 0;
1408}
1409
483ef985 1410static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
9accf2fd
ED
1411{
1412 adev->enable_virtual_display = false;
1413
1414 if (amdgpu_virtual_display) {
1415 struct drm_device *ddev = adev->ddev;
1416 const char *pci_address_name = pci_name(ddev->pdev);
0f66356d 1417 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
9accf2fd
ED
1418
1419 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1420 pciaddstr_tmp = pciaddstr;
0f66356d
ED
1421 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1422 pciaddname = strsep(&pciaddname_tmp, ",");
967de2a9
YT
1423 if (!strcmp("all", pciaddname)
1424 || !strcmp(pci_address_name, pciaddname)) {
0f66356d
ED
1425 long num_crtc;
1426 int res = -1;
1427
9accf2fd 1428 adev->enable_virtual_display = true;
0f66356d
ED
1429
1430 if (pciaddname_tmp)
1431 res = kstrtol(pciaddname_tmp, 10,
1432 &num_crtc);
1433
1434 if (!res) {
1435 if (num_crtc < 1)
1436 num_crtc = 1;
1437 if (num_crtc > 6)
1438 num_crtc = 6;
1439 adev->mode_info.num_crtc = num_crtc;
1440 } else {
1441 adev->mode_info.num_crtc = 1;
1442 }
9accf2fd
ED
1443 break;
1444 }
1445 }
1446
0f66356d
ED
1447 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1448 amdgpu_virtual_display, pci_address_name,
1449 adev->enable_virtual_display, adev->mode_info.num_crtc);
9accf2fd
ED
1450
1451 kfree(pciaddstr);
1452 }
1453}
1454
e2a75f88
AD
1455static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1456{
e2a75f88
AD
1457 const char *chip_name;
1458 char fw_name[30];
1459 int err;
1460 const struct gpu_info_firmware_header_v1_0 *hdr;
1461
ab4fe3e1
HR
1462 adev->firmware.gpu_info_fw = NULL;
1463
e2a75f88
AD
1464 switch (adev->asic_type) {
1465 case CHIP_TOPAZ:
1466 case CHIP_TONGA:
1467 case CHIP_FIJI:
1468 case CHIP_POLARIS11:
1469 case CHIP_POLARIS10:
1470 case CHIP_POLARIS12:
1471 case CHIP_CARRIZO:
1472 case CHIP_STONEY:
1473#ifdef CONFIG_DRM_AMDGPU_SI
1474 case CHIP_VERDE:
1475 case CHIP_TAHITI:
1476 case CHIP_PITCAIRN:
1477 case CHIP_OLAND:
1478 case CHIP_HAINAN:
1479#endif
1480#ifdef CONFIG_DRM_AMDGPU_CIK
1481 case CHIP_BONAIRE:
1482 case CHIP_HAWAII:
1483 case CHIP_KAVERI:
1484 case CHIP_KABINI:
1485 case CHIP_MULLINS:
1486#endif
1487 default:
1488 return 0;
1489 case CHIP_VEGA10:
1490 chip_name = "vega10";
1491 break;
2d2e5e7e
AD
1492 case CHIP_RAVEN:
1493 chip_name = "raven";
1494 break;
e2a75f88
AD
1495 }
1496
1497 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
ab4fe3e1 1498 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
e2a75f88
AD
1499 if (err) {
1500 dev_err(adev->dev,
1501 "Failed to load gpu_info firmware \"%s\"\n",
1502 fw_name);
1503 goto out;
1504 }
ab4fe3e1 1505 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
e2a75f88
AD
1506 if (err) {
1507 dev_err(adev->dev,
1508 "Failed to validate gpu_info firmware \"%s\"\n",
1509 fw_name);
1510 goto out;
1511 }
1512
ab4fe3e1 1513 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
e2a75f88
AD
1514 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1515
1516 switch (hdr->version_major) {
1517 case 1:
1518 {
1519 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
ab4fe3e1 1520 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
e2a75f88
AD
1521 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1522
b5ab16bf
AD
1523 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1524 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1525 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1526 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
e2a75f88 1527 adev->gfx.config.max_texture_channel_caches =
b5ab16bf
AD
1528 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1529 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1530 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1531 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1532 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
e2a75f88 1533 adev->gfx.config.double_offchip_lds_buf =
b5ab16bf
AD
1534 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1535 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
51fd0370
HZ
1536 adev->gfx.cu_info.max_waves_per_simd =
1537 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1538 adev->gfx.cu_info.max_scratch_slots_per_cu =
1539 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1540 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
e2a75f88
AD
1541 break;
1542 }
1543 default:
1544 dev_err(adev->dev,
1545 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1546 err = -EINVAL;
1547 goto out;
1548 }
1549out:
e2a75f88
AD
1550 return err;
1551}
1552
d38ceaf9
AD
1553static int amdgpu_early_init(struct amdgpu_device *adev)
1554{
aaa36a97 1555 int i, r;
d38ceaf9 1556
483ef985 1557 amdgpu_device_enable_virtual_display(adev);
a6be7570 1558
d38ceaf9 1559 switch (adev->asic_type) {
aaa36a97
AD
1560 case CHIP_TOPAZ:
1561 case CHIP_TONGA:
48299f95 1562 case CHIP_FIJI:
2cc0c0b5
FC
1563 case CHIP_POLARIS11:
1564 case CHIP_POLARIS10:
c4642a47 1565 case CHIP_POLARIS12:
aaa36a97 1566 case CHIP_CARRIZO:
39bb0c92
SL
1567 case CHIP_STONEY:
1568 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
aaa36a97
AD
1569 adev->family = AMDGPU_FAMILY_CZ;
1570 else
1571 adev->family = AMDGPU_FAMILY_VI;
1572
1573 r = vi_set_ip_blocks(adev);
1574 if (r)
1575 return r;
1576 break;
33f34802
KW
1577#ifdef CONFIG_DRM_AMDGPU_SI
1578 case CHIP_VERDE:
1579 case CHIP_TAHITI:
1580 case CHIP_PITCAIRN:
1581 case CHIP_OLAND:
1582 case CHIP_HAINAN:
295d0daf 1583 adev->family = AMDGPU_FAMILY_SI;
33f34802
KW
1584 r = si_set_ip_blocks(adev);
1585 if (r)
1586 return r;
1587 break;
1588#endif
a2e73f56
AD
1589#ifdef CONFIG_DRM_AMDGPU_CIK
1590 case CHIP_BONAIRE:
1591 case CHIP_HAWAII:
1592 case CHIP_KAVERI:
1593 case CHIP_KABINI:
1594 case CHIP_MULLINS:
1595 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1596 adev->family = AMDGPU_FAMILY_CI;
1597 else
1598 adev->family = AMDGPU_FAMILY_KV;
1599
1600 r = cik_set_ip_blocks(adev);
1601 if (r)
1602 return r;
1603 break;
1604#endif
2ca8a5d2
CZ
1605 case CHIP_VEGA10:
1606 case CHIP_RAVEN:
1607 if (adev->asic_type == CHIP_RAVEN)
1608 adev->family = AMDGPU_FAMILY_RV;
1609 else
1610 adev->family = AMDGPU_FAMILY_AI;
460826e6
KW
1611
1612 r = soc15_set_ip_blocks(adev);
1613 if (r)
1614 return r;
1615 break;
d38ceaf9
AD
1616 default:
1617 /* FIXME: not supported yet */
1618 return -EINVAL;
1619 }
1620
e2a75f88
AD
1621 r = amdgpu_device_parse_gpu_info_fw(adev);
1622 if (r)
1623 return r;
1624
3149d9da
XY
1625 if (amdgpu_sriov_vf(adev)) {
1626 r = amdgpu_virt_request_full_gpu(adev, true);
1627 if (r)
1628 return r;
1629 }
1630
d38ceaf9
AD
1631 for (i = 0; i < adev->num_ip_blocks; i++) {
1632 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
ed8cf00c
HR
1633 DRM_ERROR("disabled ip block: %d <%s>\n",
1634 i, adev->ip_blocks[i].version->funcs->name);
a1255107 1635 adev->ip_blocks[i].status.valid = false;
d38ceaf9 1636 } else {
a1255107
AD
1637 if (adev->ip_blocks[i].version->funcs->early_init) {
1638 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2c1a2784 1639 if (r == -ENOENT) {
a1255107 1640 adev->ip_blocks[i].status.valid = false;
2c1a2784 1641 } else if (r) {
a1255107
AD
1642 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1643 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1644 return r;
2c1a2784 1645 } else {
a1255107 1646 adev->ip_blocks[i].status.valid = true;
2c1a2784 1647 }
974e6b64 1648 } else {
a1255107 1649 adev->ip_blocks[i].status.valid = true;
d38ceaf9 1650 }
d38ceaf9
AD
1651 }
1652 }
1653
395d1fb9
NH
1654 adev->cg_flags &= amdgpu_cg_mask;
1655 adev->pg_flags &= amdgpu_pg_mask;
1656
d38ceaf9
AD
1657 return 0;
1658}
1659
1660static int amdgpu_init(struct amdgpu_device *adev)
1661{
1662 int i, r;
1663
1664 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1665 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1666 continue;
a1255107 1667 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2c1a2784 1668 if (r) {
a1255107
AD
1669 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1670 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1671 return r;
2c1a2784 1672 }
a1255107 1673 adev->ip_blocks[i].status.sw = true;
d38ceaf9 1674 /* need to do gmc hw init early so we can allocate gpu mem */
a1255107 1675 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9 1676 r = amdgpu_vram_scratch_init(adev);
2c1a2784
AD
1677 if (r) {
1678 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
d38ceaf9 1679 return r;
2c1a2784 1680 }
a1255107 1681 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784
AD
1682 if (r) {
1683 DRM_ERROR("hw_init %d failed %d\n", i, r);
d38ceaf9 1684 return r;
2c1a2784 1685 }
d38ceaf9 1686 r = amdgpu_wb_init(adev);
2c1a2784
AD
1687 if (r) {
1688 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
d38ceaf9 1689 return r;
2c1a2784 1690 }
a1255107 1691 adev->ip_blocks[i].status.hw = true;
2493664f
ML
1692
1693 /* right after GMC hw init, we create CSA */
1694 if (amdgpu_sriov_vf(adev)) {
1695 r = amdgpu_allocate_static_csa(adev);
1696 if (r) {
1697 DRM_ERROR("allocate CSA failed %d\n", r);
1698 return r;
1699 }
1700 }
d38ceaf9
AD
1701 }
1702 }
1703
1704 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1705 if (!adev->ip_blocks[i].status.sw)
d38ceaf9
AD
1706 continue;
1707 /* gmc hw init is done early */
a1255107 1708 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
d38ceaf9 1709 continue;
a1255107 1710 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784 1711 if (r) {
a1255107
AD
1712 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1713 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1714 return r;
2c1a2784 1715 }
a1255107 1716 adev->ip_blocks[i].status.hw = true;
d38ceaf9
AD
1717 }
1718
1719 return 0;
1720}
1721
0c49e0b8
CZ
1722static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
1723{
1724 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1725}
1726
1727static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
1728{
1729 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1730 AMDGPU_RESET_MAGIC_NUM);
1731}
1732
2dc80b00 1733static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
d38ceaf9
AD
1734{
1735 int i = 0, r;
1736
1737 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1738 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1739 continue;
4a446d55 1740 /* skip CG for VCE/UVD, it's handled specially */
a1255107
AD
1741 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1742 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
4a446d55 1743 /* enable clockgating to save power */
a1255107
AD
1744 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1745 AMD_CG_STATE_GATE);
4a446d55
AD
1746 if (r) {
1747 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 1748 adev->ip_blocks[i].version->funcs->name, r);
4a446d55
AD
1749 return r;
1750 }
b0b00ff1 1751 }
d38ceaf9 1752 }
2dc80b00
S
1753 return 0;
1754}
1755
1756static int amdgpu_late_init(struct amdgpu_device *adev)
1757{
1758 int i = 0, r;
1759
1760 for (i = 0; i < adev->num_ip_blocks; i++) {
1761 if (!adev->ip_blocks[i].status.valid)
1762 continue;
1763 if (adev->ip_blocks[i].version->funcs->late_init) {
1764 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1765 if (r) {
1766 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1767 adev->ip_blocks[i].version->funcs->name, r);
1768 return r;
1769 }
1770 adev->ip_blocks[i].status.late_initialized = true;
1771 }
1772 }
1773
1774 mod_delayed_work(system_wq, &adev->late_init_work,
1775 msecs_to_jiffies(AMDGPU_RESUME_MS));
d38ceaf9 1776
0c49e0b8 1777 amdgpu_fill_reset_magic(adev);
d38ceaf9
AD
1778
1779 return 0;
1780}
1781
1782static int amdgpu_fini(struct amdgpu_device *adev)
1783{
1784 int i, r;
1785
3e96dbfd
AD
1786 /* need to disable SMC first */
1787 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1788 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 1789 continue;
a1255107 1790 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3e96dbfd 1791 /* ungate blocks before hw fini so that we can shut down the blocks safely */
a1255107
AD
1792 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1793 AMD_CG_STATE_UNGATE);
3e96dbfd
AD
1794 if (r) {
1795 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
a1255107 1796 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd
AD
1797 return r;
1798 }
a1255107 1799 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3e96dbfd
AD
1800 /* XXX handle errors */
1801 if (r) {
1802 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 1803 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 1804 }
a1255107 1805 adev->ip_blocks[i].status.hw = false;
3e96dbfd
AD
1806 break;
1807 }
1808 }
1809
d38ceaf9 1810 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1811 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 1812 continue;
a1255107 1813 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9
AD
1814 amdgpu_wb_fini(adev);
1815 amdgpu_vram_scratch_fini(adev);
1816 }
8201a67a
RZ
1817
1818 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1819 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1820 /* ungate blocks before hw fini so that we can shut down the blocks safely */
1821 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1822 AMD_CG_STATE_UNGATE);
1823 if (r) {
1824 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1825 adev->ip_blocks[i].version->funcs->name, r);
1826 return r;
1827 }
2c1a2784 1828 }
8201a67a 1829
a1255107 1830 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 1831 /* XXX handle errors */
2c1a2784 1832 if (r) {
a1255107
AD
1833 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1834 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1835 }
8201a67a 1836
a1255107 1837 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
1838 }
1839
1840 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1841 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1842 continue;
a1255107 1843 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 1844 /* XXX handle errors */
2c1a2784 1845 if (r) {
a1255107
AD
1846 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1847 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1848 }
a1255107
AD
1849 adev->ip_blocks[i].status.sw = false;
1850 adev->ip_blocks[i].status.valid = false;
d38ceaf9
AD
1851 }
1852
a6dcfd9c 1853 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1854 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 1855 continue;
a1255107
AD
1856 if (adev->ip_blocks[i].version->funcs->late_fini)
1857 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1858 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
1859 }
1860
030308fc 1861 if (amdgpu_sriov_vf(adev))
3149d9da 1862 amdgpu_virt_release_full_gpu(adev, false);
2493664f 1863
d38ceaf9
AD
1864 return 0;
1865}
1866
2dc80b00
S
1867static void amdgpu_late_init_func_handler(struct work_struct *work)
1868{
1869 struct amdgpu_device *adev =
1870 container_of(work, struct amdgpu_device, late_init_work.work);
1871 amdgpu_late_set_cg_state(adev);
1872}
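/*
 * The handler above is the body of adev->late_init_work.  Its lifecycle in
 * this file, condensed: the work item is set up in amdgpu_device_init(),
 * amdgpu_late_init() kicks it off AMDGPU_RESUME_MS milliseconds later so the
 * clockgating enable is deferred until well after init/resume, and
 * amdgpu_device_fini() waits for it before tearing the device down:
 *
 *	INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
 *	...
 *	mod_delayed_work(system_wq, &adev->late_init_work,
 *			 msecs_to_jiffies(AMDGPU_RESUME_MS));
 *	...
 *	cancel_delayed_work_sync(&adev->late_init_work);
 */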
1873
faefba95 1874int amdgpu_suspend(struct amdgpu_device *adev)
d38ceaf9
AD
1875{
1876 int i, r;
1877
e941ea99
XY
1878 if (amdgpu_sriov_vf(adev))
1879 amdgpu_virt_request_full_gpu(adev, false);
1880
c5a93a28
FC
1881 /* ungate SMC block first */
1882 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1883 AMD_CG_STATE_UNGATE);
1884 if (r) {
1885 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
1886 }
1887
d38ceaf9 1888 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1889 if (!adev->ip_blocks[i].status.valid)
d38ceaf9
AD
1890 continue;
1891 /* ungate blocks so that suspend can properly shut them down */
c5a93a28 1892 if (i != AMD_IP_BLOCK_TYPE_SMC) {
a1255107
AD
1893 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1894 AMD_CG_STATE_UNGATE);
c5a93a28 1895 if (r) {
a1255107
AD
1896 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1897 adev->ip_blocks[i].version->funcs->name, r);
c5a93a28 1898 }
2c1a2784 1899 }
d38ceaf9 1900 /* XXX handle errors */
a1255107 1901 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 1902 /* XXX handle errors */
2c1a2784 1903 if (r) {
a1255107
AD
1904 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1905 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1906 }
d38ceaf9
AD
1907 }
1908
e941ea99
XY
1909 if (amdgpu_sriov_vf(adev))
1910 amdgpu_virt_release_full_gpu(adev, false);
1911
d38ceaf9
AD
1912 return 0;
1913}
1914
e4f0fdcc 1915static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
a90ad3c2
ML
1916{
1917 int i, r;
1918
2cb681b6
ML
1919 static enum amd_ip_block_type ip_order[] = {
1920 AMD_IP_BLOCK_TYPE_GMC,
1921 AMD_IP_BLOCK_TYPE_COMMON,
2cb681b6
ML
1922 AMD_IP_BLOCK_TYPE_IH,
1923 };
a90ad3c2 1924
2cb681b6
ML
1925 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1926 int j;
1927 struct amdgpu_ip_block *block;
a90ad3c2 1928
2cb681b6
ML
1929 for (j = 0; j < adev->num_ip_blocks; j++) {
1930 block = &adev->ip_blocks[j];
1931
1932 if (block->version->type != ip_order[i] ||
1933 !block->status.valid)
1934 continue;
1935
1936 r = block->version->funcs->hw_init(adev);
1937 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
a90ad3c2
ML
1938 }
1939 }
1940
1941 return 0;
1942}
1943
e4f0fdcc 1944static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
a90ad3c2
ML
1945{
1946 int i, r;
1947
2cb681b6
ML
1948 static enum amd_ip_block_type ip_order[] = {
1949 AMD_IP_BLOCK_TYPE_SMC,
ef4c166d 1950 AMD_IP_BLOCK_TYPE_PSP,
2cb681b6
ML
1951 AMD_IP_BLOCK_TYPE_DCE,
1952 AMD_IP_BLOCK_TYPE_GFX,
1953 AMD_IP_BLOCK_TYPE_SDMA,
257deb8c
FM
1954 AMD_IP_BLOCK_TYPE_UVD,
1955 AMD_IP_BLOCK_TYPE_VCE
2cb681b6 1956 };
a90ad3c2 1957
2cb681b6
ML
1958 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1959 int j;
1960 struct amdgpu_ip_block *block;
a90ad3c2 1961
2cb681b6
ML
1962 for (j = 0; j < adev->num_ip_blocks; j++) {
1963 block = &adev->ip_blocks[j];
1964
1965 if (block->version->type != ip_order[i] ||
1966 !block->status.valid)
1967 continue;
1968
1969 r = block->version->funcs->hw_init(adev);
1970 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
a90ad3c2
ML
1971 }
1972 }
1973
1974 return 0;
1975}
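/*
 * The two SRIOV re-init helpers above deliberately split hw_init into two
 * passes so the VF reset path can restore the GART in between: the early pass
 * brings up the memory controller and interrupt blocks, the late pass
 * everything that needs a working GART.  Condensed from
 * amdgpu_sriov_gpu_reset() later in this file:
 *
 *	amdgpu_sriov_reinit_early(adev);	(GMC, COMMON, IH)
 *	amdgpu_ttm_recover_gart(adev);		(GART must be valid before SMC/CP/SDMA)
 *	amdgpu_sriov_reinit_late(adev);		(SMC, PSP, DCE, GFX, SDMA, UVD, VCE)
 */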
1976
fcf0649f 1977static int amdgpu_resume_phase1(struct amdgpu_device *adev)
d38ceaf9
AD
1978{
1979 int i, r;
1980
a90ad3c2
ML
1981 for (i = 0; i < adev->num_ip_blocks; i++) {
1982 if (!adev->ip_blocks[i].status.valid)
1983 continue;
a90ad3c2
ML
1984 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1985 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
fcf0649f
CZ
1986 adev->ip_blocks[i].version->type ==
1987 AMD_IP_BLOCK_TYPE_IH) {
1988 r = adev->ip_blocks[i].version->funcs->resume(adev);
1989 if (r) {
1990 DRM_ERROR("resume of IP block <%s> failed %d\n",
1991 adev->ip_blocks[i].version->funcs->name, r);
1992 return r;
1993 }
a90ad3c2
ML
1994 }
1995 }
1996
1997 return 0;
1998}
1999
fcf0649f 2000static int amdgpu_resume_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
2001{
2002 int i, r;
2003
2004 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2005 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 2006 continue;
fcf0649f
CZ
2007 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2008 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2009 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
2010 continue;
a1255107 2011 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 2012 if (r) {
a1255107
AD
2013 DRM_ERROR("resume of IP block <%s> failed %d\n",
2014 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 2015 return r;
2c1a2784 2016 }
d38ceaf9
AD
2017 }
2018
2019 return 0;
2020}
2021
fcf0649f
CZ
2022static int amdgpu_resume(struct amdgpu_device *adev)
2023{
2024 int r;
2025
2026 r = amdgpu_resume_phase1(adev);
2027 if (r)
2028 return r;
2029 r = amdgpu_resume_phase2(adev);
2030
2031 return r;
2032}
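/*
 * amdgpu_resume() is intentionally two-phased: phase 1 resumes only the
 * COMMON, GMC and IH blocks so that register access, the memory controller
 * and interrupts come back first, and phase 2 resumes every remaining block.
 * The GPU reset path below relies on this split to check for VRAM loss and
 * recover the GART between the two phases.
 */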
2033
4e99a44e 2034static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 2035{
6867e1b5
ML
2036 if (amdgpu_sriov_vf(adev)) {
2037 if (adev->is_atom_fw) {
2038 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2039 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2040 } else {
2041 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2042 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2043 }
2044
2045 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2046 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
a5bde2f9 2047 }
048765ad
AR
2048}
2049
4562236b
HW
2050bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2051{
2052 switch (asic_type) {
2053#if defined(CONFIG_DRM_AMD_DC)
2054 case CHIP_BONAIRE:
2055 case CHIP_HAWAII:
0d6fbccb 2056 case CHIP_KAVERI:
4562236b
HW
2057 case CHIP_CARRIZO:
2058 case CHIP_STONEY:
2059 case CHIP_POLARIS11:
2060 case CHIP_POLARIS10:
2c8ad2d5 2061 case CHIP_POLARIS12:
4562236b
HW
2062 case CHIP_TONGA:
2063 case CHIP_FIJI:
2064#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
2065 return amdgpu_dc != 0;
4562236b 2066#endif
17b7cf8c
AD
2067 case CHIP_KABINI:
2068 case CHIP_MULLINS:
2069 return amdgpu_dc > 0;
42f8ffa1
HW
2070 case CHIP_VEGA10:
2071#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
fd187853 2072 case CHIP_RAVEN:
42f8ffa1 2073#endif
fd187853 2074 return amdgpu_dc != 0;
4562236b
HW
2075#endif
2076 default:
2077 return false;
2078 }
2079}
2080
2081/**
2082 * amdgpu_device_has_dc_support - check if dc is supported
2083 *
2084 * @adev: amdgpu_device pointer
2085 *
2086 * Returns true for supported, false for not supported
2087 */
2088bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2089{
2555039d
XY
2090 if (amdgpu_sriov_vf(adev))
2091 return false;
2092
4562236b
HW
2093 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2094}
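/*
 * A minimal illustration of how the two DC-support checks above are meant to
 * be used: they gate the display code paths between the legacy helpers and
 * the DC/atomic ones, e.g. in the hotplug handling further down in this file:
 *
 *	if (!amdgpu_device_has_dc_support(adev))
 *		drm_helper_hpd_irq_event(dev);		(legacy path)
 *	else
 *		drm_kms_helper_hotplug_event(dev);	(DC path)
 */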
2095
d38ceaf9
AD
2096/**
2097 * amdgpu_device_init - initialize the driver
2098 *
2099 * @adev: amdgpu_device pointer
2100 * @ddev: drm dev pointer
2101 * @pdev: pci dev pointer
2102 * @flags: driver flags
2103 *
2104 * Initializes the driver info and hw (all asics).
2105 * Returns 0 for success or an error on failure.
2106 * Called at driver startup.
2107 */
2108int amdgpu_device_init(struct amdgpu_device *adev,
2109 struct drm_device *ddev,
2110 struct pci_dev *pdev,
2111 uint32_t flags)
2112{
2113 int r, i;
2114 bool runtime = false;
95844d20 2115 u32 max_MBps;
d38ceaf9
AD
2116
2117 adev->shutdown = false;
2118 adev->dev = &pdev->dev;
2119 adev->ddev = ddev;
2120 adev->pdev = pdev;
2121 adev->flags = flags;
2f7d10b3 2122 adev->asic_type = flags & AMD_ASIC_MASK;
d38ceaf9 2123 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
6f02a696 2124 adev->mc.gart_size = 512 * 1024 * 1024;
d38ceaf9
AD
2125 adev->accel_working = false;
2126 adev->num_rings = 0;
2127 adev->mman.buffer_funcs = NULL;
2128 adev->mman.buffer_funcs_ring = NULL;
2129 adev->vm_manager.vm_pte_funcs = NULL;
2d55e45a 2130 adev->vm_manager.vm_pte_num_rings = 0;
d38ceaf9 2131 adev->gart.gart_funcs = NULL;
f54d1867 2132 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
b8866c26 2133 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
d38ceaf9
AD
2134
2135 adev->smc_rreg = &amdgpu_invalid_rreg;
2136 adev->smc_wreg = &amdgpu_invalid_wreg;
2137 adev->pcie_rreg = &amdgpu_invalid_rreg;
2138 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
2139 adev->pciep_rreg = &amdgpu_invalid_rreg;
2140 adev->pciep_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2141 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2142 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2143 adev->didt_rreg = &amdgpu_invalid_rreg;
2144 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
2145 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2146 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2147 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2148 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2149
3e39ab90
AD
2150 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2151 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2152 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
2153
2154 /* mutex initialization is all done here so we
2155 * can call these functions again without locking issues */
d38ceaf9 2156 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 2157 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
2158 mutex_init(&adev->pm.mutex);
2159 mutex_init(&adev->gfx.gpu_clock_mutex);
2160 mutex_init(&adev->srbm_mutex);
b8866c26 2161 mutex_init(&adev->gfx.pipe_reserve_mutex);
d38ceaf9 2162 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9 2163 mutex_init(&adev->mn_lock);
e23b74aa 2164 mutex_init(&adev->virt.vf_errors.lock);
d38ceaf9
AD
2165 hash_init(adev->mn_hash);
2166
2167 amdgpu_check_arguments(adev);
2168
d38ceaf9
AD
2169 spin_lock_init(&adev->mmio_idx_lock);
2170 spin_lock_init(&adev->smc_idx_lock);
2171 spin_lock_init(&adev->pcie_idx_lock);
2172 spin_lock_init(&adev->uvd_ctx_idx_lock);
2173 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 2174 spin_lock_init(&adev->gc_cac_idx_lock);
16abb5d2 2175 spin_lock_init(&adev->se_cac_idx_lock);
d38ceaf9 2176 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 2177 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 2178
0c4e7fa5
CZ
2179 INIT_LIST_HEAD(&adev->shadow_list);
2180 mutex_init(&adev->shadow_list_lock);
2181
5c1354bd
CZ
2182 INIT_LIST_HEAD(&adev->gtt_list);
2183 spin_lock_init(&adev->gtt_list_lock);
2184
795f2813
AR
2185 INIT_LIST_HEAD(&adev->ring_lru_list);
2186 spin_lock_init(&adev->ring_lru_list_lock);
2187
2dc80b00
S
2188 INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
2189
0fa49558
AX
2190 /* Registers mapping */
2191 /* TODO: block userspace mapping of io register */
da69c161
KW
2192 if (adev->asic_type >= CHIP_BONAIRE) {
2193 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2194 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2195 } else {
2196 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2197 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2198 }
d38ceaf9 2199
d38ceaf9
AD
2200 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2201 if (adev->rmmio == NULL) {
2202 return -ENOMEM;
2203 }
2204 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2205 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2206
705e519e
CK
2207 /* doorbell bar mapping */
2208 amdgpu_doorbell_init(adev);
d38ceaf9
AD
2209
2210 /* io port mapping */
2211 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2212 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2213 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2214 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2215 break;
2216 }
2217 }
2218 if (adev->rio_mem == NULL)
b64a18c5 2219 DRM_INFO("PCI I/O BAR is not found.\n");
d38ceaf9
AD
2220
2221 /* early init functions */
2222 r = amdgpu_early_init(adev);
2223 if (r)
2224 return r;
2225
2226 /* if we have more than one VGA card, then disable the amdgpu VGA resources */
2227 /* this will fail for cards that aren't VGA class devices, just
2228 * ignore it */
2229 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
2230
2231 if (amdgpu_runtime_pm == 1)
2232 runtime = true;
e9bef455 2233 if (amdgpu_device_is_px(ddev))
d38ceaf9 2234 runtime = true;
84c8b22e
LW
2235 if (!pci_is_thunderbolt_attached(adev->pdev))
2236 vga_switcheroo_register_client(adev->pdev,
2237 &amdgpu_switcheroo_ops, runtime);
d38ceaf9
AD
2238 if (runtime)
2239 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2240
2241 /* Read BIOS */
83ba126a
AD
2242 if (!amdgpu_get_bios(adev)) {
2243 r = -EINVAL;
2244 goto failed;
2245 }
f7e9e9fe 2246
d38ceaf9 2247 r = amdgpu_atombios_init(adev);
2c1a2784
AD
2248 if (r) {
2249 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
e23b74aa 2250 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
83ba126a 2251 goto failed;
2c1a2784 2252 }
d38ceaf9 2253
4e99a44e
ML
2254 /* detect if we are running with an SRIOV vBIOS */
2255 amdgpu_device_detect_sriov_bios(adev);
048765ad 2256
d38ceaf9 2257 /* Post card if necessary */
91fe77eb 2258 if (amdgpu_need_post(adev)) {
d38ceaf9 2259 if (!adev->bios) {
bec86378 2260 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
2261 r = -EINVAL;
2262 goto failed;
d38ceaf9 2263 }
bec86378 2264 DRM_INFO("GPU posting now...\n");
4e99a44e
ML
2265 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2266 if (r) {
2267 dev_err(adev->dev, "gpu post error!\n");
2268 goto failed;
2269 }
2270 } else {
2271 DRM_INFO("GPU post is not needed\n");
d38ceaf9
AD
2272 }
2273
88b64e95
AD
2274 if (adev->is_atom_fw) {
2275 /* Initialize clocks */
2276 r = amdgpu_atomfirmware_get_clock_info(adev);
2277 if (r) {
2278 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
e23b74aa 2279 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
88b64e95
AD
2280 goto failed;
2281 }
2282 } else {
a5bde2f9
AD
2283 /* Initialize clocks */
2284 r = amdgpu_atombios_get_clock_info(adev);
2285 if (r) {
2286 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
e23b74aa 2287 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
89041940 2288 goto failed;
a5bde2f9
AD
2289 }
2290 /* init i2c buses */
4562236b
HW
2291 if (!amdgpu_device_has_dc_support(adev))
2292 amdgpu_atombios_i2c_init(adev);
2c1a2784 2293 }
d38ceaf9
AD
2294
2295 /* Fence driver */
2296 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
2297 if (r) {
2298 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
e23b74aa 2299 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
83ba126a 2300 goto failed;
2c1a2784 2301 }
d38ceaf9
AD
2302
2303 /* init the mode config */
2304 drm_mode_config_init(adev->ddev);
2305
2306 r = amdgpu_init(adev);
2307 if (r) {
2c1a2784 2308 dev_err(adev->dev, "amdgpu_init failed\n");
e23b74aa 2309 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
d38ceaf9 2310 amdgpu_fini(adev);
83ba126a 2311 goto failed;
d38ceaf9
AD
2312 }
2313
2314 adev->accel_working = true;
2315
e59c0205
AX
2316 amdgpu_vm_check_compute_bug(adev);
2317
95844d20
MO
2318 /* Initialize the buffer migration limit. */
2319 if (amdgpu_moverate >= 0)
2320 max_MBps = amdgpu_moverate;
2321 else
2322 max_MBps = 8; /* Allow 8 MB/s. */
2323 /* Get a log2 for easy divisions. */
2324 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2325
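/*
 * Note on the ilog2() above: with log2_max_MBps = ilog2(max(1, max_MBps)),
 * code that throttles buffer migration can replace a division by the rate
 * with a shift.  Illustrative only (variable names are not from the driver):
 *
 *	approx = bytes_moved >> adev->mm_stats.log2_max_MBps;
 *
 * which matches bytes_moved / max_MBps to within a factor of two, since
 * ilog2() rounds the rate down to a power of two.
 */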
d38ceaf9
AD
2326 r = amdgpu_ib_pool_init(adev);
2327 if (r) {
2328 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
e23b74aa 2329 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
83ba126a 2330 goto failed;
d38ceaf9
AD
2331 }
2332
2333 r = amdgpu_ib_ring_tests(adev);
2334 if (r)
2335 DRM_ERROR("ib ring test failed (%d).\n", r);
2336
2dc8f81e
HC
2337 if (amdgpu_sriov_vf(adev))
2338 amdgpu_virt_init_data_exchange(adev);
2339
9bc92b9c
ML
2340 amdgpu_fbdev_init(adev);
2341
d2f52ac8
RZ
2342 r = amdgpu_pm_sysfs_init(adev);
2343 if (r)
2344 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2345
d38ceaf9 2346 r = amdgpu_gem_debugfs_init(adev);
3f14e623 2347 if (r)
d38ceaf9 2348 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
d38ceaf9
AD
2349
2350 r = amdgpu_debugfs_regs_init(adev);
3f14e623 2351 if (r)
d38ceaf9 2352 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 2353
4f0955fc
HR
2354 r = amdgpu_debugfs_test_ib_ring_init(adev);
2355 if (r)
2356 DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r);
2357
50ab2533 2358 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 2359 if (r)
50ab2533 2360 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 2361
db95e218
KR
2362 r = amdgpu_debugfs_vbios_dump_init(adev);
2363 if (r)
2364 DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
2365
d38ceaf9
AD
2366 if ((amdgpu_testing & 1)) {
2367 if (adev->accel_working)
2368 amdgpu_test_moves(adev);
2369 else
2370 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2371 }
d38ceaf9
AD
2372 if (amdgpu_benchmarking) {
2373 if (adev->accel_working)
2374 amdgpu_benchmark(adev, amdgpu_benchmarking);
2375 else
2376 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2377 }
2378
2379 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2380 * explicit gating rather than handling it automatically.
2381 */
2382 r = amdgpu_late_init(adev);
2c1a2784
AD
2383 if (r) {
2384 dev_err(adev->dev, "amdgpu_late_init failed\n");
e23b74aa 2385 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
83ba126a 2386 goto failed;
2c1a2784 2387 }
d38ceaf9
AD
2388
2389 return 0;
83ba126a
AD
2390
2391failed:
89041940 2392 amdgpu_vf_error_trans_all(adev);
83ba126a
AD
2393 if (runtime)
2394 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2395 return r;
d38ceaf9
AD
2396}
2397
d38ceaf9
AD
2398/**
2399 * amdgpu_device_fini - tear down the driver
2400 *
2401 * @adev: amdgpu_device pointer
2402 *
2403 * Tear down the driver info (all asics).
2404 * Called at driver shutdown.
2405 */
2406void amdgpu_device_fini(struct amdgpu_device *adev)
2407{
2408 int r;
2409
2410 DRM_INFO("amdgpu: finishing device.\n");
2411 adev->shutdown = true;
db2c2a97
PD
2412 if (adev->mode_info.mode_config_initialized)
2413 drm_crtc_force_disable_all(adev->ddev);
d38ceaf9
AD
2414 /* evict vram memory */
2415 amdgpu_bo_evict_vram(adev);
2416 amdgpu_ib_pool_fini(adev);
a05502e5 2417 amdgpu_fw_reserve_vram_fini(adev);
d38ceaf9
AD
2418 amdgpu_fence_driver_fini(adev);
2419 amdgpu_fbdev_fini(adev);
2420 r = amdgpu_fini(adev);
ab4fe3e1
HR
2421 if (adev->firmware.gpu_info_fw) {
2422 release_firmware(adev->firmware.gpu_info_fw);
2423 adev->firmware.gpu_info_fw = NULL;
2424 }
d38ceaf9 2425 adev->accel_working = false;
2dc80b00 2426 cancel_delayed_work_sync(&adev->late_init_work);
d38ceaf9 2427 /* free i2c buses */
4562236b
HW
2428 if (!amdgpu_device_has_dc_support(adev))
2429 amdgpu_i2c_fini(adev);
d38ceaf9
AD
2430 amdgpu_atombios_fini(adev);
2431 kfree(adev->bios);
2432 adev->bios = NULL;
84c8b22e
LW
2433 if (!pci_is_thunderbolt_attached(adev->pdev))
2434 vga_switcheroo_unregister_client(adev->pdev);
83ba126a
AD
2435 if (adev->flags & AMD_IS_PX)
2436 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d38ceaf9
AD
2437 vga_client_register(adev->pdev, NULL, NULL, NULL);
2438 if (adev->rio_mem)
2439 pci_iounmap(adev->pdev, adev->rio_mem);
2440 adev->rio_mem = NULL;
2441 iounmap(adev->rmmio);
2442 adev->rmmio = NULL;
705e519e 2443 amdgpu_doorbell_fini(adev);
d2f52ac8 2444 amdgpu_pm_sysfs_fini(adev);
d38ceaf9 2445 amdgpu_debugfs_regs_cleanup(adev);
d38ceaf9
AD
2446}
2447
2448
2449/*
2450 * Suspend & resume.
2451 */
2452/**
810ddc3a 2453 * amdgpu_device_suspend - initiate device suspend
d38ceaf9
AD
2454 *
2455 * @dev: drm dev pointer
2456 * @suspend: suspend state
2457 *
2458 * Puts the hw in the suspend state (all asics).
2459 * Returns 0 for success or an error on failure.
2460 * Called at driver suspend.
2461 */
810ddc3a 2462int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
d38ceaf9
AD
2463{
2464 struct amdgpu_device *adev;
2465 struct drm_crtc *crtc;
2466 struct drm_connector *connector;
5ceb54c6 2467 int r;
d38ceaf9
AD
2468
2469 if (dev == NULL || dev->dev_private == NULL) {
2470 return -ENODEV;
2471 }
2472
2473 adev = dev->dev_private;
2474
2475 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2476 return 0;
2477
2478 drm_kms_helper_poll_disable(dev);
2479
4562236b
HW
2480 if (!amdgpu_device_has_dc_support(adev)) {
2481 /* turn off display hw */
2482 drm_modeset_lock_all(dev);
2483 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2484 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2485 }
2486 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2487 }
2488
ba997709
YZ
2489 amdgpu_amdkfd_suspend(adev);
2490
756e6880 2491 /* unpin the front buffers and cursors */
d38ceaf9 2492 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
756e6880 2493 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
d38ceaf9
AD
2494 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2495 struct amdgpu_bo *robj;
2496
756e6880
AD
2497 if (amdgpu_crtc->cursor_bo) {
2498 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2499 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2500 if (r == 0) {
2501 amdgpu_bo_unpin(aobj);
2502 amdgpu_bo_unreserve(aobj);
2503 }
2504 }
2505
d38ceaf9
AD
2506 if (rfb == NULL || rfb->obj == NULL) {
2507 continue;
2508 }
2509 robj = gem_to_amdgpu_bo(rfb->obj);
2510 /* don't unpin kernel fb objects */
2511 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
7a6901d7 2512 r = amdgpu_bo_reserve(robj, true);
d38ceaf9
AD
2513 if (r == 0) {
2514 amdgpu_bo_unpin(robj);
2515 amdgpu_bo_unreserve(robj);
2516 }
2517 }
2518 }
2519 /* evict vram memory */
2520 amdgpu_bo_evict_vram(adev);
2521
5ceb54c6 2522 amdgpu_fence_driver_suspend(adev);
d38ceaf9
AD
2523
2524 r = amdgpu_suspend(adev);
2525
a0a71e49
AD
2526 /* evict remaining vram memory
2527 * This second call to evict vram is to evict the gart page table
2528 * using the CPU.
2529 */
d38ceaf9
AD
2530 amdgpu_bo_evict_vram(adev);
2531
d05da0e2 2532 amdgpu_atombios_scratch_regs_save(adev);
d38ceaf9
AD
2533 pci_save_state(dev->pdev);
2534 if (suspend) {
2535 /* Shut down the device */
2536 pci_disable_device(dev->pdev);
2537 pci_set_power_state(dev->pdev, PCI_D3hot);
74b0b157 2538 } else {
2539 r = amdgpu_asic_reset(adev);
2540 if (r)
2541 DRM_ERROR("amdgpu asic reset failed\n");
d38ceaf9
AD
2542 }
2543
2544 if (fbcon) {
2545 console_lock();
2546 amdgpu_fbdev_set_suspend(adev, 1);
2547 console_unlock();
2548 }
2549 return 0;
2550}
2551
2552/**
810ddc3a 2553 * amdgpu_device_resume - initiate device resume
d38ceaf9
AD
2554 *
2555 * @dev: drm dev pointer
2556 *
2557 * Bring the hw back to operating state (all asics).
2558 * Returns 0 for success or an error on failure.
2559 * Called at driver resume.
2560 */
810ddc3a 2561int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
d38ceaf9
AD
2562{
2563 struct drm_connector *connector;
2564 struct amdgpu_device *adev = dev->dev_private;
756e6880 2565 struct drm_crtc *crtc;
03161a6e 2566 int r = 0;
d38ceaf9
AD
2567
2568 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2569 return 0;
2570
74b0b157 2571 if (fbcon)
d38ceaf9 2572 console_lock();
74b0b157 2573
d38ceaf9
AD
2574 if (resume) {
2575 pci_set_power_state(dev->pdev, PCI_D0);
2576 pci_restore_state(dev->pdev);
74b0b157 2577 r = pci_enable_device(dev->pdev);
03161a6e
HR
2578 if (r)
2579 goto unlock;
d38ceaf9 2580 }
d05da0e2 2581 amdgpu_atombios_scratch_regs_restore(adev);
d38ceaf9
AD
2582
2583 /* post card */
c836fec5 2584 if (amdgpu_need_post(adev)) {
74b0b157 2585 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2586 if (r)
2587 DRM_ERROR("amdgpu asic init failed\n");
2588 }
d38ceaf9
AD
2589
2590 r = amdgpu_resume(adev);
e6707218 2591 if (r) {
ca198528 2592 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
03161a6e 2593 goto unlock;
e6707218 2594 }
5ceb54c6
AD
2595 amdgpu_fence_driver_resume(adev);
2596
ca198528
FC
2597 if (resume) {
2598 r = amdgpu_ib_ring_tests(adev);
2599 if (r)
2600 DRM_ERROR("ib ring test failed (%d).\n", r);
2601 }
d38ceaf9
AD
2602
2603 r = amdgpu_late_init(adev);
03161a6e
HR
2604 if (r)
2605 goto unlock;
d38ceaf9 2606
756e6880
AD
2607 /* pin cursors */
2608 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2609 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2610
2611 if (amdgpu_crtc->cursor_bo) {
2612 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2613 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2614 if (r == 0) {
2615 r = amdgpu_bo_pin(aobj,
2616 AMDGPU_GEM_DOMAIN_VRAM,
2617 &amdgpu_crtc->cursor_addr);
2618 if (r != 0)
2619 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2620 amdgpu_bo_unreserve(aobj);
2621 }
2622 }
2623 }
ba997709
YZ
2624 r = amdgpu_amdkfd_resume(adev);
2625 if (r)
2626 return r;
756e6880 2627
d38ceaf9
AD
2628 /* blat the mode back in */
2629 if (fbcon) {
4562236b
HW
2630 if (!amdgpu_device_has_dc_support(adev)) {
2631 /* pre DCE11 */
2632 drm_helper_resume_force_mode(dev);
2633
2634 /* turn on display hw */
2635 drm_modeset_lock_all(dev);
2636 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2637 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2638 }
2639 drm_modeset_unlock_all(dev);
2640 } else {
2641 /*
2642 * There is no equivalent atomic helper to turn on
2643 * display, so we defined our own function for this,
2644 * once suspend resume is supported by the atomic
2645 * framework this will be reworked
2646 */
2647 amdgpu_dm_display_resume(adev);
d38ceaf9
AD
2648 }
2649 }
2650
2651 drm_kms_helper_poll_enable(dev);
23a1a9e5
L
2652
2653 /*
2654 * Most of the connector probing functions try to acquire runtime pm
2655 * refs to ensure that the GPU is powered on when connector polling is
2656 * performed. Since we're calling this from a runtime PM callback,
2657 * trying to acquire rpm refs will cause us to deadlock.
2658 *
2659 * Since we're guaranteed to be holding the rpm lock, it's safe to
2660 * temporarily disable the rpm helpers so this doesn't deadlock us.
2661 */
2662#ifdef CONFIG_PM
2663 dev->dev->power.disable_depth++;
2664#endif
4562236b
HW
2665 if (!amdgpu_device_has_dc_support(adev))
2666 drm_helper_hpd_irq_event(dev);
2667 else
2668 drm_kms_helper_hotplug_event(dev);
23a1a9e5
L
2669#ifdef CONFIG_PM
2670 dev->dev->power.disable_depth--;
2671#endif
d38ceaf9 2672
03161a6e 2673 if (fbcon)
d38ceaf9 2674 amdgpu_fbdev_set_suspend(adev, 0);
03161a6e
HR
2675
2676unlock:
2677 if (fbcon)
d38ceaf9 2678 console_unlock();
d38ceaf9 2679
03161a6e 2680 return r;
d38ceaf9
AD
2681}
2682
63fbf42f
CZ
2683static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2684{
2685 int i;
2686 bool asic_hang = false;
2687
f993d628
ML
2688 if (amdgpu_sriov_vf(adev))
2689 return true;
2690
63fbf42f 2691 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2692 if (!adev->ip_blocks[i].status.valid)
63fbf42f 2693 continue;
a1255107
AD
2694 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2695 adev->ip_blocks[i].status.hang =
2696 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2697 if (adev->ip_blocks[i].status.hang) {
2698 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
2699 asic_hang = true;
2700 }
2701 }
2702 return asic_hang;
2703}
2704
4d446656 2705static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
2706{
2707 int i, r = 0;
2708
2709 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2710 if (!adev->ip_blocks[i].status.valid)
d31a501e 2711 continue;
a1255107
AD
2712 if (adev->ip_blocks[i].status.hang &&
2713 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2714 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
2715 if (r)
2716 return r;
2717 }
2718 }
2719
2720 return 0;
2721}
2722
35d782fe
CZ
2723static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2724{
da146d3b
AD
2725 int i;
2726
2727 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2728 if (!adev->ip_blocks[i].status.valid)
da146d3b 2729 continue;
a1255107
AD
2730 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2731 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2732 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
98512bb8
KW
2733 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2734 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
a1255107 2735 if (adev->ip_blocks[i].status.hang) {
da146d3b
AD
2736 DRM_INFO("Some blocks need a full reset!\n");
2737 return true;
2738 }
2739 }
35d782fe
CZ
2740 }
2741 return false;
2742}
2743
2744static int amdgpu_soft_reset(struct amdgpu_device *adev)
2745{
2746 int i, r = 0;
2747
2748 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2749 if (!adev->ip_blocks[i].status.valid)
35d782fe 2750 continue;
a1255107
AD
2751 if (adev->ip_blocks[i].status.hang &&
2752 adev->ip_blocks[i].version->funcs->soft_reset) {
2753 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
2754 if (r)
2755 return r;
2756 }
2757 }
2758
2759 return 0;
2760}
2761
2762static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2763{
2764 int i, r = 0;
2765
2766 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2767 if (!adev->ip_blocks[i].status.valid)
35d782fe 2768 continue;
a1255107
AD
2769 if (adev->ip_blocks[i].status.hang &&
2770 adev->ip_blocks[i].version->funcs->post_soft_reset)
2771 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
2772 if (r)
2773 return r;
2774 }
2775
2776 return 0;
2777}
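/*
 * The helpers above make up the "soft" reset path.  A condensed sketch of how
 * amdgpu_gpu_reset() below strings them together before falling back to a
 * full ASIC reset:
 *
 *	need_full_reset = amdgpu_need_full_reset(adev);
 *	if (!need_full_reset) {
 *		amdgpu_pre_soft_reset(adev);
 *		r = amdgpu_soft_reset(adev);
 *		amdgpu_post_soft_reset(adev);
 *		if (r || amdgpu_check_soft_reset(adev))
 *			need_full_reset = true;
 *	}
 */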
2778
3ad81f16
CZ
2779bool amdgpu_need_backup(struct amdgpu_device *adev)
2780{
2781 if (adev->flags & AMD_IS_APU)
2782 return false;
2783
2784 return amdgpu_lockup_timeout > 0;
2785}
2786
53cdccd5
CZ
2787static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2788 struct amdgpu_ring *ring,
2789 struct amdgpu_bo *bo,
f54d1867 2790 struct dma_fence **fence)
53cdccd5
CZ
2791{
2792 uint32_t domain;
2793 int r;
2794
23d2e504
RH
2795 if (!bo->shadow)
2796 return 0;
2797
1d284797 2798 r = amdgpu_bo_reserve(bo, true);
23d2e504
RH
2799 if (r)
2800 return r;
2801 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2802 /* if bo has been evicted, then no need to recover */
2803 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
82521316
RH
2804 r = amdgpu_bo_validate(bo->shadow);
2805 if (r) {
2806 DRM_ERROR("bo validate failed!\n");
2807 goto err;
2808 }
2809
23d2e504 2810 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
53cdccd5 2811 NULL, fence, true);
23d2e504
RH
2812 if (r) {
2813 DRM_ERROR("recover page table failed!\n");
2814 goto err;
2815 }
2816 }
53cdccd5 2817err:
23d2e504
RH
2818 amdgpu_bo_unreserve(bo);
2819 return r;
53cdccd5
CZ
2820}
2821
a90ad3c2
ML
2822/**
2823 * amdgpu_sriov_gpu_reset - reset the asic
2824 *
2825 * @adev: amdgpu device pointer
7225f873 2826 * @job: the job that triggered the hang
a90ad3c2
ML
2827 *
2828 * Attempt to reset the GPU if it has hung (all asics),
2829 * for the SRIOV case.
2830 * Returns 0 for success or an error on failure.
2831 */
7225f873 2832int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
a90ad3c2 2833{
65781c78 2834 int i, j, r = 0;
a90ad3c2
ML
2835 int resched;
2836 struct amdgpu_bo *bo, *tmp;
2837 struct amdgpu_ring *ring;
2838 struct dma_fence *fence = NULL, *next = NULL;
2839
147b5983 2840 mutex_lock(&adev->virt.lock_reset);
a90ad3c2 2841 atomic_inc(&adev->gpu_reset_counter);
3224a12b 2842 adev->in_sriov_reset = true;
a90ad3c2
ML
2843
2844 /* block TTM */
2845 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2846
65781c78
ML
2847 /* start from the ring that triggered the GPU hang */
2848 j = job ? job->ring->idx : 0;
a90ad3c2 2849
65781c78
ML
2850 /* block scheduler */
2851 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2852 ring = adev->rings[i % AMDGPU_MAX_RINGS];
a90ad3c2
ML
2853 if (!ring || !ring->sched.thread)
2854 continue;
2855
2856 kthread_park(ring->sched.thread);
65781c78
ML
2857
2858 if (job && j != i)
2859 continue;
2860
4f059ecd 2861 /* give the job one last chance to be removed from the mirror list,
65781c78 2862 * since we have already spent time in kthread_park() */
4f059ecd 2863 if (job && list_empty(&job->base.node)) {
65781c78
ML
2864 kthread_unpark(ring->sched.thread);
2865 goto give_up_reset;
2866 }
2867
2868 if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
2869 amd_sched_job_kickout(&job->base);
2870
2871 /* only do job_reset on the hang ring if @job not NULL */
a90ad3c2 2872 amd_sched_hw_job_reset(&ring->sched);
a90ad3c2 2873
65781c78
ML
2874 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2875 amdgpu_fence_driver_force_completion_ring(ring);
2876 }
a90ad3c2
ML
2877
2878 /* request to take full control of GPU before re-initialization */
7225f873 2879 if (job)
a90ad3c2
ML
2880 amdgpu_virt_reset_gpu(adev);
2881 else
2882 amdgpu_virt_request_full_gpu(adev, true);
2883
2884
2885 /* Resume IP prior to SMC */
e4f0fdcc 2886 amdgpu_sriov_reinit_early(adev);
a90ad3c2
ML
2887
2888 /* we need to recover the GART prior to running SMC/CP/SDMA resume */
2889 amdgpu_ttm_recover_gart(adev);
2890
2891 /* now we are okay to resume SMC/CP/SDMA */
e4f0fdcc 2892 amdgpu_sriov_reinit_late(adev);
a90ad3c2
ML
2893
2894 amdgpu_irq_gpu_reset_resume_helper(adev);
2895
2896 if (amdgpu_ib_ring_tests(adev))
2897 dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2898
2899 /* release full control of GPU after ib test */
2900 amdgpu_virt_release_full_gpu(adev, true);
2901
2902 DRM_INFO("recover vram bo from shadow\n");
2903
2904 ring = adev->mman.buffer_funcs_ring;
2905 mutex_lock(&adev->shadow_list_lock);
2906 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
236763d3 2907 next = NULL;
a90ad3c2
ML
2908 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2909 if (fence) {
2910 r = dma_fence_wait(fence, false);
2911 if (r) {
2912 WARN(r, "recovery from shadow isn't completed\n");
2913 break;
2914 }
2915 }
2916
2917 dma_fence_put(fence);
2918 fence = next;
2919 }
2920 mutex_unlock(&adev->shadow_list_lock);
2921
2922 if (fence) {
2923 r = dma_fence_wait(fence, false);
2924 if (r)
2925 WARN(r, "recovery from shadow isn't completed\n");
2926 }
2927 dma_fence_put(fence);
2928
65781c78
ML
2929 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2930 ring = adev->rings[i % AMDGPU_MAX_RINGS];
a90ad3c2
ML
2931 if (!ring || !ring->sched.thread)
2932 continue;
2933
65781c78
ML
2934 if (job && j != i) {
2935 kthread_unpark(ring->sched.thread);
2936 continue;
2937 }
2938
a90ad3c2
ML
2939 amd_sched_job_recovery(&ring->sched);
2940 kthread_unpark(ring->sched.thread);
2941 }
2942
2943 drm_helper_resume_force_mode(adev->ddev);
65781c78 2944give_up_reset:
a90ad3c2
ML
2945 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2946 if (r) {
2947 /* bad news, how do we tell userspace? */
2948 dev_info(adev->dev, "GPU reset failed\n");
65781c78
ML
2949 } else {
2950 dev_info(adev->dev, "GPU reset succeeded!\n");
a90ad3c2
ML
2951 }
2952
3224a12b 2953 adev->in_sriov_reset = false;
147b5983 2954 mutex_unlock(&adev->virt.lock_reset);
a90ad3c2
ML
2955 return r;
2956}
2957
d38ceaf9
AD
2958/**
2959 * amdgpu_gpu_reset - reset the asic
2960 *
2961 * @adev: amdgpu device pointer
2962 *
2963 * Attempt to reset the GPU if it has hung (all asics).
2964 * Returns 0 for success or an error on failure.
2965 */
2966int amdgpu_gpu_reset(struct amdgpu_device *adev)
2967{
4562236b 2968 struct drm_atomic_state *state = NULL;
d38ceaf9
AD
2969 int i, r;
2970 int resched;
0c49e0b8 2971 bool need_full_reset, vram_lost = false;
fb140b29 2972
63fbf42f
CZ
2973 if (!amdgpu_check_soft_reset(adev)) {
2974 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2975 return 0;
2976 }
d38ceaf9 2977
d94aed5a 2978 atomic_inc(&adev->gpu_reset_counter);
d38ceaf9 2979
a3c47d6b
CZ
2980 /* block TTM */
2981 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
4562236b
HW
2982 /* store modesetting */
2983 if (amdgpu_device_has_dc_support(adev))
2984 state = drm_atomic_helper_suspend(adev->ddev);
a3c47d6b 2985
0875dc9e
CZ
2986 /* block scheduler */
2987 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2988 struct amdgpu_ring *ring = adev->rings[i];
2989
51687759 2990 if (!ring || !ring->sched.thread)
0875dc9e
CZ
2991 continue;
2992 kthread_park(ring->sched.thread);
aa1c8900 2993 amd_sched_hw_job_reset(&ring->sched);
0875dc9e 2994 }
2200edac
CZ
2995 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2996 amdgpu_fence_driver_force_completion(adev);
d38ceaf9 2997
35d782fe 2998 need_full_reset = amdgpu_need_full_reset(adev);
d38ceaf9 2999
35d782fe
CZ
3000 if (!need_full_reset) {
3001 amdgpu_pre_soft_reset(adev);
3002 r = amdgpu_soft_reset(adev);
3003 amdgpu_post_soft_reset(adev);
3004 if (r || amdgpu_check_soft_reset(adev)) {
3005 DRM_INFO("soft reset failed, will fallback to full reset!\n");
3006 need_full_reset = true;
3007 }
f1aa7e08
CZ
3008 }
3009
35d782fe 3010 if (need_full_reset) {
35d782fe 3011 r = amdgpu_suspend(adev);
bfa99269 3012
35d782fe 3013retry:
d05da0e2 3014 amdgpu_atombios_scratch_regs_save(adev);
35d782fe 3015 r = amdgpu_asic_reset(adev);
d05da0e2 3016 amdgpu_atombios_scratch_regs_restore(adev);
35d782fe
CZ
3017 /* post card */
3018 amdgpu_atom_asic_init(adev->mode_info.atom_context);
3019
3020 if (!r) {
3021 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
fcf0649f
CZ
3022 r = amdgpu_resume_phase1(adev);
3023 if (r)
3024 goto out;
0c49e0b8 3025 vram_lost = amdgpu_check_vram_lost(adev);
f1892138 3026 if (vram_lost) {
0c49e0b8 3027 DRM_ERROR("VRAM is lost!\n");
f1892138
CZ
3028 atomic_inc(&adev->vram_lost_counter);
3029 }
fcf0649f
CZ
3030 r = amdgpu_ttm_recover_gart(adev);
3031 if (r)
3032 goto out;
3033 r = amdgpu_resume_phase2(adev);
3034 if (r)
3035 goto out;
0c49e0b8
CZ
3036 if (vram_lost)
3037 amdgpu_fill_reset_magic(adev);
35d782fe 3038 }
d38ceaf9 3039 }
fcf0649f 3040out:
d38ceaf9 3041 if (!r) {
e72cfd58 3042 amdgpu_irq_gpu_reset_resume_helper(adev);
1f465087
CZ
3043 r = amdgpu_ib_ring_tests(adev);
3044 if (r) {
3045 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
40019dc4 3046 r = amdgpu_suspend(adev);
53cdccd5 3047 need_full_reset = true;
40019dc4 3048 goto retry;
1f465087 3049 }
53cdccd5
CZ
3050 /*
3051 * recover VM page tables, since we cannot depend on VRAM being
3052 * consistent after a full GPU reset.
3053 */
3054 if (need_full_reset && amdgpu_need_backup(adev)) {
3055 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
3056 struct amdgpu_bo *bo, *tmp;
f54d1867 3057 struct dma_fence *fence = NULL, *next = NULL;
53cdccd5
CZ
3058
3059 DRM_INFO("recover vram bo from shadow\n");
3060 mutex_lock(&adev->shadow_list_lock);
3061 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
236763d3 3062 next = NULL;
53cdccd5
CZ
3063 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
3064 if (fence) {
f54d1867 3065 r = dma_fence_wait(fence, false);
53cdccd5 3066 if (r) {
1d7b17b0 3067 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5
CZ
3068 break;
3069 }
3070 }
1f465087 3071
f54d1867 3072 dma_fence_put(fence);
53cdccd5
CZ
3073 fence = next;
3074 }
3075 mutex_unlock(&adev->shadow_list_lock);
3076 if (fence) {
f54d1867 3077 r = dma_fence_wait(fence, false);
53cdccd5 3078 if (r)
1d7b17b0 3079 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5 3080 }
f54d1867 3081 dma_fence_put(fence);
53cdccd5 3082 }
d38ceaf9
AD
3083 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3084 struct amdgpu_ring *ring = adev->rings[i];
51687759
CZ
3085
3086 if (!ring || !ring->sched.thread)
d38ceaf9 3087 continue;
53cdccd5 3088
aa1c8900 3089 amd_sched_job_recovery(&ring->sched);
0875dc9e 3090 kthread_unpark(ring->sched.thread);
d38ceaf9 3091 }
d38ceaf9 3092 } else {
2200edac 3093 dev_err(adev->dev, "asic resume failed (%d).\n", r);
d38ceaf9 3094 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
51687759 3095 if (adev->rings[i] && adev->rings[i]->sched.thread) {
0875dc9e 3096 kthread_unpark(adev->rings[i]->sched.thread);
0875dc9e 3097 }
d38ceaf9
AD
3098 }
3099 }
3100
4562236b
HW
3101 if (amdgpu_device_has_dc_support(adev)) {
3102 r = drm_atomic_helper_resume(adev->ddev, state);
3103 amdgpu_dm_display_resume(adev);
3104 } else
3105 drm_helper_resume_force_mode(adev->ddev);
d38ceaf9
AD
3106
3107 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
89041940 3108 if (r) {
d38ceaf9
AD
3109 /* bad news, how do we tell userspace? */
3110 dev_info(adev->dev, "GPU reset failed\n");
89041940
GW
3111 }
3112 else {
6643be65 3113 dev_info(adev->dev, "GPU reset succeeded!\n");
89041940 3114 }
d38ceaf9 3115
89041940 3116 amdgpu_vf_error_trans_all(adev);
d38ceaf9
AD
3117 return r;
3118}
3119
d0dd7f0c
AD
3120void amdgpu_get_pcie_info(struct amdgpu_device *adev)
3121{
3122 u32 mask;
3123 int ret;
3124
cd474ba0
AD
3125 if (amdgpu_pcie_gen_cap)
3126 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 3127
cd474ba0
AD
3128 if (amdgpu_pcie_lane_cap)
3129 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 3130
cd474ba0
AD
3131 /* covers APUs as well */
3132 if (pci_is_root_bus(adev->pdev->bus)) {
3133 if (adev->pm.pcie_gen_mask == 0)
3134 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3135 if (adev->pm.pcie_mlw_mask == 0)
3136 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 3137 return;
cd474ba0 3138 }
d0dd7f0c 3139
cd474ba0
AD
3140 if (adev->pm.pcie_gen_mask == 0) {
3141 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
3142 if (!ret) {
3143 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3144 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3145 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3146
3147 if (mask & DRM_PCIE_SPEED_25)
3148 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3149 if (mask & DRM_PCIE_SPEED_50)
3150 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
3151 if (mask & DRM_PCIE_SPEED_80)
3152 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
3153 } else {
3154 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3155 }
3156 }
3157 if (adev->pm.pcie_mlw_mask == 0) {
3158 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
3159 if (!ret) {
3160 switch (mask) {
3161 case 32:
3162 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3163 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3164 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3165 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3166 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3167 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3168 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3169 break;
3170 case 16:
3171 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3172 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3173 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3174 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3175 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3176 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3177 break;
3178 case 12:
3179 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3180 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3181 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3182 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3183 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3184 break;
3185 case 8:
3186 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3187 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3188 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3189 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3190 break;
3191 case 4:
3192 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3193 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3194 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3195 break;
3196 case 2:
3197 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3198 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3199 break;
3200 case 1:
3201 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3202 break;
3203 default:
3204 break;
3205 }
3206 } else {
3207 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c
AD
3208 }
3209 }
3210}
d38ceaf9
AD
3211
3212/*
3213 * Debugfs
3214 */
3215int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
06ab6832 3216 const struct drm_info_list *files,
d38ceaf9
AD
3217 unsigned nfiles)
3218{
3219 unsigned i;
3220
3221 for (i = 0; i < adev->debugfs_count; i++) {
3222 if (adev->debugfs[i].files == files) {
3223 /* Already registered */
3224 return 0;
3225 }
3226 }
3227
3228 i = adev->debugfs_count + 1;
3229 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
3230 DRM_ERROR("Reached maximum number of debugfs components.\n");
3231 DRM_ERROR("Report this so we can increase "
3232 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
3233 return -EINVAL;
3234 }
3235 adev->debugfs[adev->debugfs_count].files = files;
3236 adev->debugfs[adev->debugfs_count].num_files = nfiles;
3237 adev->debugfs_count = i;
3238#if defined(CONFIG_DEBUG_FS)
d38ceaf9
AD
3239 drm_debugfs_create_files(files, nfiles,
3240 adev->ddev->primary->debugfs_root,
3241 adev->ddev->primary);
3242#endif
3243 return 0;
3244}
3245
d38ceaf9
AD
3246#if defined(CONFIG_DEBUG_FS)
3247
3248static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
3249 size_t size, loff_t *pos)
3250{
45063097 3251 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
3252 ssize_t result = 0;
3253 int r;
bd12267d 3254 bool pm_pg_lock, use_bank;
56628159 3255 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
3256
3257 if (size & 0x3 || *pos & 0x3)
3258 return -EINVAL;
3259
bd12267d
TSD
3260 /* are we reading registers for which a PG lock is necessary? */
3261 pm_pg_lock = (*pos >> 23) & 1;
3262
56628159 3263 if (*pos & (1ULL << 62)) {
0b968650
TSD
3264 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
3265 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
3266 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
32977f93
TSD
3267
3268 if (se_bank == 0x3FF)
3269 se_bank = 0xFFFFFFFF;
3270 if (sh_bank == 0x3FF)
3271 sh_bank = 0xFFFFFFFF;
3272 if (instance_bank == 0x3FF)
3273 instance_bank = 0xFFFFFFFF;
56628159 3274 use_bank = 1;
56628159
TSD
3275 } else {
3276 use_bank = 0;
3277 }
3278
801a6aa9 3279 *pos &= (1UL << 22) - 1;
bd12267d 3280
56628159 3281 if (use_bank) {
32977f93
TSD
3282 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3283 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
56628159
TSD
3284 return -EINVAL;
3285 mutex_lock(&adev->grbm_idx_mutex);
3286 amdgpu_gfx_select_se_sh(adev, se_bank,
3287 sh_bank, instance_bank);
3288 }
3289
bd12267d
TSD
3290 if (pm_pg_lock)
3291 mutex_lock(&adev->pm.mutex);
3292
d38ceaf9
AD
3293 while (size) {
3294 uint32_t value;
3295
3296 if (*pos > adev->rmmio_size)
56628159 3297 goto end;
d38ceaf9
AD
3298
3299 value = RREG32(*pos >> 2);
3300 r = put_user(value, (uint32_t *)buf);
56628159
TSD
3301 if (r) {
3302 result = r;
3303 goto end;
3304 }
d38ceaf9
AD
3305
3306 result += 4;
3307 buf += 4;
3308 *pos += 4;
3309 size -= 4;
3310 }
3311
56628159
TSD
3312end:
3313 if (use_bank) {
3314 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3315 mutex_unlock(&adev->grbm_idx_mutex);
3316 }
3317
bd12267d
TSD
3318 if (pm_pg_lock)
3319 mutex_unlock(&adev->pm.mutex);
3320
d38ceaf9
AD
3321 return result;
3322}
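/*
 * A minimal sketch (not part of the driver, variable names are illustrative)
 * of how userspace could compose the file offset decoded above: bit 62
 * enables banked access, bits 24..33 / 34..43 / 44..53 carry the SE / SH /
 * instance selectors (0x3FF meaning broadcast), bit 23 requests the PG lock,
 * and the low 22 bits are the byte offset of the MMIO register:
 *
 *	loff_t pos = (1ULL << 62) |
 *		     ((u64)se << 24) | ((u64)sh << 34) | ((u64)instance << 44) |
 *		     (want_pg_lock ? (1ULL << 23) : 0) |
 *		     (reg_byte_offset & ((1ULL << 22) - 1));
 */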
3323
3324static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
3325 size_t size, loff_t *pos)
3326{
45063097 3327 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
3328 ssize_t result = 0;
3329 int r;
394fdde2
TSD
3330 bool pm_pg_lock, use_bank;
3331 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
3332
3333 if (size & 0x3 || *pos & 0x3)
3334 return -EINVAL;
3335
394fdde2
TSD
3336 /* are we writing registers for which a PG lock is necessary? */
3337 pm_pg_lock = (*pos >> 23) & 1;
3338
3339 if (*pos & (1ULL << 62)) {
0b968650
TSD
3340 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
3341 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
3342 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
394fdde2
TSD
3343
3344 if (se_bank == 0x3FF)
3345 se_bank = 0xFFFFFFFF;
3346 if (sh_bank == 0x3FF)
3347 sh_bank = 0xFFFFFFFF;
3348 if (instance_bank == 0x3FF)
3349 instance_bank = 0xFFFFFFFF;
3350 use_bank = 1;
3351 } else {
3352 use_bank = 0;
3353 }
3354
801a6aa9 3355 *pos &= (1UL << 22) - 1;
394fdde2
TSD
3356
3357 if (use_bank) {
3358 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3359 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3360 return -EINVAL;
3361 mutex_lock(&adev->grbm_idx_mutex);
3362 amdgpu_gfx_select_se_sh(adev, se_bank,
3363 sh_bank, instance_bank);
3364 }
3365
3366 if (pm_pg_lock)
3367 mutex_lock(&adev->pm.mutex);
3368
d38ceaf9
AD
3369 while (size) {
3370 uint32_t value;
3371
3372 if (*pos > adev->rmmio_size)
3373 return result;
3374
3375 r = get_user(value, (uint32_t *)buf);
3376 if (r)
3377 return r;
3378
3379 WREG32(*pos >> 2, value);
3380
3381 result += 4;
3382 buf += 4;
3383 *pos += 4;
3384 size -= 4;
3385 }
3386
394fdde2
TSD
3387 if (use_bank) {
3388 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3389 mutex_unlock(&adev->grbm_idx_mutex);
3390 }
3391
3392 if (pm_pg_lock)
3393 mutex_unlock(&adev->pm.mutex);
3394
d38ceaf9
AD
3395 return result;
3396}
3397
adcec288
TSD
3398static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
3399 size_t size, loff_t *pos)
3400{
45063097 3401 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3402 ssize_t result = 0;
3403 int r;
3404
3405 if (size & 0x3 || *pos & 0x3)
3406 return -EINVAL;
3407
3408 while (size) {
3409 uint32_t value;
3410
3411 value = RREG32_PCIE(*pos >> 2);
3412 r = put_user(value, (uint32_t *)buf);
3413 if (r)
3414 return r;
3415
3416 result += 4;
3417 buf += 4;
3418 *pos += 4;
3419 size -= 4;
3420 }
3421
3422 return result;
3423}
3424
3425static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3426 size_t size, loff_t *pos)
3427{
45063097 3428 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3429 ssize_t result = 0;
3430 int r;
3431
3432 if (size & 0x3 || *pos & 0x3)
3433 return -EINVAL;
3434
3435 while (size) {
3436 uint32_t value;
3437
3438 r = get_user(value, (uint32_t *)buf);
3439 if (r)
3440 return r;
3441
3442 WREG32_PCIE(*pos >> 2, value);
3443
3444 result += 4;
3445 buf += 4;
3446 *pos += 4;
3447 size -= 4;
3448 }
3449
3450 return result;
3451}
3452
3453static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3454 size_t size, loff_t *pos)
3455{
45063097 3456 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3457 ssize_t result = 0;
3458 int r;
3459
3460 if (size & 0x3 || *pos & 0x3)
3461 return -EINVAL;
3462
3463 while (size) {
3464 uint32_t value;
3465
3466 value = RREG32_DIDT(*pos >> 2);
3467 r = put_user(value, (uint32_t *)buf);
3468 if (r)
3469 return r;
3470
3471 result += 4;
3472 buf += 4;
3473 *pos += 4;
3474 size -= 4;
3475 }
3476
3477 return result;
3478}
3479
3480static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3481 size_t size, loff_t *pos)
3482{
45063097 3483 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3484 ssize_t result = 0;
3485 int r;
3486
3487 if (size & 0x3 || *pos & 0x3)
3488 return -EINVAL;
3489
3490 while (size) {
3491 uint32_t value;
3492
3493 r = get_user(value, (uint32_t *)buf);
3494 if (r)
3495 return r;
3496
3497 WREG32_DIDT(*pos >> 2, value);
3498
3499 result += 4;
3500 buf += 4;
3501 *pos += 4;
3502 size -= 4;
3503 }
3504
3505 return result;
3506}
3507
3508static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3509 size_t size, loff_t *pos)
3510{
45063097 3511 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3512 ssize_t result = 0;
3513 int r;
3514
3515 if (size & 0x3 || *pos & 0x3)
3516 return -EINVAL;
3517
3518 while (size) {
3519 uint32_t value;
3520
6fc0deaf 3521 value = RREG32_SMC(*pos);
adcec288
TSD
3522 r = put_user(value, (uint32_t *)buf);
3523 if (r)
3524 return r;
3525
3526 result += 4;
3527 buf += 4;
3528 *pos += 4;
3529 size -= 4;
3530 }
3531
3532 return result;
3533}
3534
3535static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3536 size_t size, loff_t *pos)
3537{
45063097 3538 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3539 ssize_t result = 0;
3540 int r;
3541
3542 if (size & 0x3 || *pos & 0x3)
3543 return -EINVAL;
3544
3545 while (size) {
3546 uint32_t value;
3547
3548 r = get_user(value, (uint32_t *)buf);
3549 if (r)
3550 return r;
3551
6fc0deaf 3552 WREG32_SMC(*pos, value);
adcec288
TSD
3553
3554 result += 4;
3555 buf += 4;
3556 *pos += 4;
3557 size -= 4;
3558 }
3559
3560 return result;
3561}
3562
1e051413
TSD
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}
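/*
 * amdgpu_sensors: the file offset divided by four selects the sensor id that
 * is handed to the powerplay read_sensor() callback; a single query may
 * return more than one 32-bit value (valuesize reports how many bytes are
 * valid).  Reads fail with -EINVAL when dpm is disabled or no read_sensor
 * callback is available.
 */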
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	if (amdgpu_dpm == 0)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);
	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
		r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
	else
		return -EINVAL;

	if (size > valuesize)
		return -EINVAL;

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	return !r ? outsize : r;
}
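/*
 * amdgpu_wave: the 64-bit file offset encodes which wave to inspect:
 *   bits  0..6   byte offset into the returned wave data (4-byte aligned)
 *   bits  7..14  se (shader engine)
 *   bits 15..22  sh
 *   bits 23..30  cu
 *   bits 31..36  wave id
 *   bits 37..44  simd
 * The selected SE/SH/CU is latched under grbm_idx_mutex while the
 * ASIC-specific read_wave_data() callback fills the buffer.
 */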
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(6, 0));
	se = (*pos & GENMASK_ULL(14, 7)) >> 7;
	sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
	cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (!x)
		return -EINVAL;

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	return result;
}
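/*
 * amdgpu_gpr: the 64-bit file offset selects which wave's registers to dump:
 *   bits  0..11  starting register (dword) offset
 *   bits 12..19  se
 *   bits 20..27  sh
 *   bits 28..35  cu
 *   bits 36..43  wave id
 *   bits 44..51  simd
 *   bits 52..59  thread
 *   bits 60..61  bank (0 = VGPRs, 1 = SGPRs)
 */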
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = *pos & GENMASK_ULL(11, 0);
	se = (*pos & GENMASK_ULL(19, 12)) >> 12;
	sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
	cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
	wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
	simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;

	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size >> 2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size >> 2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	while (size) {
		uint32_t value;

		value = data[offset++];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

err:
	kfree(data);
	return result;
}
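/*
 * One file_operations instance per debugfs file; the debugfs_regs[] and
 * debugfs_regs_names[] tables below must stay in the same order, since
 * amdgpu_debugfs_regs_init() pairs them up by index.
 */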
static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
};
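/*
 * Create one debugfs file per entry in debugfs_regs_names[] under the DRM
 * minor's debugfs directory (typically /sys/kernel/debug/dri/<minor>/).
 * The plain register file is sized to the MMIO aperture so tools can seek
 * within it; as a purely illustrative example, a 32-bit register at byte
 * offset 0x8010 could be read with something like:
 *
 *   dd if=/sys/kernel/debug/dri/0/amdgpu_regs bs=4 count=1 skip=$((0x8010 / 4)) | xxd
 *
 * (the 0x8010 offset here is just a placeholder, not a specific register).
 */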
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (IS_ERR(ent)) {
			/* unwind the files created before the failure */
			for (j = 0; j < i; j++) {
				debugfs_remove(adev->debugfs_regs[j]);
				adev->debugfs_regs[j] = NULL;
			}
			return PTR_ERR(ent);
		}

		/* the first file covers the whole MMIO range, size it accordingly */
		if (!i)
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		if (adev->debugfs_regs[i]) {
			debugfs_remove(adev->debugfs_regs[i]);
			adev->debugfs_regs[i] = NULL;
		}
	}
}
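/*
 * amdgpu_test_ib: park every ring's scheduler thread so no new jobs are
 * pushed to the hardware, run the IB ring tests, report the result through
 * the seq_file, then unpark the schedulers again.
 */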
static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r = 0, i;

	/* stop the scheduler threads while the tests run */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
	}

	seq_printf(m, "run ib test:\n");
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		seq_printf(m, "ib ring tests failed (%d).\n", r);
	else
		seq_printf(m, "ib ring tests passed.\n");

	/* restart the scheduler threads */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_unpark(ring->sched.thread);
	}

	return 0;
}
static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
	{"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
};

static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
{
	return amdgpu_debugfs_add_files(adev,
					amdgpu_debugfs_test_ib_ring_list, 1);
}

int amdgpu_debugfs_init(struct drm_minor *minor)
{
	return 0;
}
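/*
 * amdgpu_vbios dumps the video BIOS image cached in adev->bios
 * (adev->bios_size bytes).  Reading the file, e.g. with
 * "cat /sys/kernel/debug/dri/0/amdgpu_vbios > vbios.rom", returns the
 * image as-is.
 */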
static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_write(m, adev->bios, adev->bios_size);
	return 0;
}

static const struct drm_info_list amdgpu_vbios_dump_list[] = {
	{"amdgpu_vbios",
	 amdgpu_debugfs_get_vbios_dump,
	 0, NULL},
};

static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
{
	return amdgpu_debugfs_add_files(adev,
					amdgpu_vbios_dump_list, 1);
}
#else
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
{
	return 0;
}
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
{
	return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif